import copy
import inspect
import os
from pathlib import Path
from typing import Callable, Dict, List, Optional, Union

import safetensors
import torch
import torch.nn as nn
from huggingface_hub import model_info
from huggingface_hub.constants import HF_HUB_OFFLINE

from ..models.modeling_utils import ModelMixin, load_state_dict
from ..utils import (
    USE_PEFT_BACKEND,
    _get_model_file,
    delete_adapter_layers,
    deprecate,
    is_accelerate_available,
    is_peft_available,
    is_transformers_available,
    logging,
    recurse_remove_peft_layers,
    set_adapter_layers,
    set_weights_and_activate_adapters,
)


if is_transformers_available():
    from transformers import PreTrainedModel

if is_peft_available():
    from peft.tuners.tuners_utils import BaseTunerLayer

if is_accelerate_available():
    from accelerate.hooks import AlignDevicesHook, CpuOffload, remove_hook_from_module

logger = logging.get_logger(__name__)

def fuse_text_encoder_lora(text_encoder, lora_scale=1.0, safe_fusing=False, adapter_names=None):
    """
    Fuses LoRAs for the text encoder.

    Args:
        text_encoder (`torch.nn.Module`):
            The text encoder module to set the adapter layers for. If `None`, it will try to get the `text_encoder`
            attribute.
        lora_scale (`float`, defaults to 1.0):
            Controls how much to influence the outputs with the LoRA parameters.
        safe_fusing (`bool`, defaults to `False`):
            Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them.
        adapter_names (`List[str]` or `str`):
            The names of the adapters to use.
    """
    merge_kwargs = {"safe_merge": safe_fusing}

    for module in text_encoder.modules():
        if isinstance(module, BaseTunerLayer):
            if lora_scale != 1.0:
                module.scale_layer(lora_scale)

            # For backwards compatibility with older PEFT versions, only pass `adapter_names`
            # when the installed PEFT exposes it on `merge`.
            supported_merge_kwargs = list(inspect.signature(module.merge).parameters)
            if "adapter_names" in supported_merge_kwargs:
                merge_kwargs["adapter_names"] = adapter_names
            elif "adapter_names" not in supported_merge_kwargs and adapter_names is not None:
                raise ValueError(
                    "The `adapter_names` argument is not supported with your PEFT version. "
                    "Please upgrade to the latest version of PEFT. `pip install -U peft`"
                )

            module.merge(**merge_kwargs)

def unfuse_text_encoder_lora(text_encoder):
    """
    Unfuses LoRAs for the text encoder.

    Args:
        text_encoder (`torch.nn.Module`):
            The text encoder module to set the adapter layers for. If `None`, it will try to get the `text_encoder`
            attribute.
    """
    for module in text_encoder.modules():
        if isinstance(module, BaseTunerLayer):
            module.unmerge()

def set_adapters_for_text_encoder(
    adapter_names: Union[List[str], str],
    text_encoder: Optional["PreTrainedModel"] = None,  # noqa: F821
    text_encoder_weights: Optional[Union[float, List[float], List[None]]] = None,
):
    """
    Sets the adapter layers for the text encoder.

    Args:
        adapter_names (`List[str]` or `str`):
            The names of the adapters to use.
        text_encoder (`torch.nn.Module`, *optional*):
            The text encoder module to set the adapter layers for. If `None`, it will try to get the `text_encoder`
            attribute.
        text_encoder_weights (`List[float]`, *optional*):
            The weights to use for the text encoder. If `None`, the weights are set to `1.0` for all the adapters.
    """
    if text_encoder is None:
        raise ValueError(
            "The pipeline does not have a default `pipe.text_encoder` class. Please make sure to pass a `text_encoder` instead."
        )

    def process_weights(adapter_names, weights):
        # Expand a single weight (or `None`) into one entry per adapter.
        if not isinstance(weights, list):
            weights = [weights] * len(adapter_names)

        if len(adapter_names) != len(weights):
            raise ValueError(
                f"Length of adapter names {len(adapter_names)} is not equal to the length of the weights {len(weights)}"
            )

        # Set `None` values to the default of 1.0.
        weights = [w if w is not None else 1.0 for w in weights]

        return weights

    adapter_names = [adapter_names] if isinstance(adapter_names, str) else adapter_names
    text_encoder_weights = process_weights(adapter_names, text_encoder_weights)
    set_weights_and_activate_adapters(text_encoder, adapter_names, text_encoder_weights)

def disable_lora_for_text_encoder(text_encoder: Optional["PreTrainedModel"] = None):  # noqa: F821
    """
    Disables the LoRA layers for the text encoder.

    Args:
        text_encoder (`torch.nn.Module`, *optional*):
            The text encoder module to disable the LoRA layers for. If `None`, it will try to get the `text_encoder`
            attribute.
    """
    if text_encoder is None:
        raise ValueError("Text Encoder not found.")
    set_adapter_layers(text_encoder, enabled=False)

def enable_lora_for_text_encoder(text_encoder: Optional["PreTrainedModel"] = None):  # noqa: F821
    """
    Enables the LoRA layers for the text encoder.

    Args:
        text_encoder (`torch.nn.Module`, *optional*):
            The text encoder module to enable the LoRA layers for. If `None`, it will try to get the `text_encoder`
            attribute.
    """
    if text_encoder is None:
        raise ValueError("Text Encoder not found.")
    set_adapter_layers(text_encoder, enabled=True)


def _remove_text_encoder_monkey_patch(text_encoder):
    recurse_remove_peft_layers(text_encoder)
    if getattr(text_encoder, "peft_config", None) is not None:
        del text_encoder.peft_config
        text_encoder._hf_peft_config_loaded = None


class LoraBaseMixin:
    """Utility class for handling LoRAs."""

    _lora_loadable_modules = []
    num_fused_loras = 0

    def load_lora_weights(self, **kwargs):
        raise NotImplementedError("`load_lora_weights()` is not implemented.")

    @classmethod
    def save_lora_weights(cls, **kwargs):
        raise NotImplementedError("`save_lora_weights()` not implemented.")

    @classmethod
    def lora_state_dict(cls, **kwargs):
        raise NotImplementedError("`lora_state_dict()` is not implemented.")
    @classmethod
    def _optionally_disable_offloading(cls, _pipeline):
        """
        Optionally removes offloading in case the pipeline has been already sequentially offloaded to CPU.

        Args:
            _pipeline (`DiffusionPipeline`):
                The pipeline to disable offloading for.

        Returns:
            tuple:
                A tuple indicating if `is_model_cpu_offload` or `is_sequential_cpu_offload` is True.
        """
        is_model_cpu_offload = False
        is_sequential_cpu_offload = False

        if _pipeline is not None and _pipeline.hf_device_map is None:
            for _, component in _pipeline.components.items():
                if isinstance(component, nn.Module) and hasattr(component, "_hf_hook"):
                    if not is_model_cpu_offload:
                        is_model_cpu_offload = isinstance(component._hf_hook, CpuOffload)
                    if not is_sequential_cpu_offload:
                        is_sequential_cpu_offload = isinstance(component._hf_hook, AlignDevicesHook) or (
                            hasattr(component._hf_hook, "hooks")
                            and isinstance(component._hf_hook.hooks[0], AlignDevicesHook)
                        )

                    logger.info(
                        "Accelerate hooks detected. Since you have called `load_lora_weights()`, the previous hooks will be first removed. Then the LoRA parameters will be loaded and the hooks will be applied again."
                    )
                    remove_hook_from_module(component, recurse=is_sequential_cpu_offload)

        return (is_model_cpu_offload, is_sequential_cpu_offload)

    @classmethod
    def _fetch_state_dict(
        cls,
        pretrained_model_name_or_path_or_dict,
        weight_name,
        use_safetensors,
        local_files_only,
        cache_dir,
        force_download,
        proxies,
        token,
        revision,
        subfolder,
        user_agent,
        allow_pickle,
    ):
        from .lora_pipeline import LORA_WEIGHT_NAME, LORA_WEIGHT_NAME_SAFE

        model_file = None
        if not isinstance(pretrained_model_name_or_path_or_dict, dict):
            # First try to load the `.safetensors` weights.
            if (use_safetensors and weight_name is None) or (
                weight_name is not None and weight_name.endswith(".safetensors")
            ):
                try:
                    # If no `weight_name` was given, try to guess it from the files in the repo/directory.
                    if weight_name is None:
                        weight_name = cls._best_guess_weight_name(
                            pretrained_model_name_or_path_or_dict,
                            file_extension=".safetensors",
                            local_files_only=local_files_only,
                        )
                    model_file = _get_model_file(
                        pretrained_model_name_or_path_or_dict,
                        weights_name=weight_name or LORA_WEIGHT_NAME_SAFE,
                        cache_dir=cache_dir,
                        force_download=force_download,
                        proxies=proxies,
                        local_files_only=local_files_only,
                        token=token,
                        revision=revision,
                        subfolder=subfolder,
                        user_agent=user_agent,
                    )
                    state_dict = safetensors.torch.load_file(model_file, device="cpu")
                except (IOError, safetensors.SafetensorError) as e:
                    if not allow_pickle:
                        raise e
                    # Otherwise, fall back to the pickled `.bin` weights below.
                    model_file = None

            if model_file is None:
                if weight_name is None:
                    weight_name = cls._best_guess_weight_name(
                        pretrained_model_name_or_path_or_dict,
                        file_extension=".bin",
                        local_files_only=local_files_only,
                    )
                model_file = _get_model_file(
                    pretrained_model_name_or_path_or_dict,
                    weights_name=weight_name or LORA_WEIGHT_NAME,
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    local_files_only=local_files_only,
                    token=token,
                    revision=revision,
                    subfolder=subfolder,
                    user_agent=user_agent,
                )
                state_dict = load_state_dict(model_file)
        else:
            state_dict = pretrained_model_name_or_path_or_dict

        return state_dict

    @classmethod
    def _best_guess_weight_name(
        cls, pretrained_model_name_or_path_or_dict, file_extension=".safetensors", local_files_only=False
    ):
        from .lora_pipeline import LORA_WEIGHT_NAME, LORA_WEIGHT_NAME_SAFE

        if local_files_only or HF_HUB_OFFLINE:
            raise ValueError("When using the offline mode, you must specify a `weight_name`.")

        targeted_files = []

        if os.path.isfile(pretrained_model_name_or_path_or_dict):
            return
        elif os.path.isdir(pretrained_model_name_or_path_or_dict):
            targeted_files = [
                f for f in os.listdir(pretrained_model_name_or_path_or_dict) if f.endswith(file_extension)
            ]
        else:
            files_in_repo = model_info(pretrained_model_name_or_path_or_dict).siblings
            targeted_files = [f.rfilename for f in files_in_repo if f.rfilename.endswith(file_extension)]
        if len(targeted_files) == 0:
            return

        # Filter out files that clearly do not hold LoRA weights ("scheduler", "optimizer" and
        # intermediate "checkpoint" files).
        unallowed_substrings = {"scheduler", "optimizer", "checkpoint"}
        targeted_files = list(
            filter(lambda x: all(substring not in x for substring in unallowed_substrings), targeted_files)
        )

        if any(f.endswith(LORA_WEIGHT_NAME) for f in targeted_files):
            targeted_files = list(filter(lambda x: x.endswith(LORA_WEIGHT_NAME), targeted_files))
        elif any(f.endswith(LORA_WEIGHT_NAME_SAFE) for f in targeted_files):
            targeted_files = list(filter(lambda x: x.endswith(LORA_WEIGHT_NAME_SAFE), targeted_files))

        if len(targeted_files) > 1:
            raise ValueError(
                f"Provided path contains more than one weights file in the {file_extension} format. Either specify "
                f"`weight_name` in `load_lora_weights` or make sure there's only one  `.safetensors` or `.bin` file in "
                f"{pretrained_model_name_or_path_or_dict}."
            )
        weight_name = targeted_files[0]
        return weight_name
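
    # Illustrative sketch (assumption, not taken from this file): given a Hub repo or local folder
    # that contains exactly one LoRA file, `_best_guess_weight_name` resolves the file name so that
    # callers do not have to pass `weight_name` explicitly, e.g.
    #
    #   name = LoraBaseMixin._best_guess_weight_name("some-user/some-lora-repo", file_extension=".safetensors")
    #   # -> the single `*.safetensors` file in that hypothetical repo, assuming the Hub is reachable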

    def unload_lora_weights(self):
        """
        Unloads the LoRA parameters.

        Examples:

        ```python
        >>> # Assuming `pipeline` is already loaded with the LoRA parameters.
        >>> pipeline.unload_lora_weights()
        >>> ...
        ```
        """
        if not USE_PEFT_BACKEND:
            raise ValueError("PEFT backend is required for this method.")

        for component in self._lora_loadable_modules:
            model = getattr(self, component, None)
            if model is not None:
                if issubclass(model.__class__, ModelMixin):
                    model.unload_lora()
                elif issubclass(model.__class__, PreTrainedModel):
                    _remove_text_encoder_monkey_patch(model)

    def fuse_lora(
        self,
        components: List[str] = [],
        lora_scale: float = 1.0,
        safe_fusing: bool = False,
        adapter_names: Optional[List[str]] = None,
        **kwargs,
    ):
        """
        Fuses the LoRA parameters into the original parameters of the corresponding blocks.

        <Tip warning={true}>

        This is an experimental API.

        </Tip>

        Args:
            components: (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into.
            lora_scale (`float`, defaults to 1.0):
                Controls how much to influence the outputs with the LoRA parameters.
            safe_fusing (`bool`, defaults to `False`):
                Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them.
            adapter_names (`List[str]`, *optional*):
                Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused.

        Example:

        ```py
        from diffusers import DiffusionPipeline
        import torch

        pipeline = DiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
        ).to("cuda")
        pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel")
        pipeline.fuse_lora(lora_scale=0.7)
        ```
        """
        if "fuse_unet" in kwargs:
            depr_message = "Passing `fuse_unet` to `fuse_lora()` is deprecated and will be ignored. Please use the `components` argument and provide a list of the components whose LoRAs are to be fused. `fuse_unet` will be removed in a future version."
            deprecate("fuse_unet", "1.0.0", depr_message)
        if "fuse_transformer" in kwargs:
            depr_message = "Passing `fuse_transformer` to `fuse_lora()` is deprecated and will be ignored. Please use the `components` argument and provide a list of the components whose LoRAs are to be fused. `fuse_transformer` will be removed in a future version."
            deprecate("fuse_transformer", "1.0.0", depr_message)
        if "fuse_text_encoder" in kwargs:
            depr_message = "Passing `fuse_text_encoder` to `fuse_lora()` is deprecated and will be ignored. Please use the `components` argument and provide a list of the components whose LoRAs are to be fused. `fuse_text_encoder` will be removed in a future version."
            deprecate("fuse_text_encoder", "1.0.0", depr_message)

        if len(components) == 0:
            raise ValueError("`components` cannot be an empty list.")

        for fuse_component in components:
            if fuse_component not in self._lora_loadable_modules:
                raise ValueError(f"{fuse_component} is not found in {self._lora_loadable_modules=}.")

            model = getattr(self, fuse_component, None)
            if model is not None:
                # Handle diffusers models.
                if issubclass(model.__class__, ModelMixin):
                    model.fuse_lora(lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names)
                # Handle transformers models (the text encoders).
                if issubclass(model.__class__, PreTrainedModel):
                    fuse_text_encoder_lora(
                        model, lora_scale=lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names
                    )

        self.num_fused_loras += 1

    def unfuse_lora(self, components: List[str] = [], **kwargs):
        """
        Reverses the effect of
        [`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora).

        <Tip warning={true}>

        This is an experimental API.

        </Tip>

        Args:
            components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from.
            unfuse_unet (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters.
            unfuse_text_encoder (`bool`, defaults to `True`):
                Whether to unfuse the text encoder LoRA parameters. If the text encoder wasn't monkey-patched with the
                LoRA parameters then it won't have any effect.
        """
        if "unfuse_unet" in kwargs:
            depr_message = "Passing `unfuse_unet` to `unfuse_lora()` is deprecated and will be ignored. Please use the `components` argument. `unfuse_unet` will be removed in a future version."
            deprecate("unfuse_unet", "1.0.0", depr_message)
        if "unfuse_transformer" in kwargs:
            depr_message = "Passing `unfuse_transformer` to `unfuse_lora()` is deprecated and will be ignored. Please use the `components` argument. `unfuse_transformer` will be removed in a future version."
            deprecate("unfuse_transformer", "1.0.0", depr_message)
        if "unfuse_text_encoder" in kwargs:
            depr_message = "Passing `unfuse_text_encoder` to `unfuse_lora()` is deprecated and will be ignored. Please use the `components` argument. `unfuse_text_encoder` will be removed in a future version."
            deprecate("unfuse_text_encoder", "1.0.0", depr_message)

        if len(components) == 0:
            raise ValueError("`components` cannot be an empty list.")

        for fuse_component in components:
            if fuse_component not in self._lora_loadable_modules:
                raise ValueError(f"{fuse_component} is not found in {self._lora_loadable_modules=}.")

            model = getattr(self, fuse_component, None)
            if model is not None:
                if issubclass(model.__class__, (ModelMixin, PreTrainedModel)):
                    for module in model.modules():
                        if isinstance(module, BaseTunerLayer):
                            module.unmerge()

        self.num_fused_loras -= 1

    def set_adapters(
        self,
        adapter_names: Union[List[str], str],
        adapter_weights: Optional[Union[float, Dict, List[float], List[Dict]]] = None,
    ):
        adapter_names = [adapter_names] if isinstance(adapter_names, str) else adapter_names

        adapter_weights = copy.deepcopy(adapter_weights)

        # Expand a single weight (or weight dict) into one entry per adapter.
        if not isinstance(adapter_weights, list):
            adapter_weights = [adapter_weights] * len(adapter_names)

        if len(adapter_names) != len(adapter_weights):
            raise ValueError(
                f"Length of adapter names {len(adapter_names)} is not equal to the length of the weights {len(adapter_weights)}"
            )

        list_adapters = self.get_list_adapters()  # eg {"unet": ["adapter1", "adapter2"], "text_encoder": ["adapter2"]}
        # eg ["adapter1", "adapter2"]
        all_adapters = {adapter for adapters in list_adapters.values() for adapter in adapters}
        invert_list_adapters = {
            adapter: [part for part, adapters in list_adapters.items() if adapter in adapters]
            for adapter in all_adapters
        }  # eg {"adapter1": ["unet"], "adapter2": ["unet", "text_encoder"]}

        _component_adapter_weights = {}
        for component in self._lora_loadable_modules:
            model = getattr(self, component)

            for adapter_name, weights in zip(adapter_names, adapter_weights):
                if isinstance(weights, dict):
                    component_adapter_weights = weights.pop(component, None)

                    if component_adapter_weights is not None and not hasattr(self, component):
                        logger.warning(
                            f"Lora weight dict contains {component} weights but will be ignored because pipeline does not have {component}."
                        )

                    if component_adapter_weights is not None and component not in invert_list_adapters[adapter_name]:
                        logger.warning(
                            (
                                f"Lora weight dict for adapter '{adapter_name}' contains {component},"
                                f"but this will be ignored because {adapter_name} does not contain weights for {component}."
                                f"Valid parts for {adapter_name} are: {invert_list_adapters[adapter_name]}."
                            )
                        )

                else:
                    component_adapter_weights = weights

                _component_adapter_weights.setdefault(component, [])
                _component_adapter_weights[component].append(component_adapter_weights)

            if issubclass(model.__class__, ModelMixin):
                model.set_adapters(adapter_names, _component_adapter_weights[component])
            elif issubclass(model.__class__, PreTrainedModel):
                set_adapters_for_text_encoder(adapter_names, model, _component_adapter_weights[component])

    def disable_lora(self):
        if not USE_PEFT_BACKEND:
            raise ValueError("PEFT backend is required for this method.")

        for component in self._lora_loadable_modules:
            model = getattr(self, component, None)
            if model is not None:
                if issubclass(model.__class__, ModelMixin):
                    model.disable_lora()
                elif issubclass(model.__class__, PreTrainedModel):
                    disable_lora_for_text_encoder(model)

    def enable_lora(self):
        if not USE_PEFT_BACKEND:
            raise ValueError("PEFT backend is required for this method.")

        for component in self._lora_loadable_modules:
            model = getattr(self, component, None)
            if model is not None:
                if issubclass(model.__class__, ModelMixin):
                    model.enable_lora()
                elif issubclass(model.__class__, PreTrainedModel):
                    enable_lora_for_text_encoder(model)
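
    # Illustrative sketch (assumption, not from this file): `set_adapters` accepts either one scalar
    # weight per adapter or a per-component dict, so a hypothetical pipeline with two loaded adapters
    # could be configured like
    #
    #   pipe.set_adapters(["toy", "pixel"], adapter_weights=[{"unet": 0.8, "text_encoder": 0.5}, 0.6])
    #
    # where "toy"/"pixel" are hypothetical adapter names and the dict keys must match entries of
    # that pipeline's `_lora_loadable_modules`.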

    def delete_adapters(self, adapter_names: Union[List[str], str]):
        """
        Deletes the LoRA layers of `adapter_name` for the unet and text-encoder(s).

        Args:
            adapter_names (`Union[List[str], str]`):
                The names of the adapter to delete. Can be a single string or a list of strings.
        """
        if not USE_PEFT_BACKEND:
            raise ValueError("PEFT backend is required for this method.")

        if isinstance(adapter_names, str):
            adapter_names = [adapter_names]

        for component in self._lora_loadable_modules:
            model = getattr(self, component, None)
            if model is not None:
                if issubclass(model.__class__, ModelMixin):
                    model.delete_adapters(adapter_names)
                elif issubclass(model.__class__, PreTrainedModel):
                    for adapter_name in adapter_names:
                        delete_adapter_layers(model, adapter_name)

    def get_active_adapters(self) -> List[str]:
        """
        Gets the list of the current active adapters.

        Example:

        ```python
        from diffusers import DiffusionPipeline

        pipeline = DiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-xl-base-1.0",
        ).to("cuda")
        pipeline.load_lora_weights("CiroN2022/toy-face", weight_name="toy_face_sdxl.safetensors", adapter_name="toy")
        pipeline.get_active_adapters()
        ```
        """
        if not USE_PEFT_BACKEND:
            raise ValueError(
                "PEFT backend is required for this method. Please install the latest version of PEFT `pip install -U peft`"
            )

        active_adapters = []

        for component in self._lora_loadable_modules:
            model = getattr(self, component, None)
            if model is not None and issubclass(model.__class__, ModelMixin):
                for module in model.modules():
                    if isinstance(module, BaseTunerLayer):
                        active_adapters = module.active_adapters
                        break

        return active_adapters

    def get_list_adapters(self) -> Dict[str, List[str]]:
        """
        Gets the current list of all available adapters in the pipeline.
        """
        if not USE_PEFT_BACKEND:
            raise ValueError(
                "PEFT backend is required for this method. Please install the latest version of PEFT `pip install -U peft`"
            )

        set_adapters = {}

        for component in self._lora_loadable_modules:
            model = getattr(self, component, None)
            if (
                model is not None
                and issubclass(model.__class__, (ModelMixin, PreTrainedModel))
                and hasattr(model, "peft_config")
            ):
                set_adapters[component] = list(model.peft_config.keys())

        return set_adapters
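
    # Illustrative sketch (assumption, not from this file): for a pipeline that loaded one adapter
    # named "toy" into both its diffusion model and its text encoder, `get_list_adapters()` would
    # return a mapping shaped roughly like
    #
    #   {"unet": ["toy"], "text_encoder": ["toy"]}
    #
    # with the keys taken from that pipeline's `_lora_loadable_modules`.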

    def set_lora_device(self, adapter_names: List[str], device: Union[torch.device, str, int]) -> None:
        """
        Moves the LoRAs listed in `adapter_names` to a target device. Useful for offloading the LoRA to the CPU in case
        you want to load multiple adapters and free some GPU memory.

        Args:
            adapter_names (`List[str]`):
                List of adapters to send device to.
            device (`Union[torch.device, str, int]`):
                Device to send the adapters to. Can be either a torch device, a str or an integer.
        """
        if not USE_PEFT_BACKEND:
            raise ValueError("PEFT backend is required for this method.")

        for component in self._lora_loadable_modules:
            model = getattr(self, component, None)
            if model is not None:
                for module in model.modules():
                    if isinstance(module, BaseTunerLayer):
                        for adapter_name in adapter_names:
                            module.lora_A[adapter_name].to(device)
                            module.lora_B[adapter_name].to(device)
                            # This is a parameter, not a module, so the device placement is not in-place
                            # and the moved tensor has to be re-assigned.
                            if hasattr(module, "lora_magnitude_vector") and module.lora_magnitude_vector is not None:
                                module.lora_magnitude_vector[adapter_name] = module.lora_magnitude_vector[
                                    adapter_name
                                ].to(device)

    @staticmethod
    def pack_weights(layers, prefix):
        layers_weights = layers.state_dict() if isinstance(layers, torch.nn.Module) else layers
        layers_state_dict = {f"{prefix}.{module_name}": param for module_name, param in layers_weights.items()}
        return layers_state_dict

    @staticmethod
    def write_lora_layers(
        state_dict: Dict[str, torch.Tensor],
        save_directory: str,
        is_main_process: bool,
        weight_name: str,
        save_function: Callable,
        safe_serialization: bool,
    ):
        from .lora_pipeline import LORA_WEIGHT_NAME, LORA_WEIGHT_NAME_SAFE

        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        if save_function is None:
            if safe_serialization:

                def save_function(weights, filename):
                    return safetensors.torch.save_file(weights, filename, metadata={"format": "pt"})

            else:
                save_function = torch.save

        os.makedirs(save_directory, exist_ok=True)

        if weight_name is None:
            if safe_serialization:
                weight_name = LORA_WEIGHT_NAME_SAFE
            else:
                weight_name = LORA_WEIGHT_NAME

        save_path = Path(save_directory, weight_name).as_posix()
        save_function(state_dict, save_path)
        logger.info(f"Model weights saved in {save_path}")

    @property
    def lora_scale(self) -> float:
        # Property that returns the LoRA scale, which can be set at run time by the pipeline.
        # If `_lora_scale` has not been set, return 1.
        return self._lora_scale if hasattr(self, "_lora_scale") else 1.0
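

# Illustrative sketch (assumption, not from this file): a typical offloading pattern with these
# helpers on a hypothetical pipeline that has already loaded two adapters named "toy" and "pixel":
#
#   pipe.set_lora_device(["pixel"], "cpu")    # park the unused adapter's LoRA weights on the CPU
#   pipe.set_adapters(["toy"], 0.8)           # keep only "toy" active for the next generation
#   pipe.set_lora_device(["pixel"], "cuda")   # bring "pixel" back when it is needed again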