from typing import Optional, Tuple, Union

import torch
import torch.nn.functional as F
from torch import nn

from ..utils import deprecate, logging
from ..utils.import_utils import is_transformers_available


if is_transformers_available():
    from transformers import CLIPTextModel, CLIPTextModelWithProjection


logger = logging.get_logger(__name__)


def text_encoder_attn_modules(text_encoder):
    attn_modules = []

    if isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)):
        for i, layer in enumerate(text_encoder.text_model.encoder.layers):
            name = f"text_model.encoder.layers.{i}.self_attn"
            mod = layer.self_attn
            attn_modules.append((name, mod))
    else:
        raise ValueError(f"do not know how to get attention modules for: {text_encoder.__class__.__name__}")

    return attn_modules

j )Nr   z.mlpz(do not know how to get mlp modules for: )r   r
   r   r   r   r   r   mlpr   r   r   r   )r   mlp_modulesr   r   mlp_modr   r   r   r   text_encoder_mlp_modules7   s   r#         ?
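# A minimal usage sketch (illustrative, not part of the module): both helpers
# yield (name, module) pairs, one per transformer block of the CLIP text model.
#
#     for name, attn in text_encoder_attn_modules(text_encoder):
#         print(name)   # e.g. "text_model.encoder.layers.0.self_attn"
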
def adjust_lora_scale_text_encoder(text_encoder, lora_scale: float = 1.0):
    for _, attn_module in text_encoder_attn_modules(text_encoder):
        if isinstance(attn_module.q_proj, PatchedLoraProjection):
            attn_module.q_proj.lora_scale = lora_scale
            attn_module.k_proj.lora_scale = lora_scale
            attn_module.v_proj.lora_scale = lora_scale
            attn_module.out_proj.lora_scale = lora_scale

    for _, mlp_module in text_encoder_mlp_modules(text_encoder):
        if isinstance(mlp_module.fc1, PatchedLoraProjection):
            mlp_module.fc1.lora_scale = lora_scale
            mlp_module.fc2.lora_scale = lora_scale

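# A minimal usage sketch (illustrative): rescaling the LoRA contribution on a
# CLIP text encoder whose q/k/v/out projections and MLP layers were previously
# wrapped in `PatchedLoraProjection` by a LoRA loader. The checkpoint id is
# just an example.
#
#     from transformers import CLIPTextModel
#
#     text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")
#     ...  # load LoRA weights / patch projections here
#     adjust_lora_scale_text_encoder(text_encoder, lora_scale=0.5)
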
ZdddZdd Zdd Z  ZS )r(      N   c           	         sp   d}t dd| t   ddlm} || _| jjj}|d u r$| jjj}|| jj	| jj
||||d| _|| _d S )NzsUse of `PatchedLoraProjection` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`.r(   1.0.0r   )LoRALinearLayer)network_alphadevicedtyperank)r   super__init__models.lorar5   regular_linear_layerweightr7   r8   in_featuresout_featureslora_linear_layerr%   )	selfr=   r%   r6   r9   r8   deprecation_messager5   r7   r   r   r   r;   T   s"   



	zPatchedLoraProjection.__init__ Fdestinationprefix	keep_varsc                   s6   | j d u r| jj||||dS t j||||dS )NrF   )rA   r=   
state_dictr:   )rB   rG   rH   rI   argsrD   r   r   rJ   o   s
   
z PatchedLoraProjection.state_dictr$   c           	   	   C   s  | j d u rd S | jjjj| jjjj}}| jjj }| j jjj }| j jjj }| j j	d ur;|| j j	 | j j
 }||t|d d d f |d d d f d   }|rft|  rftd|  d|j||d| jj_d | _ | | _| | _|| _d S Nr   aThis LoRA weight seems to be broken. Encountered NaN values when trying to fuse LoRA weights for  .LoRA weights will not be fused.r7   r8   )rA   r=   r>   datar8   r7   floatupdownr6   r9   torchbmmisnananyitemr   tocpuw_upw_downr%   	rB   r%   safe_fusingr8   r7   w_origr[   r\   fused_weightr   r   r   
_fuse_loraw   s(   
0


z PatchedLoraProjection._fuse_lorac              	   C   s   t | dd d urt | dd d usd S | jjj}|j|j}}| jj|d }| j	| }| | j
t|d d d f |d d d f d   }|j||d| jj_d | _d | _	d S Nr[   r\   r7   r   rO   )getattrr=   r>   rP   r8   r7   r[   rY   rQ   r\   r%   rT   rU   rB   r`   r8   r7   r[   r\   unfused_weightr   r   r   _unfuse_lora   s    
6
z"PatchedLoraProjection._unfuse_lorac                 C   s>   | j d u rd| _ | jd u r| |S | || j | |  S )Nr$   )r%   rA   r=   )rB   inputr   r   r   forward   s
   


zPatchedLoraProjection.forward)r2   Nr3   Nr$   F)	r   
__module____qualname__r;   rJ   ra   rg   ri   __classcell__r   r   rD   r   r(   S   s    
 r(   c                       sr   e Zd ZdZ				ddedededee deeej	e
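# A minimal usage sketch (illustrative, not part of the module): wrapping an
# existing `nn.Linear` so its output becomes
# `linear(x) + lora_scale * lora_linear_layer(x)`, then fusing the low-rank
# update into the base weight.
#
#     base = nn.Linear(768, 768)
#     patched = PatchedLoraProjection(base, lora_scale=1.0, rank=4)
#     y = patched(torch.randn(2, 768))
#     patched._fuse_lora(lora_scale=1.0)   # folds up @ down into base.weight
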
class LoRALinearLayer(nn.Module):
    r"""
    A linear layer that is used with LoRA.

    Parameters:
        in_features (`int`):
            Number of input features.
        out_features (`int`):
            Number of output features.
        rank (`int`, `optional`, defaults to 4):
            The rank of the LoRA layer.
        network_alpha (`float`, `optional`, defaults to `None`):
            The value of the network alpha used for stable learning and preventing underflow. This value has the same
            meaning as the `--network_alpha` option in the kohya-ss trainer script. See
            https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning
        device (`torch.device`, `optional`, defaults to `None`):
            The device to use for the layer's weights.
        dtype (`torch.dtype`, `optional`, defaults to `None`):
            The dtype to use for the layer's weights.
    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        rank: int = 4,
        network_alpha: Optional[float] = None,
        device: Optional[Union[torch.device, str]] = None,
        dtype: Optional[torch.dtype] = None,
    ):
        super().__init__()

        deprecation_message = "Use of `LoRALinearLayer` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`."
        deprecate("LoRALinearLayer", "1.0.0", deprecation_message)

        self.down = nn.Linear(in_features, rank, bias=False, device=device, dtype=dtype)
        self.up = nn.Linear(rank, out_features, bias=False, device=device, dtype=dtype)
        self.network_alpha = network_alpha
        self.rank = rank
        self.out_features = out_features
        self.in_features = in_features

        nn.init.normal_(self.down.weight, std=1 / rank)
        nn.init.zeros_(self.up.weight)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        orig_dtype = hidden_states.dtype
        dtype = self.down.weight.dtype

        down_hidden_states = self.down(hidden_states.to(dtype))
        up_hidden_states = self.up(down_hidden_states)

        if self.network_alpha is not None:
            up_hidden_states *= self.network_alpha / self.rank

        return up_hidden_states.to(orig_dtype)

eeeeef f deeeeef ef dee	 f fddZ
dejdejfddZ  ZS )LoRAConv2dLayera"  
    A convolutional layer that is used with LoRA.

    Parameters:
        in_features (`int`):
            Number of input features.
        out_features (`int`):
            Number of output features.
        rank (`int`, `optional`, defaults to 4):
            The rank of the LoRA layer.
        kernel_size (`int` or `tuple` of two `int`, `optional`, defaults to 1):
            The kernel size of the convolution.
        stride (`int` or `tuple` of two `int`, `optional`, defaults to 1):
            The stride of the convolution.
        padding (`int` or `tuple` of two `int` or `str`, `optional`, defaults to 0):
            The padding of the convolution.
        network_alpha (`float`, `optional`, defaults to `None`):
            The value of the network alpha used for stable learning and preventing underflow. This value has the same
            meaning as the `--network_alpha` option in the kohya-ss trainer script. See
            https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning
    r3   r2   r2   r   Nr?   r@   r9   kernel_sizestridepaddingr6   c           	         s   t    d}tdd| tj|||||dd| _tj||dddd| _|| _|| _tj	j
| jjd| d	 tj	| jj d S )
NzmUse of `LoRAConv2dLayer` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`.r   r4   F)r   r   r   rn   r   )r   r   rn   r2   ro   )r:   r;   r   r   Conv2drS   rR   r6   r9   rr   rs   r>   rt   )	rB   r?   r@   r9   r   r   r   r6   rC   rD   r   r   r;     s   

zLoRAConv2dLayer.__init__ru   rv   c                 C   rw   r&   rx   ry   r   r   r   ri     r}   zLoRAConv2dLayer.forward)r3   r   r   r   N)r   rk   rl   r~   r   r   r   r   r   rQ   r;   rT   r   ri   rm   r   r   rD   r   r      s.    r   c                       sz   e Zd ZdZdddee f fddZdee fddZddede	fddZ
dd ZddejdedejfddZ  ZS )LoRACompatibleConvz;
    A convolutional layer that can be used with LoRA.
    N
lora_layerr   c                   ,   d}t dd| t j|i | || _d S )NzpUse of `LoRACompatibleConv` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`.r   r4   r   r:   r;   r   rB   r   rK   kwargsrC   rD   r   r   r;   0     
zLoRACompatibleConv.__init__c                 C      d}t dd| || _d S NznUse of `set_lora_layer()` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`.set_lora_layerr4   r   r   rB   r   rC   r   r   r   r   7  s   
z!LoRACompatibleConv.set_lora_layerr$   Fr%   r^   c           
      C   s   | j d u rd S | jjj| jjj}}| jj }| j jjj }| j jjj }| j jd ur8|| j j | j j	 }t
|jdd|jdd}||j}|||  }	|ret
|	  retd|  d|	j||d| j_d | _ | | _| | _|| _d S )Nr2   	start_dimrM   rN   rO   )r   r>   rP   r8   r7   rQ   rR   rS   r6   r9   rT   mmflattenreshapeshaperV   rW   rX   r   rY   rZ   r[   r\   _lora_scale)
rB   r%   r^   r8   r7   r_   r[   r\   fusionr`   r   r   r   ra   =  s,   



zLoRACompatibleConv._fuse_lorac                 C   s   t | dd d urt | dd d usd S | jj}|jj|jj}}| jj|d | _| j| | _t	
| jjdd| jjdd}||j}| | j|  }|j||d| j_d | _d | _d S )Nr[   r\   rc   r2   r   rO   )rd   r>   rP   r8   r7   r[   rY   rQ   r\   rT   r   r   r   r   r   )rB   r`   r8   r7   r   rf   r   r   r   rg   _  s     
zLoRACompatibleConv._unfuse_loraru   scalerv   c              	   C   sj   | j dkrtj|| j| j d}d}n| j}t|| j| j| j|| j	| j
}| jd u r,|S ||| |  S )Nzeros)mode)r   r   )padding_modeFpad _reversed_padding_repeated_twicer   conv2dr>   rn   r   dilationgroupsr   )rB   ru   r   r   original_outputsr   r   r   ri   q  s   

zLoRACompatibleConv.forwardrj   r$   )r   rk   rl   r~   r   r   r;   r   rQ   boolra   rg   rT   r   ri   rm   r   r   rD   r   r   +  s    "$r   c                       s~   e Zd ZdZdddee f fddZdee fddZddede	fddZ
dd Zddejdedejf fddZ  ZS )LoRACompatibleLinearz4
    A Linear layer that can be used with LoRA.
    Nr   r   c                   r   )NzrUse of `LoRACompatibleLinear` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`.r   r4   r   r   rD   r   r   r;     r   zLoRACompatibleLinear.__init__c                 C   r   r   r   r   r   r   r   r     s   
z#LoRACompatibleLinear.set_lora_layerr$   Fr%   r^   c           	   	   C   s   | j d u rd S | jjj| jjj}}| jj }| j jjj }| j jjj }| j jd ur8|| j j | j j	 }||t
|d d d f |d d d f d   }|rct
|  rctd|  d|j||d| j_d | _ | | _| | _|| _d S rL   )r   r>   rP   r8   r7   rQ   rR   rS   r6   r9   rT   rU   rV   rW   rX   r   rY   rZ   r[   r\   r   r]   r   r   r   ra     s(   
0


zLoRACompatibleLinear._fuse_lorac              	   C   s   t | dd d urt | dd d usd S | jj}|j|j}}| jj|d }| j| }| | j	t
|d d d f |d d d f d   }|j||d| j_d | _d | _d S rb   )rd   r>   rP   r8   r7   r[   rY   rQ   r\   r   rT   rU   re   r   r   r   rg     s    6
z!LoRACompatibleLinear._unfuse_loraru   r   rv   c                    s8   | j d u rt |}|S t |||  |  }|S r&   )r   r:   ri   )rB   ru   r   outrD   r   r   ri     s
   
zLoRACompatibleLinear.forwardrj   r   )r   rk   rl   r~   r   r5   r;   r   rQ   r   ra   rg   rT   r   ri   rm   r   r   rD   r   r     s     (r   r   ) typingr   r   r   rT   torch.nn.functionalr   
functionalr   utilsr   r   utils.import_utilsr	   transformersr
   r   
get_loggerr   loggerr   r#   rQ   r1   Moduler(   r5   r   r   r   rq   r   r   r   r   r   <module>   s"   
\<@W