import torch
import torch.nn.functional as F
from torch import nn

from ..utils import deprecate, logging
from ..utils.import_utils import is_transformers_available


if is_transformers_available():
    from transformers import CLIPTextModel, CLIPTextModelWithProjection


logger = logging.get_logger(__name__)


def text_encoder_attn_modules(text_encoder):
    attn_modules = []

    if isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)):
        for i, layer in enumerate(text_encoder.text_model.encoder.layers):
            name = f"text_model.encoder.layers.{i}.self_attn"
            mod = layer.self_attn
            attn_modules.append((name, mod))
    else:
        raise ValueError(f"do not know how to get attention modules for: {text_encoder.__class__.__name__}")

    return attn_modules


def text_encoder_mlp_modules(text_encoder):
    mlp_modules = []

    if isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)):
        for i, layer in enumerate(text_encoder.text_model.encoder.layers):
            mlp_mod = layer.mlp
            name = f"text_model.encoder.layers.{i}.mlp"
            mlp_modules.append((name, mlp_mod))
    else:
        raise ValueError(f"do not know how to get mlp modules for: {text_encoder.__class__.__name__}")

    return mlp_modules
def adjust_lora_scale_text_encoder(text_encoder, lora_scale: float = 1.0):
    for _, attn_module in text_encoder_attn_modules(text_encoder):
        if isinstance(attn_module.q_proj, PatchedLoraProjection):
            attn_module.q_proj.lora_scale = lora_scale
            attn_module.k_proj.lora_scale = lora_scale
            attn_module.v_proj.lora_scale = lora_scale
            attn_module.out_proj.lora_scale = lora_scale

    for _, mlp_module in text_encoder_mlp_modules(text_encoder):
        if isinstance(mlp_module.fc1, PatchedLoraProjection):
            mlp_module.fc1.lora_scale = lora_scale
            mlp_module.fc2.lora_scale = lora_scale
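# Effect sketch (assuming the encoder's projections were patched with
# `PatchedLoraProjection`): after
#
#     adjust_lora_scale_text_encoder(text_encoder, lora_scale=0.5)
#
# every patched projection computes
#     out = regular_linear_layer(x) + 0.5 * lora_linear_layer(x)
# so `lora_scale=0.0` silences the LoRA branch without unloading its weights.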
ZdddZdd Zdd Z  ZS )r%      N   c           	         sp   d}t dd| t   ddlm} || _| jjj}|d u r$| jjj}|| jj	| jj
||||d| _|| _d S )NzsUse of `PatchedLoraProjection` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`.r%   1.0.0r   )LoRALinearLayer)network_alphadevicedtyperank)r   super__init__models.lorar2   regular_linear_layerweightr4   r5   in_featuresout_featureslora_linear_layerr"   )	selfr:   r"   r3   r6   r5   deprecation_messager2   r4   r   r   r   r8   R   s"   



	zPatchedLoraProjection.__init__ Fdestinationprefix	keep_varsc                   s6   | j d u r| jj||||dS t j||||dS )NrC   )r>   r:   
state_dictr7   )r?   rD   rE   rF   argsrA   r   r   rG   m   s
   
z PatchedLoraProjection.state_dictr!   c           	   	   C   s  | j d u rd S | jjjj| jjjj}}| jjj }| j jjj }| j jjj }| j j	d ur;|| j j	 | j j
 }||t|d d d f |d d d f d   }|rft|  rftd|  d|j||d| jj_d | _ | | _| | _|| _d S Nr   aThis LoRA weight seems to be broken. Encountered NaN values when trying to fuse LoRA weights for  .LoRA weights will not be fused.r4   r5   )r>   r:   r;   datar5   r4   floatupdownr3   r6   torchbmmisnananyitemr   tocpuw_upw_downr"   	r?   r"   safe_fusingr5   r4   w_origrX   rY   fused_weightr   r   r   
_fuse_lorau   s(   
0


z PatchedLoraProjection._fuse_lorac              	   C   s   t | dd d urt | dd d usd S | jjj}|j|j}}| jj|d }| j	| }| | j
t|d d d f |d d d f d   }|j||d| jj_d | _d | _	d S NrX   rY   r4   r   rL   )getattrr:   r;   rM   r5   r4   rX   rV   rN   rY   r"   rQ   rR   r?   r]   r5   r4   rX   rY   unfused_weightr   r   r   _unfuse_lora   s    
6
z"PatchedLoraProjection._unfuse_lorac                 C   s>   | j d u rd| _ | jd u r| |S | || j | |  S )Nr!   )r"   r>   r:   )r?   inputr   r   r   forward   s
   


zPatchedLoraProjection.forward)r/   Nr0   Nr!   F)	r   
__module____qualname__r8   rG   r^   rd   rf   __classcell__r   r   rA   r   r%   Q   s    
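# Fusion round-trip sketch (hypothetical layer, for illustration only):
#
#     base = torch.nn.Linear(768, 768)
#     patched = PatchedLoraProjection(base, lora_scale=1.0, rank=4)
#     w_before = patched.regular_linear_layer.weight.data.clone()
#
#     patched._fuse_lora(lora_scale=1.0)  # folds lora_scale * (up @ down) into the base weight
#     patched._unfuse_lora()              # subtracts the same product back out
#
#     # the base weight is restored up to float32 round-off
#     assert torch.allclose(w_before, patched.regular_linear_layer.weight.data, atol=1e-5)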
 r%   c                       sn   e Zd ZdZ				ddededededB dejeB dB d	ej	dB f fd
dZ
dejdejfddZ  ZS )r2   a  
    A linear layer that is used with LoRA.

    Parameters:
        in_features (`int`):
            Number of input features.
        out_features (`int`):
            Number of output features.
        rank (`int`, `optional`, defaults to 4):
            The rank of the LoRA layer.
        network_alpha (`float`, `optional`, defaults to `None`):
            The value of the network alpha used for stable learning and preventing underflow. This value has the same
            meaning as the `--network_alpha` option in the kohya-ss trainer script. See
            https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning
        device (`torch.device`, `optional`, defaults to `None`):
            The device to use for the layer's weights.
        dtype (`torch.dtype`, `optional`, defaults to `None`):
            The dtype to use for the layer's weights.
    r0   Nr<   r=   r6   r3   r4   r5   c                    s   t    d}tdd| tj||d||d| _tj||d||d| _|| _|| _|| _	|| _
tjj| jjd| d tj| jj d S )NzmUse of `LoRALinearLayer` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`.r2   r1   F)biasr4   r5   r/   std)r7   r8   r   r   LinearrP   rO   r3   r6   r=   r<   initnormal_r;   zeros_)r?   r<   r=   r6   r3   r4   r5   r@   rA   r   r   r8      s   
	zLoRALinearLayer.__init__hidden_statesreturnc                 C   N   |j }| jjj }| ||}| |}| jd ur"|| j| j 9 }||S r#   r5   rP   r;   rV   rO   r3   r6   r?   rr   
orig_dtyper5   down_hidden_statesup_hidden_statesr   r   r   rf         



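# Shape sketch (illustrative): the layer factors an (out_features x in_features)
# update into `up @ down` with inner dimension `rank`; a freshly constructed layer
# is a no-op because `up.weight` is zero-initialized:
#
#     lora = LoRALinearLayer(in_features=768, out_features=768, rank=4)
#     x = torch.randn(2, 77, 768)
#     assert lora(x).shape == (2, 77, 768)
#     assert torch.all(lora(x) == 0)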
zLoRALinearLayer.forward)r0   NNN)r   rh   ri   __doc__intrN   rQ   r4   strr5   r8   Tensorrf   rj   r   r   rA   r   r2      s(    r2   c                       s   e Zd ZdZ					ddededed	eeeef B d
eeeef B deeeef B eB dedB f fddZde	j
de	j
fddZ  ZS )LoRAConv2dLayera"  
    A convolutional layer that is used with LoRA.

    Parameters:
        in_features (`int`):
            Number of input features.
        out_features (`int`):
            Number of output features.
        rank (`int`, `optional`, defaults to 4):
            The rank of the LoRA layer.
        kernel_size (`int` or `tuple` of two `int`, `optional`, defaults to 1):
            The kernel size of the convolution.
        stride (`int` or `tuple` of two `int`, `optional`, defaults to 1):
            The stride of the convolution.
        padding (`int` or `tuple` of two `int` or `str`, `optional`, defaults to 0):
            The padding of the convolution.
        network_alpha (`float`, `optional`, defaults to `None`):
            The value of the network alpha used for stable learning and preventing underflow. This value has the same
            meaning as the `--network_alpha` option in the kohya-ss trainer script. See
            https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning
    r0   r/   r/   r   Nr<   r=   r6   kernel_sizestridepaddingr3   c           	         s   t    d}tdd| tj|||||dd| _tj||dddd| _|| _|| _tj	j
| jjd| d	 tj	| jj d S )
NzmUse of `LoRAConv2dLayer` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`.r   r1   F)r   r   r   rk   r   )r   r   rk   r/   rl   )r7   r8   r   r   Conv2drP   rO   r3   r6   ro   rp   r;   rq   )	r?   r<   r=   r6   r   r   r   r3   r@   rA   r   r   r8      s   

zLoRAConv2dLayer.__init__rr   rs   c                 C   rt   r#   ru   rv   r   r   r   rf     rz   zLoRAConv2dLayer.forward)r0   r   r   r   N)r   rh   ri   r{   r|   tupler}   rN   r8   rQ   r~   rf   rj   r   r   rA   r   r      s.    r   c                       sz   e Zd ZdZdddedB f fddZdedB fddZddedefddZ	dd Z
ddejdedejfddZ  ZS )LoRACompatibleConvz;
    A convolutional layer that can be used with LoRA.
    N
lora_layerr   c                   ,   d}t dd| t j|i | || _d S )NzpUse of `LoRACompatibleConv` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`.r   r1   r   r7   r8   r   r?   r   rH   kwargsr@   rA   r   r   r8   .     
zLoRACompatibleConv.__init__c                 C      d}t dd| || _d S NznUse of `set_lora_layer()` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`.set_lora_layerr1   r   r   r?   r   r@   r   r   r   r   5  s   
z!LoRACompatibleConv.set_lora_layerr!   Fr"   r[   c           
      C   s   | j d u rd S | jjj| jjj}}| jj }| j jjj }| j jjj }| j jd ur8|| j j | j j	 }t
|jdd|jdd}||j}|||  }	|ret
|	  retd|  d|	j||d| j_d | _ | | _| | _|| _d S )Nr/   	start_dimrJ   rK   rL   )r   r;   rM   r5   r4   rN   rO   rP   r3   r6   rQ   mmflattenreshapeshaperS   rT   rU   r   rV   rW   rX   rY   _lora_scale)
r?   r"   r[   r5   r4   r\   rX   rY   fusionr]   r   r   r   r^   ;  s,   



zLoRACompatibleConv._fuse_lorac                 C   s   t | dd d urt | dd d usd S | jj}|jj|jj}}| jj|d | _| j| | _t	
| jjdd| jjdd}||j}| | j|  }|j||d| j_d | _d | _d S )NrX   rY   r`   r/   r   rL   )ra   r;   rM   r5   r4   rX   rV   rN   rY   rQ   r   r   r   r   r   )r?   r]   r5   r4   r   rc   r   r   r   rd   ]  s     
zLoRACompatibleConv._unfuse_lorarr   scalers   c              	   C   sj   | j dkrtj|| j| j d}d}n| j}t|| j| j| j|| j	| j
}| jd u r,|S ||| |  S )Nzeros)mode)r   r   )padding_modeFpad _reversed_padding_repeated_twicer   conv2dr;   rk   r   dilationgroupsr   )r?   rr   r   r   original_outputsr   r   r   rf   o  s   

zLoRACompatibleConv.forwardrg   r!   )r   rh   ri   r{   r   r8   r   rN   boolr^   rd   rQ   r~   rf   rj   r   r   rA   r   r   )  s    "$r   c                       s~   e Zd ZdZdddedB f fddZdedB fddZddedefddZ	dd Z
ddejdedejf fddZ  ZS )LoRACompatibleLinearz4
    A Linear layer that can be used with LoRA.
    Nr   r   c                   r   )NzrUse of `LoRACompatibleLinear` is deprecated. Please switch to PEFT backend by installing PEFT: `pip install peft`.r   r1   r   r   rA   r   r   r8     r   zLoRACompatibleLinear.__init__c                 C   r   r   r   r   r   r   r   r     s   
z#LoRACompatibleLinear.set_lora_layerr!   Fr"   r[   c           	   	   C   s   | j d u rd S | jjj| jjj}}| jj }| j jjj }| j jjj }| j jd ur8|| j j | j j	 }||t
|d d d f |d d d f d   }|rct
|  rctd|  d|j||d| j_d | _ | | _| | _|| _d S rI   )r   r;   rM   r5   r4   rN   rO   rP   r3   r6   rQ   rR   rS   rT   rU   r   rV   rW   rX   rY   r   rZ   r   r   r   r^     s(   
0


zLoRACompatibleLinear._fuse_lorac              	   C   s   t | dd d urt | dd d usd S | jj}|j|j}}| jj|d }| j| }| | j	t
|d d d f |d d d f d   }|j||d| j_d | _d | _d S r_   )ra   r;   rM   r5   r4   rX   rV   rN   rY   r   rQ   rR   rb   r   r   r   rd     s    6
z!LoRACompatibleLinear._unfuse_lorarr   r   rs   c                    s8   | j d u rt |}|S t |||  |  }|S r#   )r   r7   rf   )r?   rr   r   outrA   r   r   rf     s
   
zLoRACompatibleLinear.forwardrg   r   )r   rh   ri   r{   r2   r8   r   rN   r   r^   rd   rQ   r~   rf   rj   r   r   rA   r   r     s     (r   r   )rQ   torch.nn.functionalr   