from __future__ import annotations

import warnings
from typing import Any, Optional

import torch

from peft.import_utils import is_torchao_available
from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge

from .config import LoraConfig
from .layer import Linear


class TorchaoLoraLinear(Linear):
    """LoRA layer implementation for Linear layers using torchao data"""

    def __init__(self, *args, get_apply_tensor_subclass, **kwargs):
        if kwargs.get("lora_bias", False):
            raise ValueError(f"{self.__class__.__name__} does not support lora_bias yet, set it to False")

        super().__init__(*args, **kwargs)
        self.get_apply_tensor_subclass = get_apply_tensor_subclass
        self._check_dtype_supported()

    def _check_dtype_supported(self):
        # Only int8 torchao weights are supported for now; the attribute holding the
        # quantized data differs between torchao versions (tensor_impl vs. layout_tensor).
        base_layer = self.get_base_layer()
        weight = base_layer.weight
        if (hasattr(weight, "tensor_impl") and weight.tensor_impl.data.dtype != torch.int8) or (
            hasattr(weight, "layout_tensor") and weight.layout_tensor.data.dtype != torch.int8
        ):
            raise ValueError(f"{type(self).__name__} only supports int8 weights for now.")

    def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
        from torchao import quantize_

        adapter_names = check_adapters_to_merge(self, adapter_names)
        if not adapter_names:
            # no adapter to merge
            return

        self._check_dtype_supported()
        base_layer = self.get_base_layer()
        weight = base_layer.weight

        for active_adapter in adapter_names:
            try:
                weight = weight.dequantize()
            except NotImplementedError as exc:
                msg = (
                    f"Weights of type {type(weight).__name__} do not support dequantization (yet), "
                    "which is needed to support merging."
                )
                raise NotImplementedError(msg) from exc

            if safe_merge and not torch.isfinite(weight).all():
                raise ValueError(
                    f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
                )

            # Add the LoRA delta to the dequantized weight, then re-quantize the base layer.
            weight += self.get_delta_weight(active_adapter)
            del base_layer.weight
            base_layer.weight = weight
            quantize_(base_layer, self.get_apply_tensor_subclass())
            del weight

            self.merged_adapters.append(active_adapter)

    def unmerge(self) -> None:
        from torchao import quantize_

        if not self.merged:
            warnings.warn("Already unmerged. Nothing to do.")
            return

        while len(self.merged_adapters) > 0:
            active_adapter = self.merged_adapters.pop()
            if active_adapter not in self.lora_A.keys():
                continue

            base_layer = self.get_base_layer()
            weight = base_layer.weight
            try:
                weight = weight.dequantize()
            except NotImplementedError as exc:
                msg = (
                    f"Weights of type {type(weight).__name__} do not support dequantization (yet), "
                    "which is needed to support unmerging."
                )
                raise NotImplementedError(msg) from exc

            # Subtract the LoRA delta from the dequantized weight, then re-quantize.
            weight -= self.get_delta_weight(active_adapter)
            del base_layer.weight
            base_layer.weight = weight
            quantize_(base_layer, self.get_apply_tensor_subclass())
            del weight

    def __repr__(self) -> str:
        rep = super().__repr__()
        return rep.replace("lora.Linear", f"lora.{self.__class__.__name__}")


def dispatch_torchao(
    target: torch.nn.Module,
    adapter_name: str,
    lora_config: LoraConfig,
    **kwargs: Any,
) -> Optional[torch.nn.Module]:
    new_module = None

    if isinstance(target, BaseTunerLayer):
        target_base_layer = target.get_base_layer()
    else:
        target_base_layer = target

    if not hasattr(target_base_layer, "weight"):
        return new_module

    if not is_torchao_available():
        return new_module

    from torchao.dtypes import AffineQuantizedTensor
    from torchao.quantization import LinearActivationQuantizedTensor

    if isinstance(target_base_layer.weight, (AffineQuantizedTensor, LinearActivationQuantizedTensor)):
        new_module = TorchaoLoraLinear(target, adapter_name, **kwargs)

    return new_module
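

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the original module): how a
# torchao-quantized base model ends up routed through TorchaoLoraLinear via
# dispatch_torchao. The model name and LoRA hyperparameters below are
# assumptions chosen for the example, not values taken from this file.
#
#     from transformers import AutoModelForCausalLM, TorchAoConfig
#     from peft import LoraConfig, get_peft_model
#
#     # Quantize the base weights with torchao int8 (the only dtype this
#     # layer currently supports, see _check_dtype_supported above).
#     quant_config = TorchAoConfig("int8_weight_only")
#     model = AutoModelForCausalLM.from_pretrained(
#         "facebook/opt-125m", quantization_config=quant_config
#     )
#
#     lora_config = LoraConfig(r=8, target_modules=["q_proj", "v_proj"])
#     # get_peft_model runs the LoRA dispatchers; for modules whose weight is
#     # an AffineQuantizedTensor or LinearActivationQuantizedTensor,
#     # dispatch_torchao wraps them in TorchaoLoraLinear instead of the plain
#     # Linear LoRA layer.
#     peft_model = get_peft_model(model, lora_config)
# ---------------------------------------------------------------------------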