from unittest.mock import MagicMock, call, patch

import torch
import torch.nn as nn
from lightning.pytorch.trainer.states import TrainerFn

from nemo.collections.llm import fn
from nemo.lightning.pytorch.callbacks.peft import PEFT, WrappedAdapterIO
from nemo.utils.callbacks.dist_ckpt_io import AsyncFinalizableCheckpointIO


class TestPEFT:
    class DummyPEFT(PEFT):
        def transform(self, module, name=None, prefix=None):
            # No-op transform: return the module unchanged.
            return module

        def freeze_model(self, module):
            super().freeze_model(module)
            self.is_called = True
            return module

    class DummyModel(nn.Module, fn.FNMixin):
        def __init__(self):
            super().__init__()
            # Small layers are enough for these tests; the sizes are arbitrary.
            self.linear = nn.Linear(10, 10)
            self.conv = nn.Conv2d(3, 3, 3)

    def test_peft_call(self):
        model = self.DummyModel()
        peft = self.DummyPEFT()

        transformed_model = peft(model)

        assert (
            hasattr(peft, 'is_called') and peft.is_called == True
        ), "peft methods may subclass `freeze_model()`, so it must be called"
        assert transformed_model.linear.weight.requires_grad == False
        assert transformed_model.conv.weight.requires_grad == False

    def test_linear_adapter(self):
        from nemo.collections.llm.peft.lora import LinearAdapter

        for has_bias in (True, False):
            linear = nn.Linear(10, 10, bias=has_bias)
            linear_adapter = LinearAdapter(linear)

            bias_in_state_dict = 'bias' in linear.state_dict()
            if has_bias:
                assert bias_in_state_dict
            else:
                assert not bias_in_state_dict

            # The adapter must preserve the wrapped layer's original weights.
            for key, val in linear.state_dict().items():
                assert key in linear_adapter.state_dict(), f"Key {key} not found in LinearAdapter"
                assert torch.equal(val, linear_adapter.state_dict()[key]), f"Key {key} diff. val in LinearAdapter"

            # Any additional keys must belong to the LoRA weights.
            for key, val in linear_adapter.state_dict().items():
                if key in linear.state_dict():
                    continue
                assert key in ('lora_a.weight', 'lora_b.weight')

    def test_linear_adapter_monkey_patch(self):
        from copy import deepcopy

        from nemo.collections.llm.peft.lora import patch_linear_module

        linear = nn.Linear(10, 10)
        state_init = deepcopy(linear.state_dict())
        linear = patch_linear_module(linear)

        # Patching in place must preserve the original weights.
        for key, val in state_init.items():
            assert key in linear.state_dict(), f"Key {key} not found in LinearAdapter"
            assert torch.equal(val, linear.state_dict()[key]), f"Key {key} diff. val in LinearAdapter"

        state_dict = linear.state_dict()
        for key in ('lora_a', 'lora_b'):
            assert hasattr(linear, key), f"Expected {key} to be in module"
            assert key + '.weight' in state_dict, f"Expected {key} to be in state dict"
            assert getattr(linear, key).weight.requires_grad == True, f"Expected {key} to require_grad"

    def test_peft_setup(self):
        peft = self.DummyPEFT()
        trainer = MagicMock()
        pl_module = MagicMock()

        pl_module.model_transform = peft
        peft.setup(trainer, pl_module, 'fit')

        assert isinstance(trainer.strategy._checkpoint_io, AsyncFinalizableCheckpointIO)
        assert isinstance(trainer.strategy._checkpoint_io._checkpoint_io, WrappedAdapterIO)
        assert peft.model_transform is not None
        assert peft._needs_to_call is True

    @patch('nemo.lightning.pytorch.callbacks.peft.logging')
    def test_peft_on_train_epoch_start_with_adapter(self, mock_logging):
        peft = self.DummyPEFT()
        trainer = MagicMock()
        pl_module = MagicMock()
        pl_module.model_transform = peft
        trainer.state.fn = TrainerFn.FITTING  # adapters are only restored while fitting

        peft.setup(trainer, pl_module, 'fit')

        assert peft.model_transform is not None
        assert peft._needs_to_call is True

        peft.wrapped_io = MagicMock()
        peft.wrapped_io.adapter_ckpt_path = "dummy_path"
        peft.wrapped_io.load_checkpoint.return_value = {'dummy_state': 'dummy_value'}

        peft.on_train_epoch_start(trainer, pl_module)

        mock_logging.info.assert_has_calls(
            [
                call("Loading adapters from dummy_path"),
                call("Initializing model parallel"),
                call("Setting up optimizers"),
            ],
            any_order=True,
        )
        # Only the three messages above should have been logged.
        assert mock_logging.info.call_count == 3
        trainer.strategy.load_model_state_dict.assert_called_once_with({'dummy_state': 'dummy_value'}, strict=False)
        trainer.strategy.init_model_parallel.assert_called_once()
        trainer.strategy.setup_optimizers.assert_called_once_with(trainer)

    def test_params_to_save(self):
        model = self.DummyModel()
        peft = self.DummyPEFT()
        trainer = MagicMock()
        trainer.lightning_module = model

        model.conv.requires_grad_(False)
        model.linear.requires_grad_(True)

        peft.set_params_to_save(trainer)

        expected_trainable = {'linear.weight', 'linear.bias'}
        assert hasattr(peft, 'params_to_save'), "params_to_save not set"
        assert (
            peft.params_to_save == expected_trainable
        ), f"Expected trainable params {expected_trainable}, but got {peft.params_to_save}"

        for name, param in model.named_parameters():
            if name in peft.params_to_save:
                assert param.requires_grad, f"Parameter {name} should require gradients"
            else:
                assert not param.requires_grad, f"Parameter {name} should not require gradients"

    def test_params_to_save_batchnorm(self):
        model = self.DummyModel()
        model.bn = nn.BatchNorm2d(3)
        peft = self.DummyPEFT()
        trainer = MagicMock()
        trainer.lightning_module = model

        model.freeze()
        peft.set_params_to_save(trainer)

        # BatchNorm running statistics are buffers, so they must be tracked for
        # saving even though the whole model is frozen.
        expected_trainable = {'bn.running_mean', 'bn.running_var', 'bn.num_batches_tracked'}
        assert hasattr(peft, 'params_to_save'), "params_to_save not set"
        assert (
            peft.params_to_save == expected_trainable
        ), f"Expected trainable params {expected_trainable}, but got {peft.params_to_save}"

        for name, param in model.named_parameters():
            if name in peft.params_to_save:
                assert param.requires_grad, f"Parameter {name} should require gradients"
            else:
                assert not param.requires_grad, f"Parameter {name} should not require gradients"