import warnings

import torch

from .config import PromptEncoderConfig, PromptEncoderReparameterizationType


class PromptEncoder(torch.nn.Module):
    """
    The prompt encoder network that is used to generate the virtual token embeddings for p-tuning.

    Args:
        config ([`PromptEncoderConfig`]): The configuration of the prompt encoder.

    Example:

    ```py
    >>> from peft import PromptEncoder, PromptEncoderConfig

    >>> config = PromptEncoderConfig(
    ...     peft_type="P_TUNING",
    ...     task_type="SEQ_2_SEQ_LM",
    ...     num_virtual_tokens=20,
    ...     token_dim=768,
    ...     num_transformer_submodules=1,
    ...     num_attention_heads=12,
    ...     num_layers=12,
    ...     encoder_reparameterization_type="MLP",
    ...     encoder_hidden_size=768,
    ... )

    >>> prompt_encoder = PromptEncoder(config)
    ```

    **Attributes**:
        - **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prompt encoder.
        - **mlp_head** (`torch.nn.Sequential`) -- The MLP head of the prompt encoder if `inference_mode=False`.
        - **lstm_head** (`torch.nn.LSTM`) -- The LSTM head of the prompt encoder if `inference_mode=False` and
          `encoder_reparameterization_type="LSTM"`.
        - **token_dim** (`int`) -- The hidden embedding dimension of the base transformer model.
        - **input_size** (`int`) -- The input size of the prompt encoder.
        - **output_size** (`int`) -- The output size of the prompt encoder.
        - **hidden_size** (`int`) -- The hidden size of the prompt encoder.
        - **total_virtual_tokens** (`int`) -- The total number of virtual tokens of the prompt encoder.
        - **encoder_type** (Union[[`PromptEncoderReparameterizationType`], `str`]) -- The encoder type of the prompt
          encoder.


    Input shape: (`batch_size`, `total_virtual_tokens`)

    Output shape: (`batch_size`, `total_virtual_tokens`, `token_dim`)
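
    A minimal usage sketch (reusing the `config` and `prompt_encoder` built in the example above, here only
    as an illustration): the encoder maps a batch of virtual token indices to prompt embeddings.

    ```py
    >>> import torch

    >>> # one row of indices [0, ..., total_virtual_tokens - 1] per batch element
    >>> indices = torch.arange(prompt_encoder.total_virtual_tokens).unsqueeze(0)
    >>> prompt_embeddings = prompt_encoder(indices)
    >>> prompt_embeddings.shape
    torch.Size([1, 20, 768])
    ```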
    """

    def __init__(self, config):
        super().__init__()
        self.token_dim = config.token_dim
        self.input_size = self.token_dim
        self.output_size = self.token_dim
        self.hidden_size = config.encoder_hidden_size
        self.total_virtual_tokens = config.num_virtual_tokens * config.num_transformer_submodules
        self.encoder_type = config.encoder_reparameterization_type

        # embedding table shared by both reparameterization types
        self.embedding = torch.nn.Embedding(self.total_virtual_tokens, self.token_dim)
        if not config.inference_mode:
            if self.encoder_type == PromptEncoderReparameterizationType.LSTM:
                lstm_dropout = config.encoder_dropout
                num_layers = config.encoder_num_layers
                # LSTM reparameterization: bidirectional LSTM followed by a two-layer MLP head
                self.lstm_head = torch.nn.LSTM(
                    input_size=self.input_size,
                    hidden_size=self.hidden_size,
                    num_layers=num_layers,
                    dropout=lstm_dropout,
                    bidirectional=True,
                    batch_first=True,
                )
                self.mlp_head = torch.nn.Sequential(
                    torch.nn.Linear(self.hidden_size * 2, self.hidden_size * 2),
                    torch.nn.ReLU(),
                    torch.nn.Linear(self.hidden_size * 2, self.output_size),
                )
            elif self.encoder_type == PromptEncoderReparameterizationType.MLP:
                encoder_num_layers_default = PromptEncoderConfig.encoder_num_layers
                if config.encoder_num_layers != encoder_num_layers_default:
                    warnings.warn(
                        f"for {self.encoder_type.value}, the argument `encoder_num_layers` is ignored. "
                        f"Exactly {encoder_num_layers_default} MLP layers are used."
                    )
                # MLP reparameterization: fixed number of MLP layers regardless of `encoder_num_layers`
                layers = [
                    torch.nn.Linear(self.input_size, self.hidden_size),
                    torch.nn.ReLU(),
                    torch.nn.Linear(self.hidden_size, self.hidden_size),
                    torch.nn.ReLU(),
                    torch.nn.Linear(self.hidden_size, self.output_size),
                ]
                self.mlp_head = torch.nn.Sequential(*layers)
            else:
                raise ValueError("Prompt encoder type not recognized. Please use one of MLP (recommended) or LSTM.")

    def forward(self, indices):
        input_embeds = self.embedding(indices)
        if self.encoder_type == PromptEncoderReparameterizationType.LSTM:
            output_embeds = self.mlp_head(self.lstm_head(input_embeds)[0])
        elif self.encoder_type == PromptEncoderReparameterizationType.MLP:
            output_embeds = self.mlp_head(input_embeds)
        else:
            raise ValueError("Prompt encoder type not recognized. Please use one of MLP (recommended) or LSTM.")

        return output_embeds