import math

import torch

from peft.utils.integrations import gather_params_ctx

from .config import PromptTuningInit


class PromptEmbedding(torch.nn.Module):
    """
    The model to encode virtual tokens into prompt embeddings.

    Args:
        config ([`PromptTuningConfig`]): The configuration of the prompt embedding.
        word_embeddings (`torch.nn.Module`): The word embeddings of the base transformer model.

    **Attributes**:
        - **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prompt embedding.

    Example:

    ```py
    >>> from peft import PromptEmbedding, PromptTuningConfig

    >>> config = PromptTuningConfig(
    ...     peft_type="PROMPT_TUNING",
    ...     task_type="SEQ_2_SEQ_LM",
    ...     num_virtual_tokens=20,
    ...     token_dim=768,
    ...     num_transformer_submodules=1,
    ...     num_attention_heads=12,
    ...     num_layers=12,
    ...     prompt_tuning_init="TEXT",
    ...     prompt_tuning_init_text="Predict if sentiment of this review is positive, negative or neutral",
    ...     tokenizer_name_or_path="t5-base",
    ... )

    >>> # t5_model.shared is the word embeddings of the base model
    >>> prompt_embedding = PromptEmbedding(config, t5_model.shared)
    ```

    Input Shape: (`batch_size`, `total_virtual_tokens`)

    Output Shape: (`batch_size`, `total_virtual_tokens`, `token_dim`)
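
    A minimal usage sketch, assuming the `config` and `prompt_embedding` objects
    from the example above (the surrounding PEFT model normally builds these
    indices itself):

    ```py
    >>> import torch

    >>> total_virtual_tokens = config.num_virtual_tokens * config.num_transformer_submodules
    >>> indices = torch.arange(total_virtual_tokens).unsqueeze(0)  # (1, 20)
    >>> prompt_embeddings = prompt_embedding(indices)  # (1, 20, 768)
    ```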
    """

    def __init__(self, config, word_embeddings):
        super().__init__()

        total_virtual_tokens = config.num_virtual_tokens * config.num_transformer_submodules
        self.embedding = torch.nn.Embedding(total_virtual_tokens, config.token_dim)
        if config.prompt_tuning_init == PromptTuningInit.SAMPLE_VOCAB and not config.inference_mode:
            # Initialize the prompt with the embeddings of tokens sampled uniformly
            # at random from the base model's vocabulary.
            vocab_size = word_embeddings.num_embeddings
            init_token_ids = torch.randint(0, vocab_size, (total_virtual_tokens,), dtype=torch.long).to(
                word_embeddings.weight.device
            )
            with gather_params_ctx(word_embeddings.parameters()):
                word_embedding_weights = word_embeddings(init_token_ids).detach().clone()
            word_embedding_weights = word_embedding_weights.to(torch.float32)
            self.embedding.weight = torch.nn.Parameter(word_embedding_weights)
        elif config.prompt_tuning_init == PromptTuningInit.TEXT and not config.inference_mode:
            from transformers import AutoTokenizer

            tokenizer_kwargs = config.tokenizer_kwargs or {}
            # "trust_remote_code" is stripped from the kwargs and never forwarded
            # to the tokenizer.
            tokenizer_kwargs.pop("trust_remote_code", None)
            tokenizer = AutoTokenizer.from_pretrained(config.tokenizer_name_or_path, **tokenizer_kwargs)
            init_text = config.prompt_tuning_init_text
            init_token_ids = tokenizer(init_text)["input_ids"]
            # Trim or repeat the token ids until their number matches total_virtual_tokens.
            num_text_tokens = len(init_token_ids)
            if num_text_tokens > total_virtual_tokens:
                init_token_ids = init_token_ids[:total_virtual_tokens]
            elif num_text_tokens < total_virtual_tokens:
                num_reps = math.ceil(total_virtual_tokens / num_text_tokens)
                init_token_ids = init_token_ids * num_reps
            init_token_ids = init_token_ids[:total_virtual_tokens]
            init_token_ids = torch.LongTensor(init_token_ids).to(word_embeddings.weight.device)
            # Gather the (possibly sharded, e.g. under ZeRO-3) embedding weights
            # before looking up the initial token embeddings.
            with gather_params_ctx(word_embeddings.parameters()):
                word_embedding_weights = word_embeddings(init_token_ids).detach().clone()
            word_embedding_weights = word_embedding_weights.to(torch.float32)
            self.embedding.weight = torch.nn.Parameter(word_embedding_weights)

    def forward(self, indices):
        # The prompt embeddings are a plain lookup into the learned embedding table.
        prompt_embeddings = self.embedding(indices)
        return prompt_embeddings
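

# A minimal, self-contained sketch (not part of the library) of the
# trim-or-repeat length handling in the TEXT branch of __init__ above: a short
# init text is tiled and then truncated so it always yields exactly
# total_virtual_tokens ids. The token ids below are made up for illustration;
# this block only runs when the file is executed directly.
if __name__ == "__main__":
    init_token_ids = [101, 7592, 2088, 102]  # hypothetical tokenizer output
    total_virtual_tokens = 10
    num_text_tokens = len(init_token_ids)
    if num_text_tokens > total_virtual_tokens:
        init_token_ids = init_token_ids[:total_virtual_tokens]
    elif num_text_tokens < total_virtual_tokens:
        num_reps = math.ceil(total_virtual_tokens / num_text_tokens)  # 3 repetitions
        init_token_ids = init_token_ids * num_reps  # length 12
    init_token_ids = init_token_ids[:total_virtual_tokens]  # length 10
    print(init_token_ids)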