o
    	۷i                     @   s&  d dl Z d dlmZmZmZ d dlZd dlmZ d dlmZm	Z	m
Z
 ddlmZ ddlmZmZmZ ddlmZ ddlmZ dd	lmZ dd
lmZ ddlmZmZmZmZ ddlmZm Z  ddl!m"Z" ddl#m$Z$m%Z%m&Z&m'Z' ddl(m)Z) ddl*m+Z+ e& rddl,m-Z-m.Z. e'/e0Z1G dd dej2Z3G dd dej2Z4			d4dej5dej6dej6dej6deej6 dee7 de7deej6 fd d!Z8G d"d# d#ej5Z9G d$d% d%eZ:e%G d&d' d'e Z;e%G d(d) d)e;Z<e%d*d+G d,d- d-e;eZ=e%G d.d/ d/e;Z>e%d0d+G d1d2 d2e;Z?g d3Z@dS )5    N)CallableOptionalUnion)BCEWithLogitsLossCrossEntropyLossMSELoss   )ACT2FN)CacheDynamicCacheEncoderDecoderCache)GenerationMixin)AttentionMaskConverter)FlashAttentionKwargs)GradientCheckpointingLayer))BaseModelOutputWithPastAndCrossAttentions!CausalLMOutputWithCrossAttentions SequenceClassifierOutputWithPastTokenClassifierOutput)ALL_ATTENTION_FUNCTIONSPreTrainedModel)Unpack)TransformersKwargsauto_docstringis_torch_flex_attn_availablelogging)deprecate_kwarg   )BioGptConfig)	BlockMaskmake_flex_block_causal_maskc                       sR   e Zd ZdZdedef fddZ		ddejd	ed
eej f fddZ	  Z
S ) BioGptLearnedPositionalEmbeddingzN
    This module learns positional embeddings up to a fixed maximum size.
    num_embeddingsembedding_dimc                    s   d| _ t || j  | d S )N   )offsetsuper__init__)selfr"   r#   	__class__ `/home/ubuntu/vllm_env/lib/python3.10/site-packages/transformers/models/biogpt/modeling_biogpt.pyr'   <   s   z)BioGptLearnedPositionalEmbedding.__init__r   Nattention_maskpast_key_values_lengthposition_idsc                    sL   |du rt j|dd}|| d  }|dd|df }t || j S )z3`input_ids_shape` is expected to be [bsz x seqlen].Nr   dim)torchcumsumlongr&   forwardr%   )r(   r-   r.   r/   r)   r+   r,   r5   B   s
   z(BioGptLearnedPositionalEmbedding.forward)r   N)__name__
__module____qualname____doc__intr'   r2   
LongTensorr   r5   __classcell__r+   r+   r)   r,   r!   7   s    	r!   c                
       sL   e Zd ZdZddedededee f fddZd	ej	f fd
dZ
  ZS )BioGptScaledWordEmbeddingz\
    This module overrides nn.Embeddings' forward by multiplying with embeddings scale.
          ?r"   r#   padding_idxembed_scalec                    s   t  ||| || _d S N)r&   r'   r@   )r(   r"   r#   r?   r@   r)   r+   r,   r'   X   s   
z"BioGptScaledWordEmbedding.__init__	input_idsc                    s   t  || j S rA   )r&   r5   r@   )r(   rB   r)   r+   r,   r5   \   s   z!BioGptScaledWordEmbedding.forward)r>   )r6   r7   r8   r9   r:   r   floatr'   r2   Tensorr5   r<   r+   r+   r)   r,   r=   S   s    $r=           modulequerykeyvaluer-   scalingdropout	head_maskc                 K   s   |d u r| dd }t||dd| }	|d ur|	| }	tjj|	dd}	|d ur5|	|dddd }	tjj|	|| j	d}	t|	|}
|
dd
 }
|
|	fS )N      r$   r   r0   r   ptraining)sizer2   matmul	transposenn
functionalsoftmaxviewrK   rQ   
contiguous)rF   rG   rH   rI   r-   rJ   rK   rL   kwargsattn_weightsattn_outputr+   r+   r,   eager_attention_forward`   s   r]   c                       s   e Zd ZdZ						ddededed	ed
ededee dee f fddZ	e
dddd						ddejdeej dee deej deej dedeej dee deejeej eeej  f fddZ  ZS ) BioGptAttentionz=Multi-headed attention from 'Attention Is All You Need' paperrE   FTN	embed_dim	num_headsrK   
is_decoderbias	is_causalconfig	layer_idxc	           	         s   t    || _|| _|| _|| | _|| _| j| | jkr*td| j d| d| jd | _|| _	|| _
|| _|d u rK| j	rKtd| jj d tj|||d| _tj|||d| _tj|||d| _tj|||d| _d S )Nz;embed_dim must be divisible by num_heads (got `embed_dim`: z and `num_heads`: z).rN   zInstantiating a decoder z without passing `layer_idx` is not recommended and will lead to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` when creating this class.rb   )r&   r'   r_   r`   rK   head_dimrd   
ValueErrorrJ   ra   rc   re   loggerwarning_oncer*   r6   rU   Lineark_projv_projq_projout_proj)	r(   r_   r`   rK   ra   rb   rc   rd   re   r)   r+   r,   r'      s0   


zBioGptAttention.__init__past_key_valuepast_key_values4.58new_nameversionhidden_stateskey_value_statesr-   layer_head_maskoutput_attentionscache_positionrZ   returnc                 K   s  |du}	|j dd \}
}|	r|j d n|}|
|d| jf}|
|d| jf}| |j| dd}d}|durNt|trL|j| j	}|	rH|j
}n|j}n|}|	rR|n|}|	rk|durk|rk|j| j	 j}|j| j	 j}n@| |}| |}|j| dd}|j| dd}|dur|	s|nd}|||| j	d|i\}}|	rt|trd|j| j	< t}| jjdkrt| jj }|| ||||f| jsd	n| j| j||d
|\}}||
|d }| |}||fS )z#Input shape: Batch x Time x ChannelNrM   r   r$   Frz   TeagerrE   )rK   rJ   ry   rL   )shaperg   rn   rX   rT   
isinstancer   
is_updatedgetre   cross_attention_cacheself_attention_cachelayerskeysvaluesrl   rm   updater]   rd   _attn_implementationr   rQ   rK   rJ   reshaperY   ro   )r(   rv   rw   rq   r-   rx   ry   rz   rZ   is_cross_attentionbsztgt_lensrc_lenq_input_shapekv_input_shapequery_statesr   curr_past_key_valuecurrent_states
key_statesvalue_statesattention_interfacer\   r[   r+   r+   r,   r5      sb   





zBioGptAttention.forward)rE   FTFNN)NNNNFN)r6   r7   r8   r9   r:   rC   boolr   r   r'   r   r2   rD   r
   r   r   tupler5   r<   r+   r+   r)   r,   r^   ~   sf    	'r^   c                       s   e Zd Zddedee f fddZedddd					
			ddej	deej	 deej	 dee
 dee dee deej deej	 dee deejeeejejf  f fddZ  ZS )BioGptDecoderLayerNrd   re   c              	      s   t    |j| _t| j|j|jdd||d| _|j| _	t
|j | _|j| _t| j| _t| j|j| _t|j| j| _t| j| _d S )NT)r_   r`   rK   ra   rc   rd   re   )r&   r'   hidden_sizer_   r^   num_attention_headsattention_probs_dropout_prob	self_attnhidden_dropout_probrK   r	   
hidden_actactivation_fnactivation_dropoutrU   	LayerNormself_attn_layer_normrk   intermediate_sizefc1fc2final_layer_norm)r(   rd   re   r)   r+   r,   r'      s$   
	zBioGptDecoderLayer.__init__rp   rq   rr   rs   FTrv   r-   rx   ry   	use_cacher/   rz   rZ   r{   c	              
   K   s   |}
|  |}| jd|||||||d|	\}}tjj|| j| jd}|
| }|}
| |}| |}| |}tjj|| j	| jd}| 
|}tjj|| j| jd}|
| }|f}|rb||f7 }|S )a  
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(encoder_attention_heads,)`.
            past_key_values (`Cache`): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
                cache in the correct position and to infer the complete sequence length.
        )rv   rq   r-   rx   ry   r/   rz   rO   Nr+   )r   r   rU   rV   rK   rQ   r   r   r   r   r   )r(   rv   r-   rx   rq   ry   r   r/   rz   rZ   residualself_attn_weightsoutputsr+   r+   r,   r5     s6   







zBioGptDecoderLayer.forwardrA   )NNNFTNN)r6   r7   r8   r   r   r:   r'   r   r2   rD   r
   r   r;   r   r   r   FloatTensorr5   r<   r+   r+   r)   r,   r      s>    	
r   c                   @   s   e Zd ZU eed< dZdZdZdZdZ	dZ
deeejdf  dejdejdefd	d
ZedejdededejdejdefddZdS )BioGptPreTrainedModelrd   biogptTr-   r   input_tensorrz   rq   c                 C   sb  | j jdkr*t|tjrt|}|S |d u r(ttj|jd |jd f|jd}|S | j jdkr>|d ur<|dk	 r<|S d S |d urF|
 nd}|d urO|jnd}| j jdkre|setj|||| jd	red S |j}|jd }|rt| }	nt|tjr|jd
 n|| d }	| j|||	|||jd d}
| j jdkr|d ur|jjdv rt|j}t|
|}
|
S )Nflex_attentionr   r   )rR   deviceflash_attention_2rE   Fsdpa)inputs_embedsr.   is_trainingrM   )sequence_lengthtarget_lengthdtyperz   
batch_size)cudaxpunpu)rd   r   r~   r2   rD   r    onesr}   r   anyget_seq_lengthis_compileabler   _ignore_causal_mask_sdparQ   r   get_max_cache_shape5_prepare_4d_causal_attention_mask_with_cache_positiontypefinfomin_unmask_unattended)r(   r-   r   rz   rq   past_seen_tokensusing_compilable_cacher   r   r   causal_mask	min_dtyper+   r+   r,   _update_causal_maske  s`   





z)BioGptPreTrainedModel._update_causal_maskr   r   r   r   c                 K   sD  | dur|   dkr| }|S t|j}tj||f|||jd}|dkr+tj|dd}|tj||jd|ddk9 }|ddddddf 	|ddd}| dur|
 }| jd }	|ddddddd|	f | ddddddf |j }
|
dk}
|ddddddd|	f |
||ddddddd|	f< |S )	aM  
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
        N   )
fill_valuer   r   r   )diagonalr   rM   r   )r1   r2   r   r   fullr   triuaranger   expandcloner}   tomasked_fill)r-   r   r   r   rz   r   rZ   r   r   mask_lengthpadding_maskr+   r+   r,   r     s,    $
6  zKBioGptPreTrainedModel._prepare_4d_causal_attention_mask_with_cache_positionN)r6   r7   r8   r   __annotations__base_model_prefixsupports_gradient_checkpointing_supports_flash_attn_supports_sdpa_supports_flex_attn_can_compile_fullgraphr   r   r2   rD   r
   r   staticmethodr:   r   r   r+   r+   r+   r,   r   Y  s>   
 
Lr   c                       s   e Zd Zdef fddZe											ddeej deej	 deej	 deej	 d	ee
 d
ee deej dee dee dee deej dee deeef fddZ  ZS )BioGptModelrd   c                    s   t     | _ j| _ j| _ j| _ j| _	 j
r"t jnd}t j| j| j	|d| _t j| j| _t fddt jD | _t| j| _d| _|   d S )Nr>   )r@   c                    s   g | ]}t  |d qS ))re   )r   ).0ird   r+   r,   
<listcomp>  s    z(BioGptModel.__init__.<locals>.<listcomp>F)r&   r'   rd   	layerdropr   rK   r   r_   pad_token_idr?   scale_embeddingmathsqrtr=   
vocab_sizeembed_tokensr!   max_position_embeddingsembed_positionsrU   
ModuleListrangenum_hidden_layersr   r   
layer_normgradient_checkpointing	post_init)r(   rd   r@   r)   r   r,   r'     s    zBioGptModel.__init__NrB   r-   rL   r   rq   r   r/   ry   output_hidden_statesreturn_dictrz   rZ   r{   c                 K   s:  |d ur|n| j j}|	d ur|	n| j j}	|d ur|n| j j}|
d ur$|
n| j j}
|d u |d uA r4td|d urF|}|j}|d|d }n|d ur^| d d }|d d d d df }ntd|d u rk| 	|}| j
rz| jrz|rztd d}|r|d u rt| j d}|rt|trtd t|}| d d \}}|d ur| nd}|d u rtj||| |jd	}|d u r|| }tj|||jd	}|}| ||||}|d u rtj|d
d}|| d
  }|d d |d f }| j|||d}|| }tjj|| j| jd}| j
r| jr|rtd d}|	r dnd }|r'dnd }d }t| jD ]E\}}|	r<||f7 }| jrMt g }|| j!k rMq0||f||d urZ|| nd |||||d|}|d }|rt||d
 f7 }q0|	r~||f7 }| "|}|
stdd |||||fD S t#|||||dS )NzTYou cannot specify both decoder_input_ids and decoder_inputs_embeds at the same timerM   zEYou have to specify either decoder_input_ids or decoder_inputs_embedsz[`use_cache=True` is incompatible with gradient checkpointing`. Setting `use_cache=False`...Fr   zPassing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. You should pass an instance of `DynamicCache` instead, e.g. `past_key_values=DynamicCache.from_legacy_cache(past_key_values)`.r   r   r   r0   )r/   rO   zZ`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...r+   )r-   rx   rq   ry   r   r/   rz   c                 s   s    | ]	}|d ur|V  qd S rA   r+   )r   vr+   r+   r,   	<genexpr>  s    z&BioGptModel.forward.<locals>.<genexpr>)last_hidden_staterq   rv   
attentionscross_attentions)$rd   ry   r   r   use_return_dictrh   r}   rX   rR   r   r   rQ   ri   rj   r   r~   r   from_legacy_cacher   r2   r   r   r   r   r3   r4   r   rU   rV   rK   	enumerater   randr   r   r   )r(   rB   r-   rL   r   rq   r   r/   ry   r   r   rz   rZ   inputinput_shaper   
seq_lengthr.   mask_seq_lengthself_attn_cacher   	positionsrv   all_hidden_statesall_self_attnsall_cross_attentionsidxdecoder_layerdropout_probabilitylayer_outputsr+   r+   r,   r5     s   



	

zBioGptModel.forward)NNNNNNNNNNN)r6   r7   r8   r   r'   r   r   r2   r;   r   r
   r   rD   r   r   r   r   r   r5   r<   r+   r+   r)   r,   r     sR    	

r   zR
    BioGPT Model with a `language modeling` head on top for CLM fine-tuning.
    )custom_introc                        s   e Zd ZdgZ fddZdd Zdd Ze												dd	ee	j
 d
ee	j dee	j dee	j dee dee	j
 dee dee	j
 dee dee dee dee	j dee deeef fddZ  ZS )BioGptForCausalLMzoutput_projection.weightc                    s8   t  | t|| _tj|j|jdd| _| 	  d S NFrf   )
r&   r'   r   r   rU   rk   r   r   output_projectionr   r(   rd   r)   r+   r,   r'     s   
zBioGptForCausalLM.__init__c                 C   s   | j S rA   r  r(   r+   r+   r,   get_output_embeddings  s   z'BioGptForCausalLM.get_output_embeddingsc                 C   s
   || _ d S rA   r  )r(   new_embeddingsr+   r+   r,   set_output_embeddings  s   
z'BioGptForCausalLM.set_output_embeddingsNrB   r-   rL   r   rq   labelsr   r/   ry   r   r   rz   rZ   r{   c                 K   s   |dur|n| j j}| j|f|||||||	|
||d
|}|d }| |}d}|dur;| j||fd| j ji|}|sQ|f|dd  }|durO|f| S |S t|||j|j|j	|j
dS )a  
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
            are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
        N)
r-   rL   r   rq   r   r/   ry   r   r   rz   r   r   r   )losslogitsrq   rv   r   r   )rd   r   r   r  loss_functionr   r   rq   rv   r   r   )r(   rB   r-   rL   r   rq   r  r   r/   ry   r   r   rz   rZ   r   sequence_outputprediction_scoreslm_lossoutputr+   r+   r,   r5     sP   
zBioGptForCausalLM.forward)NNNNNNNNNNNN)r6   r7   r8   _tied_weights_keysr'   r  r  r   r   r2   r;   r   r
   r   rD   r   r   r   r   r   r5   r<   r+   r+   r)   r,   r    s^    		

r  c                        s   e Zd Z fddZe													ddeej deej deej deej dee	 d	eej d
eej dee
 deej dee
 dee
 dee
 deej deeef fddZ  ZS )BioGptForTokenClassificationc                    sj   t  | |j| _t|| _t|dr|jd ur|j}n|j}t	|| _
t|j|j| _|   d S )Nclassifier_dropout)r&   r'   
num_labelsr   r   hasattrr   r   rU   DropoutrK   rk   r   
classifierr   )r(   rd   r   r)   r+   r,   r'     s   
z%BioGptForTokenClassification.__init__NrB   token_type_idsr-   rL   rq   r   r  r   r/   ry   r   r   rz   r{   c                 C   s  |dur|n| j j}| j|||||||	|
|||d}|d }| |}| |}d}|durgt }|durZ|ddk}|d| j}t	||dt
|j|}|||}n||d| j|d}|s}|f|dd  }|dur{|f| S |S t|||j|jdS )  
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        N
rq   r-   rL   r   r   r/   ry   r   r   rz   r   rM   r   r$   )r  r  rv   r   )rd   r   r   rK   r$  r   rX   r!  r2   wheretensorignore_indextype_asr   rv   r   )r(   rB   r%  r-   rL   rq   r   r  r   r/   ry   r   r   rz   transformer_outputsrv   r  r  loss_fctactive_lossactive_logitsactive_labelsr  r+   r+   r,   r5     sJ   

z$BioGptForTokenClassification.forward)NNNNNNNNNNNNN)r6   r7   r8   r'   r   r   r2   r;   r   r
   r   rD   r   r   r   r5   r<   r+   r+   r)   r,   r    sZ    	

r  a  
    The BioGpt Model transformer with a sequence classification head on top (linear layer).

    [`BioGptForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT-2) do.

    Since it does classification on the last token, it is required to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    c                        s   e Zd Zdef fddZe													ddeej deej	 deej	 d	ee
 d
eej	 deej dee deej dee dee dee deej deeejf deeef fddZdd Zdd Z  ZS )BioGptForSequenceClassificationrd   c                    s@   t  | |j| _t|| _tj|j| jdd| _| 	  d S r  )
r&   r'   r!  r   r   rU   rk   r   scorer   r  r)   r+   r,   r'   R  s
   
z(BioGptForSequenceClassification.__init__Nr   rB   r-   rL   rq   r   r  r   r/   ry   r   r   rz   logits_to_keepr{   c                 C   s<  |dur|n| j j}| j||||||||	|
||d}|d }t|tr)t| dn|}| |dd|ddf }|durG|jdd \}}n	|jdd \}}| j jdu rYd}n"|durot	
|| j jdd |j}nd}t| jj d |t	j||jd|f }d}|dur| j jdu r| jdkrd	| j _n| jdkr|jt	jks|jt	jkrd
| j _nd| j _| j jd	krt }| jdkr|| | }n+|||}n%| j jd
krt }||d| j|d}n| j jdkrt }|||}|s|f|dd  }|dur|f| S |S t|||j|j|jdS )r&  Nr'  r   r$   rM   r   z will not detect padding tokens in `inputs_embeds`. Results may be unexpected if using padding tokens in conjunction with `inputs_embeds.`r   
regressionsingle_label_classificationmulti_label_classification)r  r  rq   rv   r   ) rd   r   r   r~   r:   slicer2  r}   r   r2   nesumr   r   ri   rj   r*   r6   r   problem_typer!  r   r4   r   squeezer   rX   r   r   rq   rv   r   )r(   rB   r-   rL   rq   r   r  r   r/   ry   r   r   rz   r3  r,  rv   slice_indicesr  r   r   pooled_logitsr  r-  r  r+   r+   r,   r5   [  st   $

"


z'BioGptForSequenceClassification.forwardc                 C   s   | j jS rA   r   r   r  r+   r+   r,   get_input_embeddings  s   z4BioGptForSequenceClassification.get_input_embeddingsc                 C   s   || j _d S rA   r>  )r(   rI   r+   r+   r,   set_input_embeddings  s   z4BioGptForSequenceClassification.set_input_embeddings)NNNNNNNNNNNNr   )r6   r7   r8   r   r'   r   r   r2   r;   r   r
   r   rD   r   r:   r   r   r5   r?  r@  r<   r+   r+   r)   r,   r1  C  s^    		

^r1  )r  r  r1  r   r   )NrE   N)Ar   typingr   r   r   r2   torch.nnrU   r   r   r   activationsr	   cache_utilsr
   r   r   
generationr   modeling_attn_mask_utilsr   modeling_flash_attention_utilsr   modeling_layersr   modeling_outputsr   r   r   r   modeling_utilsr   r   processing_utilsr   utilsr   r   r   r   utils.deprecationr   configuration_biogptr   integrations.flex_attentionr   r    
get_loggerr6   ri   	Embeddingr!   r=   ModulerD   rC   r]   r^   r   r   r   r  r  r1  __all__r+   r+   r+   r,   <module>   s|   

 [  +TTp