"""
Partially inspired by torchtune's flex attention implementation

Citation:
@software{torchtune,
  title = {torchtune: PyTorch's finetuning library},
  author = {torchtune maintainers and contributors},
  url = {https://github.com/pytorch/torchtune},
  license = {BSD-3-Clause},
  month = apr,
  year = {2024}
}
"""

from typing import Optional, Union

import torch
from packaging import version

from ..utils import is_torch_flex_attn_available, logging
from ..utils.import_utils import _torch_version, is_torchdynamo_compiling


if is_torch_flex_attn_available():
    from torch.nn.attention.flex_attention import BlockMask, create_block_mask, flex_attention


logger = logging.get_logger(__name__)


class WrappedFlexAttention:
    """
    We are doing a singleton class so that flex attention is compiled once when it's first called.
    """

    _instance = None
    _is_flex_compiled = False
    _compiled_flex_attention = None

    def __new__(cls, *args, **kwargs):
        if cls._instance is None:
            # Create a new instance only if one does not already exist
            cls._instance = super().__new__(cls)
        return cls._instance

    @torch.compiler.disable(recursive=False)
    def __init__(self, training):
        """
        Initialize or update the singleton instance.
        """
        if not self._is_flex_compiled or training != self.training:
            self.training = training
            # On PyTorch 2.6.0, training runs are compiled with static shapes and the
            # "max-autotune-no-cudagraphs" mode; otherwise the default compile settings apply.
            if version.parse(_torch_version).base_version == "2.6.0" and training:
                self._compiled_flex_attention = torch.compile(
                    flex_attention, dynamic=False, mode="max-autotune-no-cudagraphs"
                )
            else:
                self._compiled_flex_attention = torch.compile(flex_attention)
            self._is_flex_compiled = True

    def __call__(self):
        return self._compiled_flex_attention


def compile_friendly_flex_attention(
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    training=False,
    **kwargs,
) -> torch.Tensor:
    # Use the singleton's compiled kernel, except when dynamo is already tracing the full model,
    # in which case the plain flex_attention is used to avoid nested compilation.
    flex_attention_compiled = WrappedFlexAttention(training)() if not is_torchdynamo_compiling() else flex_attention
    return flex_attention_compiled(query, key, value, **kwargs)


Offset = Union[torch.Tensor, int]


def make_flex_block_causal_mask(
    attention_mask_2d: torch.Tensor,
    attention_chunk_size: Optional[int] = None,
    query_length=None,
    key_length=None,
    offsets: Optional[tuple[Offset, Offset]] = None,
    is_causal: Optional[bool] = True,
) -> "BlockMask":
    """
    IMPORTANT NOTICE: This function is deprecated in favor of using the mask primitives in `masking_utils.py`,
    and will be removed in a future version without warnings. New code should not use it. It is only kept here
    for BC for now, while models using it are being patched accordingly.

    Create a block (causal) document mask for a batch of sequences, both packed and unpacked.
    Create the block (causal) logic and pass it into :func:`torch.nn.attention.flex_attention.create_block_mask`.
    The resultant BlockMask is a compressed representation of the full (causal) block
    mask. BlockMask is essential for performant computation of flex attention.
    See: https://pytorch.org/blog/flexattention/
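
    Illustrative worked example (not in the original docstring): for the packed row
    [1, 1, 1, 2, 2, 2, 0], the query at position 4 belongs to document 2, so it may attend
    only to key positions 3 and 4; positions 0-2 belong to a different document and
    position 6 is padding, so both are masked out.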

    Args:
        attention_mask_2d (torch.Tensor): Attention mask for packed and padded sequences
            of shape (batch_size, total_seq_len). e.g.

        For unpacked sequence:
        [[1, 1, 1, 1, 0, 0, 0],
         [1, 1, 1, 1, 1, 0, 0]]

        For packed sequence:
        [[1, 1, 1, 2, 2, 2, 0],
         [1, 1, 2, 2, 2, 3, 3]]

    Returns:
        BlockMask
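
    Example (illustrative, not part of the original docstring; `query`, `key` and `value` are
    assumed to be (batch, num_heads, seq_len, head_dim) tensors)::

        # ids 1, 2, 3 mark packed documents, 0 marks padding
        attention_mask = torch.tensor([[1, 1, 1, 2, 2, 2, 0],
                                       [1, 1, 2, 2, 2, 3, 3]])
        block_mask = make_flex_block_causal_mask(attention_mask)
        out = flex_attention(query, key, value, block_mask=block_mask)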
    """
    batch_size, total_seq_len = attention_mask_2d.shape
    if not key_length:
        key_length = total_seq_len
    if not query_length:
        query_length = total_seq_len
    # Pad the 2D mask on the right so that kv indices up to `key_length` can be gathered safely
    attention_mask_2d = torch.nn.functional.pad(attention_mask_2d, value=0, pad=(0, key_length))
    device = attention_mask_2d.device
    document_ids = attention_mask_2d.clone()

    if attention_chunk_size is not None:
        # Chunk index of every position: a cumulative count floor-divided by the chunk size,
        # e.g. [0, 0, 0, 1, 1, 1, 2, 2, 2, ...] for attention_chunk_size == 3
        chunk_idxs = (document_ids.clone().fill_(1).cumsum(-1) - 1) // attention_chunk_size

    def causal_mask_mod(batch_idx, head_idx, q_idx, kv_idx):
        """
        Defines the logic of a block causal mask by combining both a standard causal mask
        and a block diagonal document mask.
        See :func:`~torchtune.modules.attention_utils.create_block_causal_mask`
        for an illustration.
        """
        causal_mask = q_idx >= kv_idx
        document_mask = document_ids[batch_idx, q_idx] == document_ids[batch_idx, kv_idx]
        padding_mask = attention_mask_2d[batch_idx, q_idx] > 0
        final_mask = causal_mask & padding_mask & document_mask
        return final_mask

    def chunk_causal_mask_mod(batch_idx, head_idx, q_idx, kv_idx):
        """
        Combines the chunk mask with the causal mask for chunked attention.
        """
        chunk_mask = chunk_idxs[batch_idx, q_idx] == chunk_idxs[batch_idx, kv_idx]
        causal_doc_mask = causal_mask_mod(batch_idx, head_idx, q_idx, kv_idx)
        return chunk_mask & causal_doc_mask

    def default_mask_mod(batch_idx, head_idx, q_idx, kv_idx):
        """
        Utilizes default attention mask to enable encoder and encoder-decoder
        attention masks.
        """
        document_mask = document_ids[batch_idx, q_idx] == document_ids[batch_idx, kv_idx]
        # index the padding mask with kv_idx so that padded key positions are excluded
        padding_mask = attention_mask_2d[batch_idx, kv_idx] > 0
        final_mask = padding_mask & document_mask
        return final_mask

    if not is_causal:
        mask_mod_maybe_combined = default_mask_mod
    else:
        mask_mod_maybe_combined = causal_mask_mod if attention_chunk_size is None else chunk_causal_mask_mod

    if offsets is not None:
        q_offset = offsets[0]
        kv_offset = offsets[1]

        def mask_mod(batch_idx, head_idx, q_idx, kv_idx):
            offset_q = q_idx + q_offset
            offset_kv = kv_idx + kv_offset
            return mask_mod_maybe_combined(batch_idx, head_idx, offset_q, offset_kv)
    else:
        mask_mod = mask_mod_maybe_combined

    return create_block_mask(
        mask_mod=mask_mod,
        B=batch_size,
        H=None,  # broadcast over attention heads
        Q_LEN=query_length,
        KV_LEN=key_length,
        device=device,
        _compile=True,
    )


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
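
    Example (illustrative): with n_rep=3, an input of shape (2, 4, 128, 64) becomes (2, 12, 128, 64).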
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def flex_attention_forward(
    module: torch.nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Union[torch.Tensor, "BlockMask"],
    scaling: Optional[float] = None,
    softcap: Optional[float] = None,
    head_mask: Optional[torch.Tensor] = None,
    **kwargs,
) -> tuple[torch.Tensor, torch.Tensor]:
    if head_mask is not None:
        logger.warning_once(
            "`flex_attention` does not support `head_mask`. Please set your attention to `eager` if you want this feature."
        )

    if kwargs.get("dropout", 0.0) > 0:
        raise ValueError(
            "`flex_attention` does not support `dropout`. Please use it with inference only (`model.eval()`) or turn off"
            " the attention dropout in the respective config."
        )

    block_mask = None
    score_mask = None
    if isinstance(attention_mask, BlockMask):
        block_mask = attention_mask
    else:
        score_mask = attention_mask

    if score_mask is not None:
        score_mask = score_mask[:, :, :, : key.shape[-2]]

    def score_mod(score, batch_idx, head_idx, q_idx, kv_idx):
        if softcap is not None:
            score = softcap * torch.tanh(score / softcap)
        if score_mask is not None:
            score = score + score_mask[batch_idx][0][q_idx][kv_idx]
        if head_mask is not None:
            score = score + head_mask[batch_idx][head_idx][0][0]
        return score

    enable_gqa = True
    num_local_query_heads = query.shape[1]

    # If the number of local query heads is not a power of two (e.g. under tensor parallelism),
    # the GQA kernel path is not used; repeat the kv heads manually instead.
    if not ((num_local_query_heads & (num_local_query_heads - 1)) == 0):
        key = repeat_kv(key, query.shape[1] // key.shape[1])
        value = repeat_kv(value, query.shape[1] // value.shape[1])
        enable_gqa = False

    kernel_options = kwargs.get("kernel_options", None)
    attn_output, attention_weights = compile_friendly_flex_attention(
        query,
        key,
        value,
        score_mod=score_mod,
        block_mask=block_mask,
        enable_gqa=enable_gqa,
        scale=scaling,
        kernel_options=kernel_options,
        # flex attention computes the log-sum-exp regardless, so returning it adds no extra work
        return_lse=True,
        training=module.training,
    )
    # the returned lse is in float32; cast it back to the value dtype
    attention_weights = attention_weights.to(value.dtype)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attention_weights

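

# Illustrative usage sketch (the call site and variable names below are assumptions, not part of
# this module): inside a model's attention layer, the forward helper is typically invoked with
# (batch, num_heads, seq_len, head_dim) tensors.
#
#     block_mask = make_flex_block_causal_mask(attention_mask_2d)
#     attn_output, attn_weights = flex_attention_forward(
#         module=self,                  # any nn.Module exposing `.training`
#         query=q, key=k, value=v,      # (batch, heads, seq, head_dim)
#         attention_mask=block_mask,    # a BlockMask, or a 4D additive float mask
#         scaling=q.shape[-1] ** -0.5,
#     )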