o
    is                     @   s  d dl mZmZ d dlZd dlmZ d dlZejjZ	dd Z
		 						 d,d	d
Z				 d-ddZG dd dejjZG dd dejjZG dd dejjZ					 				 d.ddZ						 					 d/ddZ								 					 d0ddZd1ddZ																				 				 		 	d2deeeejf  deej deej deej d eej d!eej d"ee d#eej d$eej d%eej d&eej fd'd(Zejdddddd ddd dd dd fdejd eej d!eej deej d)ee f
d*d+ZdS )3    )OptionalUnionNc                 C   s"   | d ur|  ddkr|  S | S )N   )stride
contiguous)x r	   Q/home/ubuntu/vllm_env/lib/python3.10/site-packages/hopper/flash_attn_interface.pymaybe_contiguous   s   "r   r   r           Tr   c!           #      C   s^  dd | |||fD \} }}}| ddkr!| ddkr!| n|}dd |||	fD \}}}	dd |
|fD \}
}dd |||fD \}}}d	d ||fD \}}t|}tjg | |||||||||	|
|||||||||||||||d
 |d ||||||| R  ^}}!}"||!g|"R S )Nc                 S      g | ]}t |qS r	   r   .0r   r	   r	   r
   
<listcomp>6       z'_flash_attn_forward.<locals>.<listcomp>r   r   c                 S   r   r	   r   r   r	   r	   r
   r   8       c                 S   r   r	   r   r   r	   r	   r
   r   ;   r   c                 S   r   r	   r   r   r	   r	   r
   r   <   r   c                 S   r   r	   r   r   r	   r	   r
   r   ?   r   r   )r   r   r   flash_attn_3_cudafwd)#qkvk_newv_newqvoutcu_seqlens_qcu_seqlens_kcu_seqlens_k_new	seqused_q	seqused_kmax_seqlen_qmax_seqlen_k
page_tablekv_batch_idx	leftpad_k
rotary_cos
rotary_sinseqlens_rotary	q_descale	k_descale	v_descalesoftmax_scalecausalwindow_sizeattention_chunksoftcaprotary_interleavedscheduler_metadata
num_splitspack_gqa	sm_marginsoftmax_lserestr	   r	   r
   _flash_attn_forward   s   "(	
 !"$r;   Fc                 C   st   dd | ||||fD \} }}}}t | ||||||||||||	|
||||d |d |||^}}}}}||||fS )Nc                 S   r   r	   r   r   r	   r	   r
   r      r   z(_flash_attn_backward.<locals>.<listcomp>r   r   )r   bwd)doutr   r   r   r   r9   r   r    sequed_qsequed_kr$   r%   dqdkdvr/   r0   r1   r3   deterministicr8   	softmax_dr:   r	   r	   r
   _flash_attn_backwardh   s4   "rE   c                   @   s4   e Zd Ze							d
ddZedd	 ZdS )FlashAttnQKVPackedFuncNr   r   r   Fc                 C   s@  |d u r|j d d }| dkr$|j d dksJ |jdd\}}}n1| dks,J |d us2J |j d | d }|d | |j d ksHJ |j|||gd	d\}}}t|||d d d d d d d d d d d d d d d d d ||||f||||	|d
^}}}| ||||| || _|| _|| _|| _	|	| _
|
| _| | _|| _|S )Nr            r      dim      )r0   r1   r2   r3   r8   )shaperK   unbindsplitr;   save_for_backwardr/   r0   r1   r2   r3   rC   ndimr8   )ctxqkvr/   r0   r,   r-   r.   r1   r2   r3   rC   num_heads_qr8   r   r   r   num_heads_kr   r9   r:   r	   r	   r
   forward   sR   
zFlashAttnQKVPackedFunc.forwardc                 G   sT  | j \}}}}}| jdksJ d| jdkr<|jd d dg|jdd  R  }tj||j|jd}	|	jdd\}
}}n7|jd	 }|jd	 }|jd d ||d	  g|jd
d  R  }tj||j|jd}	|	j	|||gdd\}
}}t
||||||d d d d d d |
||| j| j| j| j| j| j |	dd |jd
 f }	|	d d d d d d d d d d d fS )Nr   -FA3 backward does not support attention_chunkrH   rN   rI   dtypedevicer   rJ   rM   r   .)saved_tensorsr2   rS   rO   torchemptyr[   r\   rP   rQ   rE   r/   r0   r1   r3   rC   r8   )rT   r=   argsr   r   r   r   r9   	qkv_shapedqkvr@   rA   rB   rV   rW   r	   r	   r
   backward   sB   
$

,zFlashAttnQKVPackedFunc.backward)	NNNr   r   r   FNr   __name__
__module____qualname__staticmethodrX   rc   r	   r	   r	   r
   rF      s    9rF   c                   @   8   e Zd Ze									dddZed	d
 ZdS )FlashAttnFuncNr   r   r   r   Fc                 C   s   |d u r|j d |d ur|j d nd d }t|||d d |d d d d d d d d d d d d d d |||	|f||
|||||d^}}}| ||||| || _|| _|
| _|| _|| _|| _|| _	|S Nr   r   rG   )r0   r1   r2   r3   r6   r7   r8   )
rO   r;   rR   r/   r0   r1   r2   r3   rC   r8   )rT   r   r   r   r/   r0   r   r,   r-   r.   r1   r2   r3   r6   r7   rC   r8   r   r9   r:   r	   r	   r
   rX      sD   $zFlashAttnFunc.forwardc                 G   s   | j \}}}}}| jdksJ dt|t|t|}}	}
t||||||d d d d d d ||	|
| j| j| j| j| j	| j
 |dd |jd f }|	dd |jd f }	|
dd |jd f }
||	|
d d d d d d d d d d d d d d fS Nr   rY   .r   )r]   r2   r^   
empty_likerE   r/   r0   r1   r3   rC   r8   rO   )rT   r=   r`   r   r   r   r   r9   r@   rA   rB   r	   r	   r
   rc   5  s6   "&zFlashAttnFunc.backwardNNNNr   r   r   r   NFr   rd   r	   r	   r	   r
   rj      s    5rj   c                   @   ri   )FlashAttnVarlenFuncNr   r   r   r   Fc                 C   s   |
d u r|j d |d ur|j d nd d }
t|||d d |d ||d ||||	d d d d d d ||||
f|||||||d^}}}| |||||||||	 || _|	| _|
| _|| _|| _|| _|| _	|| _
|| _|S rk   )rO   r;   rR   r$   r%   r/   r0   r1   r2   r3   rC   r8   )rT   r   r   r   r   r    r"   r#   r$   r%   r/   r0   r   r,   r-   r.   r1   r2   r3   r6   r7   rC   r8   r   r9   r:   r	   r	   r
   rX   V  sP   $zFlashAttnVarlenFunc.forwardc                 G   s   | j \	}}}}}}}	}
}| jdksJ dt|t|t|}}}t||||||||	|
|| j| j|||| j| j| j	| j
| j| j |dd |jd f }|dd |jd f }|dd |jd f }|||d d d d d d d d d d d d d d d d d d d d fS rl   )r]   r2   r^   rm   rE   r$   r%   r/   r0   r1   r3   rC   r8   rO   )rT   r=   r`   r   r   r   r   r9   r   r    r"   r#   r@   rA   rB   r	   r	   r
   rc     s<   "2zFlashAttnVarlenFunc.backwardrn   rd   r	   r	   r	   r
   ro   T  s    Aro   c                 C   s    t | |||||||||	|
|S )a  dropout_p should be set to 0.0 during evaluation
    If Q, K, V are already stacked into 1 tensor, this function will be faster than
    calling flash_attn_func on Q, K, V since the backward pass avoids explicit concatenation
    of the gradients of Q, K, V.
    For multi-query and grouped-query attention (MQA/GQA), please see
    flash_attn_kvpacked_func and flash_attn_func.

    If window_size != (-1, -1), implements sliding window local attention. Query at position i
    will only attend to keys between [i - window_size[0], i + window_size[1]] inclusive.

    Arguments:
        qkv: (batch_size, seqlen, 3, nheads, headdim)
        dropout_p: float. Dropout probability.
        softmax_scale: float. The scaling of QK^T before applying softmax.
            Default to 1 / sqrt(headdim).
        causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
        window_size: (left, right). If not (-1, -1), implements sliding window local attention.
        softcap: float. Anything > 0 activates softcapping attention.
        alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of (-alibi_slope * |i - j|) is added to
            the attention score of query i and key j.
        deterministic: bool. Whether to use the deterministic implementation of the backward pass,
            which is slightly slower and uses more memory. The forward pass is always deterministic.
        return_attn_probs: bool. Whether to return the attention probabilities. This option is for
           testing only. The returned probabilities are not guaranteed to be correct
           (they might not have the right scaling).
    Return:
        out: (batch_size, seqlen, nheads, headdim).
        softmax_lse [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen). The
            logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
            normalization factor).
        S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
            The output of softmax (possibly with different scaling). It also encodes the dropout
            pattern (negative means that location was dropped, nonnegative means it was kept).
    )rF   apply)rU   r/   r0   r,   r-   r.   r1   r2   r3   rC   rV   r8   r	   r	   r
   flash_attn_qkvpacked_func  s   .rq   c                 C   s(   t | |||||||||	|
|||||S )a	  dropout_p should be set to 0.0 during evaluation
    Supports multi-query and grouped-query attention (MQA/GQA) by passing in KV with fewer heads
    than Q. Note that the number of heads in Q must be divisible by the number of heads in KV.
    For example, if Q has 6 heads and K, V have 2 heads, head 0, 1, 2 of Q will attention to head
    0 of K, V, and head 3, 4, 5 of Q will attention to head 1 of K, V.

    If causal=True, the causal mask is aligned to the bottom right corner of the attention matrix.
    For example, if seqlen_q = 2 and seqlen_k = 5, the causal mask (1 = keep, 0 = masked out) is:
        1 1 1 1 0
        1 1 1 1 1
    If seqlen_q = 5 and seqlen_k = 2, the causal mask is:
        0 0
        0 0
        0 0
        1 0
        1 1
    If the row of the mask is all zero, the output will be zero.

    If window_size != (-1, -1), implements sliding window local attention. Query at position i
    will only attend to keys between
    [i + seqlen_k - seqlen_q - window_size[0], i + seqlen_k - seqlen_q + window_size[1]] inclusive.

    Arguments:
        q: (batch_size, seqlen, nheads, headdim)
        k: (batch_size, seqlen, nheads_k, headdim)
        v: (batch_size, seqlen, nheads_k, headdim)
        dropout_p: float. Dropout probability.
        softmax_scale: float. The scaling of QK^T before applying softmax.
            Default to 1 / sqrt(headdim).
        causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
        window_size: (left, right). If not (-1, -1), implements sliding window local attention.
        alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of
            (-alibi_slope * |i + seqlen_k - seqlen_q - j|)
            is added to the attention score of query i and key j.
        deterministic: bool. Whether to use the deterministic implementation of the backward pass,
            which is slightly slower and uses more memory. The forward pass is always deterministic.
        return_attn_probs: bool. Whether to return the attention probabilities. This option is for
           testing only. The returned probabilities are not guaranteed to be correct
           (they might not have the right scaling).
    Return:
        out: (batch_size, seqlen, nheads, headdim).
        softmax_lse [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen). The
            logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
            normalization factor).
    )rj   rp   )r   r   r   r/   r0   r   r,   r-   r.   r1   r2   r3   r6   r7   rC   r8   r	   r	   r
   flash_attn_func  s    =rr   c                 C   s4   t | |||||||||	|
|||||||||||S N)ro   rp   )r   r   r   r   r    r$   r%   r"   r#   r/   r0   r   r,   r-   r.   r1   r2   r3   r6   r7   rC   r8   r	   r	   r
   flash_attn_varlen_funcE  s,   rt   c                 C   s   t | |||S rs   )r   fwd_combine)out_partiallse_partialr   	out_dtyper	   r	   r
   flash_attn_combines  s   ry   cache_seqlenscache_batch_idxcache_leftpadr&   r   r!   r$   rotary_seqlensr,   r-   r.   c           !      C   s   | ddksJ d| ddksJ d|du r,| jd |dur'|jd nd d }|durHt|trHtj|jd f|tj|jd}t|}t	| |||||d|d|d||d||	|
|||||||f|||||||||d		^}}} |r{||g| R S |S )
a  
    If k and v are not None, k_cache and v_cache will be updated *inplace* with the new values from
    k and v. This is useful for incremental decoding: you can pass in the cached keys/values from
    the previous step, and update them with the new keys/values from the current step, and do
    attention with the updated cache, all in 1 kernel.

    If you pass in k / v, you must make sure that the cache is large enough to hold the new values.
    For example, the KV cache could be pre-allocated with the max sequence length, and you can use
    cache_seqlens to keep track of the current sequence lengths of each sequence in the batch.

    Also apply rotary embedding if rotary_cos and rotary_sin are passed in. The key @k will be
    rotated by rotary_cos and rotary_sin at indices cache_seqlens, cache_seqlens + 1, etc.
    If causal or local (i.e., window_size != (-1, -1)), the query @q will be rotated by rotary_cos
    and rotary_sin at indices cache_seqlens, cache_seqlens + 1, etc.
    If not causal and not local, the query @q will be rotated by rotary_cos and rotary_sin at
    indices cache_seqlens only (i.e. we consider all tokens in @q to be at position cache_seqlens).

    See tests/test_flash_attn.py::test_flash_attn_kvcache for examples of how to use this function.

    Supports multi-query and grouped-query attention (MQA/GQA) by passing in KV with fewer heads
    than Q. Note that the number of heads in Q must be divisible by the number of heads in KV.
    For example, if Q has 6 heads and K, V have 2 heads, head 0, 1, 2 of Q will attention to head
    0 of K, V, and head 3, 4, 5 of Q will attention to head 1 of K, V.

    If causal=True, the causal mask is aligned to the bottom right corner of the attention matrix.
    For example, if seqlen_q = 2 and seqlen_k = 5, the causal mask (1 = keep, 0 = masked out) is:
        1 1 1 1 0
        1 1 1 1 1
    If seqlen_q = 5 and seqlen_k = 2, the causal mask is:
        0 0
        0 0
        0 0
        1 0
        1 1
    If the row of the mask is all zero, the output will be zero.

    If window_size != (-1, -1), implements sliding window local attention. Query at position i
    will only attend to keys between
    [i + seqlen_k - seqlen_q - window_size[0], i + seqlen_k - seqlen_q + window_size[1]] inclusive.

    Note: Does not support backward pass.

    Arguments:
        q: (batch_size, seqlen, nheads, headdim)
        k_cache: (batch_size_cache, seqlen_cache, nheads_k, headdim) if there's no page_table,
            or (num_blocks, page_block_size, nheads_k, headdim) if there's a page_table (i.e. paged KV cache)
            page_block_size can be arbitrary (e.g, 1, 2, 3, 64, etc.).
        v_cache: (batch_size_cache, seqlen_cache, nheads_k, headdim_v) if there's no page_table,
            or (num_blocks, page_block_size, nheads_k, headdim_v) if there's a page_table (i.e. paged KV cache)
        k [optional]: (batch_size, seqlen_new, nheads_k, headdim). If not None, we concatenate
            k with k_cache, starting at the indices specified by cache_seqlens.
        v [optional]: (batch_size, seqlen_new, nheads_k, headdim_v). Similar to k.
        qv [optional]: (batch_size, seqlen, nheads, headdim_v)
        rotary_cos [optional]: (seqlen_ro, rotary_dim / 2). If not None, we apply rotary embedding
            to k and q. Only applicable if k and v are passed in. rotary_dim must be divisible by 16.
        rotary_sin [optional]: (seqlen_ro, rotary_dim / 2). Similar to rotary_cos.
        cache_seqlens: int, or (batch_size,), dtype torch.int32. The sequence lengths of the
            KV cache.
        cache_batch_idx: (batch_size,), dtype torch.int32. The indices used to index into the KV cache.
            If None, we assume that the batch indices are [0, 1, 2, ..., batch_size - 1].
            If the indices are not distinct, and k and v are provided, the values updated in the cache
                 might come from any of the duplicate indices.
        cache_leftpad: (batch_size,), dtype torch.int32. The index that the KV cache starts. If None, assume 0.
        page_table [optional]: (batch_size, max_num_blocks_per_seq), dtype torch.int32.
        softmax_scale: float. The scaling of QK^T before applying softmax.
            Default to 1 / sqrt(headdim).
        causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
        window_size: (left, right). If not (-1, -1), implements sliding window local attention.
        softcap: float. Anything > 0 activates softcapping attention.
        rotary_interleaved: bool. Only applicable if rotary_cos and rotary_sin are passed in.
            If True, rotary embedding will combine dimensions 0 & 1, 2 & 3, etc. If False,
            rotary embedding will combine dimensions 0 & rotary_dim / 2, 1 & rotary_dim / 2 + 1
            (i.e. GPT-NeoX style).
        num_splits: int. If > 1, split the key/value into this many chunks along the sequence.
           If num_splits == 1, we don't split the key/value. If num_splits == 0, we use a heuristic
           to automatically determine the number of splits.
           Don't change this unless you know what you are doing.
        return_softmax_lse: bool. Whether to return the logsumexp of the attention scores.

    Return:
        out: (batch_size, seqlen, nheads, headdim).
        softmax_lse [optional, if return_softmax_lse=True]: (batch_size, nheads, seqlen). The
            logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
            normalization factor).
    r   r   z+k_cache must have contiguous last dimensionz+v_cache must have contiguous last dimensionNr   rG   rZ   )	r0   r1   r2   r3   r4   r5   r6   r7   r8   )
r   rO   
isinstanceintr^   fullint32r\   r   r;   )!r   k_cachev_cacher   r   r   r)   r*   rz   r{   r|   r&   r   r!   r$   r}   r,   r-   r.   r/   r0   r1   r2   r3   r4   r5   r6   r7   r8   return_softmax_lser   r9   r:   r	   r	   r
   flash_attn_with_kvcachew  sX   u$"r   	page_sizec                 C   sX   t |}|d u r
|}t| |||||||||	d |
d |||||d |d |||||}|S )Nr   r   )r   r   get_scheduler_metadata)
batch_sizer$   r%   rV   num_heads_kvheaddimrz   	qkv_dtype	headdim_vr   r!   r|   r   max_seqlen_k_newr0   r1   r2   has_softcapr6   r7   r8   r5   r	   r	   r
   r     s.   r   )r   r   r   TNr   Nr   )r   r   Fr   )NFNNNr   r   r   FNr   )NFNNNNr   r   r   r   NFr   )NNNFNNNNr   r   r   r   NFr   )NN)NNNNNNNNNNNNNNNNNFr   r   r   TNr   Nr   F)typingr   r   r^   torch.nnnnflash_attn_3._Cflash_attn_3opsr   r   r;   rE   autogradFunctionrF   rj   ro   rq   rr   rt   ry   r   Tensorr   bfloat16r   r	   r	   r	   r
   <module>   s  
f
4aWh
@
W

.	

 '