# fa3_fwd_interface.py
# Python-side interface for the FlashAttention-3 forward/backward CUDA kernels
# exposed through the fa3_fwd extension (torch.ops.fa3_fwd).

from typing import Optional, Union, List, Tuple

import torch
import torch.nn as nn

import fa3_fwd._C  # noqa: F401  (loads the extension and registers torch.ops.fa3_fwd)

fa3_fwd_cuda = torch.ops.fa3_fwd


def maybe_contiguous(x):
    # Only copy when the innermost dimension is strided; None passes through unchanged.
    return x.contiguous() if x is not None and x.stride(-1) != 1 else x


def round_multiple(x, m):
    # Round x up to the nearest multiple of m.
    return (x + m - 1) // m * m


def round_up_headdim(head_size: int) -> int:
    # Smallest head dimension supported by this build that is >= head_size.
    from flash_attn_config import CONFIG

    build_flags = CONFIG["build_flags"]
    if not build_flags["FLASHATTENTION_DISABLE_HDIM64"] and head_size <= 64:
        return 64
    if not build_flags["FLASHATTENTION_DISABLE_HDIM96"] and head_size <= 96:
        return 96
    if not build_flags["FLASHATTENTION_DISABLE_HDIM128"] and head_size <= 128:
        return 128
    if not build_flags["FLASHATTENTION_DISABLE_HDIM192"] and head_size <= 192:
        return 192
    if not build_flags["FLASHATTENTION_DISABLE_HDIM256"] and head_size <= 256:
        return 256
    return 256
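
# A small usage sketch for the rounding helpers above (illustrative only, not called by
# the library; the exact result of round_up_headdim depends on which head dimensions the
# build enables, so the second assert only checks the set of possible values):
def _example_rounding():
    assert round_multiple(1000, 128) == 1024
    assert round_up_headdim(80) in (96, 128, 192, 256)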

@torch.library.custom_op(
    "fa3_fwd::_flash_attn_forward", mutates_args=("out_",), device_types="cuda"
)
def _flash_attn_forward(
    q: torch.Tensor, k: torch.Tensor, v: torch.Tensor,
    k_new: Optional[torch.Tensor] = None, v_new: Optional[torch.Tensor] = None,
    qv: Optional[torch.Tensor] = None, out_: Optional[torch.Tensor] = None,
    cu_seqlens_q: Optional[torch.Tensor] = None, cu_seqlens_k: Optional[torch.Tensor] = None,
    cu_seqlens_k_new: Optional[torch.Tensor] = None,
    seqused_q: Optional[torch.Tensor] = None, seqused_k: Optional[torch.Tensor] = None,
    max_seqlen_q: Optional[int] = None, max_seqlen_k: Optional[int] = None,
    page_table: Optional[torch.Tensor] = None, kv_batch_idx: Optional[torch.Tensor] = None,
    leftpad_k: Optional[torch.Tensor] = None,
    rotary_cos: Optional[torch.Tensor] = None, rotary_sin: Optional[torch.Tensor] = None,
    seqlens_rotary: Optional[torch.Tensor] = None,
    q_descale: Optional[torch.Tensor] = None, k_descale: Optional[torch.Tensor] = None,
    v_descale: Optional[torch.Tensor] = None,
    softmax_scale: Optional[float] = None,
    causal: bool = False,
    window_size_left: int = -1, window_size_right: int = -1,
    attention_chunk: int = 0, softcap: float = 0.0,
    rotary_interleaved: bool = True,
    scheduler_metadata: Optional[torch.Tensor] = None,
    num_splits: int = 1, pack_gqa: Optional[bool] = None, sm_margin: int = 0,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
    # Make sure the kernel sees contiguous innermost dimensions.
    q, k, k_new, v_new = [maybe_contiguous(x) for x in (q, k, k_new, v_new)]
    v = v.contiguous() if v.stride(-1) != 1 and v.stride(-3) != 1 else v
    cu_seqlens_q, cu_seqlens_k, cu_seqlens_k_new = [
        maybe_contiguous(x) for x in (cu_seqlens_q, cu_seqlens_k, cu_seqlens_k_new)
    ]
    seqused_q, seqused_k = [maybe_contiguous(x) for x in (seqused_q, seqused_k)]
    page_table, kv_batch_idx, leftpad_k = [
        maybe_contiguous(x) for x in (page_table, kv_batch_idx, leftpad_k)
    ]
    rotary_cos, rotary_sin = [maybe_contiguous(x) for x in (rotary_cos, rotary_sin)]
    seqlens_rotary = maybe_contiguous(seqlens_rotary)
    out, softmax_lse, out_accum, softmax_lse_accum = fa3_fwd_cuda.fwd(
        q, k, v, k_new, v_new, qv, out_,
        cu_seqlens_q, cu_seqlens_k, cu_seqlens_k_new,
        seqused_q, seqused_k,
        max_seqlen_q, max_seqlen_k,
        page_table, kv_batch_idx, leftpad_k,
        rotary_cos, rotary_sin, seqlens_rotary,
        q_descale, k_descale, v_descale,
        softmax_scale,
        causal,
        window_size_left, window_size_right,
        attention_chunk, softcap,
        rotary_interleaved,
        scheduler_metadata,
        num_splits, pack_gqa, sm_margin,
    )
    # Custom ops must return real tensors; the kernel returns None for the split
    # accumulators when no splitting is requested.
    if out_accum is None:
        out_accum = torch.tensor([], device=out.device)
    if softmax_lse_accum is None:
        softmax_lse_accum = torch.tensor([], device=out.device)
    return out, softmax_lse, out_accum, softmax_lse_accum


@torch.library.register_fake("fa3_fwd::_flash_attn_forward")
def _flash_attn_forward_fake(
    q: torch.Tensor, k: torch.Tensor, v: torch.Tensor,
    k_new: Optional[torch.Tensor] = None, v_new: Optional[torch.Tensor] = None,
    qv: Optional[torch.Tensor] = None, out_: Optional[torch.Tensor] = None,
    cu_seqlens_q: Optional[torch.Tensor] = None, cu_seqlens_k: Optional[torch.Tensor] = None,
    cu_seqlens_k_new: Optional[torch.Tensor] = None,
    seqused_q: Optional[torch.Tensor] = None, seqused_k: Optional[torch.Tensor] = None,
    max_seqlen_q: Optional[int] = None, max_seqlen_k: Optional[int] = None,
    page_table: Optional[torch.Tensor] = None, kv_batch_idx: Optional[torch.Tensor] = None,
    leftpad_k: Optional[torch.Tensor] = None,
    rotary_cos: Optional[torch.Tensor] = None, rotary_sin: Optional[torch.Tensor] = None,
    seqlens_rotary: Optional[torch.Tensor] = None,
    q_descale: Optional[torch.Tensor] = None, k_descale: Optional[torch.Tensor] = None,
    v_descale: Optional[torch.Tensor] = None,
    softmax_scale: Optional[float] = None,
    causal: bool = False,
    window_size_left: int = -1, window_size_right: int = -1,
    attention_chunk: int = 0, softcap: float = 0.0,
    rotary_interleaved: bool = True,
    scheduler_metadata: Optional[torch.Tensor] = None,
    num_splits: int = 1, pack_gqa: Optional[bool] = None, sm_margin: int = 0,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
    """
    Symbolic fake implementation of flash attention forward.
    Returns tensors with the correct shapes and dtypes without actual computation.
    """
    is_varlen_q = cu_seqlens_q is not None
    if is_varlen_q:
        total_q, num_heads, _ = q.shape
        batch_size = cu_seqlens_q.shape[0] - 1
        if max_seqlen_q is None:
            raise ValueError("max_seqlen_q must be provided if cu_seqlens_q is provided")
        seqlen_q = max_seqlen_q
    else:
        batch_size, seqlen_q, num_heads, _ = q.shape
        total_q = batch_size * seqlen_q
    head_size_v = v.shape[-1]
    q_type = q.dtype
    # FP8 inputs produce bf16 outputs; otherwise the output follows the query dtype.
    out_dtype = torch.bfloat16 if q_type == torch.float8_e4m3fn else q_type
    if out_ is not None:
        raise TypeError(
            "Tracing (torch.compile/torch.export) with pre-allocated output tensor is not supported."
        )
    if is_varlen_q:
        out = torch.empty((total_q, num_heads, head_size_v), dtype=out_dtype, device=q.device)
        softmax_lse = torch.empty((num_heads, total_q), dtype=torch.float32, device=q.device)
    else:
        out = torch.empty((batch_size, seqlen_q, num_heads, head_size_v), dtype=out_dtype, device=q.device)
        softmax_lse = torch.empty((batch_size, num_heads, seqlen_q), dtype=torch.float32, device=q.device)
    if num_splits <= 0:
        raise ValueError(
            "tracing (torch.compile/torch.export) with num_splits <= 0 not supported. "
            f"Got num_splits={num_splits}"
        )
    elif num_splits > 1:
        if is_varlen_q:
            out_accum = torch.empty((num_splits, num_heads, total_q, head_size_v), dtype=torch.float32, device=q.device)
            softmax_lse_accum = torch.empty((num_splits, num_heads, total_q), dtype=torch.float32, device=q.device)
        else:
            out_accum = torch.empty((num_splits, batch_size, num_heads, seqlen_q, head_size_v), dtype=torch.float32, device=q.device)
            softmax_lse_accum = torch.empty((num_splits, batch_size, num_heads, seqlen_q), dtype=torch.float32, device=q.device)
    else:
        out_accum = torch.tensor([], device=out.device)
        softmax_lse_accum = torch.tensor([], device=out.device)
    return out, softmax_lse, out_accum, softmax_lse_accum


@torch.library.custom_op(
    "fa3_fwd::_flash_attn_backward", mutates_args=("dq", "dk", "dv"), device_types="cuda"
)
def _flash_attn_backward(
    dout: torch.Tensor, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor,
    out: torch.Tensor, softmax_lse: torch.Tensor,
    cu_seqlens_q: Optional[torch.Tensor] = None, cu_seqlens_k: Optional[torch.Tensor] = None,
    sequed_q: Optional[torch.Tensor] = None, sequed_k: Optional[torch.Tensor] = None,
    max_seqlen_q: Optional[int] = None, max_seqlen_k: Optional[int] = None,
    dq: Optional[torch.Tensor] = None, dk: Optional[torch.Tensor] = None,
    dv: Optional[torch.Tensor] = None,
    softmax_scale: Optional[float] = None,
    is_causal: bool = False,
    window_size_left: int = -1, window_size_right: int = -1,
    softcap: float = 0.0,
    deterministic: bool = False,
    sm_margin: int = 0,
) -> torch.Tensor:
    dout, q, k, v, out = [maybe_contiguous(x) for x in (dout, q, k, v, out)]
    softmax_d, *rest = fa3_fwd_cuda.bwd(
        dout, q, k, v, out, softmax_lse,
        dq, dk, dv,
        cu_seqlens_q, cu_seqlens_k, sequed_q, sequed_k,
        max_seqlen_q, max_seqlen_k,
        softmax_scale, is_causal,
        window_size_left, window_size_right,
        softcap, deterministic, sm_margin,
    )
    return softmax_d


@torch.library.register_fake("fa3_fwd::_flash_attn_backward")
def _flash_attn_backward_fake(
    dout: torch.Tensor, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor,
    out: torch.Tensor, softmax_lse: torch.Tensor,
    cu_seqlens_q: Optional[torch.Tensor] = None, cu_seqlens_k: Optional[torch.Tensor] = None,
    sequed_q: Optional[torch.Tensor] = None, sequed_k: Optional[torch.Tensor] = None,
    max_seqlen_q: Optional[int] = None, max_seqlen_k: Optional[int] = None,
    dq: Optional[torch.Tensor] = None, dk: Optional[torch.Tensor] = None,
    dv: Optional[torch.Tensor] = None,
    softmax_scale: Optional[float] = None,
    is_causal: bool = False,
    window_size_left: int = -1, window_size_right: int = -1,
    softcap: float = 0.0,
    deterministic: bool = False,
    sm_margin: int = 0,
) -> torch.Tensor:
    is_varlen_q = cu_seqlens_q is not None
    is_varlen_k = cu_seqlens_k is not None
    is_varlen = is_varlen_q or is_varlen_k or sequed_q is not None or sequed_k is not None
    if not is_varlen:
        batch_size = q.size(0)
        seqlen_q = q.size(1)
        seqlen_k = k.size(1)
        total_q = batch_size * seqlen_q
    else:
        total_q = q.size(0)
        batch_size = cu_seqlens_q.size(0) - 1
        seqlen_q = max_seqlen_q
        seqlen_k = max_seqlen_k
    # Normalize the local-attention window the way the kernel does.
    if window_size_left >= seqlen_k - 1:
        window_size_left = -1
    if window_size_right >= seqlen_q - 1:
        window_size_right = -1
    if is_causal:
        window_size_right = 0
    is_local = window_size_left >= 0 and window_size_right >= 0
    head_size = q.size(-1)
    num_heads = q.size(-2)
    head_size_rounded = round_up_headdim(max(head_size, v.size(-1)))
    arch = torch.cuda.get_device_capability(q.device)
    cap = arch[0] * 10 + arch[1]
    # The dsoftmax buffer is padded to the backward kernel's M-tile (kBlockM), which the
    # kernel selects from head_size_rounded, the compute capability (cap), and the
    # causal/local flags. 128 is the largest tile any configuration uses, so it is taken
    # here as a safe padding for the traced shapes.
    kBlockM = 128
    seqlen_q_rounded = round_multiple(seqlen_q, kBlockM)
    total_q_padded_rounded = round_multiple(total_q + batch_size * kBlockM, kBlockM)
    if dq is None:
        dq = torch.empty_like(q)
    if dk is None:
        dk = torch.empty_like(k)
    if dv is None:
        dv = torch.empty_like(v)
    if not is_varlen:
        softmax_d = torch.empty((batch_size, num_heads, seqlen_q_rounded), dtype=torch.float32, device=q.device)
    else:
        softmax_d = torch.empty((num_heads, total_q_padded_rounded), dtype=torch.float32, device=q.device)
    return softmax_d


def setup_context(ctx, inputs, output):
    q, k, v = inputs[:3]
    out, softmax_lse, _, _ = output
    ctx.save_for_backward(q, k, v, out, softmax_lse)
    # Indices refer to positions in _flash_attn_forward's argument list.
    ctx.softmax_scale = inputs[23]
    ctx.causal = inputs[24]
    ctx.window_size = [inputs[25], inputs[26]]
    ctx.attention_chunk = inputs[27]
    ctx.softcap = inputs[28]
    ctx.sm_margin = inputs[33]


def _backward(ctx, dout, *grads):
    q, k, v, out, softmax_lse = ctx.saved_tensors
    dq, dk, dv = torch.empty_like(q), torch.empty_like(k), torch.empty_like(v)
    _flash_attn_backward(
        dout, q, k, v, out, softmax_lse,
        None, None, None, None, None, None,
        dq, dk, dv,
        ctx.softmax_scale,
        ctx.causal,
        ctx.window_size[0],
        ctx.window_size[1],
        ctx.softcap,
        False,  # deterministic
        ctx.sm_margin,
    )
    # One gradient slot per remaining forward input (none of them are differentiable).
    return dq, dk, dv, *[None] * 31


torch.library.register_autograd(
    "fa3_fwd::_flash_attn_forward", _backward, setup_context=setup_context
)


class FlashAttnQKVPackedFunc(torch.autograd.Function):
    @staticmethod
    def forward(
        ctx,
        qkv,
        softmax_scale,
        causal,
        q_descale=None,
        k_descale=None,
        v_descale=None,
        window_size=(-1, -1),
        attention_chunk=0,
        softcap=0.0,
        deterministic=False,
        num_heads_q=None,
        sm_margin=0,
        return_softmax=False,
    ):
        if softmax_scale is None:
            softmax_scale = qkv.shape[-1] ** (-0.5)
        if qkv.dim() == 5:
            assert qkv.shape[-3] == 3
            q, k, v = qkv.unbind(dim=-3)
        else:
            assert qkv.dim() == 4
            assert num_heads_q is not None
            num_heads_k = (qkv.shape[2] - num_heads_q) // 2
            assert num_heads_k * 2 + num_heads_q == qkv.shape[2]
            q, k, v = qkv.split([num_heads_q, num_heads_k, num_heads_k], dim=-2)
        out, softmax_lse, *rest = _flash_attn_forward(
            q, k, v,
            None, None,        # k_new, v_new
            None,              # qv
            None,              # out_
            None, None, None,  # cu_seqlens_q, cu_seqlens_k, cu_seqlens_k_new
            None, None,        # seqused_q, seqused_k
            None, None,        # max_seqlen_q, max_seqlen_k
            None, None, None,  # page_table, kv_batch_idx, leftpad_k
            None, None, None,  # rotary_cos, rotary_sin, seqlens_rotary
            q_descale, k_descale, v_descale,
            softmax_scale,
            causal=causal,
            window_size_left=window_size[0],
            window_size_right=window_size[1],
            attention_chunk=attention_chunk,
            softcap=softcap,
            sm_margin=sm_margin,
        )
        ctx.save_for_backward(q, k, v, out, softmax_lse)
        ctx.softmax_scale = softmax_scale
        ctx.causal = causal
        ctx.window_size = window_size
        ctx.attention_chunk = attention_chunk
        ctx.softcap = softcap
        ctx.deterministic = deterministic
        ctx.ndim = qkv.dim()
        ctx.sm_margin = sm_margin
        return (out, softmax_lse) if return_softmax else out

    @staticmethod
    def backward(ctx, dout, *args):
        q, k, v, out, softmax_lse = ctx.saved_tensors
        assert ctx.attention_chunk == 0, "FA3 backward does not support attention_chunk"
        if ctx.ndim == 5:
            qkv_shape = q.shape[:-2] + (3, *q.shape[-2:])
            dqkv = torch.empty(qkv_shape, dtype=q.dtype, device=q.device)
            dq, dk, dv = dqkv.unbind(dim=-3)
        else:
            num_heads_q = q.shape[-2]
            num_heads_k = k.shape[-2]
            qkv_shape = q.shape[:-2] + (num_heads_q + num_heads_k * 2, *q.shape[-1:])
            dqkv = torch.empty(qkv_shape, dtype=q.dtype, device=q.device)
            dq, dk, dv = dqkv.split([num_heads_q, num_heads_k, num_heads_k], dim=-2)
        _flash_attn_backward(
            dout, q, k, v, out, softmax_lse,
            None, None, None, None, None, None,
            dq, dk, dv,
            ctx.softmax_scale,
            ctx.causal,
            ctx.window_size[0],
            ctx.window_size[1],
            ctx.softcap,
            ctx.deterministic,
            ctx.sm_margin,
        )
        dqkv = dqkv[..., : dout.shape[-1]]  # the head dimension may have been padded
        return dqkv, *[None] * 12
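
# The custom-op registration above (fa3_fwd::_flash_attn_forward with its fake kernel and
# autograd rules) is what lets the wrappers in this file run under torch.compile without
# graph breaks. A minimal sketch, assuming a CUDA device and bf16 inputs; flash_attn_func
# is defined further down in this module, and the shapes here are illustrative only:
def _example_compiled_attention():
    @torch.compile(fullgraph=True)
    def attn(q, k, v):
        return flash_attn_func(q, k, v, causal=True)

    q, k, v = [
        torch.randn(1, 512, 8, 64, dtype=torch.bfloat16, device="cuda") for _ in range(3)
    ]
    return attn(q, k, v)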

class FlashAttnFunc(torch.autograd.Function):
    @staticmethod
    def forward(
        ctx,
        q,
        k,
        v,
        softmax_scale,
        causal,
        qv=None,
        q_descale=None,
        k_descale=None,
        v_descale=None,
        window_size=(-1, -1),
        attention_chunk=0,
        softcap=0.0,
        num_splits=1,
        pack_gqa=None,
        deterministic=False,
        sm_margin=0,
        return_softmax=False,
    ):
        if softmax_scale is None:
            softmax_scale = (q.shape[-1] + (qv.shape[-1] if qv is not None else 0)) ** (-0.5)
        out, softmax_lse, *rest = _flash_attn_forward(
            q, k, v,
            None, None,        # k_new, v_new
            qv,
            None,              # out_
            None, None, None,  # cu_seqlens_q, cu_seqlens_k, cu_seqlens_k_new
            None, None,        # seqused_q, seqused_k
            None, None,        # max_seqlen_q, max_seqlen_k
            None, None, None,  # page_table, kv_batch_idx, leftpad_k
            None, None, None,  # rotary_cos, rotary_sin, seqlens_rotary
            q_descale, k_descale, v_descale,
            softmax_scale,
            causal=causal,
            window_size_left=window_size[0],
            window_size_right=window_size[1],
            attention_chunk=attention_chunk,
            softcap=softcap,
            num_splits=num_splits,
            pack_gqa=pack_gqa,
            sm_margin=sm_margin,
        )
        ctx.save_for_backward(q, k, v, out, softmax_lse)
        ctx.softmax_scale = softmax_scale
        ctx.causal = causal
        ctx.window_size = window_size
        ctx.attention_chunk = attention_chunk
        ctx.softcap = softcap
        ctx.deterministic = deterministic
        ctx.sm_margin = sm_margin
        return (out, softmax_lse) if return_softmax else out

    @staticmethod
    def backward(ctx, dout, *args):
        q, k, v, out, softmax_lse = ctx.saved_tensors
        assert ctx.attention_chunk == 0, "FA3 backward does not support attention_chunk"
        dq, dk, dv = torch.empty_like(q), torch.empty_like(k), torch.empty_like(v)
        _flash_attn_backward(
            dout, q, k, v, out, softmax_lse,
            None, None, None, None, None, None,
            dq, dk, dv,
            ctx.softmax_scale,
            ctx.causal,
            ctx.window_size[0],
            ctx.window_size[1],
            ctx.softcap,
            ctx.deterministic,
            ctx.sm_margin,
        )
        # The head dimension may have been padded; slice back to dout's head size.
        dq = dq[..., : dout.shape[-1]]
        dk = dk[..., : dout.shape[-1]]
        dv = dv[..., : dout.shape[-1]]
        return dq, dk, dv, *[None] * 14


class FlashAttnVarlenFunc(torch.autograd.Function):
    @staticmethod
    def forward(
        ctx,
        q,
        k,
        v,
        cu_seqlens_q,
        cu_seqlens_k,
        seqused_q,
        seqused_k,
        max_seqlen_q,
        max_seqlen_k,
        softmax_scale,
        causal,
        qv=None,
        q_descale=None,
        k_descale=None,
        v_descale=None,
        window_size=(-1, -1),
        attention_chunk=0,
        softcap=0.0,
        num_splits=1,
        pack_gqa=None,
        deterministic=False,
        sm_margin=0,
        return_softmax=False,
    ):
        if softmax_scale is None:
            softmax_scale = (q.shape[-1] + (qv.shape[-1] if qv is not None else 0)) ** (-0.5)
        out, softmax_lse, *rest = _flash_attn_forward(
            q, k, v,
            None, None,        # k_new, v_new
            qv,
            None,              # out_
            cu_seqlens_q, cu_seqlens_k, None,
            seqused_q, seqused_k,
            max_seqlen_q, max_seqlen_k,
            None, None, None,  # page_table, kv_batch_idx, leftpad_k
            None, None, None,  # rotary_cos, rotary_sin, seqlens_rotary
            q_descale, k_descale, v_descale,
            softmax_scale,
            causal=causal,
            window_size_left=window_size[0],
            window_size_right=window_size[1],
            attention_chunk=attention_chunk,
            softcap=softcap,
            num_splits=num_splits,
            pack_gqa=pack_gqa,
            sm_margin=sm_margin,
        )
        ctx.save_for_backward(q, k, v, out, softmax_lse, cu_seqlens_q, cu_seqlens_k, seqused_q, seqused_k)
        ctx.max_seqlen_q = max_seqlen_q
        ctx.max_seqlen_k = max_seqlen_k
        ctx.softmax_scale = softmax_scale
        ctx.causal = causal
        ctx.window_size = window_size
        ctx.attention_chunk = attention_chunk
        ctx.softcap = softcap
        ctx.deterministic = deterministic
        ctx.sm_margin = sm_margin
        return (out, softmax_lse) if return_softmax else out

    @staticmethod
    def backward(ctx, dout, *args):
        q, k, v, out, softmax_lse, cu_seqlens_q, cu_seqlens_k, seqused_q, seqused_k = ctx.saved_tensors
        assert ctx.attention_chunk == 0, "FA3 backward does not support attention_chunk"
        dq, dk, dv = torch.empty_like(q), torch.empty_like(k), torch.empty_like(v)
        _flash_attn_backward(
            dout, q, k, v, out, softmax_lse,
            cu_seqlens_q, cu_seqlens_k, seqused_q, seqused_k,
            ctx.max_seqlen_q, ctx.max_seqlen_k,
            dq, dk, dv,
            ctx.softmax_scale,
            ctx.causal,
            ctx.window_size[0],
            ctx.window_size[1],
            ctx.softcap,
            ctx.deterministic,
            ctx.sm_margin,
        )
        dq = dq[..., : dout.shape[-1]]
        dk = dk[..., : dout.shape[-1]]
        dv = dv[..., : dout.shape[-1]]
        return dq, dk, dv, *[None] * 20


def flash_attn_qkvpacked_func(
    qkv,
    softmax_scale=None,
    causal=False,
    q_descale=None,
    k_descale=None,
    v_descale=None,
    window_size=(-1, -1),
    attention_chunk=0,
    softcap=0.0,
    deterministic=False,
    num_heads_q=None,
    sm_margin=0,
    return_attn_probs=False,
):
    """dropout_p should be set to 0.0 during evaluation
    If Q, K, V are already stacked into 1 tensor, this function will be faster than
    calling flash_attn_func on Q, K, V since the backward pass avoids explicit concatenation
    of the gradients of Q, K, V.
    For multi-query and grouped-query attention (MQA/GQA), please see
    flash_attn_kvpacked_func and flash_attn_func.

    If window_size != (-1, -1), implements sliding window local attention. Query at position i
    will only attend to keys between [i - window_size[0], i + window_size[1]] inclusive.

    Arguments:
        qkv: (batch_size, seqlen, 3, nheads, headdim)
        dropout_p: float. Dropout probability.
        softmax_scale: float. The scaling of QK^T before applying softmax.
            Default to 1 / sqrt(headdim).
        causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
        window_size: (left, right). If not (-1, -1), implements sliding window local attention.
        softcap: float. Anything > 0 activates softcapping attention.
        alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of (-alibi_slope * |i - j|) is added to
            the attention score of query i and key j.
        deterministic: bool. Whether to use the deterministic implementation of the backward pass,
            which is slightly slower and uses more memory. The forward pass is always deterministic.
        return_attn_probs: bool. Whether to return the attention probabilities. This option is for
           testing only. The returned probabilities are not guaranteed to be correct
           (they might not have the right scaling).
    Return:
        out: (batch_size, seqlen, nheads, headdim).
        softmax_lse [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen). The
            logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
            normalization factor).
        S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
            The output of softmax (possibly with different scaling). It also encodes the dropout
            pattern (negative means that location was dropped, nonnegative means it was kept).
    """
    return FlashAttnQKVPackedFunc.apply(
        qkv,
        softmax_scale,
        causal,
        q_descale,
        k_descale,
        v_descale,
        window_size,
        attention_chunk,
        softcap,
        deterministic,
        num_heads_q,
        sm_margin,
        return_attn_probs,
    )
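
# Usage sketch for flash_attn_qkvpacked_func (illustrative only, not called by the
# library). It assumes a CUDA device and bf16 inputs; per the docstring, qkv is
# (batch_size, seqlen, 3, nheads, headdim):
def _example_flash_attn_qkvpacked():
    qkv = torch.randn(2, 1024, 3, 16, 128, dtype=torch.bfloat16, device="cuda")
    out = flash_attn_qkvpacked_func(qkv, causal=True)
    return out  # (2, 1024, 16, 128)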
||||||S )a	  dropout_p should be set to 0.0 during evaluation
    Supports multi-query and grouped-query attention (MQA/GQA) by passing in KV with fewer heads
    than Q. Note that the number of heads in Q must be divisible by the number of heads in KV.
    For example, if Q has 6 heads and K, V have 2 heads, head 0, 1, 2 of Q will attention to head
    0 of K, V, and head 3, 4, 5 of Q will attention to head 1 of K, V.

    If causal=True, the causal mask is aligned to the bottom right corner of the attention matrix.
    For example, if seqlen_q = 2 and seqlen_k = 5, the causal mask (1 = keep, 0 = masked out) is:
        1 1 1 1 0
        1 1 1 1 1
    If seqlen_q = 5 and seqlen_k = 2, the causal mask is:
        0 0
        0 0
        0 0
        1 0
        1 1
    If the row of the mask is all zero, the output will be zero.

    If window_size != (-1, -1), implements sliding window local attention. Query at position i
    will only attend to keys between
    [i + seqlen_k - seqlen_q - window_size[0], i + seqlen_k - seqlen_q + window_size[1]] inclusive.

    Arguments:
        q: (batch_size, seqlen, nheads, headdim)
        k: (batch_size, seqlen, nheads_k, headdim)
        v: (batch_size, seqlen, nheads_k, headdim)
        dropout_p: float. Dropout probability.
        softmax_scale: float. The scaling of QK^T before applying softmax.
            Default to 1 / sqrt(headdim).
        causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
        window_size: (left, right). If not (-1, -1), implements sliding window local attention.
        alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of
            (-alibi_slope * |i + seqlen_k - seqlen_q - j|)
            is added to the attention score of query i and key j.
        deterministic: bool. Whether to use the deterministic implementation of the backward pass,
            which is slightly slower and uses more memory. The forward pass is always deterministic.
        return_attn_probs: bool. Whether to return the attention probabilities. This option is for
           testing only. The returned probabilities are not guaranteed to be correct
           (they might not have the right scaling).
    Return:
        out: (batch_size, seqlen, nheads, headdim).
        softmax_lse [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen). The
            logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
            normalization factor).
    """
    return FlashAttnFunc.apply(
        q, k, v,
        softmax_scale,
        causal,
        qv,
        q_descale, k_descale, v_descale,
        window_size,
        attention_chunk,
        softcap,
        num_splits,
        pack_gqa,
        deterministic,
        sm_margin,
        return_attn_probs,
    )


def flash_attn_varlen_func(
    q,
    k,
    v,
    cu_seqlens_q,
    cu_seqlens_k,
    max_seqlen_q,
    max_seqlen_k,
    seqused_q=None,
    seqused_k=None,
    softmax_scale=None,
    causal=False,
    qv=None,
    q_descale=None,
    k_descale=None,
    v_descale=None,
    window_size=(-1, -1),
    attention_chunk=0,
    softcap=0.0,
    num_splits=1,
    pack_gqa=None,
    deterministic=False,
    sm_margin=0,
    return_attn_probs=False,
):
    return FlashAttnVarlenFunc.apply(
        q, k, v,
        cu_seqlens_q, cu_seqlens_k,
        seqused_q, seqused_k,
        max_seqlen_q, max_seqlen_k,
        softmax_scale,
        causal,
        qv,
        q_descale, k_descale, v_descale,
        window_size,
        attention_chunk,
        softcap,
        num_splits,
        pack_gqa,
        deterministic,
        sm_margin,
        return_attn_probs,
    )


def flash_attn_combine(out_partial, lse_partial, out=None, out_dtype=None):
    return fa3_fwd_cuda.fwd_combine(out_partial, lse_partial, out, out_dtype)


def flash_attn_with_kvcache(
    q,
    k_cache,
    v_cache,
    k=None,
    v=None,
    qv=None,
    rotary_cos=None,
    rotary_sin=None,
    cache_seqlens: Optional[Union[int, torch.Tensor]] = None,
    cache_batch_idx: Optional[torch.Tensor] = None,
    cache_leftpad: Optional[torch.Tensor] = None,
    page_table: Optional[torch.Tensor] = None,
    cu_seqlens_q: Optional[torch.Tensor] = None,
    cu_seqlens_k_new: Optional[torch.Tensor] = None,
    max_seqlen_q: Optional[int] = None,
    rotary_seqlens: Optional[torch.Tensor] = None,
    q_descale: Optional[torch.Tensor] = None,
    k_descale: Optional[torch.Tensor] = None,
    v_descale: Optional[torch.Tensor] = None,
    softmax_scale=None,
    causal=False,
    window_size=(-1, -1),
    attention_chunk=0,
    softcap=0.0,
    rotary_interleaved=True,
    scheduler_metadata=None,
    num_splits=0,
    pack_gqa=None,
    sm_margin=0,
    return_softmax_lse=False,
):
    """
    If k and v are not None, k_cache and v_cache will be updated *inplace* with the new values from
    k and v. This is useful for incremental decoding: you can pass in the cached keys/values from
    the previous step, and update them with the new keys/values from the current step, and do
    attention with the updated cache, all in 1 kernel.

    If you pass in k / v, you must make sure that the cache is large enough to hold the new values.
    For example, the KV cache could be pre-allocated with the max sequence length, and you can use
    cache_seqlens to keep track of the current sequence lengths of each sequence in the batch.

    Also apply rotary embedding if rotary_cos and rotary_sin are passed in. The key @k will be
    rotated by rotary_cos and rotary_sin at indices cache_seqlens, cache_seqlens + 1, etc.
    If causal or local (i.e., window_size != (-1, -1)), the query @q will be rotated by rotary_cos
    and rotary_sin at indices cache_seqlens, cache_seqlens + 1, etc.
    If not causal and not local, the query @q will be rotated by rotary_cos and rotary_sin at
    indices cache_seqlens only (i.e. we consider all tokens in @q to be at position cache_seqlens).

    See tests/test_flash_attn.py::test_flash_attn_kvcache for examples of how to use this function.

    Supports multi-query and grouped-query attention (MQA/GQA) by passing in KV with fewer heads
    than Q. Note that the number of heads in Q must be divisible by the number of heads in KV.
    For example, if Q has 6 heads and K, V have 2 heads, head 0, 1, 2 of Q will attention to head
    0 of K, V, and head 3, 4, 5 of Q will attention to head 1 of K, V.

    If causal=True, the causal mask is aligned to the bottom right corner of the attention matrix.
    For example, if seqlen_q = 2 and seqlen_k = 5, the causal mask (1 = keep, 0 = masked out) is:
        1 1 1 1 0
        1 1 1 1 1
    If seqlen_q = 5 and seqlen_k = 2, the causal mask is:
        0 0
        0 0
        0 0
        1 0
        1 1
    If the row of the mask is all zero, the output will be zero.

    If window_size != (-1, -1), implements sliding window local attention. Query at position i
    will only attend to keys between
    [i + seqlen_k - seqlen_q - window_size[0], i + seqlen_k - seqlen_q + window_size[1]] inclusive.

    Note: Does not support backward pass.

    Arguments:
        q: (batch_size, seqlen, nheads, headdim)
        k_cache: (batch_size_cache, seqlen_cache, nheads_k, headdim) if there's no page_table,
            or (num_blocks, page_block_size, nheads_k, headdim) if there's a page_table (i.e. paged KV cache)
            page_block_size can be arbitrary (e.g, 1, 2, 3, 64, etc.).
        v_cache: (batch_size_cache, seqlen_cache, nheads_k, headdim_v) if there's no page_table,
            or (num_blocks, page_block_size, nheads_k, headdim_v) if there's a page_table (i.e. paged KV cache)
        k [optional]: (batch_size, seqlen_new, nheads_k, headdim). If not None, we concatenate
            k with k_cache, starting at the indices specified by cache_seqlens.
        v [optional]: (batch_size, seqlen_new, nheads_k, headdim_v). Similar to k.
        qv [optional]: (batch_size, seqlen, nheads, headdim_v)
        rotary_cos [optional]: (seqlen_ro, rotary_dim / 2). If not None, we apply rotary embedding
            to k and q. Only applicable if k and v are passed in. rotary_dim must be divisible by 16.
        rotary_sin [optional]: (seqlen_ro, rotary_dim / 2). Similar to rotary_cos.
        cache_seqlens: int, or (batch_size,), dtype torch.int32. The sequence lengths of the
            KV cache.
        cache_batch_idx: (batch_size,), dtype torch.int32. The indices used to index into the KV cache.
            If None, we assume that the batch indices are [0, 1, 2, ..., batch_size - 1].
            If the indices are not distinct, and k and v are provided, the values updated in the cache
                 might come from any of the duplicate indices.
        cache_leftpad: (batch_size,), dtype torch.int32. The index that the KV cache starts. If None, assume 0.
        page_table [optional]: (batch_size, max_num_blocks_per_seq), dtype torch.int32.
        softmax_scale: float. The scaling of QK^T before applying softmax.
            Default to 1 / sqrt(headdim).
        causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
        window_size: (left, right). If not (-1, -1), implements sliding window local attention.
        softcap: float. Anything > 0 activates softcapping attention.
        rotary_interleaved: bool. Only applicable if rotary_cos and rotary_sin are passed in.
            If True, rotary embedding will combine dimensions 0 & 1, 2 & 3, etc. If False,
            rotary embedding will combine dimensions 0 & rotary_dim / 2, 1 & rotary_dim / 2 + 1
            (i.e. GPT-NeoX style).
        num_splits: int. If > 1, split the key/value into this many chunks along the sequence.
           If num_splits == 1, we don't split the key/value. If num_splits == 0, we use a heuristic
           to automatically determine the number of splits.
           Don't change this unless you know what you are doing.
        return_softmax_lse: bool. Whether to return the logsumexp of the attention scores.

    Return:
        out: (batch_size, seqlen, nheads, headdim).
        softmax_lse [optional, if return_softmax_lse=True]: (batch_size, nheads, seqlen). The
            logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
            normalization factor).
    r   r   z+k_cache must have contiguous last dimensionz+v_cache must have contiguous last dimensionNr   r   rY   )
r<   r=   r>   r?   r@   rA   rB   rC   rD   rE   )
r   r[   
isinstanceintrR   fullint32rO   r   rX   )!r$   k_cachev_cacher%   r&   r)   r5   r6   r   r   r   r2   r+   r-   r0   r   r8   r9   r:   r;   r<   r   r?   r@   rA   rB   rC   rD   rE   return_softmax_lserT   rU   ru   r   r   r   flash_attn_with_kvcache  sZ   u$#r   	page_sizec                 C   sX   t |}|d u r
|}t| |||||||||	d |
d |||||d |d |||||}|S )Nr   r   )r   rP   get_scheduler_metadata)re   r0   r1   r   num_heads_kvheaddimr   	qkv_dtype	headdim_vr+   r-   r   r   max_seqlen_k_newr<   r   r?   has_softcaprC   rD   rE   rB   r   r   r   r   D  s.   r   )NNNNNNNNNNNNNNNNNNNNNFr   r   r   r#   TNr   Nr   )NNNNNNNNNNFr   r   r#   Fr   )NFNNNr   r   r#   FNr   F)NFNNNNr   r   r#   r   NFr   F)NNNFNNNNr   r   r#   r   NFr   F)NN)NNNNNNNNNNNNNNNNNFr   r   r#   TNr   Nr   F))typingr   r   r   r   rR   torch.nnnn