o
    }oi                     @   s  d dl Z d dlZd dlmZ d dlmZ d dlmZmZm	Z	m
Z
 d dlZd dlZd dlmZmZmZ d dlmZ d dlmZ d dlmZ d d	lmZ d d
lmZ d dlmZmZmZmZmZm Z  d dl!m"Z"m#Z#m$Z$ d dl%m&Z& d dl'm(Z( d dl)m*Z* d dl+m,Z, d dl-m.Z.m/Z/ d dl0m1Z1 d dl2m3Z3m4Z4m5Z5m6Z6m7Z7m8Z8 d dl9m:Z: dgZ;G dd de1ee,e.Z<G dd de<e/j=Z>G dd de1e,e.Z?e/@e<du re/jAe<e>d eG dd dZBdS )    N)OrderedDict)	dataclass)ListOptionalSetTuple)
DictConfig
ListConfig	open_dict)nn)CacheAwareStreamingConfig)StreamingEncoder)CausalConv1D)ConformerLayer)LocalAttRelPositionalEncodingMultiHeadAttentionPositionalEncodingRelPositionalEncodingRelPositionMultiHeadAttention'RelPositionMultiHeadAttentionLongformer)ConvSubsamplingStackingSubsamplingSubsamplingReductionModule)adapter_utils)#compute_stochastic_depth_drop_probs)	typecheck)
Exportable)AccessMixinadapter_mixins)NeuralModule)AcousticEncodedRepresentationBoolTypeChannelTypeLengthsType
NeuralTypeSpectrogramType)loggingConformerEncoderc                       s  e Zd ZdZdRddZedd Zedd	 Zed
d Zedd Z	edd Z
edd Z																														 					dSd!ed"ed#ed$ed%ed&ed'ed(ef fd)d*Z	dTd+d,ZdUd-d.Ze 				dVd/d0Z				dVd1d2Zd3efd4d5Zd6d7 Zd8d9 ZdUd:d;Zd<d= Zd>d? Z					@dWdAedBedCedDedEef
dFdGZdejdd fdHdIZ 				dXdJedDe!e dKedLej"fdMdNZ#dOefdPdQZ$  Z%S )Yr'   a  
    The encoder for ASR model of Conformer.
    Based on this paper:
    'Conformer: Convolution-augmented Transformer for Speech Recognition' by Anmol Gulati et al.
    https://arxiv.org/abs/2005.08100

    Args:
        feat_in (int): the size of feature channels
        n_layers (int): number of layers of ConformerBlock
        d_model (int): the hidden size of the model
        feat_out (int): the size of the output features
            Defaults to -1 (means feat_out is d_model)
        subsampling (str): the method of subsampling:
            choices = ['vggnet', 'striding', 'dw-striding', 'stacking', 'stacking_norm']
            Defaults to striding.
        subsampling_factor (int): the subsampling factor which should be power of 2
            Defaults to 4.
        subsampling_conv_chunking_factor(int): optionally, force chunk inputs (helpful for large inputs)
            Should be power of 2, 1 (auto-chunking, default), or -1 (no chunking)
        subsampling_conv_channels (int): the size of the convolutions in the subsampling module
            Defaults to -1 which would set it to d_model.
        reduction (str, Optional): the method of reduction, choices=['pooling', 'striding']. If no value
            is passed, then no reduction is performed and the models runs with the original 4x subsampling.
        reduction_position (int, Optional): the index of the layer to apply reduction. If -1, apply reduction
            at the end.
        reduction_factor (int): the reduction factor which should be either 1 or a power of 2
            Defaults to 1.
        ff_expansion_factor (int): the expansion factor in feed forward layers
            Defaults to 4.
        self_attention_model (str): the type of the attention layer and positional encoding.

            'rel_pos':
                relative positional embedding and Transformer-XL
            'rel_pos_local_attn':
                relative positional embedding and Transformer-XL with local attention using
                overlapping chunks. Attention context is determined by att_context_size parameter.
            'abs_pos':
                absolute positional embedding and Transformer

            Default is rel_pos.
        pos_emb_max_len (int): the maximum length of positional embeddings
            Defaults to 5000
        n_heads (int): number of heads in multi-headed attention layers
            Defaults to 4.
        att_context_size (List[Union[List[int],int]]): specifies the context sizes on each side.
            Each context size should be a list of two integers like `[100, 100]`.
            A list of context sizes like `[[100,100]`, `[100,50]]` can also be passed. -1 means unlimited context.
            Defaults to `[-1, -1]`
        att_context_probs (List[float]): a list of probabilities of each one of the att_context_size
            when a list of them is passed. If not specified, uniform distribution is being used.
            Defaults to None
        att_context_style (str): 'regular' or 'chunked_limited'.
            Defaults to 'regular'
        xscaling (bool): enables scaling the inputs to the multi-headed attention layers by `sqrt(d_model)`.
            Defaults to True.
        untie_biases (bool): whether to not share (untie) the bias weights between layers of Transformer-XL
            Defaults to True.
        conv_kernel_size (int): the size of the convolutions in the convolutional modules
            Defaults to 31.
        conv_norm_type (str): the type of the normalization in the convolutional modules
            Defaults to 'batch_norm'.
        conv_context_size (list): it can be"causal" or a list of two integers
            while `conv_context_size[0]+conv_context_size[1]+1==conv_kernel_size`.
            `None` means `[(conv_kernel_size-1)//2`, `(conv_kernel_size-1)//2]`, and 'causal' means
            `[(conv_kernel_size-1), 0]`.
            Defaults to None.
        conv_dual_mode (bool): specifies if convolution should be dual mode when dual_offline mode is being used.
            When enables, the left half of the convolution kernel would get masked in streaming cases.
            Defaults to False.
        use_bias (bool): Use bias in all Linear and Conv1d layers from each ConformerLayer to improve
            activation flow and stabilize training of huge models.
            Defaults to True.
        dropout (float): the dropout rate used in all layers except the attention layers
            Defaults to 0.1.
        dropout_pre_encoder (float): the dropout rate used before the encoder
            Defaults to 0.1.
        dropout_emb (float): the dropout rate used for the positional embeddings
            Defaults to 0.1.
        dropout_att (float): the dropout rate used for the attention layer
            Defaults to 0.0.
        stochastic_depth_drop_prob (float): if non-zero, will randomly drop
            layers during training. The higher this value, the more often layers
            are dropped. Defaults to 0.0.
        stochastic_depth_mode (str): can be either "linear" or "uniform". If
            set to "uniform", all layers have the same probability of drop. If
            set to "linear", the drop probability grows linearly from 0 for the
            first layer to the desired value for the final layer. Defaults to
            "linear".
        stochastic_depth_start_layer (int): starting layer for stochastic depth.
            All layers before this will never be dropped. Note that drop
            probability will be adjusted accordingly if mode is "linear" when
            start layer is > 1. Defaults to 1.
        global_tokens (int): number of tokens to be used for global attention.
            Only relevant if self_attention_model is 'rel_pos_local_attn'.
            Defaults to 0.
        global_tokens_spacing (int): how far apart the global tokens are
            Defaults to 1.
        global_attn_separate (bool): whether the q, k, v layers used for global tokens should be separate.
            Defaults to False.
        use_pytorch_sdpa (bool): use torch sdpa instead of manual attention.
            Defaults to False.
        use_pytorch_sdpa_backends (list[str]): list of backend names to use in sdpa.
            None or empty list means all backends. e.g. ["MATH"]
            Defaults to None.
        bypass_pre_encode: if True, skip the pre-encoder module and the `audio_signal` should be pre-encoded
            embeddings. The `audio_signal` input supports two formats depending on the `bypass_pre_encode`
            boolean flag. This determines the required format of the input variable `audio_signal`.
            Defaults to `bypass_pre_encode=False`. `bypass_pre_encode=True` is used for the cases
            where frame-level, context-independent embeddings are needed to be saved or reused.
            (e.g., speaker cache in streaming speaker diarization)
        sync_max_audio_length (bool): when true, performs NCCL all_reduce to allocate the same amount of memory for
            positional encoding buffers on all GPUs. Disabling this setting may help with deadlocks in certain
            scenarios such as model parallelism, or generally when this module is not being ran on some GPUs
            as a part of the training step.
          c                 C   s  t |  j}| jro|}| jdur9t| jjtr| jjd }n| jj}t| jjtr1| jjd }n| jj}|| }t	j
|| j||d}t	j|d ||f|t	jd}| j|||d\}	}
}t|||	dd|
dd|g}|S t	j
|| j||d}t	j|d ||f|t	jd}t||g}|S )zs
        Generates input examples for tracing etc.
        Returns:
            A tuple of input examples.
        Nr(   device   r+   dtype)
batch_sizer+   max_dimr   )next
parametersr+   export_cache_supportstreaming_cfg
isinstance
chunk_sizelistpre_encode_cache_sizetorchrandn_feat_inrandintint64get_initial_cache_statetuple	transpose)self	max_batchr0   devwindow_sizer6   r8   input_exampleinput_example_lengthcache_last_channelcache_last_timecache_last_channel_lenall_input_example rK   b/home/ubuntu/.local/lib/python3.10/site-packages/nemo/collections/asr/modules/conformer_encoder.pyrE      s>   


zConformerEncoder.input_examplec                 C   `   t tdt ttdt tdt ddtdt ddttdt ddtt t dddS )*Returns definitions of module input ports.BDTrP   rQ   rP   rR   rQ   ToptionalrQ   rP   rQ   rR   audio_signallengthrG   rH   rI   bypass_pre_encoder   r$   r%   r?   r#   r"   r!   rA   rK   rK   rL   input_types      
zConformerEncoder.input_typesc                 C   rM   )rN   rO   rP   rP   rQ   rR   rQ   TrT   rP   rQ   rQ   rR   rW   r[   r\   rK   rK   rL   input_types_for_export   r^   z'ConformerEncoder.input_types_for_exportc              
   C   P   t tdt ttdt tdt ddtdt ddttdt dddS )+Returns definitions of module output ports.rO   rP   rS   TrT   rV   outputsencoded_lengthscache_last_channel_nextcache_last_time_nextcache_last_channel_next_lenr   r$   r    r?   r#   r"   r\   rK   rK   rL   output_types      
zConformerEncoder.output_typesc              
   C   rb   )rc   rO   rP   r_   TrT   r`   rd   rj   r\   rK   rK   rL   output_types_for_export  rl   z(ConformerEncoder.output_types_for_exportc                 C      | j s	tg dS t S )NrG   rH   rI   r3   setr\   rK   rK   rL   disabled_deployment_input_names     z0ConformerEncoder.disabled_deployment_input_namesc                 C   rn   )N)rg   rh   ri   rp   r\   rK   rK   rL    disabled_deployment_output_names  rs   z1ConformerEncoder.disabled_deployment_output_namesFstridingr,   Nrel_posregularT     
batch_norm皙?        linearr   stochastic_depth_drop_probstochastic_depth_modestochastic_depth_start_layerglobal_tokensglobal_tokens_spacingglobal_attn_separateuse_pytorch_sdpasync_max_audio_lengthc'           -   
      sr  t    || }'|| _|| _|| _|| _|| _|| _|| _|!| _	|#| _
|"| _|$| _|%d u r0g }%|%| _|&| _| j|||||d\| _| _| _| _|rQt|| _nd | _|	dkrZ|}	|r|dkr|dv rtt||||dkrndndd| _nt|||||	|td|d	| _nt||| _|
r|dkr|dkr||k sJ t|
||d
| _|| _nd | _d | _|| _ |s|dkr|| }(t!t"#||(})t!t"#||(}*tj$%|) tj$%|* nd })d }*|| _&|dkrt'|||| j|d| _(n9|dkrt)|dkrt*dt+||||| j|d| _(n|dkr"d })d }*t,|||| jd| _(nt*d| dt- | _.t/|D ]H}+t0d'i d|d|'d|d|!d|"d|#d|d|d|d| jd|d |d!|)d"|*d#| jd$|d%| jd&| j},| j.1|, q3|dkr|| j krt| j || _2|| _ nd | _2|| _ | 3| j& d| _4| 5  d| _6t7t8| j.||| | _9d | _:d S )(N)att_context_styleatt_context_sizeatt_context_probsconv_context_sizeconv_kernel_sizeru   r(   )stackingstacking_normr   TF)subsampling_factorfeat_infeat_outnorm)subsamplingr   r   r   conv_channels subsampling_conv_chunking_factor
activation	is_causal)	reductiond_modelreduction_factorrw   r   dropout_ratemax_lenxscaledropout_rate_embrel_pos_local_attnr   8When using local attention, context size must be set > 0r   r   r   r   r   r   abs_posr   r   r   r   !Not valid self_attention_model: ''!r   d_ffself_attention_modelr   r   r   n_headsr   conv_norm_typer   dropoutdropout_att
pos_bias_u
pos_bias_vr   use_biasr   use_pytorch_sdpa_backendsrK   );super__init__r   n_layersr;   r   r   r   r   r   r   r   r   r   r   _calc_context_sizesatt_context_size_allr   r   r   mathsqrtr   r   
pre_encoder   r   ReLULinearr   reduction_subsamplingreduction_position	_feat_out	Parameterr9   Tensorinitzeros_pos_emb_max_lenr   pos_encmax
ValueErrorr   r   
ModuleListlayersranger   appendout_projset_max_audio_lengthuse_pad_masksetup_streaming_paramsr3   r   lenlayer_drop_probsinterctc_capture_at_layers)-rA   r   r   r   r   causal_downsamplingr   r   r   subsampling_conv_channelsr   r   r   ff_expansion_factorr   r   r   r   r   xscalinguntie_biasesr   r   r   r   r   r   dropout_pre_encoderdropout_embr   r   r   r   r   r   r   r   r   r   r   d_headr   r   ilayer	__class__rK   rL   r   !  s0  
)	








	

zConformerEncoder.__init__c                 C   s   |dur| dd}| dd}| j|||||d}| j|dd}t|dkr)|S |d du rC|d du rC|d	 du rC|d |d fS |d |d |d  dd|d  dd|d	 fS )
z]
        Forward function for model export. Please see `forward()` for more details.
        Nr   r(   ro   F)keep_all_outputs      r,   )r@   forward_internalstreaming_post_processr   )rA   rX   rY   rG   rH   rI   retsrK   rK   rL   forward_for_export  s*   $z#ConformerEncoder.forward_for_exportc                 C   s   t |dkr|d |d dddfS |\}}}}}|dur;| jjdkr;| jjdkr;|dddd| jj dddf }| jjdkr`|rH| jdkr`|ddddd| jjf }tj|| jjd}|||||fS )z
        Post-process the output of the forward function for streaming.

        Args:
            rets: The output of the forward function.
            keep_all_outputs: Whether to keep all outputs.
        r   r   r(   Nrx   r   )r   r4   last_channel_cache_sizevalid_out_lenr   r9   clamp)rA   r   r   encodedencoded_lenrg   rh   ri   rK   rK   rL   r     s    z'ConformerEncoder.streaming_post_processc                 C   s   |s|j d | jkrtd| j d|j d  d|r2|j d | jkr2td| j d|j d  d|rC| j|d| j |jd	 n| j|d|jd	 | j||||||d
S )a  
        Forward function for the ConformerEncoder accepting an audio signal and its corresponding length.
        The `audio_signal` input supports two formats depending on the `bypass_pre_encode` boolean flag.
        This determines the required format of the input variable `audio_signal`:
        (1) bypass_pre_encode = False (default):
            `audio_signal` must be a tensor containing audio features.
            Shape: (batch, self._feat_in, n_frames)
        (2) bypass_pre_encode = True:
            `audio_signal` must be a tensor containing pre-encoded embeddings.
            Shape: (batch, n_frame, self.d_model)
        zFIf bypass_pre_encode is False, audio_signal should have shape (batch, z", n_frame) but got last dimension .ru   zNIf bypass_pre_encode is True, audio_signal should have shape (batch, n_frame, z) but got last dimension r   )
seq_lengthr+   )rG   rH   rI   rZ   )	shaper;   r   r   update_max_seq_lengthsizer   r+   r   )rA   rX   rY   rG   rH   rI   rZ   rK   rK   rL   forward$  s:   zConformerEncoder.forwardc              	   C   s  |du r|j |df|dtj|jd}| jr,t| jdkr,tj	| j| j
dd }n| j}|st|dd}t| jtjrE| |}n2| j||d\}}|tj}| jjdkrw|durw|dd| jjdddf }|| jj jdd	}| jdur|durtd
|d}|dur| jj}	|| jj }
||	 }||	 }t||	 }n|}d}d}	d}| j||	d\}}| j|||||jd\}}|dur|dd|	df }|dur|dd|	df }g }g }tt| j| jD ]\}\}}|}|dur|| }|| }nd}d}|||||||d}|dur&|\}}}| | | | | jrJ|dkrJt!d|k }|r@|d | }n
|| d|  | }| j|krt| j"||d\}}|d}| j||	d\}}| j|||||jd\}}| #t$| ddr| j%du r| j&'di 'dg | _%|| j%v r|}| j(dur| (|}| j)d| t|ddd | j)d| |d q| j(dur| (|}| jdkr| j"||d\}}t|dd}|jtjd}|durtj*|dd}tj*|dd}||||tj||
 |	dfS ||fS )a  
        The `audio_signal` input supports two formats depending on the `bypass_pre_encode` boolean flag.
        This determines the required format of the input variable `audio_signal`:
        (1) bypass_pre_encode = False (default):
            `audio_signal` must be a tensor containing audio features.
            Shape: (batch, self._feat_in, n_frames)
        (2) bypass_pre_encode = True:
            `audio_signal` must be a tensor containing pre-encoded embeddings.
            Shape: (batch, n_frame, self.d_model)

        `bypass_pre_encode=True` is used in cases where frame-level, context-independent embeddings are
        needed to be saved or reused (e.g., speaker cache in streaming speaker diarization).
        Nr   ru   r.   r+   r(   )weightsr   )xlengths)minz4Caching with reduction feature is not supported yet!)r   	cache_len)r   padding_lengthmax_audio_lengthoffsetr+   )r   att_maskpos_embpad_maskrG   rH   r}         ?
model_guidinterctccapture_layersinterctc/layer_output_)nametensorinterctc/layer_length_)r.   )dimr   )+new_fullr   r9   r=   r+   trainingr   r   randomchoicesr   r   r@   r5   r   r   r   tor4   drop_extra_pre_encodedr   r   r   r   cache_drop_sizenegr   _create_masks	enumeratezipr   r   r   randr   is_access_enabledgetattrr   
access_cfggetr   register_accessible_tensorstack)rA   rX   rY   rG   rH   rI   rZ   cur_att_context_sizer   r   cache_keep_sizer   r   rg   r   r   r   rh   lth	drop_probr   original_signalcache_last_channel_curcache_last_time_curshould_drop_lth_audio_signalrK   rK   rL   r   S  s   




	




	


z!ConformerEncoder.forward_internalr   c                 C   sb   | j r#tj r#tj|gtj|d}tjj|tjjjd |	 
 }|| jkr/| | dS dS )z
        Updates the maximum sequence length for the model.

        Args:
            seq_length (int): New maximum sequence length.
            device (torch.device): Device to use for computations.
        r   )opN)r   r9   distributedis_initializedr  float32
all_reduceReduceOpMAXintitemr   r   )rA   r   r+   global_max_lenrK   rK   rL   r     s   	
z&ConformerEncoder.update_max_seq_lengthc                 C   s6   || _ t|  j}t|  j}| j||| dS )z
        Sets maximum input length.
        Pre-calculates internal seq_range mask.

        Args:
            max_audio_length (int): New maximum sequence length.
        N)r   r1   r2   r+   r.   r   	extend_pe)rA   r   r+   r.   rK   rK   rL   r     s   z%ConformerEncoder.set_max_audio_lengthc                 C   s
  | j dkrtjd||tj|d}| jdkr3|d dkr$|j|d  d}|d dkr2|j|d d}ng| jdkr|d dkrN|d dkrM|j|d  d}nL|d d }|d dkra|d | }nd	}tjd|tj|j	d}	tj
|	|d
d}	|	d|	d }
tt|
|t|
d}t||d}nd }tjd||d|dd|dk }|d urtjd||d|dd|dk}||}|d ur|dd|dg}t||dd}|d d d |d |f }t|||j	}| }| }||fS )Nr   r(   r   rx   r   )diagonalchunked_limitedru   '  trunc)rounding_moder*   r   )r   r9   onesboolr   triutrilaranger(  r+   div	unsqueezelogical_andlegeexpandr   repeatr@   r	  )rA   r   r   r   r   r+   r   r6   left_chunks_num	chunk_idxdiff_chunkschunked_limited_maskr   pad_mask_offpad_mask_for_att_maskrK   rK   rL   r    s\   





zConformerEncoder._create_masksc                 C   s   | j }|| _ |S )z
        Enables or disables the pad mask and assign the boolean state `on`.

        Returns:
            mask (bool): The current state of the pad mask.
        )r   )rA   onmaskrK   rK   rL   enable_pad_maskT  s   z ConformerEncoder.enable_pad_maskc           	      C   s  |rZt |}t|d tr|g}t|D ]D\}}t|tr#t |||< |dkrX|d dkrD|d |d d  dkrDtd| d| d|d dk rXt|dkrXtd| dqnd	d	gg}|r|t|t|krmtd
t |}t|dkr{tdndt| gt| }|d urt|trt |}t|t st|tstd|dkr|d dg}n"|d |d  d |krtd| j	 dn|d d |d d g}||d ||fS )Nr   r-  r(   zatt_context_size[z][0] % (att_context_size[z][1] + 1) should be zero!z Right context (att_context_size[z5][1]) can not be unlimited for chunked_limited style!ru   zIThe size of the att_context_probs should be the same as att_context_size.zTThe sum of numbers in att_context_probs should be equal to one to be a distribution.r   zVInvalid conv_context_size! It should be the string 'causal' or a list of two integers.causalzInvalid conv_context_size: !r   )
r7   r5   r(  r  r	   r   r   sumstrr   )	rA   r   r   r   r   r   r   r   att_csrK   rK   rL   r   `  sR   
$


z$ConformerEncoder._calc_context_sizesc                 C   s<   || j vrtd| d| j   |dur|| _|   dS )z
        Sets the default attention context size from `att_context_size` argument.

        Args:
            att_context_size (list): The attention context size to be set.
        zatt_context_size=z5 is not among the list of the supported look-aheads: N)r   r&   warningr   r   )rA   r   rK   rK   rL   set_default_att_context_size  s   
z-ConformerEncoder.set_default_att_context_sizer.  r6   
shift_sizeleft_chunksr   max_contextc           
      C   s  t  }|du r
| j}|dur |dk rtd|d }|| |_n*| jdkr-|d }d|_n| jdkrE|d | j | jd | j  }||_nd|_d}|du r\|d dkrX|d n||_n!|du rx|d dkrj|d n||_t	d|j d n|| |_t
| jd	r| j }nd}t|tr|d | j|  |d | j|  g|_n|d|  |_t|tr|d |d ||j   |d |d ||j   g|_n
|d| |j  |_t|jtr|jd |d  | j d |_n|j| j |_t
| jd
r| j |_nd|_t|jtr%|jd dkr!d|jd d | j  |_nd|_n|j| j |_| j D ]}	t
|	drMt|	trC|j|	_t|	trM|j|	_q1|| _dS )a  
        This function sets the needed values and parameters to perform streaming.
        The configuration would be stored in self.streaming_cfg.
        The streaming configuration is needed to simulate streaming inference.

        Args:
            chunk_size (int): overrides the chunk size
            shift_size (int): overrides the shift size for chunks
            left_chunks (int): overrides the number of left chunks visible to each chunk
            max_context (int): the value used for the cache size of last_channel layers
                               if left context is set to infinity (-1)
                               Defaults to -1 (means feat_out is d_model)
        Nr(   z7chunk_size needs to be a number larger or equal to one.r-  r   rx   z/left_chunks is not set. Setting it to default: r   get_sampling_framesget_streaming_cache_size_max_cache_len)r   r   r   r  r   r   r   r   r&   rK  hasattrr   rP  r5   r7   r   r6   rM  r   rQ  r8   r
  r   modulesr   r   r4   )
rA   r6   rM  rN  r   rO  r4   lookahead_stepssampling_framesmrK   rK   rL   r     s|   





z'ConformerEncoder.setup_streaming_paramsc                 C   s  |d u rt |  j}|dkrtj}ntj}| jd }|t| j|| j	j
| jf||d}|t| j|| j|f||d}|dkr|tjdt|| j	j
|f|tjd}	t|D ]&}
d|d d |
|	|
 d d d f< |	|
 dkrzd|d d |
d d d d f< qTn	tj||tjd}	|||	fS )Nr   r-   )r1   r2   r+   r9   r:   zerosr   r   r   r4   r   r   r<   r   r=   r   )rA   r/   r.   r+   r0   create_tensorlast_time_cache_sizerG   rH   rI   r   rK   rK   rL   r>     sH   

 
z(ConformerEncoder.get_initial_cache_stater   update_configr+   c           	      C   s^  |rt |}n| j}|du r| j}|dkrt|dkrtd|dkr6t| jj| jj| jj	| j
| jjd}n4|dkrNt|| jj| jj| jj	| j
| jjd}n|dkrbt| jj| jj| jj	| j
d	}ntd
| d|durt|j|d}| `|| _|| _|| _| | j	 |  D ]\}}t|tkr|dkrt| jj| jj| jj|d dd| j| jd}n=|dkrt| jj| jj| jj|d |dd| j| jd	}n!|dkrt| jj| jj| jj|d | j| jd}ntd| d|dur|j|d}|j|j dd |`||_||_q|r-t| j || j_|| j_W d   dS 1 s&w   Y  dS dS )a  
        Update the self_attention_model which changes the positional encoding and attention layers.

        Args:
            self_attention_model (str): type of the attention layer and positional encoding

                'rel_pos':
                    relative positional embedding and Transformer-XL

                'rel_pos_local_attn':
                    relative positional embedding and Transformer-XL with local attention using
                    overlapping windows. Attention context is determined by att_context_size parameter.

                'abs_pos':
                    absolute positional embedding and Transformer

                If None is provided, the self_attention_model isn't changed. Defaults to None.
            att_context_size (List[int]): List of 2 ints corresponding to left and right attention context sizes,
                or None to keep as it is. Defaults to None.
            update_config (bool): Whether to update the config or not with the new attention model.
                Defaults to True.
            device (torch.device): If provided, new layers will be moved to the device.
                Defaults to None.
        Nr   r   r   rw   r   r   r   r   r   r   r*   )n_headn_featr   max_cache_lenr   r   r   r   )	r\  r]  r   r^  r   r   r   r   r   )r\  r]  r   r^  r   r   'z|' is not not a valid value for 'self_attention_model', valid values can be from ['rel_pos', 'rel_pos_local_attn', 'abs_pos']F)strict)r7   r   r   r   r   r   _cfgr   r   r   r   r   r   r   r	  r   r   named_modulestyper   r   r   r   r   r   r   r   load_state_dict	self_attn
state_dictr
   )	rA   r   r   r[  r+   new_pos_encr  rW  new_attnrK   rK   rL   change_attention_model4  s    

	

$z'ConformerEncoder.change_attention_modelr   c                 C   s,   t | jdstd dS | jj|d dS )a  
        Update the conv_chunking_factor (int)
        Default is 1 (auto)
        Set it to -1 (disabled) or to a specific value (power of 2) if you OOM in the conv subsampling layers


        Args:
            subsampling_conv_chunking_factor (int)
        'change_subsampling_conv_chunking_factorzPModel pre_encoder doesn't have a change_subsampling_conv_chunking_factor method N)r   )rS  r   r&   inforj  )rA   r   rK   rK   rL   rj    s   

z8ConformerEncoder.change_subsampling_conv_chunking_factor)r(   r)   )#ru   Frv   r,   r(   ru   NNr(   r,   rw   r,   NNrx   TTry   rz   r{   NTr|   r|   r|   r}   r}   r~   r(   r   r(   FFNTNNN)T)NNNF)NNNNr.  )NNTN)&__name__
__module____qualname____doc__rE   propertyr]   ra   rk   rm   rr   rt   floatrI  r(  r2  r   r   r   r   r   r   r   r   r  rE  r   rL  r7   r   r9   r$  r>   r   r+   ri  rj  __classcell__rK   rK   r   rL   r'   >   s    
t*





 !"#$%' M

2
 )
80
k*
 c                       s   e Zd ZdZdedefddZdefddZdde	e defddZ
dee fddZdefddZdee f fddZ  ZS )ConformerEncoderAdapterzLThis class inherits from ConformerEncoder and wraps the adapter mixin class.r  cfgc                 C   s&   |  |}| jD ]}||| qd S N)_update_adapter_cfg_input_dimr   add_adapter)rA   r  ru  conformer_layerrK   rK   rL   rx    s   

z#ConformerEncoderAdapter.add_adapterreturnc                 C   s   t dd | jD S )Nc                 S   s   g | ]}|  qS rK   )is_adapter_available).0ry  rK   rK   rL   
<listcomp>      z@ConformerEncoderAdapter.is_adapter_available.<locals>.<listcomp>)anyr   r\   rK   rK   rL   r{    s   z,ConformerEncoderAdapter.is_adapter_availableNTenabledc                 C   s   | j D ]	}|j||d qd S )N)r  r  )r   set_enabled_adapters)rA   r  r  ry  rK   rK   rL   r    s   
z,ConformerEncoderAdapter.set_enabled_adaptersc                 C   s2   t g }| jD ]	}||  qtt|}|S rv  )rq   r   updateget_enabled_adapterssortedr7   )rA   namesry  rK   rK   rL   r    s
   
z,ConformerEncoderAdapter.get_enabled_adaptersc                 C   s   t j| || jd}|S )N)
module_dim)r   update_adapter_cfg_input_dimr   )rA   ru  rK   rK   rL   rw    s   z5ConformerEncoderAdapter._update_adapter_cfg_input_dimc                    s8   t   }t|dkr| tjtjtjg |  }|S )Nr   )r   get_accepted_adapter_typesr   set_accepted_adapter_typesr   LINEAR_ADAPTER_CLASSPATHMHA_ADAPTER_CLASSPATHRELMHA_ADAPTER_CLASSPATH)rA   typesr   rK   rL   r    s   
z2ConformerEncoderAdapter.get_accepted_adapter_types)NT)rm  rn  ro  rp  rI  dictrx  r2  r{  r   r  r   r  r   rw  r   rc  r  rs  rK   rK   r   rL   rt    s    rt  c                       s`   e Zd ZdZ			ddedee dededef
 fd	d
Z		dde
ejejf fddZ  ZS )#ConformerMultiLayerFeatureExtractorai  
    A wrapper module that extracts features from multiple layers of a ConformerEncoder,
    by reusing existing mechanisim for interctc loss.
    To use it, set `layer_idx_list` to  specify the indices of layers to extract from.
    Also, you can specify an `aggretator` module to aggregate the features from different layers,
    default not aggregating.
    NFencoderlayer_idx_list
aggregatordetachconvert_to_cpuc                    s|   t    || _dd |D | _| jD ]}|dk s |t|jkr.td| dt|j dqd| ji||d| _|| _d S )	Nc                 S   s   g | ]}t |qS rK   )r(  )r|  lyr_idxrK   rK   rL   r}  	  r~  z@ConformerMultiLayerFeatureExtractor.__init__.<locals>.<listcomp>r   zlayer index z out of range [0, )r   )r   r  r  )	r   r   r  r  r   r   r   enc_access_cfgr  )rA   r  r  r  r  r  r   r   rK   rL   r     s   


z,ConformerMultiLayerFeatureExtractor.__init__rz  c              	   C   st  | j t| dd d}| j| jt| dd d | jdt| dd d | j|||||d}i }| | j D ]}	|	D ]}
|
drL|
|v rLt	d|
 dq9|
|	 q5g }g }| jD ]@}z|d	|  }|d
|  }W n ty{   t	d| dw t|dkst|dkrt	d||d  ||d  qZ| j  | j|t| dd d | jd ur| ||S ||fS )Nr   )guidT)access_enabledr  )rX   rY   rG   rH   rI   z	interctc/zlayer z  has been logged multiple times!r   r  zIntermediate layer zS was not captured! Check the layer index and the number of ConformerEncoder layers.r(   z4Make sure encoder.forward is called exactly one timer   )r  r  update_access_cfgr  set_access_enabledr  get_module_registryvalues
startswithRuntimeErrorr  r  KeyErrorr   r   reset_registryr  )rA   rX   rY   rG   rH   rI   old_access_flagr  total_registrymodule_registrykeyencoded_listencoded_len_list	layer_idxlayer_outputslayer_lengthsrK   rK   rL   r     sJ   	



z+ConformerMultiLayerFeatureExtractor.forward)NFFrl  )rm  rn  ro  rp  r'   r   r(  r   r2  r   r   r9   r   r   rs  rK   rK   r   rL   r    s(    r  )
base_classadapter_classc                   @   s6   e Zd ZU dZdZee ed< dZee	e
  ed< dS )ConformerChangeConfiga  
    Change self_attention_model for Conformer.

    Options:
     'rel_pos': relative positional embedding and Transformer-XL
     'rel_pos_local_attn': relative positional embedding and Transformer-XL with local attention using
      overlapping chunks. Attention context is determined by att_context_size parameter.
     'abs_pos': absolute positional embedding and Transformer
    Nr   r   )rm  rn  ro  rp  r   r   rI  __annotations__r   r   r(  rK   rK   rK   rL   r  M  s   
 r  )Cr   r  collectionsr   dataclassesr   typingr   r   r   r   r9   torch.distributed	omegaconfr   r	   r
   r   #nemo.collections.asr.models.configsr   +nemo.collections.asr.parts.mixins.streamingr   2nemo.collections.asr.parts.submodules.causal_convsr   7nemo.collections.asr.parts.submodules.conformer_modulesr   :nemo.collections.asr.parts.submodules.multi_head_attentionr   r   r   r   r   r   1nemo.collections.asr.parts.submodules.subsamplingr   r   r    nemo.collections.asr.parts.utilsr   5nemo.collections.asr.parts.utils.regularization_utilsr   nemo.core.classes.commonr   nemo.core.classes.exportabler   nemo.core.classes.mixinsr   r   nemo.core.classes.moduler   nemo.core.neural_typesr    r!   r"   r#   r$   r%   
nemo.utilsr&   __all__r'   AdapterModuleMixinrt  r  get_registered_adapterregister_adapterr  rK   rK   rK   rL   <module>   sP              -S