from typing import Callable, Optional, Union

import torch
from torch import nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...integrations import use_kernel_forward_from_hub
from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
    BaseModelOutputWithPast,
    CausalLMOutputWithPast,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutputWithPast,
    TokenClassifierOutput,
)
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import LossKwargs, auto_docstring, can_return_tuple, logging
from .configuration_qwen2 import Qwen2Config


logger = logging.get_logger(__name__)


class Qwen2MLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return down_proj


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed
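

# Illustrative sketch (not part of the upstream module): RoPE rotates each
# (x[i], x[i + head_dim // 2]) channel pair by a position-dependent angle, so
# `apply_rotary_pos_emb` preserves per-head vector norms. cos/sin are rebuilt
# by hand the way the default rope init does; all sizes here are made up.
def _demo_rope_preserves_norm():
    batch, num_heads, seq_len, head_dim = 1, 2, 5, 8
    q = torch.randn(batch, num_heads, seq_len, head_dim)
    k = torch.randn(batch, num_heads, seq_len, head_dim)
    inv_freq = 1.0 / (10000.0 ** (torch.arange(0, head_dim, 2).float() / head_dim))
    freqs = torch.outer(torch.arange(seq_len).float(), inv_freq)  # [seq_len, head_dim // 2]
    emb = torch.cat((freqs, freqs), dim=-1)                       # [seq_len, head_dim]
    cos, sin = emb.cos()[None], emb.sin()[None]                   # [1, seq_len, head_dim]
    q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin)
    torch.testing.assert_close(q_rot.norm(dim=-1), q.norm(dim=-1))
    torch.testing.assert_close(k_rot.norm(dim=-1), k.norm(dim=-1))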


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights
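

# Illustrative sketch (not part of the upstream module): with grouped-query
# attention, `eager_attention_forward` first tiles each KV head across its query
# group via `repeat_kv`. The sizes below are made up; 8 query heads over 2 KV
# heads gives n_rep = 4, so heads 0-3 all read from original KV head 0.
def _demo_repeat_kv():
    batch, num_kv_heads, seq_len, head_dim = 2, 2, 6, 16
    n_rep = 4  # e.g. num_attention_heads=8 // num_key_value_heads=2
    kv = torch.randn(batch, num_kv_heads, seq_len, head_dim)
    expanded = repeat_kv(kv, n_rep)
    assert expanded.shape == (batch, num_kv_heads * n_rep, seq_len, head_dim)
    torch.testing.assert_close(expanded[:, 0], expanded[:, n_rep - 1])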


class Qwen2Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: Qwen2Config, layer_idx: int):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True
        self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=True)
        self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=True)
        self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=True)
        self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
        self.sliding_window = config.sliding_window if config.layer_types[layer_idx] == "sliding_attention" else None

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            sliding_window=self.sliding_window,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


@use_kernel_forward_from_hub("RMSNorm")
class Qwen2RMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        Qwen2RMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
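

# Illustrative sketch (not part of the upstream module): unlike LayerNorm,
# RMSNorm does not subtract the mean -- it only divides by the root-mean-square
# of the features and applies a learned per-channel scale (ones at init).
def _demo_rmsnorm():
    norm = Qwen2RMSNorm(hidden_size=8, eps=1e-6)
    x = torch.randn(2, 3, 8)
    manual = x / torch.sqrt(x.pow(2).mean(-1, keepdim=True) + 1e-6)
    torch.testing.assert_close(norm(x), manual)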
ee
 dee dee deej	 deeejejf  dee deejeeejejf  f fddZ  ZS )Qwen2DecoderLayerr&   rv   c                    s^   t    |j| _t||d| _t|| _t|j|jd| _	t|j|jd| _
|j| | _d S )N)r&   rv   r   )r$   r%   r'   ru   	self_attnr    mlpr   rms_norm_epsinput_layernormpost_attention_layernormr   attention_typer   r1   r3   r4   r%      s   

zQwen2DecoderLayer.__init__NFrP   r_   rK   r   output_attentions	use_cacher   r   rn   rR   c	                 K   st   |}
|  |}| jd||||||||d|	\}}|
| }|}
| |}| |}|
| }|f}|r8||f7 }|S )N)rP   r_   rK   r   r   r   r   r   r3   )r   r   r   r   )r0   rP   r_   rK   r   r   r   r   r   rn   residualself_attn_weightsoutputsr3   r3   r4   r7      s.   
	



zQwen2DecoderLayer.forward)NNNFFNN)r8   r9   r:   r   r   r%   rA   r   r   r   r   boolr   r   r   FloatTensorr7   r;   r3   r3   r1   r4   r      s<    	


@auto_docstring
class Qwen2PreTrainedModel(PreTrainedModel):
    config_class = Qwen2Config
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["Qwen2DecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn_3 = True
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True
    _supports_attention_backend = True

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, Qwen2RMSNorm):
            module.weight.data.fill_(1.0)


class Qwen2RotaryEmbedding(nn.Module):
    def __init__(self, config: Qwen2Config, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # Force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
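

# Illustrative sketch (not part of the upstream module): the rotary module emits
# one (cos, sin) pair per position with head_dim channels, computed once per
# forward pass and shared by every decoder layer. Config values are toy.
def _demo_rotary_shapes():
    cfg = Qwen2Config(hidden_size=32, num_attention_heads=4, num_key_value_heads=4)
    rope = Qwen2RotaryEmbedding(cfg)
    x = torch.randn(1, 6, cfg.hidden_size)  # only dtype/device of `x` are used
    cos, sin = rope(x, torch.arange(6)[None])
    assert cos.shape == sin.shape == (1, 6, cfg.hidden_size // cfg.num_attention_heads)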


@auto_docstring
class Qwen2Model(Qwen2PreTrainedModel):
    def __init__(self, config: Qwen2Config):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [Qwen2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = Qwen2RotaryEmbedding(config=config)
        self.gradient_checkpointing = False
        self.has_sliding_layers = "sliding_attention" in self.config.layer_types

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
    ) -> BaseModelOutputWithPast:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if not isinstance(past_key_values, (type(None), Cache)):
            raise ValueError("The `past_key_values` should be either a `Cache` object or `None`.")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache()

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        # It may already have been prepared by e.g. `generate`
        if not isinstance(causal_mask_mapping := attention_mask, dict):
            # Prepare mask arguments
            mask_kwargs = {
                "config": self.config,
                "input_embeds": inputs_embeds,
                "attention_mask": attention_mask,
                "cache_position": cache_position,
                "past_key_values": past_key_values,
                "position_ids": position_ids,
            }
            # Create the masks
            causal_mask_mapping = {
                "full_attention": create_causal_mask(**mask_kwargs),
            }
            # The sliding window alternating layers are not always activated depending on the config
            if self.has_sliding_layers:
                causal_mask_mapping["sliding_attention"] = create_sliding_window_causal_mask(**mask_kwargs)

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask_mapping[decoder_layer.attention_type],
                position_ids=position_ids,
                past_key_value=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **flash_attn_kwargs,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )


class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...
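

# Illustrative sketch (not part of the upstream module): running the bare decoder
# stack on random token ids. The deliberately tiny config is made up, and this
# assumes a transformers version matching the API reconstructed above.
def _demo_tiny_qwen2_model():
    cfg = Qwen2Config(
        vocab_size=128, hidden_size=32, intermediate_size=64,
        num_hidden_layers=2, num_attention_heads=4, num_key_value_heads=2,
        max_position_embeddings=64,
    )
    model = Qwen2Model(cfg).eval()
    input_ids = torch.randint(0, cfg.vocab_size, (1, 10))
    with torch.no_grad():
        out = model(input_ids=input_ids)
    assert out.last_hidden_state.shape == (1, 10, cfg.hidden_size)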


@auto_docstring
class Qwen2ForCausalLM(Qwen2PreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]
    _tp_plan = {"lm_head": "colwise_rep"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config):
        super().__init__(config)
        self.model = Qwen2Model(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[KwargsForCausalLM],
    ) -> CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, Qwen2ForCausalLM

        >>> model = Qwen2ForCausalLM.from_pretrained("Qwen/Qwen2-7B-Instruct")
        >>> tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-7B-Instruct")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
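

# Illustrative sketch (not part of the upstream module): `logits_to_keep=1`
# projects only the last position through `lm_head`, which is how `generate`
# avoids materialising a full [batch, seq_len, vocab_size] tensor per step.
def _demo_logits_to_keep():
    cfg = Qwen2Config(
        vocab_size=128, hidden_size=32, intermediate_size=64,
        num_hidden_layers=2, num_attention_heads=4, num_key_value_heads=2,
    )
    lm = Qwen2ForCausalLM(cfg).eval()
    input_ids = torch.randint(0, cfg.vocab_size, (1, 10))
    with torch.no_grad():
        out = lm(input_ids=input_ids, logits_to_keep=1)
    assert out.logits.shape == (1, 1, cfg.vocab_size)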


@auto_docstring(
    custom_intro="""
    The Qwen2 Model transformer with a sequence classification head on top (linear layer).

    [`Qwen2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT-2) do.

    Since it does classification on the last token, it requires to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """
)
class Qwen2ForSequenceClassification(Qwen2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = Qwen2Model(config)
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> SequenceClassifierOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        transformer_outputs: BaseModelOutputWithPast = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        hidden_states = transformer_outputs.last_hidden_state
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]

        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
        if self.config.pad_token_id is None:
            last_non_pad_token = -1
        elif input_ids is not None:
            # To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id
            non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
            token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32)
            last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
        else:
            last_non_pad_token = -1
            logger.warning_once(
                f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
                "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
            )

        pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config)

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )
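

# Illustrative sketch (not part of the upstream module): the classification head
# pools the hidden state of the rightmost non-padding token, located with the
# `pad_token_id` logic above. Toy config; the batch below is right-padded.
def _demo_sequence_classification_pooling():
    cfg = Qwen2Config(
        vocab_size=128, hidden_size=32, intermediate_size=64,
        num_hidden_layers=2, num_attention_heads=4, num_key_value_heads=2,
        num_labels=3, pad_token_id=0,
    )
    clf = Qwen2ForSequenceClassification(cfg).eval()
    input_ids = torch.tensor([[5, 6, 7, 0, 0]])  # last real token at index 2
    with torch.no_grad():
        out = clf(input_ids=input_ids, attention_mask=(input_ids != 0).long())
    assert out.logits.shape == (1, cfg.num_labels)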


@auto_docstring
class Qwen2ForTokenClassification(Qwen2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = Qwen2Model(config)
        if getattr(config, "classifier_dropout", None) is not None:
            classifier_dropout = config.classifier_dropout
        elif getattr(config, "hidden_dropout", None) is not None:
            classifier_dropout = config.hidden_dropout
        else:
            classifier_dropout = 0.1
        self.dropout = nn.Dropout(classifier_dropout)
        self.score = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> TokenClassifierOutput:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        outputs: BaseModelOutputWithPast = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        sequence_output = outputs.last_hidden_state
        sequence_output = self.dropout(sequence_output)
        logits = self.score(sequence_output)

        loss = None
        if labels is not None:
            loss = self.loss_function(logits, labels, self.config)

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
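

# Illustrative sketch (not part of the upstream module): token classification
# keeps the full sequence and emits one logit vector per position. Toy config.
def _demo_token_classification_shapes():
    cfg = Qwen2Config(
        vocab_size=128, hidden_size=32, intermediate_size=64,
        num_hidden_layers=2, num_attention_heads=4, num_key_value_heads=2,
        num_labels=5,
    )
    tagger = Qwen2ForTokenClassification(cfg).eval()
    input_ids = torch.randint(0, cfg.vocab_size, (2, 7))
    with torch.no_grad():
        out = tagger(input_ids=input_ids)
    assert out.logits.shape == (2, 7, cfg.num_labels)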


@auto_docstring
class Qwen2ForQuestionAnswering(Qwen2PreTrainedModel):
    base_model_prefix = "transformer"

    def __init__(self, config):
        super().__init__(config)
        self.transformer = Qwen2Model(config)
        self.qa_outputs = nn.Linear(config.hidden_size, 2)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.transformer.embed_tokens

    def set_input_embeddings(self, value):
        self.transformer.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        start_positions: Optional[torch.LongTensor] = None,
        end_positions: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        **kwargs,
    ) -> QuestionAnsweringModelOutput:
        outputs: BaseModelOutputWithPast = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )

        sequence_output = outputs.last_hidden_state

        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        loss = None
        if start_positions is not None and end_positions is not None:
            loss = self.loss_function(start_logits, end_logits, start_positions, end_positions, **kwargs)

        return QuestionAnsweringModelOutput(
            loss=loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
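

# Illustrative sketch (not part of the upstream module): the QA head projects
# each position to two channels and splits them into start/end logits. Toy config.
def _demo_question_answering_shapes():
    cfg = Qwen2Config(
        vocab_size=128, hidden_size=32, intermediate_size=64,
        num_hidden_layers=2, num_attention_heads=4, num_key_value_heads=2,
    )
    qa = Qwen2ForQuestionAnswering(cfg).eval()
    input_ids = torch.randint(0, cfg.vocab_size, (1, 9))
    with torch.no_grad():
        out = qa(input_ids=input_ids)
    assert out.start_logits.shape == out.end_logits.shape == (1, 9)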


__all__ = [
    "Qwen2PreTrainedModel",
    "Qwen2Model",
    "Qwen2ForCausalLM",
    "Qwen2ForSequenceClassification",
    "Qwen2ForTokenClassification",
    "Qwen2ForQuestionAnswering",
]