import math
from collections.abc import Callable
from dataclasses import dataclass

import torch
from torch import nn

from ... import initialization as init
from ...activations import ACT2FN
from ...cache_utils import Cache
from ...generation import GenerationMixin
from ...integrations import use_kernel_forward_from_hub
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPast, BaseModelOutputWithPooling
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import (
    ModelOutput,
    TransformersKwargs,
    auto_docstring,
    can_return_tuple,
    torch_compilable_check,
)
from ...utils.generic import merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from ..auto import AutoModel
from .configuration_ovis2 import Ovis2Config, Ovis2VisionConfig


@dataclass
@auto_docstring
class BaseModelOutputWithVisualIndicatorFeatures(BaseModelOutputWithPooling):
    r"""
    visual_indicator_features (`torch.FloatTensor` of shape `(batch_size, visual_indicator_size)`):
        Visual indicator features extracted from the model, which can be used for auxiliary tasks or further processing.
    """

    visual_indicator_features: torch.FloatTensor | None = None


@dataclass
@auto_docstring(
    custom_intro="""
    Base class for Ovis2 outputs, with hidden states and attentions.
    """
)
class Ovis2ModelOutputWithPast(BaseModelOutputWithPast):
    r"""
    past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
        It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).

        Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
        `past_key_values` input) to speed up sequential decoding.
    image_hidden_states (`torch.FloatTensor`, *optional*):
        A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
        image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
    """

    image_hidden_states: torch.FloatTensor | None = None
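

# Editor's illustration (not part of the modeling code): like every `ModelOutput`
# subclass, the dataclasses above and below support attribute access, dict-style
# access, and integer indexing over their non-None fields, e.g.
#
#   out = Ovis2ModelOutputWithPast(last_hidden_state=torch.ones(1, 2, 4))
#   out["last_hidden_state"] is out.last_hidden_state   # True
#   out[0] is out.last_hidden_state                     # True; None fields are skipped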


@dataclass
@auto_docstring(
    custom_intro="""
    Base class for Ovis2 causal language model (or autoregressive) outputs.
    """
)
class Ovis2CausalLMOutputWithPast(ModelOutput):
    r"""
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        Language modeling loss (for next-token prediction).
    logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
        Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
    past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
        It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).

        Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
        `past_key_values` input) to speed up sequential decoding.
    image_hidden_states (`torch.FloatTensor`, *optional*):
        A `torch.FloatTensor` of size `(batch_size * num_patches, num_images, sequence_length, hidden_size)`.
        image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
    """

    loss: torch.FloatTensor | None = None
    logits: torch.FloatTensor | None = None
    past_key_values: Cache | None = None
    hidden_states: tuple[torch.FloatTensor] | None = None
    attentions: tuple[torch.FloatTensor] | None = None
    image_hidden_states: torch.FloatTensor | None = None


@use_kernel_forward_from_hub("RMSNorm")
class Ovis2RMSNorm(nn.Module):
    def __init__(self, hidden_size, eps: float = 1e-6) -> None:
        """
        Ovis2RMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        input_dtype = hidden_states.dtype
        # Compute the norm in float32 for numerical stability, then cast back.
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"


class Ovis2VisionMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return down_proj


class Ovis2VisionEmbeddings(nn.Module):
    def __init__(self, config: Ovis2VisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size

        self.patch_embedding = nn.Conv2d(
            in_channels=config.num_channels,
            out_channels=self.embed_dim,
            kernel_size=self.patch_size,
            stride=self.patch_size,
            padding="valid",
        )

        self.num_patches = (self.image_size // self.patch_size) ** 2
        self.num_positions = self.num_patches
        self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
        self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)
        self.rms_norm = Ovis2RMSNorm(config.hidden_size, config.rms_norm_eps)

    def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
        target_dtype = self.patch_embedding.weight.dtype
        patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))
        patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
        embeddings = self.rms_norm(patch_embeds)
        embeddings = embeddings + self.position_embedding(self.position_ids)
        return embeddings


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: torch.Tensor | None,
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling
    if attention_mask is not None:
        attn_weights = attn_weights + attention_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)

    attn_output = torch.matmul(attn_weights, value)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


class Ovis2VisionAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )
        self.scale = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = False

        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.qkv_bias)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.qkv_bias)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.qkv_bias)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.qkv_bias)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor | None]:
        """Input shape: Batch x Time x Channel"""
        batch_size, seq_length, embed_dim = hidden_states.shape

        queries = self.q_proj(hidden_states)
        keys = self.k_proj(hidden_states)
        values = self.v_proj(hidden_states)

        queries = queries.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
        keys = keys.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
        values = values.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            queries,
            keys,
            values,
            attention_mask,
            is_causal=self.is_causal,
            scaling=self.scale,
            dropout=0.0 if not self.training else self.attention_dropout,
        )

        attn_output = attn_output.reshape(batch_size, seq_length, embed_dim).contiguous()
        attn_output = self.out_proj(attn_output)
        return attn_output, attn_weights


class Ovis2MLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return down_proj


class Ovis2VisionEncoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: Ovis2VisionConfig):
        super().__init__()
        self.attention = Ovis2VisionAttention(config)
        self.ffn = Ovis2MLP(config)
        self.rms_norm1 = Ovis2RMSNorm(config.hidden_size, config.rms_norm_eps)
        self.rms_norm2 = Ovis2RMSNorm(config.hidden_size, config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.Tensor:
        # Pre-norm residual block: attention first, then the feed-forward network.
        norm_hidden_states = self.rms_norm1(hidden_states)
        attn_output, _ = self.attention(hidden_states=norm_hidden_states, attention_mask=attention_mask, **kwargs)
        hidden_states = hidden_states + attn_output
        norm_hidden_states = self.rms_norm2(hidden_states)
        mlp_output = self.ffn(norm_hidden_states)
        hidden_states = hidden_states + mlp_output
        return hidden_states
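

# Sanity-check sketch (illustrative only, made-up shapes): without a mask and with
# dropout disabled, `eager_attention_forward` above should agree with PyTorch's fused
# scaled-dot-product attention up to numerical tolerance. Note the eager path already
# returns `(batch, seq, heads, head_dim)`, hence the transpose on the reference output.
#
#   q = k = v = torch.randn(1, 2, 4, 8)  # (batch, heads, seq_len, head_dim)
#   out, _ = eager_attention_forward(nn.Module(), q, k, v, None, scaling=8**-0.5)
#   ref = nn.functional.scaled_dot_product_attention(q, k, v, scale=8**-0.5)
#   torch.testing.assert_close(out, ref.transpose(1, 2))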


class Ovis2VisionEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`Ovis2VisionEncoderLayer`].

    Args:
        config: Ovis2VisionConfig
    """

    def __init__(self, config: Ovis2VisionConfig):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([Ovis2VisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    @can_return_tuple
    def forward(
        self,
        inputs_embeds,
        attention_mask: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutput:
        hidden_states = inputs_embeds
        for encoder_layer in self.layers:
            hidden_states = encoder_layer(hidden_states, attention_mask, **kwargs)
        return BaseModelOutput(last_hidden_state=hidden_states)


class Ovis2VisionTransformer(nn.Module):
    def __init__(self, config: Ovis2VisionConfig):
        super().__init__()
        self.config = config
        self.embeddings = Ovis2VisionEmbeddings(config)
        self.encoder = Ovis2VisionEncoder(config)
        self.rms_norm = Ovis2RMSNorm(config.hidden_size, config.rms_norm_eps)
        self.gradient_checkpointing = False

    @can_return_tuple
    def forward(
        self,
        pixel_values,
        attention_mask: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutput:
        hidden_states = self.embeddings(pixel_values)
        encoder_outputs = self.encoder(inputs_embeds=hidden_states, attention_mask=attention_mask, **kwargs)
        last_hidden_state = encoder_outputs.last_hidden_state
        last_hidden_state = self.rms_norm(last_hidden_state)
        return BaseModelOutput(last_hidden_state=last_hidden_state)


class Ovis2VisualEmbeddingTable(nn.Embedding):
    def forward(self, visual_tokens: torch.Tensor) -> torch.Tensor:
        # Integer ids go through the regular embedding lookup; float inputs are treated
        # as probability distributions and mixed with the embedding matrix instead.
        if visual_tokens.dtype in [torch.int8, torch.int16, torch.int32, torch.int64, torch.long]:
            return super().forward(visual_tokens)
        return torch.matmul(visual_tokens, self.weight)


@auto_docstring
class Ovis2PreTrainedModel(PreTrainedModel):
    config: Ovis2Config
    base_model_prefix = "model"
    input_modalities = ("image", "text")
    supports_gradient_checkpointing = True
    _no_split_modules = ["Ovis2VisionEncoderLayer"]
    _skip_keys_device_placement = "past_key_values"
    _supports_cache_class = True
    _supports_flash_attn = True
    _supports_flex_attn = True
    _supports_sdpa = True
    _can_compile_fullgraph = True
    _supports_attention_backend = True

    def _init_weights(self, module):
        super()._init_weights(module)
        if isinstance(module, Ovis2VisionEmbeddings):
            init.copy_(module.position_ids, torch.arange(module.position_ids.shape[-1]).expand((1, -1)))


def hard_softmax(logits: torch.Tensor, dim: int):
    y_soft = logits.softmax(dim)
    # Straight-through trick: one-hot in the forward pass, softmax gradient in the backward pass.
    index = y_soft.max(dim, keepdim=True)[1]
    y_hard = torch.zeros_like(logits, memory_format=torch.legacy_contiguous_format).scatter_(dim, index, 1.0)
    ret = y_hard - y_soft.detach() + y_soft

    return ret


class Ovis2VisionModel(Ovis2PreTrainedModel):
    config: Ovis2VisionConfig
    _can_record_outputs = {
        "hidden_states": Ovis2VisionEncoderLayer,
        "attentions": Ovis2VisionAttention,
    }

    def __init__(self, config: Ovis2VisionConfig):
        super().__init__(config)
        self.config = config
        self.transformer = Ovis2VisionTransformer(config)
        self.num_visual_indicator_tokens = config.num_visual_indicator_tokens
        self.vocab_size = config.vocab_size
        self.head_linear = nn.Linear(
            config.hidden_size * config.hidden_stride * config.hidden_stride,
            self.vocab_size - self.num_visual_indicator_tokens,
            bias=False,
        )
        self.head_norm = nn.LayerNorm(self.vocab_size - self.num_visual_indicator_tokens)
        self.post_init()

    @merge_with_config_defaults
    @capture_outputs
    def forward(
        self, pixel_values: torch.FloatTensor, **kwargs: Unpack[TransformersKwargs]
    ) -> tuple | BaseModelOutputWithVisualIndicatorFeatures:
        outputs = self.transformer(pixel_values, **kwargs)
        last_hidden_state = outputs[0]

        if self.config.hidden_stride > 1:
            # Regroup `hidden_stride x hidden_stride` neighborhoods of patch tokens into single tokens.
            hidden_stride = self.config.hidden_stride
            num_images, seq_len, hidden_dim = last_hidden_state.shape
            sqrt_l = int(math.sqrt(seq_len))
            if sqrt_l * sqrt_l != seq_len:
                raise ValueError("Token sequence length must be a perfect square")
            last_hidden_state = last_hidden_state.reshape(num_images, sqrt_l, sqrt_l, hidden_dim)
            pad_size = (hidden_stride - sqrt_l % hidden_stride) % hidden_stride
            last_hidden_state = nn.functional.pad(last_hidden_state, (0, 0, 0, pad_size, 0, pad_size), "constant", 0)
            sqrt_l += pad_size
            last_hidden_state = last_hidden_state.reshape(
                num_images, sqrt_l // hidden_stride, hidden_stride, sqrt_l // hidden_stride, hidden_stride, hidden_dim
            )
            last_hidden_state = last_hidden_state.permute(0, 1, 3, 2, 4, 5)
            last_hidden_state = last_hidden_state.reshape(num_images, -1, hidden_stride * hidden_stride * hidden_dim)

        logits = self.head_linear(last_hidden_state)
        logits = self.head_norm(logits)

        if self.config.tokenize_function == "gumbel_argmax":
            prob_token = nn.functional.gumbel_softmax(logits, dim=-1, hard=True)
        elif self.config.tokenize_function == "st_argmax":
            prob_token = hard_softmax(logits, dim=-1)
        elif self.config.tokenize_function == "softmax":
            prob_token = nn.functional.softmax(logits, dim=-1)

        return BaseModelOutputWithVisualIndicatorFeatures(
            last_hidden_state=prob_token,
            pooler_output=logits,
        )


@auto_docstring(
    custom_intro="""
    The Ovis2 model which consists of a vision backbone and a language model, without a language modeling head.
    """
)
class Ovis2Model(Ovis2PreTrainedModel):
    _checkpoint_conversion_mapping = {}

    def __init__(self, config: Ovis2Config):
        super().__init__(config)
        self.vision_tower = Ovis2VisionModel(config.vision_config)
        self.language_model = AutoModel.from_config(config.text_config)
        self.visual_embeddings_table = Ovis2VisualEmbeddingTable(config.vision_config.vocab_size, config.hidden_size)
        self.visual_vocab_size = config.vision_config.vocab_size
        self.visual_indicator_token_ids = config.visual_indicator_token_ids
        self.post_init()

    def get_input_embeddings(self):
        return self.language_model.get_input_embeddings()

    def set_input_embeddings(self, value):
        self.language_model.set_input_embeddings(value)

    @auto_docstring(
        custom_intro="Obtains image last hidden states from the vision tower and apply multimodal projection."
    )
    def get_image_features(
        self, pixel_values: torch.FloatTensor, **kwargs
    ) -> tuple | BaseModelOutputWithVisualIndicatorFeatures:
        image_outputs = self.vision_tower(pixel_values, return_dict=True, **kwargs)
        image_features = image_outputs.last_hidden_state
        batch_size, img_seq_len, _ = image_features.shape
        # Append zero probabilities for the visual-indicator slots so the probability
        # vectors line up with the full visual vocabulary before the table lookup.
        padding_tensor = torch.zeros(
            (batch_size, img_seq_len, self.vision_tower.num_visual_indicator_tokens),
            dtype=image_features.dtype,
            device=image_features.device,
            requires_grad=False,
            layout=image_features.layout,
        )
        image_features = torch.cat([image_features, padding_tensor], dim=2)
        image_features = self.visual_embeddings_table(image_features)

        visual_indicator = torch.arange(
            self.visual_vocab_size - self.vision_tower.num_visual_indicator_tokens,
            self.visual_vocab_size,
            dtype=torch.long,
        ).to(image_features.device)
        image_outputs.last_hidden_state = image_features
        image_outputs.visual_indicator_features = self.visual_embeddings_table(visual_indicator)
        return image_outputs

    def get_placeholder_mask(
        self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor, image_features: torch.FloatTensor
    ):
        """
        Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is
        equal to the length of multimodal features. If the lengths are different, an error is raised.
        """
        if input_ids is None:
            special_image_mask = inputs_embeds == self.get_input_embeddings()(
                torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
            )
            special_image_mask = special_image_mask.all(-1)
        else:
            special_image_mask = input_ids == self.config.image_token_id

        n_image_tokens = special_image_mask.sum()
        n_image_features = image_features.shape[0] * image_features.shape[1]
        special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
        torch_compilable_check(
            inputs_embeds[special_image_mask].numel() == image_features.numel(),
            f"Image features and image tokens do not match, tokens: {n_image_tokens}, features: {n_image_features}",
        )
        return special_image_mask

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        pixel_values: torch.FloatTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | Ovis2ModelOutputWithPast:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.get_input_embeddings()(input_ids)

        image_features = None
        if pixel_values is not None:
            image_outputs = self.get_image_features(pixel_values, return_dict=True)
            image_features = image_outputs.last_hidden_state
            visual_indicator_features = image_outputs.visual_indicator_features

            # Scatter the projected image features into the `<image>` placeholder positions.
            special_image_mask = self.get_placeholder_mask(
                input_ids, inputs_embeds=inputs_embeds, image_features=image_features
            )
            inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)

            for i, visual_indicator_id in enumerate(self.visual_indicator_token_ids):
                if input_ids is None:
                    mask = inputs_embeds == self.get_input_embeddings()(
                        torch.tensor(visual_indicator_id, dtype=torch.long, device=inputs_embeds.device)
                    )
                    mask = mask.all(-1)
                else:
                    mask = (input_ids == visual_indicator_id).to(inputs_embeds.device)
                if mask.any():
                    inputs_embeds[mask] = visual_indicator_features[i].to(inputs_embeds.device, inputs_embeds.dtype)

        outputs = self.language_model(
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=True,
            cache_position=cache_position,
            **kwargs,
        )

        return Ovis2ModelOutputWithPast(
            last_hidden_state=outputs.last_hidden_state,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=image_features if pixel_values is not None else None,
        )


@auto_docstring
class Ovis2ForConditionalGeneration(Ovis2PreTrainedModel, GenerationMixin):
    _checkpoint_conversion_mapping = {}
    _tied_weights_keys = {"lm_head.weight": "model.language_model.embed_tokens.weight"}

    def __init__(self, config: Ovis2Config):
        super().__init__(config)
        self.model = Ovis2Model(config)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.post_init()

    def get_input_embeddings(self):
        return self.model.get_input_embeddings()

    def set_input_embeddings(self, value):
        self.model.set_input_embeddings(value)

    def get_output_embeddings(self) -> nn.Module:
        return self.lm_head

    def get_image_features(
        self, pixel_values: torch.FloatTensor, **kwargs
    ) -> tuple | BaseModelOutputWithVisualIndicatorFeatures:
        return self.model.get_image_features(pixel_values=pixel_values, **kwargs)

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        pixel_values: torch.FloatTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | Ovis2CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from PIL import Image
        >>> import httpx
        >>> from io import BytesIO
        >>> from transformers import AutoProcessor, Ovis2ForConditionalGeneration

        >>> model = Ovis2ForConditionalGeneration.from_pretrained("thisisiron/Ovis2-2B-hf")
        >>> processor = AutoProcessor.from_pretrained("thisisiron/Ovis2-2B-hf")

        >>> prompt = "<|im_start|>user\n<image>\nDescribe the image.<|im_end|>\n<|im_start|>assistant\n"
        >>> url = "http://images.cocodataset.org/val2014/COCO_val2014_000000537955.jpg"
        >>> with httpx.stream("GET", url) as response:
        ...     image = Image.open(BytesIO(response.read()))

        >>> inputs = processor(images=image, text=prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(**inputs, max_new_tokens=15)
        >>> processor.batch_decode(generate_ids, skip_special_tokens=True)[0]
        "user\n\nDescribe the image.\nassistant\nThe image features a brown dog standing on a wooden floor, looking up with"
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        outputs = self.model(
            input_ids=input_ids,
            pixel_values=pixel_values,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=True,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs[0]
        # Only compute logits for the requested trailing positions (saves memory during generation).
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(
                logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs
            )

        return Ovis2CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=outputs.image_hidden_states,
        )

    def prepare_inputs_for_generation(
        self,
        input_ids,
        past_key_values=None,
        inputs_embeds=None,
        pixel_values=None,
        attention_mask=None,
        cache_position=None,
        logits_to_keep=None,
        is_first_iteration=False,
        **kwargs,
    ):
        model_inputs = super().prepare_inputs_for_generation(
            input_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            logits_to_keep=logits_to_keep,
            is_first_iteration=is_first_iteration,
            **kwargs,
        )

        # Pixel values are only needed on the first forward pass; afterwards the image
        # embeddings are already merged into the cached sequence.
        if is_first_iteration or not kwargs.get("use_cache", True):
            model_inputs["pixel_values"] = pixel_values

        return model_inputs


__all__ = ["Ovis2PreTrainedModel", "Ovis2Model", "Ovis2ForConditionalGeneration"]
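

# Toy illustration (editor's note, made-up sizes) of the `masked_scatter` step in
# `Ovis2Model.forward`: image features are written, in order, into the embedding
# positions flagged by the placeholder mask.
#
#   embeds = torch.zeros(1, 4, 2)
#   mask = torch.tensor([[False, True, True, False]]).unsqueeze(-1).expand_as(embeds)
#   feats = torch.arange(4.0).reshape(2, 2)        # one row per image token
#   embeds = embeds.masked_scatter(mask, feats)    # rows 1 and 2 become feats[0], feats[1]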