from typing import Optional, Union

import torch
from torch import nn

from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationConfig
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...processing_utils import Unpack
from ...utils import auto_docstring, can_return_tuple, logging
from ..idefics3.configuration_idefics3 import Idefics3Config, Idefics3VisionConfig
from ..idefics3.image_processing_idefics3 import Idefics3ImageProcessor
from ..idefics3.image_processing_idefics3_fast import Idefics3ImageProcessorFast
from ..idefics3.modeling_idefics3 import (
    Idefics3BaseModelOutputWithPast,
    Idefics3ForConditionalGeneration,
    Idefics3Model,
    Idefics3PreTrainedModel,
    Idefics3VisionTransformer,
)


logger = logging.get_logger(__name__)


class SmolVLMVisionConfig(Idefics3VisionConfig):
    """
    This is the configuration class to store the configuration of a [`SmolVLMVisionModel`]. It is used to instantiate a
    SmolVLM vision encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the SigLIP checkpoint
    [google/siglip-so400m-patch14-384](https://huggingface.co/google/siglip-so400m-patch14-384) used in SmolVLM
    [HuggingFaceTB/SmolVLM2-2.2B-Instruct](https://huggingface.co/HuggingFaceTB/SmolVLM2-2.2B-Instruct).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 1152):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_channels (`int`, *optional*, defaults to 3):
            Number of channels in the input images.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 32):
            The size (resolution) of each patch.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.

    Example:

    ```python
    >>> from transformers.models.smolvlm.modeling_smolvlm import SmolVLMVisionTransformer
    >>> from transformers.models.smolvlm.configuration_smolvlm import SmolVLMVisionConfig

    >>> # Initializing a SmolVLMVisionConfig with google/siglip-so400m-patch14-384 style configuration
    >>> configuration = SmolVLMVisionConfig()

    >>> # Initializing a SmolVLMVisionTransformer (with random weights) from the google/siglip-so400m-patch14-384 style configuration
    >>> model = SmolVLMVisionTransformer(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
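
    >>> # Illustrative addition (not in the original example): any documented field above can be overridden
    >>> configuration = SmolVLMVisionConfig(patch_size=14)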
    ```"""

    model_type = "smolvlm_vision"


class SmolVLMPreTrainedModel(Idefics3PreTrainedModel):
    pass


class SmolVLMVisionTransformer(Idefics3VisionTransformer):
    pass


class SmolVLMConfig(Idefics3Config):
    """
    This is the configuration class to store the configuration of a [`SmolVLMModel`]. It is used to instantiate a
    SmolVLM model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the model of the SmolVLM
    [HuggingFaceTB/SmolVLM2-2.2B-Instruct](https://huggingface.co/HuggingFaceTB/SmolVLM2-2.2B-Instruct) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should cache the key/value pairs of the attention mechanism. Only
            relevant if `config.is_decoder=True`.
        image_token_id (`int`, *optional*, defaults to 128257):
            The id of the "image" token.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether or not to tie the word embeddings with the token embeddings.
        vision_config (`SmolVLMVisionConfig` or `dict`, *optional*, defaults to `SmolVLMVisionConfig`):
            Custom vision config or dict for the vision tower
        text_config (`PretrainedConfig` or `dict`, *optional*, defaults to `LlamaConfig`):
            Custom text config or dict for the text model
        scale_factor (`int`, *optional*, defaults to 2):
            The scale factor for the image encoder.
        pad_token_id (`int`, *optional*, defaults to 128002):
            The id of the padding token.

    Example:
    ```python
    >>> from transformers import SmolVLMModel, SmolVLMConfig
    >>> # Initializing configuration
    >>> configuration = SmolVLMConfig()
    >>> # Initializing a model from the configuration
    >>> model = SmolVLMModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
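    >>> # Illustrative addition (not in the original example): building the config from a custom vision sub-config
    >>> from transformers import SmolVLMVisionConfig
    >>> vision_config = SmolVLMVisionConfig(hidden_size=1152)
    >>> configuration = SmolVLMConfig(vision_config=vision_config)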
    ```"""

    model_type = "smolvlm"


class SmolVLMImageProcessor(Idefics3ImageProcessor):
    pass


class SmolVLMImageProcessorFast(Idefics3ImageProcessorFast):
    pass


class SmolVLMBaseModelOutputWithPast(Idefics3BaseModelOutputWithPast):
    pass


class SmolVLMModel(Idefics3Model):
    """
    A subclass of Idefics3Model. We do *not* remove or block the call to inputs_merger
    in forward. Instead, we override inputs_merger here with custom logic.
    """

    def inputs_merger(
        self, input_ids: torch.LongTensor, inputs_embeds: torch.Tensor, image_hidden_states: torch.Tensor
    ) -> torch.Tensor:
        _, patch_size, _ = image_hidden_states.shape

        if input_ids is None:
            image_mask = inputs_embeds == self.get_input_embeddings()(
                torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
            )
            image_mask = image_mask[..., 0]
        else:
            image_mask = input_ids == self.config.image_token_id

        num_image_tokens = image_mask.sum(dim=1)
        if not torch.all(num_image_tokens % patch_size == 0):
            raise ValueError("At least one sample has <image> tokens not divisible by patch_size.")

        blocks_per_sample = num_image_tokens // patch_size

        offsets = torch.nn.functional.pad(blocks_per_sample.cumsum(dim=0), (1, 0), value=0)
        block_offset = offsets[:-1]
        row_cum = image_mask.cumsum(dim=-1)
        chunk_idx = (row_cum - 1) // patch_size
        local_idx = (row_cum - 1) % patch_size
        block_idx = block_offset.unsqueeze(1) + chunk_idx

        image_embeds = torch.zeros_like(inputs_embeds)
        image_embeds[image_mask] = image_hidden_states[block_idx[image_mask], local_idx[image_mask], :]

        merged_embeds = torch.where(image_mask.unsqueeze(-1), image_embeds, inputs_embeds)
        return merged_embeds

    def get_image_features(
        self, pixel_values: torch.FloatTensor, pixel_attention_mask: Optional[torch.LongTensor] = None
    ):
        """
        Encodes images into continuous embeddings that can be forwarded to the language model.

        Args:
            pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
                The tensors corresponding to the input images.
            pixel_attention_mask (`torch.LongTensor`, *optional*):
                The attention mask indicating padded regions in the image.
        """
        batch_size, num_images, num_channels, height, width = pixel_values.shape
        pixel_values = pixel_values.to(dtype=self.dtype)  # fp16 compatibility
        pixel_values = pixel_values.view(batch_size * num_images, *pixel_values.shape[2:])

        # Remove padding images - padding images are full 0.
        nb_values_per_image = pixel_values.shape[1:].numel()
        real_images_inds = (pixel_values == 0.0).sum(dim=(-1, -2, -3)) != nb_values_per_image

        if not any(real_images_inds):
            # no images, leave one empty image.
            real_images_inds[0] = True

        pixel_values = pixel_values[real_images_inds].contiguous()

        # Handle the vision attention mask
        if pixel_attention_mask is None:
            pixel_attention_mask = torch.ones(
                size=[pixel_values.shape[i] for i in (0, 2, 3)],
                dtype=torch.bool,
                device=pixel_values.device,
            )
        else:
            # Remove padding images from the mask
            pixel_attention_mask = pixel_attention_mask.view(batch_size * num_images, *pixel_attention_mask.shape[2:])
            pixel_attention_mask = pixel_attention_mask[real_images_inds].contiguous()

        patch_size = self.config.vision_config.patch_size
        patches_subgrid = pixel_attention_mask.unfold(dimension=1, size=patch_size, step=patch_size)
        patches_subgrid = patches_subgrid.unfold(dimension=2, size=patch_size, step=patch_size)
        patch_attention_mask = (patches_subgrid.sum(dim=(-1, -2)) > 0).bool()

        # Get sequence from the vision encoder
        image_hidden_states = self.vision_model(
            pixel_values=pixel_values, patch_attention_mask=patch_attention_mask
        ).last_hidden_state

        # Modality projection & resampling
        image_hidden_states = self.connector(image_hidden_states)
        return image_hidden_states

    @can_return_tuple
    @auto_docstring(
        custom_intro="""
        Inputs fed to the model can have an arbitrary number of images. To account for this, pixel_values fed to
        the model have image padding -> (batch_size, max_num_images, 3, max_heights, max_widths) where
        max_num_images is the maximum number of images among the batch_size samples in the batch.
        Padding images are not needed beyond padding the pixel_values at the entrance of the model.
        For efficiency, we only pass through the vision_model's forward the real images by
        discarding the padding images i.e. pixel_values of size (image_batch_size, 3, height, width) where
        image_batch_size would be 7 when num_images_per_sample=[1, 3, 1, 2] and max_num_images would be 3.
        """
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        pixel_attention_mask: Optional[torch.BoolTensor] = None,
        image_hidden_states: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Union[tuple, SmolVLMBaseModelOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if self.training and self.text_model.gradient_checkpointing and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
            )
            use_cache = False

        if input_ids is not None:
            batch_size, seq_length = input_ids.shape
        elif inputs_embeds is not None:
            batch_size, seq_length, _ = inputs_embeds.shape
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        if inputs_embeds is None:
            inputs_embeds = self.get_input_embeddings()(input_ids).to(self.device)

        if pixel_values is not None and image_hidden_states is not None:
            raise ValueError("You cannot specify both pixel_values and image_hidden_states at the same time")
        elif pixel_values is not None:
            image_hidden_states = self.get_image_features(pixel_values, pixel_attention_mask)
        elif image_hidden_states is not None:
            image_hidden_states = image_hidden_states.to(dtype=self.dtype, device=inputs_embeds.device)

        if image_hidden_states is not None:
            inputs_embeds = self.inputs_merger(
                input_ids=input_ids, inputs_embeds=inputs_embeds, image_hidden_states=image_hidden_states
            )

        outputs = self.text_model(
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=True,
            cache_position=cache_position,
            **kwargs,
        )

        return SmolVLMBaseModelOutputWithPast(
            last_hidden_state=outputs.last_hidden_state,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=image_hidden_states,
        )


class SmolVLMForConditionalGeneration(Idefics3ForConditionalGeneration):
    def __init__(self, config):
        super().__init__(config)
        self.model = SmolVLMModel(config)
        self.model.generation_config = GenerationConfig.from_model_config(config)
        self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
        self.post_init()

    def forward(self, **super_kwargs):
        r"""
        pixel_attention_mask (`torch.Tensor` of shape `(batch_size, image_size, image_size)`, *optional*):
            Mask to avoid performing attention on padding pixel indices.
        image_hidden_states (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
            The hidden states of the image encoder after modality projection.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or `model.image_token_id`. Tokens with indices set to `model.image_token_id` are
            ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> import requests
        >>> import torch
        >>> from PIL import Image
        >>> from io import BytesIO

        >>> from transformers import AutoProcessor, AutoModelForImageTextToText
        >>> from transformers.image_utils import load_image

        >>> # Note that passing the image urls (instead of the actual pil images) to the processor is also possible
        >>> image1 = load_image("https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg")
        >>> image2 = load_image("https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg")
        >>> image3 = load_image("https://cdn.britannica.com/68/170868-050-8DDE8263/Golden-Gate-Bridge-San-Francisco.jpg")

        >>> processor = AutoProcessor.from_pretrained("HuggingFaceTB/SmolVLM2-2.2B-Instruct")
        >>> model = AutoModelForImageTextToText.from_pretrained("HuggingFaceTB/SmolVLM2-2.2B-Instruct", dtype=torch.bfloat16, device_map="auto")

        >>> # Create inputs
        >>> messages = [
        ...     {
        ...         "role": "user",
        ...         "content": [
        ...             {"type": "video", "path": "path/to/video"},
        ...             {"type": "text", "text": "What is happening in this video?"},
        ...         ]
        ...     }
        ... ]

        >>> inputs = processor.apply_chat_template([messages], add_generation_prompt=True)

        >>> # Generate
        >>> generated_ids = model.generate(**inputs, max_new_tokens=256)
        >>> generated_texts = processor.batch_decode(generated_ids, skip_special_tokens=True)

        >>> print(generated_texts)
        ```"""
        super().forward(**super_kwargs)


__all__ = [
    "SmolVLMVisionConfig",
    "SmolVLMConfig",
    "SmolVLMImageProcessor",
    "SmolVLMImageProcessorFast",
    "SmolVLMForConditionalGeneration",
    "SmolVLMPreTrainedModel",
    "SmolVLMModel",
    "SmolVLMVisionTransformer",
]