
    %	&hC                     d   d dl mZmZmZmZ d dlZd dlZd dlmZ ddlm	Z	 ddl
mZ ddlmZmZ dd	lmZ dd
lmZmZmZmZmZ  ej.                  e      Z G d de      Z G d de      Z G d de      Z G d de      Z G d de      Z G d de      Z G d de      Z  G d de      Z!g dZ"y)    )ListOptionalTupleUnionN)nn   )DynamicCache)logging   )Idefics3ConfigIdefics3VisionConfig)Idefics3ImageProcessor)Idefics3BaseModelOutputWithPast Idefics3ForConditionalGenerationIdefics3ModelIdefics3PreTrainedModelIdefics3VisionTransformerc                       e Zd ZdZdZy)SmolVLMVisionConfiga  
    This is the configuration class to store the configuration of a [`SmolVLMVisionModel`]. It is used to instantiate a
    SmolVLM vision encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the SigLIP checkpoint
    [google/siglip-so400m-patch14-384](https://huggingface.co/google/siglip-so400m-patch14-384) used in SmolVLM
    [HuggingFaceTB/SmolVLM2-2.2B-Instruct](https://huggingface.co/HuggingFaceTB/SmolVLM2-2.2B-Instruct).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 1152):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_channels (`int`, *optional*, defaults to 3):
            Number of channels in the input images.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 32):
            The size (resolution) of each patch.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.

    Example:

    ```python
    >>> from transformers.models.smolvlm.modeling_smolvlm import SmolVLMVisionTransformer
    >>> from transformers.models.smolvlm.configuration_smolvlm import SmolVLMVisionConfig

    >>> # Initializing a SmolVLMVisionConfig with google/siglip-so400m-patch14-384 style configuration
    >>> configuration = SmolVLMVisionConfig()

    >>> # Initializing a SmolVLMVisionTransformer (with random weights) from the google/siglip-so400m-patch14-384 style configuration
    >>> model = SmolVLMVisionTransformer(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "smolvlm_vision"


class SmolVLMPreTrainedModel(Idefics3PreTrainedModel):
    pass


class SmolVLMVisionTransformer(Idefics3VisionTransformer):
    pass


class SmolVLMConfig(Idefics3Config):
    r"""
    This is the configuration class to store the configuration of a [`SmolVLMModel`]. It is used to instantiate a
    SmolVLM model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the SmolVLM
    [HuggingFaceTB/SmolVLM2-2.2B-Instruct](https://huggingface.co/HuggingFaceTB/SmolVLM2-2.2B-Instruct) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should cache the key/value pairs of the attention mechanism. Only
            relevant if `config.is_decoder=True`.
        image_token_id (`int`, *optional*, defaults to 128257):
            The id of the "image" token.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether or not to tie the input word embeddings with the output embeddings.
        vision_config (`SmolVLMVisionConfig` or `dict`, *optional*, defaults to `SmolVLMVisionConfig`):
            Custom vision config or dict for the vision tower
        text_config (`PretrainedConfig` or `dict`, *optional*, defaults to `LlamaConfig`):
            Custom text config or dict for the text model
        scale_factor (`int`, *optional*, defaults to 2):
            The scale factor for the image encoder.
        pad_token_id (`int`, *optional*, defaults to 128002):
            The id of the padding token.

    Example:
    ```python
    >>> from transformers import SmolVLMModel, SmolVLMConfig
    >>> # Initializing configuration
    >>> configuration = SmolVLMConfig()
    >>> # Initializing a model from the configuration
    >>> model = SmolVLMModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "smolvlm"


class SmolVLMImageProcessor(Idefics3ImageProcessor):
    pass


class SmolVLMBaseModelOutputWithPast(Idefics3BaseModelOutputWithPast):
    pass


class SmolVLMModel(Idefics3Model):
    """
    A subclass of Idefics3Model. We do *not* remove or block the call to inputs_merger
    in forward. Instead, we override inputs_merger here with custom logic.
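
    The examples below are toy illustrations of the indexing this class performs
    (the values `image_token_id = 5` and `patch_size = 2` are assumed here purely
    for demonstration; the real values come from the model config).

    Block/patch indexing as in `inputs_merger`:

    ```python
    >>> import torch
    >>> input_ids = torch.tensor([[7, 5, 5, 8, 5, 5]])  # two runs of two <image> tokens
    >>> image_mask = input_ids == 5
    >>> row_cum = image_mask.cumsum(dim=-1)
    >>> ((row_cum - 1) // 2)[image_mask].tolist()  # image block read by each <image> slot
    [0, 0, 1, 1]
    >>> ((row_cum - 1) % 2)[image_mask].tolist()  # patch row within that block
    [0, 1, 0, 1]
    ```

    Patch-level attention mask as in `forward` (an 8x8 pixel mask with 4-pixel
    patches collapses to one boolean per patch):

    ```python
    >>> mask = torch.ones(1, 8, 8, dtype=torch.bool)
    >>> sub = mask.unfold(dimension=1, size=4, step=4).unfold(dimension=2, size=4, step=4)
    >>> (sub.sum(dim=(-1, -2)) > 0).shape
    torch.Size([1, 2, 2])
    ```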
    """

    def inputs_merger(
        self, input_ids: torch.LongTensor, inputs_embeds: torch.Tensor, image_hidden_states: torch.Tensor
    ) -> torch.Tensor:
        _, patch_size, _ = image_hidden_states.shape

        image_mask = input_ids == self.image_token_id
        num_image_tokens = image_mask.sum(dim=1)
        if not torch.all(num_image_tokens % patch_size == 0):
            raise ValueError("At least one sample has <image> tokens not divisible by patch_size.")

        blocks_per_sample = num_image_tokens // patch_size

        # Map every <image> position to a (block, patch-within-block) pair so it can be
        # filled with the matching row of `image_hidden_states`.
        offsets = torch.nn.functional.pad(blocks_per_sample.cumsum(dim=0), (1, 0), value=0)
        block_offset = offsets[:-1]
        row_cum = image_mask.cumsum(dim=-1)
        chunk_idx = (row_cum - 1) // patch_size
        local_idx = (row_cum - 1) % patch_size
        block_idx = block_offset.unsqueeze(1) + chunk_idx

        image_embeds = torch.zeros_like(inputs_embeds)
        image_embeds[image_mask] = image_hidden_states[block_idx[image_mask], local_idx[image_mask], :]

        merged_embeds = torch.where(image_mask.unsqueeze(-1), image_embeds, inputs_embeds)
        return merged_embeds

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        pixel_attention_mask: Optional[torch.BoolTensor] = None,
        image_hidden_states: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Union[Tuple, SmolVLMBaseModelOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if self.training and self.text_model.gradient_checkpointing and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
            )
            use_cache = False

        if input_ids is not None:
            batch_size, seq_length = input_ids.shape
        elif inputs_embeds is not None:
            batch_size, seq_length, _ = inputs_embeds.shape
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        past_seen_tokens = 0
        if use_cache:
            if past_key_values is None:
                past_key_values = DynamicCache()
            past_seen_tokens = past_key_values.get_seq_length()

        if inputs_embeds is not None and input_ids is None and past_seen_tokens == 0:
            raise ValueError("When first calling the model, if input_embeds are passed, input_ids should not be None.")

        if inputs_embeds is None:
            inputs_embeds = self.text_model.get_input_embeddings()(input_ids).to(input_ids.device)

        if pixel_values is not None and image_hidden_states is not None:
            raise ValueError("You cannot specify both pixel_values and image_hidden_states at the same time")
        elif pixel_values is not None:
            batch_size, num_images, num_channels, height, width = pixel_values.shape
            pixel_values = pixel_values.view(batch_size * num_images, *pixel_values.shape[2:])

            # Remove padding images - padding images are full 0.
            nb_values_per_image = pixel_values.shape[1:].numel()
            real_images_inds = (pixel_values == 0.0).sum(dim=(-1, -2, -3)) != nb_values_per_image
            if not any(real_images_inds):
                # no images, leave one empty image.
                real_images_inds[0] = True

            pixel_values = pixel_values[real_images_inds].contiguous()

            # Handle the vision attention mask.
            if pixel_attention_mask is None:
                pixel_attention_mask = torch.ones(
                    size=[pixel_values.shape[i] for i in (0, 2, 3)],
                    dtype=torch.bool,
                    device=pixel_values.device,
                )
            else:
                # Remove padding images from the mask.
                pixel_attention_mask = pixel_attention_mask.view(
                    batch_size * num_images, *pixel_attention_mask.shape[2:]
                )
                pixel_attention_mask = pixel_attention_mask[real_images_inds].contiguous()

            # Collapse the pixel-level mask to one boolean per patch.
            patch_size = self.config.vision_config.patch_size
            patches_subgrid = pixel_attention_mask.unfold(dimension=1, size=patch_size, step=patch_size)
            patches_subgrid = patches_subgrid.unfold(dimension=2, size=patch_size, step=patch_size)
            patch_attention_mask = (patches_subgrid.sum(dim=(-1, -2)) > 0).bool()

            # Get the sequence from the vision encoder, then project it into the
            # text embedding space (modality projection & resampling).
            image_hidden_states = self.vision_model(
                pixel_values=pixel_values,
                patch_attention_mask=patch_attention_mask,
            ).last_hidden_state
            image_hidden_states = self.connector(image_hidden_states)
        elif image_hidden_states is not None:
            image_hidden_states = image_hidden_states.to(dtype=self.dtype, device=input_ids.device)

        if inputs_embeds is not None and image_hidden_states is not None:
            inputs_embeds = self.inputs_merger(
                input_ids=input_ids,
                inputs_embeds=inputs_embeds,
                image_hidden_states=image_hidden_states,
            )

        outputs = self.text_model(
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
        )

        if not return_dict:
            return tuple(v for v in [*outputs, image_hidden_states] if v is not None)

        return SmolVLMBaseModelOutputWithPast(
            last_hidden_state=outputs.last_hidden_state,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=image_hidden_states,
        )


class SmolVLMForConditionalGeneration(Idefics3ForConditionalGeneration):
    """
    A subclass of Idefics3ForConditionalGeneration that uses SmolVLMModel
    instead of the default Idefics3Model.
    """

    def __init__(self, config):
        super().__init__(config)
        self.model = SmolVLMModel(config)
        self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
        self.post_init()

    def forward(self, **super_kwargs):
        r"""
        Args:
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
                config.vocab_size]` or `model.image_token_id` (where `model` is your instance of `SmolVLMForConditionalGeneration`).
                Tokens with indices set to `model.image_token_id` are ignored (masked), the loss is only
                computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        Returns:

        Example:

        ```python
        >>> import requests
        >>> import torch
        >>> from PIL import Image
        >>> from io import BytesIO

        >>> from transformers import AutoProcessor, AutoModelForImageTextToText
        >>> from transformers.image_utils import load_image

        >>> # Note that passing the image urls (instead of the actual pil images) to the processor is also possible
        >>> image1 = load_image("https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg")
        >>> image2 = load_image("https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg")
        >>> image3 = load_image("https://cdn.britannica.com/68/170868-050-8DDE8263/Golden-Gate-Bridge-San-Francisco.jpg")

        >>> processor = AutoProcessor.from_pretrained("HuggingFaceTB/SmolVLM2-2.2B-Instruct")
        >>> model = AutoModelForImageTextToText.from_pretrained("HuggingFaceTB/SmolVLM2-2.2B-Instruct", torch_dtype=torch.bfloat16, device_map="auto")

        >>> # Create inputs
        >>> messages = [
        ...     {
        ...         "role": "user",
        ...         "content": [
        ...             {"type": "video", "path": "path/to/video"},
        ...             {"type": "text", "text": "What is happening in this video?"},
        ...         ]
        ...     }
        ... ]

        >>> inputs = processor.apply_chat_template([messages], add_generation_prompt=True)

        >>> # Generate
        >>> generated_ids = model.generate(**inputs, max_new_tokens=256)
        >>> generated_texts = processor.batch_decode(generated_ids, skip_special_tokens=True)

        >>> print(generated_texts)
        ```"""
        super().forward(**super_kwargs)


__all__ = [
    "SmolVLMVisionConfig",
    "SmolVLMConfig",
    "SmolVLMImageProcessor",
    "SmolVLMForConditionalGeneration",
    "SmolVLMPreTrainedModel",
    "SmolVLMModel",
    "SmolVLMVisionTransformer",
]