
    %	&hy                        d dl Zd dlmZmZmZmZmZmZm	Z	 d dl
Z
d dlmZ d dlmZmZmZ ddlmZ ddlmZmZmZ ddlmZmZ ddlmZmZ dd	lmZmZmZm Z m!Z! d
dl"m#Z#  e jH                  e%      Z&dZ'dZ( G d dejR                        Z* G d dejR                        Z+ G d de      Z,	 d7dejR                  de
jZ                  de
jZ                  de
jZ                  dee
jZ                     de.de.fdZ/ G d dejR                        Z0 G d dejR                        Z1 G d  d!ejR                        Z2 G d" d#ejR                        Z3 G d$ d%ejR                        Z4 G d& d'ejR                        Z5 G d( d)ejR                        Z6 G d* d+ejR                        Z7d,Z8g d-Z9d.Z: ed/e:       G d0 d1e,             Z;dZ<d2Z= ed3e:       G d4 d5e,             Z>g d6Z?y)8    N)CallableDictListOptionalSetTupleUnion)BCEWithLogitsLossCrossEntropyLossMSELoss   )ACT2FN)BaseModelOutputBaseModelOutputWithPoolingImageClassifierOutput)ALL_ATTENTION_FUNCTIONSPreTrainedModel) find_pruneable_heads_and_indicesprune_linear_layer)add_code_sample_docstringsadd_start_docstrings%add_start_docstrings_to_model_forwardlogging	torch_int   )IJepaConfigzfacebook/ijepa_vith14_1kr   c                   `     e Zd ZdZ fdZddej                  dedej                  fdZ xZ	S )IJepaPatchEmbeddingsz
    This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
    `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
    Transformer.
    c                    t         |           |j                  |j                  }}|j                  |j
                  }}t        |t        j                  j                        r|n||f}t        |t        j                  j                        r|n||f}|d   |d   z  |d   |d   z  z  }|| _        || _        || _        || _
        t        j                  ||||      | _        y )Nr   r   )kernel_sizestride)super__init__
image_size
patch_sizenum_channelshidden_size
isinstancecollectionsabcIterablenum_patchesnnConv2d
projection)selfconfigr$   r%   r&   r'   r,   	__class__s          ~/var/www/pru.catia.catastroantioquia-mas.com/valormas/lib/python3.12/site-packages/transformers/models/ijepa/modeling_ijepa.pyr#   zIJepaPatchEmbeddings.__init__,   s    !'!2!2F4E4EJ
$*$7$79K9Kk#-j+//:R:R#SZZdfpYq
#-j+//:R:R#SZZdfpYq
!!}
15*Q-:VW=:XY$$(&))L+:^hi    pixel_valuesinterpolate_pos_encodingreturnc                    |j                   \  }}}}|| j                  k7  rt        d| j                   d| d      |sV|| j                  d   k7  s|| j                  d   k7  r2t        d| d| d| j                  d    d| j                  d    d		      | j	                  |      j                  d
      j                  dd
      }|S )NzoMake sure that the channel dimension of the pixel values match with the one set in the configuration. Expected z	 but got .r   r   zInput image size (*z) doesn't match model (z).   )shaper&   
ValueErrorr$   r/   flatten	transpose)r0   r5   r6   
batch_sizer&   heightwidth
embeddingss           r3   forwardzIJepaPatchEmbeddings.forward;   s    2>2D2D/
L&%4,,,!../yaI  (++u8J/J (% 9+,Adooa.@-AE  __\2::1=GG1M
r4   F)
__name__
__module____qualname____doc__r#   torchTensorboolrD   __classcell__r2   s   @r3   r   r   %   s3    jELL D ]b]i]i r4   r   c            	            e Zd ZdZddededdf fdZdej                  de	d	e	dej                  fd
Z
	 	 ddej                  deej                     dedej                  fdZ xZS )IJepaEmbeddingszb
    Construct the CLS token, position and patch embeddings. Optionally, also the mask token.
    r1   use_mask_tokenr7   Nc                    t         |           |r4t        j                  t	        j
                  dd|j                              nd | _        t        |      | _	        | j                  j                  }t        j                  t	        j                  d||j                              | _        t        j                  |j                        | _        |j                   | _        || _        y )Nr   )r"   r#   r-   	ParameterrJ   zerosr'   
mask_tokenr   patch_embeddingsr,   randnposition_embeddingsDropouthidden_dropout_probdropoutr%   r1   )r0   r1   rQ   r,   r2   s       r3   r#   zIJepaEmbeddings.__init__Q   s    Q_",,u{{1a9K9K'LMei 4V <++77#%<<A{FL^L^0_#` zz&"<"<= ++r4   rC   rA   rB   c                 0   |j                   d   }| j                  j                   d   }t        j                  j	                         s||k(  r||k(  r| j                  S | j                  }|j                   d   }|| j
                  z  }|| j
                  z  }	t        |dz        }
|j                  d|
|
|      }|j                  dddd      }t        j                  j                  |||	fdd	      }|j                  dddd      j                  dd|      }|S )
a   
        This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
        images. This method is also adapted to support torch.jit tracing.

        Adapted from:
        - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
        - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
        r   g      ?r   r   r;   bicubicF)sizemodealign_corners)r<   rX   rJ   jit
is_tracingr%   r   reshapepermuter-   
functionalinterpolateview)r0   rC   rA   rB   r,   num_positionspatch_pos_embeddim
new_height	new_widthsqrt_num_positionss              r3   r6   z(IJepaEmbeddings.interpolate_pos_encoding[   s#    !&&q)0066q9 yy##%+*F6UZ?+++22r"t.
T__,	&}c'9:)11!5GI[]`a)11!Q1=--33i(	 4 
 *11!Q1=BB1b#Nr4   r5   bool_masked_posr6   c                 x   |j                   \  }}}}| j                  ||      }|Z|j                   d   }	| j                  j                  ||	d      }
|j	                  d      j                  |
      }|d|z
  z  |
|z  z   }|r|| j                  |||      z   }n|| j                  z   }| j                  |      }|S )N)r6   r   r]         ?)	r<   rV   rU   expand	unsqueezetype_asr6   rX   r[   )r0   r5   ro   r6   r@   _rA   rB   rC   
seq_lengthmask_tokensmasks               r3   rD   zIJepaEmbeddings.forward   s     (4'9'9$
Avu**<Rj*k
&#))!,J//00ZLK",,R088ED#sTz2[45GGJ $#d&C&CJPVX]&^^J#d&>&>>J\\*-
r4   rE   NF)rF   rG   rH   rI   r   rL   r#   rJ   rK   intr6   r   
BoolTensorrD   rM   rN   s   @r3   rP   rP   L   s    { D T %5<< % %UX %]b]i]i %T 7;).	ll "%"2"23 #'	
 
r4   rP   c                       e Zd ZdZeZdZdZdZddgZ	dZ
dZdeej                  ej                  ej                   f   dd	fd
Zy	)IJepaPreTrainedModelz
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    ijepar5   TrP   
IJepaLayermoduler7   Nc                 l   t        |t        j                  t        j                  f      rt        j                  j                  |j                  j                  j                  t        j                        d| j                  j                        j                  |j                  j                        |j                  _        |j                  %|j                  j                  j                          yyt        |t        j                         rJ|j                  j                  j                          |j                  j                  j#                  d       yt        |t$              rt        j                  j                  |j&                  j                  j                  t        j                        d| j                  j                        j                  |j&                  j                        |j&                  _        |j(                  %|j(                  j                  j                          yyy)zInitialize the weights        )meanstdNrq   )r(   r-   Linearr.   inittrunc_normal_weightdatatorJ   float32r1   initializer_rangedtypebiaszero_	LayerNormfill_rP   rX   rU   )r0   r   s     r3   _init_weightsz"IJepaPreTrainedModel._init_weights   s   fryy"))45 "$!6!6""%%emm43DKKDaDa "7 "b$$% MM {{&  &&( '-KK""$MM$$S)0.0gg.C.C**//225==AKK11 /D / b++112	 &&+
   ,!!&&,,. - 1r4   )rF   rG   rH   rI   r   config_classbase_model_prefixmain_input_namesupports_gradient_checkpointing_no_split_modules_supports_sdpa_supports_flash_attn_2r	   r-   r   r.   r   r    r4   r3   r}   r}      sa    
 L$O&*#*L9N!/E"))RYY*L$M /RV /r4   r}   r   querykeyvalueattention_maskscalingr[   c                    t        j                  ||j                  dd            |z  }t        j                  j                  |dt         j                        j                  |j                        }t        j                  j                  ||| j                        }|||z  }t        j                  ||      }	|	j                  dd      j                         }	|	|fS )Nr]   )rk   r   )ptrainingr   r;   )rJ   matmulr?   r-   rf   softmaxr   r   r   r[   r   
contiguous)
r   r   r   r   r   r   r[   kwargsattn_weightsattn_outputs
             r3   eager_attention_forwardr      s     <<s}}R'<=GL ==((2U]](SVVW\WbWbcL ==((6??([L !#n4,,|U3K''1-88:K$$r4   c            
            e Zd Zdeddf fdZdej                  dej                  fdZ	 d
deej                     de	de
eej                  ej                  f   eej                     f   fd	Z xZS )IJepaSelfAttentionr1   r7   Nc                 2   t         |           |j                  |j                  z  dk7  r2t	        |d      s&t        d|j                   d|j                   d      || _        |j                  | _        t        |j                  |j                  z        | _        | j                  | j                  z  | _	        |j                  | _        | j                  dz  | _        d| _        t        j                  |j                  | j                  |j                         | _        t        j                  |j                  | j                  |j                         | _        t        j                  |j                  | j                  |j                         | _        y )	Nr   embedding_sizezThe hidden size z4 is not a multiple of the number of attention heads r9   g      F)r   )r"   r#   r'   num_attention_headshasattrr=   r1   rz   attention_head_sizeall_head_sizeattention_probs_dropout_probdropout_probr   	is_causalr-   r   qkv_biasr   r   r   r0   r1   r2   s     r3   r#   zIJepaSelfAttention.__init__   sF    : ::a?PVXhHi"6#5#5"6 7334A7 
 #)#=#= #&v'9'9F<V<V'V#W !558P8PP"??//5YYv1143E3EFOO\
99V//1C1C&//ZYYv1143E3EFOO\
r4   xc                     |j                         d d | j                  | j                  fz   }|j                  |      }|j	                  dddd      S )Nr]   r   r;   r   r   )r_   r   r   rh   re   )r0   r   new_x_shapes      r3   transpose_for_scoresz'IJepaSelfAttention.transpose_for_scores   sL    ffhsmt'?'?AYAY&ZZFF;yyAq!$$r4   	head_maskoutput_attentionsc           
         | j                  | j                  |            }| j                  | j                  |            }| j                  | j                  |            }t        }| j
                  j                  dk7  rN| j
                  j                  dk(  r|rt        j                  d       nt        | j
                  j                     } || ||||| j                  | j                  | j                  sdn| j                        \  }}	|j                         d d | j                  fz   }
|j!                  |
      }|r||	f}|S |f}|S )Neagersdpaz`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.r   )r   r   r[   r   )r   r   r   r   r   r1   _attn_implementationloggerwarning_oncer   r   r   r   r   r_   r   rd   )r0   hidden_statesr   r   	key_layervalue_layerquery_layerattention_interfacecontext_layerattention_probsnew_context_layer_shapeoutputss               r3   rD   zIJepaSelfAttention.forward   s=    --dhh}.EF	//

=0IJ//

=0IJ(?;;++w6{{//69>O##L
 '>dkk>^>^&_#)<nnLL#}}C$2C2C	*
& #0"4"4"6s";t?Q?Q>S"S%--.EF6G=/2 O\M]r4   ry   )rF   rG   rH   r   r#   rJ   rK   r   r   rL   r	   r   rD   rM   rN   s   @r3   r   r      s    ]{ ]t ](%ell %u|| % bg!(0(>!Z^!	uU\\5<</0%2EE	F!r4   r   c                   |     e Zd ZdZdeddf fdZdej                  dej                  dej                  fdZ xZ	S )	IJepaSelfOutputz
    The residual connection is defined in IJepaLayer instead of here (as is the case with other models), due to the
    layernorm applied before each block.
    r1   r7   Nc                     t         |           t        j                  |j                  |j                        | _        t        j                  |j                        | _        y N)	r"   r#   r-   r   r'   denserY   rZ   r[   r   s     r3   r#   zIJepaSelfOutput.__init__$  sB    YYv1163E3EF
zz&"<"<=r4   r   input_tensorc                 J    | j                  |      }| j                  |      }|S r   r   r[   r0   r   r   s      r3   rD   zIJepaSelfOutput.forward)  s$    

=1]3r4   )
rF   rG   rH   rI   r   r#   rJ   rK   rD   rM   rN   s   @r3   r   r     sD    
>{ >t >
U\\  RWR^R^ r4   r   c                        e Zd Zdeddf fdZdee   ddfdZ	 	 ddej                  de
ej                     d	edeeej                  ej                  f   eej                     f   fd
Z xZS )IJepaAttentionr1   r7   Nc                     t         |           t        |      | _        t	        |      | _        t               | _        y r   )r"   r#   r   	attentionr   outputsetpruned_headsr   s     r3   r#   zIJepaAttention.__init__1  s0    +F3%f-Er4   headsc                 >   t        |      dk(  ry t        || j                  j                  | j                  j                  | j
                        \  }}t        | j                  j                  |      | j                  _        t        | j                  j                  |      | j                  _        t        | j                  j                  |      | j                  _	        t        | j                  j                  |d      | j                  _        | j                  j                  t        |      z
  | j                  _        | j                  j                  | j                  j                  z  | j                  _        | j
                  j                  |      | _        y )Nr   r   rk   )lenr   r   r   r   r   r   r   r   r   r   r   r   union)r0   r   indexs      r3   prune_headszIJepaAttention.prune_heads7  s   u:?74>>55t~~7Y7Y[_[l[l
u
  2$..2F2FN/0B0BEJ1$..2F2FN.t{{/@/@%QO .2^^-O-ORUV[R\-\*'+~~'I'IDNNLnLn'n$ --33E:r4   r   r   r   c                 h    | j                  |||      }| j                  |d   |      }|f|dd  z   }|S )Nr   r   )r   r   )r0   r   r   r   self_outputsattention_outputr   s          r3   rD   zIJepaAttention.forwardI  sE     ~~mY@QR;;|AF#%QR(88r4   ry   )rF   rG   rH   r   r#   r   rz   r   rJ   rK   r   rL   r	   r   rD   rM   rN   s   @r3   r   r   0  s    "{ "t ";S ;d ;* -1"'	|| ELL)  	
 
uU\\5<</0%2EE	Fr4   r   c                   `     e Zd Zdeddf fdZdej                  dej                  fdZ xZS )IJepaIntermediater1   r7   Nc                    t         |           t        j                  |j                  |j
                        | _        t        |j                  t              rt        |j                     | _        y |j                  | _        y r   )r"   r#   r-   r   r'   intermediate_sizer   r(   
hidden_actstrr   intermediate_act_fnr   s     r3   r#   zIJepaIntermediate.__init__X  s]    YYv1163K3KL
f''-'-f.?.?'@D$'-'8'8D$r4   r   c                 J    | j                  |      }| j                  |      }|S r   )r   r   )r0   r   s     r3   rD   zIJepaIntermediate.forward`  s&    

=100?r4   	rF   rG   rH   r   r#   rJ   rK   rD   rM   rN   s   @r3   r   r   W  s1    9{ 9t 9U\\ ell r4   r   c                   x     e Zd Zdeddf fdZdej                  dej                  dej                  fdZ xZS )IJepaOutputr1   r7   Nc                     t         |           t        j                  |j                  |j
                        | _        t        j                  |j                        | _	        y r   )
r"   r#   r-   r   r   r'   r   rY   rZ   r[   r   s     r3   r#   zIJepaOutput.__init__h  sB    YYv779K9KL
zz&"<"<=r4   r   r   c                 T    | j                  |      }| j                  |      }||z   }|S r   r   r   s      r3   rD   zIJepaOutput.forwardm  s.    

=1]3%4r4   r   rN   s   @r3   r   r   g  s?    >{ >t >
U\\  RWR^R^ r4   r   c                        e Zd ZdZdeddf fdZ	 	 d
dej                  deej                     de	de
eej                  ej                  f   eej                     f   fd	Z xZS )r   z?This corresponds to the Block class in the timm implementation.r1   r7   Nc                 r   t         |           |j                  | _        d| _        t	        |      | _        t        |      | _        t        |      | _	        t        j                  |j                  |j                        | _        t        j                  |j                  |j                        | _        y )Nr   eps)r"   r#   chunk_size_feed_forwardseq_len_dimr   r   r   intermediater   r   r-   r   r'   layer_norm_epslayernorm_beforelayernorm_afterr   s     r3   r#   zIJepaLayer.__init__y  s    '-'E'E$'/-f5!&) "V-?-?VEZEZ [!||F,>,>FDYDYZr4   r   r   r   c                     | j                  | j                  |      ||      }|d   }|dd  }||z   }| j                  |      }| j                  |      }| j	                  ||      }|f|z   }|S )N)r   r   r   )r   r   r   r   r   )r0   r   r   r   self_attention_outputsr   r   layer_outputs           r3   rD   zIJepaLayer.forward  s     "&!!-0/ "0 "

 2!4(, )=8 ++M:((6 {{<?/G+r4   ry   )rF   rG   rH   rI   r   r#   rJ   rK   r   rL   r	   r   rD   rM   rN   s   @r3   r   r   v  s    I[{ [t [ -1"'	|| ELL)  	
 
uU\\5<</0%2EE	Fr4   r   c                        e Zd Zdeddf fdZ	 	 	 	 ddej                  deej                     deded	ede	e
ef   fd
Z xZS )IJepaEncoderr1   r7   Nc                     t         |           || _        t        j                  t        |j                        D cg c]  }t        |       c}      | _        d| _	        y c c}w ry   )
r"   r#   r1   r-   
ModuleListrangenum_hidden_layersr   layergradient_checkpointing)r0   r1   ru   r2   s      r3   r#   zIJepaEncoder.__init__  sN    ]]fF^F^@_#`1Jv$6#`a
&+# $as   A#r   r   r   output_hidden_statesreturn_dictc                 t   |rdnd }|rdnd }t        | j                        D ]h  \  }}	|r||fz   }|||   nd }
| j                  r+| j                  r| j	                  |	j
                  ||
|      }n
 |	||
|      }|d   }|s`||d   fz   }j |r||fz   }|st        d |||fD              S t        |||      S )Nr   r   r   c              3   &   K   | ]	  }||  y wr   r   ).0vs     r3   	<genexpr>z'IJepaEncoder.forward.<locals>.<genexpr>  s     mq_`_lms   )last_hidden_stater   
attentions)	enumerater  r  r   _gradient_checkpointing_func__call__tupler   )r0   r   r   r   r	  r
  all_hidden_statesall_self_attentionsilayer_modulelayer_head_masklayer_outputss               r3   rD   zIJepaEncoder.forward  s     #7BD$5b4(4 	POA|#$58H$H!.7.CilO**t}} $ A A ))!#%	! !-]OM^ _)!,M &9]1=M<O&O#'	P*   1]4D Dm]4EGZ$[mmm++*
 	
r4   )NFFT)rF   rG   rH   r   r#   rJ   rK   r   rL   r	   r  r   rD   rM   rN   s   @r3   r  r    sz    ,{ ,t , -1"'%* )
||)
 ELL))
  	)

 #)
 )
 
uo%	&)
r4   r  c                   *     e Zd Zdef fdZd Z xZS )IJepaPoolerr1   c                     t         |           t        j                  |j                  |j
                        | _        t        |j                     | _	        y r   )
r"   r#   r-   r   r'   pooler_output_sizer   r   
pooler_act
activationr   s     r3   r#   zIJepaPooler.__init__  s>    YYv1163L3LM
 !2!23r4   c                 \    |d d df   }| j                  |      }| j                  |      }|S )Nr   )r   r!  )r0   r   first_token_tensorpooled_outputs       r3   rD   zIJepaPooler.forward  s6     +1a40

#566r4   )rF   rG   rH   r   r#   rD   rM   rN   s   @r3   r  r    s    4{ 4
r4   r  a  
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`IJepaImageProcessor.__call__`]
            for details.

        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        interpolate_pos_encoding (`bool`, *optional*):
            Whether to interpolate the pre-trained position encodings.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
)r      i   aG  
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`IJepaConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
z_The bare IJepa Model transformer outputting raw hidden-states without any specific head on top.c                   8    e Zd Zddededef fdZdefdZdee	e
e	   f   ddfd	Z ee       eeeed
e      	 	 	 	 	 	 	 ddeej*                     deej,                     deej*                     dee   dee   dee   dee   deeef   fd              Z xZS )
IJepaModelr1   add_pooling_layerrQ   c                    t         |   |       || _        t        ||      | _        t        |      | _        t        j                  |j                  |j                        | _        |rt        |      nd | _        | j                          y )N)rQ   r   )r"   r#   r1   rP   rC   r  encoderr-   r   r'   r   	layernormr  pooler	post_init)r0   r1   r(  rQ   r2   s       r3   r#   zIJepaModel.__init__  sk     )&P#F+f&8&8f>S>ST->k&)D 	r4   r7   c                 .    | j                   j                  S r   )rC   rV   )r0   s    r3   get_input_embeddingszIJepaModel.get_input_embeddings  s    ///r4   heads_to_pruneNc                     |j                         D ]7  \  }}| j                  j                  |   j                  j	                  |       9 y)z
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        N)itemsr*  r  r   r   )r0   r0  r  r   s       r3   _prune_headszIJepaModel._prune_heads  sE    
 +002 	CLE5LLu%//;;EB	Cr4   vision)
checkpointoutput_typer   modalityexpected_outputr5   ro   r   r   r	  r6   r
  c                    ||n| j                   j                  }||n| j                   j                  }||n| j                   j                  }|t	        d      | j                  || j                   j                        }| j                  j                  j                  j                  j                  }|j                  |k7  r|j                  |      }| j                  |||      }	| j                  |	||||      }
|
d   }| j                  |      }| j                  | j                  |      nd}|s|||fn|f}||
dd z   S t!        |||
j"                  |
j$                        S )z
        bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*):
            Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
        Nz You have to specify pixel_values)ro   r6   )r   r   r	  r
  r   r   )r  pooler_outputr   r  )r1   r   r	  use_return_dictr=   get_head_maskr  rC   rV   r/   r   r   r   r*  r+  r,  r   r   r  )r0   r5   ro   r   r   r	  r6   r
  expected_dtypeembedding_outputencoder_outputssequence_outputr$  head_outputss                 r3   rD   zIJepaModel.forward%  s   , 2C1N-TXT_T_TqTq$8$D $++JjJj 	 &1%<k$++B]B]?@@ &&y$++2O2OP	 99DDKKQQ/'??>:L??/Tl + 
 ,,/!5# ' 
 *!,..98<8OO4UY?L?XO];_n^pL/!""555)-')77&11	
 	
r4   )FFNNNNNNN)rF   rG   rH   r   rL   r#   r   r/  r   rz   r   r3  r   IJEPA_INPUTS_DOCSTRINGr   _CHECKPOINT_FOR_DOCr   _CONFIG_FOR_DOC_EXPECTED_OUTPUT_SHAPEr   rJ   rK   r{   r	   r   rD   rM   rN   s   @r3   r'  r'  	  s"   

{ 
t 
]a 
0&: 0C4T#Y+? CD C ++AB&.$. 046:,0,0/337&*;
u||,;
 "%"2"23;
 ELL)	;

 $D>;
 'tn;
 #+4.;
 d^;
 
u00	1;
 C;
r4   r'  zEgyptian cata  
    IJepa Model transformer with an image classification head on top (a linear layer on top of the final hidden states)
    e.g. for ImageNet.

    <Tip>

        Note that it's possible to fine-tune IJepa on higher resolution images than the ones it has been trained on, by
        setting `interpolate_pos_encoding` to `True` in the forward of the model. This will interpolate the pre-trained
        position embeddings to the higher resolution.

    </Tip>
    c                       e Zd Zdeddf fdZ ee       eee	e
e      	 	 	 	 	 	 	 ddeej                     deej                     deej                     d	ee   d
ee   dee   dee   deee	f   fd              Z xZS )IJepaForImageClassificationr1   r7   Nc                 .   t         |   |       |j                  | _        t        |d      | _        |j                  dkD  r*t        j                  |j                  |j                        nt        j                         | _	        | j                          y )NF)r(  r   )r"   r#   
num_labelsr'  r~   r-   r   r'   Identity
classifierr-  r   s     r3   r#   z$IJepaForImageClassification.__init__  ss      ++%@
 OUN_N_bcNc"))F$6$68I8IJikititiv 	r4   )r5  r6  r   r8  r5   r   labelsr   r	  r6   r
  c                 n   ||n| j                   j                  }| j                  ||||||      }|d   }	| j                  |	j	                  d            }
d}||j                  |
j                        }| j                   j                  | j                  dk(  rd| j                   _        nl| j                  dkD  rL|j                  t        j                  k(  s|j                  t        j                  k(  rd| j                   _        nd| j                   _        | j                   j                  dk(  rIt               }| j                  dk(  r& ||
j                         |j                               }n ||
|      }n| j                   j                  dk(  r=t               } ||
j!                  d	| j                        |j!                  d	            }n,| j                   j                  dk(  rt#               } ||
|      }|s|
f|dd z   }||f|z   S |S t%        ||
|j&                  |j(                  
      S )a  
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        N)r   r   r	  r6   r
  r   r   r   
regressionsingle_label_classificationmulti_label_classificationr]   )losslogitsr   r  )r1   r;  r~   rL  r   r   deviceproblem_typerJ  r   rJ   longrz   r   squeezer   rh   r
   r   r   r  )r0   r5   r   rM  r   r	  r6   r
  r   r@  rS  rR  loss_fctr   s                 r3   rD   z#IJepaForImageClassification.forward  s   . &1%<k$++B]B]**/!5%=#  
 "!*!5!5!!5!<=YYv}}-F{{''/??a'/;DKK,__q(fllejj.HFLL\a\e\eLe/LDKK,/KDKK,{{''<7"9??a'#FNN$4fnn6FGD#FF3D))-JJ+-B @&++b/R))-II,./Y,F)-)9TGf$EvE$!//))	
 	
r4   rB  )rF   rG   rH   r   r#   r   rC  r   _IMAGE_CLASS_CHECKPOINTr   rE  _IMAGE_CLASS_EXPECTED_OUTPUTr   rJ   rK   rL   r	   r  rD   rM   rN   s   @r3   rH  rH  o  s     
{ 
t 
 ++AB*)$4	 04,0)-,0/337&*A
u||,A
 ELL)A
 &	A

 $D>A
 'tnA
 #+4.A
 d^A
 
u++	,A
 CA
r4   rH  )r}   r'  rH  )r   )@collections.abcr)   typingr   r   r   r   r   r   r	   rJ   torch.nnr-   r
   r   r   activationsr   modeling_outputsr   r   r   modeling_utilsr   r   pytorch_utilsr   r   utilsr   r   r   r   r   configuration_ijepar   
get_loggerrF   r   rD  rE  Moduler   rP   r}   rK   floatr   r   r   r   r   r   r   r  r  rC  rF  IJEPA_START_DOCSTRINGr'  rY  rZ  rH  __all__r   r4   r3   <module>ri     s    D D D   A A ! b b F Q  - 
		H	% 1   $299 $NNbii Nb"/? "/X %II%<<% 
% <<	%
 U\\*% % %<; ;|bii $$RYY $N		  ")) ' 'T0
299 0
f"))  2 ( 	  e[
% [
	[
| 5 -   U
"6 U
U
p Pr4   
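

# ---------------------------------------------------------------------------
# Usage sketch (not part of the upstream module): a minimal example of driving
# the two public models defined above. It assumes the `requests` and `PIL`
# packages are installed and that the `facebook/ijepa_vith14_1k` checkpoint
# referenced by `_CHECKPOINT_FOR_DOC` can be downloaded from the Hub.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import requests
    from PIL import Image

    from transformers import AutoImageProcessor

    # standard COCO test image used throughout the transformers docs
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    processor = AutoImageProcessor.from_pretrained("facebook/ijepa_vith14_1k")
    model = IJepaModel.from_pretrained("facebook/ijepa_vith14_1k")

    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)

    # I-JEPA has no CLS token; `last_hidden_state` holds one vector per patch,
    # e.g. (1, 256, 1280) for ViT-H/14 at 224x224 (see `_EXPECTED_OUTPUT_SHAPE`).
    print(outputs.last_hidden_state.shape)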