
    %	&hv             	          d Z ddlZddlZddlZddlmZ ddlmZm	Z	m
Z
mZ ddlZddlZddlmZmZ ddlmZmZmZ ddlmZ dd	lmZmZmZmZmZmZ dd
lmZ ddlm Z m!Z!m"Z" ddl#m$Z$m%Z%m&Z&m'Z'm(Z(m)Z) ddl*m+Z+ ddl,m-Z-  e'j\                  e/      Z0dZ1dZ2g dZ3dZ4dZ5e G d de             Z6dUdej                  de7de8dej                  fdZ9 G d dejt                        Z; G d dejt                        Z< G d  d!ejt                        Z= G d" d#ejt                        Z> G d$ d%e>      Z? G d& d'ejt                        Z@e>e?d(ZA G d) d*ejt                        ZB G d+ d,ejt                        ZC G d- d.ejt                        ZD G d/ d0ejt                        ZE G d1 d2ejt                        ZF G d3 d4ejt                        ZG G d5 d6e      ZHd7ZId8ZJ e%d9eI       G d: d;eH             ZK G d< d=ejt                        ZL e%d>eI       G d? d@eH             ZM e%dAeI       G dB dCeH             ZN G dD dEejt                        ZO G dF dGejt                        ZP G dH dIejt                        ZQ G dJ dKejt                        ZR G dL dMejt                        ZS e%dNeI       G dO dPeH             ZT e%dQeI       G dR dSeHe+             ZUg dTZVy)VzPyTorch BEiT model.    N)	dataclass)ListOptionalTupleUnion)Tensornn)BCEWithLogitsLossCrossEntropyLossMSELoss   )ACT2FN)BackboneOutputBaseModelOutputBaseModelOutputWithPoolingImageClassifierOutputMaskedLMOutputSemanticSegmenterOutput)PreTrainedModel)#compile_compatible_method_lru_cache find_pruneable_heads_and_indicesprune_linear_layer)add_code_sample_docstringsadd_start_docstrings%add_start_docstrings_to_model_forwardloggingreplace_return_docstrings	torch_int)BackboneMixin   )
BeitConfigr!   z%microsoft/beit-base-patch16-224-pt22k)r       i   zmicrosoft/beit-base-patch16-224ztabby, tabby catc                       e Zd ZdZy)BeitModelOutputWithPoolinga  
    Class for outputs of [`BeitModel`].

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
            Average of the last layer hidden states of the patch tokens (excluding the *[CLS]* token) if
            *config.use_mean_pooling* is set to True. If set to False, then the final hidden state of the *[CLS]* token
            will be returned.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
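
    Example (an illustrative sketch; the checkpoint name and shapes are the standard base-model ones):

    ```python
    >>> from transformers import AutoImageProcessor, BeitModel
    >>> from PIL import Image
    >>> import requests

    >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    >>> image = Image.open(requests.get(url, stream=True).raw)

    >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
    >>> model = BeitModel.from_pretrained("microsoft/beit-base-patch16-224")

    >>> inputs = image_processor(images=image, return_tensors="pt")
    >>> outputs = model(**inputs)
    >>> outputs.last_hidden_state.shape  # (batch_size, sequence_length, hidden_size)
    torch.Size([1, 197, 768])
    >>> outputs.pooler_output.shape  # mean of the patch tokens when `config.use_mean_pooling=True`
    torch.Size([1, 768])
    ```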
    """


def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """
    Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

    Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
    however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
    layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
    argument.
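
    Example (an illustrative sanity check; the tensor shape is arbitrary):

    ```python
    >>> import torch

    >>> x = torch.ones(4, 197, 768)
    >>> # with training=False the function is the identity, so this always holds
    >>> torch.equal(drop_path(x, drop_prob=0.1, training=False), x)
    True
    >>> # with training=True, surviving samples are rescaled by 1 / (1 - drop_prob)
    >>> out = drop_path(x, drop_prob=0.1, training=True)
    >>> out.shape
    torch.Size([4, 197, 768])
    ```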
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output


class BeitDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)


class BeitEmbeddings(nn.Module):
    """
    Construct the CLS token, position and patch embeddings. Optionally, also the mask token.

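    Example (an internal-module sketch; assumes the default `BeitConfig`, i.e. 224x224 images and 16x16 patches):

    ```python
    >>> import torch
    >>> from transformers import BeitConfig
    >>> from transformers.models.beit.modeling_beit import BeitEmbeddings

    >>> embeddings = BeitEmbeddings(BeitConfig())
    >>> pixel_values = torch.rand(1, 3, 224, 224)
    >>> output, (patch_height, patch_width) = embeddings(pixel_values)
    >>> output.shape  # a [CLS] token is prepended to the 14 * 14 = 196 patch tokens
    torch.Size([1, 197, 768])
    >>> (patch_height, patch_width)
    (14, 14)
    ```
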
    """

    def __init__(self, config: BeitConfig) -> None:
        super().__init__()

        self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
        if config.use_mask_token:
            self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
        else:
            self.mask_token = None
        self.patch_embeddings = BeitPatchEmbeddings(config)
        self.patch_size = config.patch_size
        self.image_size = (
            config.image_size
            if isinstance(config.image_size, collections.abc.Iterable)
            else (config.image_size, config.image_size)
        )
        num_patches = self.patch_embeddings.num_patches
        if config.use_absolute_position_embeddings:
            self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size))
        else:
            self.position_embeddings = None
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
        """
        This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
        images. This method is also adapted to support torch.jit tracing.

        Adapted from:
        - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
        - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
        """
        num_patches = embeddings.shape[1] - 1
        num_positions = self.position_embeddings.shape[1] - 1

        # always interpolate when tracing to ensure the exported model works for dynamic input shapes
        if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
            return self.position_embeddings

        class_pos_embed = self.position_embeddings[:, :1]
        patch_pos_embed = self.position_embeddings[:, 1:]

        dim = embeddings.shape[-1]

        new_height = height // self.patch_size
        new_width = width // self.patch_size

        sqrt_num_positions = torch_int(num_positions**0.5)
        patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
        patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)

        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed,
            size=(new_height, new_width),
            mode="bicubic",
            align_corners=False,
        )

        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)

        return torch.cat((class_pos_embed, patch_pos_embed), dim=1)

    def forward(
        self,
        pixel_values: torch.Tensor,
        bool_masked_pos: Optional[torch.BoolTensor] = None,
        interpolate_pos_encoding: Optional[bool] = None,
    ) -> torch.Tensor:
        if interpolate_pos_encoding is not None:
            warnings.warn(
                "`interpolate_pos_encoding` argument has no effect for BEiTEmbeddings, embeddings are always "
                "interpolated to the input image size. The argument will be removed in transformers v4.51.0.",
                FutureWarning,
            )

        _, _, height, width = pixel_values.shape
        embeddings, (patch_height, patch_width) = self.patch_embeddings(pixel_values)
        batch_size, seq_len, _ = embeddings.size()

        if bool_masked_pos is not None:
            mask_tokens = self.mask_token.expand(batch_size, seq_len, -1)
            # replace the masked visual tokens by mask_tokens
            w = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
            embeddings = embeddings * (1 - w) + mask_tokens * w

        cls_tokens = self.cls_token.expand(batch_size, -1, -1)
        embeddings = torch.cat((cls_tokens, embeddings), dim=1)

        if self.position_embeddings is not None:
            embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)

        embeddings = self.dropout(embeddings)

        return embeddings, (patch_height, patch_width)


class BeitPatchEmbeddings(nn.Module):
    """
    This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
    `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
    Transformer.
    """

    def __init__(self, config):
        super().__init__()
        image_size, patch_size = config.image_size, config.patch_size
        num_channels, hidden_size = config.num_channels, config.hidden_size

        image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        patch_shape = (image_size[0] // patch_size[0], image_size[1] // patch_size[1])
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_patches = num_patches
        self.patch_shape = patch_shape

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)

    def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
        batch_size, num_channels, height, width = pixel_values.shape
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        embeddings = self.projection(pixel_values)
        patch_height, patch_width = embeddings.shape[2], embeddings.shape[3]
        embeddings = embeddings.flatten(2).transpose(1, 2)

        return embeddings, (patch_height, patch_width)


class BeitSelfAttention(nn.Module):
    def __init__(self, config: BeitConfig, window_size: Optional[tuple] = None) -> None:
        super().__init__()
        self.config = config
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                f"heads {config.num_attention_heads}."
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.has_relative_position_bias = bool(window_size)
        if self.has_relative_position_bias:
            self.relative_position_bias = BeitRelativePositionBias(config, window_size=window_size)

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        relative_position_bias: Optional[torch.Tensor] = None,
        interpolate_pos_encoding: bool = False,
        resolution: Optional[Tuple[int]] = None,
    ) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]:
        mixed_query_layer = self.query(hidden_states)

        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(mixed_query_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)

        # Add relative position bias if present.
        if self.has_relative_position_bias:
            height, width = resolution
            window_size = (height // self.config.patch_size, width // self.config.patch_size)
            attention_scores = attention_scores + self.relative_position_bias(
                window_size, interpolate_pos_encoding, dim_size=hidden_states.shape[1]
            )

        # Add shared relative position bias if provided.
        if relative_position_bias is not None:
            attention_scores = attention_scores + relative_position_bias

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        return outputs


class BeitSdpaSelfAttention(BeitSelfAttention):
    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        relative_position_bias: Optional[torch.Tensor] = None,
        interpolate_pos_encoding: bool = False,
        resolution: Optional[Tuple[int]] = None,
    ) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]:
        if output_attentions or head_mask is not None:
            logger.warning_once(
                "`BeitSdpaSelfAttention` is used but `torch.nn.functional.scaled_dot_product_attention` does not "
                "support `output_attentions=True` or `head_mask`. Falling back to the manual attention implementation, "
                "but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. "
                'This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
            )
            return super().forward(
                hidden_states=hidden_states,
                head_mask=head_mask,
                output_attentions=output_attentions,
                relative_position_bias=relative_position_bias,
                interpolate_pos_encoding=interpolate_pos_encoding,
                resolution=resolution,
            )

        mixed_query_layer = self.query(hidden_states)

        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(mixed_query_layer)

        attn_bias = None
        if self.has_relative_position_bias:
            height, width = resolution
            window_size = (height // self.config.patch_size, width // self.config.patch_size)
            attn_bias = self.relative_position_bias(
                window_size, interpolate_pos_encoding, dim_size=hidden_states.shape[1]
            )

        if relative_position_bias is not None:
            if attn_bias is None:
                attn_bias = relative_position_bias
            else:
                attn_bias += relative_position_bias

        scaling = 1 / math.sqrt(self.attention_head_size)
        context_layer = torch.nn.functional.scaled_dot_product_attention(
            query_layer,
            key_layer,
            value_layer,
            attn_mask=attn_bias,
            dropout_p=self.config.attention_probs_dropout_prob if self.training else 0.0,
            is_causal=False,
            scale=scaling,
        )

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        return context_layer, None


class BeitSelfOutput(nn.Module):
    """
    The residual connection is defined in BeitLayer instead of here (as is the case with other models), due to the
    layernorm applied before each block.
    """

    def __init__(self, config: BeitConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor, gamma=None) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)

        return hidden_states


BEIT_SELF_ATTENTION_CLASSES = {
    "eager": BeitSelfAttention,
    "sdpa": BeitSdpaSelfAttention,
}


class BeitAttention(nn.Module):
    def __init__(self, config: BeitConfig, window_size: Optional[tuple] = None) -> None:
        super().__init__()
        self.attention = BEIT_SELF_ATTENTION_CLASSES[config._attn_implementation](config, window_size=window_size)
        self.output = BeitSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.attention.query = prune_linear_layer(self.attention.query, index)
        self.attention.key = prune_linear_layer(self.attention.key, index)
        self.attention.value = prune_linear_layer(self.attention.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
        self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        relative_position_bias: Optional[torch.Tensor] = None,
        interpolate_pos_encoding: bool = False,
        resolution: Optional[Tuple[int]] = None,
    ) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]:
        self_outputs = self.attention(
            hidden_states, head_mask, output_attentions, relative_position_bias, interpolate_pos_encoding, resolution
        )

        attention_output = self.output(self_outputs[0], hidden_states)

        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs


class BeitIntermediate(nn.Module):
    def __init__(self, config: BeitConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)

        return hidden_states


class BeitOutput(nn.Module):
    def __init__(self, config: BeitConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)

        return hidden_states


class BeitLayer(nn.Module):
    """This corresponds to the Block class in the timm implementation."""

    def __init__(self, config: BeitConfig, window_size: Optional[tuple] = None, drop_path_rate: float = 0.0) -> None:
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = BeitAttention(config, window_size=window_size)
        self.intermediate = BeitIntermediate(config)
        self.output = BeitOutput(config)
        self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.drop_path = BeitDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
        self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

        init_values = config.layer_scale_init_value
        if init_values > 0:
            self.lambda_1 = nn.Parameter(init_values * torch.ones((config.hidden_size)), requires_grad=True)
            self.lambda_2 = nn.Parameter(init_values * torch.ones((config.hidden_size)), requires_grad=True)
        else:
            self.lambda_1, self.lambda_2 = None, None

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        relative_position_bias: Optional[torch.Tensor] = None,
        interpolate_pos_encoding: bool = False,
        resolution: Optional[Tuple[int]] = None,
    ) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]:
        self_attention_outputs = self.attention(
            self.layernorm_before(hidden_states),  # in BEiT, layernorm is applied before self-attention
            head_mask,
            output_attentions=output_attentions,
            relative_position_bias=relative_position_bias,
            interpolate_pos_encoding=interpolate_pos_encoding,
            resolution=resolution,
        )
        attention_output = self_attention_outputs[0]
        outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        # apply lambda_1 if present
        if self.lambda_1 is not None:
            attention_output = self.lambda_1 * attention_output

        # first residual connection
        hidden_states = self.drop_path(attention_output) + hidden_states

        # in BEiT, layernorm is also applied after self-attention
        layer_output = self.layernorm_after(hidden_states)

        layer_output = self.intermediate(layer_output)
        layer_output = self.output(layer_output)

        if self.lambda_2 is not None:
            layer_output = self.lambda_2 * layer_output

        # second residual connection
        layer_output = self.drop_path(layer_output) + hidden_states

        outputs = (layer_output,) + outputs

        return outputs


class BeitRelativePositionBias(nn.Module):
    def __init__(self, config: BeitConfig, window_size: tuple) -> None:
        super().__init__()
        self.window_size = window_size
        # (2*Wh-1) * (2*Ww-1) in-window distances, plus cls-to-token, token-to-cls and cls-to-cls
        self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros(self.num_relative_distance, config.num_attention_heads)
        )

    @compile_compatible_method_lru_cache(maxsize=10)
    def generate_relative_position_index(self, window_size: Tuple[int, int]) -> torch.Tensor:
        """
        This method creates the relative position index, modified to support arbitrary window sizes,
        as introduced in [MiDaS v3.1](https://arxiv.org/abs/2307.14460).
        """
        num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
        window_area = window_size[0] * window_size[1]
        grid = torch.meshgrid(torch.arange(window_size[0]), torch.arange(window_size[1]), indexing="ij")
        coords = torch.stack(grid)  # 2, Wh, Ww
        coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
        relative_coords[:, :, 0] += window_size[0] - 1  # shift to start from 0
        relative_coords[:, :, 1] += window_size[1] - 1
        relative_coords[:, :, 0] *= 2 * window_size[1] - 1
        relative_position_index = torch.zeros(size=(window_area + 1,) * 2, dtype=relative_coords.dtype)
        relative_position_index[1:, 1:] = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
        relative_position_index[0, 0:] = num_relative_distance - 3
        relative_position_index[0:, 0] = num_relative_distance - 2
        relative_position_index[0, 0] = num_relative_distance - 1
        return relative_position_index

    def forward(self, window_size, interpolate_pos_encoding: bool = False, dim_size: Optional[int] = None) -> torch.Tensor:
        """
        Modification of timm.models.beit.py: Attention._get_rel_pos_bias to support arbitrary window sizes.
        """
        old_height = 2 * self.window_size[0] - 1
        old_width = 2 * self.window_size[1] - 1

        new_height = 2 * window_size[0] - 1
        new_width = 2 * window_size[1] - 1

        old_relative_position_bias_table = self.relative_position_bias_table

        old_num_relative_distance = self.num_relative_distance
        new_num_relative_distance = new_height * new_width + 3

        old_sub_table = old_relative_position_bias_table[: old_num_relative_distance - 3]

        old_sub_table = old_sub_table.reshape(1, old_width, old_height, -1).permute(0, 3, 1, 2)
        new_sub_table = nn.functional.interpolate(
            old_sub_table, size=(torch_int(new_height), torch_int(new_width)), mode="bilinear"
        )
        new_sub_table = new_sub_table.permute(0, 2, 3, 1).reshape(new_num_relative_distance - 3, -1)

        new_relative_position_bias_table = torch.cat(
            [new_sub_table, old_relative_position_bias_table[old_num_relative_distance - 3 :]]
        )

        relative_position_index = self.generate_relative_position_index(window_size)
        relative_position_bias = new_relative_position_bias_table[relative_position_index.view(-1)]
        relative_position_bias = relative_position_bias.view(
            window_size[0] * window_size[1] + 1, window_size[0] * window_size[1] + 1, -1
        )  # Wh*Ww+1, Wh*Ww+1, nH
        relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww+1, Wh*Ww+1

        if interpolate_pos_encoding:
            relative_position_bias = nn.functional.interpolate(
                relative_position_bias.unsqueeze(1),
                size=(dim_size, dim_size),
                mode="bilinear",
                align_corners=False,
            ).squeeze(1)

        return relative_position_bias.unsqueeze(0)


class BeitEncoder(nn.Module):
    def __init__(self, config: BeitConfig, window_size: Optional[tuple] = None) -> None:
        super().__init__()
        self.config = config
        self.has_relative_position_bias = config.use_shared_relative_position_bias
        if self.has_relative_position_bias:
            self.relative_position_bias = BeitRelativePositionBias(config, window_size=window_size)

        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers)]
        self.layer = nn.ModuleList(
            [
                BeitLayer(
                    config,
                    window_size=window_size if config.use_relative_position_bias else None,
                    drop_path_rate=dpr[i],
                )
                for i in range(config.num_hidden_layers)
            ]
        )
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        interpolate_pos_encoding: bool = False,
        resolution: Optional[Tuple[int, int]] = None,
        return_dict: bool = True,
    ) -> Union[tuple, BaseModelOutput]:
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            if self.has_relative_position_bias:
                height, width = resolution
                window_size = (height // self.config.patch_size, width // self.config.patch_size)
                relative_position_bias = self.relative_position_bias(
                    window_size, interpolate_pos_encoding=interpolate_pos_encoding, dim_size=hidden_states.shape[1]
                )
            else:
                relative_position_bias = None

            layer_head_mask = head_mask[i] if head_mask is not None else None

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    layer_head_mask,
                    output_attentions,
                    relative_position_bias,
                    interpolate_pos_encoding,
                    resolution,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    layer_head_mask,
                    output_attentions,
                    relative_position_bias=relative_position_bias,
                    interpolate_pos_encoding=interpolate_pos_encoding,
                    resolution=resolution,
                )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)

        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )


class BeitPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = BeitConfig
    base_model_prefix = "beit"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
    _no_split_modules = ["BeitLayer"]
    _keys_to_ignore_on_load_unexpected = [r".*relative_position_index.*"]
    _supports_sdpa = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d, nn.ConvTranspose2d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, BeitEmbeddings):
            module.cls_token.data.zero_()
            if module.mask_token is not None:
                module.mask_token.data.zero_()
            if module.position_embeddings is not None:
                module.position_embeddings.data.zero_()
        elif isinstance(module, BeitRelativePositionBias):
            module.relative_position_bias_table.data.zero_()
        elif isinstance(module, BeitLayer):
            if module.lambda_1 is not None:
                module.lambda_1.data.fill_(self.config.layer_scale_init_value)
                module.lambda_2.data.fill_(self.config.layer_scale_init_value)


BEIT_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`BeitConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

BEIT_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`BeitImageProcessor.__call__`] for details.

        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):
            Whether to interpolate the pre-trained position encodings.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
    e Zd Zddededdf fdZd Zd Z ee	       e
eeede	      	 	 	 	 	 	 dd
ej                   deej$                     deej                      dee   dee   dedee   deeef   fd              Z xZS )	BeitModelrT   add_pooling_layerr/   Nc                    t         |   |       || _        t        |      | _        t        || j                  j                  j                        | _        |j                  rt        j                         n*t        j                  |j                  |j                        | _        |rt!        |      nd | _        | j%                          y )Nr   r  )rB   rC   rT   rS   rj   rC  r]   r   encoderuse_mean_poolingr	   r  r  rX   r  	layernorm
BeitPoolerpooler	post_init)rD   rT   r{  rE   s      r+   rC   zBeitModel.__init__<  s     (0"6t7W7W7c7cd $44BKKM",,vGYGY_e_t_t:u 	 ->j(4 	r*   c                 .    | j                   j                  S rA   rj   r]   rL   s    r+   get_input_embeddingszBeitModel.get_input_embeddingsK      ///r*   c                     |j                         D ]7  \  }}| j                  j                  |   j                  j	                  |       9 y)z
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        N)itemsr}  rL  r   r   )rD   heads_to_prunerL  r   s       r+   _prune_headszBeitModel._prune_headsN  sE    
 +002 	CLE5LLu%//;;EB	Cr*   vision)
checkpointoutput_typerr  modalityexpected_outputr   r   r   r   rP  r   rQ  c           	      :   ||n| j                   j                  }||n| j                   j                  }||n| j                   j                  }| j	                  || j                   j
                        }| j                  ||      \  }}	|j                  dd }
| j                  |||||
||      }|d   }| j                  |      }| j                  | j                  |      nd}|s|||fn|f}||dd z   S t        |||j                  |j                        S )z
        bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*):
            Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output, _ = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos)
        resolution = pixel_values.shape[2:]

        encoder_outputs = self.encoder(
            embedding_output,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            resolution=resolution,
            interpolate_pos_encoding=interpolate_pos_encoding,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        sequence_output = self.layernorm(sequence_output)
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            head_outputs = (sequence_output, pooled_output) if pooled_output is not None else (sequence_output,)
            return head_outputs + encoder_outputs[1:]

        return BeitModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


class BeitPooler(nn.Module):
    def __init__(self, config: BeitConfig) -> None:
        super().__init__()
        self.layernorm = (
            nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) if config.use_mean_pooling else None
        )

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        if self.layernorm is not None:
            # Mean pool the final hidden states of the patch tokens
            patch_tokens = hidden_states[:, 1:, :]
            pooled_output = self.layernorm(patch_tokens.mean(1))
        else:
            # Pool by simply taking the final hidden state of the [CLS] token
            pooled_output = hidden_states[:, 0]

        return pooled_output


@add_start_docstrings(
    """Beit Model transformer with a 'language' modeling head on top. BEiT does masked image modeling by predicting
    visual tokens of a Vector-Quantize Variational Autoencoder (VQ-VAE), whereas other vision models like ViT and DeiT
    predict RGB pixel values. As a result, this class is incompatible with [`AutoModelForMaskedImageModeling`], so you
    will need to use [`BeitForMaskedImageModeling`] directly if you wish to do masked image modeling with BEiT.""",
    BEIT_START_DOCSTRING,
)
class BeitForMaskedImageModeling(BeitPreTrainedModel):
    def __init__(self, config: BeitConfig) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.beit = BeitModel(config, add_pooling_layer=False)

        # Classifier head
        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        bool_masked_pos: Optional[torch.BoolTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, MaskedLMOutput]:
        r"""
        bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):
            Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).

        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).

        Returns:

        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, BeitForMaskedImageModeling
        >>> import torch
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/beit-base-patch16-224-pt22k")
        >>> model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")

        >>> num_patches = (model.config.image_size // model.config.patch_size) ** 2
        >>> pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
        >>> # create random boolean mask of shape (batch_size, num_patches)
        >>> bool_masked_pos = torch.randint(low=0, high=2, size=(1, num_patches)).bool()

        >>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
        >>> loss, logits = outputs.loss, outputs.logits
        >>> list(logits.shape)
        [1, 196, 8192]
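        >>> # (illustrative) ids of the visual tokens predicted at the masked positions
        >>> predicted_tokens = logits.argmax(-1)[bool_masked_pos]
        >>> predicted_tokens.shape[0] == int(bool_masked_pos.sum())
        True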
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.beit(
            pixel_values,
            bool_masked_pos=bool_masked_pos,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        sequence_output = self.layernorm(sequence_output)
        prediction_scores = self.lm_head(sequence_output[:, 1:])

        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()  # -100 index = padding token
            masked_lm_loss = loss_fct(prediction_scores[bool_masked_pos], labels)

        if not return_dict:
            output = (prediction_scores,) + outputs[1:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@add_start_docstrings(
    """
    hidden states of the patch tokens) e.g. for ImageNet.
    c                        e Zd Zdeddf fdZ ee       eee	e
e      	 	 	 	 	 	 	 ddeej                     deej                     deej                     d	ee   d
ee   dedee   deee	f   fd              Z xZS )BeitForImageClassificationrT   r/   Nc                 .   t         |   |       |j                  | _        t        |d      | _        |j                  dkD  r*t        j                  |j                  |j                        nt        j                         | _	        | j                          y )NTr  r   )rB   rC   r  rz  rc  r	   r   rX   r  
classifierr  r   s     r+   rC   z#BeitForImageClassification.__init__  ss      ++f=	 OUN_N_bcNc"))F$6$68I8IJikititiv 	r*   )r  r  rr  r  r   r   r  r   rP  r   rQ  c                 4   ||n| j                   j                  }| j                  ||||||      }|r|j                  n|d   }	| j	                  |	      }
d}|| j                   j
                  | j                  dk(  rd| j                   _        nl| j                  dkD  rL|j                  t        j                  k(  s|j                  t        j                  k(  rd| j                   _        nd| j                   _        | j                   j
                  dk(  rIt               }| j                  dk(  r& ||
j                         |j                               }n ||
|      }n| j                   j
                  dk(  r=t               } ||
j                  d| j                        |j                  d            }n,| j                   j
                  dk(  rt               } ||
|      }|s|
f|dd z   }||f|z   S |S t!        ||
|j"                  |j$                  	      S )
a  
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        Nr   r   rP  r   rQ  r    
regressionsingle_label_classificationmulti_label_classificationrn   ro   r  )rT   r  rc  r  r  problem_typer  r2   r6   longr   r   r9  r   r}   r
   r   rF   rX  )rD   r   r   r  r   rP  r   rQ  r   r  r  r  r  r<   s                 r+   rI   z"BeitForImageClassification.forward"  s   . &1%<k$++B]B]))/!5%=#  
 2=--'!*/{{''/??a'/;DKK,__q(fllejj.HFLL\a\e\eLe/LDKK,/KDKK,{{''<7"9??a'#FNN$4fnn6FGD#FF3D))-JJ+-B @&++b/R))-II,./Y,F)-)9TGf$EvE$!//))	
 	
r*   NNNNNFN)r%   r&   r'   r!   rC   r   r  r   _IMAGE_CLASS_CHECKPOINTr   r  _IMAGE_CLASS_EXPECTED_OUTPUTr   r6   r   r   r   r   rI   rP   rQ   s   @r+   r  r    s    
z 
d 
 ++@A*)$4	 04,0)-,0/3).&*=
u||,=
 ELL)=
 &	=

 $D>=
 'tn=
 #'=
 d^=
 
u++	,=
 B=
r*   r  c                        e Zd ZdZ	 	 	 ddededeeeeef   f   deeeeef   ef   dedeeeeef   f   dd	f fd
Z	de
j                  de
j                  fdZ xZS )BeitConvModuleaD  
    A convolutional block that bundles conv/norm/activation layers. This block simplifies the usage of convolution
    layers, which are commonly used with a norm layer (e.g., BatchNorm) and activation layer (e.g., ReLU).

    Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int], str] = 0,
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
    ) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.bn = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.bn(output)
        output = self.activation(output)

        return output


class BeitPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            BeitConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state


class BeitPyramidPoolingModule(nn.Module):
    """
    Pyramid Pooling Module (PPM) used in PSPNet.

    Args:
        pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
            Module.
        in_channels (int): Input channels.
        channels (int): Channels after modules, before conv_seg.
        align_corners (bool): align_corners argument of F.interpolate.

    Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
    """

    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = BeitPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs


class BeitUperHead(nn.Module):
    """
    Unified Perceptual Parsing for Scene Understanding. This head is the implementation of
    [UPerNet](https://arxiv.org/abs/1807.10221).

    Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
    """

    def __init__(self, config: BeitConfig) -> None:
        super().__init__()

        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = [config.hidden_size] * 4  # e.g. [768, 768, 768, 768]
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = BeitPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = BeitConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = BeitConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = BeitConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = BeitConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)

        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals from the first three feature maps
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]

        # the deepest feature map goes through the PSP module instead
        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)

        return output
LU\\ ell r*   r  c                        e Zd ZdZ	 ddedededeeeeef   f   ddf
 fdZd	e	j                  de	j                  fd
Z xZS )BeitFCNHeada  
    Fully Convolution Networks for Semantic Segmentation. This head is the implementation of
    [FCNNet](https://arxiv.org/abs/1411.4038).

    Args:
        config (BeitConfig): Configuration.
        in_channels (int): Number of input channels (set to `config.hidden_size`).
        kernel_size (int): The kernel size for convs in the head. Default: 3.
        dilation (int): The dilation rate for convs in the head. Default: 1.


    Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
    """

    def __init__(
        self, config: BeitConfig, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1
    ) -> None:
        super().__init__()
        self.in_channels = config.hidden_size
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            BeitConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                BeitConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = BeitConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
@add_start_docstrings(
    """
    Beit Model transformer with a semantic segmentation head on top e.g. for ADE20k, CityScapes.
    """,
    BEIT_START_DOCSTRING,
)
class BeitForSemanticSegmentation(BeitPreTrainedModel):
    def __init__(self, config: BeitConfig) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.beit = BeitModel(config, add_pooling_layer=False)

        # FPNs
        if len(self.config.out_indices) != 4:
            raise ValueError(
                "BeitForSemanticSegmentation requires config.out_indices to be a list of 4 integers, "
                "specifying which features to use from the backbone. One can use [3, 5, 7, 11] in case of "
                "a base-sized architecture."
            )
        self.fpn1 = nn.Sequential(
            nn.ConvTranspose2d(config.hidden_size, config.hidden_size, kernel_size=2, stride=2),
            nn.BatchNorm2d(config.hidden_size),
            nn.GELU(),
            nn.ConvTranspose2d(config.hidden_size, config.hidden_size, kernel_size=2, stride=2),
        )
        self.fpn2 = nn.Sequential(
            nn.ConvTranspose2d(config.hidden_size, config.hidden_size, kernel_size=2, stride=2),
        )
        self.fpn3 = nn.Identity()
        self.fpn4 = nn.MaxPool2d(kernel_size=2, stride=2)

        # Semantic segmentation head(s)
        self.decode_head = BeitUperHead(config)
        self.auxiliary_head = BeitFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    def compute_loss(self, logits, auxiliary_logits, labels):
        # upsample logits to the images' original size
        upsampled_logits = nn.functional.interpolate(
            logits, size=labels.shape[-2:], mode="bilinear", align_corners=False
        )
        if auxiliary_logits is not None:
            upsampled_auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=labels.shape[-2:], mode="bilinear", align_corners=False
            )
        # compute weighted loss
        loss_fct = CrossEntropyLoss(ignore_index=self.config.semantic_loss_ignore_index)
        main_loss = loss_fct(upsampled_logits, labels)
        loss = main_loss
        if auxiliary_logits is not None:
            auxiliary_loss = loss_fct(upsampled_auxiliary_logits, labels)
            loss += self.config.auxiliary_loss_weight * auxiliary_loss

        return loss

    @add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
            Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).

        Returns:

        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, BeitForSemanticSegmentation
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        >>> model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")

        >>> inputs = image_processor(images=image, return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> # logits are of shape (batch_size, num_labels, height, width)
        >>> logits = outputs.logits
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        if labels is not None and self.config.num_labels == 1:
            raise ValueError("The number of labels should be greater than one")

        outputs = self.beit(
            pixel_values,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=True,  # we need the intermediate hidden states
            interpolate_pos_encoding=interpolate_pos_encoding,
            return_dict=return_dict,
        )

        encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1]

        # only keep certain features, and reshape
        # note that we do +1 as the encoder_hidden_states also includes the initial embeddings
        features = [feature for idx, feature in enumerate(encoder_hidden_states) if idx + 1 in self.config.out_indices]
        batch_size = pixel_values.shape[0]
        patch_resolution = self.config.image_size // self.config.patch_size
        features = [
            x[:, 1:, :].permute(0, 2, 1).reshape(batch_size, -1, patch_resolution, patch_resolution) for x in features
        ]

        # apply FPNs
        ops = [self.fpn1, self.fpn2, self.fpn3, self.fpn4]
        for i in range(len(features)):
            features[i] = ops[i](features[i])

        logits = self.decode_head(features)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)

        loss = None
        if labels is not None:
            loss = self.compute_loss(logits, auxiliary_logits, labels)

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    BEiT backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    BEIT_START_DOCSTRING,
)
class BeitBackbone(BeitPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.hidden_size for _ in range(config.num_hidden_layers + 1)]
        self.embeddings = BeitEmbeddings(config)
        self.encoder = BeitEncoder(config, window_size=self.embeddings.patch_embeddings.patch_shape)

        if config.add_fpn:
            if len(self.config.out_indices) != 4:
                raise ValueError(
                    "BeitBackbone requires config.out_indices to be a list of 4 integers, "
                    "specifying which features to use from the backbone. One can use [3, 5, 7, 11] in case of "
                    "a base-sized architecture."
                )
            hidden_size = config.hidden_size
            self.fpn1 = nn.Sequential(
                nn.ConvTranspose2d(hidden_size, hidden_size, kernel_size=2, stride=2),
                nn.BatchNorm2d(hidden_size, eps=config.batch_norm_eps),
                nn.GELU(),
                nn.ConvTranspose2d(hidden_size, hidden_size, kernel_size=2, stride=2),
            )

            self.fpn2 = nn.Sequential(nn.ConvTranspose2d(hidden_size, hidden_size, kernel_size=2, stride=2))
            self.fpn3 = nn.Identity()
            self.fpn4 = nn.MaxPool2d(kernel_size=2, stride=2)

        # initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Tensor,
        output_hidden_states: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> BackboneOutput:
        r"""
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, AutoBackbone
        >>> import torch
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> processor = AutoImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
        >>> model = AutoBackbone.from_pretrained(
        ...     "microsoft/beit-base-patch16-224", out_features=["stage1", "stage2", "stage3", "stage4"]
        ... )

        >>> inputs = processor(image, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> feature_maps = outputs.feature_maps
        >>> list(feature_maps[-1].shape)
        [1, 768, 14, 14]
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        batch_size = pixel_values.shape[0]
        embedding_output, (patch_height, patch_width) = self.embeddings(pixel_values)

        outputs = self.encoder(
            embedding_output, output_hidden_states=True, output_attentions=output_attentions, return_dict=return_dict
        )

        hidden_states = outputs.hidden_states if return_dict else outputs[1]

        feature_maps = ()
        for stage, hidden_state in zip(self.stage_names, hidden_states):
            if stage in self.out_features:
                if self.config.reshape_hidden_states:
                    hidden_state = hidden_state[:, 1:, :]
                    hidden_state = hidden_state.permute(0, 2, 1)
                    hidden_state = hidden_state.reshape(batch_size, -1, patch_height, patch_width)

                feature_maps += (hidden_state,)

        if self.config.add_fpn:
            feature_maps = [
                self.fpn1(feature_maps[0]),
                self.fpn2(feature_maps[1]),
                self.fpn3(feature_maps[2]),
                self.fpn4(feature_maps[3]),
            ]
            feature_maps = tuple(feature_maps)

        if not return_dict:
            if output_hidden_states:
                output = (feature_maps,) + outputs[1:]
            else:
                output = (feature_maps,) + outputs[2:]
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=outputs.attentions,
        )
__all__ = [
    "BeitForImageClassification",
    "BeitForMaskedImageModeling",
    "BeitForSemanticSegmentation",
    "BeitModel",
    "BeitPreTrainedModel",
    "BeitBackbone",
]