
    %	&h                        d Z ddlmZmZmZmZmZ ddlZddlZddlm	Z	 ddl
mZmZ ddlmZ ddlmZmZmZ dd	lmZmZ dd
lmZmZ ddlmZmZmZmZmZ ddlm Z   ejB                  e"      Z#dZ$dZ% G d de	jL                        Z' G d de	jL                        Z(	 d7de	jL                  dejR                  dejR                  dejR                  deejR                     de*de*fdZ+ G d de	jL                        Z, G d de	jL                        Z- G d  d!e	jL                        Z. G d" d#e	jL                        Z/ G d$ d%e	jL                        Z0 G d& d'e	jL                        Z1 G d( d)e	jL                        Z2 G d* d+e	jL                        Z3 G d, d-e      Z4d.Z5d/Z6 ed0e5       G d1 d2e4             Z7 ed3e5       G d4 d5e4             Z8g d6Z9y)8zPyTorch ViViT model.    )CallableOptionalSetTupleUnionN)nn)CrossEntropyLossMSELoss   )ACT2FN)BaseModelOutputBaseModelOutputWithPoolingImageClassifierOutput)ALL_ATTENTION_FUNCTIONSPreTrainedModel) find_pruneable_heads_and_indicesprune_linear_layer)add_start_docstrings%add_start_docstrings_to_model_forwardloggingreplace_return_docstrings	torch_int   )VivitConfigzgoogle/vivit-b-16x2-kinetics400r   c                   0     e Zd ZdZ fdZddefdZ xZS )VivitTubeletEmbeddingsa  
    Construct Vivit Tubelet embeddings.

    This module turns a batch of videos of shape (batch_size, num_frames, num_channels, height, width) into a tensor of
    shape (batch_size, seq_len, hidden_size) to be consumed by a Transformer encoder.

    The seq_len (the number of patches) equals (number of frames // tubelet_size[0]) * (height // tubelet_size[1]) *
    (width // tubelet_size[2]).
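
    For example, with the defaults of the "google/vivit-b-16x2-kinetics400" checkpoint (32 frames of 224x224 pixels and
    tubelet_size = [2, 16, 16]), seq_len = (32 // 2) * (224 // 16) * (224 // 16) = 16 * 14 * 14 = 3136 patches; a [CLS]
    token is prepended later by VivitEmbeddings, giving the 3137 positions seen in the usage examples below.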
    """

    def __init__(self, config):
        super().__init__()
        self.num_frames = config.num_frames
        self.image_size = config.image_size
        self.patch_size = config.tubelet_size
        self.num_patches = (
            (self.num_frames // self.patch_size[0])
            * (self.image_size // self.patch_size[1])
            * (self.image_size // self.patch_size[2])
        )
        self.embed_dim = config.hidden_size

        self.projection = nn.Conv3d(
            config.num_channels, config.hidden_size, kernel_size=config.tubelet_size, stride=config.tubelet_size
        )

    def forward(self, pixel_values, interpolate_pos_encoding: bool = False):
        batch_size, num_frames, num_channels, height, width = pixel_values.shape
        if not interpolate_pos_encoding and (height != self.image_size or width != self.image_size):
            raise ValueError(
                f"Image image size ({height}*{width}) doesn't match model ({self.image_size}*{self.image_size})."
            )

        # permute to (batch_size, num_channels, num_frames, height, width)
        pixel_values = pixel_values.permute(0, 2, 1, 3, 4)

        x = self.projection(pixel_values)
        # flatten the spatio-temporal patch grid into a sequence: (batch_size, seq_len, hidden_size)
        x = x.flatten(2).transpose(1, 2)
        return x


class VivitEmbeddings(nn.Module):
    """
    Vivit Embeddings.

    Creates embeddings from a video using VivitTubeletEmbeddings, adds CLS token and positional embeddings.
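
    The resulting sequence has length num_patches + 1: a learnable [CLS] token is prepended before the positional
    embeddings are added (e.g. 3137 positions for the default 32-frame, 224x224 configuration).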
    """

    def __init__(self, config):
        super().__init__()

        self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
        self.patch_embeddings = VivitTubeletEmbeddings(config)

        self.position_embeddings = nn.Parameter(
            torch.zeros(1, self.patch_embeddings.num_patches + 1, config.hidden_size)
        )
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.patch_size = config.tubelet_size[1:]
        self.config = config

    def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
        """
        This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
        images. This method is also adapted to support torch.jit tracing.

        Adapted from:
        - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
        - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
        """

        num_patches = embeddings.shape[1] - 1
        num_positions = self.position_embeddings.shape[1] - 1

        # always interpolate when tracing to ensure the exported model works for dynamic input shapes
        if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
            return self.position_embeddings

        class_pos_embed = self.position_embeddings[:, :1]
        patch_pos_embed = self.position_embeddings[:, 1:]

        dim = embeddings.shape[-1]

        new_height = height // self.patch_size[0]
        new_width = width // self.patch_size[1]

        sqrt_num_positions = torch_int(num_positions**0.5)
        patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
        patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)

        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed,
            size=(new_height, new_width),
            mode="bicubic",
            align_corners=False,
        )

        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)

        return torch.cat((class_pos_embed, patch_pos_embed), dim=1)

    def forward(self, pixel_values, interpolate_pos_encoding: bool = False):
        batch_size, num_frames, num_channels, height, width = pixel_values.shape
        embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)

        cls_tokens = self.cls_token.tile([batch_size, 1, 1])
        embeddings = torch.cat((cls_tokens, embeddings), dim=1)

        # add positional encoding to each token
        if interpolate_pos_encoding:
            embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
        else:
            embeddings = embeddings + self.position_embeddings

        embeddings = self.dropout(embeddings)

        return embeddings


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    # Take the dot product between "query" and "key" to get the raw attention scores.
    attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling

    # Normalize the attention scores to probabilities.
    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)

    # This is actually dropping out entire tokens to attend to, which might
    # seem a bit unusual, but is taken from the original Transformer paper.
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)

    # Mask heads if we want to
    if attention_mask is not None:
        attn_weights = attn_weights * attention_mask

    attn_output = torch.matmul(attn_weights, value)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


class VivitSelfAttention(nn.Module):
    def __init__(self, config: VivitConfig) -> None:
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                f"heads {config.num_attention_heads}."
            )

        self.config = config
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.dropout_prob = config.attention_probs_dropout_prob
        self.scaling = self.attention_head_size**-0.5
        self.is_causal = False

        self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
        self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
        self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)

    def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(self.query(hidden_states))

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and output_attentions:
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`."
                    " Falling back to eager attention. This warning can be removed using the argument"
                    ' `attn_implementation="eager"` when loading the model.'
                )
            else:
                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        context_layer, attention_probs = attention_interface(
            self,
            query_layer,
            key_layer,
            value_layer,
            head_mask,
            is_causal=self.is_causal,
            scaling=self.scaling,
            dropout=0.0 if not self.training else self.dropout_prob,
        )

        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.reshape(new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        return outputs


class VivitSelfOutput(nn.Module):
    """
    The residual connection is defined in VivitLayer instead of here (as is the case with other models), due to the
    layernorm applied before each block.
    """

    def __init__(self, config: VivitConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)

        return hidden_states


class VivitAttention(nn.Module):
    def __init__(self, config: VivitConfig) -> None:
        super().__init__()
        self.attention = VivitSelfAttention(config)
        self.output = VivitSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads: Set[int]) -> None:
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.attention.query = prune_linear_layer(self.attention.query, index)
        self.attention.key = prune_linear_layer(self.attention.key, index)
        self.attention.value = prune_linear_layer(self.attention.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
        self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        self_outputs = self.attention(hidden_states, head_mask, output_attentions)

        attention_output = self.output(self_outputs[0], hidden_states)

        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs


class VivitIntermediate(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        hidden_states = self.dropout(hidden_states)

        return hidden_states


class VivitOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)

        hidden_states = hidden_states + input_tensor

        return hidden_states


class VivitLayer(nn.Module):
    """This corresponds to the EncoderBlock class in the scenic/vivit implementation."""

    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = VivitAttention(config)
        self.intermediate = VivitIntermediate(config)
        self.output = VivitOutput(config)
        self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states, head_mask=None, output_attentions=False):
        self_attention_outputs = self.attention(
            # in Vivit, layernorm is applied before self-attention
            self.layernorm_before(hidden_states),
            head_mask,
            output_attentions=output_attentions,
        )
        attention_output = self_attention_outputs[0]
        # add self attentions if we output attention weights
        outputs = self_attention_outputs[1:]

        # first residual connection
        hidden_states = attention_output + hidden_states

        # in Vivit, layernorm is also applied after self-attention
        layer_output = self.layernorm_after(hidden_states)
        layer_output = self.intermediate(layer_output)

        # second residual connection is done here
        layer_output = self.output(layer_output, hidden_states)

        outputs = (layer_output,) + outputs

        return outputs


class VivitEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([VivitLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states,
        head_mask=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    layer_head_mask,
                    output_attentions,
                )
            else:
                layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions)

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )


class VivitPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # We "pool" the model by simply taking the hidden state corresponding to the first token.
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output


class VivitPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = VivitConfig
    base_model_prefix = "vivit"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
    _no_split_modules = []
    _supports_sdpa = True
    _supports_flash_attn_2 = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv3d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, VivitEmbeddings):
            module.cls_token.data.zero_()
            module.position_embeddings.data.zero_()


VIVIT_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`VivitConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

VIVIT_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`VivitImageProcessor`]. See
            [`VivitImageProcessor.preprocess`] for details.

        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        interpolate_pos_encoding (`bool`, *optional*, `False`):
            Whether to interpolate the pre-trained position encodings.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare ViViT Transformer model outputting raw hidden-states without any specific head on top.",
    VIVIT_START_DOCSTRING,
)
class VivitModel(VivitPreTrainedModel):
    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config

        self.embeddings = VivitEmbeddings(config)
        self.encoder = VivitEncoder(config)

        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.pooler = VivitPooler(config) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model.

        Args:
            heads_to_prune:
                dict of {layer_num: list of heads to prune in this layer}
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(VIVIT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.FloatTensor], BaseModelOutputWithPooling]:
        r"""
        Returns:

        Examples:

        ```python
        >>> import av
        >>> import numpy as np

        >>> from transformers import VivitImageProcessor, VivitModel
        >>> from huggingface_hub import hf_hub_download

        >>> np.random.seed(0)


        >>> def read_video_pyav(container, indices):
        ...     '''
        ...     Decode the video with PyAV decoder.
        ...     Args:
        ...         container (`av.container.input.InputContainer`): PyAV container.
        ...         indices (`List[int]`): List of frame indices to decode.
        ...     Returns:
        ...         result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
        ...     '''
        ...     frames = []
        ...     container.seek(0)
        ...     start_index = indices[0]
        ...     end_index = indices[-1]
        ...     for i, frame in enumerate(container.decode(video=0)):
        ...         if i > end_index:
        ...             break
        ...         if i >= start_index and i in indices:
        ...             frames.append(frame)
        ...     return np.stack([x.to_ndarray(format="rgb24") for x in frames])


        >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
        ...     '''
        ...     Sample a given number of frame indices from the video.
        ...     Args:
        ...         clip_len (`int`): Total number of frames to sample.
        ...         frame_sample_rate (`int`): Sample every n-th frame.
        ...         seg_len (`int`): Maximum allowed index of sample's last frame.
        ...     Returns:
        ...         indices (`List[int]`): List of sampled frame indices
        ...     '''
        ...     converted_len = int(clip_len * frame_sample_rate)
        ...     end_idx = np.random.randint(converted_len, seg_len)
        ...     start_idx = end_idx - converted_len
        ...     indices = np.linspace(start_idx, end_idx, num=clip_len)
        ...     indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
        ...     return indices


        >>> # video clip consists of 300 frames (10 seconds at 30 FPS)
        >>> file_path = hf_hub_download(
        ...     repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
        ... )
        >>> container = av.open(file_path)

        >>> # sample 32 frames
        >>> indices = sample_frame_indices(clip_len=32, frame_sample_rate=1, seg_len=container.streams.video[0].frames)
        >>> video = read_video_pyav(container=container, indices=indices)

        >>> image_processor = VivitImageProcessor.from_pretrained("google/vivit-b-16x2-kinetics400")
        >>> model = VivitModel.from_pretrained("google/vivit-b-16x2-kinetics400")

        >>> # prepare video for the model
        >>> inputs = image_processor(list(video), return_tensors="pt")

        >>> # forward pass
        >>> outputs = model(**inputs)
        >>> last_hidden_states = outputs.last_hidden_state
        >>> list(last_hidden_states.shape)
        [1, 3137, 768]
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        # Prepare head mask if needed: 1.0 in head_mask indicates we keep the head.
        # head_mask has shape [num_heads] or [num_hidden_layers x num_heads] and is
        # converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)

        encoder_outputs = self.encoder(
            embedding_output,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        sequence_output = self.layernorm(sequence_output)
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


@add_start_docstrings(
    """ViViT Transformer model with a video classification head on top (a linear layer on top of the final hidden state of the
[CLS] token) e.g. for Kinetics-400.

    <Tip>

        Note that it's possible to fine-tune ViT on higher resolution images than the ones it has been trained on, by
        setting `interpolate_pos_encoding` to `True` in the forward of the model. This will interpolate the pre-trained
        position embeddings to the higher resolution.

    </Tip>
    """,
    VIVIT_START_DOCSTRING,
)
class VivitForVideoClassification(VivitPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.num_labels = config.num_labels
        self.vivit = VivitModel(config, add_pooling_layer=False)

        # Classifier head
        self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(VIVIT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=ImageClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.FloatTensor], ImageClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).

        Returns:

        Examples:

        ```python
        >>> import av
        >>> import numpy as np
        >>> import torch

        >>> from transformers import VivitImageProcessor, VivitForVideoClassification
        >>> from huggingface_hub import hf_hub_download

        >>> np.random.seed(0)


        >>> def read_video_pyav(container, indices):
        ...     '''
        ...     Decode the video with PyAV decoder.
        ...     Args:
        ...         container (`av.container.input.InputContainer`): PyAV container.
        ...         indices (`List[int]`): List of frame indices to decode.
        ...     Returns:
        ...         result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
        ...     '''
        ...     frames = []
        ...     container.seek(0)
        ...     start_index = indices[0]
        ...     end_index = indices[-1]
        ...     for i, frame in enumerate(container.decode(video=0)):
        ...         if i > end_index:
        ...             break
        ...         if i >= start_index and i in indices:
        ...             frames.append(frame)
        ...     return np.stack([x.to_ndarray(format="rgb24") for x in frames])


        >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
        ...     '''
        ...     Sample a given number of frame indices from the video.
        ...     Args:
        ...         clip_len (`int`): Total number of frames to sample.
        ...         frame_sample_rate (`int`): Sample every n-th frame.
        ...         seg_len (`int`): Maximum allowed index of sample's last frame.
        ...     Returns:
        ...         indices (`List[int]`): List of sampled frame indices
        ...     '''
        ...     converted_len = int(clip_len * frame_sample_rate)
        ...     end_idx = np.random.randint(converted_len, seg_len)
        ...     start_idx = end_idx - converted_len
        ...     indices = np.linspace(start_idx, end_idx, num=clip_len)
        ...     indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
        ...     return indices


        >>> # video clip consists of 300 frames (10 seconds at 30 FPS)
        >>> file_path = hf_hub_download(
        ...     repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
        ... )
        >>> container = av.open(file_path)

        >>> # sample 32 frames
        >>> indices = sample_frame_indices(clip_len=32, frame_sample_rate=4, seg_len=container.streams.video[0].frames)
        >>> video = read_video_pyav(container=container, indices=indices)

        >>> image_processor = VivitImageProcessor.from_pretrained("google/vivit-b-16x2-kinetics400")
        >>> model = VivitForVideoClassification.from_pretrained("google/vivit-b-16x2-kinetics400")

        >>> inputs = image_processor(list(video), return_tensors="pt")

        >>> with torch.no_grad():
        ...     outputs = model(**inputs)
        ...     logits = outputs.logits

        >>> # model predicts one of the 400 Kinetics-400 classes
        >>> predicted_label = logits.argmax(-1).item()
        >>> print(model.config.id2label[predicted_label])
        LABEL_116
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.vivit(
            pixel_values,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        # classify on the final hidden state of the [CLS] token
        logits = self.classifier(sequence_output[:, 0, :])

        loss = None
        if labels is not None:
            if self.num_labels == 1:
                #  We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = ["VivitModel", "VivitPreTrainedModel", "VivitForVideoClassification"]