
    %	&h?                         d Z ddlZddlmZmZmZmZmZ ddlZ	ddl
mZ ddlmZ ddlmZmZmZmZmZ ddlmZmZ dd	lmZmZmZmZ erd
dlmZmZ  G d ded      Z G d ded      Z  G d de      Z!dgZ"y)z&
Image/Text processor class for OWLv2
    N)TYPE_CHECKINGListOptionalTupleUnion   )BatchFeature)
ImageInput)ImagesKwargsProcessingKwargsProcessorMixinUnpack!_validate_images_text_input_order)PreTokenizedInput	TextInput)
TensorTypeis_flax_availableis_tf_availableis_torch_available   )%Owlv2ImageGuidedObjectDetectionOutputOwlv2ObjectDetectionOutputc                       e Zd ZU ee   ed<   y)Owlv2ImagesKwargsquery_imagesN)__name__
__module____qualname__r   r
   __annotations__     /var/www/pru.catia.catastroantioquia-mas.com/valormas/lib/python3.12/site-packages/transformers/models/owlv2/processing_owlv2.pyr   r   )   s    :&&r!   r   F)totalc                   ,    e Zd ZU eed<   ddii ddidZy)Owlv2ProcessorKwargsimages_kwargspadding
max_lengthreturn_tensorsnp)text_kwargsr&   common_kwargsN)r   r   r   r   r   	_defaultsr    r!   r"   r%   r%   -   s*    $$ |
 d
Ir!   r%   c                       e Zd ZdZddgZdZdZdgZ fdZ	 	 dddd	d
e	e
   deeeee   ee   f   dee   defdZd Z	 	 	 ddddede	eeee   f      de	eee         fdZ	 	 	 ddddedede	eeee   f      fdZd Zd Z xZS )Owlv2Processora  
    Constructs an Owlv2 processor which wraps [`Owlv2ImageProcessor`] and [`CLIPTokenizer`]/[`CLIPTokenizerFast`] into
    a single processor that inherits both the image processor and tokenizer functionalities. See the
    [`~Owlv2Processor.__call__`] and [`~Owlv2Processor.decode`] for more information.

    Args:
        image_processor ([`Owlv2ImageProcessor`]):
            The image processor is a required input.
        tokenizer ([`CLIPTokenizer`, `CLIPTokenizerFast`]):
            The tokenizer is a required input.
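
    Example (an illustrative sketch rather than a guaranteed-output doctest; the checkpoint name below is one
    published OWLv2 checkpoint, the sample image is a standard COCO test image, and actual scores depend on the
    model weights):

    ```python
    >>> import requests
    >>> import torch
    >>> from PIL import Image
    >>> from transformers import Owlv2ForObjectDetection, Owlv2Processor

    >>> processor = Owlv2Processor.from_pretrained("google/owlv2-base-patch16-ensemble")
    >>> model = Owlv2ForObjectDetection.from_pretrained("google/owlv2-base-patch16-ensemble")

    >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    >>> image = Image.open(requests.get(url, stream=True).raw)
    >>> text_labels = [["a photo of a cat", "a photo of a dog"]]
    >>> inputs = processor(text=text_labels, images=image, return_tensors="pt")
    >>> with torch.no_grad():
    ...     outputs = model(**inputs)

    >>> # convert raw logits and boxes into per-image detections, resized to the original image size
    >>> target_sizes = torch.tensor([(image.height, image.width)])
    >>> results = processor.post_process_grounded_object_detection(
    ...     outputs=outputs, threshold=0.1, target_sizes=target_sizes, text_labels=text_labels
    ... )
    ```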
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Owlv2ImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    optional_call_args = ["query_images"]

    def __init__(self, image_processor, tokenizer, **kwargs):
        super().__init__(image_processor, tokenizer)

    # The `*args` slot only carries the optional legacy positional `query_images`
    # argument; it is validated by `prepare_and_validate_optional_call_args` below.
    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        *args,
        audio=None,
        videos=None,
        **kwargs: Unpack[Owlv2ProcessorKwargs],
    ) -> BatchFeature:
        """
        Main method to prepare for the model one or several text(s) and image(s). This method forwards the `text` and
        `kwargs` arguments to CLIPTokenizerFast's [`~CLIPTokenizerFast.__call__`] if `text` is not `None` to encode
        the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
        Owlv2ImageProcessor's [`~Owlv2ImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
        of the above two methods for more information.

        Args:
            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`,
            `List[torch.Tensor]`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. Both channels-first and channels-last formats are supported.
            text (`str`, `List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as a list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            query_images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
                The query image to be prepared; one query image is expected per target image to be queried. Each image
                can be a PIL image, NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image
                should be of shape (C, H, W), where C is a number of channels, H and W are image height and width.
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Acceptable values are:
                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return NumPy `np.ndarray` objects.
                - `'jax'`: Return JAX `jnp.ndarray` objects.

        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:
            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
              `None`).
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
            - **query_pixel_values** -- Pixel values of the query images to be fed to a model. Returned when `query_images` is not `None`.
        """
        output_kwargs = self._merge_kwargs(
            Owlv2ProcessorKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
            **self.prepare_and_validate_optional_call_args(*args),
        )
        query_images = output_kwargs["images_kwargs"].pop("query_images", None)
        return_tensors = output_kwargs["common_kwargs"]["return_tensors"]

        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        # Check that images and text are not swapped (kept for backward compatibility)
        images, text = _validate_images_text_input_order(images, text)

        data = {}
        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, **output_kwargs["text_kwargs"])]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across all images in the batch
                max_num_queries = max([len(text_single) for text_single in text])

                # Pad all batch samples to the max number of text queries
                for text_single in text:
                    if len(text_single) != max_num_queries:
                        text_single = text_single + [" "] * (max_num_queries - len(text_single))

                    encoding = self.tokenizer(text_single, **output_kwargs["text_kwargs"])
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            data["input_ids"] = input_ids
            data["attention_mask"] = attention_mask

        if query_images is not None:
            # Query images are preprocessed exactly like target images
            query_pixel_values = self.image_processor(query_images, **output_kwargs["images_kwargs"]).pixel_values
            data["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, **output_kwargs["images_kwargs"])
            data["pixel_values"] = image_features.pixel_values

        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_object_detection(self, *args, **kwargs):
        """
        This method forwards all its arguments to [`Owlv2ImageProcessor.post_process_object_detection`]. Please refer
        to the docstring of this method for more information.
        """
        warnings.warn(
            "`post_process_object_detection` method is deprecated for Owlv2Processor and will be removed in v5. "
            "Use `post_process_grounded_object_detection` instead.",
            FutureWarning,
        )
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_grounded_object_detection(
        self,
        outputs: "Owlv2ObjectDetectionOutput",
        threshold: float = 0.1,
        target_sizes: Optional[Union[TensorType, List[Tuple]]] = None,
        text_labels: Optional[List[List[str]]] = None,
    ):
        """
        Converts the raw output of [`Owlv2ForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,
        bottom_right_x, bottom_right_y) format.

        Args:
            outputs ([`Owlv2ObjectDetectionOutput`]):
                Raw outputs of the model.
            threshold (`float`, *optional*, defaults to 0.1):
                Score threshold to keep object detection predictions.
            target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*):
                Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size
                `(height, width)` of each image in the batch. If unset, predictions will not be resized.
            text_labels (`List[List[str]]`, *optional*):
                List of lists of text labels for each image in the batch. If unset, "text_labels" in output will be
                set to `None`.

        Returns:
            `List[Dict]`: A list of dictionaries, each dictionary containing the following keys:
            - "scores": The confidence scores for each predicted box on the image.
            - "labels": Indexes of the classes predicted by the model on the image.
            - "boxes": Image bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format.
            - "text_labels": The text labels for each predicted bounding box on the image.
        """
        output = self.image_processor.post_process_object_detection(
            outputs=outputs, threshold=threshold, target_sizes=target_sizes
        )

        if text_labels is not None and len(text_labels) != len(output):
            raise ValueError("Make sure that you pass in as many lists of text labels as images")

        # Attach the text label that corresponds to each predicted class index
        if text_labels is not None:
            for image_output, image_text_labels in zip(output, text_labels):
                object_text_labels = [image_text_labels[i] for i in image_output["labels"]]
                image_output["text_labels"] = object_text_labels
        # Without text queries there is nothing to attach, so "text_labels" is set to None
        else:
            for image_output in output:
                image_output["text_labels"] = None

        return output

    def post_process_image_guided_detection(
        self,
        outputs: "Owlv2ImageGuidedObjectDetectionOutput",
        threshold: float = 0.0,
        nms_threshold: float = 0.3,
        target_sizes: Optional[Union[TensorType, List[Tuple]]] = None,
    ):
        """
        Converts the output of [`Owlv2ForObjectDetection.image_guided_detection`] into the format expected by the COCO
        API.

        Args:
            outputs ([`Owlv2ImageGuidedObjectDetectionOutput`]):
                Raw outputs of the model.
            threshold (`float`, *optional*, defaults to 0.0):
                Minimum confidence threshold to use to filter out predicted boxes.
            nms_threshold (`float`, *optional*, defaults to 0.3):
                IoU threshold for non-maximum suppression of overlapping boxes.
            target_sizes (`torch.Tensor`, *optional*):
                Tensor of shape (batch_size, 2) where each entry is the (height, width) of the corresponding image in
                the batch. If set, predicted normalized bounding boxes are rescaled to the target sizes. If left to
                None, predictions will not be unnormalized.

        Returns:
            `List[Dict]`: A list of dictionaries, each dictionary containing the following keys:
            - "scores": The confidence scores for each predicted box on the image.
            - "boxes": Image bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format.
            - "labels": Set to `None`.
        """
        return self.image_processor.post_process_image_guided_detection(
            outputs=outputs, threshold=threshold, nms_threshold=nms_threshold, target_sizes=target_sizes
        )

    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
        refer to the docstring of this method for more information.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
        the docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)


__all__ = ["Owlv2Processor"]
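
# A minimal usage sketch for image-guided (one-shot) detection, kept as a comment for
# illustration; the checkpoint name is one published OWLv2 checkpoint, and `image` /
# `query_image` are PIL images supplied by the caller:
#
#   import torch
#   from transformers import Owlv2ForObjectDetection, Owlv2Processor
#
#   processor = Owlv2Processor.from_pretrained("google/owlv2-base-patch16-ensemble")
#   model = Owlv2ForObjectDetection.from_pretrained("google/owlv2-base-patch16-ensemble")
#
#   inputs = processor(images=image, query_images=query_image, return_tensors="pt")
#   with torch.no_grad():
#       outputs = model.image_guided_detection(**inputs)
#
#   target_sizes = torch.tensor([(image.height, image.width)])
#   results = processor.post_process_image_guided_detection(outputs=outputs, target_sizes=target_sizes)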