from typing import Optional, Dict, List, Union, Any

from pinecone.core.openapi.inference.api.inference_api import AsyncioInferenceApi

from ..models import EmbeddingsList, RerankResult
from .inference_request_builder import (
    InferenceRequestBuilder,
    EmbedModel as EmbedModelEnum,
    RerankModel as RerankModelEnum,
)


class AsyncioInference:
    """
    The `AsyncioInference` class configures and uses the Pinecone Inference API to generate embeddings and
    rank documents.

    This class is generally not instantiated directly, but rather accessed through a parent `Pinecone` client
    object that is responsible for managing shared configurations.

    ```python
    from pinecone import PineconeAsyncio

    pc = PineconeAsyncio()
    embeddings = await pc.inference.embed(
        model="text-embedding-3-small",
        inputs=["Hello, world!"],
        parameters={"input_type": "passage", "truncate": "END"}
    )
    ```
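
    Reranking is accessed the same way. A minimal sketch, reusing the `pc` client from
    the example above and the model named in the `rerank` docstring below (the query,
    documents, and `top_n` values here are illustrative):

    ```python
    ranked = await pc.inference.rerank(
        model="bge-reranker-v2-m3",
        query="Tell me about tech companies",
        documents=["Software is still eating the world."],
        top_n=1,
    )
    ```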

    :param config: A `pinecone.config.Config` object, configured and built in the Pinecone class.
    :type config: `pinecone.config.Config`, required
    """

    EmbedModel = EmbedModelEnum
    RerankModel = RerankModelEnum

    def __init__(self, api_client, **kwargs) -> None:
        self.api_client = api_client
        # Name-mangled to _AsyncioInference__inference_api; wraps the generated
        # OpenAPI client for the Inference API.
        self.__inference_api = AsyncioInferenceApi(api_client)

    async def embed(
        self,
        model: str,
        inputs: Union[str, List[Dict], List[str]],
        parameters: Optional[Dict[str, Any]] = None,
    ) -> EmbeddingsList:
        """
        Generates embeddings for the provided inputs using the specified model and (optional) parameters.

        :param model: The model to use for generating embeddings.
        :type model: str, required

        :param inputs: A list of items to generate embeddings for.
        :type inputs: list, required

        :param parameters: A dictionary of parameters to use when generating embeddings.
        :type parameters: dict, optional

        :return: An `EmbeddingsList` object with keys `data`, `model`, and `usage`. The `data` key contains
        `len(inputs)` embeddings, each of type `Embedding`. Precision of returned embeddings is either
        float16 or float32, with float32 being the default. The `model` key is the model used to generate
        the embeddings, and the `usage` key contains the total number of tokens used at request-time.

        Example:
        >>> inputs = ["Who created the first computer?"]
        >>> outputs = await pc.inference.embed(model="multilingual-e5-large", inputs=inputs, parameters={"input_type": "passage", "truncate": "END"})
        >>> print(outputs)
        EmbeddingsList(
            model='multilingual-e5-large',
            data=[
                {'values': [0.1, ...., 0.2]},
              ],
            usage={'total_tokens': 6}
        )
        """
        # Build the request model, then call the async generated API client.
        request_body = InferenceRequestBuilder.embed_request(
            model=model, inputs=inputs, parameters=parameters
        )
        resp = await self.__inference_api.embed(embed_request=request_body)
        return EmbeddingsList(resp)

    async def rerank(
        self,
        model: str,
        query: str,
        documents: Union[List[str], List[Dict[str, Any]]],
        rank_fields: List[str] = ["text"],
        return_documents: bool = True,
        top_n: Optional[int] = None,
        parameters: Optional[Dict[str, Any]] = None,
    ) -> RerankResult:
        """
        Rerank documents with associated relevance scores that represent the relevance of each document
        to the provided query using the specified model.

        :param model: The model to use for reranking.
        :type model: str, required

        :param query: The query to compare with documents.
        :type query: str, required

        :param documents: A list of documents or strings to rank.
        :type documents: list, required

        :param rank_fields: A list of document fields to use for ranking. Defaults to ["text"].
        :type rank_fields: list, optional

        :param return_documents: Whether to include the documents in the response. Defaults to True.
        :type return_documents: bool, optional

        :param top_n: How many documents to return. Defaults to len(documents).
        :type top_n: int, optional

        :param parameters: A dictionary of parameters to use when ranking documents.
        :type parameters: dict, optional

        :return: A `RerankResult` object with keys `data` and `usage`. The `data` key contains a list of
        up to `top_n` documents, each of type `Document`, sorted in order of relevance with the first
        being the most relevant. The `index` field can be used to locate each document relative to the
        list of documents specified in the request. Each document carries a `score` key representing
        how closely it relates to the query.

        Example:
        >>> result = await pc.inference.rerank(
                model="bge-reranker-v2-m3",
                query="Tell me about tech companies",
                documents=[
                    "Apple is a popular fruit known for its sweetness and crisp texture.",
                    "Software is still eating the world.",
                    "Many people enjoy eating apples as a healthy snack.",
                    "Acme Inc. has revolutionized the tech industry with its sleek designs and user-friendly interfaces.",
                    "An apple a day keeps the doctor away, as the saying goes.",
                ],
                top_n=2,
                return_documents=True,
            )
        >>> print(result)
        RerankResult(
          model='bge-reranker-v2-m3',
          data=[
            { index=3, score=0.020980744,
              document={text="Acme Inc. has rev..."} },
            { index=1, score=0.00034015716,
              document={text="Software is still..."} }
          ],
          usage={'rerank_units': 1}
        )
        """
        # Build the rerank request model, then call the async generated API client.
        rerank_request = InferenceRequestBuilder.rerank(
            model=model,
            query=query,
            documents=documents,
            rank_fields=rank_fields,
            return_documents=return_documents,
            top_n=top_n,
            parameters=parameters,
        )
        resp = await self.__inference_api.rerank(rerank_request=rerank_request)
        return RerankResult(resp)