from functools import partial
from typing import Callable, List, Optional, Tuple, Union

import torch
from torch import nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache, StaticCache
from ...generation import GenerationMixin
from ...modeling_attn_mask_utils import AttentionMaskConverter
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import (
    LossKwargs,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    can_return_tuple,
    is_torch_flex_attn_available,
    logging,
    replace_return_docstrings,
)
from ...utils.deprecation import deprecate_kwarg
from .configuration_cohere import CohereConfig


if is_torch_flex_attn_available():
    from torch.nn.attention.flex_attention import BlockMask

    from ...integrations.flex_attention import make_flex_block_causal_mask


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "CohereConfig"


class CohereLayerNorm(nn.Module):
    def __init__(self, hidden_size=None, eps=1e-5, bias=False):
        """The hidden size can be a tuple or an int. The tuple is used for QKNorm to normalize across head_dim"""
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        mean = hidden_states.mean(-1, keepdim=True)
        variance = (hidden_states - mean).pow(2).mean(-1, keepdim=True)
        hidden_states = (hidden_states - mean) * torch.rsqrt(variance + self.variance_epsilon)
        hidden_states = self.weight.to(torch.float32) * hidden_states
        return hidden_states.to(input_dtype)


class CohereRotaryEmbedding(nn.Module):
    def __init__(self, config: CohereConfig, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.repeat_interleave(freqs, 2, dim=-1)  # diff from Llama: we interleave() instead of cat()
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


class CohereMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return down_proj


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


def rotate_half(x):
    # Split and rotate interleaved (even, odd) channel pairs; note this differs from the
    # half-split rotation used by Llama.
    x1 = x[..., ::2]
    x2 = x[..., 1::2]
    rot_x = torch.stack([-x2, x1], dim=-1).flatten(-2)
    return rot_x


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    dtype = q.dtype
    q = q.float()
    k = k.float()
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed.to(dtype=dtype), k_embed.to(dtype=dtype)
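

# Editorial note: a hedged demonstration of the interleaved rotary convention implemented
# by `rotate_half`/`apply_rotary_pos_emb` above (`_interleaved_rope_demo` is an
# illustrative addition, not upstream code; the base of 10000 is an assumption made for
# the example). Cohere builds cos/sin tables with `torch.repeat_interleave` and rotates
# (even, odd) channel pairs, unlike Llama, which rotates the two halves of the head
# dimension; being a rotation, it leaves vector norms unchanged.
def _interleaved_rope_demo() -> None:
    batch, heads, seq, head_dim = 1, 2, 4, 8
    q = torch.randn(batch, heads, seq, head_dim)
    k = torch.randn(batch, heads, seq, head_dim)
    inv_freq = 1.0 / (10000 ** (torch.arange(0, head_dim, 2).float() / head_dim))
    freqs = torch.outer(torch.arange(seq).float(), inv_freq)  # (seq, head_dim // 2)
    emb = torch.repeat_interleave(freqs, 2, dim=-1)  # interleave, not cat: matches rotate_half
    cos, sin = emb.cos()[None, :, :], emb.sin()[None, :, :]  # (1, seq, head_dim)
    q_rot, _ = apply_rotary_pos_emb(q, k, cos, sin)
    torch.testing.assert_close(q_rot.norm(dim=-1), q.norm(dim=-1))  # norms preserved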


class CohereAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: CohereConfig, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True

        self.q_proj = nn.Linear(
            config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
        )
        self.k_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.v_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.o_proj = nn.Linear(
            config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
        )
        self.use_qk_norm = config.use_qk_norm
        if self.use_qk_norm:
            self.q_norm = CohereLayerNorm(
                hidden_size=(config.num_attention_heads, self.head_dim), eps=config.layer_norm_eps
            )
            self.k_norm = CohereLayerNorm(
                hidden_size=(config.num_key_value_heads, self.head_dim), eps=config.layer_norm_eps
            )

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape)
        key_states = self.k_proj(hidden_states).view(hidden_shape)
        value_states = self.v_proj(hidden_states).view(hidden_shape)

        if self.use_qk_norm:  # main diff from Llama
            query_states = self.q_norm(query_states)
            key_states = self.k_norm(key_states)

        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`."
                    " Falling back to eager attention. This warning can be removed using the argument"
                    ' `attn_implementation="eager"` when loading the model.'
                )
            else:
                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


class CohereDecoderLayer(nn.Module):
    def __init__(self, config: CohereConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = CohereAttention(config=config, layer_idx=layer_idx)
        self.mlp = CohereMLP(config)
        self.input_layernorm = CohereLayerNorm(hidden_size=(config.hidden_size), eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*):
                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
                query_sequence_length, key_sequence_length)` if default attention is used.
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence
            position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
                Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
                with `head_dim` being the embedding dimension of each attention head.
        """
        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states_attention, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )

        # Fully Connected
        hidden_states_mlp = self.mlp(hidden_states)

        # Add everything together
        hidden_states = residual + hidden_states_attention + hidden_states_mlp

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs
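

# Editorial note: the layer above uses Cohere's *parallel* residual: attention and MLP
# both consume the same `input_layernorm` output, and their results are added to the
# residual in a single step, instead of the sequential attention-then-MLP residuals of
# e.g. Llama. A minimal dataflow sketch with stand-in callables (hypothetical names,
# illustrative only):
def _parallel_residual_sketch(
    hidden_states: torch.Tensor,
    norm: Callable[[torch.Tensor], torch.Tensor],
    attn: Callable[[torch.Tensor], torch.Tensor],
    mlp: Callable[[torch.Tensor], torch.Tensor],
) -> torch.Tensor:
    residual = hidden_states
    normed = norm(hidden_states)  # one shared pre-norm; no post-attention norm
    return residual + attn(normed) + mlp(normed)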
2!2 !HH]3 !#::=NN ")++Gr5   )NNNFFNN)rE   rF   rG   r"   r   r)   r+   r   r   r   r   boolr   r   r   FloatTensorrC   rH   rI   s   @r4   r   r   1  s   l| l l 2637*.,1$)59KO:||: !.: u//0	:
 !: $D>: D>: !!1!12: &eELL%,,,F&GH: -.: 
u  (51B1BEDUDU1U+V"WW	X:r5   r   aK  
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`CohereConfig`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""


@add_start_docstrings(
    "The bare Cohere Model outputting raw hidden-states without any specific head on top.",
    COHERE_START_DOCSTRING,
)
class CoherePreTrainedModel(PreTrainedModel):
    config_class = CohereConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["CohereDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True
    _supports_attention_backend = True

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()


COHERE_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
            `past_key_values`).

            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
            information on the default strategy.

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`.

            [What are position IDs?](../glossary#position-ids)
        past_key_values (`Cache`, *optional*):
            Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
            returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.

            It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).

            If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
            have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
            of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
            Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
            this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
            the complete sequence length.
"""


@add_start_docstrings(
    "The bare Cohere Model outputting raw hidden-states without any specific head on top.",
    COHERE_START_DOCSTRING,
)
class CohereModel(CoherePreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`CohereDecoderLayer`]

    Args:
        config: CohereConfig
    """

    def __init__(self, config: CohereConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [CohereDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = CohereLayerNorm(hidden_size=(config.hidden_size), eps=config.layer_norm_eps)
        self.rotary_emb = CohereRotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @can_return_tuple
    @add_start_docstrings_to_model_forward(COHERE_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
    ) -> BaseModelOutputWithPast:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if not isinstance(past_key_values, (type(None), Cache)):
            raise ValueError("The `past_key_values` should be either a `Cache` object or `None`.")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache()

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = self._update_causal_mask(
            attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
        )

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    partial(decoder_layer.__call__, **flash_attn_kwargs),
                    hidden_states,
                    causal_mask,
                    position_ids,
                    past_key_values,
                    output_attentions,
                    use_cache,
                    cache_position,
                    position_embeddings,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=causal_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_values,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                    cache_position=cache_position,
                    position_embeddings=position_embeddings,
                    **flash_attn_kwargs,
                )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )

    def _update_causal_mask(
        self,
        attention_mask: torch.Tensor,
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: Cache,
        output_attentions: bool = False,
    ):
        if self.config._attn_implementation == "flash_attention_2":
            if attention_mask is not None and (attention_mask == 0.0).any():
                return attention_mask
            return None
        if self.config._attn_implementation == "flex_attention":
            if isinstance(attention_mask, torch.Tensor):
                attention_mask = make_flex_block_causal_mask(attention_mask)
            if isinstance(attention_mask, BlockMask):
                return attention_mask

        # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
        # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
        # to infer the attention mask.
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_static_cache = isinstance(past_key_values, StaticCache)

        # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
        if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions:
            if AttentionMaskConverter._ignore_causal_mask_sdpa(
                attention_mask,
                inputs_embeds=input_tensor,
                past_key_values_length=past_seen_tokens,
                is_training=self.training,
            ):
                return None

        dtype, device = input_tensor.dtype, input_tensor.device
        sequence_length = input_tensor.shape[1]
        if using_static_cache:
            target_length = past_key_values.get_max_cache_shape()
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else past_seen_tokens + sequence_length + 1
            )

        # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
            attention_mask,
            sequence_length=sequence_length,
            target_length=target_length,
            dtype=dtype,
            device=device,
            cache_position=cache_position,
            batch_size=input_tensor.shape[0],
        )

        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type in ["cuda", "xpu"]
            and not output_attentions
        ):
            # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
            # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
            # Details: https://github.com/pytorch/pytorch/issues/110213
            min_dtype = torch.finfo(dtype).min
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask

    @staticmethod
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        device: torch.device,
        cache_position: torch.Tensor,
        batch_size: int,
        **kwargs,
    ):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            device (`torch.device`):
                The device to place the 4D attention mask on.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
            )
            if sequence_length != 1:
                causal_mask = torch.triu(causal_mask, diagonal=1)
            causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
                    causal_mask.device
                )
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )

        return causal_mask


class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...


class CohereForCausalLM(CoherePreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]
    _tp_plan = {"lm_head": "colwise_rep"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config):
        super().__init__(config)
        self.model = CohereModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.logit_scale = config.logit_scale
        self.tie_word_embeddings = config.tie_word_embeddings

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @can_return_tuple
    @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
    @add_start_docstrings_to_model_forward(COHERE_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[KwargsForCausalLM],
    ) -> CausalLMOutputWithPast:
        r"""
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

            logits_to_keep (`int` or `torch.Tensor`, *optional*):
                If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all
                `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
                token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
                If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length dimension.
                This is useful when using packed tensor format (single dimension for batch and sequence length).

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, CohereForCausalLM

        >>> model = CohereForCausalLM.from_pretrained("CohereForAI/c4ai-command-r-v01")
        >>> tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-v01")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])
        logits = logits * self.logit_scale  # main diff from Llama

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = ["CohereForCausalLM", "CohereModel", "CoherePreTrainedModel"]
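
# Editorial usage note (mirrors the docstring example in `CohereForCausalLM.forward`;
# requires the checkpoint to be available locally or from the Hub):
#
#     from transformers import AutoTokenizer, CohereForCausalLM
#
#     model = CohereForCausalLM.from_pretrained("CohereForAI/c4ai-command-r-v01")
#     tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-v01")
#     inputs = tokenizer("Hey, are you conscious? Can you talk to me?", return_tensors="pt")
#     generate_ids = model.generate(inputs.input_ids, max_length=30)
#     print(tokenizer.batch_decode(generate_ids, skip_special_tokens=True)[0])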