
from abc import abstractmethod
from pathlib import Path
from typing import List, Union

import torch
import torch.nn as nn
from PIL import Image

from ultralytics.utils import checks
from ultralytics.utils.torch_utils import smart_inference_mode

try:
    import clip
except ImportError:
    checks.check_requirements("git+https://github.com/ultralytics/CLIP.git")
    import clip


class TextModel(nn.Module):
    """
    Abstract base class for text encoding models.

    This class defines the interface for text encoding models used in vision-language tasks. Subclasses must implement
    the tokenize and encode_text methods to provide text tokenization and encoding functionality.

    Methods:
        tokenize: Convert input texts to tokens for model processing.
        encode_text: Encode tokenized texts into normalized feature vectors.
    """

    def __init__(self):
        """Initialize the TextModel base class."""
        super().__init__()

    @abstractmethod
    def tokenize(self, texts):
        """Convert input texts to tokens for model processing."""
        pass

    @abstractmethod
    def encode_text(self, texts, dtype):
        """Encode tokenized texts into normalized feature vectors."""
        pass


class CLIP(TextModel):
    """
    Implements OpenAI's CLIP (Contrastive Language-Image Pre-training) text encoder.

    This class provides a text encoder based on OpenAI's CLIP model, which can convert text into feature vectors
    that are aligned with corresponding image features in a shared embedding space.

    Attributes:
        model (clip.model.CLIP): The loaded CLIP model.
        device (torch.device): Device where the model is loaded.

    Methods:
        tokenize: Convert input texts to CLIP tokens.
        encode_text: Encode tokenized texts into normalized feature vectors.

    Examples:
        >>> import torch
        >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        >>> clip_model = CLIP(size="ViT-B/32", device=device)
        >>> tokens = clip_model.tokenize(["a photo of a cat", "a photo of a dog"])
        >>> text_features = clip_model.encode_text(tokens)
        >>> print(text_features.shape)
    """

    def __init__(self, size: str, device: torch.device) -> None:
        """
        Initialize the CLIP text encoder.

        This class implements the TextModel interface using OpenAI's CLIP model for text encoding. It loads
        a pre-trained CLIP model of the specified size and prepares it for text encoding tasks.

        Args:
            size (str): Model size identifier (e.g., 'ViT-B/32').
            device (torch.device): Device to load the model on.

        Examples:
            >>> import torch
            >>> clip_model = CLIP("ViT-B/32", device=torch.device("cuda:0"))
            >>> tokens = clip_model.tokenize(["a photo of a cat", "a photo of a dog"])
            >>> text_features = clip_model.encode_text(tokens)
        """
        super().__init__()
        self.model, self.image_preprocess = clip.load(size, device=device)
        self.to(device)
        self.device = device
        self.eval()

    def tokenize(self, texts: Union[str, List[str]]) -> torch.Tensor:
        """
        Convert input texts to CLIP tokens.

        Args:
            texts (str | List[str]): Input text or list of texts to tokenize.

        Returns:
            (torch.Tensor): Tokenized text tensor with shape (batch_size, context_length) ready for model processing.

        Examples:
            >>> model = CLIP("ViT-B/32", device="cpu")
            >>> tokens = model.tokenize("a photo of a cat")
            >>> print(tokens.shape)  # torch.Size([1, 77])
        """
        return clip.tokenize(texts).to(self.device)

    @smart_inference_mode()
    def encode_text(self, texts: torch.Tensor, dtype: torch.dtype = torch.float32) -> torch.Tensor:
        """
        Encode tokenized texts into normalized feature vectors.

        This method processes tokenized text inputs through the CLIP model to generate feature vectors, which are then
        normalized to unit length. These normalized vectors can be used for text-image similarity comparisons.

        Args:
            texts (torch.Tensor): Tokenized text inputs, typically created using the tokenize() method.
            dtype (torch.dtype, optional): Data type for output features.

        Returns:
            (torch.Tensor): Normalized text feature vectors with unit length (L2 norm = 1).

        Examples:
            >>> clip_model = CLIP("ViT-B/32", device="cuda")
            >>> tokens = clip_model.tokenize(["a photo of a cat", "a photo of a dog"])
            >>> features = clip_model.encode_text(tokens)
            >>> features.shape
            torch.Size([2, 512])
        """
        txt_feats = self.model.encode_text(texts).to(dtype)
        txt_feats = txt_feats / txt_feats.norm(p=2, dim=-1, keepdim=True)
        return txt_feats

    @smart_inference_mode()
    def encode_image(self, image: Union[Image.Image, torch.Tensor], dtype: torch.dtype = torch.float32) -> torch.Tensor:
        """
        Encode preprocessed images into normalized feature vectors.

        This method processes preprocessed image inputs through the CLIP model to generate feature vectors, which are
        then normalized to unit length. These normalized vectors can be used for text-image similarity comparisons.

        Args:
            image (PIL.Image | torch.Tensor): Preprocessed image input. If a PIL Image is provided, it will be
                converted to a tensor using the model's image preprocessing function.
            dtype (torch.dtype, optional): Data type for output features.

        Returns:
            (torch.Tensor): Normalized image feature vectors with unit length (L2 norm = 1).

        Examples:
            >>> from ultralytics.nn.text_model import CLIP
            >>> from PIL import Image
            >>> clip_model = CLIP("ViT-B/32", device="cuda")
            >>> image = Image.open("path/to/image.jpg")
            >>> image_tensor = clip_model.image_preprocess(image).unsqueeze(0).to("cuda")
            >>> features = clip_model.encode_image(image_tensor)
            >>> features.shape
            torch.Size([1, 512])
        """
        if isinstance(image, Image.Image):
            image = self.image_preprocess(image).unsqueeze(0).to(self.device)
        img_feats = self.model.encode_image(image).to(dtype)
        img_feats = img_feats / img_feats.norm(p=2, dim=-1, keepdim=True)
        return img_feats


class MobileCLIP(TextModel):
    """
    Implement Apple's MobileCLIP text encoder for efficient text encoding.

    This class implements the TextModel interface using Apple's MobileCLIP model, providing efficient text encoding
    capabilities for vision-language tasks with reduced computational requirements compared to standard CLIP models.

    Attributes:
        model (mobileclip.model.MobileCLIP): The loaded MobileCLIP model.
        tokenizer (callable): Tokenizer function for processing text inputs.
        device (torch.device): Device where the model is loaded.
        config_size_map (dict): Mapping from size identifiers to model configuration names.

    Methods:
        tokenize: Convert input texts to MobileCLIP tokens.
        encode_text: Encode tokenized texts into normalized feature vectors.

    Examples:
        >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        >>> text_encoder = MobileCLIP(size="s0", device=device)
        >>> tokens = text_encoder.tokenize(["a photo of a cat", "a photo of a dog"])
        >>> features = text_encoder.encode_text(tokens)
    """

    config_size_map = {"s0": "s0", "s1": "s1", "s2": "s2", "b": "b", "blt": "b"}

    def __init__(self, size: str, device: torch.device) -> None:
        """
        Initialize the MobileCLIP text encoder.

        This class implements the TextModel interface using Apple's MobileCLIP model for efficient text encoding.

        Args:
            size (str): Model size identifier (e.g., 's0', 's1', 's2', 'b', 'blt').
            device (torch.device): Device to load the model on.

        Examples:
            >>> import torch
            >>> model = MobileCLIP("s0", device=torch.device("cpu"))
            >>> tokens = model.tokenize(["a photo of a cat", "a photo of a dog"])
            >>> features = model.encode_text(tokens)
        """
        try:
            import warnings

            # Suppress the timm deprecation FutureWarning raised while importing mobileclip
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=FutureWarning)
                import mobileclip
        except ImportError:
            checks.check_requirements("git+https://github.com/ultralytics/mobileclip.git")
            import mobileclip

        super().__init__()
        config = self.config_size_map[size]
        file = f"mobileclip_{size}.pt"
        if not Path(file).is_file():
            from ultralytics import download

            download(f"https://docs-assets.developer.apple.com/ml-research/datasets/mobileclip/{file}")
        self.model = mobileclip.create_model_and_transforms(f"mobileclip_{config}", pretrained=file, device=device)[0]
        self.tokenizer = mobileclip.get_tokenizer(f"mobileclip_{config}")
        self.to(device)
        self.device = device
        self.eval()

    def tokenize(self, texts: List[str]) -> torch.Tensor:
        """
        Convert input texts to MobileCLIP tokens.

        Args:
            texts (List[str]): List of text strings to tokenize.

        Returns:
            (torch.Tensor): Tokenized text inputs with shape (batch_size, sequence_length).

        Examples:
            >>> model = MobileCLIP("s0", "cpu")
            >>> tokens = model.tokenize(["a photo of a cat", "a photo of a dog"])
        """
        return self.tokenizer(texts).to(self.device)

    @smart_inference_mode()
    def encode_text(self, texts: torch.Tensor, dtype: torch.dtype = torch.float32) -> torch.Tensor:
        """
        Encode tokenized texts into normalized feature vectors.

        Args:
            texts (torch.Tensor): Tokenized text inputs.
            dtype (torch.dtype, optional): Data type for output features.

        Returns:
            (torch.Tensor): Normalized text feature vectors with L2 normalization applied.

        Examples:
            >>> model = MobileCLIP("s0", device="cpu")
            >>> tokens = model.tokenize(["a photo of a cat", "a photo of a dog"])
            >>> features = model.encode_text(tokens)
            >>> features.shape
            torch.Size([2, 512])  # Actual dimension depends on model size
        """
        text_features = self.model.encode_text(texts).to(dtype)
        text_features /= text_features.norm(p=2, dim=-1, keepdim=True)
        return text_features


class MobileCLIPTS(TextModel):
    """
    Load a TorchScript traced version of MobileCLIP.

    This class implements the TextModel interface using Apple's MobileCLIP model in TorchScript format, providing
    efficient text encoding capabilities for vision-language tasks with optimized inference performance.

    Attributes:
        encoder (torch.jit.ScriptModule): The loaded TorchScript MobileCLIP text encoder.
        tokenizer (callable): Tokenizer function for processing text inputs.
        device (torch.device): Device where the model is loaded.

    Methods:
        tokenize: Convert input texts to MobileCLIP tokens.
        encode_text: Encode tokenized texts into normalized feature vectors.

    Examples:
        >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        >>> text_encoder = MobileCLIPTS(device=device)
        >>> tokens = text_encoder.tokenize(["a photo of a cat", "a photo of a dog"])
        >>> features = text_encoder.encode_text(tokens)
    """

    def __init__(self, device: torch.device):
        """
        Initialize the MobileCLIP TorchScript text encoder.

        This class implements the TextModel interface using Apple's MobileCLIP model in TorchScript format for
        efficient text encoding with optimized inference performance.

        Args:
            device (torch.device): Device to load the model on.

        Examples:
            >>> model = MobileCLIPTS(device=torch.device("cpu"))
            >>> tokens = model.tokenize(["a photo of a cat", "a photo of a dog"])
            >>> features = model.encode_text(tokens)
        """
        super().__init__()
        from ultralytics.utils.downloads import attempt_download_asset

        self.encoder = torch.jit.load(attempt_download_asset("mobileclip_blt.ts"), map_location=device)
        self.tokenizer = clip.clip.tokenize
        self.device = device

    def tokenize(self, texts: List[str]) -> torch.Tensor:
        """
        Convert input texts to MobileCLIP tokens.

        Args:
            texts (List[str]): List of text strings to tokenize.

        Returns:
            (torch.Tensor): Tokenized text inputs with shape (batch_size, sequence_length).

        Examples:
            >>> model = MobileCLIPTS("cpu")
            >>> tokens = model.tokenize(["a photo of a cat", "a photo of a dog"])
        """
        return self.tokenizer(texts).to(self.device)

    @smart_inference_mode()
    def encode_text(self, texts: torch.Tensor, dtype: torch.dtype = torch.float32) -> torch.Tensor:
        """
        Encode tokenized texts into normalized feature vectors.

        Args:
            texts (torch.Tensor): Tokenized text inputs.
            dtype (torch.dtype, optional): Data type for output features.

        Returns:
            (torch.Tensor): Normalized text feature vectors with L2 normalization applied.

        Examples:
            >>> model = MobileCLIPTS(device="cpu")
            >>> tokens = model.tokenize(["a photo of a cat", "a photo of a dog"])
            >>> features = model.encode_text(tokens)
            >>> features.shape
            torch.Size([2, 512])  # Actual dimension depends on model size
        """
        # NOTE: no additional normalization is needed here; it is embedded in the TorchScript model.
        return self.encoder(texts).to(dtype)


def build_text_model(variant: str, device: torch.device = None) -> TextModel:
    """
    Build a text encoding model based on the specified variant.

    Args:
        variant (str): Model variant in format "base:size" (e.g., "clip:ViT-B/32" or "mobileclip:s0").
        device (torch.device, optional): Device to load the model on.

    Returns:
        (TextModel): Instantiated text encoding model.

    Examples:
        >>> model = build_text_model("clip:ViT-B/32", device=torch.device("cuda"))
        >>> model = build_text_model("mobileclip:s0", device=torch.device("cpu"))
    """
    base, size = variant.split(":")
    if base == "clip":
        return CLIP(size, device)
    elif base == "mobileclip":
        return MobileCLIPTS(device)
    else:
        raise ValueError(f"Unrecognized base model: '{base}'. Supported base models: 'clip', 'mobileclip'.")