
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

import contextlib
import pickle
import re
import types
from copy import deepcopy
from pathlib import Path

import torch

from ultralytics.nn.autobackend import check_class_names
from ultralytics.nn.modules import (
    AIFI,
    C1,
    C2,
    C2PSA,
    C3,
    C3TR,
    ELAN1,
    OBB,
    PSA,
    SPP,
    SPPELAN,
    SPPF,
    A2C2f,
    AConv,
    ADown,
    Bottleneck,
    BottleneckCSP,
    C2f,
    C2fAttn,
    C2fCIB,
    C2fPSA,
    C3Ghost,
    C3k2,
    C3x,
    CBFuse,
    CBLinear,
    Classify,
    Concat,
    Conv,
    Conv2,
    ConvTranspose,
    Detect,
    DWConv,
    DWConvTranspose2d,
    Focus,
    GhostBottleneck,
    GhostConv,
    HGBlock,
    HGStem,
    ImagePoolingAttn,
    Index,
    LRPCHead,
    Pose,
    RepC3,
    RepConv,
    RepNCSPELAN4,
    RepVGGDW,
    ResNetLayer,
    RTDETRDecoder,
    SCDown,
    Segment,
    TorchVision,
    WorldDetect,
    YOLOEDetect,
    YOLOESegment,
    v10Detect,
)
from ultralytics.utils import DEFAULT_CFG_DICT, DEFAULT_CFG_KEYS, LOGGER, YAML, colorstr, emojis
from ultralytics.utils.checks import check_requirements, check_suffix, check_yaml
from ultralytics.utils.loss import (
    E2EDetectLoss,
    v8ClassificationLoss,
    v8DetectionLoss,
    v8OBBLoss,
    v8PoseLoss,
    v8SegmentationLoss,
)
from ultralytics.utils.ops import make_divisible
from ultralytics.utils.patches import torch_load
from ultralytics.utils.plotting import feature_visualization
from ultralytics.utils.torch_utils import (
    fuse_conv_and_bn,
    fuse_deconv_and_bn,
    initialize_weights,
    intersect_dicts,
    model_info,
    scale_img,
    smart_inference_mode,
    time_sync,
)


class BaseModel(torch.nn.Module):
    """
    Base class for all YOLO models in the Ultralytics family.

    This class provides common functionality for YOLO models including forward pass handling, model fusion,
    information display, and weight loading capabilities.

    Attributes:
        model (torch.nn.Module): The neural network model.
        save (list): List of layer indices to save outputs from.
        stride (torch.Tensor): Model stride values.

    Methods:
        forward: Perform forward pass for training or inference.
        predict: Perform inference on input tensor.
        fuse: Fuse Conv2d and BatchNorm2d layers for optimization.
        info: Print model information.
        load: Load weights into the model.
        loss: Compute loss for training.

    Examples:
        Create a BaseModel instance
        >>> model = BaseModel()
        >>> model.info()  # Display model information
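        Compute a training loss (illustrative sketch only; assumes a task subclass and a formatted batch dict)
        >>> loss, loss_items = model(batch)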
    """

    def forward(self, x, *args, **kwargs):
        """
        Perform forward pass of the model for either training or inference.

        If x is a dict, calculates and returns the loss for training. Otherwise, returns predictions for inference.

        Args:
            x (torch.Tensor | dict): Input tensor for inference, or dict with image tensor and labels for training.
            *args (Any): Variable length argument list.
            **kwargs (Any): Arbitrary keyword arguments.

        Returns:
            (torch.Tensor): Loss if x is a dict (training), or network predictions (inference).
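
        Examples:
            Illustrative sketch only; assumes a built task model and a correctly formatted batch dict
            >>> preds = model(torch.zeros(1, 3, 640, 640))  # tensor input -> inference predictions
            >>> loss, loss_items = model(batch)  # dict input -> training loss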
        """
        if isinstance(x, dict):  # for cases of training and validating while training.
            return self.loss(x, *args, **kwargs)
        return self.predict(x, *args, **kwargs)

    def predict(self, x, profile=False, visualize=False, augment=False, embed=None):
        """
        Perform a forward pass through the network.

        Args:
            x (torch.Tensor): The input tensor to the model.
            profile (bool): Print the computation time of each layer if True.
            visualize (bool): Save the feature maps of the model if True.
            augment (bool): Augment image during prediction.
            embed (list, optional): A list of feature vectors/embeddings to return.

        Returns:
            (torch.Tensor): The last output of the model.
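
        Examples:
            Illustrative sketch only; assumes a built model and a dummy 640x640 input
            >>> y = model.predict(torch.zeros(1, 3, 640, 640))
            >>> feats = model.predict(torch.zeros(1, 3, 640, 640), embed=[len(model.model) - 2])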
        """
        if augment:
            return self._predict_augment(x)
        return self._predict_once(x, profile, visualize, embed)

    def _predict_once(self, x, profile=False, visualize=False, embed=None):
        """
        Perform a forward pass through the network.

        Args:
            x (torch.Tensor): The input tensor to the model.
            profile (bool): Print the computation time of each layer if True.
            visualize (bool): Save the feature maps of the model if True.
            embed (list, optional): A list of feature vectors/embeddings to return.

        Returns:
            (torch.Tensor): The last output of the model.
        """
        y, dt, embeddings = [], [], []  # outputs
        embed = frozenset(embed) if embed is not None else {-1}
        max_idx = max(embed)
        for m in self.model:
            if m.f != -1:  # if not from previous layer
                x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]  # from earlier layers
            if profile:
                self._profile_one_layer(m, x, dt)
            x = m(x)  # run
            y.append(x if m.i in self.save else None)  # save output
            if visualize:
                feature_visualization(x, m.type, m.i, save_dir=visualize)
            if m.i in embed:
                embeddings.append(torch.nn.functional.adaptive_avg_pool2d(x, (1, 1)).squeeze(-1).squeeze(-1))
                if m.i == max_idx:
                    return torch.unbind(torch.cat(embeddings, 1), dim=0)
        return x

    def _predict_augment(self, x):
        """Perform augmentations on input image x and return augmented inference."""
        LOGGER.warning(
            f"{self.__class__.__name__} does not support 'augment=True' prediction. "
            f"Reverting to single-scale prediction."
        )
        return self._predict_once(x)

    def _profile_one_layer(self, m, x, dt):
        """
        Profile the computation time and FLOPs of a single layer of the model on a given input.

        Args:
            m (torch.nn.Module): The layer to be profiled.
            x (torch.Tensor): The input data to the layer.
            dt (list): A list to store the computation time of the layer.
        """
        try:
            import thop
        except ImportError:
            thop = None  # conda support without 'ultralytics-thop' installed

        c = m == self.model[-1] and isinstance(x, list)  # is final layer list, copy input as inplace fix
        flops = thop.profile(m, inputs=[x.copy() if c else x], verbose=False)[0] / 1e9 * 2 if thop else 0  # GFLOPs
        t = time_sync()
        for _ in range(10):
            m(x.copy() if c else x)
        dt.append((time_sync() - t) * 100)
        if m == self.model[0]:
            LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s}  module")
        LOGGER.info(f"{dt[-1]:10.2f} {flops:10.2f} {m.np:10.0f}  {m.type}")
        if c:
            LOGGER.info(f"{sum(dt):10.2f} {'-':>10s} {'-':>10s}  Total")

    def fuse(self, verbose=True):
        """
        Fuse the `Conv2d()` and `BatchNorm2d()` layers of the model into a single layer for improved computation
        efficiency.

        Returns:
            (torch.nn.Module): The fused model is returned.
        """
        if not self.is_fused():
            for m in self.model.modules():
                if isinstance(m, (Conv, Conv2, DWConv)) and hasattr(m, "bn"):
                    if isinstance(m, Conv2):
                        m.fuse_convs()
                    m.conv = fuse_conv_and_bn(m.conv, m.bn)  # update conv
                    delattr(m, "bn")  # remove batchnorm
                    m.forward = m.forward_fuse  # update forward
                if isinstance(m, ConvTranspose) and hasattr(m, "bn"):
                    m.conv_transpose = fuse_deconv_and_bn(m.conv_transpose, m.bn)
                    delattr(m, "bn")  # remove batchnorm
                    m.forward = m.forward_fuse  # update forward
                if isinstance(m, RepConv):
                    m.fuse_convs()
                    m.forward = m.forward_fuse  # update forward
                if isinstance(m, RepVGGDW):
                    m.fuse()
                    m.forward = m.forward_fuse
                if isinstance(m, v10Detect):
                    m.fuse()  # remove one2many head
                if isinstance(m, YOLOEDetect) and hasattr(self, "pe"):
                    m.fuse(self.pe.to(next(self.model.parameters()).device))
            self.info(verbose=verbose)

        return self

    def is_fused(self, thresh=10):
        """
        Check if the model has less than a certain threshold of BatchNorm layers.

        Args:
            thresh (int, optional): The threshold number of BatchNorm layers.

        Returns:
            (bool): True if the number of BatchNorm layers in the model is less than the threshold, False otherwise.
        """
        bn = tuple(v for k, v in torch.nn.__dict__.items() if "Norm" in k)  # normalization layers, i.e. BatchNorm2d()
        return sum(isinstance(v, bn) for v in self.modules()) < thresh  # True if < 'thresh' BatchNorm layers in model

    def info(self, detailed=False, verbose=True, imgsz=640):
        """
        Print model information.

        Args:
            detailed (bool): If True, prints out detailed information about the model.
            verbose (bool): If True, prints out the model information.
            imgsz (int): The size of the image that the model will be trained on.
        """
        return model_info(self, detailed=detailed, verbose=verbose, imgsz=imgsz)

    def _apply(self, fn):
        """
        Apply a function to all tensors in the model that are not parameters or registered buffers.

        Args:
            fn (function): The function to apply to the model.

        Returns:
            (BaseModel): An updated BaseModel object.
        """
        self = super()._apply(fn)
        m = self.model[-1]  # Detect()
        if isinstance(m, Detect):  # includes all Detect subclasses like Segment, Pose, OBB, WorldDetect
            m.stride = fn(m.stride)
            m.anchors = fn(m.anchors)
            m.strides = fn(m.strides)
        return self

    def load(self, weights, verbose=True):
        """
        Load weights into the model.

        Args:
            weights (dict | torch.nn.Module): The pre-trained weights to be loaded.
            verbose (bool, optional): Whether to log the transfer progress.
        """
        model = weights["model"] if isinstance(weights, dict) else weights  # torchvision models are not dicts
        csd = model.float().state_dict()  # checkpoint state_dict as FP32
        updated_csd = intersect_dicts(csd, self.state_dict())  # intersect
        self.load_state_dict(updated_csd, strict=False)  # load
        len_updated_csd = len(updated_csd)
        first_conv = "model.0.conv.weight"  # hard-coded to YOLO models for now
        # Mostly used to boost multi-channel training
        state_dict = self.state_dict()
        if first_conv not in updated_csd and first_conv in state_dict:
            c1, c2, h, w = state_dict[first_conv].shape
            cc1, cc2, ch, cw = csd[first_conv].shape
            if ch == h and cw == w:
                c1, c2 = min(c1, cc1), min(c2, cc2)
                state_dict[first_conv][:c1, :c2] = csd[first_conv][:c1, :c2]
                len_updated_csd += 1
        if verbose:
            LOGGER.info(f"Transferred {len_updated_csd}/{len(self.model.state_dict())} items from pretrained weights")

    def loss(self, batch, preds=None):
        """
        Compute loss.

        Args:
            batch (dict): Batch to compute loss on.
            preds (torch.Tensor | List[torch.Tensor], optional): Predictions.
        """
        if getattr(self, "criterion", None) is None:
            self.criterion = self.init_criterion()

        preds = self.forward(batch["img"]) if preds is None else preds
        return self.criterion(preds, batch)

    def init_criterion(self):
        """Initialize the loss criterion for the BaseModel."""
        raise NotImplementedError("compute_loss() needs to be implemented by task heads")


class DetectionModel(BaseModel):
    """
    YOLO detection model.

    This class implements the YOLO detection architecture, handling model initialization, forward pass,
    augmented inference, and loss computation for object detection tasks.

    Attributes:
        yaml (dict): Model configuration dictionary.
        model (torch.nn.Sequential): The neural network model.
        save (list): List of layer indices to save outputs from.
        names (dict): Class names dictionary.
        inplace (bool): Whether to use inplace operations.
        end2end (bool): Whether the model uses end-to-end detection.
        stride (torch.Tensor): Model stride values.

    Methods:
        __init__: Initialize the YOLO detection model.
        _predict_augment: Perform augmented inference.
        _descale_pred: De-scale predictions following augmented inference.
        _clip_augmented: Clip YOLO augmented inference tails.
        init_criterion: Initialize the loss criterion.

    Examples:
        Initialize a detection model
        >>> model = DetectionModel("yolo11n.yaml", ch=3, nc=80)
        >>> results = model.predict(image_tensor)
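        >>> # Illustrative extras for a built model
        >>> model = model.fuse()  # fuse Conv2d + BatchNorm2d layers for faster inference
        >>> model.info(verbose=False)  # print a one-line model summary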
    """

    def __init__(self, cfg="yolo11n.yaml", ch=3, nc=None, verbose=True):
        """
        Initialize the YOLO detection model with the given config and parameters.

        Args:
            cfg (str | dict): Model configuration file path or dictionary.
            ch (int): Number of input channels.
            nc (int, optional): Number of classes.
            verbose (bool): Whether to display model information.
        """
        super().__init__()
        self.yaml = cfg if isinstance(cfg, dict) else yaml_model_load(cfg)  # cfg dict
        if self.yaml["backbone"][0][2] == "Silence":
            LOGGER.warning(
                "YOLOv9 `Silence` module is deprecated in favor of torch.nn.Identity. "
                "Please delete local *.pt file and re-download the latest model checkpoint."
            )
            self.yaml["backbone"][0][2] = "nn.Identity"

        # Define model
        self.yaml["channels"] = ch  # save channels
        if nc and nc != self.yaml["nc"]:
            LOGGER.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}")
            self.yaml["nc"] = nc  # override YAML value
        self.model, self.save = parse_model(deepcopy(self.yaml), ch=ch, verbose=verbose)  # model, savelist
        self.names = {i: f"{i}" for i in range(self.yaml["nc"])}  # default names dict
        self.inplace = self.yaml.get("inplace", True)
        self.end2end = getattr(self.model[-1], "end2end", False)

        # Build strides
        m = self.model[-1]  # Detect()
        if isinstance(m, Detect):  # includes all Detect subclasses like Segment, Pose, OBB, WorldDetect
            s = 256  # 2x min stride
            m.inplace = self.inplace

            def _forward(x):
                """Perform a forward pass through the model, handling different Detect subclass types accordingly."""
                if self.end2end:
                    return self.forward(x)["one2many"]
                return self.forward(x)[0] if isinstance(m, (Segment, YOLOESegment, Pose, OBB)) else self.forward(x)

            self.model.eval()  # avoid changing batchnorm statistics until training starts
            m.training = True  # setting it to True to properly return strides
            m.stride = torch.tensor([s / x.shape[-2] for x in _forward(torch.zeros(1, ch, s, s))])  # forward
            self.stride = m.stride
            self.model.train()  # restore train mode
            m.bias_init()  # only run once
        else:
            self.stride = torch.Tensor([32])  # default stride for i.e. RTDETR

        # Init weights, biases
        initialize_weights(self)
        if verbose:
            self.info()
            LOGGER.info("")

    def _predict_augment(self, x):
        """
        Perform augmentations on input image x and return augmented inference and train outputs.

        Args:
            x (torch.Tensor): Input image tensor.

        Returns:
            (torch.Tensor): Augmented inference output.
        """
        if getattr(self, "end2end", False) or self.__class__.__name__ != "DetectionModel":
            LOGGER.warning("Model does not support 'augment=True', reverting to single-scale prediction.")
            return self._predict_once(x)
        img_size = x.shape[-2:]  # height, width
        s = [1, 0.83, 0.67]  # scales
        f = [None, 3, None]  # flips (2-ud, 3-lr)
        y = []  # outputs
        for si, fi in zip(s, f):
            xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max()))
            yi = super().predict(xi)[0]  # forward
            yi = self._descale_pred(yi, fi, si, img_size)
            y.append(yi)
        y = self._clip_augmented(y)  # clip augmented tails
        return torch.cat(y, -1), None  # augmented inference, train

    @staticmethod
    def _descale_pred(p, flips, scale, img_size, dim=1):
        """
        De-scale predictions following augmented inference (inverse operation).

        Args:
            p (torch.Tensor): Predictions tensor.
            flips (int): Flip type (0=none, 2=ud, 3=lr).
            scale (float): Scale factor.
            img_size (tuple): Original image size (height, width).
            dim (int): Dimension to split at.

        Returns:
            (torch.Tensor): De-scaled predictions.
        """
        p[:, :4] /= scale  # de-scale
        x, y, wh, cls = p.split((2, 2, p.shape[dim] - 4), dim)
        if flips == 2:
            y = img_size[0] - y  # de-flip ud
        elif flips == 3:
            x = img_size[1] - x  # de-flip lr
        return torch.cat((x, y, wh, cls), dim)

    def _clip_augmented(self, y):
        """
        Clip YOLO augmented inference tails.

        Args:
            y (List[torch.Tensor]): List of detection tensors.

        Returns:
            (List[torch.Tensor]): Clipped detection tensors.
        """
        nl = self.model[-1].nl  # number of detection layers (P3-P5)
        g = sum(4**x for x in range(nl))  # grid points
        e = 1  # exclude layer count
        i = (y[0].shape[-1] // g) * sum(4**x for x in range(e))  # indices
        y[0] = y[0][..., :-i]  # large
        i = (y[-1].shape[-1] // g) * sum(4 ** (nl - 1 - x) for x in range(e))  # indices
        y[-1] = y[-1][..., i:]  # small
        return y

    def init_criterion(self):
        """Initialize the loss criterion for the DetectionModel."""
        return E2EDetectLoss(self) if getattr(self, "end2end", False) else v8DetectionLoss(self)


class OBBModel(DetectionModel):
    """
    YOLO Oriented Bounding Box (OBB) model.

    This class extends DetectionModel to handle oriented bounding box detection tasks, providing specialized
    loss computation for rotated object detection.

    Methods:
        __init__: Initialize YOLO OBB model.
        init_criterion: Initialize the loss criterion for OBB detection.

    Examples:
        Initialize an OBB model
        >>> model = OBBModel("yolo11n-obb.yaml", ch=3, nc=80)
        >>> results = model.predict(image_tensor)
    """

    def __init__(self, cfg="yolo11n-obb.yaml", ch=3, nc=None, verbose=True):
        """
        Initialize YOLO OBB model with given config and parameters.

        Args:
            cfg (str | dict): Model configuration file path or dictionary.
            ch (int): Number of input channels.
            nc (int, optional): Number of classes.
            verbose (bool): Whether to display model information.
        """
        super().__init__(cfg=cfg, ch=ch, nc=nc, verbose=verbose)

    def init_criterion(self):
        """Initialize the loss criterion for the model."""
        return v8OBBLoss(self)


class SegmentationModel(DetectionModel):
    """
    YOLO segmentation model.

    This class extends DetectionModel to handle instance segmentation tasks, providing specialized
    loss computation for pixel-level object detection and segmentation.

    Methods:
        __init__: Initialize YOLO segmentation model.
        init_criterion: Initialize the loss criterion for segmentation.

    Examples:
        Initialize a segmentation model
        >>> model = SegmentationModel("yolo11n-seg.yaml", ch=3, nc=80)
        >>> results = model.predict(image_tensor)
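        >>> # Illustrative only: a dict batch triggers segmentation loss computation
        >>> loss, loss_items = model(batch)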
    """

    def __init__(self, cfg="yolo11n-seg.yaml", ch=3, nc=None, verbose=True):
        """
        Initialize Ultralytics YOLO segmentation model with given config and parameters.

        Args:
            cfg (str | dict): Model configuration file path or dictionary.
            ch (int): Number of input channels.
            nc (int, optional): Number of classes.
            verbose (bool): Whether to display model information.
        """
        super().__init__(cfg=cfg, ch=ch, nc=nc, verbose=verbose)

    def init_criterion(self):
        """Initialize the loss criterion for the SegmentationModel."""
        return v8SegmentationLoss(self)


class PoseModel(DetectionModel):
    """
    YOLO pose model.

    This class extends DetectionModel to handle human pose estimation tasks, providing specialized
    loss computation for keypoint detection and pose estimation.

    Attributes:
        kpt_shape (tuple): Shape of keypoints data (num_keypoints, num_dimensions).

    Methods:
        __init__: Initialize YOLO pose model.
        init_criterion: Initialize the loss criterion for pose estimation.

    Examples:
        Initialize a pose model
        >>> model = PoseModel("yolo11n-pose.yaml", ch=3, nc=1, data_kpt_shape=(17, 3))
        >>> results = model.predict(image_tensor)
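        >>> # Illustrative only: the keypoint spec above propagates into the model config
        >>> model.yaml["kpt_shape"]
        (17, 3)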
    """

    def __init__(self, cfg="yolo11n-pose.yaml", ch=3, nc=None, data_kpt_shape=(None, None), verbose=True):
        """
        Initialize Ultralytics YOLO Pose model.

        Args:
            cfg (str | dict): Model configuration file path or dictionary.
            ch (int): Number of input channels.
            nc (int, optional): Number of classes.
            data_kpt_shape (tuple): Shape of keypoints data.
            verbose (bool): Whether to display model information.
        """
        if not isinstance(cfg, dict):
            cfg = yaml_model_load(cfg)  # load model YAML
        if any(data_kpt_shape) and list(data_kpt_shape) != list(cfg["kpt_shape"]):
            LOGGER.info(f"Overriding model.yaml kpt_shape={cfg['kpt_shape']} with kpt_shape={data_kpt_shape}")
            cfg["kpt_shape"] = data_kpt_shape
        super().__init__(cfg=cfg, ch=ch, nc=nc, verbose=verbose)

    def init_criterion(self):
        """Initialize the loss criterion for the PoseModel."""
        return v8PoseLoss(self)


class ClassificationModel(BaseModel):
    """
    YOLO classification model.

    This class implements the YOLO classification architecture for image classification tasks,
    providing model initialization, configuration, and output reshaping capabilities.

    Attributes:
        yaml (dict): Model configuration dictionary.
        model (torch.nn.Sequential): The neural network model.
        stride (torch.Tensor): Model stride values.
        names (dict): Class names dictionary.

    Methods:
        __init__: Initialize ClassificationModel.
        _from_yaml: Set model configurations and define architecture.
        reshape_outputs: Update model to specified class count.
        init_criterion: Initialize the loss criterion.

    Examples:
        Initialize a classification model
        >>> model = ClassificationModel("yolo11n-cls.yaml", ch=3, nc=1000)
        >>> results = model.predict(image_tensor)
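        >>> # Illustrative only: adapt any classifier head to a new class count in place
        >>> ClassificationModel.reshape_outputs(model, nc=10)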
    """

    def __init__(self, cfg="yolo11n-cls.yaml", ch=3, nc=None, verbose=True):
        """
        Initialize ClassificationModel with YAML, channels, number of classes, verbose flag.

        Args:
            cfg (str | dict): Model configuration file path or dictionary.
            ch (int): Number of input channels.
            nc (int, optional): Number of classes.
            verbose (bool): Whether to display model information.
        """
        super().__init__()
        self._from_yaml(cfg, ch, nc, verbose)

    def _from_yaml(self, cfg, ch, nc, verbose):
        """
        Set Ultralytics YOLO model configurations and define the model architecture.

        Args:
            cfg (str | dict): Model configuration file path or dictionary.
            ch (int): Number of input channels.
            nc (int, optional): Number of classes.
            verbose (bool): Whether to display model information.
        """
        self.yaml = cfg if isinstance(cfg, dict) else yaml_model_load(cfg)  # cfg dict

        # Define model
        ch = self.yaml["channels"] = self.yaml.get("channels", ch)  # input channels
        if nc and nc != self.yaml["nc"]:
            LOGGER.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}")
            self.yaml["nc"] = nc  # override YAML value
        elif not nc and not self.yaml.get("nc", None):
            raise ValueError("nc not specified. Must specify nc in model.yaml or function arguments.")
        self.model, self.save = parse_model(deepcopy(self.yaml), ch=ch, verbose=verbose)  # model, savelist
        self.stride = torch.Tensor([1])  # no stride constraints
        self.names = {i: f"{i}" for i in range(self.yaml["nc"])}  # default names dict
        self.info()

    @staticmethod
    def reshape_outputs(model, nc):
        """
        Update a TorchVision classification model to class count 'n' if required.

        Args:
            model (torch.nn.Module): Model to update.
            nc (int): New number of classes.
        """
        name, m = list((model.model if hasattr(model, "model") else model).named_children())[-1]  # last module
        if isinstance(m, Classify):  # YOLO Classify() head
            if m.linear.out_features != nc:
                m.linear = torch.nn.Linear(m.linear.in_features, nc)
        elif isinstance(m, torch.nn.Linear):  # ResNet, EfficientNet
            if m.out_features != nc:
                setattr(model, name, torch.nn.Linear(m.in_features, nc))
        elif isinstance(m, torch.nn.Sequential):
            types = [type(x) for x in m]
            if torch.nn.Linear in types:
                i = len(types) - 1 - types[::-1].index(torch.nn.Linear)  # last torch.nn.Linear index
                if m[i].out_features != nc:
                    m[i] = torch.nn.Linear(m[i].in_features, nc)
            elif torch.nn.Conv2d in types:
                i = len(types) - 1 - types[::-1].index(torch.nn.Conv2d)  # last torch.nn.Conv2d index
                if m[i].out_channels != nc:
                    m[i] = torch.nn.Conv2d(
                        m[i].in_channels, nc, m[i].kernel_size, m[i].stride, bias=m[i].bias is not None
                    )

    def init_criterion(self):
        """Initialize the loss criterion for the ClassificationModel."""
        return v8ClassificationLoss()


class RTDETRDetectionModel(DetectionModel):
    """
    RTDETR (Real-time DEtection and Tracking using Transformers) Detection Model class.

    This class is responsible for constructing the RTDETR architecture, defining loss functions, and facilitating both
    the training and inference processes. RTDETR is an object detection and tracking model that extends from the
    DetectionModel base class.

    Attributes:
        nc (int): Number of classes for detection.
        criterion (RTDETRDetectionLoss): Loss function for training.

    Methods:
        __init__: Initialize the RTDETRDetectionModel.
        init_criterion: Initialize the loss criterion.
        loss: Compute loss for training.
        predict: Perform forward pass through the model.

    Examples:
        Initialize an RTDETR model
        >>> model = RTDETRDetectionModel("rtdetr-l.yaml", ch=3, nc=80)
        >>> results = model.predict(image_tensor)
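        >>> # Illustrative only: transformer-decoder heads fall back to the default stride
        >>> int(model.stride[0])
        32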
    """

    def __init__(self, cfg="rtdetr-l.yaml", ch=3, nc=None, verbose=True):
        """
        Initialize the RTDETRDetectionModel.

        Args:
            cfg (str | dict): Configuration file name or path.
            ch (int): Number of input channels.
            nc (int, optional): Number of classes.
            verbose (bool): Print additional information during initialization.
        """
        super().__init__(cfg=cfg, ch=ch, nc=nc, verbose=verbose)

    def init_criterion(self):
        """Initialize the loss criterion for the RTDETRDetectionModel."""
        from ultralytics.models.utils.loss import RTDETRDetectionLoss

        return RTDETRDetectionLoss(nc=self.nc, use_vfl=True)

    def loss(self, batch, preds=None):
        """
        Compute the loss for the given batch of data.

        Args:
            batch (dict): Dictionary containing image and label data.
            preds (torch.Tensor, optional): Precomputed model predictions.

        Returns:
            loss_sum (torch.Tensor): Total loss value.
            loss_items (torch.Tensor): Main three losses in a tensor.
        """
        if not hasattr(self, "criterion"):
            self.criterion = self.init_criterion()

        img = batch["img"]
        # NOTE: preprocess gt_bbox and gt_labels to list.
        bs = len(img)
        batch_idx = batch["batch_idx"]
        gt_groups = [(batch_idx == i).sum().item() for i in range(bs)]
        targets = {
            "cls": batch["cls"].to(img.device, dtype=torch.long).view(-1),
            "bboxes": batch["bboxes"].to(device=img.device),
            "batch_idx": batch_idx.to(img.device, dtype=torch.long).view(-1),
            "gt_groups": gt_groups,
        }

        preds = self.predict(img, batch=targets) if preds is None else preds
        dec_bboxes, dec_scores, enc_bboxes, enc_scores, dn_meta = preds if self.training else preds[1]
        if dn_meta is None:
            dn_bboxes, dn_scores = None, None
        else:
            dn_bboxes, dec_bboxes = torch.split(dec_bboxes, dn_meta["dn_num_split"], dim=2)
            dn_scores, dec_scores = torch.split(dec_scores, dn_meta["dn_num_split"], dim=2)

        dec_bboxes = torch.cat([enc_bboxes.unsqueeze(0), dec_bboxes])  # (7, bs, 300, 4)
        dec_scores = torch.cat([enc_scores.unsqueeze(0), dec_scores])

        loss = self.criterion(
            (dec_bboxes, dec_scores), targets, dn_bboxes=dn_bboxes, dn_scores=dn_scores, dn_meta=dn_meta
        )
        # NOTE: There are like 12 losses in RTDETR, backward with all losses but only show the main three losses.
        return sum(loss.values()), torch.as_tensor(
            [loss[k].detach() for k in ["loss_giou", "loss_class", "loss_bbox"]], device=img.device
        )

    def predict(self, x, profile=False, visualize=False, batch=None, augment=False, embed=None):
        """
        Perform a forward pass through the model.

        Args:
            x (torch.Tensor): The input tensor.
            profile (bool): If True, profile the computation time for each layer.
            visualize (bool): If True, save feature maps for visualization.
            batch (dict, optional): Ground truth data for evaluation.
            augment (bool): If True, perform data augmentation during inference.
            embed (list, optional): A list of feature vectors/embeddings to return.

        Returns:
            (torch.Tensor): Model's output tensor.
        """
        y, dt, embeddings = [], [], []  # outputs
        embed = frozenset(embed) if embed is not None else {-1}
        max_idx = max(embed)
        for m in self.model[:-1]:  # except the head part
            if m.f != -1:  # if not from previous layer
                x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]  # from earlier layers
            if profile:
                self._profile_one_layer(m, x, dt)
            x = m(x)  # run
            y.append(x if m.i in self.save else None)  # save output
            if visualize:
                feature_visualization(x, m.type, m.i, save_dir=visualize)
            if m.i in embed:
                embeddings.append(torch.nn.functional.adaptive_avg_pool2d(x, (1, 1)).squeeze(-1).squeeze(-1))
                if m.i == max_idx:
                    return torch.unbind(torch.cat(embeddings, 1), dim=0)
        head = self.model[-1]
        x = head([y[j] for j in head.f], batch)  # head inference
        return x


class WorldModel(DetectionModel):
    """
    YOLOv8 World Model.

    This class implements the YOLOv8 World model for open-vocabulary object detection, supporting text-based
    class specification and CLIP model integration for zero-shot detection capabilities.

    Attributes:
        txt_feats (torch.Tensor): Text feature embeddings for classes.
        clip_model (torch.nn.Module): CLIP model for text encoding.

    Methods:
        __init__: Initialize YOLOv8 world model.
        set_classes: Set classes for offline inference.
        get_text_pe: Get text positional embeddings.
        predict: Perform forward pass with text features.
        loss: Compute loss with text features.

    Examples:
        Initialize a world model
        >>> model = WorldModel("yolov8s-world.yaml", ch=3, nc=80)
        >>> model.set_classes(["person", "car", "bicycle"])
        >>> results = model.predict(image_tensor)
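        >>> # Illustrative only: text encoding requires a CLIP backend at runtime
        >>> tpe = model.get_text_pe(["person", "car"])  # text embeddings, shape (1, 2, 512)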
    """

    def __init__(self, cfg="yolov8s-world.yaml", ch=3, nc=None, verbose=True):
        """
        Initialize YOLOv8 world model with given config and parameters.

        Args:
            cfg (str | dict): Model configuration file path or dictionary.
            ch (int): Number of input channels.
            nc (int, optional): Number of classes.
            verbose (bool): Whether to display model information.
        """
        self.txt_feats = torch.randn(1, nc or 80, 512)  # features placeholder
        self.clip_model = None  # CLIP model placeholder
        super().__init__(cfg=cfg, ch=ch, nc=nc, verbose=verbose)

    def set_classes(self, text, batch=80, cache_clip_model=True):
        """
        Set classes in advance so that model could do offline-inference without clip model.

        Args:
            text (List[str]): List of class names.
            batch (int): Batch size for processing text tokens.
            cache_clip_model (bool): Whether to cache the CLIP model.
        """
        self.txt_feats = self.get_text_pe(text, batch=batch, cache_clip_model=cache_clip_model)
        self.model[-1].nc = len(text)

    def get_text_pe(self, text, batch=80, cache_clip_model=True):
        """
        Set classes in advance so that model could do offline-inference without clip model.

        Args:
            text (List[str]): List of class names.
            batch (int): Batch size for processing text tokens.
            cache_clip_model (bool): Whether to cache the CLIP model.

        Returns:
            (torch.Tensor): Text positional embeddings.
        """
        from ultralytics.nn.text_model import build_text_model

        device = next(self.model.parameters()).device
        if not getattr(self, "clip_model", None) and cache_clip_model:
            # For backwards compatibility of models lacking clip_model attribute
            self.clip_model = build_text_model("clip:ViT-B/32", device=device)
        model = self.clip_model if cache_clip_model else build_text_model("clip:ViT-B/32", device=device)
        text_token = model.tokenize(text)
        txt_feats = [model.encode_text(token).detach() for token in text_token.split(batch)]
        txt_feats = txt_feats[0] if len(txt_feats) == 1 else torch.cat(txt_feats, dim=0)
        return txt_feats.reshape(-1, len(text), txt_feats.shape[-1])

    def predict(self, x, profile=False, visualize=False, txt_feats=None, augment=False, embed=None):
        """
        Perform a forward pass through the model.

        Args:
            x (torch.Tensor): The input tensor.
            profile (bool): If True, profile the computation time for each layer.
            visualize (bool): If True, save feature maps for visualization.
            txt_feats (torch.Tensor, optional): The text features, use it if it's given.
            augment (bool): If True, perform data augmentation during inference.
            embed (list, optional): A list of feature vectors/embeddings to return.

        Returns:
            (torch.Tensor): Model's output tensor.
        """
        txt_feats = (self.txt_feats if txt_feats is None else txt_feats).to(device=x.device, dtype=x.dtype)
        if len(txt_feats) != len(x) or self.model[-1].export:
            txt_feats = txt_feats.expand(x.shape[0], -1, -1)
        ori_txt_feats = txt_feats.clone()
        y, dt, embeddings = [], [], []  # outputs
        embed = frozenset(embed) if embed is not None else {-1}
        max_idx = max(embed)
        for m in self.model:
            if m.f != -1:  # if not from previous layer
                x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]  # from earlier layers
            if profile:
                self._profile_one_layer(m, x, dt)
            if isinstance(m, C2fAttn):
                x = m(x, txt_feats)
            elif isinstance(m, WorldDetect):
                x = m(x, ori_txt_feats)
            elif isinstance(m, ImagePoolingAttn):
                txt_feats = m(x, txt_feats)
            else:
                x = m(x)  # run

            y.append(x if m.i in self.save else None)  # save output
            if visualize:
                feature_visualization(x, m.type, m.i, save_dir=visualize)
            if m.i in embed:
                embeddings.append(torch.nn.functional.adaptive_avg_pool2d(x, (1, 1)).squeeze(-1).squeeze(-1))
                if m.i == max_idx:
                    return torch.unbind(torch.cat(embeddings, 1), dim=0)
        return x

    def loss(self, batch, preds=None):
        """
        Compute loss.

        Args:
            batch (dict): Batch to compute loss on.
            preds (torch.Tensor | List[torch.Tensor], optional): Predictions.
        """
        if not hasattr(self, "criterion"):
            self.criterion = self.init_criterion()

        if preds is None:
            preds = self.forward(batch["img"], txt_feats=batch["txt_feats"])
        return self.criterion(preds, batch)


class YOLOEModel(DetectionModel):
    """
    YOLOE detection model.

    This class implements the YOLOE architecture for efficient object detection with text and visual prompts,
    supporting both prompt-based and prompt-free inference modes.

    Attributes:
        pe (torch.Tensor): Prompt embeddings for classes.
        clip_model (torch.nn.Module): CLIP model for text encoding.

    Methods:
        __init__: Initialize YOLOE model.
        get_text_pe: Get text positional embeddings.
        get_visual_pe: Get visual embeddings.
        set_vocab: Set vocabulary for prompt-free model.
        get_vocab: Get fused vocabulary layer.
        set_classes: Set classes for offline inference.
        get_cls_pe: Get class positional embeddings.
        predict: Perform forward pass with prompts.
        loss: Compute loss with prompts.

    Examples:
        Initialize a YOLOE model
        >>> model = YOLOEModel("yoloe-v8s.yaml", ch=3, nc=80)
        >>> results = model.predict(image_tensor, tpe=text_embeddings)
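        >>> # Illustrative only: cache prompt embeddings once for offline, prompt-free inference
        >>> names = ["person", "bus"]
        >>> model.set_classes(names, model.get_text_pe(names))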
    """

    def __init__(self, cfg="yoloe-v8s.yaml", ch=3, nc=None, verbose=True):
        """
        Initialize YOLOE model with given config and parameters.

        Args:
            cfg (str | dict): Model configuration file path or dictionary.
            ch (int): Number of input channels.
            nc (int, optional): Number of classes.
            verbose (bool): Whether to display model information.
        """
        super().__init__(cfg=cfg, ch=ch, nc=nc, verbose=verbose)

    @smart_inference_mode()
    def get_text_pe(self, text, batch=80, cache_clip_model=False, without_reprta=False):
        """
        Set classes in advance so that model could do offline-inference without clip model.

        Args:
            text (List[str]): List of class names.
            batch (int): Batch size for processing text tokens.
            cache_clip_model (bool): Whether to cache the CLIP model.
            without_reprta (bool): Whether to return text embeddings cooperated with reprta module.

        Returns:
            (torch.Tensor): Text positional embeddings.
        """
        from ultralytics.nn.text_model import build_text_model

        device = next(self.model.parameters()).device
        if not getattr(self, "clip_model", None) and cache_clip_model:
            # For backwards compatibility of models lacking clip_model attribute
            self.clip_model = build_text_model("mobileclip:blt", device=device)
        model = self.clip_model if cache_clip_model else build_text_model("mobileclip:blt", device=device)
        text_token = model.tokenize(text)
        txt_feats = [model.encode_text(token).detach() for token in text_token.split(batch)]
        txt_feats = txt_feats[0] if len(txt_feats) == 1 else torch.cat(txt_feats, dim=0)
        txt_feats = txt_feats.reshape(-1, len(text), txt_feats.shape[-1])
        if without_reprta:
            return txt_feats

        assert not self.training
        head = self.model[-1]
        assert isinstance(head, YOLOEDetect)
        return head.get_tpe(txt_feats)  # run auxiliary text head

    @smart_inference_mode()
    def get_visual_pe(self, img, visual):
        """
        Get visual embeddings.

        Args:
            img (torch.Tensor): Input image tensor.
            visual (torch.Tensor): Visual features.

        Returns:
            (torch.Tensor): Visual positional embeddings.
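
        Examples:
            Illustrative sketch only; assumes matching image and visual-prompt tensors
            >>> vpe = model.get_visual_pe(img, visual)  # same as model(img, vpe=visual, return_vpe=True)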
        T)vpe
return_vper   )r_   r   visuals      rc   get_visual_pezYOLOEModel.get_visual_pe  s     CV55re   c           
         | j                   rJ | j                  d   }t        |t              sJ t	        | j                               j                  } | t        j                  dd| j                  d   | j                  d         j                  |             t        j                  d t        t        ||j                  |j                               D              | j                  d   _        t        |j                   |j                        D ]C  \  }}t        |t        j$                        sJ t        |t        j$                        sJ |d= |d= E t'        |      | j                  d   _        t+        |      | _        y)z
        Set vocabulary for the prompt-free model.

        Args:
            vocab (nn.ModuleList): List of vocabulary items.
            names (List[str]): List of class names.
        """
        assert not self.training
        head = self.model[-1]
        assert isinstance(head, YOLOEDetect)

        # Cache anchors for the head
        device = next(self.parameters()).device
        self(torch.empty(1, 3, self.args["imgsz"], self.args["imgsz"]).to(device))  # warmup

        # Re-parameterization for the prompt-free model
        self.model[-1].lrpc = torch.nn.ModuleList(
            LRPCHead(cls, pf[-1], loc[-1], enabled=i != 2)
            for i, (cls, pf, loc) in enumerate(zip(vocab, head.cv3, head.cv2))
        )
        for loc_head, cls_head in zip(head.cv2, head.cv3):
            assert isinstance(loc_head, torch.nn.Sequential)
            assert isinstance(cls_head, torch.nn.Sequential)
            del loc_head[-1]
            del cls_head[-1]
        self.model[-1].nc = len(names)
        self.names = check_class_names(names)

    def get_vocab(self, names):
        """
        Get fused vocabulary layer from the model.

        Args:
            names (list): List of class names.

        Returns:
            (nn.ModuleList): List of vocabulary modules.
        """
        assert not self.training
        head = self.model[-1]
        assert isinstance(head, YOLOEDetect)
        assert not head.is_fused

        tpe = self.get_text_pe(names)
        self.set_classes(names, tpe)
        device = next(self.model.parameters()).device
        head.fuse(self.pe.to(device))  # fuse prompt embeddings into the classify head

        vocab = torch.nn.ModuleList()
        for cls_head in head.cv3:
            assert isinstance(cls_head, torch.nn.Sequential)
            vocab.append(cls_head[-1])
        return vocab

    def set_classes(self, names, embeddings):
        """
        Set classes in advance so that model could do offline-inference without clip model.

        Args:
            names (List[str]): List of class names.
            embeddings (torch.Tensor): Embeddings tensor.
        """
        assert not hasattr(self.model[-1], "lrpc"), (
            "Prompt-free model does not support setting classes. Please try with Text/Visual prompt models."
        )
        assert embeddings.ndim == 3
        self.pe = embeddings
        self.model[-1].nc = len(names)
        self.names = check_class_names(names)

    def get_cls_pe(self, tpe, vpe):
        """
        Get class positional embeddings.

        Args:
            tpe (torch.Tensor, optional): Text positional embeddings.
            vpe (torch.Tensor, optional): Visual positional embeddings.

        Returns:
            (torch.Tensor): Class positional embeddings.
        """
        all_pe = []
        if tpe is not None:
            assert tpe.ndim == 3
            all_pe.append(tpe)
        if vpe is not None:
            assert vpe.ndim == 3
            all_pe.append(vpe)
        if not all_pe:
            all_pe.append(getattr(self, "pe", torch.zeros(1, 80, 512)))
        return torch.cat(all_pe, dim=1)

    def predict(
        self, x, profile=False, visualize=False, tpe=None, augment=False, embed=None, vpe=None, return_vpe=False
    ):
        """
        Perform a forward pass through the model.

        Args:
            x (torch.Tensor): The input tensor.
            profile (bool): If True, profile the computation time for each layer.
            visualize (bool): If True, save feature maps for visualization.
            tpe (torch.Tensor, optional): Text positional embeddings.
            augment (bool): If True, perform data augmentation during inference.
            embed (list, optional): A list of feature vectors/embeddings to return.
            vpe (torch.Tensor, optional): Visual positional embeddings.
            return_vpe (bool): If True, return visual positional embeddings.

        Returns:
            (torch.Tensor): Model's output tensor.
        r   Nrn   r  ro   rq   rr   rs   )r   rv   rw   rx   ry   r[   rz   r{   r;   get_vper  r  r  r   r   rr  r  r  r|   r}   r~   rO   r   r   r   r   r   r   r   r   )r_   r`   ri   rj   r  rk   rl   r  r  r   r   r   br   r   r   cls_pes                    rc   r^   zYOLOEModel.predict  s   & Bz2GGAJ$)$5	% B4e* 	IAssby(c2AaccFYZY\Y\8]TUa2g1Q49O8]''1b1![)+.?aii3'?*?#}},,J3=@@![\]^[_[e[e@f<<?a'188#]]1b"5FaLaDHH!##*Q5%ayIsse|!!%(("5"5"I"I!V"T"\"\]_"`"h"hik"lm33'> <<		*a(@aHH1	I2 / 9^s   I7c                 "   t        | d      s9ddlm} |j                  dd      du}|r ||       n| j	                         | _        |7| j                  |d   |j                  dd      |j                  dd            }| j                  ||      S )	r   r   r   )TVPDetectLossvisualsNr   r  r  r  )r   ultralytics.utils.lossr  r  r   r   rd   )r_   r   r   r  visual_prompts        rc   r]   zYOLOEModel.loss  s     t[)<!IIi6dBM4A]40tGZGZG\DN=LLu599[$3OUZU^U^_hjnUoLpE~~eU++re   )zyoloe-v8s.yamlr  NT)r  FF)FFNFNNFr   )r   r   r   r   r  rV   r  r  r  r  r  r  r^   r]   r   r   s   @rc   r  r    sc    6
A ' 'B 6 6.:4. (. lq0d,re   r  c                   ,     e Zd ZdZd fd	ZddZ xZS )YOLOESegModela0  
    YOLOE segmentation model.

    This class extends YOLOEModel to handle instance segmentation tasks with text and visual prompts,
    providing specialized loss computation for pixel-level object detection and segmentation.

    Methods:
        __init__: Initialize YOLOE segmentation model.
        loss: Compute loss with prompts for segmentation.

    Examples:
        Initialize a YOLOE segmentation model
        >>> model = YOLOESegModel("yoloe-v8s-seg.yaml", ch=3, nc=80)
        >>> results = model.predict(image_tensor, tpe=text_embeddings)
    """

    def __init__(self, cfg="yoloe-v8s-seg.yaml", ch=3, nc=None, verbose=True):
        """
        Initialize YOLOE segmentation model with given config and parameters.

        Args:
            cfg (str | dict): Model configuration file path or dictionary.
            ch (int): Number of input channels.
            nc (int, optional): Number of classes.
            verbose (bool): Whether to display model information.
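
        Examples:
            Illustrative; assumes the packaged "yoloe-v8s-seg.yaml" configuration is resolvable:
            >>> model = YOLOESegModel("yoloe-v8s-seg.yaml", ch=3, nc=80, verbose=False)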
        """
        super().__init__(cfg=cfg, ch=ch, nc=nc, verbose=verbose)

    def loss(self, batch, preds=None):
        """Compute loss, using a text/visual prompt segmentation criterion when visual prompts are present."""
        if not hasattr(self, "criterion"):
            from ultralytics.utils.loss import TVPSegmentLoss

            visual_prompt = batch.get("visuals", None) is not None
            self.criterion = TVPSegmentLoss(self) if visual_prompt else self.init_criterion()

        if preds is None:
            preds = self.forward(batch["img"], tpe=batch.get("txt_feats", None), vpe=batch.get("visuals", None))
        return self.criterion(preds, batch)


class Ensemble(torch.nn.ModuleList):
    """
    Ensemble of models.

    This class allows combining multiple YOLO models into an ensemble for improved performance through
    model averaging or other ensemble techniques.

    Methods:
        __init__: Initialize an ensemble of models.
        forward: Generate predictions from all models in the ensemble.

    Examples:
        Create an ensemble of models
        >>> ensemble = Ensemble()
        >>> ensemble.append(model1)
        >>> ensemble.append(model2)
        >>> results = ensemble(image_tensor)
    """

    def __init__(self):
        """Initialize an ensemble of models."""
        super().__init__()

    def forward(self, x, augment=False, profile=False, visualize=False):
        """
        Generate predictions by running all models in the ensemble on the input and concatenating their outputs.

        Args:
            x (torch.Tensor): Input tensor.
            augment (bool): Whether to augment the input.
            profile (bool): Whether to profile the model.
            visualize (bool): Whether to visualize the features.

        Returns:
            y (torch.Tensor): Concatenated predictions from all models.
            train_out (None): Always None for ensemble inference.
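
        Examples:
            Illustrative; assumes `model1` and `model2` are loaded models sharing the same task and classes:
            >>> ens = Ensemble()
            >>> ens.append(model1)
            >>> ens.append(model2)
            >>> preds, _ = ens(image_tensor)  # predictions concatenated across models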
        """
        y = [module(x, augment, profile, visualize)[0] for module in self]
        y = torch.cat(y, 2)  # NMS ensemble, y shape (B, HW, C)
        return y, None  # inference output, train output


@contextlib.contextmanager
def temporary_modules(modules=None, attributes=None):
    """
    Context manager for temporarily adding or modifying modules in Python's module cache (`sys.modules`).

    This function can be used to change the module paths during runtime. It's useful when refactoring code,
    where you've moved a module from one location to another, but you still want to support the old import
    paths for backwards compatibility.

    Args:
        modules (dict, optional): A dictionary mapping old module paths to new module paths.
        attributes (dict, optional): A dictionary mapping old module attributes to new module attributes.

    Examples:
        >>> with temporary_modules({"old.module": "new.module"}, {"old.module.attribute": "new.module.attribute"}):
        ...     import old.module  # this will now import new.module
        ...     from old.module import attribute  # this will now import new.module.attribute

    Note:
        The changes are only in effect inside the context manager and are undone once the context manager exits.
        Be aware that directly manipulating `sys.modules` can lead to unpredictable results, especially in larger
        applications or libraries. Use this function with caution.
    """
    if modules is None:
        modules = {}
    if attributes is None:
        attributes = {}
    import sys
    from importlib import import_module

    try:
        # Set attributes in sys.modules under their old names
        for old, new in attributes.items():
            old_module, old_attr = old.rsplit(".", 1)
            new_module, new_attr = new.rsplit(".", 1)
            setattr(import_module(old_module), old_attr, getattr(import_module(new_module), new_attr))

        # Set modules in sys.modules under their old names
        for old, new in modules.items():
            sys.modules[old] = import_module(new)

        yield
    finally:
        # Remove the temporary module paths
        for old in modules:
            if old in sys.modules:
                del sys.modules[old]


class SafeClass:
    """A placeholder class to replace unknown classes during unpickling."""

    def __init__(self, *args, **kwargs):
        """Initialize SafeClass instance, ignoring all arguments."""

    def __call__(self, *args, **kwargs):
        """Run SafeClass instance, ignoring all arguments."""


class SafeUnpickler(pickle.Unpickler):
    """Custom Unpickler that replaces unknown classes with SafeClass."""

    def find_class(self, module, name):
        """
        Attempt to find a class, returning SafeClass if not among safe modules.

        Args:
            module (str): Module name.
            name (str): Class name.

        Returns:
            (type): Found class or SafeClass.
        """
        safe_modules = (
            "torch",
            "collections",
            "collections.abc",
            "builtins",
            "math",
            "numpy",
        )
        if module in safe_modules:
            return super().find_class(module, name)  # allow-listed module: unpickle normally
        return SafeClass  # anything else is replaced with an inert placeholder


def torch_safe_load(weight, safe_only=False):
    """
    Attempt to load a PyTorch model with the torch.load() function. If a ModuleNotFoundError is raised, it catches the
    error, logs a warning message, and attempts to install the missing module via the check_requirements() function.
    After installation, the function again attempts to load the model using torch.load().

    Args:
        weight (str): The file path of the PyTorch model.
        safe_only (bool): If True, replace unknown classes with SafeClass during loading.

    Returns:
        ckpt (dict): The loaded model checkpoint.
        file (str): The loaded filename.

    Examples:
        >>> from ultralytics.nn.tasks import torch_safe_load
        >>> ckpt, file = torch_safe_load("path/to/best.pt", safe_only=True)
    """
    from ultralytics.utils.downloads import attempt_download_asset

    check_suffix(file=weight, suffix=".pt")
    file = attempt_download_asset(weight)  # search online if missing locally
    try:
        with temporary_modules(
            modules={
                "ultralytics.yolo.utils": "ultralytics.utils",
                "ultralytics.yolo.v8": "ultralytics.models.yolo",
                "ultralytics.yolo.data": "ultralytics.data",
            },
            attributes={
                "ultralytics.nn.modules.block.Silence": "torch.nn.Identity",  # YOLOv9e
                "ultralytics.nn.tasks.YOLOv10DetectionModel": "ultralytics.nn.tasks.DetectionModel",  # YOLOv10
                "ultralytics.utils.loss.v10DetectLoss": "ultralytics.utils.loss.E2EDetectLoss",  # YOLOv10
            },
        ):
            if safe_only:
                # Load via a custom pickle module that neutralizes unknown classes
                safe_pickle = types.ModuleType("safe_pickle")
                safe_pickle.Unpickler = SafeUnpickler
                safe_pickle.load = lambda file_obj: SafeUnpickler(file_obj).load()
                with open(file, "rb") as f:
                    ckpt = torch.load(f, pickle_module=safe_pickle)
            else:
                ckpt = torch_load(file, map_location="cpu")

    except ModuleNotFoundError as e:  # e.name is the missing module name
        if e.name == "models":
            raise TypeError(
                emojis(
                    f"ERROR ❌️ {weight} appears to be an Ultralytics YOLOv5 model originally trained "
                    f"with https://github.com/ultralytics/yolov5.\nThis model is NOT forwards compatible with "
                    f"YOLOv8 at https://github.com/ultralytics/ultralytics."
                    f"\nRecommend fixes are to train a new model using the latest 'ultralytics' package or to "
                    f"run a command with an official Ultralytics model, i.e. 'yolo predict model=yolo11n.pt'"
                )
            ) from e
        elif e.name == "numpy._core":
            raise TypeError(
                emojis(f"{weight} requires numpy>=1.26.1, however numpy=={__import__('numpy').__version__} is installed.")
            ) from e
        LOGGER.warning(
            f"{weight} appears to require '{e.name}', which is not in Ultralytics requirements."
            f"\nAutoInstall will run now for '{e.name}' but this feature will be removed in the future."
            f"\nRecommend fixes are to train a new model using the latest 'ultralytics' package or to "
            f"run a command with an official Ultralytics model, i.e. 'yolo predict model=yolo11n.pt'"
        )
        check_requirements(e.name)  # install missing module
        ckpt = torch_load(file, map_location="cpu")

    if not isinstance(ckpt, dict):
        # File is likely a YOLO instance saved with i.e. torch.save(model, "saved_model.pt")
        LOGGER.warning(
            f"The file '{weight}' appears to be improperly saved or formatted. "
            f"For optimal results, use model.save('filename.pt') to correctly save YOLO models."
        )
        ckpt = {"model": ckpt.model}

    return ckpt, file


def attempt_load_weights(weights, device=None, inplace=True, fuse=False):
    """
    Load an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a.

    Args:
        weights (str | List[str]): Model weights path(s).
        device (torch.device, optional): Device to load model to.
        inplace (bool): Whether to do inplace operations.
        fuse (bool): Whether to fuse model.

    Returns:
        (torch.nn.Module): Loaded model.
    """
    ensemble = Ensemble()
    for w in weights if isinstance(weights, list) else [weights]:
        ckpt, w = torch_safe_load(w)  # load ckpt
        args = {**DEFAULT_CFG_DICT, **ckpt["train_args"]} if "train_args" in ckpt else None  # combined args
        model = (ckpt.get("ema") or ckpt["model"]).to(device).float()  # FP32 model

        # Model compatibility updates
        model.args = args  # attach args to model
        model.pt_path = w  # attach *.pt file path to model
        model.task = getattr(model, "task", guess_model_task(model))
        if not hasattr(model, "stride"):
            model.stride = torch.tensor([32.0])

        # Append
        ensemble.append((model.fuse().eval() if fuse and hasattr(model, "fuse") else model.eval()))

    # Module updates
    for m in ensemble.modules():
        if hasattr(m, "inplace"):
            m.inplace = inplace
        elif isinstance(m, torch.nn.Upsample) and not hasattr(m, "recompute_scale_factor"):
            m.recompute_scale_factor = None  # torch 1.11.0 compatibility

    # Return single model
    if len(ensemble) == 1:
        return ensemble[-1]

    # Return ensemble
    LOGGER.info(f"Ensemble created with {weights}\n")
    for k in ("names", "nc", "yaml"):
        setattr(ensemble, k, getattr(ensemble[0], k))
    ensemble.stride = ensemble[int(torch.argmax(torch.tensor([m.stride.max() for m in ensemble])))].stride
    assert all(ensemble[0].nc == m.nc for m in ensemble), f"Models differ in class counts {[m.nc for m in ensemble]}"
    return ensemble


def attempt_load_one_weight(weight, device=None, inplace=True, fuse=False):
    """
    Load a single model weights.

    Args:
        weight (str): Model weight path.
        device (torch.device, optional): Device to load model to.
        inplace (bool): Whether to do inplace operations.
        fuse (bool): Whether to fuse model.

    Returns:
        model (torch.nn.Module): Loaded model.
        ckpt (dict): Model checkpoint dictionary.
    """
    ckpt, weight = torch_safe_load(weight)  # load ckpt
    args = {**DEFAULT_CFG_DICT, **(ckpt.get("train_args", {}))}  # combine model and default args
    model = (ckpt.get("ema") or ckpt["model"]).to(device).float()  # FP32 model

    # Model compatibility updates
    model.args = {k: v for k, v in args.items() if k in DEFAULT_CFG_KEYS}  # attach args to model
    model.pt_path = weight  # attach *.pt file path to model
    model.task = getattr(model, "task", guess_model_task(model))
    if not hasattr(model, "stride"):
        model.stride = torch.tensor([32.0])

    model = model.fuse().eval() if fuse and hasattr(model, "fuse") else model.eval()  # model in eval mode

    # Module updates
    for m in model.modules():
        if hasattr(m, "inplace"):
            m.inplace = inplace
        elif isinstance(m, torch.nn.Upsample) and not hasattr(m, "recompute_scale_factor"):
            m.recompute_scale_factor = None  # torch 1.11.0 compatibility

    # Return model and ckpt
    return model, ckpt


def parse_model(d, ch, verbose=True):  # model_dict, input_channels(3)
    """
    Parse a YOLO model.yaml dictionary into a PyTorch model.

    Args:
        d (dict): Model dictionary.
        ch (int): Input channels.
        verbose (bool): Whether to print model details.

    Returns:
        model (torch.nn.Sequential): PyTorch model.
        save (list): Sorted list of output layers.
    """
    import ast

    # Args
    legacy = True  # backward compatibility for v3/v5/v8/v9 models
    max_channels = float("inf")
    nc, act, scales = (d.get(x) for x in ("nc", "activation", "scales"))
    depth, width, kpt_shape = (d.get(x, 1.0) for x in ("depth_multiple", "width_multiple", "kpt_shape"))
    if scales:
        scale = d.get("scale")
        if not scale:
            scale = tuple(scales.keys())[0]
            LOGGER.warning(f"no model scale passed. Assuming scale='{scale}'.")
        depth, width, max_channels = scales[scale]

    if act:
        Conv.default_act = eval(act)  # redefine default activation, i.e. Conv.default_act = torch.nn.SiLU()
        if verbose:
            LOGGER.info(f"{colorstr('activation:')} {act}")  # print

    if verbose:
        LOGGER.info(f"\n{'':>3}{'from':>20}{'n':>3}{'params':>10}  {'module':<45}{'arguments':<30}")
    ch = [ch]
    layers, save, c2 = [], [], ch[-1]  # layers, savelist, ch out
    base_modules = frozenset(
        {
            Classify, Conv, ConvTranspose, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, C2fPSA, C2PSA,
            DWConv, Focus, BottleneckCSP, C1, C2, C2f, C3k2, RepNCSPELAN4, ELAN1, ADown, AConv, SPPELAN,
            C2fAttn, C3, C3TR, C3Ghost, torch.nn.ConvTranspose2d, DWConvTranspose2d, C3x, RepC3, PSA, SCDown,
            C2fCIB, A2C2f,
        }
    )
    repeat_modules = frozenset(  # modules with 'repeat' arguments
        {BottleneckCSP, C1, C2, C2f, C3k2, C2fAttn, C3, C3TR, C3Ghost, C3x, RepC3, C2fPSA, C2fCIB, C2PSA, A2C2f}
    )
    for i, (f, n, m, args) in enumerate(d["backbone"] + d["head"]):  # from, number, module, args
        m = (
            getattr(torch.nn, m[3:])
            if "nn." in m
            else getattr(__import__("torchvision").ops, m[16:])
            if "torchvision.ops." in m
            else globals()[m]
        )  # get module
        for j, a in enumerate(args):
            if isinstance(a, str):
                with contextlib.suppress(ValueError):
                    args[j] = locals()[a] if a in locals() else ast.literal_eval(a)
        n = n_ = max(round(n * depth), 1) if n > 1 else n  # depth gain
        if m in base_modules:
            c1, c2 = ch[f], args[0]
            if c2 != nc:  # if c2 not equal to number of classes (i.e. for Classify() output)
                c2 = make_divisible(min(c2, max_channels) * width, 8)
            if m is C2fAttn:  # set 1) embed channels and 2) num heads
                args[1] = make_divisible(min(args[1], max_channels // 2) * width, 8)
                args[2] = int(max(round(min(args[2], max_channels // 2 // 32)) * width, 1) if args[2] > 1 else args[2])

            args = [c1, c2, *args[1:]]
            if m in repeat_modules:
                args.insert(2, n)  # number of repeats
                n = 1
            if m is C3k2:
                legacy = False
                if scale in "mlx":  # for M/L/X sizes
                    args[3] = True
            if m is A2C2f:
                legacy = False
                if scale in "lx":  # for L/X sizes
                    args.extend((True, 1.2))
        elif m is AIFI:
            args = [ch[f], *args]
        elif m in frozenset({HGStem, HGBlock}):
            c1, cm, c2 = ch[f], args[0], args[1]
            args = [c1, cm, c2, *args[2:]]
            if m is HGBlock:
                args.insert(4, n)  # number of repeats
                n = 1
        elif m is ResNetLayer:
            c2 = args[1] if args[3] else args[1] * 4
        elif m is torch.nn.BatchNorm2d:
            args = [ch[f]]
        elif m is Concat:
            c2 = sum(ch[x] for x in f)
        elif m in frozenset(
            {Detect, WorldDetect, YOLOEDetect, Segment, YOLOESegment, Pose, OBB, ImagePoolingAttn, v10Detect}
        ):
            args.append([ch[x] for x in f])
            if m is Segment or m is YOLOESegment:
                args[2] = make_divisible(min(args[2], max_channels) * width, 8)
            if m in {Detect, YOLOEDetect, Segment, YOLOESegment, Pose, OBB}:
                m.legacy = legacy
        elif m is RTDETRDecoder:  # special case, channels arg must be passed at index 1
            args.insert(1, [ch[x] for x in f])
        elif m is CBLinear:
            c2 = args[0]
            c1 = ch[f]
            args = [c1, c2, *args[1:]]
        elif m is CBFuse:
            c2 = ch[f[-1]]
        elif m is TorchVision:
            c2 = args[0]
            c1 = ch[f]
            args = [*args[1:]]
        elif m is Index:
            c2 = args[0]
            args = [ch[f], *args[1:]]
        else:
            c2 = ch[f]

        m_ = torch.nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args)  # module
        t = str(m)[8:-2].replace("__main__.", "")  # module type
        m_.np = sum(x.numel() for x in m_.parameters())  # number params
        m_.i, m_.f, m_.type = i, f, t  # attach index, 'from' index, type
        if verbose:
            LOGGER.info(f"{i:>3}{str(f):>20}{n_:>3}{m_.np:10.0f}  {t:<45}{str(args):<30}")  # print
        save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1)  # append to savelist
        layers.append(m_)
        if i == 0:
            ch = []
        ch.append(c2)
    return torch.nn.Sequential(*layers), sorted(save)


def yaml_model_load(path):
    """
    Load a YOLOv8 model from a YAML file.

    Args:
        path (str | Path): Path to the YAML file.

    Returns:
        (dict): Model dictionary.
    """
    path = Path(path)
    if path.stem in (f"yolov{d}{x}6" for x in "nsmlx" for d in (5, 8)):
        new_stem = re.sub(r"(\d+)([nslmx])6(.+)?$", r"\1\2-p6\3", path.stem)
        LOGGER.warning(f"Ultralytics YOLO P6 models now use -p6 suffix. Renaming {path.stem} to {new_stem}.")
        path = path.with_name(new_stem + path.suffix)

    unified_path = re.sub(r"(\d+)([nslmx])(.+)?$", r"\1\3", str(path))  # i.e. yolov8x.yaml -> yolov8.yaml
    yaml_file = check_yaml(unified_path, hard=False) or check_yaml(path)
    d = YAML.load(yaml_file)  # model dict
    d["scale"] = guess_model_scale(path)
    d["yaml_file"] = str(path)
    return d


def guess_model_scale(model_path):
    """
    Extract the size character n, s, m, l, or x of the model's scale from the model path.

    Args:
        model_path (str | Path): The path to the YOLO model's YAML file.

    Returns:
        (str): The size character of the model's scale (n, s, m, l, or x).
    """
    try:
        return re.search(r"yolo(e-)?[v]?\d+([nslmx])", Path(model_path).stem).group(2)
    except AttributeError:
        return ""


def guess_model_task(model):
    """
    Guess the task of a PyTorch model from its architecture or configuration.

    Args:
        model (torch.nn.Module | dict): PyTorch model or model configuration in YAML format.

    Returns:
        (str): Task of the model ('detect', 'segment', 'classify', 'pose', 'obb').
    """

    def cfg2task(cfg):
        """Guess from YAML dictionary."""
        m = cfg["head"][-1][-2].lower()  # output module name
        if m in {"classify", "classifier", "cls", "fc"}:
            return "classify"
        if "detect" in m:
            return "detect"
        if "segment" in m:
            return "segment"
        if m == "pose":
            return "pose"
        if m == "obb":
            return "obb"

    # Guess from model cfg
    if isinstance(model, dict):
        with contextlib.suppress(Exception):
            return cfg2task(model)
    # Guess from PyTorch model
    if isinstance(model, torch.nn.Module):
        for x in "model.args", "model.model.args", "model.model.model.args":
            with contextlib.suppress(Exception):
                return eval(x)["task"]
        for x in "model.yaml", "model.model.yaml", "model.model.model.yaml":
            with contextlib.suppress(Exception):
                return cfg2task(eval(x))
        for m in model.modules():
            if isinstance(m, (Segment, YOLOESegment)):
                return "segment"
            elif isinstance(m, Classify):
                return "classify"
            elif isinstance(m, Pose):
                return "pose"
            elif isinstance(m, OBB):
                return "obb"
            elif isinstance(m, (Detect, WorldDetect, YOLOEDetect, v10Detect)):
                return "detect"

    # Guess from model filename
    if isinstance(model, (str, Path)):
        model = Path(model)
        if "-seg" in model.stem or "segment" in model.parts:
            return "segment"
        elif "-cls" in model.stem or "classify" in model.parts:
            return "classify"
        elif "-pose" in model.stem or "pose" in model.parts:
            return "pose"
        elif "-obb" in model.stem or "obb" in model.parts:
            return "obb"
        elif "detect" in model.parts:
            return "detect"

    # Unable to determine task from model
    LOGGER.warning(
        "Unable to automatically guess model task, assuming 'task=detect'. "
        "Explicitly define task for your model, i.e. 'task=detect', 'segment', 'classify','pose' or 'obb'."
    )
    return "detect"  # assume detect
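

# Minimal smoke test for the helpers above. This block is illustrative only, not part of the module's
# public API; it assumes the packaged "yolo11n.yaml" configuration is resolvable by check_yaml() and that
# the DetectionModel class defined earlier in this module is available.
if __name__ == "__main__":
    print(guess_model_scale("yolo11n.yaml"))  # -> "n"
    cfg = yaml_model_load("yolo11n.yaml")  # resolves the unified "yolo11.yaml" and records the scale
    print(guess_model_task(cfg))  # -> "detect"
    model = DetectionModel(cfg, ch=3, verbose=False)  # builds layers through parse_model()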