
from pathlib import Path
from typing import List, Optional, Union

from ultralytics import SAM, YOLO


def auto_annotate(
    data: Union[str, Path],
    det_model: str = "yolo11x.pt",
    sam_model: str = "sam_b.pt",
    device: str = "",
    conf: float = 0.25,
    iou: float = 0.45,
    imgsz: int = 640,
    max_det: int = 300,
    classes: Optional[List[int]] = None,
    output_dir: Optional[Union[str, Path]] = None,
) -> None:
    """
    Automatically annotate images using a YOLO object detection model and a SAM segmentation model.

    This function processes images in a specified directory, detects objects using a YOLO model, and then generates
    segmentation masks using a SAM model. The resulting annotations are saved as text files in YOLO format.

    Args:
        data (str | Path): Path to a folder containing images to be annotated.
        det_model (str): Path or name of the pre-trained YOLO detection model.
        sam_model (str): Path or name of the pre-trained SAM segmentation model.
        device (str): Device to run the models on (e.g., 'cpu', 'cuda', '0'). Empty string for auto-selection.
        conf (float): Confidence threshold for detection model.
        iou (float): IoU threshold for filtering overlapping boxes in detection results.
        imgsz (int): Input image resize dimension.
        max_det (int): Maximum number of detections per image.
        classes (List[int], optional): Filter predictions to specified class IDs, returning only relevant detections.
        output_dir (str | Path, optional): Directory to save the annotated results. If None, creates a default
            directory based on the input data path.

    Examples:
        >>> from ultralytics.data.annotator import auto_annotate
        >>> auto_annotate(data="ultralytics/assets", det_model="yolo11n.pt", sam_model="mobile_sam.pt")
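        A further illustrative call (the image folder, class IDs, and output directory below are hypothetical
        placeholders), restricting annotation to selected classes and writing labels to a custom directory:
        >>> auto_annotate(
        ...     data="path/to/images",
        ...     det_model="yolo11x.pt",
        ...     sam_model="sam_b.pt",
        ...     classes=[0, 2],
        ...     output_dir="path/to/labels",
        ... )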
    """
    det_model = YOLO(det_model)  # detection model used to propose boxes
    sam_model = SAM(sam_model)  # segmentation model prompted with those boxes

    data = Path(data)
    if not output_dir:
        output_dir = data.parent / f"{data.stem}_auto_annotate_labels"
    Path(output_dir).mkdir(exist_ok=True, parents=True)

    det_results = det_model(
        data, stream=True, device=device, conf=conf, iou=iou, imgsz=imgsz, max_det=max_det, classes=classes
    )

    for result in det_results:
        class_ids = result.boxes.cls.int().tolist()  # detected class IDs for this image
        if class_ids:
            boxes = result.boxes.xyxy  # detection boxes used as SAM prompts
            sam_results = sam_model(result.orig_img, bboxes=boxes, verbose=False, save=False, device=device)
            segments = sam_results[0].masks.xyn  # normalized segmentation polygons

            # Write one YOLO-format label file per image: "<class_id> x1 y1 x2 y2 ..."
            with open(f"{Path(output_dir) / Path(result.path).stem}.txt", "w", encoding="utf-8") as f:
                for i, s in enumerate(segments):
                    if not s.any():  # skip empty masks
                        continue
                    segment = map(str, s.reshape(-1).tolist())
                    f.write(f"{class_ids[i]} " + " ".join(segment) + "\n")