
from pathlib import Path
from typing import Any, Dict, List, Tuple, Union

import numpy as np
import torch

from ultralytics.models.yolo.detect import DetectionValidator
from ultralytics.utils import LOGGER, ops
from ultralytics.utils.metrics import OBBMetrics, batch_probiou


class OBBValidator(DetectionValidator):
    """
    A class extending the DetectionValidator class for validation based on an Oriented Bounding Box (OBB) model.

    This validator specializes in evaluating models that predict rotated bounding boxes, commonly used for aerial and
    satellite imagery where objects can appear at various orientations.

    Attributes:
        args (dict): Configuration arguments for the validator.
        metrics (OBBMetrics): Metrics object for evaluating OBB model performance.
        is_dota (bool): Flag indicating whether the validation dataset is in DOTA format.

    Methods:
        init_metrics: Initialize evaluation metrics for YOLO.
        _process_batch: Process batch of detections and ground truth boxes to compute IoU matrix.
        _prepare_batch: Prepare batch data for OBB validation.
        _prepare_pred: Prepare predictions with scaled and padded bounding boxes.
        plot_predictions: Plot predicted bounding boxes on input images.
        pred_to_json: Serialize YOLO predictions to COCO json format.
        save_one_txt: Save YOLO detections to a txt file in normalized coordinates.
        eval_json: Evaluate YOLO output in JSON format and return performance statistics.

    Examples:
        >>> from ultralytics.models.yolo.obb import OBBValidator
        >>> args = dict(model="yolo11n-obb.pt", data="dota8.yaml")
        >>> validator = OBBValidator(args=args)
        >>> validator(model=args["model"])
    Nreturnc                 j    t         |   ||||       d| j                  _        t	               | _        y)a  
        Initialize OBBValidator and set task to 'obb', metrics to OBBMetrics.

        This constructor initializes an OBBValidator instance for validating Oriented Bounding Box (OBB) models.
        It extends the DetectionValidator class and configures it specifically for the OBB task.

        Args:
            dataloader (torch.utils.data.DataLoader, optional): Dataloader to be used for validation.
            save_dir (str | Path, optional): Directory to save results.
            args (dict | SimpleNamespace, optional): Arguments containing validation parameters.
            _callbacks (list, optional): List of callback functions to be called during validation.
        obbN)super__init__argstaskr   metrics)self
dataloadersave_dirr   
_callbacks	__class__s        ^/var/www/html/test/engine/venv/lib/python3.12/site-packages/ultralytics/models/yolo/obb/val.pyr   zOBBValidator.__init__+   s,     	XtZ@		!|    modelc                     t         |   |       | j                  j                  | j                  j
                  d      }t        |t              xr d|v | _        y)z
        Initialize evaluation metrics for YOLO obb validation.

        Args:
            model (torch.nn.Module): Model to validate.
         DOTAN)	r   init_metricsdatagetr   split
isinstancestris_dota)r   r   valr   s      r   r#   zOBBValidator.init_metrics<   sE     	U#iimmDIIOOR0!#s+=#r   predsbatchc                 B   t        |d         dk(  st        |d         dk(  r5dt        j                  t        |d         | j                  ft              iS t        |d   |d         }d| j                  |d   |d   |      j                         j                         iS )aU  
        Compute the correct prediction matrix for a batch of detections and ground truth bounding boxes.

        Args:
            preds (Dict[str, torch.Tensor]): Prediction dictionary containing 'cls' and 'bboxes' keys with detected
                class labels and bounding boxes.
            batch (Dict[str, torch.Tensor]): Batch dictionary containing 'cls' and 'bboxes' keys with ground truth
                class labels and bounding boxes.

        Returns:
            (Dict[str, np.ndarray]): Dictionary containing 'tp' key with the correct prediction matrix as a numpy
                array with shape (N, 10), which includes 10 IoU levels for each detection, indicating the accuracy
                of predictions compared to the ground truth.

        Examples:
            >>> detections = torch.rand(100, 7)  # 100 sample detections
            >>> gt_bboxes = torch.rand(50, 5)  # 50 sample ground truth boxes
            >>> gt_cls = torch.randint(0, 5, (50,))  # 50 ground truth class labels
            >>> correct_matrix = validator._process_batch(detections, gt_bboxes, gt_cls)
        clsr   tpdtypebboxes)	lennpzerosniouboolr   match_predictionscpunumpy)r   r+   r,   ious       r   _process_batchzOBBValidator._process_batchG   s    * uU|!Su%6!%;"((Ce$5tyy#ANOOE(OU8_=d,,U5\5<MQQSYY[\\r   c                     t         |   |      }|D ]0  }t        j                  |d   |j	                  d      gd      |d<   2 |S )z
        Args:
            preds (torch.Tensor): Raw predictions from the model.

        Returns:
            (List[Dict[str, torch.Tensor]]): Processed predictions with angle information concatenated to bboxes.
        r2   extradim)r   postprocesstorchcatpop)r   r+   predr   s      r   rB   zOBBValidator.postprocessa   sQ     #E* 	TD"YYX8I'JPRSDN	Tr   sic                 f   |d   |k(  }|d   |   j                  d      }|d   |   }|d   |   }|d   j                  dd }|d	   |   }t        |      rV|d
ddf   j                  t	        j
                  || j                        g d          t        j                  ||||d       |||||dS )a  
        Prepare batch data for OBB validation with proper scaling and formatting.

        Args:
            si (int): Batch index to process.
            batch (Dict[str, Any]): Dictionary containing batch data with keys:
                - batch_idx: Tensor of batch indices
                - cls: Tensor of class labels
                - bboxes: Tensor of bounding boxes
                - ori_shape: Original image shapes
                - img: Batch of images
                - ratio_pad: Ratio and padding information

        Returns:
            (Dict[str, Any]): Prepared batch data with scaled bounding boxes and metadata.
        	batch_idxr.   r?   r2   	ori_shapeimg   N	ratio_pad.   )device)   r   rP   r   TrM   xywh)r.   r2   rJ   imgszrM   )	squeezeshaper3   mul_rC   tensorrO   r   scale_boxes)	r   rG   r,   idxr.   bboxrJ   rS   rM   s	            r   _prepare_batchzOBBValidator._prepare_batchn   s    " K B&El3''+Xs#+&r*	e""12&+&r*	s8bqbMu||E$++F|TUOOE4idSdUajkkr   rF   pbatchc                     |d   }| j                   j                  r|dz  }t        j                  |d   |d   j	                         |d   |d   d      }||d	   |d
S )a`  
        Prepare predictions by scaling bounding boxes to original image dimensions.

        This method takes prediction tensors containing bounding box coordinates and scales them from the model's
        input dimensions to the original image dimensions using the provided batch information.

        Args:
            pred (Dict[str, torch.Tensor]): Prediction dictionary containing bounding box coordinates and other information.
            pbatch (Dict[str, Any]): Dictionary containing batch information with keys:
                - imgsz (tuple): Model input image size.
                - ori_shape (tuple): Original image shape.
                - ratio_pad (tuple): Ratio and padding information for scaling.

        Returns:
            (Dict[str, torch.Tensor]): Scaled prediction dictionary with bounding boxes in original image dimensions.
        r.   r   rS   r2   rJ   rM   TrQ   conf)r2   r^   r.   )r   
single_clsr   rX   clone)r   rF   r\   r.   r2   s        r   _prepare_predzOBBValidator._prepare_pred   sm    " 5k991HC7OT(^113VK5HTZ[fTgnr
 !$v,sCCr   nic                     |D ]/  }t        j                  |d   ddddf         |d   ddddf<   1 t        |   |||       y)am  
        Plot predicted bounding boxes on input images and save the result.

        Args:
            batch (Dict[str, Any]): Batch data containing images, file paths, and other metadata.
            preds (List[torch.Tensor]): List of prediction tensors for each image in the batch.
            ni (int): Batch index used for naming the output file.

        Examples:
            >>> validator = OBBValidator()
            >>> batch = {"img": images, "im_file": paths}
            >>> preds = [torch.rand(10, 7)]  # Example predictions for one image
            >>> validator.plot_predictions(batch, preds, 0)
        r2   NrN   )r   	xywh2xyxyr   plot_predictions)r   r,   r+   rb   pr   s        r   re   zOBBValidator.plot_predictions   sY      	CA!$q{1bqb5/A!BAhK2A2	C 	 r2r   prednfilenamec                 ^   t        |      j                  }|j                         rt        |      n|}|d   }t	        j
                  |      j                  dd      }t        |j                         |j                         |d   j                         |d   j                               D ]x  \  }}}	}
| j                  j                  || j                  t        |
         t        |	d      |D cg c]  }t        |d       c}|D cg c]  }t        |d       c}d       z y	c c}w c c}w )
a  
        Convert YOLO predictions to COCO JSON format with rotated bounding box information.

        Args:
            predn (Dict[str, torch.Tensor]): Prediction dictionary containing 'bboxes', 'conf', and 'cls' keys
                with bounding box coordinates, confidence scores, and class predictions.
            filename (str | Path): Path to the image file for which predictions are being processed.

        Notes:
            This method processes rotated bounding box predictions and converts them to both rbox format
            (x, y, w, h, angle) and polygon format (x1, y1, x2, y2, x3, y3, x4, y4) before adding them
            to the JSON dictionary.
        r2   r?      r^   r.         )image_idcategory_idscorerboxpolyN)r   stem	isnumericintr   xywhr2xyxyxyxyviewziptolistjdictappend	class_mapround)r   rg   rh   rr   rm   rp   rq   rbscxs               r   pred_to_jsonzOBBValidator.pred_to_json   s     H~"" $ 03t9dX!!$',,R3dkkmT[[]E&M<P<P<RTYZ_T`TgTgTij 		JAq!QJJ (#'>>#a&#9"1a[234QU1a[4234QU1a[4		 54s   -D%D*	save_confrU   filec                 ,   ddl }ddlm}  | |j                  |d   |d   f|j                        d| j
                  t        j                  |d   |d   j                  d      |d	   j                  d      gd
            j                  ||       y)a  
        Save YOLO OBB detections to a text file in normalized coordinates.

        Args:
            predn (torch.Tensor): Predicted detections with shape (N, 7) containing bounding boxes, confidence scores,
                class predictions, and angles in format (x, y, w, h, conf, cls, angle).
            save_conf (bool): Whether to save confidence scores in the text file.
            shape (Tuple[int, int]): Original image shape in format (height, width).
            file (Path): Output file path to save detections.

        Examples:
            >>> validator = OBBValidator()
            >>> predn = torch.tensor([[100, 100, 50, 30, 0.9, 0, 45]])  # One detection: x,y,w,h,conf,cls,angle
            >>> validator.save_one_txt(predn, True, (640, 480), "detection.txt")
        r   N)ResultsrP   r0   r2   r^   r?   r.   r@   )pathnamesr   )r   )
r:   ultralytics.engine.resultsr   r5   uint8r   rC   rD   	unsqueezesave_txt)r   rg   r   rU   r   r4   r   s          r   save_one_txtzOBBValidator.save_one_txt   s      	6BHHeAha):**		5?E&M,C,CB,GuI_I_`bIcdjkl		

 (49(
-r   statsc                    | j                   j                  r| j                  rt        | j                        rddl}ddl}ddlm} | j                  dz  }| j                  dz  }|j                  dd       |j                  t        |            }t        j                  d| d	       |D ]  }|d
   }	|d   }
| j                  |d   dz
     j!                  dd      }|d   }t        |d| z   ddd      5 }|j#                  |	 d|
 d|d    d|d    d|d    d|d    d|d    d|d    d|d    d|d    d       ddd        | j                  dz  }|j                  dd        |t$              }t        j                  d| d	       |D ]  }|d
   j'                  dd      d   }	|j)                  d       }d! |j+                  ||d
         d   j'                  d"      D        \  }}|d#   |d   |d   dz
  }}
}|dxx   |z  cc<   |dxx   |z  cc<   |j-                  |
|g       ||	   j/                  |        |j1                         D ]  \  }	}t3        j4                  |      }t3        j6                  |ddddf         j9                         dz  }|ddddf   |z  }|dddf   }|ddddf   j;                         }|ddddfxx   |z  cc<   t=        j>                  ||d$      }||   }t=        j@                  |ddddf         jC                  d%d&      }t3        jD                  ||ddddf   gd%'      jG                         D ]  }| j                  tI        |d%            j!                  dd      }|dd( D cg c]  }tK        |d       }}tK        |d(   d      }
t        |d| z   ddd      5 }|j#                  |	 d|
 d|d    d|d    d|d    d|d    d|d    d|d    d|d    d|d    d       ddd         |S # 1 sw Y   xY wc c}w # 1 sw Y   xY w))a   
        Evaluate YOLO output in JSON format and save predictions in DOTA format.

        Args:
            stats (Dict[str, Any]): Performance statistics dictionary.

        Returns:
            (Dict[str, Any]): Updated performance statistics.
        r   N)defaultdictzpredictions.jsonpredictions_txtT)parentsexist_okz'Saving predictions with DOTA format to z...rm   ro   rn   rP    -rq   Task1_z.txtazutf-8)encodingrL   rl   rN   rk         
predictions_merged_txtz.Saving merged predictions with DOTA format to __z	\d+___\d+c              3   2   K   | ]  }t        |        y w)N)rt   ).0r   s     r   	<genexpr>z)OBBValidator.eval_json.<locals>.<genexpr>  s     [1A[s   ___rp   g333333?r?   rj   r@   )&r   	save_jsonr)   r3   ry   jsonrecollectionsr   r   mkdirloadopenr
   infor   replace
writelineslistr&   compilefindallextendrz   itemsrC   rW   maxitemr`   r   nms_rotatedru   rv   rD   rx   rt   r|   )r   r   r   r   r   	pred_jsonpred_txtr$   drm   ro   	classnamerf   fpred_merged_txtmerged_resultspatternr   yrZ   r.   max_whr   scoresr~   is                             r   	eval_jsonzOBBValidator.eval_json   s    994<<C

O/(::I}}'88HNN4$N799T)_-DKKA(3OP rZ='
 JJq'7!';<DDS#N	fIX&(<<=TBCRYZ r^_LLH:QugQqtfAadV1QqTF!AaD6QRSTUVSWRXXYZ[\]Z^Y__`abcdae`ffghijkhlgmmo!pqr rr #mm.FFO!!$!>(.NKKHHYY\]^ 6Z=..tQ7:**\2[

7AjM(J1(M(S(STY(Z[1#$V9aj!M:JQ:NSeQ1Q1UCL)x(//56 #1"6"6"8 v$||D)42A2;/446:AaCL6)adBQBK%%'!RaR%AOOAvs3Aw&&tArrE{388Q?AtAqsF|#4"=DDF vA $

3qu: 6 > >sC HI./f5q!5A5!!B%OEVI;3G!G HMs]de vijz5'1Q4&!A$q1aPQRSPTvUVWXYZW[V\\]^_`a^b]ccdefgheidjjklmnolpkqqs%tuv vvv( Kr r> 6v vs    =AP)P6AP;)P3	;Q)NNNN)r   N)__name__
__module____qualname____doc__r   rC   nnModuler#   r   r(   Tensorr4   ndarrayr<   r   rB   rt   r   r[   ra   re   r   r   r   r7   r   r   r   __classcell__)r   s   @r   r   r      s   8$"	>%((// 	>d 	>]Dell):$; ]DellIZD[ ]`dehjljtjtet`u ]4 $tC<M7N2O l lT#s(^ lS#X l8D$sELL'8"9 D4S> DVZ[^`e`l`l[lVm D23d38n 3T%,,=O 3UX 3]a 3($sELL'8"9 U3PT9EU Z^ :.$sELL'8"9 .d .SXY\^aYaSb .jn .sw .6@tCH~ @$sCx. @r   r   )pathlibr   typingr   r   r   r   r   r:   r4   rC   ultralytics.models.yolo.detectr	   ultralytics.utilsr
   r   ultralytics.utils.metricsr   r   r    r   r   <module>r      s.     0 0   = ) ?a% ar   
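

if __name__ == "__main__":
    # Illustrative sketch, not part of the library API: a minimal, self-contained look at the two primitives this
    # validator builds on -- `batch_probiou` for rotated-box IoU (used in `_process_batch`) and
    # `ops.xywhr2xyxyxyxy` for polygon conversion (used in `pred_to_json`). The box values below are made up;
    # each box is (x_center, y_center, width, height, angle_radians).
    gt = torch.tensor([[50.0, 50.0, 40.0, 20.0, 0.0], [120.0, 80.0, 30.0, 60.0, 0.7854]])  # 2 ground-truth boxes
    pred = torch.tensor([[52.0, 49.0, 38.0, 22.0, 0.05]])  # 1 predicted box

    iou = batch_probiou(gt, pred)  # (2, 1) pairwise probabilistic IoU matrix, rows=GT, cols=predictions
    poly = ops.xywhr2xyxyxyxy(pred).view(-1, 8)  # 8-value polygon (x1, y1, ..., x4, y4) per box
    print(f"probiou:\n{iou}\npolygon:\n{poly}")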