
import torch
import warnings
from enum import IntEnum
from skimage import io
import numpy as np
from packaging import version
from tqdm import tqdm

from .utils import *
from .folder_data import FolderData


class LandmarksType(IntEnum):
    """Enum class defining the type of landmarks to detect.

    ``TWO_D`` - the detected points ``(x,y)`` lie in a 2D space and follow the visible contour of the face
    ``TWO_HALF_D`` - the detected points ``(x,y)`` are the 2D projection of the corresponding 3D points
    ``THREE_D`` - the detected points ``(x,y,z)`` lie in a 3D space
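
    Example (illustrative note, not part of the original docstring): passing ``LandmarksType.TWO_D``
    to ``FaceAlignment`` selects the 2D FAN model, while ``LandmarksType.THREE_D`` additionally
    loads the depth-prediction network used to recover the ``z`` coordinate.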

    """
    TWO_D = 1
    TWO_HALF_D = 2
    THREE_D = 3


class NetworkSize(IntEnum):
    LARGE = 4


default_model_urls = {
    '2DFAN-4': 'https://www.adrianbulat.com/downloads/python-fan/2DFAN4-cd938726ad.zip',
    '3DFAN-4': 'https://www.adrianbulat.com/downloads/python-fan/3DFAN4-4a694010b9.zip',
    'depth': 'https://www.adrianbulat.com/downloads/python-fan/depth-6c4283c0e0.zip',
}

models_urls = {
    '1.6': {
        '2DFAN-4': 'https://www.adrianbulat.com/downloads/python-fan/2DFAN4_1.6-c827573f02.zip',
        '3DFAN-4': 'https://www.adrianbulat.com/downloads/python-fan/3DFAN4_1.6-ec5cf40a1d.zip',
        'depth': 'https://www.adrianbulat.com/downloads/python-fan/depth_1.6-2aa3f18772.zip',
    },
    '1.5': {
        '2DFAN-4': 'https://www.adrianbulat.com/downloads/python-fan/2DFAN4_1.5-a60332318a.zip',
        '3DFAN-4': 'https://www.adrianbulat.com/downloads/python-fan/3DFAN4_1.5-176570af4d.zip',
        'depth': 'https://www.adrianbulat.com/downloads/python-fan/depth_1.5-bc10f98e39.zip',
    },
}


class FaceAlignment:
    def __init__(self, landmarks_type, network_size=NetworkSize.LARGE,
                 device='cuda', dtype=torch.float32, flip_input=False,
                 face_detector='sfd', face_detector_kwargs=None, verbose=False):
        self.device = device
        self.flip_input = flip_input
        self.landmarks_type = landmarks_type
        self.verbose = verbose
        self.dtype = dtype

        if version.parse(torch.__version__) < version.parse('1.5.0'):
            raise ImportError(
                'Unsupported pytorch version detected. Minimum supported version of pytorch: 1.5.0. '
                'Either upgrade (recommended) your pytorch setup, or downgrade to face-alignment 1.2.0.')

        network_size = int(network_size)
        pytorch_version = torch.__version__
        if 'dev' in pytorch_version:
            pytorch_version = pytorch_version.rsplit('.', 2)[0]
        else:
            pytorch_version = pytorch_version.rsplit('.', 1)[0]

        if 'cuda' in device:
            torch.backends.cudnn.benchmark = True

        # Dynamically import and instantiate the requested face-detector backend.
        face_detector_module = __import__('face_alignment.detection.' + face_detector,
                                          globals(), locals(), [face_detector], 0)
        face_detector_kwargs = face_detector_kwargs or {}
        self.face_detector = face_detector_module.FaceDetector(device=device, verbose=verbose, **face_detector_kwargs)

        # Initialise the face alignment (FAN) network, picking the weights that match
        # the detected pytorch version when a versioned model is available.
        if landmarks_type == LandmarksType.TWO_D:
            network_name = '2DFAN-' + str(network_size)
        else:
            network_name = '3DFAN-' + str(network_size)
        self.face_alignment_net = torch.jit.load(
            load_file_from_url(models_urls.get(pytorch_version, default_model_urls)[network_name]))

        self.face_alignment_net.to(device, dtype=dtype)
        self.face_alignment_net.eval()

        # Initialise the depth prediction network (only needed for 3D landmarks).
        if landmarks_type == LandmarksType.THREE_D:
            self.depth_prediciton_net = torch.jit.load(
                load_file_from_url(models_urls.get(pytorch_version, default_model_urls)['depth']))

            self.depth_prediciton_net.to(device, dtype=dtype)
            self.depth_prediciton_net.eval()

    def get_landmarks(self, image_or_path, detected_faces=None, return_bboxes=False, return_landmark_score=False):
        """Deprecated, please use get_landmarks_from_image

        Arguments:
            image_or_path {string or numpy.array or torch.tensor} -- The input image or path to it

        Keyword Arguments:
            detected_faces {list of numpy.array} -- list of bounding boxes, one for each face found
            in the image (default: {None})
            return_bboxes {boolean} -- If True, return the face bounding boxes in addition to the keypoints.
            return_landmark_score {boolean} -- If True, return the keypoint scores along with the keypoints.
        """
        return self.get_landmarks_from_image(image_or_path, detected_faces, return_bboxes, return_landmark_score)

    @torch.no_grad()
    def get_landmarks_from_image(self, image_or_path, detected_faces=None, return_bboxes=False,
                                 return_landmark_score=False):
        """Predict the landmarks for each face present in the image.

        This function predicts a set of 68 2D or 3D landmark points, one set for each face present in the image.
        If detected_faces is None, the method will also run a face detector.
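
        Example (illustrative, not part of the original docstring; the image path is hypothetical and the
        import assumes the package re-exports these names at the top level):
            >>> from face_alignment import FaceAlignment, LandmarksType
            >>> from skimage import io
            >>> fa = FaceAlignment(LandmarksType.TWO_D, device='cpu')
            >>> image = io.imread('path/to/image.jpg')
            >>> landmarks = fa.get_landmarks_from_image(image)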

         Arguments:
            image_or_path {string or numpy.array or torch.tensor} -- The input image or path to it.

        Keyword Arguments:
            detected_faces {list of numpy.array} -- list of bounding boxes, one for each face found
            in the image (default: {None})
            return_bboxes {boolean} -- If True, return the face bounding boxes in addition to the keypoints.
            return_landmark_score {boolean} -- If True, return the keypoint scores along with the keypoints.

        Return:
            result:
                1. if both return_bboxes and return_landmark_score are False, result will be:
                    landmark
                2. Otherwise, result will be one of the following, depending on the actual value of return_* arguments.
                    (landmark, landmark_score, detected_face)
                    (landmark, None,           detected_face)
                    (landmark, landmark_score, None         )
        """
        image = get_image(image_or_path)

        if detected_faces is None:
            detected_faces = self.face_detector.detect_from_image(image.copy())

        if len(detected_faces) == 0:
            warnings.warn("No faces were detected.")
            if return_bboxes or return_landmark_score:
                return None, None, None
            else:
                return None

        landmarks = []
        landmarks_scores = []
        for i, d in enumerate(detected_faces):
            # Build a crop centred on the detected box (shifted slightly upwards) and
            # rescaled to the detector's reference resolution.
            center = torch.tensor(
                [d[2] - (d[2] - d[0]) / 2.0, d[3] - (d[3] - d[1]) / 2.0])
            center[1] = center[1] - (d[3] - d[1]) * 0.12
            scale = (d[2] - d[0] + d[3] - d[1]) / self.face_detector.reference_scale

            inp = crop(image, center, scale)
            inp = torch.from_numpy(inp.transpose((2, 0, 1))).float()

            inp = inp.to(self.device, dtype=self.dtype)
            inp.div_(255.0).unsqueeze_(0)

            out = self.face_alignment_net(inp).detach()
            if self.flip_input:
                out += flip(self.face_alignment_net(flip(inp)).detach(), is_label=True)
            out = out.to(device='cpu', dtype=torch.float32).numpy()

            pts, pts_img, scores = get_preds_fromhm(out, center.numpy(), scale)
            pts, pts_img = torch.from_numpy(pts), torch.from_numpy(pts_img)
            pts, pts_img = pts.view(68, 2) * 4, pts_img.view(68, 2)
            scores = scores.squeeze(0)

            if self.landmarks_type == LandmarksType.THREE_D:
                # Render one Gaussian heatmap per 2D landmark and feed it, together with the
                # cropped image, to the depth network to recover the z coordinate.
                heatmaps = np.zeros((68, 256, 256), dtype=np.float32)
                for j in range(68):
                    if pts[j, 0] > 0 and pts[j, 1] > 0:
                        heatmaps[j] = draw_gaussian(heatmaps[j], pts[j], 2)
                heatmaps = torch.from_numpy(heatmaps).unsqueeze_(0)

                heatmaps = heatmaps.to(self.device, dtype=self.dtype)
                depth_pred = self.depth_prediciton_net(
                    torch.cat((inp, heatmaps), 1)).data.cpu().view(68, 1).to(dtype=torch.float32)
                pts_img = torch.cat(
                    (pts_img, depth_pred * (1.0 / (256.0 / (200.0 * scale)))), 1)

            landmarks.append(pts_img.numpy())
            landmarks_scores.append(scores)

        if not return_bboxes:
            detected_faces = None
        if not return_landmark_score:
            landmarks_scores = None
        if return_bboxes or return_landmark_score:
            return landmarks, landmarks_scores, detected_faces
        else:
            return landmarks

    @torch.no_grad()
    def get_landmarks_from_batch(self, image_batch, detected_faces=None, return_bboxes=False,
                                 return_landmark_score=False):
        """Predict the landmarks for each face present in the image.

        This function predicts a set of 68 2D or 3D landmark points, one set for each face found in each image of the batch.
        If detected_faces is None, the method will also run a face detector on the whole batch.
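
        Example (illustrative, not part of the original docstring; assumes ``fa`` is an already
        constructed FaceAlignment instance and ``image`` is an HxWx3 uint8 array):
            >>> import torch
            >>> batch = torch.from_numpy(image.transpose(2, 0, 1))[None]  # shape (1, 3, H, W)
            >>> per_image_landmarks = fa.get_landmarks_from_batch(batch)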

         Arguments:
            image_batch {torch.tensor} -- The input images batch

        Keyword Arguments:
            detected_faces {list of numpy.array} -- list of bounding boxes, one for each face found
            in the image (default: {None})
            return_bboxes {boolean} -- If True, return the face bounding boxes in addition to the keypoints.
            return_landmark_score {boolean} -- If True, return the keypoint scores along with the keypoints.

        Return:
            result:
                1. if both return_bboxes and return_landmark_score are False, result will be:
                    landmarks
                2. Otherwise, result will be one of the following, depending on the actual value of return_* arguments.
                    (landmark, landmark_score, detected_face)
                    (landmark, None,           detected_face)
                    (landmark, landmark_score, None         )
        """
        if detected_faces is None:
            detected_faces = self.face_detector.detect_from_batch(image_batch)

        if len(detected_faces) == 0:
            warnings.warn("No faces were detected.")
            if return_bboxes or return_landmark_score:
                return None, None, None
            else:
                return None

        landmarks = []
        landmarks_scores_list = []
        # Run the single-image pipeline once per frame, reusing the per-frame detections.
        for i, faces in enumerate(detected_faces):
            res = self.get_landmarks_from_image(
                image_batch[i].cpu().numpy().transpose(1, 2, 0),
                detected_faces=faces,
                return_landmark_score=return_landmark_score,
            )
            if return_landmark_score:
                landmark_set, landmarks_scores, _ = res
                landmarks_scores_list.append(landmarks_scores)
            else:
                landmark_set = res
            # Kept for backward compatibility: flatten the per-face landmark sets of this frame.
            if landmark_set is not None:
                landmark_set = np.concatenate(landmark_set, axis=0)
            else:
                landmark_set = []
            landmarks.append(landmark_set)

        if not return_bboxes:
            detected_faces = None
        if not return_landmark_score:
            landmarks_scores_list = None
        if return_bboxes or return_landmark_score:
            return landmarks, landmarks_scores_list, detected_faces
        else:
            return landmarks

    def get_landmarks_from_directory(self, path, extensions=['.jpg', '.png'], recursive=True,
                                     show_progress_bar=True, return_bboxes=False, return_landmark_score=False):
        """Scan a directory for images with a given extension type(s) and predict the landmarks for each
            face present in the images found.
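
            Example (illustrative, not part of the original docstring; ``fa`` is an already constructed
            FaceAlignment instance and the directory path is hypothetical):
                >>> predictions = fa.get_landmarks_from_directory('path/to/images/')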

         Arguments:
            path {str} -- path to the target directory containing the images

        Keyword Arguments:
            extensions {list of str} -- list containing the image extensions considered (default: ['.jpg', '.png'])
            recursive {boolean} -- If True, scans for images recursively (default: True)
            show_progress_bar {boolean} -- If True displays a progress bar (default: True)
            return_bboxes {boolean} -- If True, return the face bounding boxes in addition to the keypoints.
            return_landmark_score {boolean} -- If True, return the keypoint scores along with the keypoints.
        """
        dataset = FolderData(path, self.face_detector.tensor_or_path_to_ndarray, extensions, recursive, self.verbose)
        dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, num_workers=2, prefetch_factor=4)

        predictions = {}
        for (image_path, image) in tqdm(dataloader, disable=not show_progress_bar):
            image_path, image = image_path[0], image[0]
            bounding_boxes = self.face_detector.detect_from_image(image)
            if return_bboxes or return_landmark_score:
                preds, bbox, score = self.get_landmarks_from_image(
                    image, bounding_boxes, return_bboxes=return_bboxes, return_landmark_score=return_landmark_score)
                predictions[image_path] = (preds, bbox, score)
            else:
                preds = self.get_landmarks_from_image(image, bounding_boxes)
                predictions[image_path] = preds

        return predictions