Affine Transformation for Face Recognition

The detected face is rectified (warped to a canonical set of landmark positions) so that it can be used as the input of the next-stage network.
import numpy as np
import cv2

class FaceWarpException(Exception):
    def __str__(self):
        # Report the module file together with the original exception message.
        return 'In File {}:{}'.format(__file__, super().__str__())

# Reference positions of the 5 facial landmarks (left eye, right eye, nose tip,
# left and right mouth corners) in the default 96 x 112 crop.
DEFAULT_CROP_SIZE = (96, 112)
REFERENCE_FACIAL_POINTS = [
    [30.29459953,  51.69630051],
    [65.53179932,  51.50139999],
    [48.02519989,  71.73660278],
    [33.54930115,  92.3655014],
    [62.72990036,  92.20410156]
]

def get_reference_facial_points(output_size=None,
                                inner_padding_factor=0.0,
                                outer_padding=(0, 0),
                                default_square=True):
    tmp_5pts = np.array(REFERENCE_FACIAL_POINTS)
    tmp_crop_size = np.array(DEFAULT_CROP_SIZE)

    # 0) Optionally pad the default 96 x 112 crop into a square and shift
    #    the reference points accordingly.
    if default_square:
        size_diff = max(tmp_crop_size) - tmp_crop_size
        tmp_5pts += size_diff / 2
        tmp_crop_size += size_diff

    if (output_size and
            output_size[0] == tmp_crop_size[0] and
            output_size[1] == tmp_crop_size[1]):
        return tmp_5pts

    if inner_padding_factor == 0 and outer_padding == (0, 0):
        if output_size is None:
            return tmp_5pts
        else:
            raise FaceWarpException(
                'No paddings to do, output_size must be None or {}'.format(tmp_crop_size))

    if not (0 <= inner_padding_factor <= 1.0):
        raise FaceWarpException('Not (0 <= inner_padding_factor <= 1.0)')

    # 1) If padding is requested but no output_size is given, derive it
    #    from the crop size and the paddings.
    if ((inner_padding_factor > 0 or outer_padding[0] > 0 or outer_padding[1] > 0)
            and output_size is None):
        output_size = (tmp_crop_size * (1 + inner_padding_factor * 2)).astype(np.int32)
        output_size += np.array(outer_padding)

    if not (outer_padding[0] < output_size[0]
            and outer_padding[1] < output_size[1]):
        raise FaceWarpException('Not (outer_padding[0] < output_size[0] '
                                'and outer_padding[1] < output_size[1])')

    # 2) Apply the inner padding: grow the crop and shift the points.
    if inner_padding_factor > 0:
        size_diff = tmp_crop_size * inner_padding_factor * 2
        tmp_5pts += size_diff / 2
        tmp_crop_size += np.round(size_diff).astype(np.int32)

    # 3) Scale the padded crop to (output_size - 2 * outer_padding); the
    #    aspect ratio must be preserved so a single scale factor exists.
    size_bf_outer_pad = np.array(output_size) - np.array(outer_padding) * 2

    if size_bf_outer_pad[0] * tmp_crop_size[1] != size_bf_outer_pad[1] * tmp_crop_size[0]:
        raise FaceWarpException(
            'Must have (output_size - outer_padding) = some_scale * (crop_size * (1.0 + inner_padding_factor))')

    scale_factor = size_bf_outer_pad[0].astype(np.float32) / tmp_crop_size[0]
    tmp_5pts = tmp_5pts * scale_factor

    # 4) Finally shift the points by the outer padding.
    reference_5point = tmp_5pts + np.array(outer_padding)

    return reference_5point
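
# Usage sketch (not in the original listing): with the defaults the function
# simply squares the 96 x 112 layout, so
#   get_reference_facial_points()
# returns the five reference points for a 112 x 112 crop (each x-coordinate
# shifted by +8, y unchanged).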
def get_affine_transform_matrix(src_pts, dst_pts):
    # Solve dst ~= [src | 1] @ A in the least-squares sense and return the
    # 2 x 3 matrix expected by cv2.warpAffine.
    tfm = np.float32([[1, 0, 0], [0, 1, 0]])
    n_pts = src_pts.shape[0]

    ones = np.ones((n_pts, 1), src_pts.dtype)
    src_pts_ = np.hstack([src_pts, ones])
    dst_pts_ = np.hstack([dst_pts, ones])

    A, res, rank, s = np.linalg.lstsq(src_pts_, dst_pts_, rcond=None)

    if rank == 3:
        tfm = np.float32([
            [A[0, 0], A[1, 0], A[2, 0]],
            [A[0, 1], A[1, 1], A[2, 1]]
        ])
    elif rank == 2:
        tfm = np.float32([
            [A[0, 0], A[1, 0], 0],
            [A[0, 1], A[1, 1], 0]
        ])
    return tfm
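
# Illustrative note (not in the original): the returned 2 x 3 matrix maps a
# source point the way cv2.warpAffine expects, i.e.
#   [x_dst, y_dst] = tfm[:, :2] @ [x_src, y_src] + tfm[:, 2]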

def warp_and_crop_face(src_img, facial_pts,
                       reference_pts=None,
                       crop_size=(96, 112),
                       align_type='similarity'):
    if reference_pts is None:
        if crop_size[0] == 96 and crop_size[1] == 112:
            reference_pts = REFERENCE_FACIAL_POINTS
        else:
            default_square = False
            inner_padding_factor = 0
            outer_padding = (0, 0)
            output_size = crop_size

            reference_pts = get_reference_facial_points(output_size,
                                                        inner_padding_factor,
                                                        outer_padding,
                                                        default_square)
    ref_pts = np.float32(reference_pts)
    ref_pts_shp = ref_pts.shape

    if max(ref_pts_shp) < 3 or min(ref_pts_shp) != 2:
        raise FaceWarpException(
            'reference_pts.shape must be (K,2) or (2,K) and K>2')
    if ref_pts_shp[0] == 2:
        ref_pts = ref_pts.T

    src_pts = np.float32(facial_pts)
    src_pts_shp = src_pts.shape
    if max(src_pts_shp) < 3 or min(src_pts_shp) != 2:
        raise FaceWarpException(
            'facial_pts.shape must be (K,2) or (2,K) and K>2')
    if src_pts_shp[0] == 2:
        src_pts = src_pts.T

    if src_pts.shape != ref_pts.shape:
        raise FaceWarpException(
            'facial_pts and reference_pts must have the same shape')

    if align_type == 'cv2.affine':
        # Exact affine transform from the first 3 point pairs.
        tfm = cv2.getAffineTransform(src_pts[0:3], ref_pts[0:3])
    elif align_type == 'affine':
        # Least-squares affine transform over all point pairs.
        tfm = get_affine_transform_matrix(src_pts, ref_pts)
    else:
        # 'similarity' (default): the listing does not include its own
        # similarity solver, so OpenCV's estimateAffinePartial2D (rotation +
        # uniform scale + translation) is used here as a stand-in.
        tfm, _ = cv2.estimateAffinePartial2D(src_pts, ref_pts)

    face_img = cv2.warpAffine(src_img, tfm, (crop_size[0], crop_size[1]))
    return face_img
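
For reference, here is a minimal usage sketch (not from the original post): the image path and the five landmark coordinates below are placeholder values standing in for the output of a 5-point face detector such as MTCNN.

if __name__ == '__main__':
    # Placeholder inputs: an image and five (x, y) landmarks
    # (left eye, right eye, nose tip, left/right mouth corners).
    img = cv2.imread('face.jpg')
    landmarks = [[101.0, 112.0], [153.0, 110.0], [127.0, 140.0],
                 [108.0, 165.0], [148.0, 163.0]]

    # Default 96 x 112 crop with similarity alignment.
    face_96x112 = warp_and_crop_face(img, landmarks)

    # Square 112 x 112 crop: pass explicitly derived reference points.
    ref_112 = get_reference_facial_points(output_size=(112, 112),
                                          default_square=True)
    face_112x112 = warp_and_crop_face(img, landmarks,
                                      reference_pts=ref_112,
                                      crop_size=(112, 112))

    cv2.imwrite('face_aligned.jpg', face_112x112)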
