faceBlendCommon.py

import cv2
import numpy as np
import math

# Constrain a point to lie inside the boundary of a w x h image.
def constrainPoint(p, w, h):
    p = (min(max(p[0], 0), w - 1), min(max(p[1], 0), h - 1))
    return p

# Compute a similarity transform given two sets of two points.
# OpenCV requires 3 pairs of corresponding points, so we fabricate the third one.
def similarityTransform(inPoints, outPoints):
    s60 = math.sin(60 * math.pi / 180)
    c60 = math.cos(60 * math.pi / 180)

    inPts = np.copy(inPoints).tolist()
    outPts = np.copy(outPoints).tolist()

    # The third point is chosen so that the three points form an equilateral triangle.
    xin = c60 * (inPts[0][0] - inPts[1][0]) - s60 * (inPts[0][1] - inPts[1][1]) + inPts[1][0]
    yin = s60 * (inPts[0][0] - inPts[1][0]) + c60 * (inPts[0][1] - inPts[1][1]) + inPts[1][1]
    inPts.append([int(xin), int(yin)])

    xout = c60 * (outPts[0][0] - outPts[1][0]) - s60 * (outPts[0][1] - outPts[1][1]) + outPts[1][0]
    yout = s60 * (outPts[0][0] - outPts[1][0]) + c60 * (outPts[0][1] - outPts[1][1]) + outPts[1][1]
    outPts.append([int(xout), int(yout)])

    # Estimate the similarity transform from the three point pairs.
    tform = cv2.estimateAffinePartial2D(np.array([inPts]), np.array([outPts]))
    return tform[0]
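
# Usage sketch (illustrative values, not from the original file): align a face
# so that its detected eye corners land at canonical positions in a 600x600
# output image.
#
#   eyeSrc = [(180, 220), (420, 220)]                        # assumed detections
#   eyeDst = [(0.3 * 600, 600 / 3), (0.7 * 600, 600 / 3)]    # canonical positions
#   tform = similarityTransform(eyeSrc, eyeDst)
#   alignedImg = cv2.warpAffine(img, tform, (600, 600))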
# Check if a point is inside a rectangle
def rectContains(rect, point):
    if point[0] < rect[0]:
        return False
    elif point[1] < rect[1]:
        return False
    elif point[0] > rect[2]:
        return False
    elif point[1] > rect[3]:
        return False
    return True

# Calculate the Delaunay triangulation for a set of points.
# Returns a list of triangles, each given as the indices of its 3 vertices
# in the points array.
def calculateDelaunayTriangles(rect, points):
    # Create an instance of Subdiv2D
    subdiv = cv2.Subdiv2D(rect)

    # Insert points into subdiv
    for p in points:
        subdiv.insert((int(p[0]), int(p[1])))

    # Get the Delaunay triangulation
    triangleList = subdiv.getTriangleList()

    # Find the indices of the triangle vertices in the points array
    delaunayTri = []
    for t in triangleList:
        # Each triangle returned by getTriangleList is a list of 6 coordinates
        # of the 3 vertices in x1, y1, x2, y2, x3, y3 format.
        # Store the triangle as a list of three points.
        pt = []
        pt.append((t[0], t[1]))
        pt.append((t[2], t[3]))
        pt.append((t[4], t[5]))

        pt1 = (t[0], t[1])
        pt2 = (t[2], t[3])
        pt3 = (t[4], t[5])

        if rectContains(rect, pt1) and rectContains(rect, pt2) and rectContains(rect, pt3):
            # Variable to store a triangle as indices into the list of points
            ind = []
            # Find the index of each vertex in the points list
            for j in range(0, 3):
                for k in range(0, len(points)):
                    if abs(pt[j][0] - points[k][0]) < 1.0 and abs(pt[j][1] - points[k][1]) < 1.0:
                        ind.append(k)
            # Store the triangulation as a tuple of indices
            if len(ind) == 3:
                delaunayTri.append((ind[0], ind[1], ind[2]))

    return delaunayTri
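
# Usage sketch (illustrative values, not from the original file): triangulate
# four points lying inside a 200x200 rectangle. Each returned tuple holds the
# indices of one triangle's vertices in the points list, e.g. (0, 1, 3).
#
#   rect = (0, 0, 200, 200)
#   points = [(20, 20), (180, 20), (100, 180), (100, 100)]
#   triangles = calculateDelaunayTriangles(rect, points)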
# Apply the affine transform computed from srcTri and dstTri to src and
# return an output patch of the given size.
def applyAffineTransform(src, srcTri, dstTri, size):
    # Given a pair of triangles, find the affine transform.
    warpMat = cv2.getAffineTransform(np.float32(srcTri), np.float32(dstTri))

    # Apply the affine transform just found to the src image.
    dst = cv2.warpAffine(src, warpMat, (size[0], size[1]), None,
                         flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101)
    return dst
# Warp and alpha-blend the triangular region t1 of img1 into the triangular
# region t2 of img2.
def warpTriangle(img1, img2, t1, t2):
    # Find the bounding rectangle of each triangle
    r1 = cv2.boundingRect(np.float32([t1]))
    r2 = cv2.boundingRect(np.float32([t2]))

    # Offset points by the top-left corner of the respective rectangles
    t1Rect = []
    t2Rect = []
    t2RectInt = []
    for i in range(0, 3):
        t1Rect.append(((t1[i][0] - r1[0]), (t1[i][1] - r1[1])))
        t2Rect.append(((t2[i][0] - r2[0]), (t2[i][1] - r2[1])))
        t2RectInt.append((int(t2[i][0] - r2[0]), int(t2[i][1] - r2[1])))

    # Get a mask by filling the destination triangle
    mask = np.zeros((r2[3], r2[2], 3), dtype=np.float32)
    cv2.fillConvexPoly(mask, np.int32(t2RectInt), (1.0, 1.0, 1.0), 16, 0)

    # Apply the warp to the small rectangular patch
    img1Rect = img1[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]]
    size = (r2[2], r2[3])
    img2Rect = applyAffineTransform(img1Rect, t1Rect, t2Rect, size)
    img2Rect = img2Rect * mask

    # Copy the triangular region of the rectangular patch to the output image
    img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] = img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] * ((1.0, 1.0, 1.0) - mask)
    img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] = img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] + img2Rect
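
# Minimal self-test sketch (not part of the original file): warp one triangle
# from a synthetic source image into a blank destination image using the
# helpers above. All shapes, coordinates, and the output filename are
# illustrative assumptions.
if __name__ == "__main__":
    img1 = np.zeros((200, 200, 3), dtype=np.float32)
    cv2.rectangle(img1, (40, 40), (160, 160), (0.2, 0.6, 1.0), -1)
    img2 = np.zeros((200, 200, 3), dtype=np.float32)

    # Source and destination triangles as lists of (x, y) vertices
    t1 = [(50, 50), (150, 60), (100, 150)]
    t2 = [(60, 40), (160, 80), (90, 160)]
    warpTriangle(img1, img2, t1, t2)

    cv2.imwrite("warpedTriangle.jpg", (img2 * 255).astype(np.uint8))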