Face Detection

Haar Cascade Classifiers

We can extract features from an image and use those features to classify objects.

What is it?

A Haar Cascade Classifier is an object detection method that feeds Haar features into a series (cascade) of classifiers to identify objects in an image. Each classifier is trained to identify one type of object; however, we can run several of them in parallel, e.g. detecting eyes and faces together.

Advantages and Disadvantages

  • Disadvantage: a single classifier can only identify one type of object
  • Advantage: several classifiers can be run in parallel

Haar Classifiers Explained

Haar classifiers are trained using many positive images and negative images.

positive images = images with the object present

negative images = images without the object present

We then extract features using sliding windows of rectangular blocks. Each feature is a single value, calculated by subtracting the sum of pixel intensities under the white rectangles from the sum under the black rectangles. However, this is an enormous number of calculations, even for a base window of 24 x 24 pixels (over 180,000 features are generated). So the researchers devised a method called the Integral Image, which computes any such rectangle sum with just four array references.
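
To see why the Integral Image makes this cheap, here is a minimal sketch (my own illustration, not OpenCV code): once the cumulative sums are precomputed, any rectangle sum costs exactly four array references.

import numpy as np

def integral_image(img):
    # ii[y, x] = sum of all pixels above and to the left of (y, x), inclusive
    return img.cumsum(axis=0).cumsum(axis=1)

def rect_sum(ii, x, y, w, h):
    # sum of the w x h rectangle whose top-left corner is (x, y),
    # using only four array references: A - B - C + D
    A = ii[y + h - 1, x + w - 1]
    B = ii[y - 1, x + w - 1] if y > 0 else 0
    C = ii[y + h - 1, x - 1] if x > 0 else 0
    D = ii[y - 1, x - 1] if y > 0 and x > 0 else 0
    return A - B - C + D

img = np.arange(16, dtype=np.int64).reshape(4, 4)
ii = integral_image(img)
assert rect_sum(ii, 1, 1, 2, 2) == img[1:3, 1:3].sum()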

However, they still had 180,000 features and the majority of them added no real value.

Boosting was then used to determine the most informative features, with Freund & Schapire's AdaBoost the algorithm of choice due to its ease of implementation. Boosting is the process of building a strong classifier out of weak classifiers, by assigning heavier weights to the examples the weak classifiers get wrong. This reduced the 180,000 features to about 6,000, which is still quite a lot.
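
To make the reweighting idea concrete, here is a generic single-round AdaBoost sketch (an illustration of Freund & Schapire's update, not the actual Viola-Jones training code):

import numpy as np

def adaboost_round(weights, predictions, labels):
    # One AdaBoost round: the weak learner's weighted error decides its vote (alpha),
    # and misclassified samples get heavier weights for the next round.
    miss = (predictions != labels)
    err = np.sum(weights[miss]) / np.sum(weights)
    alpha = 0.5 * np.log((1 - err) / err)  # weak learner's weight in the final vote
    weights = weights * np.exp(alpha * np.where(miss, 1.0, -1.0))
    return weights / weights.sum(), alpha

labels = np.array([1, 1, -1, -1])
preds = np.array([1, -1, -1, -1])            # one mistake
w = np.full(4, 0.25)
w, alpha = adaboost_round(w, preds, labels)  # the mistaken sample now weighs more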

Think about this intuitively: of those 6,000 features, some will be more informative than others. What if we used the most informative features first, just to check whether the region can potentially contain a face (false positives are no big deal at this stage)? Doing so eliminates the need to calculate all 6,000 features for every window.

This concept is called the Cascade of Classifiers - for face detection, the Viola-Jones method used 38 stages.
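
The control flow looks roughly like this (a sketch with hypothetical stage definitions, just to show the early exit; the stages in a real cascade come from training):

def passes_cascade(window, stages):
    # stages: list of (features, threshold) pairs, cheapest and most informative first
    for features, threshold in stages:
        score = sum(f(window) for f in features)  # hypothetical per-stage score
        if score < threshold:
            return False  # rejected early: stop computing features for this window
    return True  # survived every stage (38 of them in Viola-Jones face detection)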

How to use

Pre-trained classifiers ship with OpenCV and can be found in the data/haarcascades folder of the OpenCV source repository.

They are stored as .xml files.

Here is a small sample program that uses haarcascade_frontalface_alt.xml:

import cv2

# start video capture from webcam
vc = cv2.VideoCapture(0)

# load the Haar cascade
face_cascade = cv2.CascadeClassifier('./haarcascades/haarcascade_frontalface_alt.xml')

# read the first frame from the video capture
rval, frame = vc.read()

while True:
    if frame is not None:
        # detect faces in the frame
        faces = face_cascade.detectMultiScale(frame, 1.1, 5)
        # print("Found " + str(len(faces)) + " face(s)")

        # draw a border around each detected face
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)

        # show the frame after detection
        cv2.imshow('preview', frame)

    # read the next frame
    rval, frame = vc.read()

    if cv2.waitKey(1) & 0xFF == ord('q'):  # quit the program with the "q" key
        break

vc.release()
cv2.destroyAllWindows()
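
For reference, the second and third positional arguments to detectMultiScale above are scaleFactor and minNeighbors. Here is the same call with named parameters (the values are the ones used above; tune them for your camera):

faces = face_cascade.detectMultiScale(
    frame,
    scaleFactor=1.1,  # how much the image is shrunk at each scale of the search pyramid
    minNeighbors=5    # how many overlapping detections are needed to accept a face
)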

Let's detect the eyes as well, then extract the face region.

import cv2
import numpy as np

face_classifier = cv2.CascadeClassifier('Haarcascades/haarcascade_frontalface_default.xml')
eye_classifier = cv2.CascadeClassifier('Haarcascades/haarcascade_eye.xml')

def face_detector(img, size=0.5):
    # Convert image to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_classifier.detectMultiScale(gray, 1.3, 5)
    if len(faces) == 0:
        return img

    for (x, y, w, h) in faces:
        # pad the box; clamp so the ROI stays inside the image
        x = max(x - 50, 0)
        y = max(y - 50, 0)
        w = w + 50
        h = h + 50
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = img[y:y + h, x:x + w]
        eyes = eye_classifier.detectMultiScale(roi_gray)

        for (ex, ey, ew, eh) in eyes:
            cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 0, 255), 2)

    roi_color = cv2.flip(roi_color, 1)
    return roi_color

cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    cv2.imshow('Our Face Extractor', face_detector(frame))
    if cv2.waitKey(1) == 13:  # 13 is the Enter key
        break

cap.release()
cv2.destroyAllWindows()

Detecting Other Objects

Besides face classifiers, OpenCV also provides Haar cascade xml files for detecting other objects.

Car Detection

If you see the error !empty() in function detectMultiScale, OpenCV could not load the cascade xml file.

Note: You might need to include the full path to the xml file.
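
A quick way to catch this early (a small sketch; adjust the path to wherever your cascade files live):

cascade = cv2.CascadeClassifier('Haarcascades/haarcascade_car.xml')
if cascade.empty():  # True when the xml file could not be loaded
    raise IOError('Could not load cascade xml - check the path')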

import cv2
import time
import numpy as np

# Create our car classifier
car_classifier = cv2.CascadeClassifier('Haarcascades/haarcascade_car.xml')
# ^ might need to include the full path of the xml file

# Initiate video capture for the video file
cap = cv2.VideoCapture('images/cars.avi')


# Loop once video is successfully loaded
while cap.isOpened():

    time.sleep(.05)
    # Read the next frame
    ret, frame = cap.read()
    if not ret:  # stop when the video ends
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Pass the frame to our car classifier
    cars = car_classifier.detectMultiScale(gray, 1.4, 2)

    # Draw bounding boxes around any cars identified
    for (x, y, w, h) in cars:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 255), 2)
    cv2.imshow('Cars', frame)

    if cv2.waitKey(1) == 13:  # 13 is the Enter key
        break

cap.release()
cv2.destroyAllWindows()

Pedestrian Detection

If you see the error !empty() in function detectMultiScale, OpenCV could not load the cascade xml file.

Note: You might need to include the full path to the xml file.

import cv2
import numpy as np

# Create our body classifier
body_classifier = cv2.CascadeClassifier('Haarcascades/haarcascade_fullbody.xml')
# ^ might need to include the full path of the xml file

# Initiate video capture for the video file
cap = cv2.VideoCapture('images/walking.avi')

# Loop once video is successfully loaded
while cap.isOpened():

    # Read the next frame
    ret, frame = cap.read()
    if not ret:  # stop when the video ends
        break
    frame = cv2.resize(frame, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_LINEAR)

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Pass the frame to our body classifier
    bodies = body_classifier.detectMultiScale(gray, 1.2, 3)

    # Draw bounding boxes around any bodies identified
    for (x, y, w, h) in bodies:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 255), 2)
    cv2.imshow('Pedestrians', frame)

    if cv2.waitKey(1) == 13:  # 13 is the Enter key
        break

cap.release()
cv2.destroyAllWindows()

Face Analysis and Filters

Let's say we want to make a face-swap app, just like the one on Snapchat.

Although Haar Cascade Classifiers provide excellent results for face detection, face-swapping needs a different solution.

We need to detect facial landmarks.

Even then, it is still hard to make a face-swap app.

What is so hard about that?

You need to handle:

  • identifying facial features
  • warping the image to fit the new and different facial expression
  • color matching
  • creating seamless borders on the edges of the new swapped face

Dlib

Dlib offers a relatively easy way to do all of this.
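
Dlib can be installed with pip install dlib. The pre-trained 68-point landmark model used below, shape_predictor_68_face_landmarks.dat, is not bundled with the library itself; it can be downloaded from dlib.net as a .bz2 archive, which you need to extract next to your script.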

Code Implementation

With this code, you can locate the 68 landmark points on a human face.

import cv2
import dlib
import numpy

PREDICTOR_PATH = "shape_predictor_68_face_landmarks.dat"
predictor = dlib.shape_predictor(PREDICTOR_PATH)
detector = dlib.get_frontal_face_detector()


class TooManyFaces(Exception):
    pass

class NoFaces(Exception):
    pass

def get_landmarks(im):
    rects = detector(im, 1)

    if len(rects) > 1:
        raise TooManyFaces
    if len(rects) == 0:
        raise NoFaces

    return numpy.matrix([[p.x, p.y] for p in predictor(im, rects[0]).parts()])

def annotate_landmarks(im, landmarks):
    im = im.copy()
    for idx, point in enumerate(landmarks):
        pos = (point[0, 0], point[0, 1])
        cv2.putText(im, str(idx), pos,
                    fontFace=cv2.FONT_HERSHEY_SCRIPT_SIMPLEX,
                    fontScale=0.4,
                    color=(0, 0, 255))
        cv2.circle(im, pos, 3, color=(0, 255, 255))
    return im

image = cv2.imread('Obama.jpg')
# ^ might need full path
landmarks = get_landmarks(image)
image_with_landmarks = annotate_landmarks(image, landmarks)

cv2.imshow('Result', image_with_landmarks)
cv2.imwrite('image_with_landmarks.jpg', image_with_landmarks)
cv2.waitKey(0)
cv2.destroyAllWindows()

Your first Face-swapping App (Image Only)

Things you need to know:

Facial landmark index ranges (upper bounds exclusive, matching Python's range() in the code below):

  • MOUTH_POINTS = 48 to 61
  • RIGHT_BROW_POINTS = 17 to 22
  • LEFT_BROW_POINTS = 22 to 27
  • RIGHT_EYE_POINTS = 36 to 42
  • LEFT_EYE_POINTS = 42 to 48
  • NOSE_POINTS = 27 to 35
  • JAW_POINTS = 0 to 17

Code Implementation

import cv2
import dlib
import numpy
from time import sleep
import sys

PREDICTOR_PATH = "shape_predictor_68_face_landmarks.dat"
SCALE_FACTOR = 1
FEATHER_AMOUNT = 11

FACE_POINTS = list(range(17, 68))
MOUTH_POINTS = list(range(48, 61))
RIGHT_BROW_POINTS = list(range(17, 22))
LEFT_BROW_POINTS = list(range(22, 27))
RIGHT_EYE_POINTS = list(range(36, 42))
LEFT_EYE_POINTS = list(range(42, 48))
NOSE_POINTS = list(range(27, 35))
JAW_POINTS = list(range(0, 17))

# Points used to line up the images.
ALIGN_POINTS = (LEFT_BROW_POINTS + RIGHT_EYE_POINTS + LEFT_EYE_POINTS +
                RIGHT_BROW_POINTS + NOSE_POINTS + MOUTH_POINTS)

# Points from the second image to overlay on the first. The convex hull of each
# element will be overlaid.
OVERLAY_POINTS = [
    LEFT_EYE_POINTS + RIGHT_EYE_POINTS + LEFT_BROW_POINTS + RIGHT_BROW_POINTS,
    NOSE_POINTS + MOUTH_POINTS,
]

# Amount of blur to use during colour correction, as a fraction of the
# pupillary distance.
COLOUR_CORRECT_BLUR_FRAC = 0.6

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(PREDICTOR_PATH)

class TooManyFaces(Exception):
    pass

class NoFaces(Exception):
    pass

def get_landmarks(im):
    # Returns facial landmarks as (x,y) coordinates
    rects = detector(im, 1)

    if len(rects) > 1:
        raise TooManyFaces
    if len(rects) == 0:
        raise NoFaces

    return numpy.matrix([[p.x, p.y] for p in predictor(im, rects[0]).parts()])


def annotate_landmarks(im, landmarks):
    # Overlays the landmark points on the image itself
    im = im.copy()
    for idx, point in enumerate(landmarks):
        pos = (point[0, 0], point[0, 1])
        cv2.putText(im, str(idx), pos,
                    fontFace=cv2.FONT_HERSHEY_SCRIPT_SIMPLEX,
                    fontScale=0.4,
                    color=(0, 0, 255))
        cv2.circle(im, pos, 3, color=(0, 255, 255))
    return im

def draw_convex_hull(im, points, color):
    points = cv2.convexHull(points)
    cv2.fillConvexPoly(im, points, color=color)

def get_face_mask(im, landmarks):
    im = numpy.zeros(im.shape[:2], dtype=numpy.float64)

    for group in OVERLAY_POINTS:
        draw_convex_hull(im,
                         landmarks[group],
                         color=1)

    im = numpy.array([im, im, im]).transpose((1, 2, 0))

    im = (cv2.GaussianBlur(im, (FEATHER_AMOUNT, FEATHER_AMOUNT), 0) > 0) * 1.0
    im = cv2.GaussianBlur(im, (FEATHER_AMOUNT, FEATHER_AMOUNT), 0)

    return im

def transformation_from_points(points1, points2):
    """
    Return an affine transformation [s * R | T] such that:
        sum ||s*R*p1,i + T - p2,i||^2
    is minimized.
    """
    # Solve the procrustes problem by subtracting centroids, scaling by the
    # standard deviation, and then using the SVD to calculate the rotation. See
    # the following for more details:
    #   https://en.wikipedia.org/wiki/Orthogonal_Procrustes_problem

    points1 = points1.astype(numpy.float64)
    points2 = points2.astype(numpy.float64)

    c1 = numpy.mean(points1, axis=0)
    c2 = numpy.mean(points2, axis=0)
    points1 -= c1
    points2 -= c2

    s1 = numpy.std(points1)
    s2 = numpy.std(points2)
    points1 /= s1
    points2 /= s2

    U, S, Vt = numpy.linalg.svd(points1.T * points2)

    # The R we seek is in fact the transpose of the one given by U * Vt. This
    # is because the above formulation assumes the matrix goes on the right
    # (with row vectors) whereas our solution requires the matrix to be on the
    # left (with column vectors).
    R = (U * Vt).T

    return numpy.vstack([numpy.hstack(((s2 / s1) * R,
                                       c2.T - (s2 / s1) * R * c1.T)),
                         numpy.matrix([0., 0., 1.])])

def read_im_and_landmarks(image):
    im = image
    im = cv2.resize(im, None, fx=1, fy=1, interpolation=cv2.INTER_LINEAR)
    im = cv2.resize(im, (im.shape[1] * SCALE_FACTOR,
                         im.shape[0] * SCALE_FACTOR))
    s = get_landmarks(im)

    return im, s

def warp_im(im, M, dshape):
    output_im = numpy.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(im,
                   M[:2],
                   (dshape[1], dshape[0]),
                   dst=output_im,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return output_im

def correct_colours(im1, im2, landmarks1):
    blur_amount = COLOUR_CORRECT_BLUR_FRAC * numpy.linalg.norm(
        numpy.mean(landmarks1[LEFT_EYE_POINTS], axis=0) -
        numpy.mean(landmarks1[RIGHT_EYE_POINTS], axis=0))
    blur_amount = int(blur_amount)
    if blur_amount % 2 == 0:
        blur_amount += 1
    im1_blur = cv2.GaussianBlur(im1, (blur_amount, blur_amount), 0)
    im2_blur = cv2.GaussianBlur(im2, (blur_amount, blur_amount), 0)

    # Avoid divide-by-zero errors.
    im2_blur += (128 * (im2_blur <= 1.0)).astype(im2_blur.dtype)

    return (im2.astype(numpy.float64) * im1_blur.astype(numpy.float64) /
            im2_blur.astype(numpy.float64))


def swappy(image1, image2):

    im1, landmarks1 = read_im_and_landmarks(image1)
    im2, landmarks2 = read_im_and_landmarks(image2)

    M = transformation_from_points(landmarks1[ALIGN_POINTS],
                                   landmarks2[ALIGN_POINTS])

    mask = get_face_mask(im2, landmarks2)
    warped_mask = warp_im(mask, M, im1.shape)
    combined_mask = numpy.max([get_face_mask(im1, landmarks1), warped_mask],
                              axis=0)

    warped_im2 = warp_im(im2, M, im1.shape)
    warped_corrected_im2 = correct_colours(im1, warped_im2, landmarks1)

    output_im = im1 * (1.0 - combined_mask) + warped_corrected_im2 * combined_mask
    cv2.imwrite('output.jpg', output_im)
    image = cv2.imread('output.jpg')
    return image


## Enter the paths to your input images here
image1 = cv2.imread('images/Hillary.jpg')
image2 = cv2.imread('images/Trump.jpg')

swapped = swappy(image1, image2)
cv2.imshow('Face Swap 1', swapped)

swapped = swappy(image2, image1)
cv2.imshow('Face Swap 2', swapped)

cv2.waitKey(0)

cv2.destroyAllWindows()

Code Explained
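
Walking through swappy step by step:

  • get_landmarks runs dlib's face detector and the 68-point shape predictor, returning the landmarks as a 68 x 2 matrix of (x, y) coordinates.
  • transformation_from_points solves the orthogonal Procrustes problem: it subtracts the centroids, normalizes by the standard deviation, and uses an SVD to find the rotation, giving the scale, rotation, and translation that best map one set of ALIGN_POINTS onto the other.
  • warp_im applies that affine transformation so the second face lands on top of the first.
  • get_face_mask draws the convex hulls of the eye, brow, nose, and mouth point groups and feathers the edges with a Gaussian blur, so only those regions get copied.
  • correct_colours rescales the warped face by the ratio of Gaussian-blurred versions of the two images (the blur size is a fraction of the pupillary distance), roughly matching skin tone before the final blend.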

Your first Face-swapping App (LIVE!)

Everyone could become Donald Trump.

import cv2
import dlib
import numpy
from time import sleep
import sys

## Our pretrained model that predicts the rectangles that correspond to the facial features of a face
PREDICTOR_PATH = "shape_predictor_68_face_landmarks.dat"
SCALE_FACTOR = 1
FEATHER_AMOUNT = 11

FACE_POINTS = list(range(17, 68))
MOUTH_POINTS = list(range(48, 61))
RIGHT_BROW_POINTS = list(range(17, 22))
LEFT_BROW_POINTS = list(range(22, 27))
RIGHT_EYE_POINTS = list(range(36, 42))
LEFT_EYE_POINTS = list(range(42, 48))
NOSE_POINTS = list(range(27, 35))
JAW_POINTS = list(range(0, 17))

# Points used to line up the images.
ALIGN_POINTS = (LEFT_BROW_POINTS + RIGHT_EYE_POINTS + LEFT_EYE_POINTS +
                RIGHT_BROW_POINTS + NOSE_POINTS + MOUTH_POINTS)

# Points from the second image to overlay on the first. The convex hull of each
# element will be overlaid.
OVERLAY_POINTS = [
    LEFT_EYE_POINTS + RIGHT_EYE_POINTS + LEFT_BROW_POINTS + RIGHT_BROW_POINTS,
    NOSE_POINTS + MOUTH_POINTS,
]

# Amount of blur to use during colour correction, as a fraction of the
# pupillary distance.
COLOUR_CORRECT_BLUR_FRAC = 0.6
cascade_path = 'Haarcascades/haarcascade_frontalface_default.xml'
cascade = cv2.CascadeClassifier(cascade_path)
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(PREDICTOR_PATH)


def get_landmarks(im, dlibOn):

    if dlibOn == True:
        rects = detector(im, 1)
        if len(rects) > 1:
            return "error"
        if len(rects) == 0:
            return "error"
        return numpy.matrix([[p.x, p.y] for p in predictor(im, rects[0]).parts()])

    else:
        rects = cascade.detectMultiScale(im, 1.3, 5)
        if len(rects) > 1:
            return "error"
        if len(rects) == 0:
            return "error"
        x, y, w, h = rects[0]
        # cast to plain ints - dlib.rectangle does not accept numpy integers
        rect = dlib.rectangle(int(x), int(y), int(x + w), int(y + h))
        return numpy.matrix([[p.x, p.y] for p in predictor(im, rect).parts()])


def annotate_landmarks(im, landmarks):
    im = im.copy()
    for idx, point in enumerate(landmarks):
        pos = (point[0, 0], point[0, 1])
        cv2.putText(im, str(idx), pos,
                    fontFace=cv2.FONT_HERSHEY_SCRIPT_SIMPLEX,
                    fontScale=0.4,
                    color=(0, 0, 255))
        cv2.circle(im, pos, 3, color=(0, 255, 255))
    return im


def draw_convex_hull(im, points, color):
    points = cv2.convexHull(points)
    cv2.fillConvexPoly(im, points, color=color)


def get_face_mask(im, landmarks):
    im = numpy.zeros(im.shape[:2], dtype=numpy.float64)

    for group in OVERLAY_POINTS:
        draw_convex_hull(im,
                         landmarks[group],
                         color=1)

    im = numpy.array([im, im, im]).transpose((1, 2, 0))

    im = (cv2.GaussianBlur(im, (FEATHER_AMOUNT, FEATHER_AMOUNT), 0) > 0) * 1.0
    im = cv2.GaussianBlur(im, (FEATHER_AMOUNT, FEATHER_AMOUNT), 0)

    return im


def transformation_from_points(points1, points2):
    """
    Return an affine transformation [s * R | T] such that:
        sum ||s*R*p1,i + T - p2,i||^2
    is minimized.
    """
    # Solve the procrustes problem by subtracting centroids, scaling by the
    # standard deviation, and then using the SVD to calculate the rotation. See
    # the following for more details:
    #   https://en.wikipedia.org/wiki/Orthogonal_Procrustes_problem

    points1 = points1.astype(numpy.float64)
    points2 = points2.astype(numpy.float64)

    c1 = numpy.mean(points1, axis=0)
    c2 = numpy.mean(points2, axis=0)
    points1 -= c1
    points2 -= c2

    s1 = numpy.std(points1)
    s2 = numpy.std(points2)
    points1 /= s1
    points2 /= s2

    U, S, Vt = numpy.linalg.svd(points1.T * points2)

    # The R we seek is in fact the transpose of the one given by U * Vt. This
    # is because the above formulation assumes the matrix goes on the right
    # (with row vectors) whereas our solution requires the matrix to be on the
    # left (with column vectors).
    R = (U * Vt).T

    return numpy.vstack([numpy.hstack(((s2 / s1) * R,
                                       c2.T - (s2 / s1) * R * c1.T)),
                         numpy.matrix([0., 0., 1.])])


def read_im_and_landmarks(fname):
    im = cv2.imread(fname, cv2.IMREAD_COLOR)
    im = cv2.resize(im, None, fx=0.35, fy=0.35, interpolation=cv2.INTER_LINEAR)
    im = cv2.resize(im, (im.shape[1] * SCALE_FACTOR,
                         im.shape[0] * SCALE_FACTOR))
    s = get_landmarks(im, dlibOn)

    return im, s


def warp_im(im, M, dshape):
    output_im = numpy.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(im,
                   M[:2],
                   (dshape[1], dshape[0]),
                   dst=output_im,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)
    return output_im


def correct_colours(im1, im2, landmarks1):
    blur_amount = COLOUR_CORRECT_BLUR_FRAC * numpy.linalg.norm(
        numpy.mean(landmarks1[LEFT_EYE_POINTS], axis=0) -
        numpy.mean(landmarks1[RIGHT_EYE_POINTS], axis=0))
    blur_amount = int(blur_amount)
    if blur_amount % 2 == 0:
        blur_amount += 1
    im1_blur = cv2.GaussianBlur(im1, (blur_amount, blur_amount), 0)
    im2_blur = cv2.GaussianBlur(im2, (blur_amount, blur_amount), 0)

    # Avoid divide-by-zero errors.
    im2_blur += (128 * (im2_blur <= 1.0)).astype(im2_blur.dtype)

    return (im2.astype(numpy.float64) * im1_blur.astype(numpy.float64) /
            im2_blur.astype(numpy.float64))

def face_swap(img, name):

    # use the dlibOn flag here too, so the detector choice below is respected
    s = get_landmarks(img, dlibOn)

    if isinstance(s, str):  # "error": no face, or more than one, was found
        print("No or too many faces")
        return img

    im1, landmarks1 = img, s
    im2, landmarks2 = read_im_and_landmarks(name)

    M = transformation_from_points(landmarks1[ALIGN_POINTS],
                                   landmarks2[ALIGN_POINTS])

    mask = get_face_mask(im2, landmarks2)
    warped_mask = warp_im(mask, M, im1.shape)
    combined_mask = numpy.max([get_face_mask(im1, landmarks1), warped_mask],
                              axis=0)

    warped_im2 = warp_im(im2, M, im1.shape)

    warped_corrected_im2 = correct_colours(im1, warped_im2, landmarks1)

    output_im = im1 * (1.0 - combined_mask) + warped_corrected_im2 * combined_mask

    # output_im is no longer in the expected OpenCV format, so we use OpenCV
    # to write the image to disk and then reload it
    cv2.imwrite('output.jpg', output_im)
    image = cv2.imread('output.jpg')

    # enlarge the result for display
    frame = cv2.resize(image, None, fx=1.5, fy=1.5, interpolation=cv2.INTER_LINEAR)

    return frame


cap = cv2.VideoCapture(0)


# filter_image (passed to face_swap as name) is the image we want to swap onto ours.
# dlibOn controls whether we use dlib's facial landmark detector (better)
# or the Haar Cascade Classifier (faster).

filter_image = "images/Trump.jpg"  ### Put your image here!
dlibOn = False

while True:
    ret, frame = cap.read()

    # Reduce image size to 75% to reduce processing time and improve framerates
    frame = cv2.resize(frame, None, fx=0.75, fy=0.75, interpolation=cv2.INTER_LINEAR)

    # flip image so that it's more mirror like
    frame = cv2.flip(frame, 1)

    cv2.imshow('Our Amazing Face Swapper', face_swap(frame, filter_image))

    if cv2.waitKey(1) == 13:  # 13 is the Enter key
        break

cap.release()
cv2.destroyAllWindows()