Diffstat (limited to 'face-detection')
-rw-r--r--  face-detection/01_face_capture_dataset.py |  4
-rw-r--r--  face-detection/02_face_training.py        | 31
-rw-r--r--  face-detection/03_face_recogition.py      | 67
3 files changed, 100 insertions(+), 2 deletions(-)
diff --git a/face-detection/01_face_capture_dataset.py b/face-detection/01_face_capture_dataset.py
index 327264c..8836ce6 100644
--- a/face-detection/01_face_capture_dataset.py
+++ b/face-detection/01_face_capture_dataset.py
@@ -6,7 +6,7 @@ cam.set(4, 480) # set video height
face_detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# For each person, enter one numeric face id
face_id = input('\n----Enter User-id and press <return>----')
-print("\n Initializing face capture. Look the camera and wait!")
+print("\n [INFO] Initializing face capture. Look at the camera and wait!")
# Initialize individual sampling face count
count = 0
while(True):
@@ -25,6 +25,6 @@ while(True):
elif count >= 30: # Take 30 face samples and stop the video capture
break
# Do a bit of cleanup
-print("\n Exiting Program and cleaning up stuff")
+print("\n [INFO] Exiting Program and cleaning up stuff")
cam.release()
cv2.destroyAllWindows()
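
A note on the capture script: input() returns a string, and the numeric id entered here is presumably what 02_face_training.py later parses back out of the dataset filenames as an integer. A minimal sketch of validating the id up front, assuming the same prompt text; the re-prompt loop is hypothetical and not part of this commit:

# Hypothetical guard: re-prompt until a non-negative integer id is entered.
while True:
    raw = input('\n----Enter User-id and press <return>----')
    if raw.strip().isdigit():
        face_id = int(raw)
        break
    print("\n [WARN] Please enter a numeric id, e.g. 1")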
diff --git a/face-detection/02_face_training.py b/face-detection/02_face_training.py
new file mode 100644
index 0000000..938761b
--- /dev/null
+++ b/face-detection/02_face_training.py
@@ -0,0 +1,31 @@
+import cv2
+import numpy as np
+from PIL import Image
+import os
+
+path = 'dataset'
+# Using the LBPH (Local Binary Patterns Histograms) recognizer
+recognizer = cv2.face.LBPHFaceRecognizer_create()
+detector = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
+# function to get the images and label data
+def getImagesAndLabels(path):
+    imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
+    faceSamples = []
+    ids = []
+    for imagePath in imagePaths:
+        PIL_img = Image.open(imagePath).convert('L')  # grayscale
+        img_numpy = np.array(PIL_img, 'uint8')
+        id = int(os.path.split(imagePath)[-1].split(".")[1])
+        faces = detector.detectMultiScale(img_numpy)
+        for (x, y, w, h) in faces:
+            faceSamples.append(img_numpy[y:y+h, x:x+w])
+            ids.append(id)
+    return faceSamples, ids
+print("\n [INFO] Training faces. It will take a few seconds. Wait ...")
+# returns two arrays: faces and ids
+faces,ids = getImagesAndLabels(path)
+recognizer.train(faces, np.array(ids))
+# save the model into trainer/trainer.yml
+recognizer.write('trainer/trainer.yml')
+# Print the number of faces trained and end the program
+print("\n [INFO] {0} faces trained. Exiting Program".format(len(np.unique(ids))))
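
Two assumptions in 02_face_training.py worth spelling out: getImagesAndLabels() reads the user id from the second dot-separated field of each filename, so the capture step is presumed to save samples like dataset/User.1.7.jpg, and recognizer.write('trainer/trainer.yml') fails if the trainer/ directory does not already exist. A minimal sketch of the directory guard, with the naming assumption noted in a comment:

import os

# Presumed sample naming from the capture step: dataset/User.<id>.<count>.jpg,
# which is what split(".")[1] relies on to recover the numeric user id.
os.makedirs('trainer', exist_ok=True)  # recognizer.write() does not create the folder itself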
diff --git a/face-detection/03_face_recogition.py b/face-detection/03_face_recogition.py
new file mode 100644
index 0000000..e0da838
--- /dev/null
+++ b/face-detection/03_face_recogition.py
@@ -0,0 +1,67 @@
+import cv2
+import numpy as np
+import os
+recognizer = cv2.face.LBPHFaceRecognizer_create()
+recognizer.read('trainer/trainer.yml')
+cascadePath = "haarcascade_frontalface_default.xml"
+faceCascade = cv2.CascadeClassifier(cascadePath)
+font = cv2.FONT_HERSHEY_COMPLEX
+#font = cv2.FONT_HERSHEY_TRIPLEX
+id = 0
+# names related to id
+names = ['None', 'Obamna', 'Soda', 'Orange']
+# Initialize and start realtime video capture
+cam = cv2.VideoCapture(0)
+cam.set(3, 640) # set video width
+cam.set(4, 480) # set video height
+# Define min window size to be recognized as a face
+minW = 0.1*cam.get(3)
+minH = 0.1*cam.get(4)
+while True:
+    ret, img = cam.read()
+    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+
+    faces = faceCascade.detectMultiScale(
+        gray,
+        scaleFactor=1.2,
+        minNeighbors=5,
+        minSize=(int(minW), int(minH)),
+    )
+    for (x, y, w, h) in faces:
+        cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)
+        id, confidence = recognizer.predict(gray[y:y+h, x:x+w])
+
+        # If confidence is less than 100: 0 would be a perfect match (LBPH confidence is a distance)
+        if confidence < 100:
+            id = names[id]
+            confidence = " {0}%".format(round(100 - confidence))
+        else:
+            id = "unknown"
+            confidence = " {0}%".format(round(100 - confidence))
+
+        cv2.putText(
+            img,
+            str(id),
+            (x+5, y-5),
+            font,
+            1,
+            (255, 255, 255),
+            2
+        )
+        cv2.putText(
+            img,
+            str(confidence),
+            (x+5, y+h-5),
+            font,
+            1,
+            (255, 255, 0),
+            1
+        )
+
+    cv2.imshow('camera', img)
+    k = cv2.waitKey(10) & 0xff  # Press 'ESC' to exit the video
+    if k == 27:
+        break
+print("\n [INFO] Exiting Program and cleaning up stuff")
+cam.release()
+cv2.destroyAllWindows()
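
On the confidence handling in 03_face_recogition.py: LBPHFaceRecognizer.predict() returns a distance, where 0 is a perfect match and larger values are worse, so "100 - confidence" is only a rough percentage and can go negative for poor matches. A minimal sketch of thresholding the raw distance instead; the helper name and cutoff value are assumptions to be tuned per dataset and lighting:

def identify(recognizer, gray_face, names, threshold=70):
    """Return (name, distance); threshold is an assumed LBPH distance cutoff."""
    label, distance = recognizer.predict(gray_face)
    if distance < threshold:
        return names[label], distance
    return "unknown", distance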