author    Saumit Dinesan <justsaumit@protonmail.com>  2023-05-09 17:47:20 +0530
committer Saumit Dinesan <justsaumit@protonmail.com>  2023-05-09 17:47:20 +0530
commit    2dd5321e39396383e7d604594a6940dba70d2bfc (patch)
tree      29bba1aab51e673f1f6da4d9c221ed1696a000fd /face-detection/02_face_training.py
parent    d6c282a733ce0a3453361973c683d251e1493b2d (diff)
face_training: Refactored face training code for improved readability and efficiency
Diffstat (limited to 'face-detection/02_face_training.py')
-rw-r--r--  face-detection/02_face_training.py  66
1 file changed, 40 insertions(+), 26 deletions(-)
diff --git a/face-detection/02_face_training.py b/face-detection/02_face_training.py
index a0d5a43..a3b96f5 100644
--- a/face-detection/02_face_training.py
+++ b/face-detection/02_face_training.py
@@ -1,35 +1,49 @@
import cv2
-import numpy as np
-from PIL import Image
import os
+import numpy as np
-path = 'dataset'
# Using LBPH(Local Binary Patterns Histograms) recognizer
-recognizer = cv2.face.LBPHFaceRecognizer_create()
-detector = cv2.CascadeClassifier("haarcascade_frontalface_default.xml"); #create an instance
+recognizer=cv2.face.LBPHFaceRecognizer_create()
+face_detector=cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
+path='dataset'
+
# function to read the images in the dataset, convert them to grayscale values, return samples
def getImagesAndLabels(path):
- imagePaths = [os.path.join(path,f) for f in os.listdir(path)]
faceSamples=[]
ids = []
- for imagePath in imagePaths:
- PIL_img = Image.open(imagePath).convert('L') # grayscale
- img_numpy = np.array(PIL_img,'uint8')
- id = int(os.path.split(imagePath)[-1].split(".")[1])
- faces = detector.detectMultiScale(img_numpy)
- for (x,y,w,h) in faces:
- faceSamples.append(img_numpy[y:y+h,x:x+w])
- ids.append(id)
- return faceSamples,ids
-print ("\n [INFO] Training faces. It will take a few seconds. Wait ...")
-#returns two arrays faces and ids
-faces,ids = getImagesAndLabels(path)
+
+ for file_name in os.listdir(path):
+ if file_name.endswith(".jpg"):
+ id = int(file_name.split(".")[1])
+ img_path = os.path.join(path, file_name)
+ img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
+
+ faces = face_detector.detectMultiScale(img)
+
+ for (x, y, w, h) in faces:
+ faceSamples.append(img[y:y+h, x:x+w])
+ ids.append(id)
+
+ return faceSamples, ids
+
+
+def trainRecognizer(faces, ids):
+ recognizer.train(faces, np.array(ids))
+ # Create the 'trainer' folder if it doesn't exist
+ if not os.path.exists("trainer"):
+ os.makedirs("trainer")
+ # Save the model into 'trainer/trainer.yml'
+ recognizer.write('trainer/trainer.yml')
+
+
+print("\n [INFO] Training faces. It will take a few seconds. Wait ...")
+# Get face samples and their corresponding labels
+faces, ids = getImagesAndLabels(path)
+
#Train the LBPH recognizer using the face samples and their corresponding labels
-recognizer.train(faces, np.array(ids))
-# if trainer folder doesnt exist create:
-if not os.path.exists("trainer"):
- os.makedirs("trainer")
-#save the model into trainer/trainer.yml
-recognizer.write('trainer/trainer.yml')
-# Print the numer of faces trained and then exit the program
-print("\n [INFO] {0} faces trained. Exiting Program".format(len(np.unique(ids))))
+trainRecognizer(faces, ids)
+
+
+# Print the number of unique faces trained
+num_faces_trained = len(set(ids))
+print("\n [INFO] {} faces trained. Exiting Program".format(num_faces_trained))
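For reference, a minimal sketch of how the model saved to 'trainer/trainer.yml' by this script could be loaded back for recognition. This assumes OpenCV's contrib face module is available (as in the training code above); the probe image path 'test.jpg' is hypothetical and not part of the repository.

import cv2

# Load the LBPH model written by 02_face_training.py
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read('trainer/trainer.yml')

face_detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

# 'test.jpg' is a hypothetical probe image, read directly in grayscale
img = cv2.imread('test.jpg', cv2.IMREAD_GRAYSCALE)
for (x, y, w, h) in face_detector.detectMultiScale(img):
    # predict() returns the numeric id encoded in the dataset file names
    # and a distance-style confidence (lower means a closer match)
    label, confidence = recognizer.predict(img[y:y+h, x:x+w])
    print("id: {}, confidence: {:.2f}".format(label, confidence))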