Set the image folders and the path to the XML
file which defines the points in dliblib.ini
in the root of this module.

Create the training XML by running dliblib.vgg2xml first.
'''
9 | 11 | import os |
10 | 12 | import sys |
|
# Print progress information while the predictor is being trained.
options.be_verbose = True

# Fit the shape predictor on the annotated training data and write the
# resulting model to `predictor_out`.
dlib.train_shape_predictor(training_xml_file, predictor_out, options)
35 | | -print("\nTraining accuracy: {}".format( |
36 | | - dlib.test_shape_predictor(training_xml_file, predictor_out))) |
37 | | - |
38 | | -#testing_xml_path = os.path.join(images_folder, "testing_with_face_landmarks.xml") |
39 | | -#print("Testing accuracy: {}".format( |
40 | | -# dlib.test_shape_predictor(testing_xml_path, "predictor.dat"))) |
41 | | - |
42 | | -# Now let's use it as you would in a normal application. First we will load it |
43 | | -# from disk. We also need to load a face detector to provide the initial |
44 | | -# estimate of the facial location. |
45 | | -predictor = dlib.shape_predictor("predictor.dat") |
46 | | -detector = dlib.get_frontal_face_detector() |
47 | | - |
48 | | - |
49 | | -print("Showing detections and predictions on the images in the faces folder...") |
50 | | -win = dlib.image_window() |
51 | | -for f in glob.glob(os.path.join(images_folder, "*.jpg")): |
52 | | - print("Processing file: {}".format(f)) |
53 | | - img = io.imread(f) |
54 | | - |
55 | | - win.clear_overlay() |
56 | | - win.set_image(img) |
57 | | - |
58 | | - dets = detector(img, 1) |
59 | | - print("Number of faces detected: {}".format(len(dets))) |
60 | | - for k, d in enumerate(dets): |
61 | | - print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format( |
62 | | - k, d.left(), d.top(), d.right(), d.bottom())) |
63 | | - |
64 | | - # Get the landmarks/parts for the face in box d. |
65 | | - shape = predictor(img, d) |
66 | | - print("Part 0: {}, Part 1: {} ...".format(shape.part(0), |
67 | | - shape.part(1))) |
68 | | - # Draw the face landmarks on the screen. |
69 | | - win.add_overlay(shape) |
70 | | - |
71 | | - win.add_overlay(dets) |
72 | | - dlib.hit_enter_to_continue() |
# Report how well the trained model fits its own training set.  A named
# intermediate keeps the print statement readable (the original inlined
# the dlib call inside the format() argument).
training_accuracy = dlib.test_shape_predictor(training_xml_file, predictor_out)
print("\nTraining accuracy: {}".format(training_accuracy))
# --- Optional interactive demo (disabled by default) -----------------------
# Flip RUN_DEMO to True to load the trained predictor and visualise its
# detections on the training images.  The original code used two separate
# `if False:` guards; enabling only the second one would have raised
# NameError, because it uses `predictor` and `detector` defined inside the
# first.  A single named flag makes the two halves inseparable.
# NOTE(review): this branch needs `glob`, `io` (presumably skimage.io for
# imread) and an `images_folder` variable — only `os`/`sys` imports are
# visible in this chunk, so confirm they are defined earlier in the file
# before enabling.
RUN_DEMO = False

if RUN_DEMO:
    #testing_xml_path = os.path.join(images_folder, "testing_with_face_landmarks.xml")
    #print("Testing accuracy: {}".format(
    #    dlib.test_shape_predictor(testing_xml_path, "predictor.dat")))

    # Load the freshly trained model from disk, plus a frontal face
    # detector that supplies the initial bounding-box estimate.
    predictor = dlib.shape_predictor("predictor.dat")
    detector = dlib.get_frontal_face_detector()  # find the faces

    print("Showing detections and predictions on the images in the faces folder...")
    win = dlib.image_window()
    for f in glob.glob(os.path.join(images_folder, "*.jpg")):
        print("Processing file: {}".format(f))
        img = io.imread(f)

        win.clear_overlay()
        win.set_image(img)

        # Second argument upsamples the image once so smaller faces are found.
        dets = detector(img, 1)
        print("Number of faces detected: {}".format(len(dets)))
        for k, d in enumerate(dets):
            print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
                k, d.left(), d.top(), d.right(), d.bottom()))

            # Get the landmarks/parts for the face in box d.
            shape = predictor(img, d)
            print("Part 0: {}, Part 1: {} ...".format(shape.part(0),
                                                      shape.part(1)))
            # Draw the face landmarks on the screen.
            win.add_overlay(shape)

        win.add_overlay(dets)
        dlib.hit_enter_to_continue()
73 | 77 |
|
0 commit comments