-
Notifications
You must be signed in to change notification settings - Fork 42
Expand file tree
/
Copy pathtrain.py
More file actions
executable file
·130 lines (102 loc) · 4 KB
/
train.py
File metadata and controls
executable file
·130 lines (102 loc) · 4 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
#!/usr/bin/env python
import argparse
import multiprocessing
import os
from lib.Trainer import Trainer
from lib.TrainingDataUtil import TrainingDataUtil
def main():
    """CLI entry point: parse arguments and run the requested actions.

    Supported actions (flags may be combined): download/archive training
    data, train the FHOG object detector and/or the facial landmark shape
    predictor, open an imglab annotation session, and view a previously
    trained detector SVM.
    """
    formatter = lambda prog: argparse.HelpFormatter(prog,
                                                    max_help_position=33)
    desc = '''
    Handles the training of the FHOG detector and facial landmark shape
    predictor
    '''
    parser = argparse.ArgumentParser(description=desc,
                                     formatter_class=formatter)
    parser.add_argument('-t', '--train-all',
                        help='''
                        begin full training, which includes both FHOG detector
                        and shape predictor training
                        ''',
                        action='store_true')
    parser.add_argument('-d', '--train-detector',
                        help='begin FHOG object detector training',
                        action='store_true')
    parser.add_argument('-p', '--train-predictor',
                        help='''
                        begin facial landmark shape predictor training
                        ''',
                        action='store_true')
    parser.add_argument('-c', '--cpu-cores',
                        help='''
                        number of CPU cores to train with
                        ''',
                        default=multiprocessing.cpu_count(),
                        metavar='<int>',
                        # FIX: without type=int a user-supplied -c value is a
                        # str, unlike the int default above (and unlike the
                        # -w flag, which already converts).
                        type=int)
    parser.add_argument('-v', '--view-detector',
                        help='''
                        View previously trained object detector SVM
                        ''',
                        action='store_true')
    parser.add_argument('-u', '--source-url',
                        help='download training data from url',
                        metavar='<url>')
    parser.add_argument('-a', '--archive',
                        help='''
                        compress current training data directory to tar gzip
                        archive
                        ''',
                        action='store_true')
    parser.add_argument('-i', '--imglab',
                        help='''
                        Open imglab session for current training data
                        ''',
                        action='store_true')
    parser.add_argument('-w', '--window-size',
                        help='detector window size',
                        metavar='<int>',
                        default=90,
                        type=int)
    args = vars(parser.parse_args())
    if args['source_url']:
        TrainingDataUtil.download_training_data(args['source_url'])
    if args['archive']:
        TrainingDataUtil.archive_training_data()
    if args['train_all']:
        train_detector(args['cpu_cores'], args['window_size'])
        train_predictor(args['cpu_cores'])
    if args['train_detector']:
        train_detector(args['cpu_cores'], args['window_size'])
    if args['train_predictor']:
        train_predictor(args['cpu_cores'])
    if args['imglab']:
        # Facial landmark part labels handed to imglab's --parts option.
        parts = (
            'left_eye',
            'right_eye',
            'nose',
            'left_of_left_ear',
            'right_of_left_ear',
            'left_of_right_ear',
            'right_of_right_ear',
            'chin'
        )
        # NOTE(review): shell command built by string interpolation. The
        # interpolated values are class attributes (not user input), but
        # subprocess.run([...], shell=False) would be safer if those paths
        # can ever contain spaces or shell metacharacters.
        cmd = 'imglab {}/{} --parts "{}"'.format(
            TrainingDataUtil.training_data_dir,
            TrainingDataUtil.training_data_xml,
            ' '.join(parts)
        )
        os.system(cmd)
    if args['view_detector']:
        view_object_detector_svm()
def train_predictor(cpu_cores):
    """Extract the training data, then train the facial landmark shape
    predictor with the given number of CPU cores."""
    TrainingDataUtil.extract_training_data()
    trainer = Trainer(TrainingDataUtil.training_data_dir, cpu_cores)
    trainer.train_shape_predictor()
def train_detector(cpu_cores, window_size):
    """Extract the training data, then train the FHOG object detector
    with the given number of CPU cores and detector window size."""
    TrainingDataUtil.extract_training_data()
    trainer = Trainer(TrainingDataUtil.training_data_dir, cpu_cores,
                      window_size)
    trainer.train_object_detector()
def view_object_detector_svm():
    """Display the previously trained object detector SVM via the Trainer."""
    viewer = Trainer(TrainingDataUtil.training_data_dir)
    viewer.view_object_detector()
main()