train_on_INSTANCE.py
import os
import argparse
import pickle

import numpy as np
import yaml
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, CSVLogger

from src.data.data_generator import SimpleTrainGeneratorINSTANCE
from src.model.PickNet import PickNet_keras


def train_PickNet(cfgs=None):
    if cfgs is None:
        print('Empty Config')
        return
    model = PickNet_keras(cfgs)
    # optionally warm-start from a previously trained model
    if cfgs['Training']['use_previous_model_as_start']:
        model.load_weights(cfgs['Training']['previous_model_path'])
    # parameters passed to the INSTANCE data generator
    init_dict = dict()
    init_dict['INSTANCE_ev_hdf5_path'] = cfgs['Training']['INSTANCE_ev_hdf5_path_train']
    init_dict['INSTANCE_ev_csv_path'] = cfgs['Training']['INSTANCE_ev_csv_path_train']
    init_dict['INSTANCE_batch_size'] = cfgs['Training']['INSTANCE_batch_size']
    # training generator reads the INSTANCE training HDF5/CSV pair
    train_data_gen = SimpleTrainGeneratorINSTANCE(init_dict=init_dict,
                                                  miniepoch=cfgs['Training']['epochs'],
                                                  batch_size=cfgs['Training']['batch_size'],
                                                  duplicate_num=cfgs['PickNet']['duplicate_num'],
                                                  dim=cfgs['PickNet']['length'],
                                                  dim_y=cfgs['PickNet']['length'],
                                                  n_channels=cfgs['PickNet']['channel_num'],
                                                  shift_max=cfgs['Training']['shift_max'],
                                                  wave_type=cfgs['PickNet']['wave_type'])
    # the validation generator reuses the same HDF5 file but swaps in the validation CSV
    init_dict['INSTANCE_ev_csv_path'] = cfgs['Training']['INSTANCE_ev_csv_path_val']
    val_data_gen = SimpleTrainGeneratorINSTANCE(init_dict=init_dict,
                                                miniepoch=cfgs['Training']['validation_steps'],
                                                batch_size=cfgs['Training']['batch_size'],
                                                duplicate_num=cfgs['PickNet']['duplicate_num'],
                                                dim=cfgs['PickNet']['length'],
                                                dim_y=cfgs['PickNet']['length'],
                                                n_channels=cfgs['PickNet']['channel_num'],
                                                shift_max=cfgs['Training']['shift_max'],
                                                wave_type=cfgs['PickNet']['wave_type'])
    print('Done Creating Generator')
    # checkpoint path: <filepath dir><TASK_ID>_<epoch>.hdf5
    TASK_ID = cfgs['Training']['TASK_ID']
    filepath = cfgs['Training']['filepath'] + TASK_ID + '_{epoch:04d}.hdf5'
    if not os.path.exists(cfgs['Training']['filepath']):
        os.makedirs(cfgs['Training']['filepath'])
    # keep only checkpoints that improve val_loss
    # ('period' is deprecated in newer Keras releases in favour of 'save_freq')
    checkpoint = ModelCheckpoint(filepath, monitor='val_loss', save_best_only=True,
                                 mode='auto', period=1)
    # reduce the learning rate by sqrt(0.1) when val_loss plateaus for 10 epochs
    lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
                                   cooldown=0,
                                   patience=10,
                                   min_lr=0.1e-7)
    # write the training loss log
    if not os.path.exists(cfgs['Training']['train_log_dir']):
        os.makedirs(cfgs['Training']['train_log_dir'])
    csv_logger = CSVLogger(cfgs['Training']['train_log_dir'] + cfgs['Training']['TASK_ID'] + 'train_log.csv')
    hist = model.fit(train_data_gen,
                     workers=cfgs['Training']['num_works'],
                     max_queue_size=cfgs['Training']['max_queue'],
                     use_multiprocessing=False,
                     callbacks=[checkpoint, lr_reducer, csv_logger],
                     epochs=cfgs['Training']['epochs'],
                     steps_per_epoch=cfgs['Training']['steps_per_epoch'],
                     validation_data=val_data_gen,
                     validation_steps=cfgs['Training']['validation_steps'])
    # dump the training history (a pickled dict, despite the .hdf5 suffix)
    histpath = cfgs['Training']['histpath'] + TASK_ID + '_hist.hdf5'
    with open(histpath, 'wb') as file_pi:
        pickle.dump(hist.history, file_pi)
    # save the final model alongside the history
    model.save(cfgs['Training']['histpath'] + TASK_ID + '_last.hdf5')
    print('Training done')
    return


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Utility for training PickNet (Keras), beta 0.01')
    parser.add_argument('--config-file', dest='config_file', type=str, help='Path to configuration file')
    args = parser.parse_args()
    with open(args.config_file) as f:
        cfgs = yaml.load(f, Loader=yaml.SafeLoader)
    # restrict the visible GPUs before any device initialisation
    os.environ['CUDA_VISIBLE_DEVICES'] = str(cfgs['Training']['gpu_id'])
    train_PickNet(cfgs)
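
The script is driven entirely by the YAML file passed via --config-file (e.g. python train_on_INSTANCE.py --config-file config.yml). Below is a minimal sketch of the configuration it expects, written as an equivalent Python dict: the keys are taken from the cfgs[...] lookups in the script, but every value is an illustrative placeholder, not the authors' settings.

# Sketch of the expected configuration (placeholder values only).
example_cfgs = {
    'PickNet': {
        'duplicate_num': 4,      # passed to the generator as duplicate_num
        'length': 1200,          # window length in samples (used for dim and dim_y)
        'channel_num': 3,        # number of waveform channels
        'wave_type': 'P',        # phase type handled by this model
    },
    'Training': {
        'TASK_ID': 'demo_run',
        'gpu_id': '0',           # assigned to CUDA_VISIBLE_DEVICES
        'use_previous_model_as_start': False,
        'previous_model_path': '',
        'INSTANCE_ev_hdf5_path_train': '/data/INSTANCE/events.hdf5',
        'INSTANCE_ev_csv_path_train': '/data/INSTANCE/metadata_train.csv',
        'INSTANCE_ev_csv_path_val': '/data/INSTANCE/metadata_val.csv',
        'INSTANCE_batch_size': 32,
        'batch_size': 32,
        'epochs': 100,
        'steps_per_epoch': 200,
        'validation_steps': 20,
        'shift_max': 100,        # maximum random shift applied by the generator
        'num_works': 4,          # data-loading workers (key name as spelled in the script)
        'max_queue': 10,
        'filepath': 'checkpoints/',   # concatenated with TASK_ID, so keep the trailing slash
        'train_log_dir': 'logs/',
        'histpath': 'history/',
    },
}
# train_PickNet(example_cfgs)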