-
Notifications
You must be signed in to change notification settings - Fork 0
/
train.py
104 lines (82 loc) · 3.44 KB
/
train.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
# coding: utf-8
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, MaxPooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model,load_model
from keras.callbacks import ModelCheckpoint,LearningRateScheduler,ReduceLROnPlateau
from utils import plot_model,getdata
import numpy as np
import tensorflow as tf
from classification_models.classification_models.resnet import ResNet18, preprocess_input
# Load train/test images and their ground-truth boxes via the project helper.
# Presumably boxes are (N, 4) arrays of [x, y, width, height] in pixels of a
# 224x224 image (matches my_metric / smooth_l1_loss below) — TODO confirm in utils.getdata.
data_train,box_train,data_test,box_test=getdata()
# metric function
def my_metric(labels, predictions):
    """Fraction of predicted boxes whose IoU with the ground truth exceeds 0.75.

    labels:      (batch, 4) ground-truth boxes in pixels [x, y, width, height].
    predictions: (batch, 4) predicted boxes in [0, 1] units of a 224px image;
                 scaled by 224 here and clipped to the image bounds.
    Returns a scalar tensor: the batch mean of the 0/1 hit indicator.
    """
    threshold = 0.75
    # Scale normalized predictions to pixel coordinates, clipped to [0, 224].
    x = tf.clip_by_value(predictions[:, 0] * 224, 0.0, 224.0)
    y = tf.clip_by_value(predictions[:, 1] * 224, 0.0, 224.0)
    width = tf.clip_by_value(predictions[:, 2] * 224, 0.0, 224.0)
    height = tf.clip_by_value(predictions[:, 3] * 224, 0.0, 224.0)
    label_x = labels[:, 0]
    label_y = labels[:, 1]
    label_width = labels[:, 2]
    label_height = labels[:, 3]
    # Intersection rectangle corners.
    x1 = tf.maximum(x, label_x)
    y1 = tf.maximum(y, label_y)
    x2 = tf.minimum(x + width, label_x + label_width)
    y2 = tf.minimum(y + height, label_y + label_height)
    # Bug fix: clamp the overlap extents at 0. The original used
    # abs((x1-x2)*(y1-y2)), which reports a positive "intersection" even for
    # disjoint boxes and so inflates the metric.
    intersection = tf.maximum(x2 - x1, 0.0) * tf.maximum(y2 - y1, 0.0)
    union = width * height + label_width * label_height - intersection
    iou = intersection / union
    # 1.0 where IoU > threshold, else 0.0; mean over the batch.
    hits = tf.cast(tf.greater(iou, threshold), tf.float32)
    return tf.reduce_mean(hits)
# loss function
def smooth_l1_loss(true_box, pred_box):
    """Smooth-L1 (Huber, delta=1) loss summed over the 4 box coordinates.

    true_box is in pixels; pred_box is in [0, 1] units and is scaled by 224
    before comparison. Returns the batch mean of the per-sample summed loss.
    """
    per_sample = 0.0
    for coord in range(4):
        diff = tf.abs(true_box[:, coord] - pred_box[:, coord] * 224)
        # 0.5*d^2 for |d| < 1, |d| - 0.5 otherwise (classic smooth L1).
        per_sample = per_sample + tf.where(tf.less(diff, 1.0),
                                           0.5 * tf.square(diff),
                                           diff - 0.5)
    return tf.reduce_mean(per_sample)
def resnet_block(inputs, num_filters, kernel_size, strides, activation='relu'):
    """Conv2D -> BatchNorm -> optional activation building block.

    inputs:      input tensor.
    num_filters: number of convolution filters.
    kernel_size: convolution kernel size.
    strides:     convolution strides.
    activation:  activation name, or a falsy value to skip the activation layer.
    Returns the output tensor of the block.
    """
    x = Conv2D(num_filters, kernel_size=kernel_size, strides=strides,
               padding='same', kernel_initializer='he_normal',
               kernel_regularizer=l2(1e-3))(inputs)
    x = BatchNormalization()(x)
    if activation:
        # Bug fix: honour the requested activation — the original hard-coded
        # Activation('relu') and silently ignored the parameter.
        x = Activation(activation)(x)
    return x
# build model
# ResNet18 backbone with ImageNet weights; classification head dropped so the
# backbone's final feature map can feed a regression head.
base_model = ResNet18(input_shape=(224,224,3), weights='imagenet', include_top=False)
# 7x7 average pooling collapses the backbone's spatial map (presumably 7x7 for
# a 224px input — TODO confirm) down to 1x1 per channel.
x=AveragePooling2D(pool_size=7,data_format="channels_last")(base_model.output)
y=Flatten()(x)
y=Dense(1000,kernel_initializer='he_normal',kernel_regularizer=l2(1e-3))(y)
# 4 linear outputs: predicted box [x, y, width, height] in [0, 1] units of the
# 224px image (scaled by 224 inside the loss/metric).
output=Dense(4,kernel_initializer='he_normal',kernel_regularizer=l2(1e-3))(y)
model = keras.models.Model(inputs=[base_model.input], outputs=[output])
#model = resnet18()
# Smooth-L1 box regression loss; my_metric reports the IoU>0.75 hit rate.
model.compile(loss=smooth_l1_loss,optimizer=Adam(),metrics=[my_metric])
model.summary()
def lr_sch(epoch):
    """Step learning-rate schedule: 1e-3, then 1e-4 from epoch 50, 1e-5 from epoch 100."""
    if epoch >= 100:
        return 1e-5
    if epoch >= 50:
        return 1e-4
    return 1e-3
# Epoch-indexed learning-rate schedule (see lr_sch above).
lr_scheduler=LearningRateScheduler(lr_sch)
# Plateau-based fallback reduction when the validation metric stalls.
# Bug fix: min_lr was 1e-3, a floor ABOVE the scheduled 1e-4/1e-5 stages, so
# the reducer could never actually lower the rate; 1e-6 lets it work.
lr_reducer=ReduceLROnPlateau(monitor='val_my_metric',factor=0.2,patience=5,mode='max',min_lr=1e-6)
# Checkpoint the best model (lowest validation loss) seen so far.
checkpoint=ModelCheckpoint('model.h5',monitor='val_loss',verbose=0,save_best_only=True,mode='auto')
# Bug fix: validation_split must be a fraction strictly between 0 and 1; the
# original value of 1 would leave zero training samples. Hold out 10%.
model_details=model.fit(data_train,box_train,batch_size=128,epochs=700,shuffle=True,validation_split=0.1, callbacks=[lr_scheduler,lr_reducer,checkpoint],verbose=1)
# NOTE(review): this overwrites the best checkpoint above with the final-epoch
# weights under the same filename — confirm that is intended.
model.save('model.h5')
scores=model.evaluate(data_test,box_test,verbose=1)
print('Test loss : ',scores[0])
print('Test accuracy : ',scores[1])
plot_model(model_details)