MLmodel.py
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions
tfk = tf.keras
tfkl = tf.keras.layers
tfpl = tfp.layers


def custom_loss_function(y, p_y):
    # Log loss: negative log-likelihood of the observed label y under the
    # predicted Bernoulli distribution p_y.
    return -p_y.log_prob(y)


# Small constant used to keep predicted probabilities strictly inside (0, 1).
eps = 1e-6

input_shape = (30,)


def create_model(input_shape=(30,)):
    # Covariate input C and scalar input D.
    C = tf.keras.layers.Input(shape=input_shape, name='C_data')
    dis = tf.keras.layers.Input(shape=(1,), name='D_data')

    # Two small MLP heads on C: k(C) scales D and m(C) adds an offset.
    k_ = tf.keras.layers.Dense(100, activation='relu', name='k_layer')(C)
    k_ = tf.keras.layers.Dense(100, name='k_layer2')(k_)
    m_ = tf.keras.layers.Dense(100, activation='relu', name='m_layer')(C)
    m_ = tf.keras.layers.Dense(100, name='m_layer2')(m_)

    # z = k(C) * D + m(C), passed through a sigmoid and a final Dense layer.
    z = tf.math.multiply(k_, dis, name='multiply_k')
    z = tf.math.add(z, m_, name='add_m')
    z = tf.keras.layers.Activation('sigmoid', name='h')(z)
    z = tf.keras.layers.Dense(1)(z)

    # Squash to a probability and clip it away from 0 and 1 by eps / 2 per side.
    p = tf.keras.layers.Lambda(lambda t: tf.sigmoid(t) * (1.0 - eps) + eps / 2)(z)

    # Output a Bernoulli distribution so the loss can use log_prob directly.
    Out = tfp.layers.DistributionLambda(lambda t: tfd.Bernoulli(probs=t))(p)

    model = tf.keras.models.Model(inputs=[dis, C], outputs=Out)
    model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.001),
                  loss=custom_loss_function,
                  metrics=['mae'])
    return model


class MLmodel:
    def __init__(self, model_shell=None, save_as="mlmodel"):
        # Build the default model lazily per instance rather than once at
        # class-definition time.
        self.model = model_shell if model_shell is not None else create_model()
        self.save_as = save_as

    def train(self, C_train, D_train, deal_train, C_val, D_val, deal_val,
              batch_size=100, verbose=0, early_stopping_patience=10):
        print("training starts")
        # Stop when validation loss stops improving and checkpoint the best
        # weights to disk.
        es = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                              mode='min',
                                              verbose=verbose,
                                              patience=early_stopping_patience)
        mcp_save = tf.keras.callbacks.ModelCheckpoint(self.save_as + '.hdf5',
                                                      save_best_only=True,
                                                      monitor='val_loss',
                                                      mode='min')
        val_data = ([D_val, C_val], deal_val)
        self.model.fit([D_train, C_train], deal_train,
                       validation_data=val_data,
                       callbacks=[es, mcp_save],
                       batch_size=batch_size,
                       epochs=500,
                       verbose=verbose)

    def probability(self, C_data, D_data):
        # Reload the best checkpointed model and return the mean of the
        # predicted Bernoulli distribution, i.e. P(deal = 1 | C, D).
        best_model = tf.keras.models.load_model(
            self.save_as + '.hdf5',
            custom_objects={'custom_loss_function': custom_loss_function})
        p_pred = np.squeeze(best_model([np.expand_dims(D_data, 1), C_data]).mean())
        return p_pred
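

# --- Usage sketch (illustrative, not part of the original module) -----------
# A minimal example of how this class might be exercised end to end, assuming
# covariates C of shape (n_samples, 30), a scalar D per sample, binary "deal"
# labels, and a TF2 / TFP version in which these layers checkpoint to HDF5.
# All names and data below are synthetic placeholders.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    n_train, n_val = 200, 50

    C_train = rng.normal(size=(n_train, 30)).astype(np.float32)
    D_train = rng.normal(size=(n_train, 1)).astype(np.float32)
    deal_train = rng.integers(0, 2, size=(n_train, 1)).astype(np.float32)

    C_val = rng.normal(size=(n_val, 30)).astype(np.float32)
    D_val = rng.normal(size=(n_val, 1)).astype(np.float32)
    deal_val = rng.integers(0, 2, size=(n_val, 1)).astype(np.float32)

    ml = MLmodel(save_as="mlmodel_demo")
    ml.train(C_train, D_train, deal_train,
             C_val, D_val, deal_val,
             batch_size=32, verbose=1, early_stopping_patience=3)

    # probability() expects a 1-D D array and expands it to (n, 1) itself.
    probs = ml.probability(C_val, D_val[:, 0])
    print(probs[:5])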