# train_network_generator.py
import json
import os
from random import shuffle

import keras
import numpy as np
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.models import load_model

from dnn_model import model_creator
# -----------------------------------------
# Get training data
# -----------------------------------------
def get_training_data(batch_size, data_size):
    """Generator that streams (x, y) batches from the JSON files in ./ext,
    keeping at most data_size samples in memory at a time."""
    print("loading files...")
    count = 0
    data = []
    # find all extracted data files
    file_names = os.listdir("./ext")
    shuffle(file_names)
    pop_new = True
    while len(file_names):
        if pop_new:
            # refill the in-memory sample pool up to data_size entries
            while len(data) < data_size:
                if len(file_names) < 1:
                    break
                file_name = file_names.pop()
                file_extension = os.path.splitext(file_name)[1]
                if file_extension != '.json':
                    continue
                print(' ' + file_name, end='\r')
                with open("ext/" + file_name, "r") as file:
                    try:
                        data = data + json.load(file)
                    except ValueError:  # skip files with malformed JSON
                        continue
            pop_new = False
            shuffle(data)
            count = 0
        x, y = return_training_data(batch_size, count, data)
        if len(x) < batch_size:
            # pool exhausted; clear it and refill on the next iteration
            pop_new = True
            data = []
            continue
        yield x, y
        count += batch_size
        if len(file_names) == 0:
            print('Whole dataset has been looped through... \n\n')
            file_names = os.listdir("./ext")
            shuffle(file_names)
    print('woh')  # debug marker; only reached if ./ext starts out empty
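# Note: under normal use this generator never terminates; once the file list
# empties it is reshuffled and reused, which matches how fit_generator below
# consumes it with a fixed steps_per_epoch.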
def return_training_data(batch_size, point, data):
    """Slice one batch out of data, starting at index point."""
    X = []
    Y = []
    for x in data[point:point + batch_size]:
        X.append(x[:-1])   # all but the last entry are move data
        Y.append([x[-1]])  # the last entry is the score
    return np.array(X), np.array(Y)
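# Illustrative example of the assumed row layout (hypothetical values): a row
# [m1, m2, m3, score] splits into features [m1, m2, m3] and target [score], so
#   return_training_data(2, 0, [[0.1, 0.2, 0.3, 1.0], [0.4, 0.5, 0.6, 0.0]])
# returns (array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]), array([[1.0], [0.0]])).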
# -----------------------------------------
# Training and evaluation
# -----------------------------------------
def train_network(model_name):
    epochs = 6
    batch_size = 256
    data_size = 262144  # number of datapoints loaded into memory at once
    samples_per_epoch = 36 * data_size // batch_size  # number of batches traversed per epoch
    validation_steps = 200
    evaluate_samples_per_epoch = 100
    model_filepath = "model/" + model_name + ".h5"

    callbacks = []
    callbacks.append(keras.callbacks.TensorBoard(log_dir='./Graph/' + model_name,
                                                 histogram_freq=0, write_graph=True,
                                                 write_images=True))
    callbacks.append(ModelCheckpoint(model_filepath, monitor='val_acc', verbose=1,
                                     save_best_only=True, mode='max'))
    #callbacks.append(EarlyStopping(monitor='val_acc', patience=4, min_delta=0.0001))

    # resume from a previously saved model if one exists, otherwise start fresh
    try:
        model = load_model(model_filepath)
        print('Loaded previously saved model')
    except (OSError, IOError):
        model = model_creator()
        print('Created new model')

    model.fit_generator(get_training_data(batch_size, data_size),
                        epochs=epochs,
                        steps_per_epoch=samples_per_epoch,
                        callbacks=callbacks,
                        validation_data=get_training_data(batch_size, data_size),
                        validation_steps=validation_steps)

    loss_and_metrics = model.evaluate_generator(get_training_data(batch_size, data_size),
                                                steps=evaluate_samples_per_epoch, verbose=0)
    print(loss_and_metrics)
    print(model.metrics_names[1] + ": " + str(loss_and_metrics[1] * 100))
    model.save(model_filepath)
def evaluate_model(model):
    evaluate_samples_per_epoch = 100
    batch_size = 256
    loss_and_metrics = model.evaluate_generator(get_training_data(batch_size, 250000),
                                                steps=evaluate_samples_per_epoch, verbose=0)
    print(loss_and_metrics)
    print(model.metrics_names[1] + ": " + str(loss_and_metrics[1] * 100))
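# Minimal usage sketch, assuming the script is run from the repo root with a
# populated ./ext directory and an existing model/ directory; "my_model" is a
# hypothetical model name, not one defined elsewhere in the project.
if __name__ == "__main__":
    train_network("my_model")
    evaluate_model(load_model("model/my_model.h5"))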