This repository has been archived by the owner on Feb 20, 2020. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 15
/
constants.py
189 lines (154 loc) · 5.59 KB
/
constants.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
import numpy as np
import os
from glob import glob
import shutil
from scipy.ndimage import imread
def get_dir(directory):
    """
    Creates the given directory (and any missing parents) if it does not exist.

    :param directory: The path to the directory.
    :return: The path to the directory.
    """
    if not os.path.exists(directory):
        try:
            os.makedirs(directory)
        except OSError:
            # The exists() check above is racy: another process may create
            # the directory between the check and makedirs(). Only re-raise
            # if the directory still doesn't exist (a real failure).
            if not os.path.isdir(directory):
                raise
    return directory
def clear_dir(directory):
    """
    Removes all files and subdirectories in the given directory
    (best-effort: failures are printed and the remaining entries are
    still processed).

    :param directory: The path to the directory.
    """
    for f in os.listdir(directory):
        path = os.path.join(directory, f)
        try:
            # Check islink first: a symlink to a directory passes isdir(),
            # but shutil.rmtree refuses to operate on symlinks, so unlink
            # the link itself instead.
            if os.path.islink(path) or os.path.isfile(path):
                os.unlink(path)
            elif os.path.isdir(path):
                shutil.rmtree(path)
        except OSError as e:
            # Deliberate best-effort behavior: report and keep clearing.
            print(e)
def get_test_frame_dims():
    """
    Reads one frame from TEST_DIR and returns its (height, width).
    """
    first_frame_path = glob(os.path.join(TEST_DIR, '*/*'))[0]
    frame = imread(first_frame_path, mode='RGB')
    height, width = np.shape(frame)[0], np.shape(frame)[1]
    return height, width
def get_train_frame_dims():
    """
    Reads one frame from TRAIN_DIR and returns its (height, width).
    """
    first_frame_path = glob(os.path.join(TRAIN_DIR, '*/*'))[0]
    frame = imread(first_frame_path, mode='RGB')
    height, width = np.shape(frame)[0], np.shape(frame)[1]
    return height, width
def set_test_dir(directory):
    """
    Points TEST_DIR at a new location and recomputes the frame dimensions
    derived from it.

    @param directory: The new test directory.
    """
    global TEST_DIR, FULL_HEIGHT, FULL_WIDTH
    TEST_DIR = directory
    # NOTE(review): this assigns FULL_HEIGHT/FULL_WIDTH, but the module-level
    # defaults below are named TEST_HEIGHT/TEST_WIDTH -- confirm which pair
    # downstream code actually reads.
    FULL_HEIGHT, FULL_WIDTH = get_test_frame_dims()
# root directory for all data (created at import time if missing)
DATA_DIR = get_dir('data/')
# directory of unprocessed training frames
TRAIN_DIR = os.path.join(DATA_DIR, 'images/train/')
# directory of unprocessed test frames
TEST_DIR = os.path.join(DATA_DIR, 'images/test/')
# Directory of processed training clips.
# hidden so finder doesn't freeze w/ so many files. DON'T USE `ls` COMMAND ON THIS DIR!
TRAIN_DIR_CLIPS = get_dir(os.path.join(DATA_DIR, 'trainclips/'))
# For processing clips. l2 diff between frames must be greater than this
MOVEMENT_THRESHOLD = 100
# total number of processed clips in TRAIN_DIR_CLIPS, counted once at import time
NUM_CLIPS = len(glob(TRAIN_DIR_CLIPS + '*'))
# the height and width of the full frames to test on. Set in avg_runner.py or process_data.py main.
# NOTE(review): set_test_dir() above assigns globals named FULL_HEIGHT/FULL_WIDTH,
# not these -- confirm which pair downstream code reads.
TEST_HEIGHT = 210
TEST_WIDTH = 160
# the height and width of the patches to test on
# (the TRAIN_ prefix suggests these are the *training* patch dims -- confirm)
TRAIN_HEIGHT = TRAIN_WIDTH = 32
##
# Output
##
def set_save_name(name):
    """
    Switches SAVE_NAME to a new run name and rebuilds every save path
    derived from it, creating the directories if needed.

    :param name: The new save name.
    """
    global SAVE_NAME, MODEL_SAVE_DIR, SUMMARY_SAVE_DIR, IMG_SAVE_DIR
    SAVE_NAME = name

    def _run_dir(subdir):
        # Build (and ensure existence of) one save subdirectory for this run.
        return get_dir(os.path.join(SAVE_DIR, subdir, SAVE_NAME))

    MODEL_SAVE_DIR = _run_dir('Models/')
    SUMMARY_SAVE_DIR = _run_dir('Summaries/')
    IMG_SAVE_DIR = _run_dir('Images/')
def clear_save_name():
    """
    Clears all saved content (models, summaries, images) for SAVE_NAME.
    """
    for save_dir in (MODEL_SAVE_DIR, SUMMARY_SAVE_DIR, IMG_SAVE_DIR):
        clear_dir(save_dir)
# root directory for all saved content
# (note: resolves one level *above* the working directory)
SAVE_DIR = get_dir('../Save/')
# inner directory to differentiate between runs
SAVE_NAME = 'Default/'
# directory for saved models
MODEL_SAVE_DIR = get_dir(os.path.join(SAVE_DIR, 'Models/', SAVE_NAME))
# directory for saved TensorBoard summaries
SUMMARY_SAVE_DIR = get_dir(os.path.join(SAVE_DIR, 'Summaries/', SAVE_NAME))
# directory for saved images
IMG_SAVE_DIR = get_dir(os.path.join(SAVE_DIR, 'Images/', SAVE_NAME))
##
# General training
##
# whether to use adversarial training vs. basic training of the generator
# NOTE(review): commented out here -- confirm this flag is defined/used elsewhere.
#ADVERSARIAL = True
# the training minibatch size
BATCH_SIZE = 8
# the number of history frames to give as input to the network
HIST_LEN = 4
##
# Loss parameters
##
# for lp loss. e.g, 1 or 2 for l1 and l2 loss, respectively)
L_NUM = 2
# the power to which each gradient term is raised in GDL loss
ALPHA_NUM = 1
# the percentage of the adversarial loss to use in the combined loss
LAM_ADV = 0.05
# the percentage of the lp loss to use in the combined loss
LAM_LP = 1
# the percentage of the GDL loss to use in the combined loss
LAM_GDL = 1
##
# Generator model
##
# learning rate for the generator model
LRATE_G = 0.00004 # Value in paper is 0.04
# padding for convolutions in the generator model
# NOTE(review): no constant follows the line above -- it looks orphaned; the
# padding value may have been moved or removed.
# feature maps for each convolution of each scale network in the generator model
# e.g SCALE_FMS_G[1][2] is the input of the 3rd convolution in the 2nd scale network.
# First entry per scale is the input depth; presumably 3 channels per history
# frame, with the +1 for scales after the first adding another 3-channel image
# -- confirm against the generator model code.
SCALE_FMS_G = [[3 * HIST_LEN, 128, 256, 128, 3],
               [3 * (HIST_LEN + 1), 128, 256, 128, 3],
               [3 * (HIST_LEN + 1), 128, 256, 512, 256, 128, 3],
               [3 * (HIST_LEN + 1), 128, 256, 512, 256, 128, 3]]
# kernel sizes for each convolution of each scale network in the generator model
SCALE_KERNEL_SIZES_G = [[3, 3, 3, 3],
                        [5, 3, 3, 5],
                        [5, 3, 3, 3, 3, 5],
                        [7, 5, 5, 5, 5, 7]]
##
# Discriminator model
##
# learning rate for the discriminator model
LRATE_D = 0.02
# feature maps for each convolution of each scale network in the discriminator model
SCALE_CONV_FMS_D = [[3, 64],
                    [3, 64, 128, 128],
                    [3, 128, 256, 256],
                    [3, 128, 256, 512, 128]]
# kernel sizes for each convolution of each scale network in the discriminator model
SCALE_KERNEL_SIZES_D = [[3],
                        [3, 3, 3],
                        [5, 5, 5],
                        [7, 7, 5, 5]]
# layer sizes for each fully-connected layer of each scale network in the discriminator model
# layer connecting conv to fully-connected is dynamically generated when creating the model
SCALE_FC_LAYER_SIZES_D = [[512, 256, 1],
                          [1024, 512, 1],
                          [1024, 512, 1],
                          [1024, 512, 1]]