# -*- coding: utf-8 -*-
"""1_gan(linear+binary).ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1FaEvFinFOKmIUT5jUAI-rrsPk3P9QTzD
"""
import time
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import Dense, LeakyReLU, Dropout, Input
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.initializers import RandomNormal
import numpy as np
import matplotlib.pyplot as plt
# Load MNIST and scale pixel values from [0, 255] to [-1, 1],
# matching the generator's tanh output range
(train_x, train_y), (test_x, test_y) = mnist.load_data()
train_x = train_x / 127.5 - 1
test_x = test_x / 127.5 - 1
print(train_x.min(), train_x.max())  # -1.0 1.0

# Flatten each 28x28 image into a 784-dimensional vector
train_x = train_x.reshape(-1, 784)
print(train_x.shape)  # (60000, 784)
# Dimension of the noise vector fed into the GAN
NOISE_DIM = 10

# Adam optimizer with learning_rate=0.0002 and beta_1=0.5,
# the settings used in the vanilla GAN and DCGAN papers; they tend to train most stably
adam = Adam(learning_rate=0.0002, beta_1=0.5)
# Generator: maps a noise vector to a flattened 28x28 image
gen = Sequential([
    Dense(256, input_dim=NOISE_DIM),
    LeakyReLU(0.2),
    Dense(512),
    LeakyReLU(0.2),
    Dense(1024),
    LeakyReLU(0.2),
    Dense(28*28, activation='tanh'),  # tanh keeps outputs in [-1, 1], matching the scaled data
])
gen.summary()
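# Hypothetical quick check (not in the original notebook): one noise vector
# should map to a single flattened 28x28 image.
sample = gen.predict(np.random.normal(0, 1, size=(1, NOISE_DIM)))
print(sample.shape)  # (1, 784)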
# Discriminator: classifies a flattened image as real (1) or fake (0)
dis = Sequential([
    Dense(1024, input_shape=(784,), kernel_initializer=RandomNormal(stddev=0.02)),
    LeakyReLU(0.2),
    Dropout(0.3),
    Dense(512),
    LeakyReLU(0.2),
    Dropout(0.3),
    Dense(256),
    LeakyReLU(0.2),
    Dropout(0.3),
    Dense(1, activation='sigmoid')
])
dis.summary()
dis.compile(loss='binary_crossentropy', optimizer=adam)
# Freeze the discriminator inside the combined model so that only the generator
# is updated when the GAN is trained; this must happen before gan is compiled
dis.trainable = False
input_gan = Input(shape=(NOISE_DIM,))
x = gen(inputs=input_gan)
output = dis(x)
gan = Model(input_gan, output)
gan.summary()
gan.compile(loss='binary_crossentropy', optimizer=adam)
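# Hypothetical sanity check (not in the original notebook): with the
# discriminator frozen, the combined model's trainable weights should be
# exactly the generator's.
assert len(gan.trainable_weights) == len(gen.trainable_weights)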
def def_batches(train_data, batch_size):
    # Split the training data into equal-sized batches; any remainder is dropped
    list_of_batches = []
    for i in range(int(train_data.shape[0] // batch_size)):
        single_batch = train_data[i * batch_size: (i + 1) * batch_size]
        list_of_batches.append(single_batch)
    return np.asarray(list_of_batches)
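# Example (added for illustration): with 60000 training samples and
# batch_size=256, def_batches returns an array of shape (234, 256, 784),
# since 60000 // 256 = 234 full batches; the final 96 samples are discarded.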
def train_visualize(epoch, d_losses, g_losses):
    # Generate sample images from random noise and display them in a 4x6 grid
    # (d_losses and g_losses are accepted but not used here)
    noise = np.random.normal(0, 1, size=(24, NOISE_DIM))
    generated_img = gen.predict(noise)
    generated_img = generated_img.reshape(-1, 28, 28)
    plt.figure(figsize=(8, 4))
    for i in range(generated_img.shape[0]):
        plt.subplot(4, 6, i+1)
        plt.imshow(generated_img[i], interpolation='nearest', cmap='gray')
        plt.axis('off')
    plt.tight_layout()
    plt.show()
BATCH_SIZE = 256
EPOCHS = 20

# Lists for tracking the discriminator and GAN losses
list_d_loss = []
list_g_loss = []
for epoch in range(1, EPOCHS + 1):
    start = time.time()
    # Train on each batch
    for real_img in def_batches(train_x, BATCH_SIZE):
        # Sample random noise
        input_noise = np.random.uniform(-1, 1, size=[BATCH_SIZE, NOISE_DIM])
        # Generate a batch of fake images from the noise
        generated_img = gen.predict(input_noise)
        # X data for the discriminator: real images followed by fakes
        dis_x = np.concatenate([real_img, generated_img])
        # Y data for the discriminator: 0.9 for real, 0 for fake
        dis_y = np.zeros(2 * BATCH_SIZE)
        dis_y[:BATCH_SIZE] = 0.9
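        # Added note: labelling real images 0.9 rather than 1.0 is one-sided
        # label smoothing, a common trick to keep the discriminator from
        # becoming over-confident.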
        # Train the discriminator on the mixed real/fake batch
        dis.trainable = True
        d_loss = dis.train_on_batch(dis_x, dis_y)
        # Train the GAN: the generator tries to make the discriminator output 1
        noise = np.random.uniform(-1, 1, size=[BATCH_SIZE, NOISE_DIM])
        gan_y = np.ones(BATCH_SIZE)
        # Freeze the discriminator so only the generator is updated
        dis.trainable = False
        g_loss = gan.train_on_batch(noise, gan_y)
        # Record the per-batch losses
        list_d_loss.append(d_loss)
        list_g_loss.append(g_loss)
    if epoch == 1:
        train_visualize(epoch, list_d_loss, list_g_loss)
    print('Epoch {} took {:.2f} seconds'.format(epoch, time.time() - start))
    if epoch % 20 == 0:
        train_visualize(epoch, list_d_loss, list_g_loss)
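
# Hypothetical follow-up (not in the original notebook): plot the recorded
# per-batch losses to inspect training stability.
plt.figure(figsize=(8, 4))
plt.plot(list_d_loss, label='discriminator loss')
plt.plot(list_g_loss, label='generator (GAN) loss')
plt.xlabel('batch')
plt.ylabel('binary cross-entropy')
plt.legend()
plt.show()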