質問編集履歴

1

コードの追加

2020/05/24 05:33

投稿

tomoki_fab
tomoki_fab

スコア25

test CHANGED
File without changes
test CHANGED
@@ -95,3 +95,379 @@
95
95
  インターネットで手当たり次第に調べたり、書籍で参考になりそうな情報を探したり、手を尽くしましたがなぜ全くうまくいかないのかわかりません。問題がレイヤーの組み方なのか、何か決定的に重要な処理を飛ばしてしまっているのか、損失関数などの評価の仕方が悪いのかも見当もつきません。
96
96
 
97
97
  なにか見落としているポイントなどがありますでしょうか?
98
+
99
+
100
+
101
+
102
+
103
+ コード
104
+
105
+ ```Python
106
+
107
+ import numpy as np
108
+
109
+ from PIL import Image
110
+
111
+
112
+
113
+ from keras.datasets import mnist
114
+
115
+ from keras.layers import *
116
+
117
+ from keras.models import *
118
+
119
+ from keras.optimizers import *
120
+
121
+
122
+
123
+
124
+
125
# --- Progress-bar display helper (unrelated to the training itself) ---------
class ProgressBar:
    """Render a single-line text progress bar on stdout."""

    def __init__(self, entireJob):
        # Total number of steps the bar represents.
        self.job = entireJob
        # Bar width in characters.
        self.width = 40

    def draw(self, progress):
        """Redraw the bar for 0-based step *progress* (displayed as progress+1)."""
        done = int((progress + 1) * self.width / self.job)
        bar = "#" * done + " " * (self.width - done)
        print("\r[%s] %d/%d" % (bar, progress + 1, self.job), end="")
138
+
139
+
140
+
141
+
142
+
143
# --- Generator model definition -----------
class Generator:
    """Builds the generator: (1, 1, 100) noise vector -> (28, 28, 1) image.

    The network is a stack of (UpSampling2D -> Conv2D -> BatchNormalization)
    stages followed by a final tanh convolution, so the output pixels lie
    in [-1, 1].
    """

    def __init__(self):
        def _up_conv_bn(x, scale, filters, kernel, strides, pad, act):
            # One upsample/convolve/normalize stage of the generator.
            x = UpSampling2D(size=scale)(x)
            x = Conv2D(filters=filters,
                       kernel_size=kernel,
                       strides=strides,
                       padding=pad,
                       activation=act)(x)
            return BatchNormalization()(x)

        inputs = Input(shape=(1, 1, 100))

        # (1,1) -> (3,3) -> (9,9) -> (16,16) -> (32,32) -> (31,31) spatially.
        x = _up_conv_bn(inputs, (3, 3), 100, (2, 2), (1, 1), 'same', 'relu')
        x = _up_conv_bn(x, (3, 3), 100, (2, 2), (1, 1), 'same', 'relu')
        x = _up_conv_bn(x, (2, 2), 80, (3, 3), (1, 1), 'valid', 'elu')
        x = _up_conv_bn(x, (2, 2), 50, (3, 3), (1, 1), 'same', 'elu')
        x = _up_conv_bn(x, (2, 2), 20, (4, 4), (2, 2), 'valid', 'elu')

        # Final (31,31) -> (28,28), single channel, tanh output in [-1, 1].
        outputs = Conv2D(filters=1,
                         kernel_size=(4, 4),
                         strides=(1, 1),
                         padding='valid',
                         activation='tanh')(x)

        self.model = Model(inputs, outputs)
        self.model.summary()
260
+
261
+
262
+
263
# --- Discriminator model definition -------
class Discriminator:
    """Builds the discriminator: (28, 28, 1) image -> real/fake probability."""

    def __init__(self):
        def _conv_bn(x, filters, kernel, strides, pad, act):
            # One convolve/normalize stage of the discriminator.
            x = Conv2D(filters=filters,
                       kernel_size=kernel,
                       strides=strides,
                       padding=pad,
                       activation=act)(x)
            return BatchNormalization()(x)

        inputs = Input(shape=(28, 28, 1))

        x = _conv_bn(inputs, 5, (3, 3), (2, 2), 'valid', 'elu')
        x = _conv_bn(x, 10, (3, 3), (2, 2), 'valid', 'elu')
        x = _conv_bn(x, 5, (3, 3), (1, 1), 'valid', 'relu')

        x = Flatten()(x)
        x = Dense(units=30, activation='tanh')(x)
        x = BatchNormalization()(x)

        # Sigmoid output: probability that the input image is real.
        outputs = Dense(units=1, activation='sigmoid')(x)

        self.model = Model(inputs, outputs)
        self.model.summary()
334
+
335
+
336
+
337
+
338
+
339
+
340
+
341
class Main:
    """Wires the GAN together and runs adversarial training on MNIST.

    Builds a stand-alone (trainable) discriminator and a combined model
    (generator -> frozen discriminator) used to train the generator.
    """

    def __init__(self):
        # --- Discriminator (trainable, compiled on its own) -----------------
        self.discriminator = Discriminator().model
        self.discriminator.compile(
            optimizer=SGD(learning_rate=1e-4),
            loss='binary_crossentropy',
            metrics=['accuracy'])

        # --- Combined model: Generator feeding a frozen Discriminator -------
        self.generator = Generator().model
        z = Input(shape=(1, 1, 100))
        img = self.generator(z)
        # Freeze D before compiling the combined model so that training the
        # combined model updates only the generator's weights.  The earlier
        # compile() call above keeps D trainable when trained directly.
        self.discriminator.trainable = False
        valid = self.discriminator(img)
        self.combined = Model(z, valid)
        self.combined.compile(
            optimizer=Adam(learning_rate=1e-6),
            loss='binary_crossentropy',
            metrics=['accuracy'])

        # --- MNIST dataset ---------------------------------------------------
        (x_train, t_train), (x_test, t_test) = mnist.load_data()
        x_train = x_train.reshape(60000, 28, 28, 1)
        x_test = x_test.reshape(10000, 28, 28, 1)
        # BUG FIX: the raw MNIST pixels are uint8 values in [0, 255], while
        # the generator's tanh output lies in [-1, 1].  The original code only
        # cast to float32 without rescaling, so real and fake images lived on
        # wildly different scales: the discriminator separates them trivially
        # by magnitude and the generator never gets a useful gradient — which
        # matches the "GAN learns nothing" symptom.  Rescale real images to
        # [-1, 1] to match the generator's output range.
        self.x_train = x_train.astype('float32') / 127.5 - 1.0
        self.x_test = x_test.astype('float32') / 127.5 - 1.0

    # --- Training --------------------------------------------------------
    def _train(self, iteration, batch_size):
        """Run *iteration* alternating D/G update steps of size *batch_size*."""
        progress = ProgressBar(iteration)  # progress-bar display
        for i in range(iteration):
            # -- Discriminator step: half batch fake (label 0), half real (label 1).
            z = np.random.uniform(-1, 1, (batch_size//2, 1, 1, 100))  # noise vectors
            f_img = self.generator.predict(z)  # fake images
            r_img = self.x_train[np.random.randint(0, 60000, batch_size//2)]  # real images
            loss_d, acc_d = self.discriminator.train_on_batch(f_img, np.zeros((batch_size//2, 1)))
            loss_d_, acc_d_ = self.discriminator.train_on_batch(r_img, np.ones((batch_size//2, 1)))
            acc_d += acc_d_  # averaged later via acc_d/2

            # -- Generator step: train G (through frozen D) to label noise as real.
            z = np.random.uniform(-1, 1, (batch_size, 1, 1, 100))
            loss_g, acc_g = self.combined.train_on_batch(z, np.ones((batch_size, 1)))
            progress.draw(i)
            print(" Accuracy=(%f,%f)"%(acc_g, acc_d/2), end="")

    def train(self, iteration, batch_size, epoch):
        """Repeat _train() *epoch* times, printing an epoch header each time."""
        for i in range(epoch):
            print("Epoch %d/%d\n"%(i+1, epoch))
            self._train(iteration, batch_size)

    # --- Generate a single image to inspect after training ----------------
    def create_image(self):
        """Return one generated image of shape (1, 28, 28), values in [-1, 1]."""
        z = np.random.uniform(-1, 1, (1, 1, 1, 100))
        img = self.generator.predict(z)
        return img.reshape(1, 28, 28)
448
+
449
+
450
+
451
+
452
+
453
if __name__ == "__main__":
    main = Main()
    main.train(iteration=1875, batch_size=32, epoch=1)

    # --- Display and save the generated image -----------------------
    img = main.create_image()
    # BUG FIX: the generator's final activation is tanh, so pixel values lie
    # in [-1, 1].  The original np.uint8(img * 255) wrapped every negative
    # value around under the uint8 cast, corrupting the output image.
    # Map [-1, 1] -> [0, 255] before converting.
    pixels = np.uint8((img.reshape(28, 28) + 1.0) * 127.5)
    img = Image.fromarray(pixels)
    img.show()
    img.save("gan_generated_img.png")
470
+
471
+
472
+
473
+ ```