Question edit history

Revision 1: correction
I have a train.py like the one shown further below.

On top of that, when I run the following code, I get an error.

In principle it should be correct, so why does the error shown below occur?

I have revised the question accordingly. Thank you in advance for your answer.

```
import sys
import pickle
import numpy as np
import tensorflow as tf
import os
import matplotlib.pyplot as plt
import scipy.misc
import time

from train import DCGAN
from layer import show_all_variables


def unpickle(file):
    fp = open(file, 'rb')
    if sys.version_info.major == 2:
        data = pickle.load(fp)
    elif sys.version_info.major == 3:
        data = pickle.load(fp, encoding='latin-1')
    fp.close()
    return data


test= unpickle("train_image.pickle")

X_image=np.array(test)/127.5 - 1
run_config = tf.ConfigProto()
run_config.gpu_options.allow_growth=True

with tf.Session(config=run_config) as sess:
    dcgan = DCGAN(sess, X_image=X_image, epochs=10, step=20)
    show_all_variables()
    dcgan.train()

---------------------------------------------------------------------------
NameError                                 Traceback (most recent call last)
<ipython-input-16-fb64556ff7b2> in <module>()
      3
      4 with tf.Session(config=run_config) as sess:
----> 5     dcgan = DCGAN(sess, X_image=X_image, epochs=10, step=20)
      6     show_all_variables()
      7     dcgan.train()

~/DCGAN-tensorflow/train.py in __init__(self, sess, X_image, epochs, step)
     27         self.step = step
     28
---> 29         self.build_model()
     30
     31

~/DCGAN-tensorflow/train.py in build_model(self)
     35         batch_size=64
     36         with tf.variable_scope("discriminator") as scope:
---> 37             h0 = lrelu(conv2d(image, 64, name='d_h0_conv'))
     38             h1 = lrelu(batch_norm(conv2d(h0, 128, name='d_h1_conv'),'d_bn1'))
     39             h2 = lrelu(batch_norm(conv2d(h1, 256, name='d_h2_conv'),'d_bn2'))

NameError: name 'generator' is not defined
```
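
For reference, here is a minimal, self-contained sketch of how this kind of NameError can appear at the moment `DCGAN(...)` is constructed. The class and names in it are hypothetical (it is not my actual train.py): it only illustrates that `__init__` calls `build_model()` immediately, and any bare name used inside `build_model()` that is not a local variable, an argument, or something defined/imported at module level (e.g. via `from layer import *`) raises `NameError` right away.

```
# Hypothetical, self-contained sketch (names are made up, not from train.py).
# It only illustrates how a NameError surfaces while build_model() runs in __init__.
class Model(object):
    def __init__(self):
        # Runs at construction time, so the failing lookup below happens
        # as soon as Model() is instantiated.
        self.build_model()

    def build_model(self):
        # 'generator' is looked up as a plain module-level name here,
        # not as self.generator, so Python raises NameError if no such
        # global has been defined or imported.
        return generator(0)

try:
    Model()
except NameError as e:
    print(e)   # -> name 'generator' is not defined
```

The sketch above only restates the error mechanism; the actual class definition I am using is the train.py listed below.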

train.py:

```
# coding: utf-8

# In[1]:

import sys
import pickle
import numpy as np
import tensorflow as tf
import os
import matplotlib.pyplot as plt
import scipy.misc
import time
from layer import *


# In[2]:

class DCGAN(object):
    def __init__(self, sess, X_image, epochs, step):
        self.sess = sess
        self.X_image=X_image
        self.epochs = epochs
        self.step = step

        self.build_model()

    def discriminator(self, image):
        batch_size=64
        with tf.variable_scope("discriminator") as scope:
            h0 = lrelu(conv2d(image, 64, name='d_h0_conv'))
            h1 = lrelu(batch_norm(conv2d(h0, 128, name='d_h1_conv'),'d_bn1'))
            h2 = lrelu(batch_norm(conv2d(h1, 256, name='d_h2_conv'),'d_bn2'))
            h3 = lrelu(batch_norm(conv2d(h2, 512, name='d_h3_conv'),'d_bn3'))  # shape=(batch_size, 64, 64, 3)
            h4 = linear_d(tf.reshape(h3, [batch_size, -1]),2,'d_h4_lin')
            return h4

    # shape=(batch_size, 64, 64, 3)
    def generator(self, z_):
        batch_size=64
        with tf.variable_scope("generator") as scope:
            # project `z` and reshape
            z, h0_w, h0_b = linear(z_, 64*8*4*4, 'g_h0_lin',with_w=True)
            h0 = tf.nn.relu(batch_norm(tf.reshape(z, [-1, 4, 4, 64*8]), 'g_bn0'))
            h1, h1_w, h1_b = deconv2d(h0, [batch_size, 8, 8, 64*4], name='g_h1', with_w=True)
            h1 = tf.nn.relu(batch_norm(h1, 'g_bn1'))
            h2, h2_w, h2_b = deconv2d(h1, [batch_size, 16, 16, 64*2], name='g_h2', with_w=True)
            h2 = tf.nn.relu(batch_norm(h2, 'g_bn2'))
            h3, h3_w, h3_b = deconv2d(h2, [batch_size, 32, 32, 64*1], name='g_h3', with_w=True)
            h3 = tf.nn.relu(batch_norm(h3, 'g_bn3'))
            h4, h4_w, h4_b = deconv2d(h3, [batch_size, 64, 64, 3], name='g_h4', with_w=True)
            return tf.nn.tanh(h4)  # shape=(batch_size, 64, 64, 3)

    def sampler(self, z_):  # shape=(batch_size, 64, 64, 3)
        batch_size=64
        with tf.variable_scope("generator") as scope:
            # project `z` and reshape
            z= linear(z_, 64*8*4*4,'g_h0_lin')
            h0 = tf.nn.relu(batch_norm(tf.reshape(z, [-1, 4, 4, 64*8]),'g_bn0',train=False))
            h1 = deconv2d(h0, [batch_size, 8, 8, 64*4], name='g_h1')
            h1 = tf.nn.relu(batch_norm(h1,'g_bn1',train=False))
            h2 = deconv2d(h1, [batch_size, 16, 16, 64*2], name='g_h2')
            h2 = tf.nn.relu(batch_norm(h2,'g_bn2',train=False))
            h3 = deconv2d(h2, [batch_size, 32, 32, 64*1], name='g_h3')
            h3 = tf.nn.relu(batch_norm(h3,'g_bn3',train=False))
            h4 = deconv2d(h3, [batch_size, 64, 64, 3], name='g_h4')
            return tf.nn.tanh(h4)  # shape=(batch_size, 64, 64, 3)

    def build_model(self):
        self.z = tf.placeholder(tf.float32, [64, 100])
        self.image = tf.placeholder(tf.float32, [64, 64, 64, 3])

        self.G=self.generator(self.z)  # G(z)
        self.D_logits = self.discriminator(self.image)  # D(x)
        self.sampler = self.sampler(self.z)
        self.D_logits_ = self.discriminator(self.G)  # D(G(z))

        batch_label=64

        self.d_loss_real = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.D_logits, labels=tf.ones([batch_label], dtype=tf.int64)))
        self.d_loss_fake = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.D_logits_, labels=tf.zeros([batch_label], dtype=tf.int64)))

        self.g_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.D_logits_, labels=tf.ones([batch_label], dtype=tf.int64)))
        self.d_loss = self.d_loss_real + self.d_loss_fake

        self.d_vars = [var for var in tf.trainable_variables() if 'd_' in var.name]
        self.g_vars = [var for var in tf.trainable_variables() if 'g_' in var.name]

        self.saver=tf.train.Saver()

    def train(self):
        g_optim = tf.train.AdamOptimizer(learning_rate=0.0002, beta1=0.5).minimize(self.g_loss, var_list=self.g_vars)
        d_optim = tf.train.AdamOptimizer(learning_rate=0.0002, beta1=0.5).minimize(self.d_loss, var_list=self.d_vars)

        tf.global_variables_initializer().run()

        sample_z = np.random.uniform(-1, 1, size=(64, 100))
        batch_z = np.random.uniform(-1, 1, [64, 100])

        sample_files = self.X_image[0:64]
        sample = [sample_file for sample_file in sample_files]
        sample_images = np.array(sample).astype(np.float32)

        counter=1

        start_time=time.time()
        for epoch in range(self.epochs):
            batch_idxs= min(len(self.X_image), np.inf) // 64
            for idx in range(0, batch_idxs):
                bacth_files= self.X_image[idx*64:(idx+1)*64]
                batch = [batch_file for batch_file in bacth_files]
                batch_images = np.array(batch).astype(np.float32)

                self.sess.run(d_optim, feed_dict = {self.z: batch_z, self.image: batch_images})
                self.sess.run(g_optim, feed_dict = {self.z: batch_z})

                # Run g_optim twice to realize loss value
                self.sess.run(g_optim, feed_dict = {self.z: batch_z})
                errD_fake = self.d_loss_fake.eval({self.z: batch_z})
                errD_real = self.d_loss_real.eval({self.image: batch_images})
                errG = self.g_loss.eval({self.z: batch_z})
                counter += 1
                print("Epoch: [%2d] [%4d/%4d] time:%4.4f, d_loss: %.8f, g_loss: %.8f" % (epoch, idx, batch_idxs,
                    time.time()-start_time, errD_fake+errD_real, errG))
                if np.mod(counter, self.step)==1:
                    samples, d_loss_sample, g_loss_sample = sess.run([self.sampler, self.d_loss, self.g_loss],
                        feed_dict={self.z: sample_z, self.image: sample_images})

                    print("[Sample] d_loss:%.8f, g_loss:%.8f" % (d_loss_sample, g_loss_sample))
                    col=8
                    rows=[]
                    for i in range(8):
                        rows.append(np.hstack(samples[col * i + 0:col * i + col]))
                    vnari=np.vstack(rows)
                    plt.imshow(vnari)
                    plt.show()
```