Question edit history

3

Addendum

2019/02/23 11:31

Posted

Reach

Score 733

test CHANGED
File without changes
test CHANGED
@@ -417,3 +417,13 @@
  netG = tf.contrib.tpu.keras_to_tpu_model(netG, strategy=strategy)
 
  ```
+
+
+
+ Addendum:
+
+ It seems that multiple models cannot be registered on Colab's TPU at the same time?
+
+ As for running one model on the TPU and another on the CPU,
+
+ it is unclear whether that would even be faster than running (all of the models) on the GPU.
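
For reference, a minimal sketch of the split described in the addendum: converting only netD to a TPU model and leaving netG unconverted so it keeps running on the host CPU. This is an untested assumption built from the same TF 1.x contrib API already used in the question's code:

```python
import os
import tensorflow as tf
from tensorflow.contrib.tpu.python.tpu import keras_support

# Assumption: netD and netG are the Keras models built earlier in the question.
tpu_grpc_url = "grpc://" + os.environ["COLAB_TPU_ADDR"]
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(tpu_grpc_url)
strategy = keras_support.TPUDistributionStrategy(tpu_cluster_resolver)

# Convert only the discriminator; the generator is intentionally left
# unconverted, so it would execute on the host CPU instead of the TPU.
netD = tf.contrib.tpu.keras_to_tpu_model(netD, strategy=strategy)
```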

2

Partial code posted

2019/02/23 11:31

Posted

Reach

Score 733

test CHANGED
File without changes
test CHANGED
@@ -82,4 +82,338 @@
 
  The modified code cannot be posted because of the character limit.
 
- What should I do?
+ ```python
+ import os
+
+ import tensorflow as tf
+ import tensorflow.keras.backend as K
+
+ from tensorflow.contrib.tpu.python.tpu import keras_support
+
+ K.set_image_data_format('channels_last')
+ channel_axis = -1
+ channel_first = False
+
+ from tensorflow.keras.models import Sequential, Model
+ from tensorflow.keras.layers import Conv2D, ZeroPadding2D, BatchNormalization, Input, Dropout
+ from tensorflow.keras.layers import Conv2DTranspose, Reshape, Activation, Cropping2D, Flatten
+ from tensorflow.keras.layers import Concatenate
+ from tensorflow.keras.layers import LeakyReLU
+ from tensorflow.keras.activations import relu
+ from tensorflow.keras.initializers import RandomNormal
+
+
+ def __conv_init(a):
+     print("conv_init", a)
+     k = RandomNormal(0, 0.02)(a)
+     k.conv_weight = True
+     return k
+
+ conv_init = RandomNormal(0, 0.02)
+ gamma_init = RandomNormal(1., 0.02)  # for BatchNormalization
+
+
+ def conv2d(f, *a, **k):
+     return Conv2D(f, kernel_initializer=conv_init, *a, **k)
+
+ def batchnorm():
+     return BatchNormalization(momentum=0.9, axis=channel_axis, epsilon=1.01e-5,
+                               gamma_initializer=gamma_init)
+
+ # PatchGAN discriminator
+ def BASIC_D(nc_in, nc_out, ndf, max_layers=3):
+     if channel_first:
+         input_a, input_b = Input(shape=(nc_in, None, None)), Input(shape=(nc_out, None, None))
+     else:
+         input_a, input_b = Input(shape=(None, None, nc_in)), Input(shape=(None, None, nc_out))
+     _ = Concatenate(axis=channel_axis)([input_a, input_b])
+     _ = conv2d(ndf, kernel_size=4, strides=2, padding="same", name='First')(_)
+     _ = LeakyReLU(alpha=0.2)(_)
+
+     for layer in range(1, max_layers):
+         out_feat = ndf * min(2**layer, 8)
+         _ = conv2d(out_feat, kernel_size=4, strides=2, padding="same",
+                    use_bias=False, name='pyramid.{0}'.format(layer))(_)
+         _ = batchnorm()(_, training=1)
+         _ = LeakyReLU(alpha=0.2)(_)
+
+     out_feat = ndf * min(2**max_layers, 8)
+     _ = ZeroPadding2D(1)(_)
+     _ = conv2d(out_feat, kernel_size=4, use_bias=False, name='pyramid_last')(_)
+     _ = batchnorm()(_, training=1)
+     _ = LeakyReLU(alpha=0.2)(_)
+
+     # final layer
+     _ = ZeroPadding2D(1)(_)
+     _ = conv2d(1, kernel_size=4, name='final', activation="sigmoid")(_)
+     return Model(inputs=[input_a, input_b], outputs=_)
+
+
+ # U-Net generator, built recursively from the innermost block outwards
+ def UNET_G(isize, nc_in=3, nc_out=3, ngf=64, fixed_input_size=True):
+     max_nf = 8 * ngf
+
+     def block(x, s, nf_in, use_batchnorm=True, nf_out=None, nf_next=None):
+         assert s >= 2 and s % 2 == 0
+         if nf_next is None:
+             nf_next = min(nf_in*2, max_nf)
+         if nf_out is None:
+             nf_out = nf_in
+         x = conv2d(nf_next, kernel_size=4, strides=2, use_bias=(not (use_batchnorm and s > 2)),
+                    padding="same", name='conv_{0}'.format(s))(x)
+         if s > 2:
+             if use_batchnorm:
+                 x = batchnorm()(x, training=1)
+             x2 = LeakyReLU(alpha=0.2)(x)
+             x2 = block(x2, s//2, nf_next)
+             x = Concatenate(axis=channel_axis)([x, x2])
+         x = Activation("relu")(x)
+         x = Conv2DTranspose(nf_out, kernel_size=4, strides=2, use_bias=not use_batchnorm,
+                             kernel_initializer=conv_init,
+                             name='convt.{0}'.format(s))(x)
+         x = Cropping2D(1)(x)
+         if use_batchnorm:
+             x = batchnorm()(x, training=1)
+         if s <= 8:
+             x = Dropout(0.5)(x, training=1)
+         return x
+
+     s = isize if fixed_input_size else None
+     if channel_first:
+         _ = inputs = Input(shape=(nc_in, s, s))
+     else:
+         _ = inputs = Input(shape=(s, s, nc_in))
+     _ = block(_, isize, nc_in, False, nf_out=nc_out, nf_next=ngf)
+     _ = Activation('tanh')(_)
+     return Model(inputs=inputs, outputs=[_])
+
+
+ # Hyperparameters
+ nc_in = 3
+ nc_out = 3
+ ngf = 64
+ ndf = 64
+ λ = 10
+
+ loadSize = 256
+ imageSize = 256
+ batchSize = 1
+ lrD = 2e-4
+ lrG = 2e-4
+
+ netD = BASIC_D(nc_in, nc_out, ndf)
+ netD.summary()
+
+ from IPython.display import SVG
+
+ netG = UNET_G(imageSize, nc_in, nc_out, ngf)
+ netG.summary()
+
+ from tensorflow.keras.optimizers import RMSprop, SGD, Adam
+
+ real_A = netG.input
+ fake_B = netG.output
+ netG_generate = K.function([real_A], [fake_B])
+ real_B = netD.inputs[1]
+ output_D_real = netD([real_A, real_B])
+ output_D_fake = netD([real_A, fake_B])
+
+ # Binary cross-entropy written out by hand
+ loss_fn = lambda output, target: -K.mean(K.log(output+1e-12)*target + K.log(1-output+1e-12)*(1-target))
+
+ loss_D_real = loss_fn(output_D_real, K.ones_like(output_D_real))
+ loss_D_fake = loss_fn(output_D_fake, K.zeros_like(output_D_fake))
+ loss_G_fake = loss_fn(output_D_fake, K.ones_like(output_D_fake))
+
+ loss_L1 = K.mean(K.abs(fake_B - real_B))
+
+ # Discriminator training function
+ loss_D = loss_D_real + loss_D_fake
+ training_updates = Adam(lr=lrD, beta_1=0.5).get_updates(netD.trainable_weights, [], loss_D)
+ #training_updates = tf.train.AdamOptimizer(learning_rate=lrD, beta1=0.5).get_updates(netD.trainable_weights, [], loss_D)
+ netD_train = K.function([real_A, real_B], [loss_D/2], training_updates)
+
+ # Generator training function (adversarial loss + weighted L1 loss)
+ loss_G = loss_G_fake + 100 * loss_L1
+ training_updates = Adam(lr=lrG, beta_1=0.5).get_updates(netG.trainable_weights, [], loss_G)
+ #training_updates = tf.train.AdamOptimizer(learning_rate=lrG, beta1=0.5).get_updates(netG.trainable_weights, [], loss_G)
+ netG_train = K.function([real_A, real_B], [loss_G_fake, loss_L1], training_updates)
+
+ try:
+     netG.load_weights('netG.hdf5')
+     netD.load_weights('netD.hdf5')
+     print('Finished loading the weight data')
+ except:
+     print('Could not load the weight data')
+     pass
+
+ # Convert both Keras models to TPU models
+ tpu_grpc_url = "grpc://" + os.environ["COLAB_TPU_ADDR"]
+ tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(tpu_grpc_url)
+ strategy = keras_support.TPUDistributionStrategy(tpu_cluster_resolver)
+ netD = tf.contrib.tpu.keras_to_tpu_model(netD, strategy=strategy)
+ netG = tf.contrib.tpu.keras_to_tpu_model(netG, strategy=strategy)
+ ```

1

Addendum

2019/02/18 01:18

Posted

Reach

Score 733

test CHANGED
File without changes
test CHANGED
@@ -80,4 +80,6 @@
 
 
 
- Please provide more detailed information here.
+ The modified code cannot be posted because of the character limit.
+
+ What should I do?