質問編集履歴
2
誤字
test
CHANGED
File without changes
|
test
CHANGED
@@ -249,201 +249,3 @@
|
|
249
249
|
以下略
|
250
250
|
|
251
251
|
```
|
252
|
-
|
253
|
-
|
254
|
-
|
255
|
-
修正後
|
256
|
-
|
257
|
-
```
|
258
|
-
|
259
|
-
def inference(images_placeholder, keep_prob):
|
260
|
-
|
261
|
-
""" 予測モデルを作成する関数
|
262
|
-
|
263
|
-
引数:
|
264
|
-
|
265
|
-
images_placeholder: 画像のplaceholder
|
266
|
-
|
267
|
-
keep_prob: dropout率のplace_holder
|
268
|
-
|
269
|
-
返り値:
|
270
|
-
|
271
|
-
y_conv: 各クラスの確率(のようなもの)
|
272
|
-
|
273
|
-
"""
|
274
|
-
|
275
|
-
# 重みを標準偏差0.1の正規分布で初期化
|
276
|
-
|
277
|
-
def weight_variable(shape):
|
278
|
-
|
279
|
-
initial = tf.truncated_normal(shape, stddev=0.1)
|
280
|
-
|
281
|
-
return tf.Variable(initial)
|
282
|
-
|
283
|
-
# バイアスを標準偏差0.1の正規分布で初期化
|
284
|
-
|
285
|
-
def bias_variable(shape):
|
286
|
-
|
287
|
-
initial = tf.constant(0.1, shape=shape)
|
288
|
-
|
289
|
-
return tf.Variable(initial)
|
290
|
-
|
291
|
-
# 畳み込み層の作成
|
292
|
-
|
293
|
-
def conv2d(x, W):
|
294
|
-
|
295
|
-
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
|
296
|
-
|
297
|
-
# プーリング層の作成
|
298
|
-
|
299
|
-
def max_pool_2x2(x):
|
300
|
-
|
301
|
-
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
|
302
|
-
|
303
|
-
strides=[1, 2, 2, 1], padding='SAME')
|
304
|
-
|
305
|
-
|
306
|
-
|
307
|
-
# 入力を28x28x3に変形
|
308
|
-
|
309
|
-
x_image = tf.reshape(images_placeholder, [-1, IMAGE_SIZE, IMAGE_SIZE, 3])
|
310
|
-
|
311
|
-
# 畳み込み層1の作成
|
312
|
-
|
313
|
-
with tf.name_scope('conv1') as scope:
|
314
|
-
|
315
|
-
W_conv1 = weight_variable([5, 5, 3, 32])
|
316
|
-
|
317
|
-
b_conv1 = bias_variable([32])
|
318
|
-
|
319
|
-
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
|
320
|
-
|
321
|
-
|
322
|
-
|
323
|
-
# プーリング層1の作成
|
324
|
-
|
325
|
-
with tf.name_scope('pool1') as scope:
|
326
|
-
|
327
|
-
h_pool1 = max_pool_2x2(h_conv1)
|
328
|
-
|
329
|
-
|
330
|
-
|
331
|
-
# 畳み込み層2の作成
|
332
|
-
|
333
|
-
with tf.name_scope('conv2') as scope:
|
334
|
-
|
335
|
-
W_conv2 = weight_variable([5, 5, 32, 64])
|
336
|
-
|
337
|
-
b_conv2 = bias_variable([64])
|
338
|
-
|
339
|
-
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
|
340
|
-
|
341
|
-
|
342
|
-
|
343
|
-
# プーリング層2の作成
|
344
|
-
|
345
|
-
with tf.name_scope('pool2') as scope:
|
346
|
-
|
347
|
-
h_pool2 = max_pool_2x2(h_conv2)
|
348
|
-
|
349
|
-
|
350
|
-
|
351
|
-
# 畳み込み層3の作成
|
352
|
-
|
353
|
-
with tf.name_scope('conv3') as scope:
|
354
|
-
|
355
|
-
W_conv3 = weight_variable([5, 5, 64, 64])
|
356
|
-
|
357
|
-
b_conv3 = bias_variable([64])
|
358
|
-
|
359
|
-
h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)
|
360
|
-
|
361
|
-
|
362
|
-
|
363
|
-
# 畳み込み層4の作成
|
364
|
-
|
365
|
-
with tf.name_scope('conv4') as scope:
|
366
|
-
|
367
|
-
W_conv4 = weight_variable([5, 5, 64, 64])
|
368
|
-
|
369
|
-
b_conv4 = bias_variable([64])
|
370
|
-
|
371
|
-
h_conv4 = tf.nn.relu(conv2d(h_conv3, W_conv4) + b_conv4)
|
372
|
-
|
373
|
-
|
374
|
-
|
375
|
-
# 畳み込み層5の作成
|
376
|
-
|
377
|
-
with tf.name_scope('conv5') as scope:
|
378
|
-
|
379
|
-
W_conv5 = weight_variable([5, 5, 64, 64])
|
380
|
-
|
381
|
-
b_conv5 = bias_variable([64])
|
382
|
-
|
383
|
-
h_conv5 = tf.nn.relu(conv2d(h_conv4, W_conv5) + b_conv5)
|
384
|
-
|
385
|
-
|
386
|
-
|
387
|
-
# プーリング層3の作成
|
388
|
-
|
389
|
-
with tf.name_scope('pool3') as scope:
|
390
|
-
|
391
|
-
h_pool3 = max_pool_2x2(h_conv5)
|
392
|
-
|
393
|
-
|
394
|
-
|
395
|
-
# 全結合層1の作成
|
396
|
-
|
397
|
-
with tf.name_scope('fc1') as scope:
|
398
|
-
|
399
|
-
W_fc1 = weight_variable([12*12*64, 1024])
|
400
|
-
|
401
|
-
b_fc1 = bias_variable([1024])
|
402
|
-
|
403
|
-
h_pool3_flat = tf.reshape(h_pool3, [-1, 7*7*64])
|
404
|
-
|
405
|
-
h_fc1 = tf.nn.relu(tf.matmul(h_pool3_flat, W_fc1) + b_fc1)
|
406
|
-
|
407
|
-
# dropoutの設定
|
408
|
-
|
409
|
-
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
|
410
|
-
|
411
|
-
|
412
|
-
|
413
|
-
# 全結合層2の作成
|
414
|
-
|
415
|
-
with tf.name_scope('fc2') as scope:
|
416
|
-
|
417
|
-
W_fc2 = weight_variable([1024, 1024])
|
418
|
-
|
419
|
-
b_fc2 = bias_variable([1024])
|
420
|
-
|
421
|
-
h_fc2 = tf.nn.relu(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
|
422
|
-
|
423
|
-
# dropoutの設定
|
424
|
-
|
425
|
-
h_fc2_drop = tf.nn.dropout(h_fc2, keep_prob)
|
426
|
-
|
427
|
-
# 全結合層3の作成
|
428
|
-
|
429
|
-
with tf.name_scope('fc3') as scope:
|
430
|
-
|
431
|
-
W_fc3 = weight_variable([1024, NUM_CLASSES])
|
432
|
-
|
433
|
-
b_fc3 = bias_variable([NUM_CLASSES])
|
434
|
-
|
435
|
-
|
436
|
-
|
437
|
-
y_conv = tf.matmul(h_fc2_drop, W_fc3) + b_fc3
|
438
|
-
|
439
|
-
# ソフトマックス関数による正規化
|
440
|
-
|
441
|
-
with tf.name_scope('softmax') as scope:
|
442
|
-
|
443
|
-
y_conv=tf.nn.softmax(tf.matmul(h_fc2_drop, W_fc3) + b_fc3)
|
444
|
-
|
445
|
-
# 各ラベルの確率のようなものを返す
|
446
|
-
|
447
|
-
return y_conv
|
448
|
-
|
449
|
-
```
|
1
修正
test
CHANGED
File without changes
|
test
CHANGED
@@ -249,3 +249,201 @@
|
|
249
249
|
以下略
|
250
250
|
|
251
251
|
```
|
252
|
+
|
253
|
+
|
254
|
+
|
255
|
+
修正後
|
256
|
+
|
257
|
+
```
|
258
|
+
|
259
|
+
def inference(images_placeholder, keep_prob):
|
260
|
+
|
261
|
+
""" 予測モデルを作成する関数
|
262
|
+
|
263
|
+
引数:
|
264
|
+
|
265
|
+
images_placeholder: 画像のplaceholder
|
266
|
+
|
267
|
+
keep_prob: dropout率のplace_holder
|
268
|
+
|
269
|
+
返り値:
|
270
|
+
|
271
|
+
y_conv: 各クラスの確率(のようなもの)
|
272
|
+
|
273
|
+
"""
|
274
|
+
|
275
|
+
# 重みを標準偏差0.1の正規分布で初期化
|
276
|
+
|
277
|
+
def weight_variable(shape):
|
278
|
+
|
279
|
+
initial = tf.truncated_normal(shape, stddev=0.1)
|
280
|
+
|
281
|
+
return tf.Variable(initial)
|
282
|
+
|
283
|
+
# バイアスを標準偏差0.1の正規分布で初期化
|
284
|
+
|
285
|
+
def bias_variable(shape):
|
286
|
+
|
287
|
+
initial = tf.constant(0.1, shape=shape)
|
288
|
+
|
289
|
+
return tf.Variable(initial)
|
290
|
+
|
291
|
+
# 畳み込み層の作成
|
292
|
+
|
293
|
+
def conv2d(x, W):
|
294
|
+
|
295
|
+
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
|
296
|
+
|
297
|
+
# プーリング層の作成
|
298
|
+
|
299
|
+
def max_pool_2x2(x):
|
300
|
+
|
301
|
+
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
|
302
|
+
|
303
|
+
strides=[1, 2, 2, 1], padding='SAME')
|
304
|
+
|
305
|
+
|
306
|
+
|
307
|
+
# 入力を28x28x3に変形
|
308
|
+
|
309
|
+
x_image = tf.reshape(images_placeholder, [-1, IMAGE_SIZE, IMAGE_SIZE, 3])
|
310
|
+
|
311
|
+
# 畳み込み層1の作成
|
312
|
+
|
313
|
+
with tf.name_scope('conv1') as scope:
|
314
|
+
|
315
|
+
W_conv1 = weight_variable([5, 5, 3, 32])
|
316
|
+
|
317
|
+
b_conv1 = bias_variable([32])
|
318
|
+
|
319
|
+
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
|
320
|
+
|
321
|
+
|
322
|
+
|
323
|
+
# プーリング層1の作成
|
324
|
+
|
325
|
+
with tf.name_scope('pool1') as scope:
|
326
|
+
|
327
|
+
h_pool1 = max_pool_2x2(h_conv1)
|
328
|
+
|
329
|
+
|
330
|
+
|
331
|
+
# 畳み込み層2の作成
|
332
|
+
|
333
|
+
with tf.name_scope('conv2') as scope:
|
334
|
+
|
335
|
+
W_conv2 = weight_variable([5, 5, 32, 64])
|
336
|
+
|
337
|
+
b_conv2 = bias_variable([64])
|
338
|
+
|
339
|
+
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
|
340
|
+
|
341
|
+
|
342
|
+
|
343
|
+
# プーリング層2の作成
|
344
|
+
|
345
|
+
with tf.name_scope('pool2') as scope:
|
346
|
+
|
347
|
+
h_pool2 = max_pool_2x2(h_conv2)
|
348
|
+
|
349
|
+
|
350
|
+
|
351
|
+
# 畳み込み層3の作成
|
352
|
+
|
353
|
+
with tf.name_scope('conv3') as scope:
|
354
|
+
|
355
|
+
W_conv3 = weight_variable([5, 5, 64, 64])
|
356
|
+
|
357
|
+
b_conv3 = bias_variable([64])
|
358
|
+
|
359
|
+
h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)
|
360
|
+
|
361
|
+
|
362
|
+
|
363
|
+
# 畳み込み層4の作成
|
364
|
+
|
365
|
+
with tf.name_scope('conv4') as scope:
|
366
|
+
|
367
|
+
W_conv4 = weight_variable([5, 5, 64, 64])
|
368
|
+
|
369
|
+
b_conv4 = bias_variable([64])
|
370
|
+
|
371
|
+
h_conv4 = tf.nn.relu(conv2d(h_conv3, W_conv4) + b_conv4)
|
372
|
+
|
373
|
+
|
374
|
+
|
375
|
+
# 畳み込み層5の作成
|
376
|
+
|
377
|
+
with tf.name_scope('conv5') as scope:
|
378
|
+
|
379
|
+
W_conv5 = weight_variable([5, 5, 64, 64])
|
380
|
+
|
381
|
+
b_conv5 = bias_variable([64])
|
382
|
+
|
383
|
+
h_conv5 = tf.nn.relu(conv2d(h_conv4, W_conv5) + b_conv5)
|
384
|
+
|
385
|
+
|
386
|
+
|
387
|
+
# プーリング層3の作成
|
388
|
+
|
389
|
+
with tf.name_scope('pool3') as scope:
|
390
|
+
|
391
|
+
h_pool3 = max_pool_2x2(h_conv5)
|
392
|
+
|
393
|
+
|
394
|
+
|
395
|
+
# 全結合層1の作成
|
396
|
+
|
397
|
+
with tf.name_scope('fc1') as scope:
|
398
|
+
|
399
|
+
W_fc1 = weight_variable([12*12*64, 1024])
|
400
|
+
|
401
|
+
b_fc1 = bias_variable([1024])
|
402
|
+
|
403
|
+
h_pool3_flat = tf.reshape(h_pool3, [-1, 7*7*64])
|
404
|
+
|
405
|
+
h_fc1 = tf.nn.relu(tf.matmul(h_pool3_flat, W_fc1) + b_fc1)
|
406
|
+
|
407
|
+
# dropoutの設定
|
408
|
+
|
409
|
+
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
|
410
|
+
|
411
|
+
|
412
|
+
|
413
|
+
# 全結合層2の作成
|
414
|
+
|
415
|
+
with tf.name_scope('fc2') as scope:
|
416
|
+
|
417
|
+
W_fc2 = weight_variable([1024, 1024])
|
418
|
+
|
419
|
+
b_fc2 = bias_variable([1024])
|
420
|
+
|
421
|
+
h_fc2 = tf.nn.relu(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
|
422
|
+
|
423
|
+
# dropoutの設定
|
424
|
+
|
425
|
+
h_fc2_drop = tf.nn.dropout(h_fc2, keep_prob)
|
426
|
+
|
427
|
+
# 全結合層3の作成
|
428
|
+
|
429
|
+
with tf.name_scope('fc3') as scope:
|
430
|
+
|
431
|
+
W_fc3 = weight_variable([1024, NUM_CLASSES])
|
432
|
+
|
433
|
+
b_fc3 = bias_variable([NUM_CLASSES])
|
434
|
+
|
435
|
+
|
436
|
+
|
437
|
+
y_conv = tf.matmul(h_fc2_drop, W_fc3) + b_fc3
|
438
|
+
|
439
|
+
# ソフトマックス関数による正規化
|
440
|
+
|
441
|
+
with tf.name_scope('softmax') as scope:
|
442
|
+
|
443
|
+
y_conv=tf.nn.softmax(tf.matmul(h_fc2_drop, W_fc3) + b_fc3)
|
444
|
+
|
445
|
+
# 各ラベルの確率のようなものを返す
|
446
|
+
|
447
|
+
return y_conv
|
448
|
+
|
449
|
+
```
|