Question edit history

4

Title changed

2019/11/28 06:36

Posted by

sinugo

Score 5

test CHANGED
@@ -1 +1 @@
- I want to output the accuracy for each class!
+ cifar-10: how to output the accuracy for each class
test CHANGED
File without changes

3

Description revised

2019/11/28 06:36

Posted by

sinugo

Score 5

test CHANGED
File without changes
test CHANGED
@@ -2,7 +2,7 @@



- Please describe the details of your question here.
+

 I am currently studying automatic recognition of cloud images with tensorflow at university.

@@ -14,6 +14,20 @@



+ *Current output
+
+ epoch ●● duration = ●● sec, prediction = ●●
+
+
+
+ *Desired output
+
+ epoch ●● class : 0 duration = ●● sec, prediction = ●●
+
+ epoch ●● class : 1 duration = ●● sec, prediction = ●●
+
+ epoch ●● class : 2 duration = ●● sec, prediction = ●●
+

2

Text revised

2019/11/26 09:05

Posted by

sinugo

Score 5

test CHANGED
File without changes
test CHANGED
@@ -316,325 +316,23 @@

 ```

- ```python3
- model.py
-
- # coding: UTF-8
- from __future__ import absolute_import
- from __future__ import division
- from __future__ import print_function
-
- import tensorflow.compat.v1 as tf
- tf.disable_v2_behavior()
-
- NUM_CLASSES = 3
-
-
- def _get_weights(shape, stddev=1.0):
-     var = tf.get_variable('weights', shape,
-                           initializer=tf.truncated_normal_initializer(stddev=stddev))
-     return var
-
-
- def _get_biases(shape, value=0.0):
-     var = tf.get_variable('biases', shape,
-                           initializer=tf.constant_initializer(value))
-     return var
-
-
- def inference(image_node):
-     # conv1
-     with tf.variable_scope('conv1') as scope:
-         weights = _get_weights(shape=[3, 3, 3, 64], stddev=0.05)
-         conv = tf.nn.conv2d(image_node, weights, [1, 1, 1, 1], padding='SAME')
-         biases = _get_biases([64], value=0.1)
-         bias = tf.nn.bias_add(conv, biases)
-         conv1 = tf.nn.relu(bias, name=scope.name)
-
-     # pool1
-     pool1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
-                            padding='SAME', name='pool1')
-
-     # norm1
-     norm1 = tf.nn.lrn(pool1, 4,
-                       bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1')
-
-     # conv2
-     with tf.variable_scope('conv2') as scope:
-         weights = _get_weights(shape=[3, 3, 64, 64], stddev=0.05)
-         conv = tf.nn.conv2d(norm1, weights, [1, 1, 1, 1], padding='SAME')
-         biases = _get_biases([64], value=0.1)
-         bias = tf.nn.bias_add(conv, biases)
-         conv2 = tf.nn.relu(bias, name=scope.name)
-
-     # norm2
-     norm2 = tf.nn.lrn(conv2, 4,
-                       bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2')
-
-     # pool2
-     pool2 = tf.nn.max_pool(norm2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
-                            padding='SAME', name='pool2')
-
-     # conv3
-     with tf.variable_scope('conv3') as scope:
-         weights = _get_weights(shape=[3, 3, 64, 128], stddev=0.05)
-         conv = tf.nn.conv2d(pool2, weights, [1, 1, 1, 1], padding='SAME')
-         biases = _get_biases([128], value=0.1)
-         bias = tf.nn.bias_add(conv, biases)
-         conv3 = tf.nn.relu(bias, name=scope.name)
-
-     # pool3
-     pool3 = tf.nn.max_pool(conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
-                            padding='SAME', name='pool3')
-
-     # norm3
-     norm3 = tf.nn.lrn(pool3, 4,
-                       bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm3')
-
-     reshape = tf.reshape(norm3, [1, -1])
-     dim = reshape.get_shape()[1].value
-
-     # fc3
-     with tf.variable_scope('fc3') as scope:
-         weights = _get_weights(shape=[dim, 256], stddev=0.05)
-         biases = _get_biases([256], value=0.1)
-         fc3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
-
-     # fc4
-     with tf.variable_scope('fc4') as scope:
-         weights = _get_weights(shape=[256, 128], stddev=0.05)
-         biases = _get_biases([128], value=0.1)
-         fc4 = tf.nn.relu(tf.matmul(fc3, weights) + biases, name=scope.name)
-
-     # output
-     with tf.variable_scope('output') as scope:
-         weights = _get_weights(shape=[128, NUM_CLASSES], stddev=1 / 128.0)
-         biases = _get_biases([NUM_CLASSES], value=0.0)
-         logits = tf.add(tf.matmul(fc4, weights), biases, name='logits')
-
-     return logits
- ```
-
- ```python3
- reader.py
-
- # coding: UTF-8
- from __future__ import absolute_import
- from __future__ import division
- from __future__ import print_function
-
- import os
-
- import numpy as np
-
-
- class Cifar10Record(object):
-     width = 32
-     height = 32
-     depth = 3
-
-     def set_label(self, label_byte):
-         self.label = np.frombuffer(label_byte, dtype=np.uint8)
-
-     def set_image(self, image_bytes):
-         byte_buffer = np.frombuffer(image_bytes, dtype=np.int8)
-         reshaped_array = np.reshape(byte_buffer,
-                                     [self.depth, self.height, self.width])
-         self.byte_array = np.transpose(reshaped_array, [1, 2, 0])
-         self.byte_array = self.byte_array.astype(np.float32)
-
-
- class Cifar10Reader(object):
-     def __init__(self, filename):
-         if not os.path.exists(filename):
-             print(filename + ' is not exist')
-             return
-
-         self.bytestream = open(filename, mode="rb")
-
-     def close(self):
-         if not self.bytestream:
-             self.bytestream.close()
-
-     def read(self, index):
-         result = Cifar10Record()
-
-         label_bytes = 1
-         image_bytes = result.height * result.width * result.depth
-         record_bytes = label_bytes + image_bytes
-
-         self.bytestream.seek(record_bytes * index, 0)
-
-         result.set_label(self.bytestream.read(label_bytes))
-         result.set_image(self.bytestream.read(image_bytes))
-
-         return result
- ```

 ### What I tried

- I looked at several sites and tried adjusting their code to fit my own program,
-
- but I could not really get it to output what I wanted...
+ I made another _eval function and tried to run
+
+ prediction = _eval(sess, top_k_op,
+                    train_placeholder, label_placeholder)
+ print('epoch %d duration = %d sec, prediction = %.3f'
+       % (epoch, duration, prediction))
+
+ in a for loop, but it did not work.
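
The per-class loop described above could be organised roughly as in the self-contained sketch below. Here `evaluate` is only a hypothetical stand-in for the question's `_eval(sess, top_k_op, train_placeholder, label_placeholder)`, and the image/label arrays are dummy data:

```python3
import numpy as np

NUM_CLASSES = 3

def evaluate(images, labels):
    # Hypothetical stand-in for _eval: returns the fraction of correct
    # predictions on the given subset. It fakes a model that always
    # predicts class 0, just so the loop below runs end to end.
    predictions = np.zeros(len(labels), dtype=labels.dtype)
    return float(np.mean(predictions == labels))

# Dummy evaluation set: 12 images of shape 32x32x3 with known labels.
images = np.random.rand(12, 32, 32, 3).astype(np.float32)
labels = np.array([0, 1, 2] * 4)
epoch, duration = 1, 12

for target_class in range(NUM_CLASSES):
    # Restrict the evaluation to images of a single class; the returned
    # accuracy is then the accuracy for that class alone.
    mask = labels == target_class
    prediction = evaluate(images[mask], labels[mask])
    print('epoch %d class : %d duration = %d sec, prediction = %.3f'
          % (epoch, target_class, duration, prediction))
```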

 ### Additional information (FW/tool versions, etc.)
 

1

2019/11/22 08:38

Posted by

sinugo

Score 5

test CHANGED
@@ -1 +1 @@
- I want to output the accuracy for each class
+ I want to output the accuracy for each class!
test CHANGED
File without changes