Question edit history
1
Fix
test
CHANGED
@@ -1 +1 @@
-
+The point of defining a class that returns nothing but pass
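The new title asks about a class whose body is nothing but ```pass```. As a minimal sketch of that pattern (the names here are illustrative, not from the question's code): ```pass``` is a no-op that only makes the empty class body syntactically valid, and such a class serves as a bare container whose instances get their attributes attached after creation.

```
class Record(object):
  pass                   # no-op; just makes the empty class body valid Python

result = Record()        # bare instance of the empty class
result.height = 128      # attributes are attached to the instance afterwards
result.width = 128
print(result.height * result.width)  # 16384
```

Returning such an instance from a function hands back a single object carrying all of those fields, which appears to be how ```CIFAR10Record``` is used in the revised question below.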
test
CHANGED
@@ -1,52 +1,14 @@
-
+In the code below, a class is defined inside a function, but ```pass``` returns nothing.

-[Question]
-
-*First location
-
-```
-
-# Compute gradients.
-
-with tf.control_dependencies([loss_averages_op]):
-
-  opt = tf.train.GradientDescentOptimizer(lr)
-
-
+On the other hand, the function's ```return``` is result.



-
+I have some questions about this.

-
+Question 1: What is the point of defining this class?

-```
-
-The gradients are defined and their values are updated with ```compute_gradients```, but does ```opt.apply_gradients``` apply the updated gradient values to the gradient (opt)?
-
-
-
-*Second location
-
-```
-
-
+Question 2: This code is from cifar10; could you tell me what the ```result = CIFAR10Record()``` part means?
-
-train_op = tf.no_op(name='train')
-
-```
-
-
-
-Is tf.control_dependencies tying together (building a dependency between) the updated gradient values and the updated weight/bias values during training? Is it linking the two because they are updated separately?
-
-
-
-Also, what does ```tf.no_op(name='train')``` do?
-
-
-
-Could you kindly explain? Thank you in advance.



@@ -58,116 +20,52 @@



+```
+
+def read_cifar10(filename_queue):
+
+  class CIFAR10Record(object):
+
+    pass
+
+  result = CIFAR10Record()
+
+
+
+  label_bytes = 1  # 2 for CIFAR-100
+
+  result.height = 128
+
+  result.width = 128
+
+  result.depth = 3
+
+  image_bytes = result.height * result.width * result.depth  # 49152
+
+
+
+  record_bytes = label_bytes + image_bytes
+
+  reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)  # <tensorflow.python.ops.io_ops.FixedLengthRecordReader at 0x112c44128>
+
+  result.key, value = reader.read(filename_queue)  # key: <tf.Tensor 'ReaderReadV2:0' shape=() dtype=string>, value: 'ReaderReadV2:1'



+  record_bytes = tf.decode_raw(value, tf.uint8)  # <tf.Tensor 'DecodeRaw:0' shape=(?,) dtype=uint8>

-The relevant code
-
-```
-
-def train(total_loss, global_step):
-
-  """Train CIFAR-10 model.
-
-  Create an optimizer and apply to all trainable variables. Add moving
-
-  average for all trainable variables.
-
-  Args:
-
-    total_loss: Total loss from loss().
-
-    global_step: Integer Variable counting the number of training steps
-
-      processed.
-
-  Returns:
-
-    train_op: op for training.
-
-  """
-
-  # Variables that affect learning rate.
-
-
+  result.label = tf.cast(tf.strided_slice(record_bytes, [0], [label_bytes]), tf.int32)  # <tf.Tensor 'Cast:0' shape=(?,) dtype=int32>
-
-  decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)



-
+  depth_major = tf.reshape(tf.strided_slice(record_bytes, [label_bytes],

-  lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
+                           [label_bytes + image_bytes]), [result.depth, result.height, result.width])  # <tf.Tensor 'Reshape:0' shape=(3, 128, 128) dtype=uint8>

-                                  global_step,
-
-                                  decay_steps,
-
-                                  LEARNING_RATE_DECAY_FACTOR,
-
-                                  staircase=True)
-
-  tf.summary.scalar('learning_rate', lr)
+  result.uint8image = tf.transpose(depth_major, [1, 2, 0])  # <tf.Tensor 'transpose_1:0' shape=(128, 128, 3) dtype=uint8>



-  # Generate moving averages of all losses and associated summaries.
-
-  loss_averages_op = _add_loss_summaries(total_loss)
-
-
-
-  # Compute gradients.
-
-  with tf.control_dependencies([loss_averages_op]):
-
-    opt = tf.train.GradientDescentOptimizer(lr)
-
-    grads = opt.compute_gradients(total_loss)
-
-
-
-  # Apply gradients.
-
-  apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
-
-
-
-  # Add histograms for trainable variables.
-
-  for var in tf.trainable_variables():
-
-    tf.summary.histogram(var.op.name, var)
-
-
-
-  # Add histograms for gradients.
-
-  for grad, var in grads:
-
-    if grad is not None:
-
-      tf.summary.histogram(var.op.name + '/gradients', grad)
-
-
-
-  # Track the moving averages of all trainable variables.
-
-  variable_averages = tf.train.ExponentialMovingAverage(
-
-      MOVING_AVERAGE_DECAY, global_step)
-
-  variables_averages_op = variable_averages.apply(tf.trainable_variables())
-
-
-
-  with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
-
-    train_op = tf.no_op(name='train')
-
-
-
-  return train_op
+  return result

```
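The removed revision asked what ```opt.apply_gradients```, ```tf.control_dependencies```, and ```tf.no_op(name='train')``` do. Below is a minimal sketch of how those three calls fit together in TF1-style graph code; the variable ```x``` and the loss are stand-ins, not the question's model.

```
import tensorflow as tf  # TF1-style graph API, as in the question's code

x = tf.Variable(1.0)       # stand-in trainable variable
loss = tf.square(x - 3.0)  # stand-in loss

opt = tf.train.GradientDescentOptimizer(0.1)
grads = opt.compute_gradients(loss)             # list of (gradient, variable) pairs
apply_gradient_op = opt.apply_gradients(grads)  # op that applies the gradients to the variables

# tf.control_dependencies only orders execution: ops created inside the block
# run after apply_gradient_op has run.  tf.no_op computes nothing itself; it
# is a named handle that carries those dependencies, so running train_op
# forces the update it depends on to run first.
with tf.control_dependencies([apply_gradient_op]):
  train_op = tf.no_op(name='train')

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(train_op)  # x is updated through the declared dependency
```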