質問編集履歴
5
test
CHANGED
File without changes
|
test
CHANGED
@@ -2,4 +2,4 @@
|
|
2
2
|
|
3
3
|
|
4
4
|
|
5
|
-
なぜ
|
5
|
+
なぜ二つも存在するのでしょうか?
|
4
test
CHANGED
File without changes
|
test
CHANGED
@@ -1 +1,5 @@
|
|
1
1
|
conv2dメソッドのpaddingsにはzeroとvalidの二つがありますが、この二つをかえたケースでは、精度に違いは出るのでしょうか?
|
2
|
+
|
3
|
+
|
4
|
+
|
5
|
+
なぜこの二つが存在するのでしょうか?
|
3
test
CHANGED
File without changes
|
test
CHANGED
@@ -1,409 +1 @@
|
|
1
|
-
conv2dメソッドのpaddingsにはzeroとvalidの二つがありますが、この二つをかえた
|
2
|
-
|
3
|
-
|
4
|
-
|
5
|
-
|
6
|
-
|
7
|
-
|
8
|
-
|
9
|
-
またcaifar10でinceptionresnetv2を実行したとき下記のようなエラーが出ます。
|
10
|
-
|
11
|
-
|
12
|
-
|
13
|
-
```python
|
14
|
-
|
15
|
-
sess = K.get_session()
|
16
|
-
|
17
|
-
|
18
|
-
|
19
|
-
test_image, test_labels = input_data('/Users/tatsuyahagiwara/Downloads/test.tfrecords',128, distort=False) #image=[128,32,32,3] label=[128,10]
|
20
|
-
|
21
|
-
```
|
22
|
-
|
23
|
-
|
24
|
-
|
25
|
-
エラー
|
26
|
-
|
27
|
-
```python
|
28
|
-
|
29
|
-
InvalidArgumentError Traceback (most recent call last)
|
30
|
-
|
31
|
-
/anaconda3/envs/tensorflow/lib/python3.5/site-packages/tensorflow/python/framework/common_shapes.py in _call_cpp_shape_fn_impl(op, input_tensors_needed, input_tensors_as_shapes_needed, require_shape_fn)
|
32
|
-
|
33
|
-
685 graph_def_version, node_def_str, input_shapes, input_tensors,
|
34
|
-
|
35
|
-
--> 686 input_tensors_as_shapes, status)
|
36
|
-
|
37
|
-
687 except errors.InvalidArgumentError as err:
|
38
|
-
|
39
|
-
|
40
|
-
|
41
|
-
/anaconda3/envs/tensorflow/lib/python3.5/site-packages/tensorflow/python/framework/errors_impl.py in __exit__(self, type_arg, value_arg, traceback_arg)
|
42
|
-
|
43
|
-
515 compat.as_text(c_api.TF_Message(self.status.status)),
|
44
|
-
|
45
|
-
--> 516 c_api.TF_GetCode(self.status.status))
|
46
|
-
|
47
|
-
517 # Delete the underlying status object from memory otherwise it stays alive
|
48
|
-
|
49
|
-
|
50
|
-
|
51
|
-
InvalidArgumentError: Negative dimension size caused by subtracting 3 from 1 for 'conv2d_349/convolution' (op: 'Conv2D') with input shapes: [128,1,1,320], [3,3,320,384].
|
52
|
-
|
53
|
-
|
54
|
-
|
55
|
-
During handling of the above exception, another exception occurred:
|
56
|
-
|
57
|
-
|
58
|
-
|
59
|
-
ValueError Traceback (most recent call last)
|
60
|
-
|
61
|
-
<ipython-input-8-57784ad80619> in <module>()
|
62
|
-
|
63
|
-
3 test_image, test_labels = input_data('/Users/tatsuyahagiwara/Downloads/test.tfrecords',128, distort=False)
|
64
|
-
|
65
|
-
4 test_model_input = layers.Input(tensor=test_image)
|
66
|
-
|
67
|
-
----> 5 test_model_output = InceptionResNetV2(img_input=test_model_input)
|
68
|
-
|
69
|
-
6 test_model = Model(inputs=test_model_input, outputs=test_model_output, name='inception_resnet_v2')
|
70
|
-
|
71
|
-
7
|
72
|
-
|
73
|
-
|
74
|
-
|
75
|
-
<ipython-input-7-a281b3095889> in InceptionResNetV2(img_input, weights, pooling, classes)
|
76
|
-
|
77
|
-
32
|
78
|
-
|
79
|
-
33 # Mixed 6a (Reduction-A block): 17 x 17 x 1088
|
80
|
-
|
81
|
-
---> 34 branch_0 = conv2d_bn(x, 384, 3, strides=2, padding='valid')
|
82
|
-
|
83
|
-
35 branch_1 = conv2d_bn(x, 256, 1)
|
84
|
-
|
85
|
-
36 branch_1 = conv2d_bn(branch_1, 256, 3)
|
86
|
-
|
87
|
-
|
88
|
-
|
89
|
-
<ipython-input-2-1ddc007b0073> in conv2d_bn(x, filters, kernel_size, strides, padding, activation, use_bias, name)
|
90
|
-
|
91
|
-
2 padding='same', activation='relu', use_bias=False, name=None):
|
92
|
-
|
93
|
-
3 x = Conv2D(filters, kernel_size, strides=strides, padding=padding,
|
94
|
-
|
95
|
-
----> 4 use_bias=use_bias, name=name)(x)
|
96
|
-
|
97
|
-
5 if use_bias is False:
|
98
|
-
|
99
|
-
6 bn_axis = 1 if K.image_data_format() == 'channels_first' else 3
|
100
|
-
|
101
|
-
|
102
|
-
|
103
|
-
/anaconda3/envs/tensorflow/lib/python3.5/site-packages/keras/engine/topology.py in __call__(self, inputs, **kwargs)
|
104
|
-
|
105
|
-
617
|
106
|
-
|
107
|
-
618 # Actually call the layer, collecting output(s), mask(s), and shape(s).
|
108
|
-
|
109
|
-
--> 619 output = self.call(inputs, **kwargs)
|
110
|
-
|
111
|
-
620 output_mask = self.compute_mask(inputs, previous_mask)
|
112
|
-
|
113
|
-
621
|
114
|
-
|
115
|
-
|
116
|
-
|
117
|
-
/anaconda3/envs/tensorflow/lib/python3.5/site-packages/keras/layers/convolutional.py in call(self, inputs)
|
118
|
-
|
119
|
-
166 padding=self.padding,
|
120
|
-
|
121
|
-
167 data_format=self.data_format,
|
122
|
-
|
123
|
-
--> 168 dilation_rate=self.dilation_rate)
|
124
|
-
|
125
|
-
169 if self.rank == 3:
|
126
|
-
|
127
|
-
170 outputs = K.conv3d(
|
128
|
-
|
129
|
-
|
130
|
-
|
131
|
-
/anaconda3/envs/tensorflow/lib/python3.5/site-packages/keras/backend/tensorflow_backend.py in conv2d(x, kernel, strides, padding, data_format, dilation_rate)
|
132
|
-
|
133
|
-
3333 strides=strides,
|
134
|
-
|
135
|
-
3334 padding=padding,
|
136
|
-
|
137
|
-
-> 3335 data_format=tf_data_format)
|
138
|
-
|
139
|
-
3336
|
140
|
-
|
141
|
-
3337 if data_format == 'channels_first' and tf_data_format == 'NHWC':
|
142
|
-
|
143
|
-
|
144
|
-
|
145
|
-
/anaconda3/envs/tensorflow/lib/python3.5/site-packages/tensorflow/python/ops/nn_ops.py in convolution(input, filter, padding, strides, dilation_rate, name, data_format)
|
146
|
-
|
147
|
-
780 name=name,
|
148
|
-
|
149
|
-
781 data_format=data_format)
|
150
|
-
|
151
|
-
--> 782 return op(input, filter)
|
152
|
-
|
153
|
-
783
|
154
|
-
|
155
|
-
784
|
156
|
-
|
157
|
-
|
158
|
-
|
159
|
-
/anaconda3/envs/tensorflow/lib/python3.5/site-packages/tensorflow/python/ops/nn_ops.py in __call__(self, inp, filter)
|
160
|
-
|
161
|
-
868
|
162
|
-
|
163
|
-
869 def __call__(self, inp, filter): # pylint: disable=redefined-builtin
|
164
|
-
|
165
|
-
--> 870 return self.conv_op(inp, filter)
|
166
|
-
|
167
|
-
871
|
168
|
-
|
169
|
-
872
|
170
|
-
|
171
|
-
|
172
|
-
|
173
|
-
/anaconda3/envs/tensorflow/lib/python3.5/site-packages/tensorflow/python/ops/nn_ops.py in __call__(self, inp, filter)
|
174
|
-
|
175
|
-
520
|
176
|
-
|
177
|
-
521 def __call__(self, inp, filter): # pylint: disable=redefined-builtin
|
178
|
-
|
179
|
-
--> 522 return self.call(inp, filter)
|
180
|
-
|
181
|
-
523
|
182
|
-
|
183
|
-
524
|
184
|
-
|
185
|
-
|
186
|
-
|
187
|
-
/anaconda3/envs/tensorflow/lib/python3.5/site-packages/tensorflow/python/ops/nn_ops.py in __call__(self, inp, filter)
|
188
|
-
|
189
|
-
204 padding=self.padding,
|
190
|
-
|
191
|
-
205 data_format=self.data_format,
|
192
|
-
|
193
|
-
--> 206 name=self.name)
|
194
|
-
|
195
|
-
207
|
196
|
-
|
197
|
-
208
|
198
|
-
|
199
|
-
|
200
|
-
|
201
|
-
/anaconda3/envs/tensorflow/lib/python3.5/site-packages/tensorflow/python/ops/gen_nn_ops.py in conv2d(input, filter, strides, padding, use_cudnn_on_gpu, data_format, dilations, name)
|
202
|
-
|
203
|
-
951 "Conv2D", input=input, filter=filter, strides=strides,
|
204
|
-
|
205
|
-
952 padding=padding, use_cudnn_on_gpu=use_cudnn_on_gpu,
|
206
|
-
|
207
|
-
--> 953 data_format=data_format, dilations=dilations, name=name)
|
208
|
-
|
209
|
-
954 _result = _op.outputs[:]
|
210
|
-
|
211
|
-
955 _inputs_flat = _op.inputs
|
212
|
-
|
213
|
-
|
214
|
-
|
215
|
-
/anaconda3/envs/tensorflow/lib/python3.5/site-packages/tensorflow/python/framework/op_def_library.py in _apply_op_helper(self, op_type_name, name, **keywords)
|
216
|
-
|
217
|
-
785 op = g.create_op(op_type_name, inputs, output_types, name=scope,
|
218
|
-
|
219
|
-
786 input_types=input_types, attrs=attr_protos,
|
220
|
-
|
221
|
-
--> 787 op_def=op_def)
|
222
|
-
|
223
|
-
788 return output_structure, op_def.is_stateful, op
|
224
|
-
|
225
|
-
789
|
226
|
-
|
227
|
-
```
|
228
|
-
|
229
|
-
|
230
|
-
|
231
|
-
下記のコードのvalidのところを全てsameに変更するとエラーが消えます。
|
232
|
-
|
233
|
-
個人的には精度に影響が出なければどちらでも良いのですが、ちなみにkerasのバックエンドはtensorflowです
|
234
|
-
|
235
|
-
原因がわからないのですが、何か心当たりがあれば、ご教授お願いします
|
236
|
-
|
237
|
-
|
238
|
-
|
239
|
-
|
240
|
-
|
241
|
-
|
242
|
-
|
243
|
-
```python
|
244
|
-
|
245
|
-
def InceptionResNetV2(img_input, weights=None,
|
246
|
-
|
247
|
-
pooling=None, classes=10):
|
248
|
-
|
249
|
-
|
250
|
-
|
251
|
-
# Stem block: 35 x 35 x 192
|
252
|
-
|
253
|
-
x = conv2d_bn(img_input, 32, 3, strides=2, padding='valid')
|
254
|
-
|
255
|
-
x = conv2d_bn(x, 32, 3, padding='valid')
|
256
|
-
|
257
|
-
x = conv2d_bn(x, 64, 3)
|
258
|
-
|
259
|
-
x = MaxPooling2D(3, strides=2)(x)
|
260
|
-
|
261
|
-
x = conv2d_bn(x, 80, 1, padding='valid')
|
262
|
-
|
263
|
-
x = conv2d_bn(x, 192, 3, padding='valid')
|
264
|
-
|
265
|
-
x = MaxPooling2D(3, strides=2)(x)
|
266
|
-
|
267
|
-
|
268
|
-
|
269
|
-
# Mixed 5b (Inception-A block): 35 x 35 x 320
|
270
|
-
|
271
|
-
branch_0 = conv2d_bn(x, 96, 1)
|
272
|
-
|
273
|
-
branch_1 = conv2d_bn(x, 48, 1)
|
274
|
-
|
275
|
-
branch_1 = conv2d_bn(branch_1, 64, 5)
|
276
|
-
|
277
|
-
branch_2 = conv2d_bn(x, 64, 1)
|
278
|
-
|
279
|
-
branch_2 = conv2d_bn(branch_2, 96, 3)
|
280
|
-
|
281
|
-
branch_2 = conv2d_bn(branch_2, 96, 3)
|
282
|
-
|
283
|
-
branch_pool = AveragePooling2D(3, strides=1, padding='same')(x)
|
284
|
-
|
285
|
-
branch_pool = conv2d_bn(branch_pool, 64, 1)
|
286
|
-
|
287
|
-
branches = [branch_0, branch_1, branch_2, branch_pool]
|
288
|
-
|
289
|
-
channel_axis = 1 if K.image_data_format() == 'channels_first' else 3
|
290
|
-
|
291
|
-
x = Concatenate(axis=channel_axis, name='mixed_5b')(branches)
|
292
|
-
|
293
|
-
|
294
|
-
|
295
|
-
# 10x block35 (Inception-ResNet-A block): 35 x 35 x 320
|
296
|
-
|
297
|
-
for block_idx in range(1, 11):
|
298
|
-
|
299
|
-
x = inception_resnet_block(x,
|
300
|
-
|
301
|
-
scale=0.17,
|
302
|
-
|
303
|
-
block_type='block35',
|
304
|
-
|
305
|
-
block_idx=block_idx)
|
306
|
-
|
307
|
-
|
308
|
-
|
309
|
-
# Mixed 6a (Reduction-A block): 17 x 17 x 1088
|
310
|
-
|
311
|
-
branch_0 = conv2d_bn(x, 384, 3, strides=2, padding='valid')
|
312
|
-
|
313
|
-
branch_1 = conv2d_bn(x, 256, 1)
|
314
|
-
|
315
|
-
branch_1 = conv2d_bn(branch_1, 256, 3)
|
316
|
-
|
317
|
-
branch_1 = conv2d_bn(branch_1, 384, 3, strides=2, padding='valid')
|
318
|
-
|
319
|
-
branch_pool = MaxPooling2D(3, strides=2, padding='valid')(x)
|
320
|
-
|
321
|
-
branches = [branch_0, branch_1, branch_pool]
|
322
|
-
|
323
|
-
x = Concatenate(axis=channel_axis, name='mixed_6a')(branches)
|
324
|
-
|
325
|
-
|
326
|
-
|
327
|
-
# 20x block17 (Inception-ResNet-B block): 17 x 17 x 1088
|
328
|
-
|
329
|
-
for block_idx in range(1, 21):
|
330
|
-
|
331
|
-
x = inception_resnet_block(x,
|
332
|
-
|
333
|
-
scale=0.1,
|
334
|
-
|
335
|
-
block_type='block17',
|
336
|
-
|
337
|
-
block_idx=block_idx)
|
338
|
-
|
339
|
-
|
340
|
-
|
341
|
-
# Mixed 7a (Reduction-B block): 8 x 8 x 2080
|
342
|
-
|
343
|
-
branch_0 = conv2d_bn(x, 256, 1)
|
344
|
-
|
345
|
-
branch_0 = conv2d_bn(branch_0, 384, 3, strides=2, padding='valid')
|
346
|
-
|
347
|
-
branch_1 = conv2d_bn(x, 256, 1)
|
348
|
-
|
349
|
-
branch_1 = conv2d_bn(branch_1, 288, 3, strides=2, padding='valid')
|
350
|
-
|
351
|
-
branch_2 = conv2d_bn(x, 256, 1)
|
352
|
-
|
353
|
-
branch_2 = conv2d_bn(branch_2, 288, 3)
|
354
|
-
|
355
|
-
branch_2 = conv2d_bn(branch_2, 320, 3, strides=2, padding='valid')
|
356
|
-
|
357
|
-
branch_pool = MaxPooling2D(3, strides=2, padding='valid')(x)
|
358
|
-
|
359
|
-
branches = [branch_0, branch_1, branch_2, branch_pool]
|
360
|
-
|
361
|
-
x = Concatenate(axis=channel_axis, name='mixed_7a')(branches)
|
362
|
-
|
363
|
-
|
364
|
-
|
365
|
-
# 10x block8 (Inception-ResNet-C block): 8 x 8 x 2080
|
366
|
-
|
367
|
-
for block_idx in range(1, 10):
|
368
|
-
|
369
|
-
x = inception_resnet_block(x,
|
370
|
-
|
371
|
-
scale=0.2,
|
372
|
-
|
373
|
-
block_type='block8',
|
374
|
-
|
375
|
-
block_idx=block_idx)
|
376
|
-
|
377
|
-
x = inception_resnet_block(x,
|
378
|
-
|
379
|
-
scale=1.,
|
380
|
-
|
381
|
-
activation=None,
|
382
|
-
|
383
|
-
block_type='block8',
|
384
|
-
|
385
|
-
block_idx=10)
|
386
|
-
|
387
|
-
|
388
|
-
|
389
|
-
# Final convolution block: 8 x 8 x 1536
|
390
|
-
|
391
|
-
x = conv2d_bn(x, 1536, 1, name='conv_7b')
|
392
|
-
|
393
|
-
|
394
|
-
|
395
|
-
|
396
|
-
|
397
|
-
# Classification block
|
398
|
-
|
399
|
-
x = GlobalAveragePooling2D(name='avg_pool')(x)
|
400
|
-
|
401
|
-
return Dense(classes, activation='softmax', name='predictions')(x)
|
402
|
-
|
403
|
-
```
|
404
|
-
|
405
|
-
|
406
|
-
|
407
|
-
#追記
|
408
|
-
|
409
|
-
ちなみに他のデータだとvalidでも問題なく動作しました。cifar10の中にチャンネル数が1のものが入っているということでしょうか?
|
1
|
+
conv2dメソッドのpaddingsにはzeroとvalidの二つがありますが、この二つをかえたケースでは、精度に違いは出るのでしょうか?
|
2
編集
test
CHANGED
File without changes
|
test
CHANGED
@@ -250,7 +250,7 @@
|
|
250
250
|
|
251
251
|
# Stem block: 35 x 35 x 192
|
252
252
|
|
253
|
-
x = conv2d_bn(img_input, 32, 3, strides=2, padding='
|
253
|
+
x = conv2d_bn(img_input, 32, 3, strides=2, padding='valid')
|
254
254
|
|
255
255
|
x = conv2d_bn(x, 32, 3, padding='valid')
|
256
256
|
|
1
追記
test
CHANGED
File without changes
|
test
CHANGED
@@ -401,3 +401,9 @@
|
|
401
401
|
return Dense(classes, activation='softmax', name='predictions')(x)
|
402
402
|
|
403
403
|
```
|
404
|
+
|
405
|
+
|
406
|
+
|
407
|
+
#追記
|
408
|
+
|
409
|
+
ちなみに他のデータだとvalidでも問題なく動作しました。cifar10の中にチャンネル数が1のものが入っているということでしょうか?
|