Question edit history

2

Updated the code to the current version

2020/12/18 17:10

Posted

MyuW

Score 0

test CHANGED
File without changes
test CHANGED
@@ -28,12 +28,12 @@

  ・The Keras library bundled with TensorFlow 2.0.0

+ ・Python 3.6.9
+


  ### Source code (predict_camera.py)

-
-
  ```Python

  import os, sys
@@ -44,12 +44,12 @@

  import numpy as np

+ import tensorflow as tf
+
  from tensorflow.keras.utils import to_categorical

  from sklearn.model_selection import train_test_split

- import tensorflow as tf
-
  import tensorflow.keras.callbacks

  from tensorflow.keras.applications.vgg16 import VGG16
@@ -60,6 +60,8 @@

  from tensorflow.keras.optimizers import Nadam

+ from tensorflow.keras.callbacks import EarlyStopping
+


  # for TPU
@@ -74,18 +76,22 @@




+ tf.compat.v1.disable_v2_behavior()
+
+ tf.compat.v1.disable_eager_execution()
+


  CATEGORIES = 35

  frames = 100

- channels = 1
-
  rows = 100

  columns = 100

+ channels = 3
+


  folder = ["00", "01", "02"]#, "03", "04", "05", "06", "07", "08", "09",
@@ -134,7 +140,7 @@

  image = Image.open(file)

- #image = image.convert("RGB")
+ image = image.convert("RGB")

  data = np.asarray(image)
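The hunk above re-enables the RGB conversion: VGG16's ImageNet weights expect 3-channel input, which is what the ValueError in revision 1 further down complains about. A minimal sketch of the per-frame loading step these lines imply (the helper name load_frame is hypothetical):

```Python
# Sketch only: per-frame preprocessing implied by the hunk above.
# Every frame is forced to RGB so it matches VGG16's 3-channel input.
from PIL import Image
import numpy as np

def load_frame(file):
    image = Image.open(file)
    image = image.convert("RGB")   # grayscale/palette image -> 3 channels
    return np.asarray(image)       # array of shape (rows, columns, 3), dtype uint8
```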

@@ -162,21 +168,31 @@




+ print(x_train.shape)
+
+ print(y_train.shape)
+
+ print(x_test.shape)
+
+ print(y_test.shape)
+
+
+
  def build_model():

  video = Input(shape=(frames,

- channels,
-
  rows,

- columns))
-
- cnn_base = VGG16(input_shape=(channels,
-
- rows,
-
- columns),
+ columns,
+
+ channels))
+
+ cnn_base = VGG16(input_shape=(rows,
+
+ columns,
+
+ channels),

  weights="imagenet",

@@ -184,7 +200,7 @@

  cnn_out = GlobalAveragePooling2D()(cnn_base.output)

- cnn = Model(input=cnn_base.input, output=cnn_out)
+ cnn = Model(inputs=cnn_base.input, outputs=cnn_out)

  cnn.trainable = False

@@ -192,9 +208,9 @@

  encoded_sequence = LSTM(256)(encoded_frames)

- hidden_layer = Dense(output_dim=1024, activation="relu")(encoded_sequence)
+ hidden_layer = Dense(1024, activation="relu")(encoded_sequence)
-
+
- outputs = Dense(output_dim=CATEGORIES, activation="softmax")(hidden_layer)
+ outputs = Dense(CATEGORIES, activation="softmax")(hidden_layer)

  model = Model([video], outputs)
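Pulled together, the changed lines above give the channels-last version of build_model() below. This is only a sketch assembled from the diff, not the full predict_camera.py; the TimeDistributed wrapper that produces encoded_frames is inferred from the model summary shown further down.

```Python
# Sketch: channels-last VGG16 + LSTM model assembled from the hunks above.
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.layers import (Input, Dense, LSTM, TimeDistributed,
                                     GlobalAveragePooling2D)
from tensorflow.keras.models import Model

CATEGORIES = 35
frames, rows, columns, channels = 100, 100, 100, 3

def build_model():
    # (frames, rows, columns, channels): channels-last, as VGG16's
    # ImageNet weights expect, instead of the old channels-first shape.
    video = Input(shape=(frames, rows, columns, channels))
    cnn_base = VGG16(input_shape=(rows, columns, channels),
                     weights="imagenet",
                     include_top=False)
    cnn_out = GlobalAveragePooling2D()(cnn_base.output)
    cnn = Model(inputs=cnn_base.input, outputs=cnn_out)  # TF2 keyword names
    cnn.trainable = False
    encoded_frames = TimeDistributed(cnn)(video)   # run VGG16 on every frame
    encoded_sequence = LSTM(256)(encoded_frames)
    hidden_layer = Dense(1024, activation="relu")(encoded_sequence)
    outputs = Dense(CATEGORIES, activation="softmax")(hidden_layer)
    return Model([video], outputs)
```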

@@ -222,6 +238,8 @@

  model = build_model()

+
+
  model.summary()


@@ -242,6 +260,8 @@

  callbacks=[early_stopping])

+
+
  evaluation=model.evaluate(x_test, y_test, batch_size=batch, verbose=1)


@@ -252,146 +272,62 @@




- It may be that the input is not being fed in correctly in the first place.
-
- Also, since memory runs out, I think the preprocessing needs to be applied at load time.
-
-
-
- I apologize that this question has so many issues at once, but I would appreciate any pointers.
-
-
-
- ### Addendum
+ ### Current behavior of the program
-
- Based on the advice from jbpb0, the following error message is currently returned.

  ```

- Traceback (most recent call last):
-
- File "predict_camera.py", line 112, in <module>
-
- model = build_model()
-
- File "predict_camera.py", line 82, in build_model
-
- include_top=False)
-
- File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/applications/__init__.py", line 49, in wrapper
-
- return base_fun(*args, **kwargs)
-
- File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/applications/vgg16.py", line 32, in VGG16
-
- return vgg16.VGG16(*args, **kwargs)
-
- File "/usr/local/lib/python3.6/dist-packages/keras_applications/vgg16.py", line 112, in VGG16
-
- name='block1_conv1')(img_input)
-
- File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/engine/base_layer.py", line 817, in __call__
-
- self._maybe_build(inputs)
-
- File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/engine/base_layer.py", line 2141, in _maybe_build
-
- self.build(input_shapes)
-
- File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/layers/convolutional.py", line 165, in build
-
- dtype=self.dtype)
-
- File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/engine/base_layer.py", line 522, in add_weight
-
- aggregation=aggregation)
-
- File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/training/tracking/base.py", line 744, in _add_variable_with_custom_getter
-
- **kwargs_for_getter)
-
- File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/engine/base_layer_utils.py", line 139, in make_variable
-
- shape=variable_shape if variable_shape else None)
-
- File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/variables.py", line 258, in __call__
-
- return cls._variable_v1_call(*args, **kwargs)
-
- File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/variables.py", line 219, in _variable_v1_call
-
- shape=shape)
-
- File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/variables.py", line 197, in <lambda>
-
- previous_getter = lambda **kwargs: default_variable_creator(None, **kwargs)
-
- File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/variable_scope.py", line 2507, in default_variable_creator
-
- shape=shape)
-
- File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/variables.py", line 262, in __call__
-
- return super(VariableMetaclass, cls).__call__(*args, **kwargs)
-
- File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/resource_variable_ops.py", line 1406, in __init__
-
- distribute_strategy=distribute_strategy)
-
- File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/resource_variable_ops.py", line 1537, in _init_from_args
-
- initial_value() if init_from_fn else initial_value,
-
- File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/engine/base_layer_utils.py", line 119, in <lambda>
-
- init_val = lambda: initializer(shape, dtype=dtype)
-
- File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/init_ops_v2.py", line 437, in __call__
-
- return self._random_generator.random_uniform(shape, -limit, limit, dtype)
-
- File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/init_ops_v2.py", line 800, in random_uniform
-
- shape=shape, minval=minval, maxval=maxval, dtype=dtype, seed=self.seed)
-
- File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/random_ops.py", line 238, in random_uniform
-
- minval = ops.convert_to_tensor(minval, dtype=dtype, name="min")
-
- File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/ops.py", line 1184, in convert_to_tensor
-
- return convert_to_tensor_v2(value, dtype, preferred_dtype, name)
-
- File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/ops.py", line 1242, in convert_to_tensor_v2
-
- as_ref=False)
-
- File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/ops.py", line 1296, in internal_convert_to_tensor
-
- ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
-
- File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/tensor_conversion_registry.py", line 52, in _default_conversion_function
-
- return constant_op.constant(value, dtype, name=name)
-
- File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/constant_op.py", line 227, in constant
-
- allow_broadcast=True)
-
- File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/constant_op.py", line 235, in _constant_impl
-
- t = convert_to_eager_tensor(value, ctx, dtype)
-
- File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/constant_op.py", line 96, in convert_to_eager_tensor
-
- return ops.EagerTensor(value, ctx.device_name, dtype)
-
- RuntimeError:
-
- Additional GRPC error information:
-
- {"created":"@1608290676.637613791","description":"Error received from peer","file":"external/grpc/src/core/lib/surface/call.cc","file_line":1039,"grpc_message":"","grpc_status":12}
+ Model: "model_1"
+
+ _________________________________________________________________
+
+ Layer (type)                 Output Shape              Param #
+
+ =================================================================
+
+ input_1 (InputLayer)         [(None, 100, 100, 100, 3) 0
+
+ _________________________________________________________________
+
+ time_distributed (TimeDistri (None, 100, 512)          14714688
+
+ _________________________________________________________________
+
+ lstm (LSTM)                  (None, 256)                787456
+
+ _________________________________________________________________
+
+ dense (Dense)                (None, 1024)               263168
+
+ _________________________________________________________________
+
+ dense_1 (Dense)              (None, 35)                 35875
+
+ =================================================================
+
+ Total params: 15,801,187
+
+ Trainable params: 1,086,499
+
+ Non-trainable params: 14,714,688
+
+ _________________________________________________________________
+
+ Train on 67 samples, validate on 17 samples
+
+ Epoch 1/100
+
+ 2020-12-18 17:01:29.295779: W tensorflow/core/framework/cpu_allocator_impl.cc:81] Allocation of 8192000000 exceeds 10% of system memory.
+
+ tcmalloc: large alloc 8192000000 bytes == 0xc62d4000 @ 0x7fd92effbb6b 0x7fd92f01b379 0x7fd9175cfc27 0x7fd9173c2a7f 0x7fd91728e3cb 0x7fd917254526 0x7fd9172553b3 0x7fd917255583 0x7fd91dec45b1 0x7fd9174f5afc 0x7fd9174e8205 0x7fd9175a8811 0x7fd9175a5f08 0x7fd92d8fb6df 0x7fd92e9dd6db 0x7fd92ed1671f
+
+ tcmalloc: large alloc 3456065536 bytes == 0x2aef54000 @ 0x7fd92f0191e7 0x7fd91b034ab2 0x7fd91da96e8a 0x7fd91de97282 0x7fd91de98afd 0x7fd91dec089e 0x7fd91dec3d76 0x7fd91dec4837 0x7fd9174f5afc 0x7fd9174e8205 0x7fd9175a8811 0x7fd9175a5f08 0x7fd92d8fb6df 0x7fd92e9dd6db 0x7fd92ed1671f
+
+ 2020-12-18 17:01:43.445971: W tensorflow/core/framework/cpu_allocator_impl.cc:81] Allocation of 8192000000 exceeds 10% of system memory.
+
+ tcmalloc: large alloc 8192000000 bytes == 0x2aef54000 @ 0x7fd92effbb6b 0x7fd92f01b379 0x7fd9175cfc27 0x7fd9173c2a7f 0x7fd91728e3cb 0x7fd917254526 0x7fd9172553b3 0x7fd917255583 0x7fd91dec45b1 0x7fd9174f5afc 0x7fd9174e8205 0x7fd9175a8811 0x7fd9175a5f08 0x7fd92d8fb6df 0x7fd92e9dd6db 0x7fd92ed1671f
+
+ tcmalloc: large alloc 73728262144 bytes == 0x565bca000 @ 0x7fd92f0191e7 0x7fd91b034ab2 0x7fd91da96e8a 0x7fd91de97282 0x7fd91de98afd 0x7fd91dec089e 0x7fd91dec3d76 0x7fd91dec4837 0x7fd9174f5afc 0x7fd9174e8205 0x7fd9175a8811 0x7fd9175a5f08 0x7fd92d8fb6df 0x7fd92e9dd6db 0x7fd92ed1671f
+
+ ^C (forced termination)

  ```
-
- I am currently looking into this on my own as well.
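The tcmalloc lines above show multi-gigabyte allocations as soon as training starts, which is what the removed note about preprocessing at load time was pointing at. Below is a minimal sketch of one common workaround, feeding batches from a generator so only one batch is cast to float32 at a time; this is not the question's actual fix, and it assumes x_train / y_train are the NumPy arrays built by the loading loop and that batch is the batch size used in the script.

```Python
# Sketch only: convert one batch at a time instead of casting the whole
# (samples, frames, rows, columns, channels) array to float32 up front.
import numpy as np

def batch_generator(x, y, batch_size):
    while True:                                   # Keras generators loop forever
        order = np.random.permutation(len(x))
        for start in range(0, len(x), batch_size):
            sel = order[start:start + batch_size]
            # per-batch cast; add any scaling/normalization here as well
            yield x[sel].astype("float32"), y[sel]

# e.g. model.fit_generator(batch_generator(x_train, y_train, batch),
#                          steps_per_epoch=len(x_train) // batch,
#                          epochs=100, callbacks=[early_stopping])
```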

1

Edited everything after the addendum

2020/12/18 17:10

Posted

MyuW

Score 0

test CHANGED
File without changes
test CHANGED
@@ -252,11 +252,19 @@




+ It may be that the input is not being fed in correctly in the first place.
+
+ Also, since memory runs out, I think the preprocessing needs to be applied at load time.
+
+
+
+ I apologize that this question has so many issues at once, but I would appreciate any pointers.
+
+
+
- ### Error message
+ ### Addendum
-
- With the current loading method, loading all of the images runs out of memory, so only the images in the 00, 01, and 02 directories are read in.
+
-
- This is the error message that was returned at that point.
+ Based on the advice from jbpb0, the following error message is currently returned.

  ```

@@ -278,24 +286,112 @@

  return vgg16.VGG16(*args, **kwargs)

- File "/usr/local/lib/python3.6/dist-packages/keras_applications/vgg16.py", line 99, in VGG16
+ File "/usr/local/lib/python3.6/dist-packages/keras_applications/vgg16.py", line 112, in VGG16
+
-
+ name='block1_conv1')(img_input)
+
+ File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/engine/base_layer.py", line 817, in __call__
+
+ self._maybe_build(inputs)
+
+ File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/engine/base_layer.py", line 2141, in _maybe_build
+
+ self.build(input_shapes)
+
+ File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/layers/convolutional.py", line 165, in build
+
+ dtype=self.dtype)
+
+ File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/engine/base_layer.py", line 522, in add_weight
+
+ aggregation=aggregation)
+
+ File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/training/tracking/base.py", line 744, in _add_variable_with_custom_getter
+
- weights=weights)
+ **kwargs_for_getter)
+
-
+ File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/engine/base_layer_utils.py", line 139, in make_variable
+
+ shape=variable_shape if variable_shape else None)
+
+ File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/variables.py", line 258, in __call__
+
+ return cls._variable_v1_call(*args, **kwargs)
+
- File "/usr/local/lib/python3.6/dist-packages/keras_applications/imagenet_utils.py", line 316, in _obtain_input_shape
+ File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/variables.py", line 219, in _variable_v1_call
+
-
+ shape=shape)
+
+ File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/variables.py", line 197, in <lambda>
+
+ previous_getter = lambda **kwargs: default_variable_creator(None, **kwargs)
+
+ File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/variable_scope.py", line 2507, in default_variable_creator
+
+ shape=shape)
+
+ File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/variables.py", line 262, in __call__
+
+ return super(VariableMetaclass, cls).__call__(*args, **kwargs)
+
+ File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/resource_variable_ops.py", line 1406, in __init__
+
+ distribute_strategy=distribute_strategy)
+
+ File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/resource_variable_ops.py", line 1537, in _init_from_args
+
+ initial_value() if init_from_fn else initial_value,
+
+ File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/engine/base_layer_utils.py", line 119, in <lambda>
+
- '`input_shape=' + str(input_shape) + '`')
+ init_val = lambda: initializer(shape, dtype=dtype)
+
-
+ File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/init_ops_v2.py", line 437, in __call__
+
+ return self._random_generator.random_uniform(shape, -limit, limit, dtype)
+
+ File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/init_ops_v2.py", line 800, in random_uniform
+
- ValueError: The input must have 3 channels; got `input_shape=(1, 100, 100)`
+ shape=shape, minval=minval, maxval=maxval, dtype=dtype, seed=self.seed)
+
+ File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/random_ops.py", line 238, in random_uniform
+
+ minval = ops.convert_to_tensor(minval, dtype=dtype, name="min")
+
+ File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/ops.py", line 1184, in convert_to_tensor
+
+ return convert_to_tensor_v2(value, dtype, preferred_dtype, name)
+
+ File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/ops.py", line 1242, in convert_to_tensor_v2
+
+ as_ref=False)
+
+ File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/ops.py", line 1296, in internal_convert_to_tensor
+
+ ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
+
+ File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/tensor_conversion_registry.py", line 52, in _default_conversion_function
+
+ return constant_op.constant(value, dtype, name=name)
+
+ File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/constant_op.py", line 227, in constant
+
+ allow_broadcast=True)
+
+ File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/constant_op.py", line 235, in _constant_impl
+
+ t = convert_to_eager_tensor(value, ctx, dtype)
+
+ File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/constant_op.py", line 96, in convert_to_eager_tensor
+
+ return ops.EagerTensor(value, ctx.device_name, dtype)
+
+ RuntimeError:
+
+ Additional GRPC error information:
+
+ {"created":"@1608290676.637613791","description":"Error received from peer","file":"external/grpc/src/core/lib/surface/call.cc","file_line":1039,"grpc_message":"","grpc_status":12}

  ```

-
-
- It may be that the input is not being fed in correctly in the first place.
+ I am currently looking into this on my own as well.
-
- Also, since memory runs out, I think the preprocessing needs to be applied at load time.
-
-
-
- I apologize that this question has so many issues at once, but I would appreciate any pointers.