Background
I was trying to run training for a 4-class image classification model following the Keras workflow when the error message below was raised.
Problem / error message
```
Epoch 1/50
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-174-04e2080d51ec> in <module>
      3     validation_data=val_ds,
      4     batch_size=32,
----> 5     epochs=50
      6 )

1 frames
/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py in error_handler(*args, **kwargs)
     65     except Exception as e:  # pylint: disable=broad-except
     66       filtered_tb = _process_traceback_frames(e.__traceback__)
---> 67       raise e.with_traceback(filtered_tb) from None
     68     finally:
     69       del filtered_tb

/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/func_graph.py in autograph_handler(*args, **kwargs)
   1145           except Exception as e:  # pylint:disable=broad-except
   1146             if hasattr(e, "ag_error_metadata"):
-> 1147               raise e.ag_error_metadata.to_exception(e)
   1148             else:
   1149               raise

ValueError: in user code:

    File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1021, in train_function  *
        return step_function(self, iterator)
    File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1010, in step_function  **
        outputs = model.distribute_strategy.run(run_step, args=(data,))
    File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1000, in run_step  **
        outputs = model.train_step(data)
    File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 860, in train_step
        loss = self.compute_loss(x, y, y_pred, sample_weight)
    File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 919, in compute_loss
        y, y_pred, sample_weight, regularization_losses=self.losses)
    File "/usr/local/lib/python3.7/dist-packages/keras/engine/compile_utils.py", line 201, in __call__
        loss_value = loss_obj(y_t, y_p, sample_weight=sw)
    File "/usr/local/lib/python3.7/dist-packages/keras/losses.py", line 141, in __call__
        losses = call_fn(y_true, y_pred)
    File "/usr/local/lib/python3.7/dist-packages/keras/losses.py", line 245, in call  **
        return ag_fn(y_true, y_pred, **self._fn_kwargs)
    File "/usr/local/lib/python3.7/dist-packages/keras/losses.py", line 1790, in categorical_crossentropy
        y_true, y_pred, from_logits=from_logits, axis=axis)
    File "/usr/local/lib/python3.7/dist-packages/keras/backend.py", line 5083, in categorical_crossentropy
        target.shape.assert_is_compatible_with(output.shape)

    ValueError: Shapes (None, 1) and (None, 4) are incompatible
```
Relevant source code
```python
# Split the data
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="training",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size)
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="validation",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size)

# Normalization
AUTOTUNE = tf.data.AUTOTUNE

train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)

normalization_layer = layers.experimental.preprocessing.Rescaling(1./255)

normalized_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))
image_batch, labels_batch = next(iter(normalized_ds))
first_image = image_batch[0]
print(np.min(first_image), np.max(first_image))

num_classes = 4

def conv2d(filters, kernel_size, strides=1, bias_init=1, **kwargs):
    trunc = TruncatedNormal(mean=0.0, stddev=0.01)
    cnst = Constant(value=bias_init)
    return Conv2D(
        filters,
        kernel_size,
        strides=strides,
        padding='same',
        activation='relu',
        kernel_initializer=trunc,
        bias_initializer=cnst,
        **kwargs
    )

def dense(units, **kwargs):
    trunc = TruncatedNormal(mean=0.0, stddev=0.01)
    cnst = Constant(value=1)
    return Dense(
        units,
        activation='tanh',
        kernel_initializer=trunc,
        bias_initializer=cnst,
        **kwargs
    )

def AlexNet():
    model = Sequential()

    # 1st convolutional layer
    model.add(conv2d(96, 11, strides=(4, 4), bias_init=0, input_shape=(224, 224, 3)))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
    model.add(BatchNormalization())

    # 2nd convolutional layer
    model.add(conv2d(256, 5, bias_init=1))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
    model.add(BatchNormalization())

    # 3rd to 5th convolutional layers
    model.add(conv2d(384, 3, bias_init=0))
    model.add(conv2d(384, 3, bias_init=1))
    model.add(conv2d(256, 3, bias_init=1))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
    model.add(BatchNormalization())

    # Fully connected layers
    model.add(Flatten())
    model.add(dense(4096))
    model.add(Dropout(0.5))
    model.add(dense(4096))
    model.add(Dropout(0.5))

    # Output layer
    model.add(Dense(4, activation='softmax'))

    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    return model

model = AlexNet()

# Check the model architecture
model.summary()

# Training
history = model.fit(
    train_ds,
    validation_data=val_ds,
    batch_size=32,
    epochs=50
)
```
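For reference, here is a minimal, self-contained sketch of where those two shapes come from. The code above does not pass `label_mode` to `image_dataset_from_directory`, so the datasets fall back to the default `label_mode='int'` and yield integer class indices; the stand-in tensors below are made up for illustration (not taken from the actual data) and show that `categorical_crossentropy` rejects such targets against a 4-unit softmax output, while `sparse_categorical_crossentropy` accepts them.

```python
import tensorflow as tf

# Hypothetical mini-batch of 8 samples for a 4-class problem, shaped like the
# labels that label_mode='int' produces (integer class indices, one per sample).
int_labels = tf.constant([[0], [2], [1], [3], [0], [1], [2], [3]])  # shape (8, 1)

# Stand-in for the model's 4-way softmax output: shape (8, 4).
y_pred = tf.nn.softmax(tf.random.uniform((8, 4)))

# categorical_crossentropy expects one-hot targets of shape (batch, 4), so its
# internal shape check fails, just like in the traceback above.
try:
    tf.keras.losses.categorical_crossentropy(int_labels, y_pred)
except ValueError as e:
    # e.g. "Shapes (8, 1) and (8, 4) are incompatible" (wording may vary by Keras version)
    print("categorical_crossentropy:", e)

# sparse_categorical_crossentropy takes the integer class indices as-is.
sparse_loss = tf.keras.losses.sparse_categorical_crossentropy(
    tf.squeeze(int_labels, axis=1), y_pred)
print("sparse loss shape:", sparse_loss.shape)  # (8,)

# One-hot targets of shape (batch, 4) are what categorical_crossentropy expects.
one_hot = tf.one_hot(tf.squeeze(int_labels, axis=1), depth=4)
cat_loss = tf.keras.losses.categorical_crossentropy(one_hot, y_pred)
print("categorical loss shape:", cat_loss.shape)  # (8,)
```

If that mismatch is indeed the cause here, the two usual adjustments are either passing `label_mode='categorical'` when building the datasets or compiling the model with `loss='sparse_categorical_crossentropy'`; which one fits better depends on the rest of the pipeline.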
