発生している問題・エラーメッセージ
cnnを用いて画像分類を行っています。
エラーの意味分かるのですがどのコードが間違っているか分かりません。おそらくOne-hot表現の部分が間違っているのではないかと思っているのですが、、プログラミング初心者でよくわかりません。よろしくお願いします。
--------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-86-1097be2f782e> in <module>() 5 validation_data=(X_test, Y_test), 6 verbose=1, ----> 7 batch_size=10) ~\Anaconda3\lib\site-packages\tensorflow_core\python\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, max_queue_size, workers, use_multiprocessing, **kwargs) 726 max_queue_size=max_queue_size, 727 workers=workers, --> 728 use_multiprocessing=use_multiprocessing) 729 730 def evaluate(self, ~\Anaconda3\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py in fit(self, model, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, **kwargs) 222 validation_data=validation_data, 223 validation_steps=validation_steps, --> 224 distribution_strategy=strategy) 225 226 total_samples = _get_total_number_of_samples(training_data_adapter) ~\Anaconda3\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py in _process_training_inputs(model, x, y, batch_size, epochs, sample_weights, class_weights, steps_per_epoch, validation_split, validation_data, validation_steps, shuffle, distribution_strategy, max_queue_size, workers, use_multiprocessing) 545 max_queue_size=max_queue_size, 546 workers=workers, --> 547 use_multiprocessing=use_multiprocessing) 548 val_adapter = None 549 if validation_data: ~\Anaconda3\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py in _process_inputs(model, x, y, batch_size, epochs, sample_weights, class_weights, shuffle, steps, distribution_strategy, max_queue_size, workers, use_multiprocessing) 592 batch_size=batch_size, 593 check_steps=False, --> 594 steps=steps) 595 adapter = adapter_cls( 596 x, ~\Anaconda3\lib\site-packages\tensorflow_core\python\keras\engine\training.py in _standardize_user_data(self, x, y, sample_weight, class_weight, batch_size, check_steps, steps_name, steps, validation_split, shuffle, extract_tensors_from_dataset) 2532 # Check that all arrays have the same length. 2533 if not self._distribution_strategy: -> 2534 training_utils.check_array_lengths(x, y, sample_weights) 2535 if self._is_graph_network and not self.run_eagerly: 2536 # Additional checks to avoid users mistakenly using improper loss fns. ~\Anaconda3\lib\site-packages\tensorflow_core\python\keras\engine\training_utils.py in check_array_lengths(inputs, targets, weights) 675 'the same number of samples as target arrays. ' 676 'Found ' + str(list(set_x)[0]) + ' input samples ' --> 677 'and ' + str(list(set_y)[0]) + ' target samples.') 678 if len(set_w) > 1: 679 raise ValueError('All sample_weight arrays should have ' ValueError: Input arrays should have the same number of samples as target arrays. Found 900 input samples and 9000 target samples.
該当のソースコード
Python3
1#CIFAR10からダウンロードして画像処理と水増し 2#X_train.shape: (900, 32, 32, 3) 3#X_test.shape: (50, 32, 32, 3) 4#y_train.shape: (900, 1) 5#y_test.shape: (50, 1) 6#Kerasが処理できる数値型に X_train と X_test を変換 7X_train = X_train.transpose([0, 3, 1, 2]) 8X_test = X_test.transpose([0, 3, 1, 2]) 9X_train = X_train.astype('f') 10X_test = X_test.astype('f')Python3 11#正規化 12X_train /= X_train.max() 13X_test /= X_test.max() 14#one-hot表現 15from keras.utils import np_utils 16Y_train = np_utils.to_categorical(y_train, num_classes=10).astype('i') 17Y_test = np_utils.to_categorical(y_test, num_classes=10).astype('i') 18print("Y_train.shape: ",Y_train.shape) 19print("Y_test.shape: ",Y_test.shape) 20 21#CNNモデルの定義 22model = Sequential() 23model.add(Conv2D(kernel_size=(3, 3),filters=32,activation='relu',input_shape=input_shape)) 24model.add(Conv2D( kernel_size=(3, 3),filters=64,activation='relu',)) 25model.add(MaxPooling2D(pool_size=(2, 2))) 26model.add(Dropout(0.25)) 27model.add(Dense(units=128, activation='relu')) 28model.add(Dropout(0.50)) 29model.add(Dense(units=num_classes, activation='softmax')) 30model.summary() 31 32model.compile(loss='categorical_crossentropy', 33 metrics=['accuracy']) 34#実行 35hist = model.fit(X_train, 36 Y_train, 37 epochs=100, 38 validation_data=(X_test, Y_test), 39 verbose=1, 40 batch_size=10)
補足情報(FW/ツールのバージョンなど)
jupyter notebook
X_train、Y_train、X_test、Y_testのshapeはどうなっていますか?
X_train.shape: (900, 32, 32, 3)
X_test.shape: (50, 32, 32, 3)
Y_train.shape: (9000,10)
Y_test.shape: (500,10)
です。
Xに対してYの数が10倍になっていますね。(何故ですか?) ここがエラーメッセージ「Found 900 input samples and 9000 target samples.」のことですね。
X_train、y_trainは900ずつなのにどこで増えたんでしょうか??
あと、one-hot表現は訓練データに対して処理するものではないですか?
one-hot表現のやり方が間違っていて10倍になってしまっているのかなあと思っているのですが、分かりません汗
また課題が”正解ラベル(y_trainとy_test)をOne-Hot表現に直す”となっていたのでこちらに対して処理しました。
ああ、10クラスある、ってことですね?
どのようにOne Hotにしたのでしょうか?
課題の内容を(ざっくりとでも)追記されると良いかと思います。
np_utils.to_categorical(y_train, num_classes=10).astype('i')
は他ページのonehot表現の部分ををコピペしました。
追加します。ありがとうございます。
X_train = X_train.transpose([0, 3, 1, 2])
X_test = X_test.transpose([0, 3, 1, 2])
この部分が間違っていたので直したら、エラー改善されました。
しかし以下のようなエラーが出ました。
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-16-1097be2f782e> in <module>()
5 validation_data=(X_test, Y_test),
6 verbose=1,
----> 7 batch_size=10)
~\Anaconda3\lib\site-packages\tensorflow_core\python\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, max_queue_size, workers, use_multiprocessing, **kwargs)
726 max_queue_size=max_queue_size,
727 workers=workers,
--> 728 use_multiprocessing=use_multiprocessing)
729
730 def evaluate(self,
~\Anaconda3\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py in fit(self, model, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, **kwargs)
222 validation_data=validation_data,
223 validation_steps=validation_steps,
--> 224 distribution_strategy=strategy)
225
226 total_samples = _get_total_number_of_samples(training_data_adapter)
~\Anaconda3\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py in _process_training_inputs(model, x, y, batch_size, epochs, sample_weights, class_weights, steps_per_epoch, validation_split, validation_data, validation_steps, shuffle, distribution_strategy, max_queue_size, workers, use_multiprocessing)
545 max_queue_size=max_queue_size,
546 workers=workers,
--> 547 use_multiprocessing=use_multiprocessing)
548 val_adapter = None
549 if validation_data:
~\Anaconda3\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py in _process_inputs(model, x, y, batch_size, epochs, sample_weights, class_weights, shuffle, steps, distribution_strategy, max_queue_size, workers, use_multiprocessing)
592 batch_size=batch_size,
593 check_steps=False,
--> 594 steps=steps)
595 adapter = adapter_cls(
596 x,
~\Anaconda3\lib\site-packages\tensorflow_core\python\keras\engine\training.py in _standardize_user_data(self, x, y, sample_weight, class_weight, batch_size, check_steps, steps_name, steps, validation_split, shuffle, extract_tensors_from_dataset)
2536 # Additional checks to avoid users mistakenly using improper loss fns.
2537 training_utils.check_loss_and_target_compatibility(
-> 2538 y, self._feed_loss_fns, feed_output_shapes)
2539
2540 # If sample weight mode has not been set and weights are None for all the
~\Anaconda3\lib\site-packages\tensorflow_core\python\keras\engine\training_utils.py in check_loss_and_target_compatibility(targets, loss_fns, output_shapes)
741 raise ValueError('A target array with shape ' + str(y.shape) +
742 ' was passed for an output of shape ' + str(shape) +
--> 743 ' while using as loss `' + loss_name + '`. '
744 'This loss expects targets to have the same shape '
745 'as the output.')
ValueError: A target array with shape (900, 10) was passed for an output of shape (None, 14, 14, 10) while using as loss `categorical_crossentropy`. This loss expects targets to have the same shape as the output.
あなたの回答
tips
プレビュー