What I want to achieve
When I run the code below, I get an error. Any guidance would be appreciated.
Problem / error message
```
Traceback (most recent call last):
  File "sleepingmodel6.py", line 106, in <module>
    history = model.fit_generator(train_gen,steps_per_epoch=len(train_idx)//batch_size,epochs=10,callbacks=callbacks,validation_data=valid_gen,validation_steps=len(valid_idx)//valid_batch_size)
  File "/opt/anaconda3/lib/python3.7/site-packages/keras/legacy/interfaces.py", line 91, in wrapper
    return func(*args, **kwargs)
  File "/opt/anaconda3/lib/python3.7/site-packages/keras/engine/training.py", line 1732, in fit_generator
    initial_epoch=initial_epoch)
  File "/opt/anaconda3/lib/python3.7/site-packages/keras/engine/training_generator.py", line 220, in fit_generator
    reset_metrics=False)
  File "/opt/anaconda3/lib/python3.7/site-packages/keras/engine/training.py", line 1508, in train_on_batch
    class_weight=class_weight)
  File "/opt/anaconda3/lib/python3.7/site-packages/keras/engine/training.py", line 579, in _standardize_user_data
    exception_prefix='input')
  File "/opt/anaconda3/lib/python3.7/site-packages/keras/engine/training_utils.py", line 135, in standardize_input_data
    'with shape ' + str(data_shape))
ValueError: Error when checking input: expected input_1 to have 4 dimensions, but got array with shape (32, 198, 198)
```
Relevant source code
```python
import numpy as np
import pandas as pd

import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))

import glob
import matplotlib.pyplot as plt
import seaborn as sns
from PIL import Image
from efficientnet.keras import EfficientNetB7
from sklearn.model_selection import train_test_split
from keras.utils import to_categorical
from keras.layers import Input, Dense
from keras.models import Model
from keras.callbacks import ModelCheckpoint

DATA_DIR = 's0001/'
IM_WIDTH = IM_HEIGHT = 198
TRAIN_TEST_SPLIT = 0.8
TRAIN_VALID_SPLIT = 0.7
ID_EYE_MAP = {0: 'close', 1: 'open'}
EYE_ID_MAP = dict((r, i) for i, r in ID_EYE_MAP.items())

def parse_filepath(filepath):
    try:
        path, filename = os.path.split(filepath)
        filename, ext = os.path.splitext(filename)
        _, _, _, _, eyestatus, _, _, _, = filename.split("_")
        return ID_EYE_MAP[int(eyestatus)]

    except Exception as e:
        print(filepath)
        return None, None, None

files = glob.glob(os.path.join(DATA_DIR, "*.png"))
attributes = list(map(parse_filepath, files))

df = pd.DataFrame(attributes)
df['file'] = files
df.columns = ['eyestatus', 'file']
df = df.dropna()
df['eyestatus_id'] = df['eyestatus'].map(lambda eyestatus: EYE_ID_MAP[eyestatus])

#print(df.isnull().sum())
print(df)
p = np.random.permutation(len(df))
train_up_to = int(len(df) * TRAIN_TEST_SPLIT)
train_idx = p[:train_up_to]
test_idx = p[train_up_to:]
train_up_to = int(train_up_to * TRAIN_VALID_SPLIT)
train_idx, valid_idx = train_idx[:train_up_to], train_idx[train_up_to:]

def get_data_generator(df, indices, for_training, batch_size=32):

    images, eyestatuses = [], []
    while True:
        for i in indices:
            r = df.iloc[i]
            file, eyestatus = r['file'], r['eyestatus_id']
            im = Image.open(file)
            im = im.resize((IM_WIDTH, IM_HEIGHT))
            im = np.array(im) / 255.0
            images.append(im)
            eyestatuses.append(to_categorical(eyestatus, 2))
            if len(images) >= batch_size:
                yield np.array(images), [np.array(eyestatuses)]
                images, eyestatuses = [], []
        if not for_training:
            break

input_layer = Input(shape=(IM_HEIGHT, IM_WIDTH, 3))
#input_layer = Reshape((IM_HEIGHT, IM_WIDTH, 3), input_shape = (IM_HEIGHT, IM_WIDTH))(F)

efficient_net = EfficientNetB7(weights='noisy-student', include_top=False, input_tensor=input_layer, pooling='max')

for layer in efficient_net.layers:
    layer.trainable = True

bottleneck = efficient_net.output

x = Dense(units=128, activation='relu')(bottleneck)
eyestatus_output = Dense(units=len(EYE_ID_MAP), activation='softmax', name='eyestatus_output')(x)

model = Model(inputs=input_layer, outputs=[eyestatus_output])

model.compile(optimizer='rmsprop',
              loss={'eyestatus_output': 'categorical_crossentropy'},
              loss_weights={'eyestatus_output': 1.},
              metrics={'eyestatus_output': 'accuracy'})

#model.summary()

batch_size = 32
valid_batch_size = 32

train_gen = get_data_generator(df, train_idx, for_training=True, batch_size=batch_size)
valid_gen = get_data_generator(df, valid_idx, for_training=True, batch_size=valid_batch_size)

callbacks = [
    ModelCheckpoint('./model_checkpoint', monitor='val_loss', verbose=1, save_best_only=True, mode='min')
]

history = model.fit_generator(train_gen,
                              steps_per_epoch=len(train_idx)//batch_size,
                              epochs=10,
                              callbacks=callbacks,
                              validation_data=valid_gen,
                              validation_steps=len(valid_idx)//valid_batch_size)
```
What I tried
I can tell from the message that the error is about the number of dimensions, but I do not know how to deal with it.
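Note: the traceback says input_1 expects 4 dimensions, i.e. batches of shape (batch, 198, 198, 3), while get_data_generator is yielding batches of shape (32, 198, 198), so each loaded image has no channel axis. That usually means the PNG files in s0001/ are single-channel (grayscale). A minimal sketch of one possible fix, assuming that is the case, is to convert each image to RGB when it is loaded so the arrays match the Input(shape=(IM_HEIGHT, IM_WIDTH, 3)) declaration:

```python
def get_data_generator(df, indices, for_training, batch_size=32):
    images, eyestatuses = [], []
    while True:
        for i in indices:
            r = df.iloc[i]
            file, eyestatus = r['file'], r['eyestatus_id']
            # Force 3 channels: a grayscale PNG would otherwise give a (198, 198) array
            im = Image.open(file).convert('RGB')
            im = im.resize((IM_WIDTH, IM_HEIGHT))
            im = np.array(im) / 255.0   # now shaped (IM_HEIGHT, IM_WIDTH, 3)
            images.append(im)
            eyestatuses.append(to_categorical(eyestatus, 2))
            if len(images) >= batch_size:
                yield np.array(images), [np.array(eyestatuses)]
                images, eyestatuses = [], []
        if not for_training:
            break
```

Declaring the input as (IM_HEIGHT, IM_WIDTH, 1) would also make the shapes consistent, but the pretrained EfficientNetB7 weights expect 3-channel input, so converting to RGB is usually the simpler route.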
Supplementary information (framework/tool versions, etc.)
