前提・実現したいこと
以下のコードを実行したところ、途中で処理が止まり、Segmentation fault: 11と表示されました。解決方法が分かる方はご教示をお願いします。
発生している問題・エラーメッセージ
Epoch 1/10 Segmentation fault: 11
該当のソースコード
"""Binary eye-state (open/closed) classifier fine-tuned on EfficientNetB7.

Reads PNG images from DATA_DIR whose filenames encode the label in the
fifth underscore-separated field, builds shuffled train/valid/test index
splits, and trains a Keras model fed by a Python batch generator.
"""
import glob
import os

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from PIL import Image
from efficientnet.keras import EfficientNetB7
from sklearn.model_selection import train_test_split
from keras.utils import to_categorical
from keras.layers import Input, Dense
from keras.models import Model
from keras.callbacks import ModelCheckpoint

# Diagnostic listing of every file under the Kaggle input directory.
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))

DATA_DIR = 's0001/'
IM_WIDTH = IM_HEIGHT = 100
TRAIN_TEST_SPLIT = 0.8
TRAIN_VALID_SPLIT = 0.7
ID_EYE_MAP = {0: 'close', 1: 'open'}
# Reverse lookup: label string -> integer class id.
EYE_ID_MAP = dict((r, i) for i, r in ID_EYE_MAP.items())


def parse_filepath(filepath):
    """Extract the eye-state label ('close' / 'open') from a file path.

    The label is the 5th underscore-separated token of the base filename.
    Returns None on any parse failure so that the resulting DataFrame has
    a single label column and `dropna()` can discard bad rows.
    """
    try:
        _, filename = os.path.split(filepath)
        filename, _ = os.path.splitext(filename)
        # Expected layout: a_b_c_d_<eyestatus>_e_f_g
        _, _, _, _, eyestatus, _, _, _ = filename.split("_")
        return ID_EYE_MAP[int(eyestatus)]
    except Exception as e:
        # BUG FIX: the original returned (None, None, None) here while the
        # success path returns a single string. That shape mismatch corrupts
        # pd.DataFrame(attributes) (3 columns for failed rows vs 1 for good
        # ones). Return a single None so dropna() works as intended.
        print(filepath, e)
        return None


files = glob.glob(os.path.join(DATA_DIR, "*.png"))
attributes = list(map(parse_filepath, files))

df = pd.DataFrame(attributes)
df['file'] = files
df.columns = ['eyestatus', 'file']
df = df.dropna()  # discard rows whose filename failed to parse
df['eyestatus_id'] = df['eyestatus'].map(lambda eyestatus: EYE_ID_MAP[eyestatus])
print(df)

# Shuffle row positions once, then carve out train / valid / test splits.
p = np.random.permutation(len(df))
train_up_to = int(len(df) * TRAIN_TEST_SPLIT)
train_idx = p[:train_up_to]
test_idx = p[train_up_to:]
train_up_to = int(train_up_to * TRAIN_VALID_SPLIT)
train_idx, valid_idx = train_idx[:train_up_to], train_idx[train_up_to:]


def get_data_generator(df, indices, for_training, batch_size=32):
    """Yield (images, [labels]) batches; loops forever when for_training.

    Images are converted to RGB, resized to IM_WIDTH x IM_HEIGHT and scaled
    to [0, 1]; labels are one-hot vectors of length 2.
    """
    images, eyestatuses = [], []
    while True:
        for i in indices:
            r = df.iloc[i]
            file, eyestatus = r['file'], r['eyestatus_id']
            im = Image.open(file)
            im = im.convert("RGB")
            im = im.resize((IM_WIDTH, IM_HEIGHT))
            im = np.array(im) / 255.0
            images.append(im)
            eyestatuses.append(to_categorical(eyestatus, 2))
            if len(images) >= batch_size:
                yield np.array(images), [np.array(eyestatuses)]
                images, eyestatuses = [], []
        if not for_training:
            break


input_layer = Input(shape=(IM_HEIGHT, IM_WIDTH, 3))
# NOTE(review): EfficientNetB7 with every layer trainable needs far more
# memory than the reporter's 8 GB machine has; the observed
# "Segmentation fault: 11" / dying Jupyter kernel is most plausibly the
# process running out of memory, not a code bug. If it recurs, try a
# smaller variant (e.g. EfficientNetB0) or freeze the backbone
# (layer.trainable = False) — TODO confirm on the target machine.
efficient_net = EfficientNetB7(
    weights='noisy-student',
    include_top=False,
    input_tensor=input_layer,
    pooling='max',
)
for layer in efficient_net.layers:
    layer.trainable = True

bottleneck = efficient_net.output
x = Dense(units=128, activation='relu')(bottleneck)
eyestatus_output = Dense(
    units=len(EYE_ID_MAP),
    activation='softmax',
    name='eyestatus_output',
)(x)

model = Model(inputs=input_layer, outputs=[eyestatus_output])
model.compile(
    optimizer='rmsprop',
    loss={'eyestatus_output': 'categorical_crossentropy'},
    loss_weights={'eyestatus_output': 1.},
    metrics={'eyestatus_output': 'accuracy'},
)
# model.summary()

batch_size = 10
valid_batch_size = 10
train_gen = get_data_generator(df, train_idx, for_training=True,
                               batch_size=batch_size)
valid_gen = get_data_generator(df, valid_idx, for_training=True,
                               batch_size=valid_batch_size)

callbacks = [
    # Keep only the checkpoint with the lowest validation loss.
    ModelCheckpoint('./model_checkpoint', monitor='val_loss', verbose=1,
                    save_best_only=True, mode='min')
]

history = model.fit_generator(
    train_gen,
    steps_per_epoch=len(train_idx) // batch_size,
    epochs=10,
    callbacks=callbacks,
    validation_data=valid_gen,
    validation_steps=len(valid_idx) // valid_batch_size,
)
試したこと
メモリ不足によるものかと思ったので、バッチサイズを32→10、画像サイズを198→100に変更してみました。
少しは処理が進みましたが、途中で止まってしまいました。
Jupyter Notebook でも実行してみたところ、
The kernel appears to have died. It will restart automatically.
というものが表示され、処理が止まってしまいます。
補足情報(FW/ツールのバージョンなど)
プロセッサ:1.6 GHz デュアルコアIntel Core i5
メモリ:8 GB 2133 MHz LPDDR3

回答2件
あなたの回答
tips
プレビュー