現在、製品の不良品を検知するプログラムをGoogle Colaboratoryで作成しようとしています。
VGG16を転移学習して、識別しようとしているのですが現在、損失関数と識別率が以下の通りになっておりまったく学習できていない状態です。
このような現状を解決するアドバイスをいただきたいです。
![
また、val_accuracyの値が変化しないです。
Epoch 1/100 5/5 [==============================] - 5s 1s/step - loss: 0.8709 - accuracy: 0.4800 - val_loss: 0.7470 - val_accuracy: 0.2240 Epoch 2/100 5/5 [==============================] - 4s 982ms/step - loss: 0.6408 - accuracy: 0.6160 - val_loss: 0.9470 - val_accuracy: 0.2000 Epoch 3/100 5/5 [==============================] - 4s 981ms/step - loss: 0.4567 - accuracy: 0.7680 - val_loss: 1.2471 - val_accuracy: 0.2000 Epoch 4/100 5/5 [==============================] - 5s 1s/step - loss: 0.3266 - accuracy: 0.9040 - val_loss: 1.5914 - val_accuracy: 0.2000 Epoch 5/100 5/5 [==============================] - 4s 982ms/step - loss: 0.1663 - accuracy: 0.9920 - val_loss: 1.9080 - val_accuracy: 0.2000
以下コード全文です。
python
1from google.colab import drive 2drive.mount('/content/drive') 3 4import pandas as pd 5import os 6import numpy as np 7from keras.applications.vgg16 import VGG16 8from keras.preprocessing.image import load_img, img_to_array 9from keras.models import Sequential, Model 10from keras import models 11from keras import layers 12from keras import regularizers 13from tensorflow.keras.optimizers import SGD 14from keras.layers import Input, Dense, GlobalAveragePooling2D, Dropout, Activation, Flatten 15from tensorflow.keras.utils import to_categorical 16 17numOfTrainImage = 250 18mini_batch = (int)(numOfTrainImage / 2 / 5) 19max_epoch = 100 20numOfTestImage = 100 21 22#学習用画像とラベルの対応表 23df = pd.read_csv('drive/MyDrive/train.csv', delimiter=',') 24df.head(5) 25 26#学習用画像の読み込み 27dir_name = os.path.join('drive/MyDrive/train_data') 28 29imgarray = [] 30category = [] 31 32for filename in sorted(os.listdir(dir_name)[0:numOfTrainImage]): 33 img = load_img(os.path.join(dir_name, filename), color_mode='rgb', target_size=(300,300)) 34 imgdata = img_to_array(img) 35 imgdata = imgdata / 255.0 36 imgarray.append(imgdata) 37 temp = df[df['id']==filename].target 38 label = df[df['id']==filename].target.values[0] 39 category.append(label) 40 41x_train = np.array(imgarray) 42t_train = to_categorical(np.array(category)) 43p = np.random.permutation(len(x_train)) 44x_train, y_train = x_train[p], t_train[p] 45print("x_train.shape = ", x_train.shape, "t_train.shape = ", t_train.shape) 46 47#モデルの構築 48input_tensor = Input(shape=(300, 300, 3)) 49vgg16 = VGG16(include_top=False, weights='imagenet', input_tensor=input_tensor) 50 51top_model = vgg16.output 52top_model=GlobalAveragePooling2D()(top_model) 53top_model = Flatten(input_shape=vgg16.output_shape[1:])(top_model) 54top_model = Dense(256, activation='relu')(top_model) 55top_model = Dropout(0.5)(top_model) 56top_model = Dense(2, activation='softmax')(top_model) 57network=Model(inputs=vgg16.input, outputs=top_model) 58 59for layer in network.layers[:19]: 60 layer.trainable=False 61 62network.summary() 63 64network.compile(loss='categorical_crossentropy', optimizer=SGD(lr=1e-4, momentum=0.9), metrics=['accuracy']) 65 66history = network.fit(x_train, t_train, epochs=max_epoch, batch_size=mini_batch, validation_split=0.5, shuffle=True) 67 68acc_train = np.array(history.history['accuracy']) 69acc_valid = history.history['val_accuracy'] 70loss_train = history.history['loss'] 71loss_valid = history.history['val_loss'] 72 73f = open('temp_epoch_loss.dat', 'w') 74for n in range(0,len(acc_train)): 75 f.write(str(n) + ' ' + str(acc_train[n]) + ' ' + str(loss_train[n]) + ' ' + str(acc_valid[n]) + ' ' + str(loss_valid[n]) + '\n') 76f.close() 77 78import matplotlib.pyplot as plt 79 80epochs = range(1, len(loss_train) + 1) 81plt.plot(epochs, loss_train, 'bo', label='Training loss') 82plt.plot(epochs, loss_valid, 'b', label='Validatoin loss') 83plt.title('Training and Validation Loss') 84plt.xlabel('Epochs') 85plt.ylabel('Loss') 86plt.legend() 87plt.show() 88 89plt.plot(epochs, acc_train, 'bo', label='Training acc') 90plt.plot(epochs, acc_valid, 'b', label='Validation acc') 91plt.title('Training and Validation Accuracy') 92plt.xlabel('Epochs') 93plt.ylabel('Accuracy') 94plt.legend() 95plt.show() 96 97#テスト画像の読み込み 98dir_name = os.path.join('drive/MyDrive/test_data') 99imgarray = [] 100filenamearray = [] 101for filename in sorted(os.listdir(dir_name)[0:100]): 102 img = load_img(os.path.join(dir_name, filename), color_mode='rgb', target_size=(300,300)) 103 104 imgdata = img_to_array(img) 105 imgdata = imgdata / 255.0 106 imgarray.append(imgdata) 107 filenamearray.append(filename) 108 109x_test = np.array(imgarray) 110 111y_test = network.predict(x_test) 112numOfImages, dummy = y_test.shape 113 114for n in range(0, numOfTestImage): 115 print(filenamearray[n], end='') 116 for k in range(0,2): 117 print('%6.3f' % y_test[n][k], end='') 118 print() 119predict_x=network.predict(x_test) 120y_test = np.argmax(predict_x,axis=1) 121 122f = open('submit.csv', 'w') 123for n in range(0, numOfImages): 124 print(filenamearray[n], y_test[n], sep='\t', file=f) 125f.close()
追記事項
回答1件
あなたの回答
tips
プレビュー