I am writing a program to recognize digits in handwritten images.
However, the loss does not decrease, and training converges with the recognition accuracy still low.
Judging from the loss, the model does not seem to be learning at all. I cannot figure out the cause, so if anyone can spot what is wrong, I would appreciate your pointing it out.
The problem
Train on 160 samples, validate on 40 samples
Epoch 1/30
160/160 [==============================] - 3s 21ms/step - loss: 2.3505 - accuracy: 0.0875 - val_loss: 2.2978 - val_accuracy: 0.1500
Epoch 2/30
160/160 [==============================] - 0s 2ms/step - loss: 2.3065 - accuracy: 0.0875 - val_loss: 2.3016 - val_accuracy: 0.0500
Epoch 3/30
160/160 [==============================] - 0s 2ms/step - loss: 2.2994 - accuracy: 0.1250 - val_loss: 2.3053 - val_accuracy: 0.0500
Epoch 4/30
160/160 [==============================] - 0s 2ms/step - loss: 2.2917 - accuracy: 0.1250 - val_loss: 2.3207 - val_accuracy: 0.0500
Epoch 5/30
160/160 [==============================] - 0s 2ms/step - loss: 2.2993 - accuracy: 0.1250 - val_loss: 2.3048 - val_accuracy: 0.0500
Epoch 6/30
160/160 [==============================] - 0s 2ms/step - loss: 2.2917 - accuracy: 0.1250 - val_loss: 2.2992 - val_accuracy: 0.0500
Epoch 7/30
160/160 [==============================] - 0s 2ms/step - loss: 2.2929 - accuracy: 0.1250 - val_loss: 2.2986 - val_accuracy: 0.0500
Epoch 8/30
160/160 [==============================] - 0s 2ms/step - loss: 2.2885 - accuracy: 0.1250 - val_loss: 2.2991 - val_accuracy: 0.0500
Epoch 9/30
160/160 [==============================] - 0s 2ms/step - loss: 2.2877 - accuracy: 0.1250 - val_loss: 2.3033 - val_accuracy: 0.0500
Epoch 10/30
160/160 [==============================] - 0s 2ms/step - loss: 2.2881 - accuracy: 0.1250 - val_loss: 2.3010 - val_accuracy: 0.0500
Epoch 11/30
160/160 [==============================] - 0s 2ms/step - loss: 2.2890 - accuracy: 0.1250 - val_loss: 2.3040 - val_accuracy: 0.0500
Epoch 12/30
160/160 [==============================] - 0s 2ms/step - loss: 2.2867 - accuracy: 0.1250 - val_loss: 2.2970 - val_accuracy: 0.0500
Epoch 13/30
160/160 [==============================] - 0s 2ms/step - loss: 2.2861 - accuracy: 0.1250 - val_loss: 2.2954 - val_accuracy: 0.0500
Epoch 14/30
160/160 [==============================] - 0s 2ms/step - loss: 2.2862 - accuracy: 0.1250 - val_loss: 2.2940 - val_accuracy: 0.0500
Epoch 15/30
160/160 [==============================] - 0s 2ms/step - loss: 2.2904 - accuracy: 0.1250 - val_loss: 2.2979 - val_accuracy: 0.0500
Epoch 16/30
160/160 [==============================] - 0s 2ms/step - loss: 2.2841 - accuracy: 0.1250 - val_loss: 2.2940 - val_accuracy: 0.0500
Epoch 17/30
160/160 [==============================] - 0s 2ms/step - loss: 2.2855 - accuracy: 0.1312 - val_loss: 2.2938 - val_accuracy: 0.1250
Epoch 18/30
160/160 [==============================] - 0s 2ms/step - loss: 2.2862 - accuracy: 0.1250 - val_loss: 2.2936 - val_accuracy: 0.1250
Epoch 19/30
160/160 [==============================] - 0s 2ms/step - loss: 2.2888 - accuracy: 0.1312 - val_loss: 2.2957 - val_accuracy: 0.1250
Epoch 20/30
160/160 [==============================] - 0s 2ms/step - loss: 2.2861 - accuracy: 0.1312 - val_loss: 2.2975 - val_accuracy: 0.1250
Epoch 21/30
160/160 [==============================] - 0s 2ms/step - loss: 2.2871 - accuracy: 0.1187 - val_loss: 2.2974 - val_accuracy: 0.0500
Epoch 22/30
160/160 [==============================] - 0s 2ms/step - loss: 2.2859 - accuracy: 0.1250 - val_loss: 2.2949 - val_accuracy: 0.0500
Epoch 23/30
160/160 [==============================] - 0s 2ms/step - loss: 2.2853 - accuracy: 0.1312 - val_loss: 2.2952 - val_accuracy: 0.0500
Epoch 24/30
160/160 [==============================] - 0s 2ms/step - loss: 2.2856 - accuracy: 0.1063 - val_loss: 2.2953 - val_accuracy: 0.0500
Epoch 25/30
160/160 [==============================] - 0s 2ms/step - loss: 2.2850 - accuracy: 0.0875 - val_loss: 2.2968 - val_accuracy: 0.0500
Epoch 26/30
160/160 [==============================] - 0s 2ms/step - loss: 2.2849 - accuracy: 0.1250 - val_loss: 2.3000 - val_accuracy: 0.0500
Epoch 27/30
160/160 [==============================] - 0s 2ms/step - loss: 2.2854 - accuracy: 0.1250 - val_loss: 2.2984 - val_accuracy: 0.0500
Epoch 28/30
160/160 [==============================] - 0s 2ms/step - loss: 2.2855 - accuracy: 0.1375 - val_loss: 2.2958 - val_accuracy: 0.1250
Epoch 29/30
160/160 [==============================] - 0s 2ms/step - loss: 2.2843 - accuracy: 0.1312 - val_loss: 2.2929 - val_accuracy: 0.1250
Epoch 30/30
160/160 [==============================] - 0s 2ms/step - loss: 2.2853 - accuracy: 0.1312 - val_loss: 2.2921 - val_accuracy: 0.1250
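For reference (my own note, not part of the log): the training loss plateaus around 2.29-2.30, which is essentially ln(10) ≈ 2.303, the categorical cross-entropy of a 10-class model that predicts every class with equal probability. In other words, the network is stuck at chance level. A quick way to confirm that number:

# Reference calculation (added for context): the cross-entropy of a uniform
# guess over 10 classes is ln(10), which matches the plateau in the log above.
import math
print(math.log(10))  # 2.302585..., close to the stuck loss of ~2.29-2.30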
Relevant source code
import numpy as np
import matplotlib.pyplot as plt
import glob, os
from sklearn.model_selection import train_test_split
import cv2
from keras.layers import Convolution2D, BatchNormalization, Activation, MaxPooling2D, Add, Dropout, Flatten, Dense
from keras import optimizers
from keras.utils import to_categorical
from keras import models
from keras import layers

x = []
z = []

# Specify the path to the image folders
path = os.path.dirname(os.path.abspath('__file__'))
in_size = (28, 28)
out_size = 10

path_one = path + '/Dataset/one'
path_two = path + '/Dataset/two'
path_three = path + '/Dataset/three'
path_four = path + '/Dataset/four'

file_1 = glob.glob(path_one + '/*.jpg')
file_2 = glob.glob(path_two + '/*.jpg')
file_3 = glob.glob(path_three + '/*.jpg')
file_4 = glob.glob(path_four + '/*.jpg')

# (file_5 through file_0 and the loaders for the remaining digits are defined
#  the same way; they are omitted from this excerpt.)
print(len(file_1))
print(len(file_2))
print(len(file_3))
print(len(file_4))
print(len(file_5))
print(len(file_6))
print(len(file_7))
print(len(file_8))
print(len(file_9))
print(len(file_0))
# Output:
# 26 20 23 21 11 19 20 16 22 22

# Load each class folder: grayscale, resize to 28x28, scale to [0, 1]
def load_dir_1(path, label):
    for i in file_1:
        img = cv2.imread(i)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img = cv2.resize(img, in_size)
        img = img / 255.0
        x.append(img)
        z.append(label)
    return [x, z]

def load_dir_2(path, label):
    for i in file_2:
        img = cv2.imread(i)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img = cv2.resize(img, in_size)
        img = img / 255.0
        x.append(img)
        z.append(label)
    return [x, z]

def load_dir_3(path, label):
    for i in file_3:
        img = cv2.imread(i)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img = cv2.resize(img, in_size)
        img = img / 255.0
        x.append(img)
        z.append(label)
    return [x, z]

def load_dir_4(path, label):
    for i in file_4:
        img = cv2.imread(i)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img = cv2.resize(img, in_size)
        img = img / 255.0
        x.append(img)
        z.append(label)
    return [x, z]

load_dir_1(path_one, 1)
load_dir_2(path_two, 2)
load_dir_3(path_three, 3)
load_dir_4(path_four, 4)

x = np.array(x)
z = np.array(z)

import keras
x_train, x_test, z_train, z_test = train_test_split(x, z, test_size=0.2)
x_train = x_train.reshape(len(x_train), 28, 28, 1).astype('float32')
x_test = x_test.reshape(len(x_test), 28, 28, 1).astype('float32')
z_train = keras.utils.np_utils.to_categorical(z_train.astype('int32'), 10)
z_test = keras.utils.np_utils.to_categorical(z_test.astype('int32'), 10)

# CNN: conv/pool stages with dropout, then dense layers with a 10-way softmax
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D(2, 2))
model.add(Dropout(0.2))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))

from keras import optimizers
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(x_train, z_train, epochs=30, batch_size=32, validation_data=(x_test, z_test))
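For completeness, a minimal sanity check along these lines (my own sketch, assuming the x and z lists built by the script above) can confirm the image shapes, the pixel range, and the per-class label counts before calling model.fit:

# Sanity-check sketch (illustrative addition; assumes x and z from the script above).
# Verifies the input shape, the pixel range after /255.0, and that the integer
# labels fall in the 0-9 range expected by to_categorical(..., 10).
import numpy as np

x_arr = np.array(x)   # expected shape: (num_samples, 28, 28)
z_arr = np.array(z)   # integer class labels

print(x_arr.shape, x_arr.dtype)
print('pixel range:', x_arr.min(), '-', x_arr.max())

labels, counts = np.unique(z_arr, return_counts=True)
print('label counts:', dict(zip(labels.tolist(), counts.tolist())))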