質問編集履歴
1
追加のコードです。インポートしたライブラリー、前段のコードです。
title
CHANGED
File without changes
|
body
CHANGED
@@ -36,4 +36,109 @@
|
|
36
36
|
5 cnf_matrix = confusion_matrix(y_test, y_pred)
|
37
37
|
|
38
38
|
AttributeError: 'Sequential' object has no attribute 'predict_classes'
|
39
|
+
```
|
40
|
+
|
41
|
+
|
42
|
+
|
43
|
+
```Python
|
44
|
+
# 数値計算用ライブラリ numpy をインポート
|
45
|
+
import numpy as np
|
46
|
+
|
47
|
+
# データフレームを提供するライブラリ pandas をインポート
|
48
|
+
import pandas as pd
|
49
|
+
|
50
|
+
# 機械学習用ライブラリ sklearn(scikit-learn)内にあるライブラリ から
|
51
|
+
# モデル構築(訓練用)/検証データ分割用メソッド train_test_split をインポート
|
52
|
+
from sklearn.model_selection import train_test_split
|
53
|
+
|
54
|
+
# 自分のデータを読み込むために便利なメソッドをインポート
|
55
|
+
import os
|
56
|
+
import re
|
57
|
+
from tensorflow.keras.preprocessing.image import array_to_img, img_to_array, load_img
|
58
|
+
|
59
|
+
# ディープラーニング用フレームワーク、TensorFlow のラッパーである
|
60
|
+
# keras をインポート
|
61
|
+
from tensorflow.keras.models import Sequential, Model, load_model
|
62
|
+
from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten, Dropout, BatchNormalization, Activation
|
63
|
+
from tensorflow.keras.optimizers import Adam
|
64
|
+
from tensorflow.keras.datasets import cifar10
|
65
|
+
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
|
66
|
+
from tensorflow.keras.preprocessing.image import ImageDataGenerator
|
67
|
+
|
68
|
+
# 描画用ライブラリをインポート
|
69
|
+
import matplotlib.pyplot as plt
|
70
|
+
import seaborn as sns
|
71
|
+
from matplotlib import gridspec, cm
|
72
|
+
|
73
|
+
# 混同行列作成用メソッド confusion_matrix をインポート
|
74
|
+
from sklearn.metrics import confusion_matrix
|
75
|
+
|
76
|
+
def list_pictures(directory, ext='jpg|jpeg|bmp|png|ppm'):
    """Recursively collect image file paths under *directory*.

    Parameters
    ----------
    directory : str
        Root directory to walk.
    ext : str
        '|'-separated extensions (lower case) accepted as images.

    Returns
    -------
    list of str
        Full paths of files whose (case-insensitive) name looks like
        ``name.<ext>``.
    """
    # Bug fix: the original pattern used an unescaped '.', which matches ANY
    # character (so e.g. 'axjpg' was accepted), and was not anchored at the
    # end. Escape the dot and anchor with '$' so only real extensions match.
    pattern = re.compile(r'[\w]+\.(?:' + ext + r')$')
    return [os.path.join(root, f)
            for root, _, files in os.walk(directory)
            for f in files
            if pattern.match(f.lower())]
|
80
|
+
```
|
81
|
+
```Python
|
82
|
+
#モデルの構造を定義
|
83
|
+
def define_model():
    """Build the CNN classifier: BatchNorm'd conv stacks for 96x96 RGB
    input, ending in a 2-way softmax head.

    Returns
    -------
    Sequential
        The (uncompiled) Keras model.
    """
    net = Sequential()
    net.add(BatchNormalization(input_shape=(96, 96, 3)))

    # First stage: two 32-filter conv blocks, then pool + dropout.
    for _ in range(2):
        net.add(Conv2D(filters=32, kernel_size=(3, 3), strides=(1, 1)))
        net.add(BatchNormalization())
        net.add(Activation('relu'))
    net.add(MaxPool2D(pool_size=(2, 2)))
    net.add(Dropout(0.2))

    # Second stage: three 64-filter conv blocks, then pool + dropout.
    for _ in range(3):
        net.add(Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1)))
        net.add(BatchNormalization())
        net.add(Activation('relu'))
    net.add(MaxPool2D(pool_size=(2, 2)))
    net.add(Dropout(0.2))

    net.add(Flatten())

    # Classifier head: two dense+dropout blocks, then 2-class softmax.
    for _ in range(2):
        net.add(Dense(256, activation="relu"))
        net.add(Dropout(0.2))
    net.add(Dense(2, activation='softmax'))

    return net

model = define_model()
|
120
|
+
```
|
121
|
+
|
122
|
+
```Python
|
123
|
+
# Compile with sparse categorical crossentropy (labels are integer class
# ids, not one-hot vectors), tracking the matching accuracy metric.
# Fix: the `lr` argument alias was deprecated and then removed in recent
# Keras releases (the same TF >= 2.6 line whose removal of
# `predict_classes` produced the question's AttributeError) — use
# `learning_rate` instead.
model.compile(loss='sparse_categorical_crossentropy',
              optimizer=Adam(learning_rate=0.005),
              metrics=['sparse_categorical_accuracy'])
|
124
|
+
|
125
|
+
```
|
126
|
+
|
127
|
+
```Python
|
128
|
+
|
129
|
+
# On-the-fly data augmentation: random rotation, horizontal/vertical
# shifts, and horizontal flips.
datagen = ImageDataGenerator(rotation_range=30, width_shift_range=0.3,
                             height_shift_range=0.3, horizontal_flip=True)
# NOTE(review): fit() is only required for featurewise/zca options, none
# of which are enabled here — kept for compatibility with the original.
datagen.fit(X_train)

# Run training (the ImageDataGenerator performs data augmentation while
# training). The last `valid_samples` images are held out for validation.
batch_size = 10
valid_samples = 25
train_samples = len(X_train) - valid_samples
mc = ModelCheckpoint("cnn_model_02.h5", monitor="val_loss",
                     save_best_only=True, verbose=1)
es = EarlyStopping(monitor='val_loss', patience=15)
# Fix: Model.fit_generator was deprecated and removed in TF 2.x (the same
# releases that removed `predict_classes`); Model.fit accepts generators
# directly. Also use integer division — newer Keras rejects float
# steps_per_epoch / validation_steps.
hist = model.fit(datagen.flow(X_train[:train_samples], y_train[:train_samples],
                              batch_size=batch_size),
                 steps_per_epoch=train_samples // batch_size,
                 epochs=40,
                 callbacks=[mc, es],
                 validation_data=datagen.flow(X_train[-valid_samples:],
                                              y_train[-valid_samples:],
                                              batch_size=batch_size),
                 validation_steps=valid_samples // batch_size)
|
39
144
|
```
|