質問編集履歴
1
追加のコードです。インポートしたライブラリー、前段のコードです。
test
CHANGED
File without changes
|
test
CHANGED
@@ -75,3 +75,213 @@
|
|
75
75
|
AttributeError: 'Sequential' object has no attribute 'predict_classes'
|
76
76
|
|
77
77
|
```
|
78
|
+
|
79
|
+
|
80
|
+
|
81
|
+
|
82
|
+
|
83
|
+
|
84
|
+
|
85
|
+
```Python
|
86
|
+
|
87
|
+
# 数値計算用ライブラリ numpy をインポート
|
88
|
+
|
89
|
+
import numpy as np
|
90
|
+
|
91
|
+
|
92
|
+
|
93
|
+
# データフレームを提供するライブラリ pandas をインポート
|
94
|
+
|
95
|
+
import pandas as pd
|
96
|
+
|
97
|
+
|
98
|
+
|
99
|
+
# 機械学習用ライブラリ sklearn(scikit-learn)内にあるライブラリ から
|
100
|
+
|
101
|
+
# モデル構築(訓練用)/検証データ分割用メソッド train_test_split をインポート
|
102
|
+
|
103
|
+
from sklearn.model_selection import train_test_split
|
104
|
+
|
105
|
+
|
106
|
+
|
107
|
+
# 自分のデータを読み込むために便利なメソッドをインポート
|
108
|
+
|
109
|
+
import os
|
110
|
+
|
111
|
+
import re
|
112
|
+
|
113
|
+
from tensorflow.keras.preprocessing.image import array_to_img, img_to_array, load_img
|
114
|
+
|
115
|
+
|
116
|
+
|
117
|
+
# ディープラーニング用フレームワーク、TensorFlow のラッパーである
|
118
|
+
|
119
|
+
# keras をインポート
|
120
|
+
|
121
|
+
from tensorflow.keras.models import Sequential, Model, load_model
|
122
|
+
|
123
|
+
from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten, Dropout, BatchNormalization, Activation
|
124
|
+
|
125
|
+
from tensorflow.keras.optimizers import Adam
|
126
|
+
|
127
|
+
from tensorflow.keras.datasets import cifar10
|
128
|
+
|
129
|
+
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
|
130
|
+
|
131
|
+
from tensorflow.keras.preprocessing.image import ImageDataGenerator
|
132
|
+
|
133
|
+
|
134
|
+
|
135
|
+
# 描画用ライブラリをインポート
|
136
|
+
|
137
|
+
import matplotlib.pyplot as plt
|
138
|
+
|
139
|
+
import seaborn as sns
|
140
|
+
|
141
|
+
from matplotlib import gridspec, cm
|
142
|
+
|
143
|
+
|
144
|
+
|
145
|
+
# 混合行列作成用メソッド confusion_matrix をインポート
|
146
|
+
|
147
|
+
from sklearn.metrics import confusion_matrix
|
148
|
+
|
149
|
+
|
150
|
+
|
151
|
+
def list_pictures(directory, ext='jpg|jpeg|bmp|png|ppm'):
    """Recursively collect image file paths under *directory*.

    Parameters
    ----------
    directory : str
        Root directory to walk.
    ext : str
        '|'-separated alternation of accepted extensions
        (matched case-insensitively).

    Returns
    -------
    list[str]
        Full paths of files whose lower-cased name is
        ``<word chars>.<ext>``.
    """
    # Compile once instead of re-matching the raw string per file.
    # \. escapes the dot — the original pattern used a bare '.', which
    # matched ANY character (e.g. "photoXjpg" was accepted) — and the
    # trailing $ anchors the extension at the end of the name so that
    # "photo.jpgx" is rejected.
    pattern = re.compile(r'[\w]+\.(?:' + ext + r')$')
    return [os.path.join(root, f)
            for root, _, files in os.walk(directory)
            for f in files
            if pattern.match(f.lower())]
158
|
+
|
159
|
+
```
|
160
|
+
|
161
|
+
```Python
|
162
|
+
|
163
|
+
#モデルの構造を定義
|
164
|
+
|
165
|
+
def define_model(input_shape=(96, 96, 3), num_classes=2):
    """Build the CNN used for image classification.

    Architecture: input BatchNorm -> [2 x (Conv32-BN-ReLU)] -> MaxPool ->
    Dropout -> [3 x (Conv64-BN-ReLU)] -> MaxPool -> Dropout -> Flatten ->
    2 x (Dense(256)-Dropout) -> softmax head.

    Parameters
    ----------
    input_shape : tuple of int
        (height, width, channels) of the input images.
        Defaults to (96, 96, 3), matching the original hard-coded value.
    num_classes : int
        Number of output classes. Defaults to 2.

    Returns
    -------
    Sequential
        The uncompiled Keras model.
    """
    model = Sequential()

    # Normalize raw pixel inputs instead of manually rescaling them.
    model.add(BatchNormalization(input_shape=input_shape))

    # Conv block 1: two 3x3 convolutions with 32 filters each.
    for _ in range(2):
        model.add(Conv2D(filters=32, kernel_size=(3, 3), strides=(1, 1)))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
    model.add(MaxPool2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))

    # Conv block 2: three 3x3 convolutions with 64 filters each.
    for _ in range(3):
        model.add(Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1)))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
    model.add(MaxPool2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))

    model.add(Flatten())

    # Fully-connected classifier head.
    model.add(Dense(256, activation="relu"))
    model.add(Dropout(0.2))
    model.add(Dense(256, activation="relu"))
    model.add(Dropout(0.2))
    model.add(Dense(num_classes, activation='softmax'))

    return model


model = define_model()
|
238
|
+
|
239
|
+
```
|
240
|
+
|
241
|
+
|
242
|
+
|
243
|
+
```Python
|
244
|
+
|
245
|
+
# Compile for integer (sparse) class labels — no one-hot encoding needed.
# NOTE: the `lr` keyword was renamed to `learning_rate` in tf.keras; `lr`
# is deprecated and rejected entirely by recent optimizer versions (the
# same modern TF release that removed `predict_classes`, per the
# AttributeError shown above).
model.compile(loss='sparse_categorical_crossentropy',
              optimizer=Adam(learning_rate=0.005),
              metrics=['sparse_categorical_accuracy'])
|
246
|
+
|
247
|
+
|
248
|
+
|
249
|
+
```
|
250
|
+
|
251
|
+
|
252
|
+
|
253
|
+
```Python
|
254
|
+
|
255
|
+
|
256
|
+
|
257
|
+
import math

# Data augmentation: random rotation, shifts, and horizontal flips are
# applied on the fly while training.
datagen = ImageDataGenerator(rotation_range=30,
                             width_shift_range=0.3,
                             height_shift_range=0.3,
                             horizontal_flip=True)
datagen.fit(X_train)

# Run training (the ImageDataGenerator performs data augmentation while
# the model is being fit).
batch_size = 10
valid_samples = 25
train_samples = len(X_train) - valid_samples

# Keep the best weights (by validation loss) and stop early once the
# validation loss has not improved for 15 consecutive epochs.
mc = ModelCheckpoint("cnn_model_02.h5", monitor="val_loss",
                     save_best_only=True, verbose=1)
es = EarlyStopping(monitor='val_loss', patience=15)

# NOTE: `Model.fit_generator` was removed in TF 2.6 — the same release
# that removed `Sequential.predict_classes` (see the AttributeError
# above) — so it would also fail here. `Model.fit` accepts generators
# directly. Steps are rounded UP with math.ceil so every sample is seen
# each epoch (the original passed a float).
# NOTE(review): the validation split is fed through the *augmenting*
# generator, preserving the original behavior — consider validating on
# un-augmented images for a cleaner metric; confirm with the author.
hist = model.fit(
    datagen.flow(X_train[:train_samples], y_train[:train_samples],
                 batch_size=batch_size),
    steps_per_epoch=math.ceil(train_samples / batch_size),
    epochs=40,
    callbacks=[mc, es],
    validation_data=datagen.flow(X_train[-valid_samples:],
                                 y_train[-valid_samples:],
                                 batch_size=batch_size),
    validation_steps=math.ceil(valid_samples / batch_size))
|
286
|
+
|
287
|
+
```
|