質問編集履歴
1
現在の状況をアップデートしました。
test
CHANGED
File without changes
|
test
CHANGED
@@ -12,267 +12,285 @@
|
|
12
12
|
|
13
13
|
|
14
14
|
|
15
|
-
|
16
|
-
|
17
|
-
|
18
|
-
|
19
|
-
|
20
|
-
|
21
|
-
|
22
|
-
|
23
|
-
|
24
|
-
|
25
|
-
|
26
|
-
|
27
|
-
|
15
|
+
ここで手順3のモデル評価時にValidationデータの予想結果を混同行列を使って可視化したいと思っています。
|
16
|
+
|
17
|
+
sklearnのconfusion_matrix()とmatplotlibを用いて下記Figure1のような混同行列を表示・保存したいです。
|
18
|
+
|
19
|
+
現在Figure2のような状態なのですが、
|
20
|
+
|
21
|
+
・Figure1のようにどの画像で何枚間違えたかを可視化する方法がわからない
|
22
|
+
|
23
|
+
・Figure2のA, Dの部分の見切れた部分をどうにかしたい
|
24
|
+
|
25
|
+
という状況で、この打開策をご教示頂きたく思います。
|
26
|
+
|
27
|
+
![イメージ説明](1a038f9f312d121c0475601309d8bae4.png)
|
28
|
+
|
29
|
+
|
30
|
+
|
31
|
+
現在のコードは下記の通りです。
|
32
|
+
|
33
|
+
|
34
|
+
|
35
|
+
```ここに言語を入力
|
36
|
+
|
37
|
+
#ライブラリのインポート部分は省略
|
38
|
+
|
39
|
+
|
40
|
+
|
41
|
+
program_path = Path(__file__).parent.resolve()
|
42
|
+
|
43
|
+
parent_path = program_path.parent.resolve()
|
44
|
+
|
45
|
+
data_path = parent_path / 'data'
|
46
|
+
|
47
|
+
data_processed_path = data_path / 'processed'
|
48
|
+
|
49
|
+
|
50
|
+
|
51
|
+
train_dir = os.path.join(data_processed_path, 'Train')
|
52
|
+
|
53
|
+
validation_dir = os.path.join(data_processed_path, 'Validation')
|
54
|
+
|
55
|
+
test_dir = os.path.join(data_processed_path, 'Test')
|
56
|
+
|
57
|
+
|
58
|
+
|
59
|
+
label = os.listdir(test_dir)
|
60
|
+
|
61
|
+
n_categories = len(label)
|
62
|
+
|
63
|
+
|
64
|
+
|
65
|
+
n_epochs = 500
|
66
|
+
|
67
|
+
batch_size = 8
|
68
|
+
|
69
|
+
input_image_size = 224
|
70
|
+
|
71
|
+
|
72
|
+
|
73
|
+
def create_vgg16():
|
74
|
+
|
75
|
+
base_model = VGG16(
|
76
|
+
|
77
|
+
include_top = False,
|
78
|
+
|
79
|
+
weights = "imagenet",
|
80
|
+
|
81
|
+
input_shape = None
|
82
|
+
|
83
|
+
)
|
84
|
+
|
85
|
+
|
86
|
+
|
87
|
+
x = base_model.output
|
88
|
+
|
89
|
+
x = GlobalAveragePooling2D()(x)
|
90
|
+
|
91
|
+
x = Dense(1024, activation = 'relu')(x)
|
92
|
+
|
93
|
+
predictions = Dense(n_categories, activation = 'softmax')(x)
|
94
|
+
|
95
|
+
|
96
|
+
|
97
|
+
model = Model(inputs = base_model.input, outputs = predictions)
|
98
|
+
|
99
|
+
|
100
|
+
|
101
|
+
for layer in model.layers[:17]:
|
102
|
+
|
103
|
+
layer.trainable = False
|
104
|
+
|
105
|
+
for layer in model.layers[17:]:
|
106
|
+
|
107
|
+
layer.trainable = True
|
108
|
+
|
109
|
+
|
110
|
+
|
111
|
+
return model
|
112
|
+
|
113
|
+
|
114
|
+
|
115
|
+
model = create_vgg16()
|
116
|
+
|
117
|
+
model.compile(
|
118
|
+
|
119
|
+
optimizer = Adam(),
|
120
|
+
|
121
|
+
loss = 'categorical_crossentropy',
|
122
|
+
|
123
|
+
metrics = ["accuracy"]
|
124
|
+
|
125
|
+
)
|
126
|
+
|
127
|
+
|
128
|
+
|
129
|
+
train_datagen=ImageDataGenerator(
|
130
|
+
|
131
|
+
rescale=1.0/255,
|
132
|
+
|
133
|
+
shear_range=0.2,
|
134
|
+
|
135
|
+
zoom_range=0.2,
|
136
|
+
|
137
|
+
vertical_flip=True,
|
138
|
+
|
139
|
+
horizontal_flip=True,
|
140
|
+
|
141
|
+
height_shift_range=0.5,
|
142
|
+
|
143
|
+
width_shift_range=0.5,
|
144
|
+
|
145
|
+
channel_shift_range=5.0,
|
146
|
+
|
147
|
+
brightness_range=[0.3,1.0],
|
148
|
+
|
149
|
+
fill_mode='nearest'
|
150
|
+
|
151
|
+
)
|
152
|
+
|
153
|
+
|
154
|
+
|
155
|
+
validation_datagen=ImageDataGenerator(rescale=1.0/255)
|
156
|
+
|
157
|
+
|
158
|
+
|
159
|
+
train_generator=train_datagen.flow_from_directory(
|
160
|
+
|
161
|
+
train_dir,
|
162
|
+
|
163
|
+
target_size=(input_image_size,input_image_size),
|
164
|
+
|
165
|
+
batch_size=batch_size,
|
166
|
+
|
167
|
+
class_mode='categorical',
|
168
|
+
|
169
|
+
shuffle=True
|
170
|
+
|
171
|
+
)
|
172
|
+
|
173
|
+
|
174
|
+
|
175
|
+
validation_generator=validation_datagen.flow_from_directory(
|
176
|
+
|
177
|
+
validation_dir,
|
178
|
+
|
179
|
+
target_size=(input_image_size,input_image_size),
|
180
|
+
|
181
|
+
batch_size=batch_size,
|
182
|
+
|
183
|
+
class_mode='categorical',
|
184
|
+
|
185
|
+
shuffle=False
|
186
|
+
|
187
|
+
)
|
188
|
+
|
189
|
+
|
190
|
+
|
191
|
+
history=model.fit_generator(
|
192
|
+
|
193
|
+
train_generator,
|
194
|
+
|
195
|
+
epochs=n_epochs,
|
196
|
+
|
197
|
+
verbose=1,
|
198
|
+
|
199
|
+
validation_data=validation_generator
|
200
|
+
|
201
|
+
)
|
202
|
+
|
203
|
+
|
204
|
+
|
205
|
+
#Confusion Matrix and Classification Report
|
206
|
+
|
207
|
+
def plot_confusion_matrix(cm, classes, cmap):
|
208
|
+
|
209
|
+
|
210
|
+
|
211
|
+
plt.imshow(cm, cmap=cmap)
|
212
|
+
|
213
|
+
plt.colorbar()
|
214
|
+
|
215
|
+
plt.ylabel('True label')
|
216
|
+
|
217
|
+
plt.xlabel('Predicted label')
|
218
|
+
|
219
|
+
plt.title('Confusion Matrix')
|
220
|
+
|
221
|
+
tick_marks = np.arange(len(classes))
|
222
|
+
|
223
|
+
plt.xticks(tick_marks, classes, rotation=45)
|
224
|
+
|
225
|
+
plt.yticks(tick_marks, classes)
|
226
|
+
|
227
|
+
plt.tight_layout()
|
228
|
+
|
229
|
+
plt.savefig(os.path.join(result_path, 'confuse_' + file_name + '.png'))
|
230
|
+
|
231
|
+
|
232
|
+
|
233
|
+
Y_pred = model.predict_generator(validation_generator)
|
234
|
+
|
235
|
+
y_pred = np.argmax(Y_pred, axis=1)
|
236
|
+
|
237
|
+
true_class = validation_generator.classes
|
238
|
+
|
239
|
+
class_labels = list(validation_generator.class_indices.keys())
|
240
|
+
|
241
|
+
cm = confusion_matrix(true_class, y_pred)
|
242
|
+
|
243
|
+
cmap = plt.cm.Blues
|
244
|
+
|
245
|
+
plot_confusion_matrix(cm, classes=class_labels, cmap=cmap)
|
246
|
+
|
247
|
+
```
|
248
|
+
|
249
|
+
|
250
|
+
|
251
|
+
ちなみに画像データ格納フォルダの階層は次のような感じです。
|
252
|
+
|
253
|
+
|
254
|
+
|
255
|
+
data
|
256
|
+
|
257
|
+
-processed
|
258
|
+
|
259
|
+
--Train
|
260
|
+
|
261
|
+
---A
|
262
|
+
|
263
|
+
----sample01.png
|
264
|
+
|
265
|
+
----sample02.png
|
28
266
|
|
29
267
|
・・・
|
30
268
|
|
31
|
-
--B
|
32
|
-
|
33
|
-
--C
|
34
|
-
|
35
|
-
--D
|
36
|
-
|
37
|
-
-Validation
|
38
|
-
|
39
|
-
--A
|
40
|
-
|
41
|
-
--B
|
42
|
-
|
43
|
-
--C
|
44
|
-
|
45
|
-
--D
|
46
|
-
|
47
|
-
-Test
|
48
|
-
|
49
|
-
--A
|
50
|
-
|
51
|
-
--B
|
52
|
-
|
53
|
-
--C
|
54
|
-
|
55
|
-
--D
|
56
|
-
|
57
|
-
|
58
|
-
|
59
|
-
ここで手順3のモデル評価時にValidationデータの予想結果を混同行列を使って可視化したいと思っています。(accやlossは図示できました)
|
60
|
-
|
61
|
-
confusion_matrix()を使い、引数に実クラスと予想クラスを入れればいいと思うのですが、
|
62
|
-
|
63
|
-
実クラスと予想クラスの抽出方法がわかりません。
|
64
|
-
|
65
|
-
ネットに出てくる例だとImageDataGeneratorを使わずに画像を読み込み、行列に変換してやっているのを見るのですが、
|
66
|
-
|
67
|
-
ImageDataGeneratorを使って画像を読み込んだ際にはどのような処理をすればいいのでしょうか?
|
68
|
-
|
69
|
-
|
70
|
-
|
71
|
-
現在のコードは下記の通りです。
|
72
|
-
|
73
|
-
このコードの下にどのような記述をすればいいのかご教示お願い致します。
|
74
|
-
|
75
|
-
|
76
|
-
|
77
|
-
```ここに言語を入力
|
78
|
-
|
79
|
-
import os
|
80
|
-
|
81
|
-
import random
|
82
|
-
|
83
|
-
from pathlib import Path
|
84
|
-
|
85
|
-
import matplotlib.pyplot as plt
|
86
|
-
|
87
|
-
import numpy as np
|
88
|
-
|
89
|
-
import keras.models
|
90
|
-
|
91
|
-
from keras.models import Model
|
92
|
-
|
93
|
-
from keras.models import Sequential
|
94
|
-
|
95
|
-
from keras.layers import Dense, Input, Activation, Dropout, Flatten
|
96
|
-
|
97
|
-
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D
|
98
|
-
|
99
|
-
from keras.layers import MaxPool2D
|
100
|
-
|
101
|
-
from keras.applications.vgg16 import VGG16
|
102
|
-
|
103
|
-
from keras.preprocessing.image import ImageDataGenerator
|
104
|
-
|
105
|
-
from keras.preprocessing.image import img_to_array, load_img
|
106
|
-
|
107
|
-
from keras.optimizers import Adam, RMSprop, SGD
|
108
|
-
|
109
|
-
|
110
|
-
|
111
|
-
program_path = Path(__file__).parent.resolve()
|
112
|
-
|
113
|
-
parent_path = program_path.parent.resolve()
|
114
|
-
|
115
|
-
data_path = parent_path / 'data'
|
116
|
-
|
117
|
-
data_processed_path = data_path / 'processed'
|
118
|
-
|
119
|
-
|
120
|
-
|
121
|
-
train_dir = os.path.join(data_processed_path, 'Train')
|
122
|
-
|
123
|
-
validation_dir = os.path.join(data_processed_path, 'Validation')
|
124
|
-
|
125
|
-
test_dir = os.path.join(data_processed_path, 'Test')
|
126
|
-
|
127
|
-
|
128
|
-
|
129
|
-
label = os.listdir(test_dir)
|
130
|
-
|
131
|
-
n_categories = len(label)
|
132
|
-
|
133
|
-
|
134
|
-
|
135
|
-
n_epochs = 500
|
136
|
-
|
137
|
-
batch_size = 8
|
138
|
-
|
139
|
-
input_image_size = 224
|
140
|
-
|
141
|
-
|
142
|
-
|
143
|
-
def create_vgg16():
|
144
|
-
|
145
|
-
base_model = VGG16(
|
146
|
-
|
147
|
-
include_top = False,
|
148
|
-
|
149
|
-
weights = "imagenet",
|
150
|
-
|
151
|
-
input_shape = None
|
152
|
-
|
153
|
-
)
|
154
|
-
|
155
|
-
|
156
|
-
|
157
|
-
x = base_model.output
|
158
|
-
|
159
|
-
x = GlobalAveragePooling2D()(x)
|
160
|
-
|
161
|
-
x = Dense(1024, activation = 'relu')(x)
|
162
|
-
|
163
|
-
predictions = Dense(n_categories, activation = 'softmax')(x)
|
164
|
-
|
165
|
-
|
166
|
-
|
167
|
-
model = Model(inputs = base_model.input, outputs = predictions)
|
168
|
-
|
169
|
-
|
170
|
-
|
171
|
-
for layer in model.layers[:17]:
|
172
|
-
|
173
|
-
layer.trainable = False
|
174
|
-
|
175
|
-
for layer in model.layers[17:]:
|
176
|
-
|
177
|
-
layer.trainable = True
|
178
|
-
|
179
|
-
|
180
|
-
|
181
|
-
return model
|
182
|
-
|
183
|
-
|
184
|
-
|
185
|
-
model = create_vgg16()
|
186
|
-
|
187
|
-
model.compile(
|
188
|
-
|
189
|
-
optimizer = Adam(),
|
190
|
-
|
191
|
-
loss = 'categorical_crossentropy',
|
192
|
-
|
193
|
-
metrics = ["accuracy"]
|
194
|
-
|
195
|
-
)
|
196
|
-
|
197
|
-
|
198
|
-
|
199
|
-
train_datagen=ImageDataGenerator(
|
200
|
-
|
201
|
-
rescale=1.0/255,
|
202
|
-
|
203
|
-
shear_range=0.2,
|
204
|
-
|
205
|
-
zoom_range=0.2,
|
206
|
-
|
207
|
-
vertical_flip=True,
|
208
|
-
|
209
|
-
horizontal_flip=True,
|
210
|
-
|
211
|
-
height_shift_range=0.5,
|
212
|
-
|
213
|
-
width_shift_range=0.5,
|
214
|
-
|
215
|
-
channel_shift_range=5.0,
|
216
|
-
|
217
|
-
brightness_range=[0.3,1.0],
|
218
|
-
|
219
|
-
fill_mode='nearest'
|
220
|
-
|
221
|
-
)
|
222
|
-
|
223
|
-
|
224
|
-
|
225
|
-
validation_datagen=ImageDataGenerator(rescale=1.0/255)
|
226
|
-
|
227
|
-
|
228
|
-
|
229
|
-
train_generator=train_datagen.flow_from_directory(
|
230
|
-
|
231
|
-
train_dir,
|
232
|
-
|
233
|
-
target_size=(input_image_size,input_image_size),
|
234
|
-
|
235
|
-
batch_size=batch_size,
|
236
|
-
|
237
|
-
class_mode='categorical',
|
238
|
-
|
239
|
-
shuffle=True
|
240
|
-
|
241
|
-
)
|
242
|
-
|
243
|
-
|
244
|
-
|
245
|
-
validation_generator=validation_datagen.flow_from_directory(
|
246
|
-
|
247
|
-
validation_dir,
|
248
|
-
|
249
|
-
target_size=(input_image_size,input_image_size),
|
250
|
-
|
251
|
-
batch_size=batch_size,
|
252
|
-
|
253
|
-
class_mode='categorical',
|
254
|
-
|
255
|
-
shuffle=True
|
256
|
-
|
257
|
-
)
|
258
|
-
|
259
|
-
|
260
|
-
|
261
|
-
history=model.fit_generator(
|
262
|
-
|
263
|
-
train_generator,
|
264
|
-
|
265
|
-
epochs=n_epochs,
|
266
|
-
|
267
|
-
verbose=1,
|
268
|
-
|
269
|
-
validation_data=validation_generator
|
270
|
-
|
271
|
-
)
|
272
|
-
|
273
|
-
```
|
274
|
-
|
275
|
-
|
269
|
+
---B
|
270
|
+
|
271
|
+
---C
|
272
|
+
|
273
|
+
---D
|
274
|
+
|
275
|
+
--Validation
|
276
|
+
|
277
|
+
---A
|
278
|
+
|
279
|
+
---B
|
280
|
+
|
281
|
+
---C
|
282
|
+
|
283
|
+
---D
|
284
|
+
|
285
|
+
--Test
|
286
|
+
|
287
|
+
---A
|
288
|
+
|
289
|
+
---B
|
290
|
+
|
291
|
+
---C
|
292
|
+
|
293
|
+
---D
|
276
294
|
|
277
295
|
|
278
296
|
|