Answer edit history

1

Code added

2021/09/20 08:15

Posted

jbpb0

Score: 7651

test CHANGED
@@ -11,3 +11,301 @@
  Y1_label.append(index1)
 
  ```
+
+ [Addendum]
+
+ In addition to the fix above, I also corrected every spot that looked like an obvious typo. Running the resulting code below on Google Colab, with a dataset of images sorted into two arbitrary classes, it trained normally, and each value shown by the `print` statements looked reasonably correct. (The directory layout the script expects is sketched after the diff.)
+
+ ```python
+ #import keras
+ import glob
+ import numpy as np
+ from sklearn.model_selection import train_test_split
+ from tensorflow.keras.preprocessing.image import load_img, img_to_array
+ from tensorflow.keras.models import Sequential
+ from tensorflow.keras.layers import Conv2D, MaxPooling2D
+ from tensorflow.keras.layers import Dense, Dropout, Flatten
+ from tensorflow.keras.utils import to_categorical
+ from tensorflow.keras.optimizers import Adam
+ import matplotlib.pyplot as plt
+ import time
+ import pickle
+ from sklearn.metrics import confusion_matrix
+ from sklearn import metrics
+ from sklearn.metrics import precision_score
+ from sklearn.metrics import recall_score
+
+ '''
+ def set_random_seed(seed):
+     random.seed(seed)
+     np.random.seed(seed)
+     tf.set_random_seed(seed)
+ '''
+
+ train_data_path = 'dataset'
+ test_data_path = 'dataset1'
+
+ image_size = 28
+ color_setting = 3  # 1: grayscale, 3: RGB
+
+ folder = ['normal', 'abnormal']
+ folders = ['normal', 'abnormal']
+
+ class_number = len(folder)
+ print('Number of classes in this data:', str(class_number))
+
+ X_image = []
+ Y_label = []
+ X1_image = []
+ Y1_label = []
+
+ # Load the training images
+ for index, name in enumerate(folder):
+     read_data = train_data_path + '/' + name
+     files = glob.glob(read_data + '/*.jpg')
+     print('--- Loaded dataset:', read_data)
+     for i, file in enumerate(files):
+         if color_setting == 1:
+             img = load_img(file, color_mode='grayscale', target_size=(image_size, image_size))
+         elif color_setting == 3:
+             img = load_img(file, color_mode='rgb', target_size=(image_size, image_size))
+         array = img_to_array(img)
+         X_image.append(array)
+         Y_label.append(index)
+
+ # Load the test images
+ for index1, name in enumerate(folders):
+     read_data1 = test_data_path + '/' + name
+     files1 = glob.glob(read_data1 + '/*.jpg')
+     print('--- Loaded dataset:', read_data1)
+     for j, file1 in enumerate(files1):
+         if color_setting == 1:
+             img1 = load_img(file1, color_mode='grayscale', target_size=(image_size, image_size))
+         elif color_setting == 3:
+             img1 = load_img(file1, color_mode='rgb', target_size=(image_size, image_size))
+         array = img_to_array(img1)
+         X1_image.append(array)
+         #Y1_label.append(index)
+         Y1_label.append(index1)
+
+ X_image = np.array(X_image)
+ Y_label = np.array(Y_label)
+
+ X1_image = np.array(X1_image)
+ Y1_label = np.array(Y1_label)
+
+ # Scale pixel values to [0, 1] and one-hot encode the labels
+ X_image = X_image.astype('float32') / 255
+ Y_label = to_categorical(Y_label, class_number)
+
+ X1_image = X1_image.astype('float32') / 255
+ Y1_label = to_categorical(Y1_label, class_number)
+
+ x_train = X_image
+ y_train = Y_label
+ x_test = X1_image
+ y_test = Y1_label
+
+ #4 Build the machine-learning (AI) model - a convolutional neural network (CNN) - and run training
+
+ model = Sequential()
+ model.add(Conv2D(16, (3, 3), padding='same',
+           input_shape=(image_size, image_size, color_setting), activation='relu'))
+ model.add(MaxPooling2D(pool_size=(2, 2)))
+ model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
+ model.add(Conv2D(256, (3, 3), padding='same', activation='relu'))
+ model.add(MaxPooling2D(pool_size=(2, 2)))
+ model.add(Dropout(0.5))
+ model.add(Flatten())
+ model.add(Dense(128, activation='relu'))
+ model.add(Dropout(0.25))
+ model.add(Dense(class_number, activation='softmax'))
+
+ model.summary()
+
+ model.compile(loss='categorical_crossentropy',
+               optimizer=Adam(),
+               metrics=['accuracy'])
+
+ start_time = time.time()
+
+ history = model.fit(x_train, y_train, batch_size=128, epochs=5, verbose=1, validation_data=(x_test, y_test))
+
+ # Confusion matrix, recall, and precision on the test data
+ print(metrics.confusion_matrix(y_test.argmax(axis=1), model.predict(x_test).argmax(axis=1)))
+
+ print(recall_score(y_test.argmax(axis=1), model.predict(x_test).argmax(axis=1)))
+
+ print(precision_score(y_test.argmax(axis=1), model.predict(x_test).argmax(axis=1)))
+
+ # Accuracy curves
+ plt.plot(history.history['accuracy'])
+ plt.plot(history.history['val_accuracy'])
+ plt.title('Model accuracy')
+ plt.ylabel('Accuracy')
+ plt.xlabel('Epoch')
+ plt.grid()
+ plt.legend(['Train', 'Validation'], loc='upper left')
+ plt.show()
+
+ # Loss curves
+ plt.plot(history.history['loss'])
+ plt.plot(history.history['val_loss'])
+ plt.title('Model loss')
+ plt.ylabel('Loss')
+ plt.xlabel('Epoch')
+ plt.grid()
+ plt.legend(['Train', 'Validation'], loc='upper left')
+ plt.show()
+
+ # Save the model structure and the trained weights
+ open('cnn_model.json', 'w').write(model.to_json())
+ model.save_weights('cnn_weights.h5')
+ #model.save('cnn_model_weight.h5')  # the structure and weights can also be saved in a single file
+
+ score = model.evaluate(x_test, y_test, verbose=0)
+ print('Loss:', score[0], '(loss value; closer to 0 is better)')
+ print('Accuracy:', score[1] * 100, '%', '(accuracy; closer to 100% is better)')
+ print('Computation time: {0:.3f} sec'.format(time.time() - start_time))
+ ```
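
For reference, the script reads training images from `dataset/` and test images from `dataset1/`, with one subfolder per class as listed in `folder`/`folders`, and picks up every `*.jpg` file found there. A layout along the following lines is assumed (the image file names are only placeholders):

```
dataset/
    normal/
        001.jpg, 002.jpg, ...
    abnormal/
        001.jpg, 002.jpg, ...
dataset1/
    normal/
        101.jpg, 102.jpg, ...
    abnormal/
        101.jpg, 102.jpg, ...
```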
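
The script writes the model structure to `cnn_model.json` and the weights to `cnn_weights.h5`. As a minimal sketch of how they could be loaded back for inference (assuming those two files are in the working directory; `some_image.jpg` is only a placeholder path):

```python
import numpy as np
from tensorflow.keras.models import model_from_json
from tensorflow.keras.preprocessing.image import load_img, img_to_array

# Rebuild the architecture from the saved JSON and restore the trained weights
model = model_from_json(open('cnn_model.json').read())
model.load_weights('cnn_weights.h5')

# Preprocess one image the same way as during training: 28x28 RGB, scaled to [0, 1]
img = load_img('some_image.jpg', color_mode='rgb', target_size=(28, 28))  # placeholder path
x = np.expand_dims(img_to_array(img).astype('float32') / 255, axis=0)  # add the batch dimension

# The predicted class index follows the order of the folder list: 0 = 'normal', 1 = 'abnormal'
print(model.predict(x).argmax(axis=1))
```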