Answer edit history

1

Code added

2020/02/16 02:03

Posted

8524ba23

Score: 38341

test CHANGED
@@ -29,3 +29,435 @@

For reference.

Training and saving the prediction model

```Python
'''
#Train a simple deep CNN on the CIFAR10 small images dataset.

It gets to 75% validation accuracy in 25 epochs, and 79% after 50 epochs.
(it's still underfitting at that point, though).
'''
import keras
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
import os

batch_size = 32
num_classes = 10
epochs = 100
data_augmentation = True
num_predictions = 20
#save_dir = os.path.join(os.getcwd(), 'saved_models')
save_dir = "."
model_name = 'cnn_model.h5'
w_name = 'cnn_weights.h5'

# The data, split between train and test sets:
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same',
                 input_shape=x_train.shape[1:]))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))

# initiate RMSprop optimizer
opt = keras.optimizers.RMSprop(lr=0.0001, decay=1e-6)

# Let's train the model using RMSprop
model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])

x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255

if not data_augmentation:
    print('Not using data augmentation.')
    model.fit(x_train, y_train,
              batch_size=batch_size,
              epochs=epochs,
              validation_data=(x_test, y_test),
              shuffle=True)
else:
    print('Using real-time data augmentation.')
    # This will do preprocessing and realtime data augmentation:
    datagen = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        zca_epsilon=1e-06,  # epsilon for ZCA whitening
        rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
        # randomly shift images horizontally (fraction of total width)
        width_shift_range=0.1,
        # randomly shift images vertically (fraction of total height)
        height_shift_range=0.1,
        shear_range=0.,  # set range for random shear
        zoom_range=0.,  # set range for random zoom
        channel_shift_range=0.,  # set range for random channel shifts
        # set mode for filling points outside the input boundaries
        fill_mode='nearest',
        cval=0.,  # value used for fill_mode = "constant"
        horizontal_flip=True,  # randomly flip images
        vertical_flip=False,  # randomly flip images
        # set rescaling factor (applied before any other transformation)
        rescale=None,
        # set function that will be applied on each input
        preprocessing_function=None,
        # image data format, either "channels_first" or "channels_last"
        data_format=None,
        # fraction of images reserved for validation (strictly between 0 and 1)
        validation_split=0.0)

    # Compute quantities required for feature-wise normalization
    # (std, mean, and principal components if ZCA whitening is applied).
    datagen.fit(x_train)

    # Fit the model on the batches generated by datagen.flow().
    model.fit_generator(datagen.flow(x_train, y_train,
                                     batch_size=batch_size),
                        epochs=epochs,
                        validation_data=(x_test, y_test),
                        workers=4)

# Save model and weights
if not os.path.isdir(save_dir):
    os.makedirs(save_dir)
model_path = os.path.join(save_dir, model_name)
model.save(model_path)
print('Saved trained model at %s ' % model_path)

w_path = os.path.join(save_dir, w_name)
model.save_weights(w_path)

# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
```
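
As a quick sanity check (a minimal sketch, assuming the script above has already been run and `cnn_model.h5` / `cnn_weights.h5` are in the current directory), the saved model can be reloaded and asked to classify one CIFAR-10 test image:

```Python
# Minimal sanity check: reload the saved model and classify one CIFAR-10 test image.
# Assumes cnn_model.h5 and cnn_weights.h5 were produced by the training script above.
import numpy as np
from keras.datasets import cifar10
from keras.models import load_model

labels = ['airplane', 'automobile', 'bird', 'cat', 'deer',
          'dog', 'frog', 'horse', 'ship', 'truck']

model = load_model('cnn_model.h5')
model.load_weights('cnn_weights.h5')

(_, _), (x_test, y_test) = cifar10.load_data()
x = x_test[:1].astype('float32') / 255  # shape (1, 32, 32, 3), scaled like the training data

pred = model.predict(x)
print('predicted:', labels[int(np.argmax(pred[0]))],
      'actual:', labels[int(y_test[0][0])])
```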

Environment setup and test run

```
>conda create -n cifar10 python=3.6.9 tensorflow=1.15.0 keras=2.2.5 matplotlib flask
>conda activate cifar10
(it complains that the VS2015 build environment is missing, but this can be ignored)
>conda install -c conda-forge opencv
>python app.py
(various warnings are printed, but they can be ignored)
```
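
If you want to confirm that the pinned versions were actually installed, a quick check (just a sketch; run it with the `cifar10` environment activated) is:

```Python
# Quick check that the cifar10 environment has the expected library versions.
import tensorflow as tf
import keras
import cv2
import flask

print('tensorflow:', tf.__version__)  # expected 1.15.0
print('keras:', keras.__version__)    # expected 2.2.5
print('opencv:', cv2.__version__)
print('flask:', flask.__version__)
```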

`app.py`

```Python
import pickle
import numpy as np
from flask import Flask, render_template, request, redirect, url_for

from keras.models import load_model
import cv2
import matplotlib.pyplot as plt
import io
import tensorflow as tf
import sys

app = Flask(__name__)

# Load the machine learning model and its weights
model = load_model("./templates/cnn_model.h5")
model.load_weights("./templates/cnn_weights.h5")

# Keep a handle to the default graph so predict() works from Flask's request threads (TF 1.x pattern)
graph = tf.get_default_graph()

@app.route("/")
def index():
    return render_template("index.html")

@app.route("/result", methods=['POST'])
def result():
    if request.files["image"]:
        img_file = request.files["image"]
        f = img_file.stream.read()
        bin_data = io.BytesIO(f)
        file_bytes = np.asarray(bytearray(bin_data.read()), dtype=np.uint8)

        img = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
        img = cv2.resize(img, (32, 32))

        in_rows = 32
        in_cols = 32
        in_colors = 3

        labels = [
            'airplane',
            'automobile',
            'bird',
            'cat',
            'deer',
            'dog',
            'frog',
            'horse',
            'ship',
            'truck'
        ]

        img = img.reshape(-1, in_rows, in_cols,
                          in_colors).astype("float32") / 255

        with graph.as_default():
            r = model.predict(img, batch_size=32, verbose=1)
            res = r[0]

        print(res, file=sys.stdout)
        return render_template("result.html", res=res, labels=labels)
    else:
        return redirect(url_for('index'))

if __name__ == '__main__':
    app.run(debug=True)
```
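
Once `app.py` is running, the `/result` endpoint can also be exercised without a browser. The following is only a rough sketch: it assumes the app is listening on the default `http://127.0.0.1:5000`, that the `index.html` / `result.html` templates referenced above exist, and `test.png` stands in for any image file you have on disk (the `requests` package may need to be installed separately).

```Python
# Post an image to the running Flask app and print the rendered response.
# "test.png" is a placeholder for any image file on disk.
import requests

with open('test.png', 'rb') as f:
    resp = requests.post('http://127.0.0.1:5000/result',
                         files={'image': ('test.png', f, 'image/png')})

print(resp.status_code)
print(resp.text[:500])  # beginning of the rendered result.html
```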