Question edit history

Revision 3: Grammar fix

test: file without changes

test: CHANGED
@@ -44,23 +44,7 @@
 path_one=path + '/パス/one'
-path_two=path + '/パス/two'
-path_three=path + '/パス/three'
-path_four=path + '/パス/four'
-path_five=path + '/パス/five'
-path_six=path + '/パス/six'
-path_seven=path + '/パス/seven'
-path_eight=path + '/パス/eight'
-path_nine=path + '/パス/nine'
-path_zero=path + '/パス/zero'
 in_size=(28,28)

@@ -69,24 +53,6 @@
 file_1=glob.glob(path_one +'/*.jpg')
-file_2=glob.glob(path_two +'/*.jpg')
-file_3=glob.glob(path_three +'/*.jpg')
-file_4=glob.glob(path_four +'/*.jpg')
-file_5=glob.glob(path_five +'/*.jpg')
-file_6=glob.glob(path_six +'/*.jpg')
-file_7=glob.glob(path_seven +'/*.jpg')
-file_8=glob.glob(path_eight +'/*.jpg')
-file_9=glob.glob(path_nine +'/*.jpg')
-file_0=glob.glob(path_zero +'/*.jpg')

@@ -120,250 +86,6 @@
-def load_dir_2(path,label):
-    for i in file_2:
-        img=cv2.imread(i)
-        img=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
-        img=cv2.resize(img,in_size)
-        img=img/255.0
-        x.append(img)
-        y.append(label)
-    return [x,y]
-
-def load_dir_3(path,label):
-    for i in file_3:
-        img=cv2.imread(i)
-        img=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
-        img=cv2.resize(img,in_size)
-        img=img/255.0
-        x.append(img)
-        y.append(label)
-    return [x,y]
-
-def load_dir_4(path,label):
-    for i in file_4:
-        img=cv2.imread(i)
-        img=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
-        img=cv2.resize(img,in_size)
-        img=img/255.0
-        x.append(img)
-        y.append(label)
-    return [x,y]
-
-def load_dir_5(path,label):
-    for i in file_5:
-        img=cv2.imread(i)
-        img=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
-        img=cv2.resize(img,in_size)
-        img=img/255.0
-        x.append(img)
-        y.append(label)
-    return [x,y]
-
-def load_dir_6(path,label):
-    for i in file_6:
-        img=cv2.imread(i)
-        img=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
-        img=cv2.resize(img,in_size)
-        img=img/255.0
-        x.append(img)
-        y.append(label)
-    return [x,y]
-
-def load_dir_7(path,label):
-    for i in file_7:
-        img=cv2.imread(i)
-        img=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
-        img=cv2.resize(img,in_size)
-        img=img/255.0
-        x.append(img)
-        y.append(label)
-    return [x,y]
-
-def load_dir_8(path,label):
-    for i in file_8:
-        img=cv2.imread(i)
-        img=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
-        img=cv2.resize(img,in_size)
-        img=img/255.0
-        x.append(img)
-        y.append(label)
-    return [x,y]
-
-def load_dir_9(path,label):
-    for i in file_9:
-        img=cv2.imread(i)
-        img=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
-        img=cv2.resize(img,in_size)
-        img=img/255.0
-        x.append(img)
-        y.append(label)
-    return [x,y]
-
-def load_dir_0(path,label):
-    for i in file_0:
-        img=cv2.imread(i)
-        img=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
-        img=cv2.resize(img,in_size)
-        img=img/255.0
-        x.append(img)
-        y.append(label)
-    return [x,y]
-
-# Convert to numeric data so it can be fed into the Keras model
-load_dir_1(path_one,1)
-load_dir_2(path_two,2)
-load_dir_3(path_three,3)
-load_dir_4(path_four,4)
-load_dir_5(path_five,5)
-load_dir_6(path_six,6)
-load_dir_7(path_seven,7)
-load_dir_8(path_eight,8)
-load_dir_9(path_nine,9)
-load_dir_0(path_zero,0)
 #print(y)
 # Convert the list of numeric data into a numpy array

@@ -378,38 +100,6 @@
-# Check the training data and labels
-print(x_train.shape,y_train.shape)
-# Check the dimensions of the test data and labels
-print(x_test.shape,y_test.shape)
-print(x_test[111].shape)
-# Restore the numeric data to its original scale
-x_test[9]=x_test[9]*255
-# Paste
-plt.imshow(x_test[9])
-# Display
-plt.show()
-# Display the label
-print(y_test[9])

@@ -419,32 +109,6 @@
 x_test=x_test.reshape(len(x_test),28,28,1).astype('float32')/255
-print(x_train.shape,x_test.shape)
-# Output (labels, image dimensions, etc.)
-(870, 28, 28) (870,)
-(218, 28, 28) (218,)
-(28, 28)
-4
-(870, 28, 28, 1) (218, 28, 28, 1)

@@ -458,168 +122,12 @@
 from keras.utils import to_categorical
-#x_train=x_train.reshape(-1,784).astype('float32')/255
-#x_test=x_test.reshape(-1,784).astype('float32')/255
 y_train=keras.utils.np_utils.to_categorical(y_train.astype('int32'),10)
 y_test=keras.utils.np_utils.to_categorical(y_test.astype('int32'),10)
-from keras import models
-from keras import layers
-from keras.layers import Dense,Dropout,MaxPooling2D,Convolution2D
-model=models.Sequential()
-model.add(layers.Conv2D(32,(3,3),activation='relu',input_shape=(28,28,1)))
-model.add(layers.MaxPooling2D((2,2)))
-model.add(Dropout(0.2))
-model.add(layers.Conv2D(64,(3,3),activation='relu'))
-model.add(layers.MaxPooling2D(2,2))
-model.add(Dropout(0.2))
-model.add(layers.Conv2D(64,(3,3),activation='relu'))
-model.add(layers.Flatten())
-model.add(layers.Dense(64,activation='relu'))
-model.add(layers.Dense(out_size,activation='softmax'))
-# Shuffle the data
-data_number = len(x_train)
-shuffled_num = np.arange(data_number)
-np.random.shuffle(shuffled_num)
-for i in range(data_number):
-    x_train[shuffled_num[i]], x_train[i] = x_train[i], x_train[shuffled_num[i]]
-    y_train[shuffled_num[i]], y_train[i] = y_train[i], y_train[shuffled_num[i]]
-print(y_train[9])
-# Output
-[0. 0. 1. 0. 0. 0. 0. 0. 0. 0.]
-from keras import optimizers
-model.compile(optimizer='adam',
-              loss='categorical_crossentropy',
-              metrics=['accuracy'])
-model.fit(x_train,y_train,batch_size=10,epochs=10)
-model.save('keras_number.h5')
-Epoch 1/10
-870/870 [==============================] - 1s 1ms/step - loss: 2.1910 - accuracy: 0.1920
-Epoch 2/10
-870/870 [==============================] - 1s 1ms/step - loss: 2.1406 - accuracy: 0.1943
-Epoch 3/10
-870/870 [==============================] - 1s 1ms/step - loss: 2.1419 - accuracy: 0.1828
-Epoch 4/10
-870/870 [==============================] - 1s 1ms/step - loss: 2.1349 - accuracy: 0.1874
-Epoch 5/10
-870/870 [==============================] - 1s 1ms/step - loss: 2.1318 - accuracy: 0.1805
-Epoch 6/10
-870/870 [==============================] - 1s 1ms/step - loss: 2.1369 - accuracy: 0.1897
-Epoch 7/10
-870/870 [==============================] - 1s 1ms/step - loss: 2.1393 - accuracy: 0.1885
-Epoch 8/10
-870/870 [==============================] - 1s 1ms/step - loss: 2.1365 - accuracy: 0.1782
-Epoch 9/10
-870/870 [==============================] - 1s 988us/step - loss: 2.1387 - accuracy: 0.1897
-Epoch 10/10
-870/870 [==============================] - 1s 1ms/step - loss: 2.1336 - accuracy: 0.1816
 ```
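Note on this revision: the code removed above repeated the same path/glob/load pattern once per digit. As a minimal sketch (the folder layout, the dictionary, and the helper name are assumptions for illustration, not from the original question), the ten load_dir_N functions, the ten path_N variables, and the ten file_N globs can be collapsed into one loop:

```python
import glob
import os

import cv2
import numpy as np

in_size = (28, 28)  # input size used in the question

# Assumed layout from the removed code: <path>/パス/<digit name>/*.jpg
digit_dirs = {1: 'one', 2: 'two', 3: 'three', 4: 'four', 5: 'five',
              6: 'six', 7: 'seven', 8: 'eight', 9: 'nine', 0: 'zero'}

def load_digits(path):
    """Load every digit folder with one function instead of ten copies."""
    x, y = [], []
    for label, name in digit_dirs.items():
        for fname in glob.glob(os.path.join(path, 'パス', name, '*.jpg')):
            img = cv2.imread(fname)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            img = cv2.resize(img, in_size)
            x.append(img / 255.0)
            y.append(label)
    return np.array(x), np.array(y)
```

The removed shuffle loop also swapped rows of NumPy arrays in place; because x_train[i] returns a view, that pattern can overwrite rows rather than swap them. A permutation-based shuffle (again a sketch, assuming x_train and y_train are NumPy arrays of equal length) keeps images and labels paired:

```python
# One random permutation applied to both arrays keeps (image, label) pairs intact.
perm = np.random.permutation(len(x_train))
x_train, y_train = x_train[perm], y_train[perm]
```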
Revision 2: Grammar fix

test: file without changes

test: CHANGED
@@ -376,20 +376,6 @@
 x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.2)
-#print(x.shape)
-#print(y_train)
-In [14]:

@@ -442,7 +428,7 @@
-
+# Output (labels, image dimensions, etc.)
 (870, 28, 28) (870,)

@@ -456,15 +442,11 @@
 4
 (870, 28, 28, 1) (218, 28, 28, 1)
-
+

@@ -496,12 +478,6 @@
-In [16]:
 from keras import models

@@ -534,24 +510,6 @@
 model.add(layers.Dense(out_size,activation='softmax'))
-#model.add(layers.Dense(512,activation='relu',input_shape=(784,)))
-#model.add(Dropout(0.2))
-#model.add(Dense(512,activation='relu'))
-#model.add(Dropout(0.2))
-#model.add(layers.Dense(out_size,activation='softmax'))
-In [17]:

@@ -588,11 +546,11 @@
-
+# Output
 [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.]
-
+
Revision 1: Grammar fix

test: file without changes

test: CHANGED
@@ -6,7 +6,7 @@
 I tried various things, but the result did not change.
-When I reduced the number of digit classes, the loss and the accuracy went up, but the loss was not actually decreasing; the accuracy only rose because there were fewer classes.
+When I reduced the number of digit classes and increased the amount of data per digit, the loss and the accuracy went up, but the loss was not actually decreasing; the accuracy only rose because there were fewer classes.
 I do not know what is causing this, so I would appreciate any guidance.

@@ -18,6 +18,12 @@
+```ここに言語を入力
 # Load image data from the image folders
 import glob,os

@@ -657,3 +663,5 @@
 Epoch 10/10
 870/870 [==============================] - 1s 1ms/step - loss: 2.1336 - accuracy: 0.1816
+```