質問編集履歴
1
学習プログラムの追加
title
CHANGED
|
File without changes
|
body
CHANGED
|
@@ -10,46 +10,134 @@
|
|
|
10
10
|
よろしくお願いいたします。
|
|
11
11
|
|
|
12
12
|
|
|
13
|
-
### 学習
|
|
13
|
+
### 学習プログラム(alexnet.py)
|
|
14
14
|
|
|
15
15
|
```python
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
16
|
+
# coding:utf-8
|
|
17
|
+
|
|
18
|
+
import keras
|
|
19
|
+
from keras.layers import Conv2D, MaxPooling2D, Lambda, Input, Dense, Flatten, BatchNormalization
|
|
20
|
+
from keras.utils import np_utils
|
|
21
|
+
from keras.models import Sequential
|
|
22
|
+
from keras.layers.convolutional import Conv2D, MaxPooling2D
|
|
23
|
+
from keras.layers.core import Dense, Dropout, Activation, Flatten
|
|
24
|
+
import numpy as np
|
|
25
|
+
from sklearn.model_selection import train_test_split
|
|
26
|
+
from PIL import Image
|
|
27
|
+
import glob
|
|
28
|
+
from keras.utils import plot_model
|
|
29
|
+
import matplotlib.pyplot as plt
|
|
30
|
+
import tensorflow as tf
|
|
31
|
+
|
|
32
|
+
folder = ["0","1"]
|
|
33
|
+
image_size = 224
|
|
34
|
+
epoch_size = 10
|
|
35
|
+
|
|
36
|
+
X = []
|
|
37
|
+
Y = []
|
|
38
|
+
|
|
39
|
+
for index, name in enumerate(folder):
|
|
40
|
+
dir = "./" + name
|
|
41
|
+
files = glob.glob(dir + "/*.jpg")
|
|
42
|
+
for i, file in enumerate(files):
|
|
43
|
+
image = Image.open(file)
|
|
44
|
+
image = image.convert("RGB")
|
|
45
|
+
image = image.resize((image_size, image_size))
|
|
46
|
+
data = np.asarray(image)
|
|
47
|
+
X.append(data)
|
|
48
|
+
Y.append(index)
|
|
49
|
+
#Xは画像データ、Yは正解ラベルのデータ
|
|
50
|
+
X = np.array(X)
|
|
51
|
+
Y = np.array(Y)
|
|
52
|
+
#画像データを0から1の範囲に変換
|
|
53
|
+
X = X.astype('float32')
|
|
54
|
+
X = X / 255.0
|
|
55
|
+
|
|
56
|
+
#正解ラベルの形式を変換
|
|
57
|
+
#つまり、ラベルを[0, 0, 0, 1]のようなベクトルにする。値はラベルの数に合わせる。
|
|
58
|
+
Y = np_utils.to_categorical(Y, 2)
|
|
59
|
+
|
|
60
|
+
# 学習用データとテストデータに分割
|
|
61
|
+
#train_test_split 関数はデータをランダムに、好きの割合で分割できる関数。
|
|
62
|
+
#X_train(訓練データ), X_test(テストデータ), y_train(訓練ラベル), y_test(テストラベル)
|
|
63
|
+
#test_sizeはテストデータにする割合
|
|
64
|
+
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.25)
|
|
65
|
+
|
|
66
|
+
model = Sequential()
|
|
67
|
+
|
|
68
|
+
model.add(Conv2D(48, 11, strides=(3, 3), activation='relu', padding='same',input_shape=X_train.shape[1:]))
|
|
69
|
+
model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
|
|
70
|
+
model.add(BatchNormalization())
|
|
71
|
+
model.add(Conv2D(128, 5, strides=(3, 3), activation='relu', padding='same'))
|
|
72
|
+
model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
|
|
73
|
+
model.add(BatchNormalization())
|
|
74
|
+
model.add(Conv2D(192, 3, strides=(1, 1), activation='relu', padding='same'))
|
|
75
|
+
model.add(Conv2D(192, 3, strides=(1, 1), activation='relu', padding='same'))
|
|
76
|
+
model.add(Conv2D(128, 3, strides=(1, 1), activation='relu', padding='same'))
|
|
77
|
+
model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
|
|
78
|
+
model.add(BatchNormalization())
|
|
79
|
+
|
|
80
|
+
model.add(Flatten())
|
|
81
|
+
model.add(Dense(2048, activation='relu'))
|
|
82
|
+
model.add(Dropout(0.5))
|
|
83
|
+
model.add(Dense(2048, activation='relu'))
|
|
84
|
+
model.add(Dropout(0.5))
|
|
85
|
+
|
|
86
|
+
model.add(Dense(2, activation='softmax'))
|
|
87
|
+
#model.add(Activation('softmax'))
|
|
88
|
+
|
|
89
|
+
|
|
90
|
+
model.summary();
|
|
91
|
+
|
|
92
|
+
model.compile(loss='categorical_crossentropy',optimizer='SGD',metrics=['accuracy'])
|
|
93
|
+
|
|
94
|
+
history = model.fit(X_train, y_train, epochs=epoch_size, verbose=1, validation_split=0.15)
|
|
95
|
+
|
|
96
|
+
#評価 & 評価結果出力
|
|
97
|
+
#print(model.evaluate(X_test, y_test))
|
|
98
|
+
score = model.evaluate(X_test, y_test, verbose=0)
|
|
99
|
+
print('Test loss :', score[0])
|
|
100
|
+
print('Test accuracy :', score[1])
|
|
101
|
+
|
|
102
|
+
# モデルをプロット
|
|
103
|
+
plot_model(model, to_file='./model3.png')
|
|
104
|
+
|
|
105
|
+
#loss: 訓練データの損失値
|
|
106
|
+
#val_loss: テストデータの損失値
|
|
107
|
+
loss = history.history['loss']
|
|
108
|
+
val_loss = history.history['val_loss']
|
|
109
|
+
|
|
110
|
+
# lossのグラフ
|
|
111
|
+
plt.plot(range(epoch_size), loss, marker='.', label='loss')
|
|
112
|
+
plt.plot(range(epoch_size), val_loss, marker='.', label='val_loss')
|
|
113
|
+
plt.legend(loc='best', fontsize=10)
|
|
114
|
+
plt.grid()
|
|
115
|
+
plt.xlabel('epoch')
|
|
116
|
+
plt.ylabel('loss')
|
|
117
|
+
plt.show()
|
|
118
|
+
|
|
119
|
+
#acc: 訓練データの精度
|
|
120
|
+
#val_acc: テストデータの精度
|
|
121
|
+
acc = history.history['acc']
|
|
122
|
+
val_acc = history.history['val_acc']
|
|
123
|
+
|
|
124
|
+
# accuracyのグラフ
|
|
125
|
+
plt.plot(range(epoch_size), acc, marker='.', label='acc')
|
|
126
|
+
plt.plot(range(epoch_size), val_acc, marker='.', label='val_acc')
|
|
127
|
+
plt.legend(loc='best', fontsize=10)
|
|
128
|
+
plt.grid()
|
|
129
|
+
plt.xlabel('epoch')
|
|
130
|
+
plt.ylabel('acc')
|
|
131
|
+
plt.show()
|
|
132
|
+
|
|
133
|
+
|
|
134
|
+
### save weights
|
|
135
|
+
json_string = model.to_json()
|
|
136
|
+
open('alexnet_model.json', 'w').write(json_string)
|
|
137
|
+
model.save_weights('alexnet_weights.h5')
|
|
138
|
+
|
|
139
|
+
_____________________________________________________________
|
|
140
|
+
|
|
53
141
|
```
|
|
54
142
|
|
|
55
143
|
### 保存済みの重みデータ
|