Question edit history

1

Added the training program

2018/09/29 08:03

Posted

tagaa

Score: 13

test CHANGED
File without changes
test CHANGED
@@ -22,85 +22,261 @@
 
 
 
- ### Training summary
+ ### Training program (alexnet.py)
 
 
 
  ```python
- _________________________________________________________________
- Layer (type)                 Output Shape              Param #
- =================================================================
- conv2d_1 (Conv2D)            (None, 75, 75, 48)        17472
- _________________________________________________________________
- max_pooling2d_1 (MaxPooling2 (None, 37, 37, 48)        0
- _________________________________________________________________
- batch_normalization_1 (Batch (None, 37, 37, 48)        192
- _________________________________________________________________
- conv2d_2 (Conv2D)            (None, 13, 13, 128)       153728
- _________________________________________________________________
- max_pooling2d_2 (MaxPooling2 (None, 6, 6, 128)         0
- _________________________________________________________________
- batch_normalization_2 (Batch (None, 6, 6, 128)         512
- _________________________________________________________________
- conv2d_3 (Conv2D)            (None, 6, 6, 192)         221376
- _________________________________________________________________
- conv2d_4 (Conv2D)            (None, 6, 6, 192)         331968
- _________________________________________________________________
- conv2d_5 (Conv2D)            (None, 6, 6, 128)         221312
- _________________________________________________________________
- max_pooling2d_3 (MaxPooling2 (None, 2, 2, 128)         0
- _________________________________________________________________
- batch_normalization_3 (Batch (None, 2, 2, 128)         512
- _________________________________________________________________
- flatten_1 (Flatten)          (None, 512)               0
- _________________________________________________________________
- dense_1 (Dense)              (None, 2048)              1050624
- _________________________________________________________________
- dropout_1 (Dropout)          (None, 2048)              0
- _________________________________________________________________
- dense_2 (Dense)              (None, 2048)              4196352
- _________________________________________________________________
- dropout_2 (Dropout)          (None, 2048)              0
- _________________________________________________________________
- dense_3 (Dense)              (None, 2)                 4098
- =================================================================
+ # coding:utf-8
+ 
+ import glob
+ import numpy as np
+ import matplotlib.pyplot as plt
+ from PIL import Image
+ from sklearn.model_selection import train_test_split
+ from keras.models import Sequential
+ from keras.layers import Conv2D, MaxPooling2D, Dense, Dropout, Flatten, BatchNormalization
+ from keras.utils import np_utils, plot_model
+ 
+ folder = ["0", "1"]
+ image_size = 224
+ epoch_size = 10
+ 
+ X = []
+ Y = []
+ 
+ for index, name in enumerate(folder):
+     dir = "./" + name
+     files = glob.glob(dir + "/*.jpg")
+     for i, file in enumerate(files):
+         image = Image.open(file)
+         image = image.convert("RGB")
+         image = image.resize((image_size, image_size))
+         data = np.asarray(image)
+         X.append(data)
+         Y.append(index)
+ 
+ # X holds the image data, Y the ground-truth labels
+ X = np.array(X)
+ Y = np.array(Y)
+ 
+ # Scale the pixel values into the range 0 to 1
+ X = X.astype('float32')
+ X = X / 255.0
+ 
+ # Convert the labels to one-hot vectors such as [0, 1];
+ # the vector length matches the number of classes.
+ Y = np_utils.to_categorical(Y, 2)
+ 
+ # Split into training and test data.
+ # train_test_split shuffles the data and splits it at any desired ratio:
+ # X_train/y_train are the training data and labels, X_test/y_test the
+ # test data and labels, and test_size is the fraction held out for testing.
+ X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.25)
+ 
+ model = Sequential()
+ 
+ model.add(Conv2D(48, 11, strides=(3, 3), activation='relu', padding='same', input_shape=X_train.shape[1:]))
+ model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
+ model.add(BatchNormalization())
+ model.add(Conv2D(128, 5, strides=(3, 3), activation='relu', padding='same'))
+ model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
+ model.add(BatchNormalization())
+ model.add(Conv2D(192, 3, strides=(1, 1), activation='relu', padding='same'))
+ model.add(Conv2D(192, 3, strides=(1, 1), activation='relu', padding='same'))
+ model.add(Conv2D(128, 3, strides=(1, 1), activation='relu', padding='same'))
+ model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
+ model.add(BatchNormalization())
+ 
+ model.add(Flatten())
+ model.add(Dense(2048, activation='relu'))
+ model.add(Dropout(0.5))
+ model.add(Dense(2048, activation='relu'))
+ model.add(Dropout(0.5))
+ model.add(Dense(2, activation='softmax'))
+ 
+ model.summary()
+ 
+ model.compile(loss='categorical_crossentropy', optimizer='SGD', metrics=['accuracy'])
+ 
+ history = model.fit(X_train, y_train, epochs=epoch_size, verbose=1, validation_split=0.15)
+ 
+ # Evaluate on the test data and print the results
+ score = model.evaluate(X_test, y_test, verbose=0)
+ print('Test loss :', score[0])
+ print('Test accuracy :', score[1])
+ 
+ # Plot the model architecture
+ plot_model(model, to_file='./model3.png')
+ 
+ # loss: loss on the training data
+ # val_loss: loss on the validation split
+ loss = history.history['loss']
+ val_loss = history.history['val_loss']
+ 
+ # Loss graph
+ plt.plot(range(epoch_size), loss, marker='.', label='loss')
+ plt.plot(range(epoch_size), val_loss, marker='.', label='val_loss')
+ plt.legend(loc='best', fontsize=10)
+ plt.grid()
+ plt.xlabel('epoch')
+ plt.ylabel('loss')
+ plt.show()
+ 
+ # acc: accuracy on the training data
+ # val_acc: accuracy on the validation split
+ acc = history.history['acc']
+ val_acc = history.history['val_acc']
+ 
+ # Accuracy graph
+ plt.plot(range(epoch_size), acc, marker='.', label='acc')
+ plt.plot(range(epoch_size), val_acc, marker='.', label='val_acc')
+ plt.legend(loc='best', fontsize=10)
+ plt.grid()
+ plt.xlabel('epoch')
+ plt.ylabel('acc')
+ plt.show()
+ 
+ # Save the model architecture and weights
+ json_string = model.to_json()
+ open('alexnet_model.json', 'w').write(json_string)
+ model.save_weights('alexnet_weights.h5')
 
 ```