Answer edit history

Revision 8: fix answer
@@ -68,7 +68,7 @@
 epochs = epochs
 )
 ```
-Basically, when solving a classification problem, the standard technique is to reduce the Trainable params toward the output side. I set the number of kernels to decrease gradually. I also recommend [Swish](https://arxiv.org/pdf/1710.05941v1.pdf), which outperformed ELU, as the activation function. In addition, since the default kernel initializer is `glorot_uniform`, I recommend [`he_uniform`, designed for the ReLU family](https://arxiv.org/pdf/1502.01852.pdf) of activations such as ELU and Swish.
+Basically, when solving a classification problem, the standard technique is to reduce the Trainable params and the number of units toward the output side. I set the number of kernels to decrease gradually. I also recommend [Swish](https://arxiv.org/pdf/1710.05941v1.pdf), which outperformed ELU, as the activation function. In addition, since the default kernel initializer is `glorot_uniform`, I recommend [`he_uniform`, designed for the ReLU family](https://arxiv.org/pdf/1502.01852.pdf) of activations such as ELU and Swish.
 ```Python:network.summary()
 Model: "model"
 __________________________________________________________________________________________________
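To make the recommendation above concrete, here is a minimal sketch, not taken from the answer itself, of one convolution layer with the suggested settings; it assumes a Keras version in which `"swish"` is a registered activation name, as the answer's own code implies:

```Python
from keras.layers import Conv2D

# One layer combining the two recommendations: Swish activation plus
# He initialization instead of the default glorot_uniform.
conv = Conv2D(
    filters = 32,
    kernel_size = (3, 3),
    padding = "same",
    activation = "swish",              # smooth activation reported to beat ELU
    kernel_initializer = "he_uniform"  # He init, suited to ReLU-family activations
)
```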
Revision 7: fix result on feature shape (2,)
@@ -100,22 +100,23 @@
 
 flatten (Flatten) (None, 256) 0 ['max_pooling2d_4[0][0]']
 
-feature_input (InputLayer) [(None, 5)] 0 []
+feature_input (InputLayer) [(None, 2)] 0 []
 
-concatenate (Concatenate) (None, 261) 0 ['flatten[0][0]',
+concatenate (Concatenate) (None, 258) 0 ['flatten[0][0]',
 'feature_input[0][0]']
 
-dropout (Dropout) (None, 261) 0 ['concatenate[0][0]']
+dropout (Dropout) (None, 258) 0 ['concatenate[0][0]']
 
-dense (Dense) (None, 128) 33536 ['dropout[0][0]']
+dense (Dense) (None, 128) 33152 ['dropout[0][0]']
 
 dense_1 (Dense) (None, 10) 1290 ['dense[0][0]']
 
 ==================================================================================================
-Total params: 47,516
+Total params: 47,132
-Trainable params: 47,516
+Trainable params: 47,132
 Non-trainable params: 0
+__________________________________________________________________________________________________
 ```
 
 
 Personally, I think it is better to first train only the CNN on the classification task, then freeze the CNN and merge its feature-map output with the additional information for prediction/classification.
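For reference, the corrected numbers follow directly from the Dense parameter formula (inputs + 1) × units: shrinking feature_input from 5 to 2 dimensions shrinks the concatenated vector from 261 to 258, so the first Dense layer drops from (261 + 1) × 128 = 33,536 to (258 + 1) × 128 = 33,152 weights, which accounts exactly for the 384-parameter difference between the totals (47,516 versus 47,132).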
Revision 6: append network summary
@@ -69,6 +69,53 @@
 )
 ```
 Basically, when solving a classification problem, the standard technique is to reduce the Trainable params toward the output side. I set the number of kernels to decrease gradually. I also recommend [Swish](https://arxiv.org/pdf/1710.05941v1.pdf), which outperformed ELU, as the activation function. In addition, since the default kernel initializer is `glorot_uniform`, I recommend [`he_uniform`, designed for the ReLU family](https://arxiv.org/pdf/1502.01852.pdf) of activations such as ELU and Swish.
+```Python:network.summary()
+Model: "model"
+__________________________________________________________________________________________________
+Layer (type) Output Shape Param # Connected to
+==================================================================================================
+image_input (InputLayer) [(None, 256, 128, 1 0 []
+)]
+
+conv2d (Conv2D) (None, 256, 128, 32 320 ['image_input[0][0]']
+)
+
+max_pooling2d (MaxPooling2D) (None, 128, 64, 32) 0 ['conv2d[0][0]']
+
+conv2d_1 (Conv2D) (None, 128, 64, 23) 6647 ['max_pooling2d[0][0]']
+
+max_pooling2d_1 (MaxPooling2D) (None, 64, 32, 23) 0 ['conv2d_1[0][0]']
+
+conv2d_2 (Conv2D) (None, 64, 32, 16) 3328 ['max_pooling2d_1[0][0]']
+
+max_pooling2d_2 (MaxPooling2D) (None, 32, 16, 16) 0 ['conv2d_2[0][0]']
+
+conv2d_3 (Conv2D) (None, 32, 16, 11) 1595 ['max_pooling2d_2[0][0]']
+
+max_pooling2d_3 (MaxPooling2D) (None, 16, 8, 11) 0 ['conv2d_3[0][0]']
+
+conv2d_4 (Conv2D) (None, 16, 8, 8) 800 ['max_pooling2d_3[0][0]']
+
+max_pooling2d_4 (MaxPooling2D) (None, 8, 4, 8) 0 ['conv2d_4[0][0]']
+
+flatten (Flatten) (None, 256) 0 ['max_pooling2d_4[0][0]']
+
+feature_input (InputLayer) [(None, 5)] 0 []
+
+concatenate (Concatenate) (None, 261) 0 ['flatten[0][0]',
+'feature_input[0][0]']
+
+dropout (Dropout) (None, 261) 0 ['concatenate[0][0]']
+
+dense (Dense) (None, 128) 33536 ['dropout[0][0]']
+
+dense_1 (Dense) (None, 10) 1290 ['dense[0][0]']
+
+==================================================================================================
+Total params: 47,516
+Trainable params: 47,516
+Non-trainable params: 0
+```
 
 
 Personally, I think it is better to first train only the CNN on the classification task, then freeze the CNN and merge its feature-map output with the additional information for prediction/classification.
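For orientation when reading this summary: the 256 units entering Flatten are the last pooled feature map laid flat, 8 × 4 × 8 = 256, and concatenating the 5-dimensional feature_input yields the 261-wide vector passed through Dropout to the Dense layers.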
Revision 5: append code comments
@@ -8,9 +8,9 @@
 import numpy as np
 
 image_input = Input(shape = (256, 128, 1), name = "image_input")
-append_input = Input(shape = (2,), name = "feature_input") # as many as the number of additional features
+append_input = Input(shape = (2,), name = "feature_input") # specify as many dimensions as there are additional features
 
 params = { # common parameters are grouped together
 "kernel_size": (3, 3),
 "strides": (1, 1),
 "padding": "same",

@@ -37,21 +37,29 @@
 
 network = Model(inputs = [image_input, append_input], outputs = x)
 
-network.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
+network.compile(
+optimizer = 'adam',
+loss = 'categorical_crossentropy',
+metrics=['accuracy']
+)
 
 network.summary()
 from keras.utils.vis_utils import plot_model
 plot_model(network, to_file = "CNN.png", rankdir = "LR", show_shapes = True, show_layer_names = True, show_layer_activations = True)
 
+# prepare dummy data
+# dummy images: 32 grayscale images at 256x128 resolution
 train_img = np.random.randn(32, 256, 128, 1)
-# as dummy data, train_feature = np.array([ [x0, y0], [x1, y1], [x2, y2], ..., [xn, yn] ])
+# pass train_feature in the form np.array([ [x0, y0], [x1, y1], [x2, y2], ..., [xn, yn] ])
 train_feature = np.random.randn(32, 2)
+# the values the model should output; must already be One-Hot encoded
 train_y = np.abs(np.random.randn(32, 10))
+
 batch_size = 32
 epochs = 10
 
 network.fit(
-x = { # match the layer names
+x = { # the keys must match the layer names
 "image_input": train_img,
 "feature_input": train_feature
 },
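As a small illustration of the "must already be One-Hot encoded" comment, integer class labels can be converted with `keras.utils.to_categorical`; the label array below is hypothetical, not from the answer:

```Python
import numpy as np
from keras.utils import to_categorical

labels = np.random.randint(0, 10, size = 32)        # hypothetical integer class ids
train_y = to_categorical(labels, num_classes = 10)  # shape (32, 10), one one-hot row per sample
```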
Revision 4: fix answer
@@ -8,7 +8,7 @@
 import numpy as np
 
 image_input = Input(shape = (256, 128, 1), name = "image_input")
-append_input = Input(shape = (5), name = "feature_input") # as many as the number of additional features
+append_input = Input(shape = (2,), name = "feature_input") # as many as the number of additional features
 
 params = { # common parameters are grouped together
 "kernel_size": (3, 3),

@@ -44,7 +44,8 @@
 plot_model(network, to_file = "CNN.png", rankdir = "LR", show_shapes = True, show_layer_names = True, show_layer_activations = True)
 
 train_img = np.random.randn(32, 256, 128, 1)
-train_feature = np.random.randn(32, 5, 1)
+# as dummy data, train_feature = np.array([ [x0, y0], [x1, y1], [x2, y2], ..., [xn, yn] ])
+train_feature = np.random.randn(32, 2)
 train_y = np.abs(np.random.randn(32, 10))
 batch_size = 32
 epochs = 10
Revision 3: fix answer
@@ -59,7 +59,7 @@
 epochs = epochs
 )
 ```
-Basically, when solving a classification problem, the theory is to reduce the Trainable params toward the output side. I set the number of kernel filters to decrease gradually. I also recommend Swish, which outperformed ELU, as the activation function. In addition, since the default kernel filter initializer is `glorot_uniform`, I recommend `he_uniform`, designed for the ReLU family of activations such as ELU and Swish.
+Basically, when solving a classification problem, the standard technique is to reduce the Trainable params toward the output side. I set the number of kernels to decrease gradually. I also recommend [Swish](https://arxiv.org/pdf/1710.05941v1.pdf), which outperformed ELU, as the activation function. In addition, since the default kernel initializer is `glorot_uniform`, I recommend [`he_uniform`, designed for the ReLU family](https://arxiv.org/pdf/1502.01852.pdf) of activations such as ELU and Swish.
 
 
 Personally, I think it is better to first train only the CNN on the classification task, then freeze the CNN and merge its feature-map output with the additional information for prediction/classification.
Revision 2: fix code
@@ -10,26 +10,34 @@
 image_input = Input(shape = (256, 128, 1), name = "image_input")
 append_input = Input(shape = (5), name = "feature_input") # as many as the number of additional features
 
-
+params = { # common parameters are grouped together
+"kernel_size": (3, 3),
+"strides": (1, 1),
+"padding": "same",
+"activation": "swish", # use a smooth, continuous activation function
+"kernel_initializer": "he_normal" # use with ReLU-family activation functions
+}
+
+x = Conv2D(filters=32, **params)(image_input)
 x = MaxPool2D(pool_size=(2,2), strides=None, padding='same')(x)
-x = Conv2D(filters=
+x = Conv2D(filters=23, **params)(x)
 x = MaxPool2D(pool_size=(2,2), strides=None, padding='same')(x)
-x = Conv2D(filters=16,
+x = Conv2D(filters=16, **params)(x)
 x = MaxPool2D(pool_size=(2,2), strides=None, padding='same')(x)
-x = Conv2D(filters=1
+x = Conv2D(filters=11, **params)(x)
 x = MaxPool2D(pool_size=(2,2), strides=None, padding='same')(x)
-x = Conv2D(filters=
+x = Conv2D(filters=8, **params)(x)
 x = MaxPool2D(pool_size=(2,2), strides=None, padding='same')(x)
 
 x = Flatten()(x)
 x = concatenate([x, append_input])
 x = Dropout(0.5)(x)
-x = Dense(128, activation='elu')(x)
+x = Dense(128, activation='swish', kernel_initializer = 'he_uniform')(x)
 x = Dense(10, activation='softmax')(x)
 
 network = Model(inputs = [image_input, append_input], outputs = x)
 
-network.compile(optimizer='
+network.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
 
 network.summary()
 from keras.utils.vis_utils import plot_model

@@ -37,17 +45,21 @@
 
 train_img = np.random.randn(32, 256, 128, 1)
 train_feature = np.random.randn(32, 5, 1)
-train_y = np.random.randn(32, 10)
+train_y = np.abs(np.random.randn(32, 10))
 batch_size = 32
 epochs = 10
 
 network.fit(
-x = {"image_input": train_img, "feature_input": train_feature},
+x = { # match the layer names
+"image_input": train_img,
+"feature_input": train_feature
+},
 y = train_y,
 batch_size = batch_size,
 epochs = epochs
 )
 ```
+Basically, when solving a classification problem, the theory is to reduce the Trainable params toward the output side. I set the number of kernel filters to decrease gradually. I also recommend Swish, which outperformed ELU, as the activation function. In addition, since the default kernel filter initializer is `glorot_uniform`, I recommend `he_uniform`, designed for the ReLU family of activations such as ELU and Swish.
 
 
 Personally, I think it is better to first train only the CNN on the classification task, then freeze the CNN and merge its feature-map output with the additional information for prediction/classification.
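A note on the `params` idiom introduced in this revision: `Conv2D(filters=n, **params)` is ordinary Python keyword unpacking, so every convolution shares the same kernel size, strides, padding, activation, and initializer, and only the filter count varies per layer. Observe also that the dict passes `he_normal` while the surrounding prose recommends `he_uniform`; both are He initializers for ReLU-family activations, differing only in the sampling distribution.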
Revision 1: fix code
@@ -5,6 +5,7 @@
 from keras.models import Model
 from keras.layers import Conv2D, Input, Flatten, Dense, MaxPool2D, Dropout, concatenate
 from keras.optimizers import Adam
+import numpy as np
 
 image_input = Input(shape = (256, 128, 1), name = "image_input")
 append_input = Input(shape = (5), name = "feature_input") # as many as the number of additional features

@@ -32,11 +33,17 @@
 
 network.summary()
 from keras.utils.vis_utils import plot_model
-plot_model(network, to_file = "CNN.png", show_shapes = True, show_layer_names = True, show_layer_activations = True)
+plot_model(network, to_file = "CNN.png", rankdir = "LR", show_shapes = True, show_layer_names = True, show_layer_activations = True)
+
+train_img = np.random.randn(32, 256, 128, 1)
+train_feature = np.random.randn(32, 5, 1)
+train_y = np.random.randn(32, 10)
+batch_size = 32
+epochs = 10
 
 network.fit(
-x =
-y = tri
+x = {"image_input": train_img, "feature_input": train_feature},
+y = train_y,
 batch_size = batch_size,
 epochs = epochs
 )
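The closing remark repeated throughout this history, training the CNN alone first and then freezing it before attaching the extra features, can be sketched as below. This is a hedged illustration, not the author's code: the single conv block and all variable names are placeholders, with shapes borrowed from the answer's examples.

```Python
import numpy as np
from keras.models import Model
from keras.layers import Input, Conv2D, MaxPool2D, Flatten, Dense, Dropout, concatenate

# Stage 1: an image-only classifier (CNN truncated to one block for brevity).
image_input = Input(shape = (256, 128, 1), name = "image_input")
x = Conv2D(filters = 32, kernel_size = (3, 3), padding = "same",
           activation = "swish", kernel_initializer = "he_uniform")(image_input)
x = MaxPool2D(pool_size = (2, 2), padding = "same")(x)
feature_map = Flatten()(x)
cnn_out = Dense(10, activation = "softmax")(feature_map)

cnn = Model(inputs = image_input, outputs = cnn_out)
cnn.compile(optimizer = "adam", loss = "categorical_crossentropy", metrics = ["accuracy"])
# cnn.fit(train_img, train_y, ...)            # stage-1 training on images alone

# Stage 2: freeze the CNN, merge its feature map with the extra features,
# and train only the new head.
cnn.trainable = False                          # no CNN weights are updated below
append_input = Input(shape = (2,), name = "feature_input")
h = concatenate([feature_map, append_input])   # reuse the frozen feature-map tensor
h = Dropout(0.5)(h)
h = Dense(128, activation = "swish", kernel_initializer = "he_uniform")(h)
out = Dense(10, activation = "softmax")(h)

head = Model(inputs = [image_input, append_input], outputs = out)
head.compile(optimizer = "adam", loss = "categorical_crossentropy", metrics = ["accuracy"])
# head.fit({"image_input": train_img, "feature_input": train_feature}, train_y, ...)
```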
|