質問編集履歴
1
コードの全文を付け足しました。
test
CHANGED
File without changes
|
test
CHANGED
@@ -73,3 +73,269 @@
|
|
73
73
|
print('accuracy:', accuracy)
|
74
74
|
|
75
75
|
```
|
76
|
+
|
77
|
+
|
78
|
+
|
79
|
+
|
80
|
+
|
81
|
+
クラス内のコードの全文です。
|
82
|
+
|
83
|
+
```python
|
84
|
+
|
85
|
+
|
86
|
+
|
87
|
+
# activation = [0, 1, 0 ,1]
|
88
|
+
|
89
|
+
# 0 : ReLU, 1 : sigmoid
|
90
|
+
|
91
|
+
class MLP(object):
|
92
|
+
|
93
|
+
def __init__(self, input_n, output_n, hidden_n, func_n):
|
94
|
+
|
95
|
+
self.input_n = input_n
|
96
|
+
|
97
|
+
self.output_n = output_n
|
98
|
+
|
99
|
+
self.hidden_n = hidden_n
|
100
|
+
|
101
|
+
self.weights = []
|
102
|
+
|
103
|
+
self.biases = []
|
104
|
+
|
105
|
+
self.func_n = func_n
|
106
|
+
|
107
|
+
self._x = None
|
108
|
+
|
109
|
+
self._t = None
|
110
|
+
|
111
|
+
self._sess = None
|
112
|
+
|
113
|
+
self._history = {
|
114
|
+
|
115
|
+
'accuracy' : [],
|
116
|
+
|
117
|
+
'loss' : []
|
118
|
+
|
119
|
+
}
|
120
|
+
|
121
|
+
|
122
|
+
|
123
|
+
|
124
|
+
|
125
|
+
def weight_variable(self, shape):
|
126
|
+
|
127
|
+
initial = tf.Variable(tf.random_normal(shape, stddev=0.01))
|
128
|
+
|
129
|
+
return initial
|
130
|
+
|
131
|
+
|
132
|
+
|
133
|
+
def bias_variable(self, shape):
|
134
|
+
|
135
|
+
initial = tf.Variable(tf.zeros(shape))
|
136
|
+
|
137
|
+
return initial
|
138
|
+
|
139
|
+
|
140
|
+
|
141
|
+
def function(self, x, func):
|
142
|
+
|
143
|
+
if func == 0:
|
144
|
+
|
145
|
+
return tf.nn.relu(x)
|
146
|
+
|
147
|
+
if func == 1:
|
148
|
+
|
149
|
+
return tf.nn.sigmoid(x)
|
150
|
+
|
151
|
+
|
152
|
+
|
153
|
+
def inference(self, x):
|
154
|
+
|
155
|
+
# input-layer - hidden-layer, hidden-layer - hidden-layer
|
156
|
+
|
157
|
+
for i, hidden in enumerate(self.hidden_n):
|
158
|
+
|
159
|
+
if i == 0:
|
160
|
+
|
161
|
+
inp = x
|
162
|
+
|
163
|
+
input_dim = self.input_n
|
164
|
+
|
165
|
+
else:
|
166
|
+
|
167
|
+
inp = output
|
168
|
+
|
169
|
+
input_dim = self.hidden_n[i-1]
|
170
|
+
|
171
|
+
|
172
|
+
|
173
|
+
self.weights.append(self.weight_variable([input_dim, hidden]))
|
174
|
+
|
175
|
+
self.biases.append(self.bias_variable([hidden]))
|
176
|
+
|
177
|
+
|
178
|
+
|
179
|
+
h = tf.matmul(inp, self.weights[-1]) + self.biases[-1]
|
180
|
+
|
181
|
+
output = self.function(h, self.func_n[i])
|
182
|
+
|
183
|
+
|
184
|
+
|
185
|
+
self.weights.append(self.weight_variable([self.hidden_n[-1], self.output_n]))
|
186
|
+
|
187
|
+
self.biases.append(self.bias_variable([self.output_n]))
|
188
|
+
|
189
|
+
|
190
|
+
|
191
|
+
y = tf.nn.softmax(tf.matmul(output, self.weights[-1]) + self.biases[-1])
|
192
|
+
|
193
|
+
|
194
|
+
|
195
|
+
return y
|
196
|
+
|
197
|
+
|
198
|
+
|
199
|
+
|
200
|
+
|
201
|
+
def loss(self, y, t):
|
202
|
+
|
203
|
+
loss = tf.reduce_mean(-tf.reduce_sum(
|
204
|
+
|
205
|
+
t * tf.log(tf.clip_by_value(y, 1e-10, 1.0)), reduction_indices=[1]))
|
206
|
+
|
207
|
+
return loss
|
208
|
+
|
209
|
+
|
210
|
+
|
211
|
+
def training(self, loss):
|
212
|
+
|
213
|
+
optimizer = tf.train.GradientDescentOptimizer(0.01)
|
214
|
+
|
215
|
+
train_step = optimizer.minimize(loss)
|
216
|
+
|
217
|
+
return train_step
|
218
|
+
|
219
|
+
|
220
|
+
|
221
|
+
def accuracy(self, y, t):
|
222
|
+
|
223
|
+
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(t, 1))
|
224
|
+
|
225
|
+
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
|
226
|
+
|
227
|
+
return accuracy
|
228
|
+
|
229
|
+
|
230
|
+
|
231
|
+
def fit(self, X_train, Y_train, epochs=100, batch_size=100):
|
232
|
+
|
233
|
+
x = tf.placeholder(tf.float32, [None, self.input_n])
|
234
|
+
|
235
|
+
t = tf.placeholder(tf.float32, [None, self.output_n])
|
236
|
+
|
237
|
+
|
238
|
+
|
239
|
+
# save for evaluate()
|
240
|
+
|
241
|
+
self._x = x
|
242
|
+
|
243
|
+
self._t = t
|
244
|
+
|
245
|
+
|
246
|
+
|
247
|
+
y = self.inference(x)
|
248
|
+
|
249
|
+
loss = self.loss(y, t)
|
250
|
+
|
251
|
+
train_step = self.training(loss)
|
252
|
+
|
253
|
+
accuracy = self.accuracy(y, t)
|
254
|
+
|
255
|
+
|
256
|
+
|
257
|
+
init = tf.global_variables_initializer()
|
258
|
+
|
259
|
+
sess = tf.Session()
|
260
|
+
|
261
|
+
sess.run(init)
|
262
|
+
|
263
|
+
|
264
|
+
|
265
|
+
# save for evaluate()
|
266
|
+
|
267
|
+
self._sess = sess
|
268
|
+
|
269
|
+
|
270
|
+
|
271
|
+
N_train = len(X_train)
|
272
|
+
|
273
|
+
n_batches = N_train // batch_size
|
274
|
+
|
275
|
+
|
276
|
+
|
277
|
+
for epoch in range(epochs):
|
278
|
+
|
279
|
+
X_, Y_ = shuffle(X_train, Y_train)
|
280
|
+
|
281
|
+
|
282
|
+
|
283
|
+
for i in range(n_batches):
|
284
|
+
|
285
|
+
start = i * batch_size
|
286
|
+
|
287
|
+
end = start + batch_size
|
288
|
+
|
289
|
+
|
290
|
+
|
291
|
+
sess.run(train_step, feed_dict={
|
292
|
+
|
293
|
+
x : X_train[start:end],
|
294
|
+
|
295
|
+
t : Y_train[start:end]
|
296
|
+
|
297
|
+
})
|
298
|
+
|
299
|
+
loss_ = loss.eval(session=sess, feed_dict={
|
300
|
+
|
301
|
+
x : X_train,
|
302
|
+
|
303
|
+
t : Y_train
|
304
|
+
|
305
|
+
})
|
306
|
+
|
307
|
+
accuracy_ = accuracy.eval(session=sess, feed_dict={
|
308
|
+
|
309
|
+
x : X_train,
|
310
|
+
|
311
|
+
t : Y_train
|
312
|
+
|
313
|
+
})
|
314
|
+
|
315
|
+
|
316
|
+
|
317
|
+
# save to _history
|
318
|
+
|
319
|
+
self._history['loss'].append(loss_)
|
320
|
+
|
321
|
+
self._history['accuracy'].append(accuracy_)
|
322
|
+
|
323
|
+
|
324
|
+
|
325
|
+
return self._history
|
326
|
+
|
327
|
+
|
328
|
+
|
329
|
+
|
330
|
+
|
331
|
+
def evaluate(self, X_test, Y_test):
|
332
|
+
|
333
|
+
return self.accuracy.eval(session=self._sess, feed_dict={
|
334
|
+
|
335
|
+
self._x : X_test,
|
336
|
+
|
337
|
+
self._t : Y_test
|
338
|
+
|
339
|
+
})
|
340
|
+
|
341
|
+
```
|