Question edit history

1

code delete

2018/03/12 17:17

Posted

yasu89

Score: 13

test CHANGED
File without changes
test CHANGED
@@ -10,295 +10,7 @@
 
 
 
- ```python
 
- import numpy as np
- import tensorflow as tf
- import csv
- import csv_decode as csvd
- from matplotlib import pyplot as plt
- import cv2
- import os
- import sys
-
- # define input data
- # [batch_size, height, width, channel]
- img_input = tf.placeholder(tf.float32, [None, 224, 224, 3])
-
- # conv layer 1
- # [height, width, channel, number of filter]
- block1_f1 = tf.Variable(tf.truncated_normal([5, 5, 3, 64], stddev=0.1), name='block1_f1')
- # [batch direction, height direction, width direction, channel direction]
- block1_conv1 = tf.nn.conv2d(img_input, block1_f1, strides=[1, 1, 1, 1], padding='SAME', name='block1_conv1')
- block1_b1 = tf.Variable(tf.constant(0.1, shape=[64]), name='block1_b1')
- block1_h_conv1 = tf.nn.relu(block1_conv1 + block1_b1)
-
- # conv layer 2
- block1_f2 = tf.Variable(tf.truncated_normal([5, 5, 64, 64], stddev=0.1), name='block1_f2')
- block1_conv2 = tf.nn.conv2d(block1_h_conv1, block1_f2, strides=[1, 1, 1, 1], padding='SAME', name='block1_conv2')
- block1_b2 = tf.Variable(tf.constant(0.1, shape=[64]), name='block1_b2')
- block1_h_conv2 = tf.nn.relu(block1_conv2 + block1_b2)
-
- # pooling layer 1
- # [batch direction, height direction, width direction, channel direction]
- block1_pool = tf.nn.max_pool(block1_h_conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
-
- # to the flat tensor
- flatten = tf.reshape(block1_pool, [-1, 112*112*64])
-
- # fully connected layer 1
- w_fc1 = tf.Variable(tf.truncated_normal([112*112*64, 4096], stddev=0.1), name='w_fc1')
- b_fc1 = tf.Variable(tf.constant(0.1, shape=[4096]), name='b_fc1')
- h_fc1 = tf.nn.relu(tf.matmul(flatten, w_fc1) + b_fc1)
-
- # fully connected layer 2
- w_fc2 = tf.Variable(tf.truncated_normal([4096, 4096], stddev=0.1), name='w_fc2')
- b_fc2 = tf.Variable(tf.constant(0.1, shape=[4096]), name='b_fc2')
- h_fc2 = tf.nn.relu(tf.matmul(h_fc1, w_fc2) + b_fc2)
-
- # output layer
- # class = ['barcode', 'tag'] thus [4096, 2]
- w_output = tf.Variable(tf.truncated_normal([4096, 2], stddev=0.1), name='w_output')
- b_output = tf.Variable(tf.constant(0.1, shape=[2]), name='b_output')
- out = tf.nn.softmax(tf.matmul(h_fc2, w_output) + b_output)
-
- # define training data
- # class = ['barcode', 'tag'] thus [None, 2]
- y = tf.placeholder(tf.float32, [None, 2])
- # loss function: cross entropy
- loss = tf.reduce_mean(-tf.reduce_sum(y*tf.log(out + 1e-5), axis=[1]))
-
- # training
- train_step = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
-
- # evaluation
- correct = tf.equal(tf.argmax(out, 1), tf.argmax(y, 1))
- # tf.reduce_mean: average calculate (ex)[0,1,1] --> 2/3
- accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
-
- # initialization
- init = tf.global_variables_initializer()
-
-
- with tf.Session() as sess:
-     sess.run(init)
-     print('initilize now')
-
-     # load images: (height, width, channel)
-     """ img = [[[R,G,B],[R,G,B],...],
-                [[R,G,B],[R,G,B],...],
-                [...................],
-                [...................],
-                [...................]]"""
-     files_train = os.listdir('images/train')
-     img_train = []
-     normalize = np.ones((224,224,3)) * 255.0
-     for file in files_train:
-         img = plt.imread('images/train/' + file)
-         img = cv2.resize(img, (224,224))
-         img = img / normalize
-         img_train.append(img)
-         print('loading train images now')
-
-     print('loading train csv file now')
-     labels_train_csv = csvd.read_csv('data/train_labels.csv')
-     # labels = [['IMG_6188.JPG', 'barcode'],...]
-     labels_train = csvd.fname_and_label(labels_train_csv)
-     labels_one_hot_train = []
-     for i in range(len(labels_train)):
-         labels_one_hot_train.append([0, 0])
-     print('making train one-hot labels of all zero')
-     for i in range(len(labels_train)):
-         if labels_train[i][1] == 'barcode':
-             labels_one_hot_train[i][0] = 1
-         if labels_train[i][1] == 'tag':
-             labels_one_hot_train[i][1] = 1
-     print('making true train one-hot labels')
-
-     # load images: (height, width, channel)
-     """ img = [[[R,G,B],[R,G,B],...],
-                [[R,G,B],[R,G,B],...],
-                [...................],
-                [...................],
-                [...................]]"""
-     files_test = os.listdir('images/test')
-     img_test = []
-     for file in files_test:
-         img = plt.imread('images/test/' + file)
-         img = cv2.resize(img, (224,224))
-         img = img / normalize
-         img_test.append(img)
-         print('loading test images now')
-
-     print('loading test csv file now')
-     labels_test_csv = csvd.read_csv('data/test_labels.csv')
-     # labels = [['IMG_6188.JPG', 'barcode'],...]
-     labels_test = csvd.fname_and_label(labels_test_csv)
-     labels_one_hot_test = []
-     for i in range(len(labels_test)):
-         labels_one_hot_test.append([0, 0])
-     print('making test one-hot labels of all zero')
-     for i in range(len(labels_test)):
-         if labels_test[i][1] == 'barcode':
-             labels_one_hot_test[i][0] = 1
-         if labels_test[i][1] == 'tag':
-             labels_one_hot_test[i][1] = 1
-     print('making true test one-hot labels')
-
-     for step in range(1000):
-         for i in range(len(img_train)):
-             sess.run(train_step, feed_dict={img_input:img_train[i], y:labels_one_hot_train[i]})
-             print('training now: ', i, ' steps')
-
-         acc_val = 0
-         for i in range(len(img_test)):
-             acc_val += sess.run(accuracy, feed_dict={img_input:img_test[i], y:labels_one_hot_test[i]})
-         acc_val = acc_val / len(img_test)
-         print('step: ', step+1, ' accuracy: ', acc_val)
- ```
 
 When I run it, the command prompt just shows the following output and then nothing happens.