質問編集履歴

1

コード修正しました

2017/07/09 11:20

投稿

zakio49
zakio49

スコア29

test CHANGED
@@ -1 +1 @@
1
- tensorflow csvのデータ形式
1
+ tensorflow shapeの整え方をお尋ねしたいです。csvのデータ形式
test CHANGED
@@ -4,6 +4,8 @@
4
4
 
5
5
  今のcsv
6
6
 
7
+ rank 1 shape [1,12]
8
+
7
9
  ````
8
10
 
9
11
  [ 0.71428573, 0.85714287, 0.71428573, 0.5714286 , 0.5714286 ,
@@ -14,7 +16,9 @@
14
16
 
15
17
  ````
16
18
 
19
+ こう変えることでshape[12,1,1] shape[1]という要求を満たせるのでしょうか?
20
+
17
- こう変える
21
+ お知恵を貸してください!
18
22
 
19
23
  ````
20
24
 
@@ -37,3 +41,229 @@
37
41
  0.5714286 , 0.71428573], dtype=float32)] - got shape [1, 12], but wanted [1].
38
42
 
39
43
  ````
44
+
45
+ その他のコード(問題なのはsession2のほうです)
46
+
47
+
48
+
49
+ ```
50
+
51
+
52
+
53
+ import tensorflow as tf
54
+
55
+ import numpy
56
+
57
+ import os
58
+
59
+
60
+
61
+ cwd = os.getcwd()
62
+
63
+
64
+
65
+ SCORE_SIZE = 12
66
+
67
+ HIDDEN_UNIT_SIZE = 40
68
+
69
+ TRAIN_DATA_SIZE = 45
70
+
71
+ TACK = 1
72
+
73
+ raw_input = numpy.loadtxt(open("test.csv"), delimiter=",")
74
+
75
+ [tensor, score] = numpy.hsplit(raw_input, [1])
76
+
77
+ [tensor_train, tensor_test] = numpy.vsplit(tensor, [TRAIN_DATA_SIZE])
78
+
79
+ [score_train, score_test] = numpy.vsplit(score, [TRAIN_DATA_SIZE])
80
+
81
+ print(score_test)
82
+
83
+ #tensorは正解データtrainは学習モデル、scoreは学習データ、testは実データ
84
+
85
+
86
+
87
+ def inference(score_placeholder):
88
+
89
+ with tf.name_scope('hidden1') as scope:
90
+
91
+ hidden1_weight = tf.Variable(tf.truncated_normal([SCORE_SIZE, HIDDEN_UNIT_SIZE], stddev=0.01), name="hidden1_weight")
92
+
93
+ hidden1_bias = tf.Variable(tf.constant(0.1, shape=[HIDDEN_UNIT_SIZE]), name="hidden1_bias")
94
+
95
+ hidden1_output = tf.nn.relu(tf.matmul(score_placeholder, hidden1_weight) + hidden1_bias)
96
+
97
+ with tf.name_scope('output') as scope:
98
+
99
+ output_weight = tf.Variable(tf.truncated_normal([HIDDEN_UNIT_SIZE, 1], stddev=0.01), name="output_weight")
100
+
101
+ output_bias = tf.Variable(tf.constant(0.1, shape=[1]), name="output_bias")
102
+
103
+ output = tf.matmul(hidden1_output, output_weight) + output_bias
104
+
105
+ if TACK != 1:
106
+
107
+ print("saku1")
108
+
109
+ print(output)
110
+
111
+ else:
112
+
113
+ print("saku2")
114
+
115
+
116
+
117
+ return tf.nn.l2_normalize(output, 0)
118
+
119
+
120
+
121
+ def loss(output, tensor_placeholder, loss_label_placeholder):
122
+
123
+ with tf.name_scope('loss') as scope:
124
+
125
+ loss = tf.nn.l2_loss(output - tf.nn.l2_normalize(tensor_placeholder, 0))
126
+
127
+ tf.summary.scalar('loss_label_placeholder', loss)
128
+
129
+ return loss
130
+
131
+
132
+
133
+ def training(loss):
134
+
135
+ with tf.name_scope('training') as scope:
136
+
137
+ train_step = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
138
+
139
+ return train_step
140
+
141
+
142
+
143
+
144
+
145
+
146
+
147
+ with tf.Graph().as_default():
148
+
149
+ tensor_placeholder = tf.placeholder(tf.float32, [None, 1], name="tensor_placeholder")
150
+
151
+ score_placeholder = tf.placeholder(tf.float32, [None, SCORE_SIZE], name="score_placeholder")
152
+
153
+ loss_label_placeholder = tf.placeholder("string", name="loss_label_placeholder")
154
+
155
+
156
+
157
+ feed_dict_train={
158
+
159
+ tensor_placeholder: tensor_train,
160
+
161
+ score_placeholder: score_train,
162
+
163
+ loss_label_placeholder: "loss_train"
164
+
165
+ }
166
+
167
+
168
+
169
+ feed_dict_test={
170
+
171
+ tensor_placeholder: tensor_test,
172
+
173
+ score_placeholder: score_test,
174
+
175
+ loss_label_placeholder: "loss_test"
176
+
177
+ }
178
+
179
+
180
+
181
+ output = inference(score_placeholder)
182
+
183
+ loss = loss(output, tensor_placeholder, loss_label_placeholder)
184
+
185
+ training_op = training(loss)
186
+
187
+ summary_op = tf.summary.merge_all()
188
+
189
+ init = tf.global_variables_initializer()
190
+
191
+ best_loss = float("inf")
192
+
193
+
194
+
195
+ with tf.Session() as sess:
196
+
197
+ summary_writer = tf.summary.FileWriter('data', graph_def=sess.graph_def)
198
+
199
+ sess.run(init)
200
+
201
+ for step in range(10000):
202
+
203
+ sess.run(training_op, feed_dict=feed_dict_train)
204
+
205
+ loss_test = sess.run(loss, feed_dict=feed_dict_test)
206
+
207
+ if loss_test < best_loss:
208
+
209
+ best_loss = loss_test
210
+
211
+ best_match = sess.run(output, feed_dict=feed_dict_test)
212
+
213
+ #if step % 100 == 0:
214
+
215
+ # summary_str = sess.run(summary_op, feed_dict=feed_dict_test)
216
+
217
+ # summary_str += sess.run(summary_op, feed_dict=feed_dict_train)
218
+
219
+ # summary_writer.add_summary(summary_str, step)
220
+
221
+
222
+
223
+ saver=tf.train.Saver()
224
+
225
+ saver.save(sess,cwd+'/model.ckpt')
226
+
227
+ print(cwd)
228
+
229
+ print(best_match)
230
+
231
+ print('Saved a model.')
232
+
233
+ sess.close()
234
+
235
+
236
+
237
+ with tf.Session() as sess2:
238
+
239
+ #変数の読み込み
240
+
241
+ summary_writer = tf.summary.FileWriter('data', graph=sess2.graph)
242
+
243
+ #sess2.run(init)
244
+
245
+ #新しいデータ
246
+
247
+ TRAIN_DATA_SIZE2 = 0
248
+
249
+ test2 = numpy.loadtxt(open("one_record.csv"), delimiter=",").astype(numpy.float32)
250
+
251
+ score3 = [test2]
252
+
253
+ print(score3)
254
+
255
+ saver = tf.train.Saver()
256
+
257
+ cwd = os.getcwd()
258
+
259
+ saver.restore(sess2,cwd + "/model.ckpt")
260
+
261
+ best_match2 = sess2.run(inference(score3))
262
+
263
+ print(best_match2)
264
+
265
+ print("fin")
266
+
267
+ sess2.close()
268
+
269
+ ```