質問編集履歴
7
動作コード追記
test
CHANGED
File without changes
|
test
CHANGED
@@ -1,5 +1,137 @@
|
|
1
1
|
下記コードで勾配降下法で定数a,b,cの近似値を求めようと考えています。
|
2
2
|
|
3
|
+
|
4
|
+
|
5
|
+
追記:下記コードに修正したところ動作はしましたが、勾配降下法による予測値は期待値の半分程度の値をとっています。cはゼロ値になっており収束してゼロなのかこれから確認します。
|
6
|
+
|
7
|
+
|
8
|
+
|
9
|
+
```lang-python
|
10
|
+
|
11
|
+
# Imports
|
12
|
+
|
13
|
+
import tensorflow as tf
|
14
|
+
|
15
|
+
import numpy as np
|
16
|
+
|
17
|
+
import xlrd
|
18
|
+
|
19
|
+
|
20
|
+
|
21
|
+
# Gradient Update Rate
|
22
|
+
|
23
|
+
gradient_rate=0.001
|
24
|
+
|
25
|
+
|
26
|
+
|
27
|
+
# Placeholders
|
28
|
+
|
29
|
+
Ta = tf.placeholder(tf.float32, name='Ta')
|
30
|
+
|
31
|
+
Tb = tf.placeholder(tf.float32, name='Tb')
|
32
|
+
|
33
|
+
|
34
|
+
|
35
|
+
a = tf.Variable(tf.zeros([1, 1]))
|
36
|
+
|
37
|
+
b = tf.Variable(tf.zeros([1, 1]))
|
38
|
+
|
39
|
+
c = tf.Variable(tf.zeros([1, 1]))
|
40
|
+
|
41
|
+
|
42
|
+
|
43
|
+
def inference(Ta, Tb):
|
44
|
+
|
45
|
+
Z = (a * Ta + b) * tf.exp(Tb - c * Ta)
|
46
|
+
|
47
|
+
return Z
|
48
|
+
|
49
|
+
|
50
|
+
|
51
|
+
def loss(logit, label):
|
52
|
+
|
53
|
+
loss = tf.reduce_mean(tf.square(logit - label))
|
54
|
+
|
55
|
+
return loss
|
56
|
+
|
57
|
+
|
58
|
+
|
59
|
+
def training(loss, gradient_rate):
|
60
|
+
|
61
|
+
optimizer = tf.train.GradientDescentOptimizer(gradient_rate)
|
62
|
+
|
63
|
+
train_op = optimizer.minimize(loss)
|
64
|
+
|
65
|
+
return train_op
|
66
|
+
|
67
|
+
|
68
|
+
|
69
|
+
# Read Excel File
|
70
|
+
|
71
|
+
book = xlrd.open_workbook('test.xlsx')
|
72
|
+
|
73
|
+
sheet = book.sheet_by_index(0)
|
74
|
+
|
75
|
+
|
76
|
+
|
77
|
+
# Model Processing
|
78
|
+
|
79
|
+
for i in range(sheet.ncols):
|
80
|
+
|
81
|
+
Ta = float(sheet.cell_value(i, 0))
|
82
|
+
|
83
|
+
Tb = float(sheet.cell_value(i, 1))
|
84
|
+
|
85
|
+
print("%d: Ta: %f, Tb: %f" % (i, Ta, Tb))
|
86
|
+
|
87
|
+
|
88
|
+
|
89
|
+
# Inference Model.
|
90
|
+
|
91
|
+
logit = inference(Ta, Tb)
|
92
|
+
|
93
|
+
|
94
|
+
|
95
|
+
label = float(sheet.cell_value(i, 2))
|
96
|
+
|
97
|
+
|
98
|
+
|
99
|
+
# Loss Calculation.
|
100
|
+
|
101
|
+
loss = loss(logit, label)
|
102
|
+
|
103
|
+
|
104
|
+
|
105
|
+
train_op = training(loss, gradient_rate)
|
106
|
+
|
107
|
+
|
108
|
+
|
109
|
+
# Model Running
|
110
|
+
|
111
|
+
sess = tf.Session()
|
112
|
+
|
113
|
+
sess.run(tf.global_variables_initializer())
|
114
|
+
|
115
|
+
sess.run(logit)
|
116
|
+
|
117
|
+
sess.run(loss)
|
118
|
+
|
119
|
+
sess.run(train_op)
|
120
|
+
|
121
|
+
|
122
|
+
|
123
|
+
print(sess.run(a), sess.run(b), sess.run(c))
|
124
|
+
|
125
|
+
```
|
126
|
+
|
127
|
+
|
128
|
+
|
129
|
+
|
130
|
+
|
131
|
+
|
132
|
+
|
133
|
+
|
134
|
+
|
3
135
|
SGDでもバッチでも良いのですが、エラー:
|
4
136
|
|
5
137
|
ValueError: No variables to optimize.
|
6
期待値追加
test
CHANGED
File without changes
|
test
CHANGED
@@ -18,7 +18,7 @@
|
|
18
18
|
|
19
19
|
トレースバックを下記に示します。
|
20
20
|
|
21
|
-
入力値
|
21
|
+
入力値TaとTbに対する期待値は例えば、下記の場合12.855です。
|
22
22
|
|
23
23
|
|
24
24
|
|
5
tracebackの再ペースト
test
CHANGED
File without changes
|
test
CHANGED
@@ -24,13 +24,29 @@
|
|
24
24
|
|
25
25
|
```lang-python
|
26
26
|
|
27
|
+
>>>> Ta <<<<
|
28
|
+
|
29
|
+
13.521899999999986
|
30
|
+
|
31
|
+
>>>> Tb <<<<
|
32
|
+
|
33
|
+
56.5643
|
34
|
+
|
35
|
+
>>>> LOGIT <<<<
|
36
|
+
|
37
|
+
Tensor("mul_2:0", dtype=float32)
|
38
|
+
|
39
|
+
>>>> LOSS <<<<
|
40
|
+
|
41
|
+
Tensor("Mean:0", dtype=float32)
|
42
|
+
|
27
43
|
Traceback (most recent call last):
|
28
44
|
|
29
|
-
File "fitting.py", line 5
|
45
|
+
File "fitting_question.py", line 53, in <module>
|
30
46
|
|
31
|
-
train_op = training(loss)
|
47
|
+
train_op = training(loss, gradient_rate)
|
32
48
|
|
33
|
-
File "fitting.py", line 2
|
49
|
+
File "fitting_question.py", line 25, in training
|
34
50
|
|
35
51
|
train_op = optimizer.minimize(loss)
|
36
52
|
|
4
トレースバックの追記
test
CHANGED
File without changes
|
test
CHANGED
@@ -15,6 +15,36 @@
|
|
15
15
|
|
16
16
|
|
17
17
|
このような状況ですが、このコード上何が間違っているかをSGDとバッチの両方の面から指摘していただけると助かります。宜しくお願い致します。
|
18
|
+
|
19
|
+
トレースバックを下記に示します。
|
20
|
+
|
21
|
+
入力値とそれに対する期待値(ラベル)は諸事情により開示できません。
|
22
|
+
|
23
|
+
|
24
|
+
|
25
|
+
```lang-python
|
26
|
+
|
27
|
+
Traceback (most recent call last):
|
28
|
+
|
29
|
+
File "fitting.py", line 52, in <module>
|
30
|
+
|
31
|
+
train_op = training(loss)
|
32
|
+
|
33
|
+
File "fitting.py", line 29, in training
|
34
|
+
|
35
|
+
train_op = optimizer.minimize(loss)
|
36
|
+
|
37
|
+
File "//anaconda/lib/python3.5/site-packages/tensorflow/python/training/optimizer.py", line 279, in minimize
|
38
|
+
|
39
|
+
grad_loss=grad_loss)
|
40
|
+
|
41
|
+
File "//anaconda/lib/python3.5/site-packages/tensorflow/python/training/optimizer.py", line 339, in compute_gradients
|
42
|
+
|
43
|
+
raise ValueError("No variables to optimize.")
|
44
|
+
|
45
|
+
ValueError: No variables to optimize.
|
46
|
+
|
47
|
+
```
|
18
48
|
|
19
49
|
|
20
50
|
|
3
引数間違いを修正
test
CHANGED
File without changes
|
test
CHANGED
@@ -68,7 +68,7 @@
|
|
68
68
|
|
69
69
|
|
70
70
|
|
71
|
-
def training(loss,
|
71
|
+
def training(loss, gradient_rate):
|
72
72
|
|
73
73
|
optimizer = tf.train.GradientDescentOptimizer(gradient_rate)
|
74
74
|
|
2
```python code```に修正
test
CHANGED
File without changes
|
test
CHANGED
@@ -18,7 +18,7 @@
|
|
18
18
|
|
19
19
|
|
20
20
|
|
21
|
-
|
21
|
+
```lang-python
|
22
22
|
|
23
23
|
# Imports
|
24
24
|
|
@@ -148,4 +148,4 @@
|
|
148
148
|
|
149
149
|
print(sess.run(a), sess.run(b), sess.run(c))
|
150
150
|
|
151
|
-
|
151
|
+
```
|
1
<CODE>によるコードの明示など
test
CHANGED
File without changes
|
test
CHANGED
@@ -4,7 +4,7 @@
|
|
4
4
|
|
5
5
|
ValueError: No variables to optimize.
|
6
6
|
|
7
|
-
が
|
7
|
+
がどういう意図のメッセージなのか理解できていません。現状SGDのコードになっていることは理解できていますが、書き方の正しさは分かっていません。
|
8
8
|
|
9
9
|
そもそもplaceholderを使うべきかもよく分かっていません。
|
10
10
|
|
@@ -17,6 +17,8 @@
|
|
17
17
|
このような状況ですが、このコード上何が間違っているかをSGDとバッチの両方の面から指摘していただけると助かります。宜しくお願い致します。
|
18
18
|
|
19
19
|
|
20
|
+
|
21
|
+
<CODE>
|
20
22
|
|
21
23
|
# Imports
|
22
24
|
|
@@ -68,7 +70,7 @@
|
|
68
70
|
|
69
71
|
def training(loss, learning_rate):
|
70
72
|
|
71
|
-
optimizer = tf.train.GradientDescentOptimizer(
|
73
|
+
optimizer = tf.train.GradientDescentOptimizer(gradient_rate)
|
72
74
|
|
73
75
|
train_op = optimizer.minimize(loss)
|
74
76
|
|
@@ -145,3 +147,5 @@
|
|
145
147
|
|
146
148
|
|
147
149
|
print(sess.run(a), sess.run(b), sess.run(c))
|
150
|
+
|
151
|
+
<CODE>
|