Question edit history

2

I added the code. The parts that are not relevant, the loss and the model, have been omitted.
test
CHANGED
File without changes
test
CHANGED
@@ -32,6 +32,40 @@

As for the code, it just runs a fairly large model about 2,000 times with random matrices as input.

```python
from tensorflow.python.client import device_lib

# List every device TensorFlow can see (CPU, XLA devices, GPU).
device_lib.list_local_devices()
```

Result:

device_type: "CPU"
device_type: "XLA_CPU"
device_type: "GPU"

```python
import tensorflow as tf

# Count the GPUs that TensorFlow has registered.
print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
```

Result:

Num GPUs Available: 1
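
Beyond counting devices, it can also help to check where operations are actually placed. A minimal sketch, assuming TensorFlow 2.x; the matrix sizes are just illustrative:

```python
import tensorflow as tf

# Log the device every op is placed on (assumes TensorFlow 2.x).
tf.debugging.set_log_device_placement(True)

# A small matmul; the log should show it on /device:GPU:0 if the GPU is being used.
a = tf.random.uniform((1024, 1024))
b = tf.random.uniform((1024, 1024))
c = tf.matmul(a, b)
print(c.device)
```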

What I tried

@@ -43,3 +77,175 @@

Please tell me why it is so low.

Addendum: the code (parts are omitted because of the character limit)

```python
# Imports assumed by the snippet below (omitted from the original post).
# NoisyDense and huberloss are the asker's own definitions, left out here
# because of the character limit mentioned above.
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
import keras
from keras import backend as K
from keras.models import Model
from keras.layers import (Input, Conv2D, MaxPooling2D, Flatten, Dense,
                          LeakyReLU, Concatenate, Lambda)
from keras.constraints import max_norm
from keras.optimizers import Adam


class QNetwork:

    def __init__(self, learning_rate, state_size, action_size):

        self.input1 = Input(shape=state_size.shape)

        # Convolutional feature extractor
        self.a = Conv2D(32, kernel_size=(3, 3), padding='same', activation=LeakyReLU(alpha=0.01),
                        use_bias=True, kernel_initializer='he_normal', bias_initializer='zeros',
                        kernel_constraint=max_norm(2.), bias_constraint=max_norm(2.))(self.input1)
        self.a = Conv2D(32, (3, 3), strides=1, padding='same', activation=LeakyReLU(alpha=0.01),
                        use_bias=True, kernel_initializer='he_normal', bias_initializer='zeros',
                        kernel_constraint=max_norm(2.), bias_constraint=max_norm(2.))(self.a)
        self.a = MaxPooling2D(pool_size=(2, 2))(self.a)

        self.a = Conv2D(64, kernel_size=(3, 3), padding='same', data_format=None, dilation_rate=(1, 1),
                        activation=LeakyReLU(alpha=0.01),
                        use_bias=True, kernel_initializer='he_normal', bias_initializer='zeros',
                        kernel_constraint=max_norm(2.), bias_constraint=max_norm(2.))(self.a)
        self.a = Conv2D(64, kernel_size=(3, 3), padding='same', data_format=None, dilation_rate=(1, 1),
                        activation=LeakyReLU(alpha=0.01),
                        use_bias=True, kernel_initializer='he_normal', bias_initializer='zeros',
                        kernel_constraint=max_norm(2.), bias_constraint=max_norm(2.))(self.a)
        self.a = MaxPooling2D(pool_size=(2, 2))(self.a)
        self.a = Flatten()(self.a)

        # CNN: there are actually quite a lot of layers (some are omitted here)

        self.a = Dense(200, activation='relu',
                       use_bias=True, kernel_initializer='he_normal', bias_initializer='zeros',
                       kernel_constraint=max_norm(2.), bias_constraint=max_norm(2.))(self.a)
        self.a = Dense(200, activation='relu',
                       use_bias=True, kernel_initializer='he_normal', bias_initializer='zeros',
                       kernel_constraint=max_norm(2.), bias_constraint=max_norm(2.))(self.a)
        self.a = Dense(850, activation='relu',
                       use_bias=True, kernel_initializer='he_normal', bias_initializer='zeros',
                       kernel_constraint=max_norm(2.), bias_constraint=max_norm(2.))(self.a)

        # ------------------------------------------------------------------
        # < Value function >
        self.ve = Dense(206, activation='relu',
                        use_bias=True, kernel_initializer='he_normal', bias_initializer='zeros',
                        kernel_constraint=max_norm(2.), bias_constraint=max_norm(2.))(self.a)
        self.ve = NoisyDense(1,
                             use_bias=True, kernel_initializer='he_normal', bias_initializer='zeros',
                             kernel_constraint=max_norm(2.), bias_constraint=max_norm(2.))(self.ve)
        self.dv = Dense(206, activation='relu',
                        use_bias=True, kernel_initializer='he_normal', bias_initializer='zeros',
                        kernel_constraint=max_norm(2.), bias_constraint=max_norm(2.))(self.a)
        self.dv = NoisyDense(1,
                             use_bias=True, kernel_initializer='he_normal', bias_initializer='zeros',
                             kernel_constraint=max_norm(2.), bias_constraint=max_norm(2.))(self.dv)  # Dueling
        self.v = Concatenate()([self.ve, self.dv])  # combine state value and advantage
        self.v = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:] - K.mean(a[:, 1:], axis=1, keepdims=True),
                        output_shape=(1,))(self.v)

        # ------------------------------------------------------------------
        # < Action Mean >
        self.mu = Dense(action_size, activation='relu',
                        use_bias=True, kernel_initializer='he_normal', bias_initializer='zeros',
                        kernel_constraint=max_norm(2.), bias_constraint=max_norm(2.))(self.a)

        # L (lower-triangular matrix)
        self.l = Dense(int(action_size * (action_size + 1) / 2), activation="linear", name='l0')(self.a)
        self.l = Lambda(lambda x: tfp.math.fill_triangular(x))(self.l)
        self.l = Lambda(lambda x: tf.linalg.set_diag(x, tf.exp(tf.linalg.diag_part(x))))(self.l)
        self.l = Lambda(lambda x: tf.matmul(x, x))(self.l)

        # < Action function >
        self.u = Input(shape=(action_size,), name='action_input')
        self.u_mu = keras.layers.Subtract()([self.u, self.mu])
        self.u_mu_l = keras.layers.Dot(axes=1)([self.u_mu, self.l])  # transpose was handled automatically
        self.u_mu_l = keras.layers.Dot(axes=1)([self.u_mu_l, self.u_mu])
        self.A = Lambda(lambda x: -1.0 / 2.0 * x)(self.u_mu_l)
        self.Q = keras.layers.Add()([self.A, self.v])

        # Input and Output --------------------------------------------------
        self.net_q = Model(inputs=[self.input1, self.u], outputs=[self.Q])
        self.net_a = Model(inputs=[self.input1], outputs=[self.mu])
        self.net_v = Model(inputs=[self.input1], outputs=[self.v])

        self.adm = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, amsgrad=False)  # Adam is used as the optimizer
        # self.inputs.compile(loss='mse', optimizer=self.optimizer)
        self.net_q.compile(loss=huberloss, optimizer=self.adm, metrics=['accuracy'])


c = [np.random.rand(1, 720, 480, 3), np.random.rand(1, 2)]
print("uiiiiiiiiiiiiiiiiiiiiiiiiiiii")
a = QNetwork(0.001, np.zeros((720, 480, 3)), 2)
print("uiiiiiiiiiiiiiiiiiiiiiiiiiiii")
for _ in range(4 * 5 * 10 * 5):
    a.net_q.predict_on_batch(c)
print("ok")
```
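
Not part of the original post, but one rough way to see whether these 2,000 predictions are limited by per-call overhead rather than by GPU compute is to time the loop at different batch sizes. A minimal sketch, assuming the `QNetwork` instance `a` built above; the helper name, batch sizes, and call counts are illustrative:

```python
import time
import numpy as np

# Hypothetical helper: time n_calls predictions at a given batch size.
# Assumes `net` is a compiled Keras model taking [state, action] inputs,
# e.g. a.net_q from the QNetwork above.
def time_predictions(net, batch_size, n_calls):
    states = np.random.rand(batch_size, 720, 480, 3)
    actions = np.random.rand(batch_size, 2)
    net.predict_on_batch([states, actions])  # warm-up call (graph build, GPU init)
    start = time.perf_counter()
    for _ in range(n_calls):
        net.predict_on_batch([states, actions])
    elapsed = time.perf_counter() - start
    print(f"batch_size={batch_size}: {elapsed:.2f}s total, "
          f"{1000 * elapsed / n_calls:.1f} ms per call")

# If the per-call time barely grows with batch size, the GPU is mostly idle,
# waiting on per-call overhead rather than being saturated by compute.
time_predictions(a.net_q, batch_size=1, n_calls=100)
time_predictions(a.net_q, batch_size=32, n_calls=100)
```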

1

test
CHANGED
File without changes

test
CHANGED
@@ -28,6 +28,8 @@

The utilization was low

As for the code, it just runs a fairly large model about 2,000 times with random matrices as input.

@@ -37,3 +39,7 @@

Googled it → nothing comes up

I don't know what else to try

Please tell me why it is so low.