Question edit history

Revision 2
Edit summary: I added the code. Parts that are not relevant (the loss and the model) are omitted.

title
File without changes

body
CHANGED
@@ -15,9 +15,112 @@

Regarding the code that showed the low utilization: it just runs a fairly large model for roughly 2,000 iterations with random matrices as input.
```python
from tensorflow.python.client import device_lib
device_lib.list_local_devices()
```
Result:

device_type: "CPU"
device_type: "XLA_CPU"
device_type: "GPU"
```python
import tensorflow as tf
print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
```
Result:

Num GPUs Available: 1
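For reference, two more built-in checks, an illustrative addition assuming the same TensorFlow installation, that report whether the installed binary was built with CUDA and which GPU device would be used by default:

```python
# Illustrative addition: confirm the TensorFlow binary itself has CUDA support.
import tensorflow as tf

print("Built with CUDA :", tf.test.is_built_with_cuda())  # True for GPU builds
print("Default GPU name:", tf.test.gpu_device_name())     # e.g. '/device:GPU:0', '' if no GPU is visible
```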
What I tried
Googled it → nothing came up
I don't know what else I should try

Please tell me why the utilization is low.

Addendum: the code (parts are omitted because of the character limit)
```python
# The imports below were omitted from the original post because of the character
# limit; they are an assumption about what the code uses. NoisyDense and
# huberloss are custom definitions that are also not shown in the question.
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow import keras
from tensorflow.keras import backend as K
from tensorflow.keras.constraints import max_norm
from tensorflow.keras.layers import (Input, Conv2D, MaxPooling2D, Flatten, Dense,
                                     LeakyReLU, Concatenate, Lambda)
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam


class QNetwork:
    def __init__(self, learning_rate, state_size, action_size):
        self.input1 = Input(shape=(state_size.shape))

        self.a = Conv2D(32, kernel_size=(3, 3), padding='same', activation=LeakyReLU(alpha=0.01),
                        use_bias=True, kernel_initializer='he_normal', bias_initializer='zeros',
                        kernel_constraint=max_norm(2.), bias_constraint=max_norm(2.))(self.input1)
        self.a = Conv2D(32, (3, 3), strides=1, padding='same', activation=LeakyReLU(alpha=0.01),
                        use_bias=True, kernel_initializer='he_normal', bias_initializer='zeros',
                        kernel_constraint=max_norm(2.), bias_constraint=max_norm(2.))(self.a)
        self.a = MaxPooling2D(pool_size=(2, 2))(self.a)

        self.a = Conv2D(64, kernel_size=(3, 3), padding='same', data_format=None, dilation_rate=(1, 1),
                        activation=LeakyReLU(alpha=0.01),
                        use_bias=True, kernel_initializer='he_normal', bias_initializer='zeros',
                        kernel_constraint=max_norm(2.), bias_constraint=max_norm(2.))(self.a)
        self.a = Conv2D(64, kernel_size=(3, 3), padding='same', data_format=None, dilation_rate=(1, 1),
                        activation=LeakyReLU(alpha=0.01),
                        use_bias=True, kernel_initializer='he_normal', bias_initializer='zeros',
                        kernel_constraint=max_norm(2.), bias_constraint=max_norm(2.))(self.a)
        self.a = MaxPooling2D(pool_size=(2, 2))(self.a)
        self.a = Flatten()(self.a)

        # CNN part ends here; there are quite a few layers.

        self.a = Dense(200, activation='relu',
                       use_bias=True, kernel_initializer='he_normal', bias_initializer='zeros',
                       kernel_constraint=max_norm(2.), bias_constraint=max_norm(2.))(self.a)
        self.a = Dense(200, activation='relu',
                       use_bias=True, kernel_initializer='he_normal', bias_initializer='zeros',
                       kernel_constraint=max_norm(2.), bias_constraint=max_norm(2.))(self.a)
        self.a = Dense(850, activation='relu',
                       use_bias=True, kernel_initializer='he_normal', bias_initializer='zeros',
                       kernel_constraint=max_norm(2.), bias_constraint=max_norm(2.))(self.a)
        # -------------------------------------------------------------------------------
        # < Value function >
        self.ve = Dense(206, activation='relu',
                        use_bias=True, kernel_initializer='he_normal', bias_initializer='zeros',
                        kernel_constraint=max_norm(2.), bias_constraint=max_norm(2.))(self.a)
        self.ve = NoisyDense(1,
                             use_bias=True, kernel_initializer='he_normal', bias_initializer='zeros',
                             kernel_constraint=max_norm(2.), bias_constraint=max_norm(2.))(self.ve)
        self.dv = Dense(206, activation='relu',
                        use_bias=True, kernel_initializer='he_normal', bias_initializer='zeros',
                        kernel_constraint=max_norm(2.), bias_constraint=max_norm(2.))(self.a)
        self.dv = NoisyDense(1,
                             use_bias=True, kernel_initializer='he_normal', bias_initializer='zeros',
                             kernel_constraint=max_norm(2.), bias_constraint=max_norm(2.))(self.dv)  # Dueling
        self.v = Concatenate()([self.ve, self.dv])  # combine the state-value and advantage streams
        # Dueling aggregation: Q = V + A - mean(A)
        self.v = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:] - K.mean(a[:, 1:], axis=1, keepdims=True),
                        output_shape=(1,))(self.v)
        # --------------------------------------------------------------------------------
        # < Action Mean >
        self.mu = Dense(action_size, activation='relu',
                        use_bias=True, kernel_initializer='he_normal', bias_initializer='zeros',
                        kernel_constraint=max_norm(2.), bias_constraint=max_norm(2.))(self.a)
        # L: lower-triangular matrix used for the quadratic advantage term
        self.l = Dense(int(action_size * (action_size + 1) / 2), activation="linear", name='l0')(self.a)
        self.l = Lambda(lambda x: tfp.math.fill_triangular(x))(self.l)
        self.l = Lambda(lambda x: tf.linalg.set_diag(x, tf.exp(tf.linalg.diag_part(x))))(self.l)
        self.l = Lambda(lambda x: tf.matmul(x, x))(self.l)
        # < Action function >
        self.u = Input(shape=(action_size,), name='action_input')
        self.u_mu = keras.layers.Subtract()([self.u, self.mu])
        self.u_mu_l = keras.layers.Dot(axes=1)([self.u_mu, self.l])  # transpose was handled automatically
        self.u_mu_l = keras.layers.Dot(axes=1)([self.u_mu_l, self.u_mu])
        self.A = Lambda(lambda x: -1.0 / 2.0 * x)(self.u_mu_l)
        self.Q = keras.layers.Add()([self.A, self.v])
        # Input and Output -----------------------------------------------------
        self.net_q = Model(inputs=[self.input1, self.u], outputs=[self.Q])
        self.net_a = Model(inputs=[self.input1], outputs=[self.mu])
        self.net_v = Model(inputs=[self.input1], outputs=[self.v])

        self.adm = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, amsgrad=False)  # Adam is used as the optimizer
        # self.inputs.compile(loss='mse', optimizer=self.optimizer)
        self.net_q.compile(loss=huberloss, optimizer=self.adm, metrics=['accuracy'])


c = [np.random.rand(1, 720, 480, 3), np.random.rand(1, 2)]
print("uiiiiiiiiiiiiiiiiiiiiiiiiiiii")
a = QNetwork(0.001, np.zeros((720, 480, 3)), 2)
print("uiiiiiiiiiiiiiiiiiiiiiiiiiiii")
for _ in range(4 * 5 * 10 * 5):
    a.net_q.predict_on_batch(c)
    print("ok")
```
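For reference, a minimal timing sketch, an illustrative addition that assumes the QNetwork class above is already defined, showing how the same predict_on_batch loop could be timed for a batch of 1 versus a larger batch:

```python
# Illustrative addition: time predict_on_batch for different batch sizes
# (assumes the QNetwork class defined above).
import time
import numpy as np

net = QNetwork(0.001, np.zeros((720, 480, 3)), 2)

for batch_size in (1, 32):
    x = [np.random.rand(batch_size, 720, 480, 3), np.random.rand(batch_size, 2)]
    net.net_q.predict_on_batch(x)            # warm-up (graph build / kernel initialisation)
    start = time.time()
    for _ in range(20):
        net.net_q.predict_on_batch(x)
    per_call = (time.time() - start) / 20
    print(f"batch_size={batch_size}: {per_call * 1000:.1f} ms per predict_on_batch call")
```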
Revision 1

title
File without changes

body
CHANGED
@@ -13,8 +13,11 @@
cuDNN 7.6.5
GPU: RTX 2080 Ti

Regarding the code that showed the low utilization: it just runs a fairly large model for roughly 2,000 iterations with random matrices as input.

What I tried
Googled it → nothing came up
I don't know what else I should try

Please tell me why the utilization is low.