Answer edit history
1
It is not quite the form you were hoping for (an LSTM used for the Discriminator), but I have added code.
[Linked content](http://qiita.com/taku-buntu/items/0093a68bfae0b0ff879d)

[Addendum 06/17]

When an LSTM is used, the discriminator asks for input of shape (2, 5, 1), but the generator can only produce output of shape (2, 5), so (with my current knowledge) I was not able to build the combined model.
Even though the discriminator handles time-series data, its job is not to predict the next data point but to judge whether a sample is real or fake, so I thought a different kind of model could be plugged in for it, as shown below.
```Python3
import tensorflow as tf

from tensorflow import keras
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Activation, Dense, Dropout, Flatten, Input, LSTM
from tensorflow.keras import backend as K
from tensorflow.keras.initializers import he_normal
from tensorflow.keras.optimizers import Adam

import os
import numpy as np

# Fix the random seeds and let TensorFlow grow GPU memory as needed
np.random.seed(0)
np.random.RandomState(0)
tf.set_random_seed(0)
config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
session = tf.Session(config=config)
K.set_session(session)

n_data = 2       # batch size
max_len = 7      # sequence length fed to the generator
dim_in = 3       # feature dimension of the generator input
dim_out = 5      # dimension of the generated output
dim_hidden = 11  # hidden units in the LSTM / Dense layers


def build_generator_l():
    model = Sequential()
    model.add(LSTM(dim_hidden, input_shape=(max_len, dim_in)))
    model.add(Dropout(0.2))
    model.add(Dense(dim_out * 1))
    # model.summary()
    return model


def build_discriminator_l():
    model = Sequential()
    # model.add(LSTM(dim_hidden, input_shape=(dim_out, 1), return_sequences=False))
    model.add(Dense(dim_hidden, input_dim=dim_out, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(1, activation='sigmoid'))
    # model.summary()
    return model


# Generator
ge = build_generator_l()

# Discriminator
optimizer = Adam(lr=0.0002, beta_1=0.5)
dsc = build_discriminator_l()
dsc.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])

# Combined model
z = Input(shape=(max_len, dim_in))
img = ge(z)
dsc.trainable = False
valid = dsc(img)
cmb = Model(z, valid)
cmb.compile(loss='binary_crossentropy', optimizer=optimizer)

# Input to the generator
noise = np.random.normal(0, 1, (n_data, max_len, dim_in))
# Train the combined model for one batch
g_loss = cmb.train_on_batch(noise, np.ones((n_data, 1)))

print(g_loss)
```
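For reference, the code above only trains the combined model (i.e. the generator) for one batch; in a full GAN loop the discriminator would also be trained on real and generated samples. Below is a minimal sketch of that step, reusing the objects defined above; `real_seq` is only a hypothetical placeholder for a real-data batch of shape `(n_data, dim_out)` and is not part of the original code.

```Python3
# Sketch only: one discriminator training step (not in the original answer).
# real_seq is a hypothetical stand-in for a batch of real data, shape (n_data, dim_out).
real_seq = np.random.normal(0, 1, (n_data, dim_out))

gen_seq = ge.predict(noise)  # generated (fake) samples, shape (n_data, dim_out)

d_loss_real = dsc.train_on_batch(real_seq, np.ones((n_data, 1)))   # real samples -> label 1
d_loss_fake = dsc.train_on_batch(gen_seq, np.zeros((n_data, 1)))   # fake samples -> label 0
d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)                    # average of [loss, accuracy]

print(d_loss)
```

Because `dsc` was compiled before `dsc.trainable = False` was set, `dsc.train_on_batch` still updates the discriminator's weights, while the later-compiled `cmb` keeps them frozen (the usual Keras GAN pattern).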
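As for the shape mismatch mentioned in the addendum (the LSTM discriminator wanting (2, 5, 1) while the generator only emits (2, 5)), one possible workaround might be to append a `Reshape` layer to the generator so its output gains a trailing feature axis of size 1, which an LSTM discriminator could then consume. This is only a rough, untested sketch reusing the imports and constants from the code above, not something from the original answer:

```Python3
from tensorflow.keras.layers import Reshape

def build_generator_reshaped():
    # Same generator as above, with a Reshape so the output is (batch, dim_out, 1)
    model = Sequential()
    model.add(LSTM(dim_hidden, input_shape=(max_len, dim_in)))
    model.add(Dropout(0.2))
    model.add(Dense(dim_out))
    model.add(Reshape((dim_out, 1)))
    return model

def build_discriminator_lstm():
    # LSTM discriminator that accepts the reshaped (dim_out, 1) sequences
    model = Sequential()
    model.add(LSTM(dim_hidden, input_shape=(dim_out, 1)))
    model.add(Dropout(0.2))
    model.add(Dense(1, activation='sigmoid'))
    return model
```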