Why is it that, after defining the Generator and Discriminator as shown below, trying to use tf.train.AdamOptimizer fails with the following error?
ValueError: Variable discriminator/d_h0_conv/w/Adam/ does not exist, or was not created with tf.get_variable(). Did you mean to set reuse=None in VarScope?
tf.train.GradientDescentOptimizer(1e-3) does work, but training does not go well unless I use tf.train.AdamOptimizer. What do I have to change to be able to use this optimizer?
Any guidance would be much appreciated.
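To narrow the problem down, I put together a stripped-down reproduction. My working guess (please correct me if this is wrong) is that tf.train.AdamOptimizer has to create per-variable "slot" variables such as w/Adam and w/Adam_1 via tf.get_variable(), whereas tf.train.GradientDescentOptimizer keeps no per-variable state, which would explain why only Adam trips over a variable scope that is in reuse mode:

import tensorflow as tf  # TF 1.x

w = tf.get_variable('w', shape=[3], initializer=tf.zeros_initializer())
loss = tf.reduce_sum(tf.square(w))

# Put the top-level scope into reuse mode, as my full code below does.
tf.get_variable_scope().reuse_variables()

# GradientDescentOptimizer creates no extra variables -> this works:
sgd_op = tf.train.GradientDescentOptimizer(1e-3).minimize(loss)

# AdamOptimizer must create the slot variables 'w/Adam' and 'w/Adam_1'
# with tf.get_variable(); under reuse=True they "do not exist" -> ValueError:
adam_op = tf.train.AdamOptimizer(1e-3).minimize(loss)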
Addendum
Since this needs to be resolved urgently, I have cross-posted this question; I thought it would be solved faster if more people saw it.
Link
def discriminator(image, reuse=False):
    batch_size = 64
    with tf.variable_scope("discriminator") as scope:
        if reuse:
            scope.reuse_variables()
        h0 = lrelu(conv2d(image, 64, name='d_h0_conv'))
        h1 = lrelu(batch_norm(conv2d(h0, 128, name='d_h1_conv'), 'd_bn1'))
        h2 = lrelu(batch_norm(conv2d(h1, 256, name='d_h2_conv'), 'd_bn2'))
        h3 = lrelu(batch_norm(conv2d(h2, 512, name='d_h3_conv'), 'd_bn3'))
        h4 = linear_d(tf.reshape(h3, [batch_size, -1]), 2, 'd_h4_lin')
        return h4  # shape=(batch_size, 2)

def generator(z_):
    batch_size = 64
    with tf.variable_scope("generator") as scope:
        # project `z` and reshape
        z, h0_w, h0_b = linear(z_, 64*8*4*4, 'g_h0_lin', with_w=True)
        h0 = tf.nn.relu(batch_norm(tf.reshape(z, [-1, 4, 4, 64*8]), 'g_bn0'))
        h1, h1_w, h1_b = deconv2d(h0, [batch_size, 8, 8, 64*4], name='g_h1', with_w=True)
        h1 = tf.nn.relu(batch_norm(h1, 'g_bn1'))
        h2, h2_w, h2_b = deconv2d(h1, [batch_size, 16, 16, 64*2], name='g_h2', with_w=True)
        h2 = tf.nn.relu(batch_norm(h2, 'g_bn2'))
        h3, h3_w, h3_b = deconv2d(h2, [batch_size, 32, 32, 64*1], name='g_h3', with_w=True)
        h3 = tf.nn.relu(batch_norm(h3, 'g_bn3'))
        h4, h4_w, h4_b = deconv2d(h3, [batch_size, 64, 64, 3], name='g_h4', with_w=True)
        return tf.nn.tanh(h4)  # shape=(batch_size, 64, 64, 3)

def sampler(z_):
    batch_size = 64
    with tf.variable_scope("generator") as scope:
        # project `z` and reshape (batch_norm in inference mode: train=False)
        z = linear(z_, 64*8*4*4, 'g_h0_lin')
        h0 = tf.nn.relu(batch_norm(tf.reshape(z, [-1, 4, 4, 64*8]), 'g_bn0', train=False))
        h1 = deconv2d(h0, [batch_size, 8, 8, 64*4], name='g_h1')
        h1 = tf.nn.relu(batch_norm(h1, 'g_bn1', train=False))
        h2 = deconv2d(h1, [batch_size, 16, 16, 64*2], name='g_h2')
        h2 = tf.nn.relu(batch_norm(h2, 'g_bn2', train=False))
        h3 = deconv2d(h2, [batch_size, 32, 32, 64*1], name='g_h3')
        h3 = tf.nn.relu(batch_norm(h3, 'g_bn3', train=False))
        h4 = deconv2d(h3, [batch_size, 64, 64, 3], name='g_h4')
        return tf.nn.tanh(h4)  # shape=(batch_size, 64, 64, 3)

G = generator(z)                 # G(z)
D_logits = discriminator(image)  # D(x)
tf.get_variable_scope().reuse_variables()
sampler = sampler(z)
D_logits_ = discriminator(G)     # D(G(z))

batch_label = 64
d_loss_real = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=D_logits, labels=tf.ones([batch_label], dtype=tf.int64)))
d_loss_fake = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=D_logits_, labels=tf.zeros([batch_label], dtype=tf.int64)))
g_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=D_logits_, labels=tf.ones([batch_label], dtype=tf.int64)))
d_loss = d_loss_real + d_loss_fake

d_vars = [var for var in tf.trainable_variables() if 'd_' in var.name]
g_vars = [var for var in tf.trainable_variables() if 'g_' in var.name]
saver = tf.train.Saver()

g_optim = tf.train.AdamOptimizer(learning_rate=0.0002, beta1=0.5).minimize(g_loss, var_list=g_vars)
d_optim = tf.train.AdamOptimizer(learning_rate=0.0002, beta1=0.5).minimize(d_loss, var_list=d_vars)

ValueError: Variable discriminator/d_h0_conv/w/Adam/ does not exist, or was not created with tf.get_variable(). Did you mean to set reuse=None in VarScope?
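If that reading of the error is right, the root cause would be that the top-level tf.get_variable_scope().reuse_variables() call is still in effect when minimize() runs, so Adam cannot create its slot variables (e.g. discriminator/d_h0_conv/w/Adam). Here is a sketch of the restructuring I am considering, which reuses only the shared sub-scopes instead of the root scope (it assumes sampler() also gets a scope.reuse_variables() call inside its "generator" scope):

G = generator(z)                          # creates the generator variables
D_logits = discriminator(image)           # creates the discriminator variables
D_logits_ = discriminator(G, reuse=True)  # reuses them via scope.reuse_variables()
samples = sampler(z)                      # sampler calls scope.reuse_variables() itself

# With the root scope no longer in reuse mode, AdamOptimizer should be able
# to create its slot variables:
g_optim = tf.train.AdamOptimizer(learning_rate=0.0002, beta1=0.5).minimize(g_loss, var_list=g_vars)
d_optim = tf.train.AdamOptimizer(learning_rate=0.0002, beta1=0.5).minimize(d_loss, var_list=d_vars)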
Cross-post: https://ja.stackoverflow.com/questions/37348/tf-get-variable%e3%81%8c%e5%ae%9a%e7%be%a9%e3%81%95%e3%82%8c%e3%81%aa%e3%81%84%e3%81%9f%e3%82%81tf-train-adamoptimizer%e3%81%8c%e4%bd%bf%e3%81%88%e3%81%aa%e3%81%84%e5%95%8f%e9%a1%8c%e3%81%ab%e3%81%a4%e3%81%84%e3%81%a6 Help → https://teratail.com/help#posted-otherservice
I added the note above following the help page.