```python
import tensorflow as tf
import tensorflow_probability as tfp

# Memory, Actor, Critic and self._preproc() are defined elsewhere in my code.

class Agent:
    LEARNING_RATE = 0.001
    BATCH_SIZE = 320
    OUTPUT_SIZE = 2
    EPSILON = 0.5
    DECAY_RATE = 0.005
    MIN_EPSILON = 0.1
    GAMMA = 0.99
    MEMORY_SIZE = 150000

    def __init__(self, path, window_size, skip, save=False, saver_path=None,
                 restore=False, action_prior="normal", reparameterize=True,
                 noise=True, norm=True):
        self.path = path
        self.window_size = window_size
        self._preproc()
        self.state_size = (None, self.window_size, self.df.shape[-1])
        self.skip = skip
        self.memory = Memory(self.MEMORY_SIZE)
        # normal or uniform
        self._action_prior = action_prior
        policy_prior_log_probs = 0.0
        tf.reset_default_graph()
        self.sess = tf.InteractiveSession()
        self.actor = Actor('actor-original', self.state_size, self.OUTPUT_SIZE, noise, norm)
        self.actor_target = Actor('actor-target', self.state_size, self.OUTPUT_SIZE, noise, norm)
        self.critic = Critic('critic-original', self.state_size, self.OUTPUT_SIZE,
                             self.LEARNING_RATE, noise, norm)
        self.critic_target = Critic('critic-target', self.state_size, self.OUTPUT_SIZE,
                                    self.LEARNING_RATE, noise, norm)
        self.actions = tf.placeholder(tf.float32, (None, self.OUTPUT_SIZE))
        self.log_pi = tf.placeholder(tf.float32, (None,))
        if self._action_prior == 'normal':
            D_s = self.actions.shape.as_list()[-1]
            policy_prior = tfp.distributions.MultivariateNormalDiag(
                loc=tf.zeros(D_s), scale_diag=tf.ones(D_s))
            policy_prior_log_probs = policy_prior.log_prob(self.actions)
        elif self._action_prior == 'uniform':
            policy_prior_log_probs = 0.0

        min_log_target = tf.minimum(self.critic.qf1, self.critic.qf2)
        if reparameterize:
            policy_kl_loss = tf.reduce_mean(self.log_pi - self.critic.qf1)
        else:
            policy_kl_loss = tf.reduce_mean(self.log_pi * tf.stop_gradient(
                self.log_pi - self.critic.qf1 + self.critic.value_fn - policy_prior_log_probs))
        policy_regularization_losses = tf.get_collection(
            tf.GraphKeys.REGULARIZATION_LOSSES, scope="actor")
        policy_regularization_loss = tf.reduce_sum(policy_regularization_losses)
        policy_loss = policy_kl_loss + policy_regularization_loss

        self.vf_loss = 0.5 * tf.reduce_mean(
            (self.critic.value_fn
             - tf.stop_gradient(min_log_target - self.log_pi + policy_prior_log_probs)) ** 2)

        self.actor_optimizer = tf.train.AdamOptimizer(self.LEARNING_RATE).minimize(policy_loss)
        self.vf_optimizer = tf.train.AdamOptimizer(self.LEARNING_RATE).minimize(self.vf_loss)

        self.save = save
        self.saver = tf.train.Saver()
        self.saver_path = saver_path

        if restore:
            self.saver.restore(self.sess, tf.train.latest_checkpoint('drive/My Drive/'))
        else:
            self.sess.run(tf.global_variables_initializer())
```
With the code above, even when I instantiate the Agent class again and rerun training, I only get results like the ones below; I can't see that the parameters are actually being restored. What should I do?
The top result is without restore; the bottom is with restore.
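For context, here is a minimal, self-contained sketch of the `tf.train.Saver` round trip as I understand it in TF 1.x. The variable `w`, the directory `./ckpt`, and the checkpoint prefix `model` are placeholder names, not part of the Agent code above; the point is that restore can only succeed if something earlier actually called `saver.save()` and `tf.train.latest_checkpoint()` finds the resulting `checkpoint` index file:

```python
import os
import tensorflow as tf

# --- save side (placeholder names: w, ./ckpt, model) ---
tf.reset_default_graph()
w = tf.get_variable('w', initializer=tf.constant(1.0))
saver = tf.train.Saver()
os.makedirs('./ckpt', exist_ok=True)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(w.assign(42.0))          # stand-in for training updates
    saver.save(sess, './ckpt/model')  # writes model.* plus a "checkpoint" index file

# --- restore side ---
tf.reset_default_graph()
w = tf.get_variable('w', initializer=tf.constant(1.0))  # graph must be rebuilt identically
saver = tf.train.Saver()
with tf.Session() as sess:
    ckpt = tf.train.latest_checkpoint('./ckpt')  # returns None if no "checkpoint" file exists
    assert ckpt is not None, 'no checkpoint found in ./ckpt'
    saver.restore(sess, ckpt)  # do NOT run global_variables_initializer() after this
    print(sess.run(w))         # prints 42.0 if the restore actually happened
```

As far as I can tell, the code shown never calls `saver.save()` itself (the `self.save` flag is stored but unused in `__init__`), so I am not sure whether `tf.train.latest_checkpoint('drive/My Drive/')` ever finds anything to restore; it returns `None` when that directory contains no checkpoint file.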