What I want to do
I want to run train.py from the license plate recognition program on this site. However, since it is written in TensorFlow 1.x, I am in the middle of rewriting it for TensorFlow 2.x.
I can't figure out how to resolve one of the resulting errors, and would appreciate some help.
I'm stuck at the point where var_list has to be passed to the optimizer's minimize function: I don't understand what it is that is supposed to be minimized...
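For reference, my current understanding of the TF 2.x API: the Keras-style optimizer no longer picks up trainable variables from a graph collection, so minimize must be told explicitly which variables to update (var_list) and, under eager execution, takes the loss as a zero-argument callable. A minimal toy sketch (the variable w and the loss here are my own invention, not from train.py):

Python
import tensorflow as tf

# Toy example under eager execution (the TF 2.x default): minimize expects
# a zero-argument callable returning the loss, plus the variables to update.
w = tf.Variable(5.0)
opt = tf.optimizers.Adam(0.1)

for _ in range(100):
    opt.minimize(lambda: (w - 3.0) ** 2, var_list=[w])  # loss is smallest at w == 3

print(w.numpy())  # should approach 3.0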
Error message
cmd
WARNING:tensorflow:From C:\Users\main9\anaconda3\envs\kmutt\lib\site-packages\tensorflow_core\python\ops\resource_variable_ops.py:1635: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version.
Instructions for updating:
If using Keras pass *_constraint arguments to layers.
Traceback (most recent call last):
  File "train.py", line 269, in <module>
    initial_weights=initial_weights)
  File "train.py", line 178, in train
    train_step = tf.optimizers.Adam(learn_rate).minimize(loss)
TypeError: minimize() missing 1 required positional argument: 'var_list'
Program
Relevant section:
Python
def train(learn_rate, report_steps, batch_size, initial_weights=None):
    x, y, params = model.get_training_model()

    # y_ = tf.placeholder(tf.float32, [None, 7 * len(common.CHARS) + 1])
    tf.compat.v1.disable_eager_execution()
    y_ = tf.compat.v1.placeholder(tf.float32, [None, 7 * len(common.CHARS) + 1])

    digits_loss, presence_loss, loss = get_loss(y, y_)
    # train_step = tf.train.AdamOptimizer(learn_rate).minimize(loss)
    train_step = tf.optimizers.Adam(learn_rate).minimize(loss)

    best = tf.argmax(tf.reshape(y[:, 1:], [-1, 7, len(common.CHARS)]), 2)
    correct = tf.argmax(tf.reshape(y_[:, 1:], [-1, 7, len(common.CHARS)]), 2)

    if initial_weights is not None:
        assert len(params) == len(initial_weights)
        assign_ops = [w.assign(v) for w, v in zip(params, initial_weights)]

    init = tf.initialize_all_variables()
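Since this part of the code stays in graph mode (tf.compat.v1.disable_eager_execution() plus placeholders and a Session), one workaround I am considering, based on the TF migration notes rather than on having run this exact code, is to keep the original TF1 optimizer through the compat module instead of tf.optimizers.Adam:

Python
# Graph-mode path: the TF1 optimizer accepts a loss tensor directly and
# defaults var_list to the graph's trainable variables.
train_step = tf.compat.v1.train.AdamOptimizer(learn_rate).minimize(loss)

# Keras-optimizer path (eager execution only): the loss must be a
# zero-argument callable, and var_list the variables to train -- presumably
# the `params` returned by model.get_training_model(), though that is my
# assumption.
# train_step = tf.optimizers.Adam(learn_rate).minimize(loss_fn, var_list=params)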
The whole function in question (the full program would not fit within the character limit (>人<)):
Python
def train(learn_rate, report_steps, batch_size, initial_weights=None):
    """
    Train the network.

    The function operates interactively: Progress is reported on stdout, and
    training ceases upon `KeyboardInterrupt` at which point the learned weights
    are saved to `weights.npz`, and also returned.

    :param learn_rate:
        Learning rate to use.

    :param report_steps:
        Every `report_steps` batches a progress report is printed.

    :param batch_size:
        The size of the batches used for training.

    :param initial_weights:
        (Optional.) Weights to initialize the network with.

    :return:
        The learned network weights.

    """
    x, y, params = model.get_training_model()

    # y_ = tf.placeholder(tf.float32, [None, 7 * len(common.CHARS) + 1])
    tf.compat.v1.disable_eager_execution()
    y_ = tf.compat.v1.placeholder(tf.float32, [None, 7 * len(common.CHARS) + 1])

    digits_loss, presence_loss, loss = get_loss(y, y_)
    # train_step = tf.train.AdamOptimizer(learn_rate).minimize(loss)
    train_step = tf.optimizers.Adam(learn_rate).minimize(loss)

    best = tf.argmax(tf.reshape(y[:, 1:], [-1, 7, len(common.CHARS)]), 2)
    correct = tf.argmax(tf.reshape(y_[:, 1:], [-1, 7, len(common.CHARS)]), 2)

    if initial_weights is not None:
        assert len(params) == len(initial_weights)
        assign_ops = [w.assign(v) for w, v in zip(params, initial_weights)]

    init = tf.initialize_all_variables()

    def vec_to_plate(v):
        return "".join(common.CHARS[i] for i in v)

    def do_report():
        r = sess.run([best,
                      correct,
                      tf.greater(y[:, 0], 0),
                      y_[:, 0],
                      digits_loss,
                      presence_loss,
                      loss],
                     feed_dict={x: test_xs, y_: test_ys})
        num_correct = numpy.sum(
            numpy.logical_or(
                numpy.all(r[0] == r[1], axis=1),
                numpy.logical_and(r[2] < 0.5,
                                  r[3] < 0.5)))
        r_short = (r[0][:190], r[1][:190], r[2][:190], r[3][:190])
        for b, c, pb, pc in zip(*r_short):
            print("{} {} <-> {} {}".format(vec_to_plate(c), pc,
                                           vec_to_plate(b), float(pb)))
        num_p_correct = numpy.sum(r[2] == r[3])

        print("B{:3d} {:2.02f}% {:02.02f}% loss: {} "
              "(digits: {}, presence: {}) |{}|".format(
                  batch_idx,
                  100. * num_correct / (len(r[0])),
                  100. * num_p_correct / len(r[2]),
                  r[6],
                  r[4],
                  r[5],
                  "".join("X "[numpy.array_equal(b, c) or (not pb and not pc)]
                          for b, c, pb, pc in zip(*r_short))))

    def do_batch():
        sess.run(train_step,
                 feed_dict={x: batch_xs, y_: batch_ys})
        if batch_idx % report_steps == 0:
            do_report()

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.95)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        sess.run(init)
        if initial_weights is not None:
            sess.run(assign_ops)

        test_xs, test_ys = unzip(list(read_data("test/*.png"))[:50])

        try:
            last_batch_idx = 0
            last_batch_time = time.time()
            batch_iter = enumerate(read_batches(batch_size))
            for batch_idx, (batch_xs, batch_ys) in batch_iter:
                do_batch()
                if batch_idx % report_steps == 0:
                    batch_time = time.time()
                    if last_batch_idx != batch_idx:
                        print("time for 60 batches {}".format(
                            60 * (last_batch_time - batch_time) /
                            (last_batch_idx - batch_idx)))
                        last_batch_idx = batch_idx
                        last_batch_time = batch_time

        except KeyboardInterrupt:
            last_weights = [p.eval() for p in params]
            numpy.savez("weights.npz", *last_weights)
            return last_weights
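While porting I also noticed that, as far as I can tell, several other symbols in this function are TF1-only and would need their compat.v1 spellings under TF 2.x (this is my own checklist, not tested end to end):

Python
# TF1 symbols and their TF 2.x compat spellings (my assumption):
init = tf.compat.v1.global_variables_initializer()   # was tf.initialize_all_variables()

gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=0.95)
config = tf.compat.v1.ConfigProto(gpu_options=gpu_options)
with tf.compat.v1.Session(config=config) as sess:    # was tf.Session / tf.ConfigProto
    sess.run(init)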
Loss function:
Python
def get_loss(y, y_):
    # Calculate the loss from digits being incorrect. Don't count loss from
    # digits that are in non-present plates.
    digits_loss = tf.nn.softmax_cross_entropy_with_logits(
        tf.reshape(y[:, 1:],
                   [-1, len(common.CHARS)]),
        tf.reshape(y_[:, 1:],
                   [-1, len(common.CHARS)]))
    digits_loss = tf.reshape(digits_loss, [-1, 7])
    digits_loss = tf.reduce_sum(digits_loss, 1)
    digits_loss *= (y_[:, 0] != 0)
    digits_loss = tf.reduce_sum(digits_loss)

    # Calculate the loss from presence indicator being wrong.
    presence_loss = tf.nn.sigmoid_cross_entropy_with_logits(
        y[:, :1], y_[:, :1])
    presence_loss = 7 * tf.reduce_sum(presence_loss)

    return digits_loss, presence_loss, digits_loss + presence_loss
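One more thing I am unsure about: in TF 2.x, tf.nn.softmax_cross_entropy_with_logits and tf.nn.sigmoid_cross_entropy_with_logits take labels first and logits second, while the very old TF API this code was written against took logits first, so I suspect keyword arguments are needed here (and the boolean mask may need an explicit cast):

Python
# Keyword arguments make the ordering explicit: y is the network output
# (logits) and y_ the target, as in get_loss above.
digits_loss = tf.nn.softmax_cross_entropy_with_logits(
    labels=tf.reshape(y_[:, 1:], [-1, len(common.CHARS)]),
    logits=tf.reshape(y[:, 1:], [-1, len(common.CHARS)]))

presence_loss = tf.nn.sigmoid_cross_entropy_with_logits(
    labels=y_[:, :1], logits=y[:, :1])

# TF 2.x will not multiply a float tensor by a bool one, so the mask
# presumably needs a cast as well:
digits_loss *= tf.cast(tf.not_equal(y_[:, 0], 0), tf.float32)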
Thank you in advance.