Peeking into the futureの再現実験
Githubに公開されているPeeking into the futureの論文のコード(next-prediction)を実行してみたところ,AssertionErrorが発生し,エラー文に何も出ておらず,解決への糸口が見つからないため,手助けいただきたいです.
Github 再現実験手順URL:https://github.com/JunweiLiang/next-prediction/blob/master/code/prepare_data/README.md
対象のコードURL:https://github.com/JunweiLiang/next-prediction/blob/master/code/prepare_data/step4_generate_traj.py
発生している問題・エラーメッセージ
Traceback (most recent call last): File "step4_generate_traj.py", line 206, in <module> assert frame_lst AssertionError
該当のソースコード
python2.7
1# coding=utf-8 2from __future__ import absolute_import 3from __future__ import division 4from __future__ import print_function 5 6"""Generate trajectory files and scene, person box, other box, activity files.""" 7 8# pylint: disable=g-importing-member 9# pylint: disable=g-bad-import-order 10import argparse 11import os 12import operator 13import numpy as np 14import cPickle as pickle 15from tqdm import tqdm 16from glob import glob 17from utils import activity2id 18from utils import actev_scene2imgsize 19from utils import get_scene 20 21 22parser = argparse.ArgumentParser() 23parser.add_argument("npzpath") 24parser.add_argument("split_path") 25parser.add_argument("out_path") 26parser.add_argument("--drop_frame", default=1, type=int, 27 help="drop frame to match different fps, assuming " 28 "the virat fps is 30fps, so to get 2.5fps, " 29 "need to drop 12 frames every time") 30 31parser.add_argument("--scene_feat_path", 32 help="the scene segmentation output path," 33 "under it should be frame_name.npy") 34 35# the following are the output paths 36parser.add_argument("--scene_map_path", 37 help="frameidx mapping to actual scene feature file output") 38 39parser.add_argument("--person_box_path", 40 help="Person box output") 41 42parser.add_argument("--other_box_path", 43 help="Other object box output") 44 45parser.add_argument("--activity_path", 46 help="activity annotation output") 47 48# for ETH/UCY you need to write your own video size mapping 49# In the PeekingFuture paper we resize ETH/UCY to 720x576 to extract features 50scene2imgsize = actev_scene2imgsize 51 52actid2name = {activity2id[n]: n for n in activity2id} 53 54 55def resize_xy(xy, vname, resize_w, resize_h): 56 """Resize the xy coordinates.""" 57 x_, y_ = xy 58 w, h = scene2imgsize[get_scene(vname)] 59 diff_w = resize_w / float(w) 60 diff_h = resize_h / float(h) 61 x_ *= diff_w 62 y_ *= diff_h 63 64 # normalize coordinates? 
65 return [x_, y_] 66 67 68def resize_box(box, vname, resize_w, resize_h): 69 """Resize the box coordintates.""" 70 x1, y1, x2, y2 = [float(o) for o in box] 71 72 w, h = scene2imgsize[get_scene(vname)] 73 diff_w = resize_w / float(w) 74 diff_h = resize_h / float(h) 75 76 x1 *= diff_w 77 x2 *= diff_w 78 y1 *= diff_h 79 y2 *= diff_h 80 return [x1, y1, x2, y2] 81 82 83# frame_lst is [(videoname,frameidx)], assume sorted by the frameidx 84def get_nearest(frame_lst_, frame_idx): 85 """Since we don't run scene seg on every frame, we want to find the nearest one.""" 86 frame_idxs = np.array([i_ for _, i_ in frame_lst_]) 87 cloests_idx = (np.abs(frame_idxs - frame_idx)).argmin() 88 vname, closest_frame_idx = frame_lst_[cloests_idx] 89 return vname, closest_frame_idx, cloests_idx 90 91 92def get_act_list(act_data, frameidx, bgid): 93 """Given a frameidx, get this person' activities.""" 94 95 # act_data is a list of sorted (start,end,actclassid) 96 # return current act list, 97 current_act_list = [(actid, e - frameidx) for s, e, actid in act_data 98 if (frameidx >= s) and (frameidx <= e)] 99 current_act_list.sort(key=operator.itemgetter(1)) # dist to current act's end 100 current_actid_list_ = [actid for actid, _ in current_act_list] 101 current_dist_list_ = [dist for _, dist in current_act_list] 102 103 if not current_act_list: 104 current_actid_list_, current_dist_list_ = [bgid], [-1] 105 106 future_act_list = [(actid, s - frameidx) for s, e, actid in act_data 107 if frameidx < s] 108 future_act_list.sort(key=operator.itemgetter(1)) 109 110 if not future_act_list: 111 return (current_actid_list_, current_dist_list_, [bgid], [-1]) 112 113 # only the nearest future activity? 
114 # smallest_dist = future_act_list[0][1] 115 # future_act_list = [(actid,dist) for actid, dist in future_act_list 116 # if dist == smallest_dist] 117 118 future_actid_list_ = [actid for actid, _ in future_act_list] 119 future_dist_list_ = [dist for _, dist in future_act_list] 120 121 return (current_actid_list_, current_dist_list_, 122 future_actid_list_, future_dist_list_) 123 124 125def check_traj(newdata_, vname): 126 """Check and filter data.""" 127 checkdata = np.array(newdata_, dtype="float") 128 frames_ = np.unique(checkdata[:, 0]).tolist() 129 checked_data_ = [] 130 for frame_ in frames_: 131 # all personid in this frame 132 this_frame_data = checkdata[frame_ == checkdata[:, 0], :] # [K,4] 133 ped_ids = this_frame_data[:, 1] 134 unique_ped_ids, unique_idxs = np.unique(ped_ids, return_index=True) 135 if len(ped_ids) != len(unique_ped_ids): 136 tqdm.write("\twarning, %s frame %s has duplicate person annotation person" 137 " ids: %s/%s, removed the duplicate ones" 138 % (vname, frame_, len(unique_ped_ids), len(ped_ids))) 139 140 this_frame_data = this_frame_data[unique_idxs] 141 142 for f_, p_, x_, y_ in this_frame_data: 143 checked_data_.append((f_, p_, x_, y_)) 144 checked_data_.sort(key=operator.itemgetter(0)) 145 return checked_data_ 146 147 148if __name__ == "__main__": 149 args = parser.parse_args() 150 151 # Hard coded for ActEV experiment. 
152 # :P 153 args.resize = True 154 args.resize_h = 1080 155 args.resize_w = 1920 156 157 filelst = { 158 "train": [os.path.splitext(os.path.basename(line.strip()))[0] 159 for line in open(os.path.join(args.split_path, 160 "train.lst"), "r").readlines()], 161 "val": [os.path.splitext(os.path.basename(line.strip()))[0] 162 for line in open(os.path.join(args.split_path, 163 "val.lst"), "r").readlines()], 164 "test": [os.path.splitext(os.path.basename(line.strip()))[0] 165 for line in open(os.path.join(args.split_path, 166 "test.lst"), "r").readlines()], 167 } 168 169 for split in tqdm(filelst, ascii=True): 170 out_path = os.path.join(args.out_path, split) 171 172 if not os.path.exists(out_path): 173 os.makedirs(out_path) 174 175 if not os.path.exists(os.path.join(args.person_box_path, split)): 176 os.makedirs(os.path.join(args.person_box_path, split)) 177 178 if not os.path.exists(os.path.join(args.other_box_path, split)): 179 os.makedirs(os.path.join(args.other_box_path, split)) 180 181 if not os.path.exists(os.path.join(args.activity_path, split)): 182 os.makedirs(os.path.join(args.activity_path, split)) 183 184 scene_map_path = os.path.join(args.scene_map_path, split) 185 if not os.path.exists(scene_map_path): 186 os.makedirs(scene_map_path) 187 188 for videoname in tqdm(filelst[split]): 189 npzfile = os.path.join(args.npzpath, "%s.npz" % videoname) 190 191 data = np.load(npzfile, allow_pickle=True) 192 193 # each frame's all boxes, for getting other boxes 194 frameidx2boxes = data["frameidx2boxes"] 195 196 # personId -> all related activity with timespan, sorted by timespan start 197 # (start, end, act_classid) 198 personid2acts = data["personid2acts"] 199 200 # load all the frames for this video first 201 frame_lst = glob(os.path.join(args.scene_feat_path, 202 "%s_F_*.npy"%videoname)) 203 assert frame_lst 204 frame_lst = [(os.path.basename(frame), 205 int(os.path.basename(frame).split(".")[0].split("_F_")[-1])) 206 for frame in frame_lst] 207 208・・・
試したこと
assertの前後にprint文を追加して変数の状態を確認しましたが,特に異常は見られませんでした.
補足情報(FW/ツールのバージョンなど)
GPU使用のため,CUDAとTensorflowのバージョンは合わせてあります.Tensorflow1.10です.
回答1件
あなたの回答
tips
プレビュー