Problem / error message
Calling trainer.run() raises the following error.
---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-19-041e2033e90a> in <module>
----> 1 trainer.run()

~/GraduationResearch/GraduationResearch/lib/python3.6/site-packages/chainer/training/trainer.py in run(self, show_loop_exception_msg)
    374                     f.write('Traceback (most recent call last):\n')
    375                     traceback.print_tb(sys.exc_info()[2])
--> 376                 six.reraise(*exc_info)
    377         finally:
    378             for _, entry in extensions:

~/GraduationResearch/GraduationResearch/lib/python3.6/site-packages/six.py in reraise(tp, value, tb)
    701             if value.__traceback__ is not tb:
    702                 raise value.with_traceback(tb)
--> 703             raise value
    704         finally:
    705             value = None

~/GraduationResearch/GraduationResearch/lib/python3.6/site-packages/chainer/training/trainer.py in run(self, show_loop_exception_msg)
    341                 self.observation = {}
    342                 with reporter.scope(self.observation):
--> 343                     update()
    344                 for name, entry in extensions:
    345                     if entry.trigger(self):

~/GraduationResearch/GraduationResearch/lib/python3.6/site-packages/chainer/training/updaters/standard_updater.py in update(self)
    238 
    239         """
--> 240         self.update_core()
    241         self.iteration += 1
    242 

~/GraduationResearch/GraduationResearch/lib/python3.6/site-packages/chainer/training/updaters/standard_updater.py in update_core(self)
    243     def update_core(self):
    244         iterator = self._iterators['main']
--> 245         batch = iterator.next()
    246         in_arrays = convert._call_converter(
    247             self.converter, batch, self.input_device)

~/GraduationResearch/GraduationResearch/lib/python3.6/site-packages/chainer/iterators/serial_iterator.py in __next__(self)
     75                 raise StopIteration
     76 
---> 77         batch = [self.dataset[index] for index in indices]
     78         return batch
     79 

~/GraduationResearch/GraduationResearch/lib/python3.6/site-packages/chainer/iterators/serial_iterator.py in <listcomp>(.0)
     75                 raise StopIteration
     76 
---> 77         batch = [self.dataset[index] for index in indices]
     78         return batch
     79 

~/GraduationResearch/GraduationResearch/lib/python3.6/site-packages/chainer/dataset/dataset_mixin.py in __getitem__(self, index)
     65             return [self.get_example(i) for i in index]
     66         else:
---> 67             return self.get_example(index)
     68 
     69     def __len__(self):

~/GraduationResearch/GraduationResearch/lib/python3.6/site-packages/chainer/datasets/transform_dataset.py in get_example(self, i)
     50     def get_example(self, i):
     51         in_data = self._dataset[i]
---> 52         return self._transform(in_data)

<ipython-input-11-f268809d3c76> in __call__(self, in_data)
      7         img, bbox, label = in_data
      8         _, H, W = img.shape
----> 9         img = self.faster_rcnn.prepare(img)
     10         _, o_H, o_W = img.shape
     11         scale = o_H / H

~/GraduationResearch/GraduationResearch/lib/python3.6/site-packages/chainercv/links/model/faster_rcnn/faster_rcnn.py in prepare(self, img)
    215             scale = self.max_size / max(H, W)
    216 
--> 217         img = resize(img, (int(H * scale), int(W * scale)))
    218 
    219         img = (img - self.mean).astype(np.float32, copy=False)

~/GraduationResearch/GraduationResearch/lib/python3.6/site-packages/chainercv/transforms/image/resize.py in resize(img, size, interpolation)
     69     if chainer.config.cv_resize_backend is None:
     70         if _cv2_available:
---> 71             return _resize_cv2(img, size, interpolation)
     72         else:
     73             return _resize_pil(img, size, interpolation)

~/GraduationResearch/GraduationResearch/lib/python3.6/site-packages/chainercv/transforms/image/resize.py in _resize_cv2(img, size, interpolation)
     22         cv_interpolation = cv2.INTER_LANCZOS4
     23     H, W = size
---> 24     img = cv2.resize(img, dsize=(W, H), interpolation=cv_interpolation)
     25 
     26     # If input is a grayscale image, cv2 returns a two-dimentional array.

TypeError: Expected Ptr<cv::UMat> for argument 'src'
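The last line says that cv2.resize was handed something it could not treat as an image array. As a purely diagnostic sketch (not part of the original notebook; it assumes imgs is the array loaded from images.npy in the code below), one way to narrow this down is to inspect a single stored image before it ever reaches faster_rcnn.prepare:

# Hypothetical check: what does one stored image actually look like?
img = imgs[0]
print(type(img))
print(getattr(img, 'dtype', None))   # an object dtype here would explain the cv2 error
print(getattr(img, 'shape', None))   # prepare() expects a CHW array such as (3, H, W)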
Relevant source code
The code below is based on the ChainerCV example.
import matplotlib.pyplot as plt
%matplotlib inline
import os
import numpy as np
import chainer
import random
from chainercv.chainer_experimental.datasets.sliceable import TupleDataset
from chainercv.links import FasterRCNNVGG16
from chainercv.links.model.faster_rcnn import FasterRCNNTrainChain
from chainer.datasets import TransformDataset
from chainercv import transforms
from chainer import training
from chainer.training import extensions
import SLATS_256 as bbox

data_dir = 'ShinjukuGyoen/SLATS-256'
classes_file = 'ShinjukuGyoen/classes.txt'
file_img_set = os.path.join(data_dir, 'images.npy')
file_bbox_set = os.path.join(data_dir, 'bounding_box_data.npy')
file_object_ids = os.path.join(data_dir, 'object_ids.npy')
file_classes = os.path.join(classes_file, 'classes.txt')

imgs = np.load(file_img_set, allow_pickle=True)
bboxs = np.load(file_bbox_set, allow_pickle=True)
objectIDs = np.load(file_object_ids, allow_pickle=True)

classes = list()
with open(classes_file) as fd:
    for one_line in fd.readlines():
        cl = one_line.split('\n')[0]
        classes.append(cl)
print(classes)

dataset = TupleDataset(('img', imgs), ('bbox', bboxs), ('label', objectIDs))
N = len(dataset)
N_train = (int)(N*0.8)
N_test = N - N_train
print('total:{}, train:{}, test:{}'.format(N, N_train, N_test))

faster_rcnn = FasterRCNNVGG16(n_fg_class=len(classes), pretrained_model='imagenet')
model = FasterRCNNTrainChain(faster_rcnn)
model.to_cpu()

optimizer = chainer.optimizers.MomentumSGD(lr=0.001, momentum=0.9)
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer_hooks.WeightDecay(rate=0.0005))

class Transform(object):

    def __init__(self, faster_rcnn):
        self.faster_rcnn = faster_rcnn

    def __call__(self, in_data):
        img, bbox, label = in_data
        _, H, W = img.shape
        img = self.faster_rcnn.prepare(img)
        _, o_H, o_W = img.shape
        scale = o_H / H
        bbox = transforms.resize_bbox(bbox, (H, W), (o_H, o_W))

        img, params = transforms.random_flip(
            img, x_random=True, return_param=True)
        bbox = transforms.flip_bbox(
            bbox, (o_H, o_W), x_flip=params['x_flip'])

        return img, bbox, label, scale

idxs = list(np.arange(N))
random.shuffle(idxs)
train_idxs = idxs[:N_train]
test_idxs = idxs[N_train:]

train_data = TransformDataset(dataset[train_idxs], Transform(faster_rcnn))
train_iter = chainer.iterators.SerialIterator(
    train_data, batch_size=1)
test_iter = chainer.iterators.SerialIterator(
    dataset[test_idxs], batch_size=1, repeat=False, shuffle=False)

updater = chainer.training.updaters.StandardUpdater(
    train_iter, optimizer)

n_epoch = 20
out_dir = './out'
trainer = training.Trainer(
    updater, (n_epoch, 'epoch'), out=out_dir)

step_size = 100
trainer.extend(
    extensions.snapshot_object(model.faster_rcnn, 'snapshot_model.npz'),
    trigger=(n_epoch, 'epoch'))
trainer.extend(extensions.ExponentialShift('lr', 0.1),
               trigger=(step_size, 'iteration'))

log_interval = 1, 'epoch'
plot_interval = 1, 'epoch'
print_interval = 1, 'epoch'

trainer.extend(chainer.training.extensions.observe_lr(),
               trigger=log_interval)
trainer.extend(extensions.LogReport(trigger=log_interval))
trainer.extend(extensions.PrintReport(
    ['iteration', 'epoch', 'elapsed_time', 'lr',
     'main/loss',
     'main/roi_loc_loss',
     'main/roi_cls_loss',
     'main/rpn_loc_loss',
     'main/rpn_cls_loss',
     'validation/main/map',
     ]), trigger=print_interval)
trainer.extend(
    extensions.PlotReport(
        ['main/loss'],
        file_name='loss.png', trigger=plot_interval
    ),
    trigger=plot_interval
)
trainer.extend(extensions.dump_graph('main/loss'))

trainer.run()
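If the arrays stored in images.npy are not already contiguous float32 CHW ndarrays (an object-dtype array of variable-sized or HWC images is a common way to hit exactly this "Expected Ptr<cv::UMat>" error), one possible workaround is to coerce each image right before the self.faster_rcnn.prepare(img) call inside Transform.__call__. The helper below is only a sketch built on that assumption, not a confirmed fix:

import numpy as np

def to_chw_float32(img):
    # Hypothetical helper: coerce an image into the contiguous float32 CHW
    # layout that FasterRCNNVGG16.prepare (and the cv2.resize call inside it)
    # can work with.
    img = np.asarray(img, dtype=np.float32)
    if img.ndim == 2:                                    # grayscale H x W -> 3 x H x W
        img = np.broadcast_to(img, (3,) + img.shape).copy()
    elif img.shape[0] not in (1, 3) and img.shape[-1] in (1, 3):
        img = img.transpose(2, 0, 1)                     # assumed HWC -> CHW
    return np.ascontiguousarray(img)

# Usage inside Transform.__call__, before the prepare() call:
#     img = to_chw_float32(img)
#     _, H, W = img.shape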
What I tried
I tried following the article below, but it did not solve the problem.
- OpenCV TypeError: Expected cv::UMat for argument 'src' - What is this?