Background / What I want to achieve
I am using keras-yolo3 and want to run real-time object detection on my own custom data through a webcam.
Problem / error message
I created the custom dataset by following reference site 1 and reference site 2.
I then rewrote the code in the same way as the section near the end of reference site 3, "Load a model trained on custom data and perform object detection".
Since I want to detect in real time through a webcam, I ran the real-time webcam detection program described in step ⑤ of reference site 4, and the following error occurred.
ValueError: Dimension 0 in both shapes must be equal, but are 1 and 42. Shapes are [1,1,1024,45] and [42,1024,1,1]. for 'Assign_360' (op: 'Assign') with input shapes: [1,1,1024,45], [42,1024,1,1].
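Reading the shapes in the error, and assuming the standard YOLOv3 head layout (each output layer has num_anchors_per_scale * (num_classes + 5) channels), 45 channels would correspond to 10 classes and 42 to 9 classes, so I suspect the saved weights and my current class/anchor files disagree. Below is a minimal check I sketched myself for this question (not from the reference sites; the paths are the ones in my _defaults):
python
# Minimal sanity check (my own sketch): compare the head channel count
# implied by my_classes.txt / yolo_anchors.txt with the 45 vs. 42 in the error.
with open('model_data/my_classes.txt') as f:
    num_classes = len([line for line in f if line.strip()])
with open('model_data/yolo_anchors.txt') as f:
    num_anchors = len(f.readline().split(',')) // 2   # anchors are listed as "w,h" pairs

anchors_per_scale = num_anchors // 3                   # 3 scales for full YOLOv3
expected_channels = anchors_per_scale * (num_classes + 5)
print(num_classes, expected_channels)                  # 45 -> 10 classes, 42 -> 9 classes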
Relevant source code
python
# -*- coding: utf-8 -*-
"""
Class definition of YOLO_v3 style detection model on image and video
"""

import colorsys
import os
from timeit import default_timer as timer

import numpy as np
from keras import backend as K
from keras.models import load_model
from keras.layers import Input
from PIL import Image, ImageFont, ImageDraw

from yolo3.model import yolo_eval, yolo_body, tiny_yolo_body
from yolo3.utils import letterbox_image
import os
from keras.utils import multi_gpu_model

class YOLO(object):
    _defaults = {
        "model_path": 'model_data/trained_weights_final.h5',
        "anchors_path": 'model_data/yolo_anchors.txt',
        "classes_path": 'model_data/my_classes.txt',
        "score" : 0.3,
        "iou" : 0.45,
        "model_image_size" : (416, 416),
        "gpu_num" : 1,
    }

    @classmethod
    def get_defaults(cls, n):
        if n in cls._defaults:
            return cls._defaults[n]
        else:
            return "Unrecognized attribute name '" + n + "'"

    def __init__(self, **kwargs):
        self.__dict__.update(self._defaults) # set up default values
        self.__dict__.update(kwargs) # and update with user overrides
        self.class_names = self._get_class()
        self.anchors = self._get_anchors()
        self.sess = K.get_session()
        self.boxes, self.scores, self.classes = self.generate()

    def _get_class(self):
        classes_path = os.path.expanduser(self.classes_path)
        with open(classes_path) as f:
            class_names = f.readlines()
        class_names = [c.strip() for c in class_names]
        return class_names

    def _get_anchors(self):
        anchors_path = os.path.expanduser(self.anchors_path)
        with open(anchors_path) as f:
            anchors = f.readline()
        anchors = [float(x) for x in anchors.split(',')]
        return np.array(anchors).reshape(-1, 2)

    def generate(self):
        model_path = os.path.expanduser(self.model_path)
        assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'

        # Load model, or construct model and load weights.
        num_anchors = len(self.anchors)
        num_classes = len(self.class_names)
        is_tiny_version = num_anchors==6 # default setting
        try:
            self.yolo_model = load_model(model_path, compile=False)
        except:
            self.yolo_model = tiny_yolo_body(Input(shape=(None,None,3)), num_anchors//2, num_classes) \
                if is_tiny_version else yolo_body(Input(shape=(None,None,3)), num_anchors//3, num_classes)
            self.yolo_model.load_weights(self.model_path) # make sure model, anchors and classes match
        else:
            assert self.yolo_model.layers[-1].output_shape[-1] == \
                num_anchors/len(self.yolo_model.output) * (num_classes + 5), \
                'Mismatch between model and given anchor and class sizes'

        print('{} model, anchors, and classes loaded.'.format(model_path))

        # Generate colors for drawing bounding boxes.
        hsv_tuples = [(x / len(self.class_names), 1., 1.)
                      for x in range(len(self.class_names))]
        self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
        self.colors = list(
            map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
                self.colors))
        np.random.seed(10101)  # Fixed seed for consistent colors across runs.
        np.random.shuffle(self.colors)  # Shuffle colors to decorrelate adjacent classes.
        np.random.seed(None)  # Reset seed to default.

        # Generate output tensor targets for filtered bounding boxes.
        self.input_image_shape = K.placeholder(shape=(2, ))
        if self.gpu_num>=2:
            self.yolo_model = multi_gpu_model(self.yolo_model, gpus=self.gpu_num)
        boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,
                len(self.class_names), self.input_image_shape,
                score_threshold=self.score, iou_threshold=self.iou)
        return boxes, scores, classes

    def detect_image(self, image):
        start = timer()

        if self.model_image_size != (None, None):
            assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required'
            assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required'
            boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))
        else:
            new_image_size = (image.width - (image.width % 32),
                              image.height - (image.height % 32))
            boxed_image = letterbox_image(image, new_image_size)
        image_data = np.array(boxed_image, dtype='float32')

        print(image_data.shape)
        image_data /= 255.
        image_data = np.expand_dims(image_data, 0)  # Add batch dimension.

        out_boxes, out_scores, out_classes = self.sess.run(
            [self.boxes, self.scores, self.classes],
            feed_dict={
                self.yolo_model.input: image_data,
                self.input_image_shape: [image.size[1], image.size[0]],
                K.learning_phase(): 0
            })

        print('Found {} boxes for {}'.format(len(out_boxes), 'img'))

        font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
                    size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
        thickness = (image.size[0] + image.size[1]) // 300

        for i, c in reversed(list(enumerate(out_classes))):
            predicted_class = self.class_names[c]
            box = out_boxes[i]
            score = out_scores[i]

            label = '{} {:.2f}'.format(predicted_class, score)
            draw = ImageDraw.Draw(image)
            label_size = draw.textsize(label, font)

            top, left, bottom, right = box
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
            right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
            print(label, (left, top), (right, bottom))

            if top - label_size[1] >= 0:
                text_origin = np.array([left, top - label_size[1]])
            else:
                text_origin = np.array([left, top + 1])

            # My kingdom for a good redistributable image drawing library.
            for i in range(thickness):
                draw.rectangle(
                    [left + i, top + i, right - i, bottom - i],
                    outline=self.colors[c])
            draw.rectangle(
                [tuple(text_origin), tuple(text_origin + label_size)],
                fill=self.colors[c])
            draw.text(text_origin, label, fill=(0, 0, 0), font=font)
            del draw

        end = timer()
        print(end - start)
        return image

    def close_session(self):
        self.sess.close()

def detect_video(yolo, video_path, output_path=""):
    import cv2
    vid = cv2.VideoCapture(video_path)
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")
    video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
    video_fps = vid.get(cv2.CAP_PROP_FPS)
    video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                  int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    isOutput = True if output_path != "" else False
    if isOutput:
        print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
    while True:
        return_value, frame = vid.read()
        image = Image.fromarray(frame)
        image = yolo.detect_image(image)
        result = np.asarray(image)
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0
        cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.50, color=(255, 0, 0), thickness=2)
        cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", result)
        if isOutput:
            out.write(result)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    yolo.close_session()
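For completeness, this is roughly how I start real-time detection from the webcam (a minimal sketch of what step ⑤ of reference site 4 amounts to, assuming the code above is saved as yolo.py inside the keras-yolo3 project; the actual script may differ slightly). The error is raised while YOLO() is loading the weights, before any frame is read.
python
# Minimal sketch of the webcam entry point (assumption: the reference script
# essentially passes the webcam device index 0 through detect_video, which
# hands it to cv2.VideoCapture).
from yolo import YOLO, detect_video

if __name__ == '__main__':
    detect_video(YOLO(), 0)   # 0 = default webcam; the ValueError occurs inside YOLO()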
What I tried
Before switching to my custom data, I ran real-time webcam detection with the original sample model, and it worked without any problem.
Additional information
MacBook Pro 2020
Anaconda
Python 3.7
TensorFlow 1.14.0
Keras 2.2.4
If there is a better approach, I would appreciate it if you could let me know that as well.