Environment
Blender 2.8
Python 3.7.3
macOS High Sierra 10.13.6
In the environment above, I want to read the webcam from Blender's Python using OpenCV, as shown in this video: https://youtu.be/O7nNO3FLkLU
However, I get the following error and cannot read the camera.
```
[ERROR:0] VIDEOIO(AVFOUNDATION): raised unknown C++ exception!

Traceback (most recent call last):
  File "/Users/okadanaoki/Desktop/mocapdata/Vincent.blend/Text.001", line 83, in modal
cv2.error: OpenCV(4.1.0) /Users/travis/build/skvark/opencv-python/opencv/modules/imgproc/src/color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cvtColor'

location: <unknown location>:-1
location: <unknown location>:-1
```
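Reading the error, `cv2.cvtColor` seems to receive an empty frame because `cv2.VideoCapture` could not get an image through AVFOUNDATION. As a minimal way to isolate whether OpenCV can open the webcam at all, independent of Blender, I think a standalone snippet like the following should do (this is just my own test sketch, not part of the tutorial):

```python
# Minimal standalone check (run outside Blender) to see whether
# OpenCV can open the webcam via AVFoundation at all.
import cv2

cap = cv2.VideoCapture(0)
print("isOpened:", cap.isOpened())

ret, frame = cap.read()
print("read returned:", ret, "/ frame is None:", frame is None)
if ret and frame is not None:
    print("frame shape:", frame.shape)

cap.release()
```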
The full script is posted below.
python
```python
import bpy
from imutils import face_utils
import dlib
import cv2
import time
import numpy
from bpy.props import FloatProperty

class OpenCVAnimOperator(bpy.types.Operator):
    """Operator which runs itself from a timer"""
    bl_idname = "wm.opencv_operator"
    bl_label = "OpenCV Animation Operator"

    # p = our pre-trained model directory
    p = "/Users/okadanaoki/Desktop/mocapdata/shape_predictor_68_face_landmarks.dat"  # macOS
    #p = "/home/jason/Downloads/shape_predictor_68_face_landmarks.dat"  # linux
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(p)

    _timer = None
    _cap = None

    width = 800
    height = 600

    stop: bpy.props.BoolProperty()

    # 3D model points.
    model_points = numpy.array([
                                (0.0, 0.0, 0.0),          # Nose tip
                                (0.0, -330.0, -65.0),     # Chin
                                (-225.0, 170.0, -135.0),  # Left eye left corner
                                (225.0, 170.0, -135.0),   # Right eye right corner
                                (-150.0, -150.0, -125.0), # Left mouth corner
                                (150.0, -150.0, -125.0)   # Right mouth corner
                                ], dtype=numpy.float32)
    # Camera internals
    camera_matrix = numpy.array(
                                [[height, 0.0, width/2],
                                 [0.0, height, height/2],
                                 [0.0, 0.0, 1.0]], dtype=numpy.float32
                                )

    # Keeps a moving average of given length
    def smooth_value(self, name, length, value):
        if not hasattr(self, 'smooth'):
            self.smooth = {}
        if not name in self.smooth:
            self.smooth[name] = numpy.array([value])
        else:
            self.smooth[name] = numpy.insert(arr=self.smooth[name], obj=0, values=value)
            if self.smooth[name].size > length:
                self.smooth[name] = numpy.delete(self.smooth[name], self.smooth[name].size-1, 0)
        sum = 0
        for val in self.smooth[name]:
            sum += val
        return sum / self.smooth[name].size

    # Keeps min and max values, then returns the value in a range 0 - 1
    def get_range(self, name, value):
        if not hasattr(self, 'range'):
            self.range = {}
        if not name in self.range:
            self.range[name] = numpy.array([value, value])
        else:
            self.range[name] = numpy.array([min(value, self.range[name][0]), max(value, self.range[name][1])])
        val_range = self.range[name][1] - self.range[name][0]
        if val_range != 0:
            return (value - self.range[name][0]) / val_range
        else:
            return 0

    def modal(self, context, event):

        if (event.type in {'RIGHTMOUSE', 'ESC'}) or self.stop == True:
            self.cancel(context)
            return {'CANCELLED'}

        if event.type == 'TIMER':
            self.init_camera()
            _, image = self._cap.read()
            if image is None:
                #cv2.imwrite("/Users/okadanaoki/Desktop/test.png", image)
                print("None")
                image = cv2.imread("/Users/okadanaoki/Desktop/800x600.jpg")

            print("image4")
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            rects = self.detector(gray, 0)
            # bpy.context.scene.frame_set(frame_num)

            # For each detected face, find the landmark.
            for (i, rect) in enumerate(rects):
                shape = self.predictor(gray, rect)
                shape = face_utils.shape_to_np(shape)

                # 2D image points. If you change the image, you need to change vector
                image_points = numpy.array([shape[30],  # Nose tip - 31
                                            shape[8],   # Chin - 9
                                            shape[36],  # Left eye left corner - 37
                                            shape[45],  # Right eye right corner - 46
                                            shape[48],  # Left mouth corner - 49
                                            shape[54]   # Right mouth corner - 55
                                            ], dtype=numpy.float32)

                dist_coeffs = numpy.zeros((4, 1))  # Assuming no lens distortion

                if hasattr(self, 'rotation_vector'):
                    (success, self.rotation_vector, self.translation_vector) = cv2.solvePnP(self.model_points,
                        image_points, self.camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE,
                        rvec=self.rotation_vector, tvec=self.translation_vector,
                        useExtrinsicGuess=True)
                else:
                    (success, self.rotation_vector, self.translation_vector) = cv2.solvePnP(self.model_points,
                        image_points, self.camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE,
                        useExtrinsicGuess=False)

                if not hasattr(self, 'first_angle'):
                    self.first_angle = numpy.copy(self.rotation_vector)

                bones = bpy.data.objects["vincent_blenrig"].pose.bones

                bones["head_fk"].rotation_euler[0] = self.smooth_value("h_x", 3, (self.rotation_vector[0] - self.first_angle[0])) / 1    # Up/Down
                bones["head_fk"].rotation_euler[2] = self.smooth_value("h_y", 3, -(self.rotation_vector[1] - self.first_angle[1])) / 1.5 # Rotate
                bones["head_fk"].rotation_euler[1] = self.smooth_value("h_z", 3, (self.rotation_vector[2] - self.first_angle[2])) / 1.3  # Left/Right

                bones["mouth_ctrl"].location[2] = self.smooth_value("m_h", 2, -self.get_range("mouth_height", numpy.linalg.norm(shape[62] - shape[66])) * 0.06)
                bones["mouth_ctrl"].location[0] = self.smooth_value("m_w", 2, (self.get_range("mouth_width", numpy.linalg.norm(shape[54] - shape[48])) - 0.5) * -0.04)
                bones["brow_ctrl_L"].location[2] = self.smooth_value("b_l", 3, (self.get_range("brow_left", numpy.linalg.norm(shape[19] - shape[27])) - 0.5) * 0.04)
                bones["brow_ctrl_R"].location[2] = self.smooth_value("b_r", 3, (self.get_range("brow_right", numpy.linalg.norm(shape[24] - shape[27])) - 0.5) * 0.04)

                bones["head_fk"].keyframe_insert(data_path="rotation_euler", index=-1)
                bones["mouth_ctrl"].keyframe_insert(data_path="location", index=-1)
                bones["brow_ctrl_L"].keyframe_insert(data_path="location", index=2)
                bones["brow_ctrl_R"].keyframe_insert(data_path="location", index=2)

                for (x, y) in shape:
                    cv2.circle(image, (x, y), 2, (0, 255, 255), -1)

            cv2.imshow("Output", image)
            cv2.waitKey(1)

        return {'PASS_THROUGH'}

    def init_camera(self):
        if self._cap == None:
            self._cap = cv2.VideoCapture(0)
            self._cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.width)
            self._cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height)
            self._cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
            time.sleep(0.5)

    def stop_playback(self, scene):
        print(format(scene.frame_current) + " / " + format(scene.frame_end))
        if scene.frame_current == scene.frame_end:
            bpy.ops.screen.animation_cancel(restore_frame=False)

    def execute(self, context):
        bpy.app.handlers.frame_change_pre.append(self.stop_playback)

        wm = context.window_manager
        self._timer = wm.event_timer_add(0.02, window=context.window)
        wm.modal_handler_add(self)
        return {'RUNNING_MODAL'}

    def cancel(self, context):
        wm = context.window_manager
        wm.event_timer_remove(self._timer)
        cv2.destroyAllWindows()
        self._cap.release()
        self._cap = None

def register():
    bpy.utils.register_class(OpenCVAnimOperator)

def unregister():
    bpy.utils.unregister_class(OpenCVAnimOperator)

if __name__ == "__main__":
    register()

    # test call
    #bpy.ops.wm.opencv_operator()
```
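As an aside, one way to avoid the crash inside `cvtColor` itself (though it would not fix the underlying AVFOUNDATION error) might be to check whether the capture actually opened and whether `read()` succeeded before converting the frame. This is only a sketch based on the `init_camera()` and `modal()` methods above; `self.report()` is the standard `bpy.types.Operator` reporting method:

```python
# Sketch: guard against an unopened capture / empty frame
# (based on the init_camera() and modal() methods above).
def init_camera(self):
    if self._cap is None:
        self._cap = cv2.VideoCapture(0)
        if not self._cap.isOpened():
            # VideoCapture(0) failed, e.g. no camera access via AVFoundation
            self.report({'ERROR'}, "Could not open webcam with VideoCapture(0)")
        self._cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.width)
        self._cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height)
        self._cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
        time.sleep(0.5)

# ...and in modal(), skip the tick instead of calling cvtColor on an empty image:
#     ret, image = self._cap.read()
#     if not ret or image is None:
#         return {'PASS_THROUGH'}
```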
Thank you in advance.
* Addendum (10/31)
Here are lines 80-85 of the original code.
```python
if event.type == 'TIMER':
    self.init_camera()
    _, image = self._cap.read()
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    rects = self.detector(gray, 0)
    # bpy.context.scene.frame_set(frame_num)
```