Webサイトのコードを参考に試行錯誤していますが、知識不足で
自力での解決ができませんでした。お力を貸してください。
■やりたい事
OpenCV+AruCoでマーカーの姿勢推定
■前提・困りごと
Webカメラのキャリブレーションは済、添付のコードを実行すると
カメラ画角にマーカーが映り込んだ瞬間にエラーコードが出る。
drawAxisが原因なのは読み取れて、drawFrameAxesに置き換えると
よさそうな所までは調べましたが、コードを修正できる知識がなく
困っています。
■キャリブレーションのコード実行: OK
'detected coner!'
'1/50'
'detected coner!'
'2/50'
'detected coner!'
'3/50'
'detected coner!'
'4/50'
'detected coner!'
'5/50'
'detected coner!'
■マーカー作成、検出のコード実行: NG
x : -0.034794117506137735
y : 0.3744266401339573
z : 1.4769175320493957
roll : [-169.28233936]
pitch: [-8.60552705]
yaw : [0.95272568]
AttributeError Traceback (most recent call last)
Cell In [1], line 64
61 cv2.destroyAllWindows()
63 if name == 'main':
---> 64 main()
Cell In [1], line 55, in main()
52 print("yaw : " + str(euler_angle[2]))
54 draw_pole_length = marker_length/2
---> 55 aruco.drawAxis(img, camera_matrix, distortion_coeff, rvec, tvec, draw_pole_length)
57 cv2.imshow('drawDetectedMarkers', img)
58 if cv2.waitKey(10) & 0xFF == ord('q'):
AttributeError: module 'cv2.aruco' has no attribute 'drawAxis'
■Pythonコード
# --- Camera calibration with a printed chessboard pattern ---
# Collects `reference_img` successful corner detections from the webcam,
# then computes and saves the camera matrix and distortion coefficients.
import cv2
import matplotlib.pyplot as plt
import numpy as np

square_size = 2.6        # side length of one printed chessboard square [cm]
pattern_size = (7, 7)    # inner-corner count of the chessboard (cols, rows)
reference_img = 50       # frames to collect before running the calibration

# 3-D object points of the board corners in the board's own plane (z = 0),
# scaled to physical units; the same grid is reused for every frame.
pattern_points = np.zeros((np.prod(pattern_size), 3), np.float32)
pattern_points[:, :2] = np.indices(pattern_size).T.reshape(-1, 2)
pattern_points *= square_size

objpoints = []  # 3-D board points, one entry per accepted frame
imgpoints = []  # matching 2-D corner positions found in the image

capture = cv2.VideoCapture(0)
while len(objpoints) < reference_img:
    ret, img = capture.read()
    if not ret:
        # Camera returned no frame; skip instead of crashing in cvtColor.
        continue
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, corner = cv2.findChessboardCorners(gray, pattern_size)
    if ret == True:
        # NOTE: display() is the IPython/Jupyter helper — this cell is
        # meant to run in a notebook; use print() in a plain script.
        display("detected coner!")
        display(str(len(objpoints) + 1) + "/" + str(reference_img))
        # Refine the detected corners to sub-pixel accuracy.
        term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1)
        cv2.cornerSubPix(gray, corner, (5, 5), (-1, -1), term)
        imgpoints.append(corner.reshape(-1, 2))
        objpoints.append(pattern_points)
    cv2.imshow('image', img)
    if cv2.waitKey(100) & 0xFF == ord('q'):
        break

display("calculating camera parameter...")
# gray.shape[::-1] is the (width, height) image size expected by OpenCV.
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
    objpoints, imgpoints, gray.shape[::-1], None, None)
np.save("mtx", mtx)            # camera matrix -> mtx.npy
np.save("dist", dist.ravel())  # distortion coefficients -> dist.npy
capture.release()
cv2.destroyAllWindows()
# --- Generate ArUco marker images (IDs 0-4) as PNG files ---
import cv2

aruco = cv2.aruco
dictionary = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)


def main():
    """Write ar00.png ... ar04.png, each a 150 px 4x4 ArUco marker."""
    for i in range(5):
        # aruco.drawMarker was removed in OpenCV 4.7; the replacement is
        # aruco.generateImageMarker. Keep a fallback for older versions.
        if hasattr(aruco, "generateImageMarker"):
            ar_image = aruco.generateImageMarker(dictionary, i, 150)
        else:
            ar_image = aruco.drawMarker(dictionary, i, 150)
        fileName = "ar" + str(i).zfill(2) + ".png"
        cv2.imwrite(fileName, ar_image)


if __name__ == "__main__":
    main()
# --- Detect ArUco markers from the webcam and estimate their pose ---
import numpy as np
import cv2
from cv2 import aruco


def main():
    """Show webcam frames with detected markers and their pose axes drawn.

    Loads the camera matrix / distortion coefficients saved by the
    calibration script (mtx.npy / dist.npy). Press 'q' to quit.
    """
    cap = cv2.VideoCapture(0)
    marker_length = 0.060  # physical side length of the printed marker [m]
    dictionary = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)
    camera_matrix = np.load("mtx.npy")
    distortion_coeff = np.load("dist.npy")

    while True:
        ret, img = cap.read()
        if not ret:
            # Camera gave no frame; stop instead of passing None onward.
            break
        corners, ids, rejectedImgPoints = aruco.detectMarkers(img, dictionary)
        aruco.drawDetectedMarkers(img, corners, ids, (0, 255, 255))
        if len(corners) > 0:
            for i, corner in enumerate(corners):
                rvec, tvec, _ = aruco.estimatePoseSingleMarkers(
                    corner, marker_length, camera_matrix, distortion_coeff)
                tvec = np.squeeze(tvec)
                rvec = np.squeeze(rvec)
                # Rodrigues vector -> 3x3 rotation matrix, then recover
                # Euler angles via decomposeProjectionMatrix (index 6).
                rvec_matrix = cv2.Rodrigues(rvec)[0]
                transpose_tvec = tvec[np.newaxis, :].T
                proj_matrix = np.hstack((rvec_matrix, transpose_tvec))
                euler_angle = cv2.decomposeProjectionMatrix(proj_matrix)[6]
                print("x : " + str(tvec[0]))
                print("y : " + str(tvec[1]))
                print("z : " + str(tvec[2]))
                print("roll : " + str(euler_angle[0]))
                print("pitch: " + str(euler_angle[1]))
                print("yaw : " + str(euler_angle[2]))
                draw_pole_length = marker_length / 2
                # FIX: aruco.drawAxis was removed in OpenCV 4.7 (the
                # AttributeError in the question). cv2.drawFrameAxes is
                # the drop-in replacement with the same argument order.
                cv2.drawFrameAxes(img, camera_matrix, distortion_coeff,
                                  rvec, tvec, draw_pole_length)
        cv2.imshow('drawDetectedMarkers', img)
        if cv2.waitKey(10) & 0xFF == ord('q'):
            break

    cap.release()  # release the camera before closing the windows
    cv2.destroyAllWindows()


if __name__ == '__main__':
    main()
あなたの回答
tips
プレビュー