Background and what I want to achieve
I am using Python 3.8 on Ubuntu 20.04 with VS Code.
I am running YOLO with a RealSense camera, but in VS Code the variable classes is shown with a squiggly underline and is not being recognized.
Relevant source code
import pyrealsense2 as rs
import numpy as np
import cv2
import sys
from playsound import playsound

# Initialize the parameters
confThreshold = 0.5
nmsThreshold = 0.4
inpWidth = 416
inpHeight = 416
classesFile = "/home/limlab/realsense_yolo_v3_2d/coco.names"

# Configure depth and color streams
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

# Start streaming
pipeline.start(config)


class CAMDEMO:
    def __init__(self):
        self.j = 0

    def getOutputsNames(self, net):
        layersNames = net.getLayerNames()
        return [layersNames[i[0] - 1] for i in net.getUnconnectedOutLayers()]

    # Detection: draw the box/label and react to specific classes
    def drawPredicted(self, classId, conf, left, top, right, bottom, frame, x, y):
        classes = None
        with open(classesFile, "rt") as f:
            classes = f.read().rstrip('\n').split('\n')
        cv2.rectangle(frame, (left, top), (right, bottom), (255, 0, 0), 3)  # bounding box
        cv2.circle(frame, (x, y), radius=1, color=(0, 255, 0), thickness=5)  # center point
        label = '%.2f' % conf
        if classes:
            assert(classId < len(classes))
            label = '%s' % (classes[classId])
        cv2.putText(frame, label, (left, top - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 0), 2)

        global first  # define the global variable first
        first = True
        # runs when this is the first pass
        if first == True:
            path = '/home/limlab/realsense_yolo_v3_2d/image/'
            if label == 'bottle':  # if the label is 'bottle'
                self.j += 1
                if self.j == 1:  # self.j starts at 0 in __init__, so this runs on the first detection
                    cv2.imwrite(path + 'image1.png', frame)
                    print('Detected a plastic bottle')
                    playsound("/home/limlab/programs/bottle.mp3")
                    self.j += 1  # increment j so this branch only runs once
                    sys.exit()
            if label == 'mouse':
                self.j += 1
                if self.j == 1:
                    cv2.imwrite(path + 'image1.png', frame)
                    print('Detected a mouse')
                    playsound("/home/limlab/programs/mouse.mp3")
                    self.j += 1
                    sys.exit()

        labelSize = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
        top = max(top, labelSize[1])
        cv2.putText(frame, label, (left, top - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 255, 0), 2)

    def process_detection(self, frame, outs):
        frameHeight = frame.shape[0]
        frameWidth = frame.shape[1]
        classIds = []
        confidences = []
        boxes = []
        for out in outs:
            for detection in out:
                scores = detection[5:]
                classId = np.argmax(scores)
                confidence = scores[classId]
                if confidence > confThreshold:
                    center_x = int(detection[0] * frameWidth)
                    center_y = int(detection[1] * frameHeight)
                    width = int(detection[2] * frameWidth)
                    height = int(detection[3] * frameHeight)
                    left = int(center_x - width / 2)
                    top = int(center_y - height / 2)
                    classIds.append(classId)
                    # keep the confidence and box so NMS and drawing can index them
                    confidences.append(float(confidence))
                    boxes.append([left, top, width, height])
        indices = cv2.dnn.NMSBoxes(boxes, confidences, confThreshold, nmsThreshold)
        for i in indices:
            i = i[0]
            box = boxes[i]
            left = box[0]
            top = box[1]
            width = box[2]
            height = box[3]
            x = int(left + width / 2)
            y = int(top + height / 2)
            self.drawPredicted(classIds[i], confidences[i], left, top, left + width, top + height, frame, x, y)


def main():
    cam = CAMDEMO()
    modelConfiguration = "/home/limlab/realsense_yolo_v3_2d/yolov3.cfg"
    modelWeights = "/home/limlab/realsense_yolo_v3_2d/yolov3.weights"
    net = cv2.dnn.readNetFromDarknet(modelConfiguration, modelWeights)
    net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
    net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
    try:
        while True:
            # Wait for a coherent pair of frames: depth and color
            frames = pipeline.wait_for_frames()
            color_frame = frames.get_color_frame()
            if not color_frame:
                continue
            # Convert images to numpy arrays
            color_image = np.asanyarray(color_frame.get_data())
            blob = cv2.dnn.blobFromImage(color_image, 1/255, (inpWidth, inpHeight), [0, 0, 0], 1, crop=False)
            net.setInput(blob)
            outs = net.forward(cam.getOutputsNames(net))
            # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
            cam.process_detection(color_image, outs)
            images = color_image
            # Show images
            cv2.imshow('Yolo in RealSense made by Tony', images)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        # Stop streaming
        pipeline.stop()


if __name__ == "__main__":
    main()
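For reference, the part of the script that uses classes can be isolated into a minimal standalone sketch. The coco.names path is the same classesFile as above; everything else is only an illustration for narrowing down where the underline comes from, not part of the actual program.

# Standalone check: load the COCO class names the same way drawPredicted does.
classesFile = "/home/limlab/realsense_yolo_v3_2d/coco.names"

classes = None
with open(classesFile, "rt") as f:
    # one class name per line; drop the trailing newline before splitting
    classes = f.read().rstrip('\n').split('\n')

print(len(classes))  # the standard coco.names contains 80 names
print(classes[:5])   # starts with 'person', 'bicycle', 'car', ...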