What I want to achieve
I am trying to detect the number of times the distance between objects falls below a certain threshold, using the following repository:
https://github.com/basileroth75/covid-social-distancing-detection
I am a complete beginner at programming and am feeling my way through this for work, so please bear with me if my wording is off.
Problem / error message
It works fine with the sample, but when I use a model I trained myself, the following error occurs.
```
Traceback (most recent call last):
  File "social_distanciation_video_detection.py", line 193, in <module>
    transformed_downoids = compute_point_perspective_transformation(matrix,array_groundpoints)
  File "C:\covid-social-distancing-detection\src\bird_view_transfo_functions.py", line 31, in compute_point_perspective_transformation
    for i in range(0,transformed_points.shape[0]):
AttributeError: 'NoneType' object has no attribute 'shape'
```
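If I read the traceback correctly, `transformed_points` comes back from `cv2.perspectiveTransform` as `None`. As far as I can tell, that happens when the list of points passed in is empty, i.e. when no person was detected in the frame. A minimal check I put together (the matrix here is a dummy value, not from the project):

```python
import numpy as np
import cv2

# An identity matrix standing in for the real transform, and an empty
# point array standing in for a frame with zero detections.
matrix = np.eye(3, dtype=np.float32)
empty_points = np.float32([]).reshape(-1, 1, 2)

# In my environment this prints None, which would reproduce the
# AttributeError above once .shape is accessed on the result.
print(cv2.perspectiveTransform(empty_points, matrix))
```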
Relevant source code
social_distanciation_video_detection.py
```python
#########################################
#           Select the model            #
#########################################
model_names_list = [name for name in os.listdir("../models/.") if name.find(".") == -1]
for index,model_name in enumerate(model_names_list):
    print(" - {} [{}]".format(model_name,index))
model_num = input(" Please select the number related to the model that you want : ")
if model_num == "":
    model_path="../models/faster_rcnn_inception_v2_coco_2018_01_28/frozen_inference_graph.pb"
else :
    model_path = "../models/"+model_names_list[int(model_num)]+"/frozen_inference_graph.pb"
print(bcolors.WARNING + " [ Loading the TENSORFLOW MODEL ... ]"+bcolors.ENDC)
model = Model(model_path)
print(bcolors.OKGREEN +"Done : [ Model loaded and initialized ] ..."+bcolors.ENDC)


#########################################
#           Select the video            #
#########################################
video_names_list = [name for name in os.listdir("../video/") if name.endswith(".mp4") or name.endswith(".avi")]
for index,video_name in enumerate(video_names_list):
    print(" - {} [{}]".format(video_name,index))
video_num = input("Enter the exact name of the video (including .mp4 or else) : ")
if video_num == "":
    video_path="../video/PETS2009.avi"
else :
    video_path = "../video/"+video_names_list[int(video_num)]


#########################################
#           Minimal distance            #
#########################################
distance_minimum = input("Prompt the size of the minimal distance between 2 pedestrians : ")
if distance_minimum == "":
    distance_minimum = "110"


#########################################
#     Compute transformation matrix     #
#########################################
# Compute transformation matrix from the original frame
matrix,imgOutput = compute_perspective_transform(corner_points,width_og,height_og,cv2.imread(img_path))
height,width,_ = imgOutput.shape
blank_image = np.zeros((height,width,3), np.uint8)
height = blank_image.shape[0]
width = blank_image.shape[1]
dim = (width, height)


######################################################
#########                                    #########
#             START THE VIDEO STREAM                 #
#########                                    #########
######################################################
vs = cv2.VideoCapture(video_path)
output_video_1,output_video_2 = None,None
# Loop until the end of the video stream
while True:
    # Load the image of the ground and resize it to the correct size
    img = cv2.imread("../img/chemin_1.png")
    bird_view_img = cv2.resize(img, dim, interpolation = cv2.INTER_AREA)

    # Load the frame
    (frame_exists, frame) = vs.read()
    # Test if it has reached the end of the video
    if not frame_exists:
        break
    else:
        # Resize the image to the correct size
        frame = imutils.resize(frame, width=int(size_frame))

        # Make the predictions for this frame
        (boxes, scores, classes) = model.predict(frame)

        # Get the humans detected in the frame and return the 2 points to build the bounding box
        array_boxes_detected = get_human_box_detection(boxes,scores[0].tolist(),classes[0].tolist(),frame.shape[0],frame.shape[1])

        # Both of our lists that will contain the centroid coordinates and the ground points
        array_centroids,array_groundpoints = get_centroids_and_groundpoints(array_boxes_detected)

        # Use the transform matrix to get the transformed coordinates
        transformed_downoids = compute_point_perspective_transformation(matrix,array_groundpoints)

        # Show every point on the top view image
        for point in transformed_downoids:
            x,y = point
            cv2.circle(bird_view_img, (int(x),int(y)), BIG_CIRCLE, COLOR_GREEN, 2)
            cv2.circle(bird_view_img, (int(x),int(y)), SMALL_CIRCLE, COLOR_GREEN, -1)

        # Check if 2 or more people have been detected (otherwise no need to detect)
        if len(transformed_downoids) >= 2:
            for index,downoid in enumerate(transformed_downoids):
                if not (downoid[0] > width or downoid[0] < 0 or downoid[1] > height+200 or downoid[1] < 0 ):
                    cv2.rectangle(frame,(array_boxes_detected[index][1],array_boxes_detected[index][0]),(array_boxes_detected[index][3],array_boxes_detected[index][2]),COLOR_GREEN,2)

            # Iterate over every possible pair of points
            list_indexes = list(itertools.combinations(range(len(transformed_downoids)), 2))
            for i,pair in enumerate(itertools.combinations(transformed_downoids, r=2)):
                # Check if the distance between each pair of points is less than the minimum distance chosen
                if math.sqrt( (pair[0][0] - pair[1][0])**2 + (pair[0][1] - pair[1][1])**2 ) < int(distance_minimum):
                    # Change the colors of the points that are too close to each other to red
                    if not (pair[0][0] > width or pair[0][0] < 0 or pair[0][1] > height+200 or pair[0][1] < 0 or pair[1][0] > width or pair[1][0] < 0 or pair[1][1] > height+200 or pair[1][1] < 0):
                        change_color_on_topview(pair)
                        # Get the equivalent indexes of these points in the original frame and change the color to red
                        index_pt1 = list_indexes[i][0]
                        index_pt2 = list_indexes[i][1]
                        cv2.rectangle(frame,(array_boxes_detected[index_pt1][1],array_boxes_detected[index_pt1][0]),(array_boxes_detected[index_pt1][3],array_boxes_detected[index_pt1][2]),COLOR_RED,2)
                        cv2.rectangle(frame,(array_boxes_detected[index_pt2][1],array_boxes_detected[index_pt2][0]),(array_boxes_detected[index_pt2][3],array_boxes_detected[index_pt2][2]),COLOR_RED,2)

    # Draw the green rectangle to delimitate the detection zone
    draw_rectangle(corner_points)
    # Show both images
    cv2.imshow("Bird view", bird_view_img)
    cv2.imshow("Original picture", frame)

    key = cv2.waitKey(1) & 0xFF

    # Write both output videos to local folders
    if output_video_1 is None and output_video_2 is None:
        fourcc1 = cv2.VideoWriter_fourcc(*"MJPG")
        output_video_1 = cv2.VideoWriter("../output/video.avi", fourcc1, 25,(frame.shape[1], frame.shape[0]), True)
        fourcc2 = cv2.VideoWriter_fourcc(*"MJPG")
        output_video_2 = cv2.VideoWriter("../output/bird_view.avi", fourcc2, 25,(bird_view_img.shape[1], bird_view_img.shape[0]), True)
    elif output_video_1 is not None and output_video_2 is not None:
        output_video_1.write(frame)
        output_video_2.write(bird_view_img)

    # Break the loop
    if key == ord("q"):
        break
```
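To narrow down whether the custom model detects anyone at all, I believe a single debugging line can be added right after the `get_human_box_detection` call above (my own addition, not part of the repository; it reuses the variable names from the excerpt):

```python
# If this prints 0 for every frame, the model never detects a person and
# array_groundpoints stays empty by the time it reaches
# compute_point_perspective_transformation.
print("boxes detected in this frame:", len(array_boxes_detected))
```

If the count is always 0 with the custom model but not with the sample model, that would match the behavior described above.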
bird_view_transfo_functions.py
```python
import numpy as np
import cv2


def compute_perspective_transform(corner_points,width,height,image):
    """ Compute the transformation matrix
    @ corner_points : 4 corner points selected from the image
    @ height, width : size of the image
    """
    # Create an array out of the 4 corner points
    corner_points_array = np.float32(corner_points)
    # Create an array with the parameters (the dimensions) required to build the matrix
    img_params = np.float32([[0,0],[width,0],[0,height],[width,height]])
    # Compute and return the transformation matrix
    matrix = cv2.getPerspectiveTransform(corner_points_array,img_params)
    img_transformed = cv2.warpPerspective(image,matrix,(width,height))
    return matrix,img_transformed


def compute_point_perspective_transformation(matrix,list_downoids):
    """ Apply the perspective transformation to every ground point which has been detected on the main frame.
    @ matrix : the 3x3 matrix
    @ list_downoids : list that contains the points to transform
    return : list containing all the new points
    """
    # Compute the new coordinates of our points
    list_points_to_detect = np.float32(list_downoids).reshape(-1, 1, 2)
    transformed_points = cv2.perspectiveTransform(list_points_to_detect, matrix)
    # Loop over the points and add them to the list that will be returned
    transformed_points_list = list()
    for i in range(0,transformed_points.shape[0]):
        transformed_points_list.append([transformed_points[i][0][0],transformed_points[i][0][1]])
    return transformed_points_list
```
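For reference, here is a guard I sketched for the function where the crash occurs (my own modification, not the repository's code). It returns an empty list for frames with no detections, so the main loop's `len(transformed_downoids) >= 2` check simply skips them. It hides the symptom, but the question of why the custom model detects nothing would remain:

```python
def compute_point_perspective_transformation(matrix, list_downoids):
    """Same as the original, but tolerant of frames with no detections."""
    # Nothing to transform in this frame
    if len(list_downoids) == 0:
        return []
    list_points_to_detect = np.float32(list_downoids).reshape(-1, 1, 2)
    transformed_points = cv2.perspectiveTransform(list_points_to_detect, matrix)
    # Guard against perspectiveTransform returning None
    if transformed_points is None:
        return []
    return [[pt[0][0], pt[0][1]] for pt in transformed_points]
```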