
Question edit history

3

Fixed the code formatting as instructed.

2021/08/02 14:13

Posted

trafield

Score 0

title CHANGED
File without changes
body CHANGED
@@ -16,74 +16,131 @@
  AttributeError: 'NoneType' object has no attribute 'shape'
 
  ### Relevant source code
- social_distanciation_video_detection.py code
+ social_distanciation_video_detection.py
+ ```python
 
+ #########################################
+ #            Select the model           #
+ #########################################
+ model_names_list = [name for name in os.listdir("../models/.") if name.find(".") == -1]
+ for index,model_name in enumerate(model_names_list):
+     print(" - {} [{}]".format(model_name,index))
+ model_num = input(" Please select the number related to the model that you want : ")
+ if model_num == "":
+     model_path="../models/faster_rcnn_inception_v2_coco_2018_01_28/frozen_inference_graph.pb"
+ else :
+     model_path = "../models/"+model_names_list[int(model_num)]+"/frozen_inference_graph.pb"
+ print(bcolors.WARNING + " [ Loading the TENSORFLOW MODEL ... ]"+bcolors.ENDC)
+ model = Model(model_path)
+ print(bcolors.OKGREEN +"Done : [ Model loaded and initialized ] ..."+bcolors.ENDC)
+
+
+ #########################################
+ #            Select the video           #
+ #########################################
+ video_names_list = [name for name in os.listdir("../video/") if name.endswith(".mp4") or name.endswith(".avi")]
+ for index,video_name in enumerate(video_names_list):
+     print(" - {} [{}]".format(video_name,index))
+ video_num = input("Enter the exact name of the video (including .mp4 or else) : ")
+ if video_num == "":
+     video_path="../video/PETS2009.avi"
+ else :
+     video_path = "../video/"+video_names_list[int(video_num)]
+
+
+ #########################################
+ #            Minimal distance           #
+ #########################################
+ distance_minimum = input("Prompt the size of the minimal distance between 2 pedestrians : ")
+ if distance_minimum == "":
+     distance_minimum = "110"
+
+
+ #########################################
+ #     Compute transformation matrix     #
+ #########################################
+ # Compute transformation matrix from the original frame
+ matrix,imgOutput = compute_perspective_transform(corner_points,width_og,height_og,cv2.imread(img_path))
+ height,width,_ = imgOutput.shape
+ blank_image = np.zeros((height,width,3), np.uint8)
+ height = blank_image.shape[0]
+ width = blank_image.shape[1]
+ dim = (width, height)
+
+
+
+
+ ######################################################
+ #########                                    #########
+ #             START THE VIDEO STREAM                 #
+ #########                                    #########
+ ######################################################
 vs = cv2.VideoCapture(video_path)
 output_video_1,output_video_2 = None,None
- Loop until the end of the video stream
+ # Loop until the end of the video stream
 while True:
-     Load the image of the ground and resize it to the correct size
+     # Load the image of the ground and resize it to the correct size
     img = cv2.imread("../img/chemin_1.png")
     bird_view_img = cv2.resize(img, dim, interpolation = cv2.INTER_AREA)
 
-     Load the frame
+     # Load the frame
     (frame_exists, frame) = vs.read()
-     Test if it has reached the end of the video
+     # Test if it has reached the end of the video
     if not frame_exists:
         break
     else:
-         Resize the image to the correct size
+         # Resize the image to the correct size
         frame = imutils.resize(frame, width=int(size_frame))
 
-         Make the predictions for this frame
+         # Make the predictions for this frame
         (boxes, scores, classes) = model.predict(frame)
 
-         Get the human detected in the frame and return the 2 points to build the bounding box
+         # Get the human detected in the frame and return the 2 points to build the bounding box
         array_boxes_detected = get_human_box_detection(boxes,scores[0].tolist(),classes[0].tolist(),frame.shape[0],frame.shape[1])
 
-         Both of our lists that will contain the centroïds coordonates and the ground points
+         # Both of our lists that will contain the centroïds coordonates and the ground points
         array_centroids,array_groundpoints = get_centroids_and_groundpoints(array_boxes_detected)
 
-         Use the transform matrix to get the transformed coordonates
+         # Use the transform matrix to get the transformed coordonates
         transformed_downoids = compute_point_perspective_transformation(matrix,array_groundpoints)
 
-         Show every point on the top view image
+         # Show every point on the top view image
         for point in transformed_downoids:
             x,y = point
            cv2.circle(bird_view_img, (int(x),int(y)), BIG_CIRCLE, COLOR_GREEN, 2)
            cv2.circle(bird_view_img, (int(x),int(y)), SMALL_CIRCLE, COLOR_GREEN, -1)
 
-         Check if 2 or more people have been detected (otherwise no need to detect)
+         # Check if 2 or more people have been detected (otherwise no need to detect)
         if len(transformed_downoids) >= 2:
             for index,downoid in enumerate(transformed_downoids):
                 if not (downoid[0] > width or downoid[0] < 0 or downoid[1] > height+200 or downoid[1] < 0 ):
                     cv2.rectangle(frame,(array_boxes_detected[index][1],array_boxes_detected[index][0]),(array_boxes_detected[index][3],array_boxes_detected[index][2]),COLOR_GREEN,2)
 
-             Iterate over every possible 2 by 2 between the points combinations
+             # Iterate over every possible 2 by 2 between the points combinations
             list_indexes = list(itertools.combinations(range(len(transformed_downoids)), 2))
             for i,pair in enumerate(itertools.combinations(transformed_downoids, r=2)):
-                 Check if the distance between each combination of points is less than the minimum distance chosen
+                 # Check if the distance between each combination of points is less than the minimum distance chosen
                 if math.sqrt( (pair[0][0] - pair[1][0])**2 + (pair[0][1] - pair[1][1])**2 ) < int(distance_minimum):
-                     Change the colors of the points that are too close from each other to red
+                     # Change the colors of the points that are too close from each other to red
                     if not (pair[0][0] > width or pair[0][0] < 0 or pair[0][1] > height+200 or pair[0][1] < 0 or pair[1][0] > width or pair[1][0] < 0 or pair[1][1] > height+200 or pair[1][1] < 0):
                         change_color_on_topview(pair)
-                         Get the equivalent indexes of these points in the original frame and change the color to red
+                         # Get the equivalent indexes of these points in the original frame and change the color to red
                         index_pt1 = list_indexes[i][0]
                         index_pt2 = list_indexes[i][1]
                         cv2.rectangle(frame,(array_boxes_detected[index_pt1][1],array_boxes_detected[index_pt1][0]),(array_boxes_detected[index_pt1][3],array_boxes_detected[index_pt1][2]),COLOR_RED,2)
                         cv2.rectangle(frame,(array_boxes_detected[index_pt2][1],array_boxes_detected[index_pt2][0]),(array_boxes_detected[index_pt2][3],array_boxes_detected[index_pt2][2]),COLOR_RED,2)
 
 
-     Draw the green rectangle to delimitate the detection zone
+     # Draw the green rectangle to delimitate the detection zone
     draw_rectangle(corner_points)
-     Show both images
+     # Show both images
     cv2.imshow("Bird view", bird_view_img)
     cv2.imshow("Original picture", frame)
 
 
     key = cv2.waitKey(1) & 0xFF
 
-     Write the both outputs video to a local folders
+     # Write the both outputs video to a local folders
     if output_video_1 is None and output_video_2 is None:
         fourcc1 = cv2.VideoWriter_fourcc(*"MJPG")
         output_video_1 = cv2.VideoWriter("../output/video.avi", fourcc1, 25,(frame.shape[1], frame.shape[0]), True)
@@ -93,13 +150,14 @@
         output_video_1.write(frame)
         output_video_2.write(bird_view_img)
 
-     Break the loop
+     # Break the loop
     if key == ord("q"):
         break
 
- #bird_view_transfo_functions.py code
 
-
+ ```
+ bird_view_transfo_functions.py
+ ```python
 import numpy as np
 import cv2
 
@@ -109,11 +167,11 @@
     @ corner_points : 4 corner points selected from the image
     @ height, width : size of the image
     """
-     Create an array out of the 4 corner points
+     # Create an array out of the 4 corner points
     corner_points_array = np.float32(corner_points)
-     Create an array with the parameters (the dimensions) required to build the matrix
+     # Create an array with the parameters (the dimensions) required to build the matrix
     img_params = np.float32([[0,0],[width,0],[0,height],[width,height]])
-     Compute and return the transformation matrix
+     # Compute and return the transformation matrix
     matrix = cv2.getPerspectiveTransform(corner_points_array,img_params)
     img_transformed = cv2.warpPerspective(image,matrix,(width,height))
     return matrix,img_transformed
@@ -125,11 +183,13 @@
     @ list_downoids : list that contains the points to transform
     return : list containing all the new points
     """
-     Compute the new coordinates of our points
+     # Compute the new coordinates of our points
     list_points_to_detect = np.float32(list_downoids).reshape(-1, 1, 2)
     transformed_points = cv2.perspectiveTransform(list_points_to_detect, matrix)
-     Loop over the points and add them to the list that will be returned
+     # Loop over the points and add them to the list that will be returned
     transformed_points_list = list()
     for i in range(0,transformed_points.shape[0]):
         transformed_points_list.append([transformed_points[i][0][0],transformed_points[i][0][1]])
-     return transformed_points_list
+     return transformed_points_list
+
+ ```
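For reference, the traceback quoted in this question points at `transformed_points.shape` inside `compute_point_perspective_transformation`. One common cause of that `AttributeError` is `cv2.perspectiveTransform` being handed an empty point array (no pedestrians detected in the frame), in which case it can return `None`. A minimal guarded sketch of the same function, assuming the signature used in `bird_view_transfo_functions.py`:

```python
import numpy as np
import cv2

def compute_point_perspective_transformation(matrix, list_downoids):
    """Project detected ground points into the bird's-eye view."""
    # No detections in this frame: return early instead of passing an
    # empty array to cv2.perspectiveTransform, which may return None.
    if len(list_downoids) == 0:
        return []
    list_points_to_detect = np.float32(list_downoids).reshape(-1, 1, 2)
    transformed_points = cv2.perspectiveTransform(list_points_to_detect, matrix)
    # Defensive check so a None result can never reach .shape and crash.
    if transformed_points is None:
        return []
    return [[pt[0][0], pt[0][1]] for pt in transformed_points]
```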

2

Fixed the added code.

2021/08/02 14:13

Posted

trafield

Score 0

title CHANGED
File without changes
body CHANGED
@@ -14,80 +14,76 @@
   File "C:\covid-social-distancing-detection\src\bird_view_transfo_functions.py", line 31, in compute_point_perspective_transformation
     for i in range(0,transformed_points.shape[0]):
 AttributeError: 'NoneType' object has no attribute 'shape'
+
- ``### Relevant source code
+ ### Relevant source code
 social_distanciation_video_detection.py code
 
- ######################################################
- #########                                    #########
- #             START THE VIDEO STREAM                 #
- #########                                    #########
- ######################################################
 vs = cv2.VideoCapture(video_path)
 output_video_1,output_video_2 = None,None
- # Loop until the end of the video stream
+ Loop until the end of the video stream
 while True:
-     # Load the image of the ground and resize it to the correct size
+     Load the image of the ground and resize it to the correct size
     img = cv2.imread("../img/chemin_1.png")
     bird_view_img = cv2.resize(img, dim, interpolation = cv2.INTER_AREA)
 
-     # Load the frame
+     Load the frame
     (frame_exists, frame) = vs.read()
-     # Test if it has reached the end of the video
+     Test if it has reached the end of the video
     if not frame_exists:
         break
     else:
-         # Resize the image to the correct size
+         Resize the image to the correct size
         frame = imutils.resize(frame, width=int(size_frame))
 
-         # Make the predictions for this frame
+         Make the predictions for this frame
         (boxes, scores, classes) = model.predict(frame)
 
-         # Get the human detected in the frame and return the 2 points to build the bounding box
+         Get the human detected in the frame and return the 2 points to build the bounding box
         array_boxes_detected = get_human_box_detection(boxes,scores[0].tolist(),classes[0].tolist(),frame.shape[0],frame.shape[1])
 
-         # Both of our lists that will contain the centroïds coordonates and the ground points
+         Both of our lists that will contain the centroïds coordonates and the ground points
         array_centroids,array_groundpoints = get_centroids_and_groundpoints(array_boxes_detected)
 
-         # Use the transform matrix to get the transformed coordonates
+         Use the transform matrix to get the transformed coordonates
         transformed_downoids = compute_point_perspective_transformation(matrix,array_groundpoints)
 
-         # Show every point on the top view image
+         Show every point on the top view image
         for point in transformed_downoids:
             x,y = point
             cv2.circle(bird_view_img, (int(x),int(y)), BIG_CIRCLE, COLOR_GREEN, 2)
             cv2.circle(bird_view_img, (int(x),int(y)), SMALL_CIRCLE, COLOR_GREEN, -1)
 
-         # Check if 2 or more people have been detected (otherwise no need to detect)
+         Check if 2 or more people have been detected (otherwise no need to detect)
         if len(transformed_downoids) >= 2:
             for index,downoid in enumerate(transformed_downoids):
                 if not (downoid[0] > width or downoid[0] < 0 or downoid[1] > height+200 or downoid[1] < 0 ):
                     cv2.rectangle(frame,(array_boxes_detected[index][1],array_boxes_detected[index][0]),(array_boxes_detected[index][3],array_boxes_detected[index][2]),COLOR_GREEN,2)
 
-             # Iterate over every possible 2 by 2 between the points combinations
+             Iterate over every possible 2 by 2 between the points combinations
             list_indexes = list(itertools.combinations(range(len(transformed_downoids)), 2))
             for i,pair in enumerate(itertools.combinations(transformed_downoids, r=2)):
-                 # Check if the distance between each combination of points is less than the minimum distance chosen
+                 Check if the distance between each combination of points is less than the minimum distance chosen
                 if math.sqrt( (pair[0][0] - pair[1][0])**2 + (pair[0][1] - pair[1][1])**2 ) < int(distance_minimum):
-                     # Change the colors of the points that are too close from each other to red
+                     Change the colors of the points that are too close from each other to red
                     if not (pair[0][0] > width or pair[0][0] < 0 or pair[0][1] > height+200 or pair[0][1] < 0 or pair[1][0] > width or pair[1][0] < 0 or pair[1][1] > height+200 or pair[1][1] < 0):
                         change_color_on_topview(pair)
-                         # Get the equivalent indexes of these points in the original frame and change the color to red
+                         Get the equivalent indexes of these points in the original frame and change the color to red
                         index_pt1 = list_indexes[i][0]
                         index_pt2 = list_indexes[i][1]
                         cv2.rectangle(frame,(array_boxes_detected[index_pt1][1],array_boxes_detected[index_pt1][0]),(array_boxes_detected[index_pt1][3],array_boxes_detected[index_pt1][2]),COLOR_RED,2)
                         cv2.rectangle(frame,(array_boxes_detected[index_pt2][1],array_boxes_detected[index_pt2][0]),(array_boxes_detected[index_pt2][3],array_boxes_detected[index_pt2][2]),COLOR_RED,2)
 
 
-     # Draw the green rectangle to delimitate the detection zone
+     Draw the green rectangle to delimitate the detection zone
     draw_rectangle(corner_points)
-     # Show both images
+     Show both images
     cv2.imshow("Bird view", bird_view_img)
     cv2.imshow("Original picture", frame)
 
 
     key = cv2.waitKey(1) & 0xFF
 
-     # Write the both outputs video to a local folders
+     Write the both outputs video to a local folders
     if output_video_1 is None and output_video_2 is None:
         fourcc1 = cv2.VideoWriter_fourcc(*"MJPG")
         output_video_1 = cv2.VideoWriter("../output/video.avi", fourcc1, 25,(frame.shape[1], frame.shape[0]), True)
@@ -97,11 +93,11 @@
         output_video_1.write(frame)
         output_video_2.write(bird_view_img)
 
-     # Break the loop
+     Break the loop
     if key == ord("q"):
         break
 
- bird_view_transfo_functions.py code
+ #bird_view_transfo_functions.py code
 
 
 import numpy as np
@@ -113,11 +109,11 @@
     @ corner_points : 4 corner points selected from the image
     @ height, width : size of the image
     """
-     # Create an array out of the 4 corner points
+     Create an array out of the 4 corner points
     corner_points_array = np.float32(corner_points)
-     # Create an array with the parameters (the dimensions) required to build the matrix
+     Create an array with the parameters (the dimensions) required to build the matrix
     img_params = np.float32([[0,0],[width,0],[0,height],[width,height]])
-     # Compute and return the transformation matrix
+     Compute and return the transformation matrix
     matrix = cv2.getPerspectiveTransform(corner_points_array,img_params)
     img_transformed = cv2.warpPerspective(image,matrix,(width,height))
     return matrix,img_transformed
@@ -129,10 +125,10 @@
     @ list_downoids : list that contains the points to transform
     return : list containing all the new points
     """
-     # Compute the new coordinates of our points
+     Compute the new coordinates of our points
     list_points_to_detect = np.float32(list_downoids).reshape(-1, 1, 2)
     transformed_points = cv2.perspectiveTransform(list_points_to_detect, matrix)
-     # Loop over the points and add them to the list that will be returned
+     Loop over the points and add them to the list that will be returned
     transformed_points_list = list()
     for i in range(0,transformed_points.shape[0]):
         transformed_points_list.append([transformed_points[i][0][0],transformed_points[i][0][1]])
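A quick way to sanity-check the matrix built in `compute_perspective_transform` above: `cv2.getPerspectiveTransform` maps the four selected corner points onto the corners of the bird's-eye image, so transforming the source corners should reproduce `img_params`. A minimal sketch with made-up corner coordinates (the values are illustrative, not taken from the question):

```python
import numpy as np
import cv2

# Hypothetical corner points clicked on the original frame, ordered
# top-left, top-right, bottom-left, bottom-right to match img_params.
corner_points = [[100, 200], [500, 200], [50, 480], [560, 480]]
width, height = 400, 600  # size of the bird's-eye view

corner_points_array = np.float32(corner_points)
img_params = np.float32([[0, 0], [width, 0], [0, height], [width, height]])
matrix = cv2.getPerspectiveTransform(corner_points_array, img_params)

# Each source corner should land on its target corner (up to float error).
pts = corner_points_array.reshape(-1, 1, 2)
print(cv2.perspectiveTransform(pts, matrix))
# -> approximately [[[0,0]], [[400,0]], [[0,600]], [[400,600]]]
```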

1

Added the code.

2021/08/01 13:49

Posted

trafield

Score 0

title CHANGED
File without changes
body CHANGED
@@ -8,13 +8,132 @@
 ### Problem / error message
 It works fine with the sample, but when I use a model I trained myself, an error occurs.
 
-
- ### Relevant source code
-
 Traceback (most recent call last):
   File "social_distanciation_video_detection.py", line 193, in <module>
     transformed_downoids = compute_point_perspective_transformation(matrix,array_groundpoints)
   File "C:\covid-social-distancing-detection\src\bird_view_transfo_functions.py", line 31, in compute_point_perspective_transformation
     for i in range(0,transformed_points.shape[0]):
 AttributeError: 'NoneType' object has no attribute 'shape'
+ ``### Relevant source code
+ social_distanciation_video_detection.py code
+
+ ######################################################
+ #########                                    #########
+ #             START THE VIDEO STREAM                 #
+ #########                                    #########
+ ######################################################
+ vs = cv2.VideoCapture(video_path)
+ output_video_1,output_video_2 = None,None
+ # Loop until the end of the video stream
+ while True:
+     # Load the image of the ground and resize it to the correct size
+     img = cv2.imread("../img/chemin_1.png")
+     bird_view_img = cv2.resize(img, dim, interpolation = cv2.INTER_AREA)
+
+     # Load the frame
+     (frame_exists, frame) = vs.read()
+     # Test if it has reached the end of the video
+     if not frame_exists:
- ``
+         break
+     else:
+         # Resize the image to the correct size
+         frame = imutils.resize(frame, width=int(size_frame))
+
+         # Make the predictions for this frame
+         (boxes, scores, classes) = model.predict(frame)
+
+         # Get the human detected in the frame and return the 2 points to build the bounding box
+         array_boxes_detected = get_human_box_detection(boxes,scores[0].tolist(),classes[0].tolist(),frame.shape[0],frame.shape[1])
+
+         # Both of our lists that will contain the centroïds coordonates and the ground points
+         array_centroids,array_groundpoints = get_centroids_and_groundpoints(array_boxes_detected)
+
+         # Use the transform matrix to get the transformed coordonates
+         transformed_downoids = compute_point_perspective_transformation(matrix,array_groundpoints)
+
+         # Show every point on the top view image
+         for point in transformed_downoids:
+             x,y = point
+             cv2.circle(bird_view_img, (int(x),int(y)), BIG_CIRCLE, COLOR_GREEN, 2)
+             cv2.circle(bird_view_img, (int(x),int(y)), SMALL_CIRCLE, COLOR_GREEN, -1)
+
+         # Check if 2 or more people have been detected (otherwise no need to detect)
+         if len(transformed_downoids) >= 2:
+             for index,downoid in enumerate(transformed_downoids):
+                 if not (downoid[0] > width or downoid[0] < 0 or downoid[1] > height+200 or downoid[1] < 0 ):
+                     cv2.rectangle(frame,(array_boxes_detected[index][1],array_boxes_detected[index][0]),(array_boxes_detected[index][3],array_boxes_detected[index][2]),COLOR_GREEN,2)
+
+             # Iterate over every possible 2 by 2 between the points combinations
+             list_indexes = list(itertools.combinations(range(len(transformed_downoids)), 2))
+             for i,pair in enumerate(itertools.combinations(transformed_downoids, r=2)):
+                 # Check if the distance between each combination of points is less than the minimum distance chosen
+                 if math.sqrt( (pair[0][0] - pair[1][0])**2 + (pair[0][1] - pair[1][1])**2 ) < int(distance_minimum):
+                     # Change the colors of the points that are too close from each other to red
+                     if not (pair[0][0] > width or pair[0][0] < 0 or pair[0][1] > height+200 or pair[0][1] < 0 or pair[1][0] > width or pair[1][0] < 0 or pair[1][1] > height+200 or pair[1][1] < 0):
+                         change_color_on_topview(pair)
+                         # Get the equivalent indexes of these points in the original frame and change the color to red
+                         index_pt1 = list_indexes[i][0]
+                         index_pt2 = list_indexes[i][1]
+                         cv2.rectangle(frame,(array_boxes_detected[index_pt1][1],array_boxes_detected[index_pt1][0]),(array_boxes_detected[index_pt1][3],array_boxes_detected[index_pt1][2]),COLOR_RED,2)
+                         cv2.rectangle(frame,(array_boxes_detected[index_pt2][1],array_boxes_detected[index_pt2][0]),(array_boxes_detected[index_pt2][3],array_boxes_detected[index_pt2][2]),COLOR_RED,2)
+
+
+     # Draw the green rectangle to delimitate the detection zone
+     draw_rectangle(corner_points)
+     # Show both images
+     cv2.imshow("Bird view", bird_view_img)
+     cv2.imshow("Original picture", frame)
+
+
+     key = cv2.waitKey(1) & 0xFF
+
+     # Write the both outputs video to a local folders
+     if output_video_1 is None and output_video_2 is None:
+         fourcc1 = cv2.VideoWriter_fourcc(*"MJPG")
+         output_video_1 = cv2.VideoWriter("../output/video.avi", fourcc1, 25,(frame.shape[1], frame.shape[0]), True)
+         fourcc2 = cv2.VideoWriter_fourcc(*"MJPG")
+         output_video_2 = cv2.VideoWriter("../output/bird_view.avi", fourcc2, 25,(bird_view_img.shape[1], bird_view_img.shape[0]), True)
+     elif output_video_1 is not None and output_video_2 is not None:
+         output_video_1.write(frame)
+         output_video_2.write(bird_view_img)
+
+     # Break the loop
+     if key == ord("q"):
+         break
+
+ bird_view_transfo_functions.py code
+
+
+ import numpy as np
+ import cv2
+
+
+ def compute_perspective_transform(corner_points,width,height,image):
+     """ Compute the transformation matrix
+     @ corner_points : 4 corner points selected from the image
+     @ height, width : size of the image
+     """
+     # Create an array out of the 4 corner points
+     corner_points_array = np.float32(corner_points)
+     # Create an array with the parameters (the dimensions) required to build the matrix
+     img_params = np.float32([[0,0],[width,0],[0,height],[width,height]])
+     # Compute and return the transformation matrix
+     matrix = cv2.getPerspectiveTransform(corner_points_array,img_params)
+     img_transformed = cv2.warpPerspective(image,matrix,(width,height))
+     return matrix,img_transformed
+
+
+ def compute_point_perspective_transformation(matrix,list_downoids):
+     """ Apply the perspective transformation to every ground point which have been detected on the main frame.
+     @ matrix : the 3x3 matrix
+     @ list_downoids : list that contains the points to transform
+     return : list containing all the new points
+     """
+     # Compute the new coordinates of our points
+     list_points_to_detect = np.float32(list_downoids).reshape(-1, 1, 2)
+     transformed_points = cv2.perspectiveTransform(list_points_to_detect, matrix)
+     # Loop over the points and add them to the list that will be returned
+     transformed_points_list = list()
+     for i in range(0,transformed_points.shape[0]):
+         transformed_points_list.append([transformed_points[i][0][0],transformed_points[i][0][1]])
+     return transformed_points_list