Question edit history

3

I fixed the code formatting as you instructed.

2021/08/02 14:13

Posted

trafield

Score 0

test CHANGED
File without changes
test CHANGED
@@ -34,19 +34,133 @@
 
  ### Relevant source code
 
- Code for social_distanciation_video_detection.py
+ social_distanciation_video_detection.py
+
+ ```python
+ #########################################
+ # Select the model #
+ #########################################
+ model_names_list = [name for name in os.listdir("../models/.") if name.find(".") == -1]
+ for index,model_name in enumerate(model_names_list):
+     print(" - {} [{}]".format(model_name,index))
+ model_num = input(" Please select the number related to the model that you want : ")
+ if model_num == "":
+     model_path="../models/faster_rcnn_inception_v2_coco_2018_01_28/frozen_inference_graph.pb"
+ else :
+     model_path = "../models/"+model_names_list[int(model_num)]+"/frozen_inference_graph.pb"
+ print(bcolors.WARNING + " [ Loading the TENSORFLOW MODEL ... ]"+bcolors.ENDC)
+ model = Model(model_path)
+ print(bcolors.OKGREEN +"Done : [ Model loaded and initialized ] ..."+bcolors.ENDC)
+
+ #########################################
+ # Select the video #
+ #########################################
+ video_names_list = [name for name in os.listdir("../video/") if name.endswith(".mp4") or name.endswith(".avi")]
+ for index,video_name in enumerate(video_names_list):
+     print(" - {} [{}]".format(video_name,index))
+ video_num = input("Enter the exact name of the video (including .mp4 or else) : ")
+ if video_num == "":
+     video_path="../video/PETS2009.avi"
+ else :
+     video_path = "../video/"+video_names_list[int(video_num)]
+
+ #########################################
+ # Minimal distance #
+ #########################################
+ distance_minimum = input("Prompt the size of the minimal distance between 2 pedestrians : ")
+ if distance_minimum == "":
+     distance_minimum = "110"
+
+ #########################################
+ # Compute transformation matrix #
+ #########################################
+ # Compute transformation matrix from the original frame
+ matrix,imgOutput = compute_perspective_transform(corner_points,width_og,height_og,cv2.imread(img_path))
+ height,width,_ = imgOutput.shape
+ blank_image = np.zeros((height,width,3), np.uint8)
+ height = blank_image.shape[0]
+ width = blank_image.shape[1]
+ dim = (width, height)
+
+ ######################################################
+ ######### #########
+ # START THE VIDEO STREAM #
+ ######### #########
+ ######################################################
 
  vs = cv2.VideoCapture(video_path)
  output_video_1,output_video_2 = None,None
- Loop until the end of the video stream
+ # Loop until the end of the video stream
  while True:
-     Load the image of the ground and resize it to the correct size
+     # Load the image of the ground and resize it to the correct size
      img = cv2.imread("../img/chemin_1.png")
@@ -54,11 +168,11 @@
 
-     Load the frame
+     # Load the frame
      (frame_exists, frame) = vs.read()
-     Test if it has reached the end of the video
+     # Test if it has reached the end of the video
      if not frame_exists:
@@ -66,37 +180,37 @@
 
      else:
-         Resize the image to the correct size
+         # Resize the image to the correct size
          frame = imutils.resize(frame, width=int(size_frame))
 
-         Make the predictions for this frame
+         # Make the predictions for this frame
          (boxes, scores, classes) = model.predict(frame)
 
-         Get the human detected in the frame and return the 2 points to build the bounding box
+         # Get the human detected in the frame and return the 2 points to build the bounding box
          array_boxes_detected = get_human_box_detection(boxes,scores[0].tolist(),classes[0].tolist(),frame.shape[0],frame.shape[1])
 
-         Both of our lists that will contain the centroïds coordonates and the ground points
+         # Both of our lists that will contain the centroïds coordonates and the ground points
          array_centroids,array_groundpoints = get_centroids_and_groundpoints(array_boxes_detected)
 
-         Use the transform matrix to get the transformed coordonates
+         # Use the transform matrix to get the transformed coordonates
          transformed_downoids = compute_point_perspective_transformation(matrix,array_groundpoints)
 
-         Show every point on the top view image
+         # Show every point on the top view image
          for point in transformed_downoids:
@@ -108,7 +222,7 @@
 
-         Check if 2 or more people have been detected (otherwise no need to detect)
+         # Check if 2 or more people have been detected (otherwise no need to detect)
          if len(transformed_downoids) >= 2:
@@ -120,23 +234,23 @@
 
-             Iterate over every possible 2 by 2 between the points combinations
+             # Iterate over every possible 2 by 2 between the points combinations
              list_indexes = list(itertools.combinations(range(len(transformed_downoids)), 2))
              for i,pair in enumerate(itertools.combinations(transformed_downoids, r=2)):
-                 Check if the distance between each combination of points is less than the minimum distance chosen
+                 # Check if the distance between each combination of points is less than the minimum distance chosen
                  if math.sqrt( (pair[0][0] - pair[1][0])**2 + (pair[0][1] - pair[1][1])**2 ) < int(distance_minimum):
-                     Change the colors of the points that are too close from each other to red
+                     # Change the colors of the points that are too close from each other to red
                      if not (pair[0][0] > width or pair[0][0] < 0 or pair[0][1] > height+200 or pair[0][1] < 0 or pair[1][0] > width or pair[1][0] < 0 or pair[1][1] > height+200 or pair[1][1] < 0):
                          change_color_on_topview(pair)
-                         Get the equivalent indexes of these points in the original frame and change the color to red
+                         # Get the equivalent indexes of these points in the original frame and change the color to red
                          index_pt1 = list_indexes[i][0]
@@ -150,11 +264,11 @@
 
-         Draw the green rectangle to delimitate the detection zone
+         # Draw the green rectangle to delimitate the detection zone
          draw_rectangle(corner_points)
-         Show both images
+         # Show both images
          cv2.imshow("Bird view", bird_view_img)
@@ -168,7 +282,7 @@
 
-         Write the both outputs video to a local folders
+         # Write the both outputs video to a local folders
          if output_video_1 is None and output_video_2 is None:
@@ -188,7 +302,7 @@
 
-         Break the loop
+         # Break the loop
          if key == ord("q"):
@@ -196,11 +310,13 @@
 
+ ```
+
- #Code for bird_view_transfo_functions.py
+ bird_view_transfo_functions.py
+
+ ```python
 
  import numpy as np
@@ -220,15 +336,15 @@
 
      """
-     Create an array out of the 4 corner points
+     # Create an array out of the 4 corner points
      corner_points_array = np.float32(corner_points)
-     Create an array with the parameters (the dimensions) required to build the matrix
+     # Create an array with the parameters (the dimensions) required to build the matrix
      img_params = np.float32([[0,0],[width,0],[0,height],[width,height]])
-     Compute and return the transformation matrix
+     # Compute and return the transformation matrix
      matrix = cv2.getPerspectiveTransform(corner_points_array,img_params)
@@ -252,13 +368,13 @@
 
      """
-     Compute the new coordinates of our points
+     # Compute the new coordinates of our points
      list_points_to_detect = np.float32(list_downoids).reshape(-1, 1, 2)
      transformed_points = cv2.perspectiveTransform(list_points_to_detect, matrix)
-     Loop over the points and add them to the list that will be returned
+     # Loop over the points and add them to the list that will be returned
      transformed_points_list = list()
@@ -267,3 +383,7 @@
          transformed_points_list.append([transformed_points[i][0][0],transformed_points[i][0][1]])
      return transformed_points_list
+
+ ```
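
Note on the error this question is about: `AttributeError: 'NoneType' object has no attribute 'shape'` is the usual symptom of `cv2.imread` (or `VideoCapture.read`) silently returning `None`, after which any call that touches `.shape`, such as `imutils.resize` or `cv2.resize`, fails. A minimal sketch of the kind of guard that surfaces the real cause, reusing the relative paths from the code above; the exception types and messages here are illustrative choices, not part of the original question:

```python
import cv2

# Guard the ground image: cv2.imread does not raise on a missing file, it
# silently returns None, and the next call that reads .shape then fails
# with the AttributeError quoted in the question.
img = cv2.imread("../img/chemin_1.png")
if img is None:
    raise FileNotFoundError("cannot read ../img/chemin_1.png - "
                            "check the working directory and the relative path")

# Guard the video source the same way.
vs = cv2.VideoCapture("../video/PETS2009.avi")
if not vs.isOpened():
    raise IOError("cannot open ../video/PETS2009.avi")
(frame_exists, frame) = vs.read()
if not frame_exists or frame is None:
    raise IOError("could not read a frame from the video")

print(img.shape, frame.shape)  # safe to touch .shape once both guards pass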

2

I fixed the code I added.

2021/08/02 14:13

Posted

trafield

Score 0

test CHANGED
File without changes
test CHANGED
@@ -30,31 +30,23 @@
 
  AttributeError: 'NoneType' object has no attribute 'shape'
 
- ``### Relevant source code
+ ### Relevant source code
 
  Code for social_distanciation_video_detection.py
 
- ######################################################
- ######### #########
- # START THE VIDEO STREAM #
- ######### #########
- ######################################################
  vs = cv2.VideoCapture(video_path)
  output_video_1,output_video_2 = None,None
- # Loop until the end of the video stream
+ Loop until the end of the video stream
  while True:
-     # Load the image of the ground and resize it to the correct size
+     Load the image of the ground and resize it to the correct size
      img = cv2.imread("../img/chemin_1.png")
@@ -62,11 +54,11 @@
 
-     # Load the frame
+     Load the frame
      (frame_exists, frame) = vs.read()
-     # Test if it has reached the end of the video
+     Test if it has reached the end of the video
      if not frame_exists:
@@ -74,37 +66,37 @@
 
      else:
-         # Resize the image to the correct size
+         Resize the image to the correct size
          frame = imutils.resize(frame, width=int(size_frame))
 
-         # Make the predictions for this frame
+         Make the predictions for this frame
          (boxes, scores, classes) = model.predict(frame)
 
-         # Get the human detected in the frame and return the 2 points to build the bounding box
+         Get the human detected in the frame and return the 2 points to build the bounding box
          array_boxes_detected = get_human_box_detection(boxes,scores[0].tolist(),classes[0].tolist(),frame.shape[0],frame.shape[1])
 
-         # Both of our lists that will contain the centroïds coordonates and the ground points
+         Both of our lists that will contain the centroïds coordonates and the ground points
          array_centroids,array_groundpoints = get_centroids_and_groundpoints(array_boxes_detected)
 
-         # Use the transform matrix to get the transformed coordonates
+         Use the transform matrix to get the transformed coordonates
          transformed_downoids = compute_point_perspective_transformation(matrix,array_groundpoints)
 
-         # Show every point on the top view image
+         Show every point on the top view image
          for point in transformed_downoids:
@@ -116,7 +108,7 @@
 
-         # Check if 2 or more people have been detected (otherwise no need to detect)
+         Check if 2 or more people have been detected (otherwise no need to detect)
          if len(transformed_downoids) >= 2:
@@ -128,23 +120,23 @@
 
-             # Iterate over every possible 2 by 2 between the points combinations
+             Iterate over every possible 2 by 2 between the points combinations
              list_indexes = list(itertools.combinations(range(len(transformed_downoids)), 2))
              for i,pair in enumerate(itertools.combinations(transformed_downoids, r=2)):
-                 # Check if the distance between each combination of points is less than the minimum distance chosen
+                 Check if the distance between each combination of points is less than the minimum distance chosen
                  if math.sqrt( (pair[0][0] - pair[1][0])**2 + (pair[0][1] - pair[1][1])**2 ) < int(distance_minimum):
-                     # Change the colors of the points that are too close from each other to red
+                     Change the colors of the points that are too close from each other to red
                      if not (pair[0][0] > width or pair[0][0] < 0 or pair[0][1] > height+200 or pair[0][1] < 0 or pair[1][0] > width or pair[1][0] < 0 or pair[1][1] > height+200 or pair[1][1] < 0):
                          change_color_on_topview(pair)
-                         # Get the equivalent indexes of these points in the original frame and change the color to red
+                         Get the equivalent indexes of these points in the original frame and change the color to red
                          index_pt1 = list_indexes[i][0]
@@ -158,11 +150,11 @@
 
-         # Draw the green rectangle to delimitate the detection zone
+         Draw the green rectangle to delimitate the detection zone
          draw_rectangle(corner_points)
-         # Show both images
+         Show both images
          cv2.imshow("Bird view", bird_view_img)
@@ -176,7 +168,7 @@
 
-         # Write the both outputs video to a local folders
+         Write the both outputs video to a local folders
          if output_video_1 is None and output_video_2 is None:
@@ -196,7 +188,7 @@
 
-         # Break the loop
+         Break the loop
          if key == ord("q"):
@@ -204,7 +196,7 @@
 
- Code for bird_view_transfo_functions.py
+ #Code for bird_view_transfo_functions.py
 
@@ -228,15 +220,15 @@
 
      """
-     # Create an array out of the 4 corner points
+     Create an array out of the 4 corner points
      corner_points_array = np.float32(corner_points)
-     # Create an array with the parameters (the dimensions) required to build the matrix
+     Create an array with the parameters (the dimensions) required to build the matrix
      img_params = np.float32([[0,0],[width,0],[0,height],[width,height]])
-     # Compute and return the transformation matrix
+     Compute and return the transformation matrix
      matrix = cv2.getPerspectiveTransform(corner_points_array,img_params)
@@ -260,13 +252,13 @@
 
      """
-     # Compute the new coordinates of our points
+     Compute the new coordinates of our points
      list_points_to_detect = np.float32(list_downoids).reshape(-1, 1, 2)
      transformed_points = cv2.perspectiveTransform(list_points_to_detect, matrix)
-     # Loop over the points and add them to the list that will be returned
+     Loop over the points and add them to the list that will be returned
      transformed_points_list = list()
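
For reference, the pairing logic quoted in this revision relies on `itertools.combinations` enumerating pairs in a fixed order, which is why `list_indexes[i]` can recover the original indexes behind the i-th point pair. A self-contained sketch of that check; the sample points and the threshold are made up for illustration:

```python
import itertools
import math

# Made-up bird-view points (x, y); in the question they come from
# compute_point_perspective_transformation.
transformed_downoids = [(100, 120), (130, 160), (400, 400)]
distance_minimum = "110"  # the question reads this from input(), hence a string

# Both calls to combinations() enumerate pairs in the same order, so the
# i-th index pair names the two points behind the i-th point pair.
list_indexes = list(itertools.combinations(range(len(transformed_downoids)), 2))
for i, pair in enumerate(itertools.combinations(transformed_downoids, r=2)):
    distance = math.sqrt((pair[0][0] - pair[1][0]) ** 2
                         + (pair[0][1] - pair[1][1]) ** 2)
    if distance < int(distance_minimum):
        index_pt1, index_pt2 = list_indexes[i]
        print("points {} and {} are too close: {:.1f}".format(
            index_pt1, index_pt2, distance))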
 

1

I added the code.

2021/08/01 13:49

Posted

trafield

Score 0

test CHANGED
File without changes
test CHANGED
@@ -18,12 +18,6 @@
 
-
- ### Relevant source code
-
  Traceback (most recent call last):
 
  File "social_distanciation_video_detection.py", line 193, in <module>
@@ -36,4 +30,248 @@
 
  AttributeError: 'NoneType' object has no attribute 'shape'
 
+ ``### Relevant source code
+
+ Code for social_distanciation_video_detection.py
+
+ ######################################################
+ ######### #########
+ # START THE VIDEO STREAM #
+ ######### #########
+ ######################################################
+ vs = cv2.VideoCapture(video_path)
+ output_video_1,output_video_2 = None,None
+ # Loop until the end of the video stream
+ while True:
+     # Load the image of the ground and resize it to the correct size
+     img = cv2.imread("../img/chemin_1.png")
+     bird_view_img = cv2.resize(img, dim, interpolation = cv2.INTER_AREA)
+
+     # Load the frame
+     (frame_exists, frame) = vs.read()
+     # Test if it has reached the end of the video
+     if not frame_exists:
- ``
+         break
+     else:
+         # Resize the image to the correct size
+         frame = imutils.resize(frame, width=int(size_frame))
+
+         # Make the predictions for this frame
+         (boxes, scores, classes) = model.predict(frame)
+
+         # Get the human detected in the frame and return the 2 points to build the bounding box
+         array_boxes_detected = get_human_box_detection(boxes,scores[0].tolist(),classes[0].tolist(),frame.shape[0],frame.shape[1])
+
+         # Both of our lists that will contain the centroïds coordonates and the ground points
+         array_centroids,array_groundpoints = get_centroids_and_groundpoints(array_boxes_detected)
+
+         # Use the transform matrix to get the transformed coordonates
+         transformed_downoids = compute_point_perspective_transformation(matrix,array_groundpoints)
+
+         # Show every point on the top view image
+         for point in transformed_downoids:
+             x,y = point
+             cv2.circle(bird_view_img, (int(x),int(y)), BIG_CIRCLE, COLOR_GREEN, 2)
+             cv2.circle(bird_view_img, (int(x),int(y)), SMALL_CIRCLE, COLOR_GREEN, -1)
+
+         # Check if 2 or more people have been detected (otherwise no need to detect)
+         if len(transformed_downoids) >= 2:
+             for index,downoid in enumerate(transformed_downoids):
+                 if not (downoid[0] > width or downoid[0] < 0 or downoid[1] > height+200 or downoid[1] < 0 ):
+                     cv2.rectangle(frame,(array_boxes_detected[index][1],array_boxes_detected[index][0]),(array_boxes_detected[index][3],array_boxes_detected[index][2]),COLOR_GREEN,2)
+
+             # Iterate over every possible 2 by 2 between the points combinations
+             list_indexes = list(itertools.combinations(range(len(transformed_downoids)), 2))
+             for i,pair in enumerate(itertools.combinations(transformed_downoids, r=2)):
+                 # Check if the distance between each combination of points is less than the minimum distance chosen
+                 if math.sqrt( (pair[0][0] - pair[1][0])**2 + (pair[0][1] - pair[1][1])**2 ) < int(distance_minimum):
+                     # Change the colors of the points that are too close from each other to red
+                     if not (pair[0][0] > width or pair[0][0] < 0 or pair[0][1] > height+200 or pair[0][1] < 0 or pair[1][0] > width or pair[1][0] < 0 or pair[1][1] > height+200 or pair[1][1] < 0):
+                         change_color_on_topview(pair)
+                         # Get the equivalent indexes of these points in the original frame and change the color to red
+                         index_pt1 = list_indexes[i][0]
+                         index_pt2 = list_indexes[i][1]
+                         cv2.rectangle(frame,(array_boxes_detected[index_pt1][1],array_boxes_detected[index_pt1][0]),(array_boxes_detected[index_pt1][3],array_boxes_detected[index_pt1][2]),COLOR_RED,2)
+                         cv2.rectangle(frame,(array_boxes_detected[index_pt2][1],array_boxes_detected[index_pt2][0]),(array_boxes_detected[index_pt2][3],array_boxes_detected[index_pt2][2]),COLOR_RED,2)
+
+         # Draw the green rectangle to delimitate the detection zone
+         draw_rectangle(corner_points)
+         # Show both images
+         cv2.imshow("Bird view", bird_view_img)
+         cv2.imshow("Original picture", frame)
+
+         key = cv2.waitKey(1) & 0xFF
+
+         # Write the both outputs video to a local folders
+         if output_video_1 is None and output_video_2 is None:
+             fourcc1 = cv2.VideoWriter_fourcc(*"MJPG")
+             output_video_1 = cv2.VideoWriter("../output/video.avi", fourcc1, 25,(frame.shape[1], frame.shape[0]), True)
+             fourcc2 = cv2.VideoWriter_fourcc(*"MJPG")
+             output_video_2 = cv2.VideoWriter("../output/bird_view.avi", fourcc2, 25,(bird_view_img.shape[1], bird_view_img.shape[0]), True)
+         elif output_video_1 is not None and output_video_2 is not None:
+             output_video_1.write(frame)
+             output_video_2.write(bird_view_img)
+
+         # Break the loop
+         if key == ord("q"):
+             break
+
+ Code for bird_view_transfo_functions.py
+
+ import numpy as np
+ import cv2
+
+ def compute_perspective_transform(corner_points,width,height,image):
+     """ Compute the transformation matrix
+     @ corner_points : 4 corner points selected from the image
+     @ height, width : size of the image
+     """
+     # Create an array out of the 4 corner points
+     corner_points_array = np.float32(corner_points)
+     # Create an array with the parameters (the dimensions) required to build the matrix
+     img_params = np.float32([[0,0],[width,0],[0,height],[width,height]])
+     # Compute and return the transformation matrix
+     matrix = cv2.getPerspectiveTransform(corner_points_array,img_params)
+     img_transformed = cv2.warpPerspective(image,matrix,(width,height))
+     return matrix,img_transformed
+
+ def compute_point_perspective_transformation(matrix,list_downoids):
+     """ Apply the perspective transformation to every ground point which have been detected on the main frame.
+     @ matrix : the 3x3 matrix
+     @ list_downoids : list that contains the points to transform
+     return : list containing all the new points
+     """
+     # Compute the new coordinates of our points
+     list_points_to_detect = np.float32(list_downoids).reshape(-1, 1, 2)
+     transformed_points = cv2.perspectiveTransform(list_points_to_detect, matrix)
+     # Loop over the points and add them to the list that will be returned
+     transformed_points_list = list()
+     for i in range(0,transformed_points.shape[0]):
+         transformed_points_list.append([transformed_points[i][0][0],transformed_points[i][0][1]])
+     return transformed_points_list
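
For reference, a self-contained sketch of how the two helpers in `bird_view_transfo_functions.py` fit together: `cv2.getPerspectiveTransform` builds a 3x3 homography from four corner points, `cv2.warpPerspective` renders the bird's-eye view, and `cv2.perspectiveTransform` maps individual ground points with the same matrix. The corner points, output size, and ground point below are invented for illustration and are not from the question:

```python
import numpy as np
import cv2

# Invented region of interest inside a synthetic 480x640 frame, listed in
# the order the code above expects: top-left, top-right, bottom-left,
# bottom-right (matching img_params below).
corner_points = [(100, 100), (540, 100), (50, 400), (590, 400)]
width, height = 300, 400
image = np.zeros((480, 640, 3), np.uint8)

corner_points_array = np.float32(corner_points)
img_params = np.float32([[0, 0], [width, 0], [0, height], [width, height]])
matrix = cv2.getPerspectiveTransform(corner_points_array, img_params)
bird_view = cv2.warpPerspective(image, matrix, (width, height))

# Map a single ground point into the bird's-eye view, the same way
# compute_point_perspective_transformation does for a whole list.
ground_point = np.float32([[320, 250]]).reshape(-1, 1, 2)
transformed = cv2.perspectiveTransform(ground_point, matrix)
print(bird_view.shape, transformed[0][0])  # (400, 300, 3) and the mapped (x, y)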