質問編集履歴

1

ソースコードの追加

2021/11/28 16:25

投稿

caramel
caramel

スコア14

test CHANGED
（ファイルに変更はありません）
test CHANGED
@@ -88,4 +88,376 @@
88
88
 
89
89
 
90
90
 
91
+
92
+
91
93
  ```
94
+
95
+ ### 該当のソースコード
96
+
97
+
98
+
99
+ ```python
100
+
101
+ #! /usr/bin/env python
102
+
103
+ # -*- coding: utf-8 -*-
104
+
105
+
106
+
107
+ from __future__ import division, print_function, absolute_import
108
+
109
+ import os
110
+
111
+ import datetime
112
+
113
+ from timeit import time
114
+
115
+ import warnings
116
+
117
+ import cv2
118
+
119
+ import numpy as np
120
+
121
+ import argparse
122
+
123
+ #from PIL import Image
124
+
125
+ from PIL import Image, ImageFont, ImageDraw
126
+
127
+ from yolo import YOLO
128
+
129
+ from deep_sort import preprocessing
130
+
131
+ from deep_sort import nn_matching
132
+
133
+ from deep_sort.detection import Detection
134
+
135
+ from deep_sort.tracker import Tracker
136
+
137
+ from tools import generate_detections as gdet
138
+
139
+ from deep_sort.detection import Detection as ddet
140
+
141
+ from collections import deque
142
+
143
+ from keras import backend
144
+
145
+
146
+
147
+ backend.clear_session()
148
+
149
+ ap = argparse.ArgumentParser()
150
+
151
+ ap.add_argument("-i", "--input",help="path to input video", default = "./test_video/test.avi")
152
+
153
+ ap.add_argument("-c", "--class",help="name of class", default = "person")
154
+
155
+ args = vars(ap.parse_args())
156
+
157
+
158
+
159
+ pts = [deque(maxlen=30) for _ in range(9999)]
160
+
161
+ warnings.filterwarnings('ignore')
162
+
163
+
164
+
165
+ # initialize a list of colors to represent each possible class label
166
+
167
+ np.random.seed(100)
168
+
169
+ COLORS = np.random.randint(0, 255, size=(200, 3),
170
+
171
+ dtype="uint8")
172
+
173
+
174
+
175
+ def main(yolo):
176
+
177
+
178
+
179
+ start = time.time()
180
+
181
+ #Definition of the parameters
182
+
183
+ max_cosine_distance = 0.5 #余弦距离的控制阈值
184
+
185
+ nn_budget = None
186
+
187
+ nms_max_overlap = 0.3 #非极大抑制的阈值
188
+
189
+
190
+
191
+ counter = []
192
+
193
+ #deep_sort
194
+
195
+ #model_filename = 'model_data/market1501.pb'
196
+
197
+ model_filename = 'model_data/mars-small128.pb'
198
+
199
+ encoder = gdet.create_box_encoder(model_filename,batch_size=1)
200
+
201
+
202
+
203
+ metric = nn_matching.NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
204
+
205
+ tracker = Tracker(metric)
206
+
207
+
208
+
209
+ writeVideo_flag = True
210
+
211
+ #video_path = "./output/output.avi"
212
+
213
+ video_capture = cv2.VideoCapture(args["input"])
214
+
215
+
216
+
217
+ if writeVideo_flag:
218
+
219
+ # Define the codec and create VideoWriter object
220
+
221
+ w = int(video_capture.get(3))
222
+
223
+ h = int(video_capture.get(4))
224
+
225
+ #fourcc = cv2.VideoWriter_fourcc(*'MJPG')
226
+
227
+ fourcc = cv2.VideoWriter_fourcc(*'XVID')
228
+
229
+ out = cv2.VideoWriter('./output/'+args["input"][43:57]+ "_" + args["class"] + '_output.avi', fourcc, 15, (w, h))
230
+
231
+ list_file = open('detection.txt', 'w')
232
+
233
+ frame_index = -1
234
+
235
+
236
+
237
+ fps = 0.0
238
+
239
+
240
+
241
+ font = ImageFont.truetype(font='font/yumin.ttf', size=30)
242
+
243
+
244
+
245
+ while True:
246
+
247
+
248
+
249
+ ret, frame = video_capture.read() # frame shape 640*480*3
250
+
251
+ if ret != True:
252
+
253
+ break
254
+
255
+ t1 = time.time()
256
+
257
+
258
+
259
+ # image = Image.fromarray(frame)
260
+
261
+ image = Image.fromarray(frame[...,::-1]) #bgr to rgb
262
+
263
+ draw = ImageDraw.Draw(image)
264
+
265
+
266
+
267
+ boxs,class_names = yolo.detect_image(image)
268
+
269
+ features = encoder(frame,boxs)
270
+
271
+ # score to 1.0 here).
272
+
273
+ detections = [Detection(bbox, 1.0, feature) for bbox, feature in zip(boxs, features)]
274
+
275
+ # Run non-maxima suppression.
276
+
277
+ boxes = np.array([d.tlwh for d in detections])
278
+
279
+ scores = np.array([d.confidence for d in detections])
280
+
281
+ indices = preprocessing.non_max_suppression(boxes, nms_max_overlap, scores)
282
+
283
+ detections = [detections[i] for i in indices]
284
+
285
+
286
+
287
+ # Call the tracker
288
+
289
+ tracker.predict()
290
+
291
+ tracker.update(detections)
292
+
293
+
294
+
295
+ i = int(0)
296
+
297
+ indexIDs = []
298
+
299
+ c = []
300
+
301
+ boxes = []
302
+
303
+ for det in detections:
304
+
305
+ bbox = det.to_tlbr()
306
+
307
+ cv2.rectangle(frame,(int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),(255,255,255), 2)
308
+
309
+
310
+
311
+ for track in tracker.tracks:
312
+
313
+ if not track.is_confirmed() or track.time_since_update > 1:
314
+
315
+ continue
316
+
317
+ #boxes.append([track[0], track[1], track[2], track[3]])
318
+
319
+ indexIDs.append(int(track.track_id))
320
+
321
+ counter.append(int(track.track_id))
322
+
323
+ bbox = track.to_tlbr()
324
+
325
+ color = [int(c) for c in COLORS[indexIDs[i] % len(COLORS)]]
326
+
327
+
328
+
329
+ cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),(color), 3)
330
+
331
+ cv2.putText(frame,str(track.track_id),(int(bbox[0]), int(bbox[1] -50)),0, 5e-3 * 150, (color),2)
332
+
333
+ if len(class_names) > 0:
334
+
335
+ class_name = class_names[0]
336
+
337
+ #cv2.putText(frame, str(class_names[0]),(int(bbox[0]), int(bbox[1] -20)),0, 5e-3 * 150, (color),2)
338
+
339
+ draw.text((int(bbox[0]), int(bbox[1])), str(class_names[0]), fill=(255, 255, 255), font=font)
340
+
341
+ frame = np.array(image[...,::-1])
342
+
343
+ #frame = np.array(image)
344
+
345
+ i += 1
346
+
347
+ #bbox_center_point(x,y)
348
+
349
+ center = (int(((bbox[0])+(bbox[2]))/2),int(((bbox[1])+(bbox[3]))/2))
350
+
351
+ #track_id[center]
352
+
353
+ pts[track.track_id].append(center)
354
+
355
+ thickness = 5
356
+
357
+ #center point
358
+
359
+ cv2.circle(frame, (center), 1, color, thickness)
360
+
361
+
362
+
363
+ #draw motion path
364
+
365
+ for j in range(1, len(pts[track.track_id])):
366
+
367
+ if pts[track.track_id][j - 1] is None or pts[track.track_id][j] is None:
368
+
369
+ continue
370
+
371
+ thickness = int(np.sqrt(64 / float(j + 1)) * 2)
372
+
373
+ cv2.line(frame,(pts[track.track_id][j-1]), (pts[track.track_id][j]),(color),thickness)
374
+
375
+ #cv2.putText(frame, str(class_names[j]),(int(bbox[0]), int(bbox[1] -20)),0, 5e-3 * 150, (255,255,255),2)
376
+
377
+
378
+
379
+ count = len(set(counter))
380
+
381
+ cv2.putText(frame, "Total Object Counter: "+str(count),(int(20), int(120)),0, 5e-3 * 200, (0,255,0),2)
382
+
383
+ cv2.putText(frame, "Current Object Counter: "+str(i),(int(20), int(80)),0, 5e-3 * 200, (0,255,0),2)
384
+
385
+ cv2.putText(frame, "FPS: %f"%(fps),(int(20), int(40)),0, 5e-3 * 200, (0,255,0),3)
386
+
387
+ cv2.namedWindow("YOLO3_Deep_SORT", 0);
388
+
389
+ cv2.resizeWindow('YOLO3_Deep_SORT', 1024, 768);
390
+
391
+ cv2.imshow('YOLO3_Deep_SORT', frame)
392
+
393
+
394
+
395
+ if writeVideo_flag:
396
+
397
+ #save a frame
398
+
399
+ out.write(frame)
400
+
401
+ frame_index = frame_index + 1
402
+
403
+ list_file.write(str(frame_index)+' ')
404
+
405
+ if len(boxs) != 0:
406
+
407
+ for i in range(0,len(boxs)):
408
+
409
+ list_file.write(str(boxs[i][0]) + ' '+str(boxs[i][1]) + ' '+str(boxs[i][2]) + ' '+str(boxs[i][3]) + ' ')
410
+
411
+ list_file.write('\n')
412
+
413
+ fps = ( fps + (1./(time.time()-t1)) ) / 2
414
+
415
+ #print(set(counter))
416
+
417
+
418
+
419
+ # Press Q to stop!
420
+
421
+ if cv2.waitKey(1) & 0xFF == ord('q'):
422
+
423
+ break
424
+
425
+ print(" ")
426
+
427
+ print("[Finish]")
428
+
429
+ end = time.time()
430
+
431
+
432
+
433
+ if len(pts[track.track_id]) != None:
434
+
435
+ print(args["input"][43:57]+": "+ str(count) + " " + str(class_name) +' Found')
436
+
437
+
438
+
439
+ else:
440
+
441
+ print("[No Found]")
442
+
443
+
444
+
445
+ video_capture.release()
446
+
447
+
448
+
449
+ if writeVideo_flag:
450
+
451
+ out.release()
452
+
453
+ list_file.close()
454
+
455
+ cv2.destroyAllWindows()
456
+
457
+
458
+
459
+ if __name__ == '__main__':
460
+
461
+ main(YOLO())
462
+
463
+ ```