Question edit history

2

Added a link

2021/12/22 01:10

Posted

nample

Score: 1

test CHANGED
File without changes
test CHANGED
@@ -8,6 +8,12 @@



+ Downloaded it from GitHub
+
+ [Link content](https://github.com/chandrikadeb7/Face-Mask-Detection)
+
+
+


  ```Python

1

Added "Python"

2021/12/22 01:09

Posted

nample

Score: 1

test CHANGED
File without changes
test CHANGED
@@ -10,7 +10,15 @@



+ ```Python
+
- ### Relevant source code
+ # USAGE
+
+ # python detect_mask_video.py
+
+
+
+ # import the necessary packages

 from tensorflow.keras.applications.mobilenet_v2 import preprocess_input

@@ -36,6 +44,10 @@

 def detect_and_predict_mask(frame, faceNet, maskNet):

+ # grab the dimensions of the frame and then construct a blob
+
+ # from it
+
 (h, w) = frame.shape[:2]

 blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300),
@@ -44,12 +56,18 @@



+ # pass the blob through the network and obtain the face detections
+
 faceNet.setInput(blob)

 detections = faceNet.forward()



+ # initialize our list of faces, their corresponding locations,
+
+ # and the list of predictions from our face mask network
+
 faces = []

 locs = []
@@ -58,26 +76,48 @@



+ # loop over the detections
+
 for i in range(0, detections.shape[2]):

+ # extract the confidence (i.e., probability) associated with
+
+ # the detection
+
 confidence = detections[0, 0, i, 2]



+ # filter out weak detections by ensuring the confidence is
+
+ # greater than the minimum confidence
+
 if confidence > args["confidence"]:

+ # compute the (x, y)-coordinates of the bounding box for
+
+ # the object
+
 box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])

 (startX, startY, endX, endY) = box.astype("int")



+ # ensure the bounding boxes fall within the dimensions of
+
+ # the frame
+
 (startX, startY) = (max(0, startX), max(0, startY))

 (endX, endY) = (min(w - 1, endX), min(h - 1, endY))



+ # extract the face ROI, convert it from BGR to RGB channel
+
+ # ordering, resize it to 224x224, and preprocess it
+
 face = frame[startY:endY, startX:endX]

 if face.any():
@@ -92,18 +132,32 @@



+ # only make a predictions if at least one face was detected
+
 if len(faces) > 0:

+ # for faster inference we'll make batch predictions on *all*
+
+ # faces at the same time rather than one-by-one predictions
+
+ # in the above `for` loop
+
 faces = np.array(faces, dtype="float32")

 preds = maskNet.predict(faces, batch_size=32)



+ # return a 2-tuple of the face locations and their corresponding
+
+ # locations
+
 return (locs, preds)



+ # construct the argument parser and parse the arguments
+
 ap = argparse.ArgumentParser()

 ap.add_argument("-f", "--face", type=str,
@@ -126,6 +180,8 @@



+ # load our serialized face detector model from disk
+
 print("[INFO] loading face detector model...")

 prototxtPath = os.path.sep.join([args["face"], "deploy.prototxt"])
@@ -138,12 +194,16 @@



+ # load the face mask detector model from disk
+
 print("[INFO] loading face mask detector model...")

 maskNet = load_model(args["model"])



+ # initialize the video stream and allow the camera sensor to warm up
+
 print("[INFO] starting video stream...")

 vs = VideoStream(src=0).start()
@@ -152,36 +212,62 @@



+ # loop over the frames from the video stream
+
 while True:

+ # grab the frame from the threaded video stream and resize it
+
+ # to have a maximum width of 400 pixels
+
 frame = vs.read()

 frame = imutils.resize(frame, width=400)



+ # detect faces in the frame and determine if they are wearing a
+
+ # face mask or not
+
 (locs, preds) = detect_and_predict_mask(frame, faceNet, maskNet)



+ # loop over the detected face locations and their corresponding
+
+ # locations
+
 for (box, pred) in zip(locs, preds):

+ # unpack the bounding box and predictions
+
 (startX, startY, endX, endY) = box

 (mask, withoutMask) = pred



+ # determine the class label and color we'll use to draw
+
+ # the bounding box and text
+
 label = "Mask" if mask > withoutMask else "No Mask"

 color = (0, 255, 0) if label == "Mask" else (0, 0, 255)



+ # include the probability in the label
+
 label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)



+ # display the label and bounding box rectangle on the output
+
+ # frame
+
 cv2.putText(frame, label, (startX, startY - 10),

 cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
@@ -190,18 +276,28 @@



+ # show the output frame
+
 cv2.imshow("Frame", frame)

 key = cv2.waitKey(1) & 0xFF



+ # if the `q` key was pressed, break from the loop
+
 if key == ord("q"):

 break



+ # do a bit of cleanup
+
 cv2.destroyAllWindows()

 vs.stop()
+
+
+
+ ```
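
For context, the comments added in this revision describe a two-stage flow: an OpenCV SSD face detector proposes face boxes, and a Keras MobileNetV2 classifier labels each crop as Mask / No Mask. Below is a minimal single-frame sketch of that flow, not the repository's full detect_mask_video.py; the classify_faces helper name, the local file paths (face_detector/deploy.prototxt, face_detector/res10_300x300_ssd_iter_140000.caffemodel, mask_detector.model), and the 0.5 confidence threshold are assumptions based on the linked repository's layout.

```Python
# Minimal sketch (assumed layout from the linked repository; not the full script):
# stage 1 detects faces with OpenCV's SSD detector, stage 2 classifies each crop.
import cv2
import numpy as np
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model

# assumed local paths, mirroring the repository's face_detector/ folder
faceNet = cv2.dnn.readNet("face_detector/deploy.prototxt",
                          "face_detector/res10_300x300_ssd_iter_140000.caffemodel")
maskNet = load_model("mask_detector.model")  # assumes a [mask, withoutMask] output

def classify_faces(frame, conf_threshold=0.5):  # hypothetical helper, one frame
    (h, w) = frame.shape[:2]
    # stage 1: build a 300x300 blob and run the face detector
    blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300), (104.0, 177.0, 123.0))
    faceNet.setInput(blob)
    detections = faceNet.forward()

    results = []
    for i in range(detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        if confidence < conf_threshold:
            continue  # skip weak detections
        # scale the box back to frame coordinates and clamp it to the frame
        box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
        (startX, startY, endX, endY) = box.astype("int")
        (startX, startY) = (max(0, startX), max(0, startY))
        (endX, endY) = (min(w - 1, endX), min(h - 1, endY))

        face = frame[startY:endY, startX:endX]
        if face.size == 0:
            continue
        # stage 2: BGR -> RGB, resize to 224x224, MobileNetV2 preprocessing, classify
        face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
        face = cv2.resize(face, (224, 224))
        face = preprocess_input(img_to_array(face))
        (mask, withoutMask) = maskNet.predict(np.expand_dims(face, axis=0))[0]
        results.append(((startX, startY, endX, endY), (mask, withoutMask)))
    return results
```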