Question edit history

Revision 2

Added the JSON contents.

2018/11/12 12:45

Posted

nano-ryo

Score: 10

test CHANGED
File without changes
test CHANGED
@@ -138,6 +138,366 @@

+ <<Addendum: the JSON contents>>
+
+ [
+   {
+     "faceId": "35102aa8-4263-4139-bfd6-185bb0f52d88",
+     "faceRectangle": {
+       "top": 208,
+       "left": 228,
+       "width": 91,
+       "height": 91
+     },
+     "faceAttributes": {
+       "smile": 1,
+       "headPose": {
+         "pitch": 0,
+         "roll": 4.3,
+         "yaw": -0.3
+       },
+       "gender": "female",
+       "age": 27,
+       "facialHair": {
+         "moustache": 0,
+         "beard": 0,
+         "sideburns": 0
+       },
+       "glasses": "NoGlasses",
+       "emotion": {
+         "anger": 0,
+         "contempt": 0,
+         "disgust": 0,
+         "fear": 0,
+         "happiness": 1,
+         "neutral": 0,
+         "sadness": 0,
+         "surprise": 0
+       },
+       "blur": {
+         "blurLevel": "low",
+         "value": 0
+       },
+       "exposure": {
+         "exposureLevel": "goodExposure",
+         "value": 0.65
+       },
+       "noise": {
+         "noiseLevel": "low",
+         "value": 0
+       },
+       "makeup": {
+         "eyeMakeup": true,
+         "lipMakeup": true
+       },
+       "accessories": [],
+       "occlusion": {
+         "foreheadOccluded": false,
+         "eyeOccluded": false,
+         "mouthOccluded": false
+       },
+       "hair": {
+         "bald": 0.06,
+         "invisible": false,
+         "hairColor": [
+           { "color": "brown", "confidence": 1 },
+           { "color": "blond", "confidence": 0.5 },
+           { "color": "black", "confidence": 0.34 },
+           { "color": "red", "confidence": 0.32 },
+           { "color": "gray", "confidence": 0.14 },
+           { "color": "other", "confidence": 0.03 }
+         ]
+       }
+     }
+   },
+   {
+     "faceId": "42502166-31bb-4ac8-81c0-a7adcb3b3e70",
+     "faceRectangle": {
+       "top": 109,
+       "left": 125,
+       "width": 79,
+       "height": 79
+     },
+     "faceAttributes": {
+       "smile": 1,
+       "headPose": {
+         "pitch": 0,
+         "roll": 1.7,
+         "yaw": 2.1
+       },
+       "gender": "male",
+       "age": 32,
+       "facialHair": {
+         "moustache": 0.4,
+         "beard": 0.4,
+         "sideburns": 0.4
+       },
+       "glasses": "NoGlasses",
+       "emotion": {
+         "anger": 0,
+         "contempt": 0,
+         "disgust": 0,
+         "fear": 0,
+         "happiness": 1,
+         "neutral": 0,
+         "sadness": 0,
+         "surprise": 0
+       },
+       "blur": {
+         "blurLevel": "low",
+         "value": 0.11
+       },
+       "exposure": {
+         "exposureLevel": "goodExposure",
+         "value": 0.74
+       },
+       "noise": {
+         "noiseLevel": "low",
+         "value": 0
+       },
+       "makeup": {
+         "eyeMakeup": false,
+         "lipMakeup": true
+       },
+       "accessories": [],
+       "occlusion": {
+         "foreheadOccluded": false,
+         "eyeOccluded": false,
+         "mouthOccluded": false
+       },
+       "hair": {
+         "bald": 0.02,
+         "invisible": false,
+         "hairColor": [
+           { "color": "brown", "confidence": 1 },
+           { "color": "blond", "confidence": 0.94 },
+           { "color": "red", "confidence": 0.76 },
+           { "color": "gray", "confidence": 0.2 },
+           { "color": "other", "confidence": 0.03 },
+           { "color": "black", "confidence": 0.01 }
+         ]
+       }
+     }
+   }
+ ]

  ```

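For reference, the block appended above is the body that the Face API detect endpoint returns: a JSON array with one object per detected face, each holding a faceRectangle in pixel coordinates plus the requested faceAttributes. Below is a minimal sketch of reading the headline fields out of it; the raw variable and the trimmed-down copy of the two entries are illustrative only, not part of the question.

```
import json

# Hypothetical variable: `raw` holds two entries trimmed from the response
# appended above (most attributes omitted to keep the sketch short).
raw = '''[
  {"faceRectangle": {"top": 208, "left": 228, "width": 91, "height": 91},
   "faceAttributes": {"gender": "female", "age": 27,
                      "emotion": {"happiness": 1, "neutral": 0}}},
  {"faceRectangle": {"top": 109, "left": 125, "width": 79, "height": 79},
   "faceAttributes": {"gender": "male", "age": 32,
                      "emotion": {"happiness": 1, "neutral": 0}}}
]'''

faces = json.loads(raw)

for face in faces:
    rect = face["faceRectangle"]    # pixel bounding box of the detected face
    attrs = face["faceAttributes"]  # age, gender, emotion, ...
    # Pick the emotion with the highest confidence score.
    top_emotion = max(attrs["emotion"], key=attrs["emotion"].get)
    print("%s, %d at (%d, %d): %s" % (attrs["gender"].capitalize(),
                                      attrs["age"], rect["left"],
                                      rect["top"], top_emotion))
```

Run as-is, this prints one line per face, e.g. "Female, 27 at (228, 208): happiness".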

Revision 1

There was an error in the source code I posted, so I am correcting it.

2018/11/12 12:45

Posted

nano-ryo

Score: 10

test CHANGED
File without changes
test CHANGED
@@ -46,6 +46,8 @@

  from PIL import Image

+ from matplotlib import patches
+
  from io import BytesIO

@@ -72,59 +74,67 @@

  # this region.

- vision_base_url = "https://westcentralus.api.cognitive.microsoft.com/vision/v2.0/"
- analyze_url = vision_base_url + "analyze"
+ face_api_url = 'https://westcentralus.api.cognitive.microsoft.com/face/v1.0/detect'

- # Set image_path to the local path of an image that you want to analyze.
- image_path = "C:/Documents/ImageToAnalyze.jpg"
+ # Set image_url to the URL of an image that you want to analyze.
+ image_url = 'https://how-old.net/Images/faces2/main007.jpg'

- # Read the image into a byte array
- image_data = open(image_path, "rb").read()
- headers = {'Ocp-Apim-Subscription-Key': subscription_key,
-            'Content-Type': 'application/octet-stream'}
- params = {'visualFeatures': 'Categories,Description,Color'}
- response = requests.post(
-     analyze_url, headers=headers, params=params, data=image_data)
- response.raise_for_status()
+ headers = {'Ocp-Apim-Subscription-Key': subscription_key}
+ params = {
+     'returnFaceId': 'true',
+     'returnFaceLandmarks': 'false',
+     'returnFaceAttributes': 'age,gender,headPose,smile,facialHair,glasses,' +
+                             'emotion,hair,makeup,occlusion,accessories,blur,exposure,noise'
+ }
+ data = {'url': image_url}
+ response = requests.post(face_api_url, params=params, headers=headers, json=data)
+ faces = response.json()

- # The 'analysis' object contains various fields that describe the image. The most
- # relevant caption for the image is obtained from the 'description' property.
- analysis = response.json()
- print(analysis)
- image_caption = analysis["description"]["captions"][0]["text"].capitalize()

- # Display the image and overlay it with the caption.
- image = Image.open(BytesIO(image_data))
- plt.imshow(image)
- plt.axis("off")
- _ = plt.title(image_caption, size="x-large", y=-0.1)
+ # Display the original image and overlay it with the face information.
+ image = Image.open(BytesIO(requests.get(image_url).content))
+ plt.figure(figsize=(8, 8))
+ ax = plt.imshow(image, alpha=0.6)
+ for face in faces:
+     fr = face["faceRectangle"]
+     fa = face["faceAttributes"]
+     origin = (fr["left"], fr["top"])
+     p = patches.Rectangle(
+         origin, fr["width"], fr["height"], fill=False, linewidth=2, color='b')
+     ax.axes.add_patch(p)
+     plt.text(origin[0], origin[1], "%s, %d" % (fa["gender"].capitalize(), fa["age"]),
+              fontsize=20, weight="bold", va="bottom")
+     _ = plt.axis("off")
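For completeness, the JSON appended in revision 2 looks like the parsed body that response.json() yields from this corrected script. Below is a minimal sketch of reproducing that pretty-printed dump; the placeholder subscription key and the json.dumps step are assumptions for illustration, not part of the question.

```
import json
import requests

subscription_key = 'YOUR_FACE_API_KEY'  # placeholder; substitute your own key
face_api_url = 'https://westcentralus.api.cognitive.microsoft.com/face/v1.0/detect'
image_url = 'https://how-old.net/Images/faces2/main007.jpg'

headers = {'Ocp-Apim-Subscription-Key': subscription_key}
params = {'returnFaceId': 'true',
          'returnFaceLandmarks': 'false',
          'returnFaceAttributes': 'age,gender,headPose,smile,facialHair,glasses,'
                                  'emotion,hair,makeup,occlusion,accessories,blur,exposure,noise'}
response = requests.post(face_api_url, params=params, headers=headers,
                         json={'url': image_url})
response.raise_for_status()

# Pretty-printing the parsed body yields a block like the one shown in revision 2.
print(json.dumps(response.json(), indent=2))
```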