
Question edit history

Revision 2

Added the contents of the JSON

Posted 2018/11/12 12:45

nano-ryo

Score: 10

title: no changes
body: CHANGED
@@ -68,6 +68,186 @@
     _ = plt.axis("off")
 
 
+<<Addendum: contents of the JSON>>
+[
+  {
+    "faceId": "35102aa8-4263-4139-bfd6-185bb0f52d88",
+    "faceRectangle": {
+      "top": 208,
+      "left": 228,
+      "width": 91,
+      "height": 91
+    },
+    "faceAttributes": {
+      "smile": 1,
+      "headPose": {
+        "pitch": 0,
+        "roll": 4.3,
+        "yaw": -0.3
+      },
+      "gender": "female",
+      "age": 27,
+      "facialHair": {
+        "moustache": 0,
+        "beard": 0,
+        "sideburns": 0
+      },
+      "glasses": "NoGlasses",
+      "emotion": {
+        "anger": 0,
+        "contempt": 0,
+        "disgust": 0,
+        "fear": 0,
+        "happiness": 1,
+        "neutral": 0,
+        "sadness": 0,
+        "surprise": 0
+      },
+      "blur": {
+        "blurLevel": "low",
+        "value": 0
+      },
+      "exposure": {
+        "exposureLevel": "goodExposure",
+        "value": 0.65
+      },
+      "noise": {
+        "noiseLevel": "low",
+        "value": 0
+      },
+      "makeup": {
+        "eyeMakeup": true,
+        "lipMakeup": true
+      },
+      "accessories": [],
+      "occlusion": {
+        "foreheadOccluded": false,
+        "eyeOccluded": false,
+        "mouthOccluded": false
+      },
+      "hair": {
+        "bald": 0.06,
+        "invisible": false,
+        "hairColor": [
+          {
+            "color": "brown",
+            "confidence": 1
+          },
+          {
+            "color": "blond",
+            "confidence": 0.5
+          },
+          {
+            "color": "black",
+            "confidence": 0.34
+          },
+          {
+            "color": "red",
+            "confidence": 0.32
+          },
+          {
+            "color": "gray",
+            "confidence": 0.14
+          },
+          {
+            "color": "other",
+            "confidence": 0.03
+          }
+        ]
+      }
+    }
+  },
+  {
+    "faceId": "42502166-31bb-4ac8-81c0-a7adcb3b3e70",
+    "faceRectangle": {
+      "top": 109,
+      "left": 125,
+      "width": 79,
+      "height": 79
+    },
+    "faceAttributes": {
+      "smile": 1,
+      "headPose": {
+        "pitch": 0,
+        "roll": 1.7,
+        "yaw": 2.1
+      },
+      "gender": "male",
+      "age": 32,
+      "facialHair": {
+        "moustache": 0.4,
+        "beard": 0.4,
+        "sideburns": 0.4
+      },
+      "glasses": "NoGlasses",
+      "emotion": {
+        "anger": 0,
+        "contempt": 0,
+        "disgust": 0,
+        "fear": 0,
+        "happiness": 1,
+        "neutral": 0,
+        "sadness": 0,
+        "surprise": 0
+      },
+      "blur": {
+        "blurLevel": "low",
+        "value": 0.11
+      },
+      "exposure": {
+        "exposureLevel": "goodExposure",
+        "value": 0.74
+      },
+      "noise": {
+        "noiseLevel": "low",
+        "value": 0
+      },
+      "makeup": {
+        "eyeMakeup": false,
+        "lipMakeup": true
+      },
+      "accessories": [],
+      "occlusion": {
+        "foreheadOccluded": false,
+        "eyeOccluded": false,
+        "mouthOccluded": false
+      },
+      "hair": {
+        "bald": 0.02,
+        "invisible": false,
+        "hairColor": [
+          {
+            "color": "brown",
+            "confidence": 1
+          },
+          {
+            "color": "blond",
+            "confidence": 0.94
+          },
+          {
+            "color": "red",
+            "confidence": 0.76
+          },
+          {
+            "color": "gray",
+            "confidence": 0.2
+          },
+          {
+            "color": "other",
+            "confidence": 0.03
+          },
+          {
+            "color": "black",
+            "confidence": 0.01
+          }
+        ]
+      }
+    }
+  }
+]
+
+
+
 ```
 
 ### What I tried
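
A minimal sketch of how the JSON above can be walked in Python. In the question's code this list is exactly what `response.json()` returns (bound to `faces`); the file name `faces.json` below is only a hypothetical stand-in for having saved that output locally.

```python
import json

# Hypothetical: the Face API response shown above, saved locally as "faces.json".
# In the question's code, `faces = response.json()` yields the same list directly.
with open("faces.json", encoding="utf-8") as f:
    faces = json.load(f)

for face in faces:
    rect = face["faceRectangle"]     # bounding box of the detected face
    attrs = face["faceAttributes"]   # age, gender, emotion, hair, ...
    print(face["faceId"])
    print("  box:", rect["left"], rect["top"], rect["width"], rect["height"])
    print("  gender/age:", attrs["gender"], attrs["age"])
    print("  happiness:", attrs["emotion"]["happiness"])
```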

Revision 1

There was an error in the source code I posted, so I am correcting it.

Posted 2018/11/12 12:45

nano-ryo

Score: 10

title: no changes
body: CHANGED
@@ -22,6 +22,7 @@
 #%matplotlib inline
 import matplotlib.pyplot as plt
 from PIL import Image
+from matplotlib import patches
 from io import BytesIO
 
 # Replace <Subscription Key> with your valid subscription key.
@@ -35,34 +36,38 @@
 # Free trial subscription keys are generated in the westcentralus region.
 # If you use a free trial subscription key, you shouldn't need to change
 # this region.
-vision_base_url = "https://westcentralus.api.cognitive.microsoft.com/vision/v2.0/"
+face_api_url = 'https://westcentralus.api.cognitive.microsoft.com/face/v1.0/detect'
 
-analyze_url = vision_base_url + "analyze"
+# Set image_url to the URL of an image that you want to analyze.
+image_url = 'https://how-old.net/Images/faces2/main007.jpg'
 
-# Set image_path to the local path of an image that you want to analyze.
-image_path = "C:/Documents/ImageToAnalyze.jpg"
+headers = {'Ocp-Apim-Subscription-Key': subscription_key}
+params = {
+    'returnFaceId': 'true',
+    'returnFaceLandmarks': 'false',
+    'returnFaceAttributes': 'age,gender,headPose,smile,facialHair,glasses,' +
+    'emotion,hair,makeup,occlusion,accessories,blur,exposure,noise'
+}
+data = {'url': image_url}
+response = requests.post(face_api_url, params=params, headers=headers, json=data)
+faces = response.json()
 
-# Read the image into a byte array
-image_data = open(image_path, "rb").read()
-headers = {'Ocp-Apim-Subscription-Key': subscription_key,
-           'Content-Type': 'application/octet-stream'}
-params = {'visualFeatures': 'Categories,Description,Color'}
-response = requests.post(
-    analyze_url, headers=headers, params=params, data=image_data)
-response.raise_for_status()
+# Display the original image and overlay it with the face information.
+image = Image.open(BytesIO(requests.get(image_url).content))
+plt.figure(figsize=(8, 8))
+ax = plt.imshow(image, alpha=0.6)
+for face in faces:
+    fr = face["faceRectangle"]
+    fa = face["faceAttributes"]
+    origin = (fr["left"], fr["top"])
+    p = patches.Rectangle(
+        origin, fr["width"], fr["height"], fill=False, linewidth=2, color='b')
+    ax.axes.add_patch(p)
+    plt.text(origin[0], origin[1], "%s, %d"%(fa["gender"].capitalize(), fa["age"]),
+             fontsize=20, weight="bold", va="bottom")
+    _ = plt.axis("off")
 
-# The 'analysis' object contains various fields that describe the image. The most
-# relevant caption for the image is obtained from the 'description' property.
-analysis = response.json()
-print(analysis)
-image_caption = analysis["description"]["captions"][0]["text"].capitalize()
 
-# Display the image and overlay it with the caption.
-image = Image.open(BytesIO(image_data))
-plt.imshow(image)
-plt.axis("off")
-_ = plt.title(image_caption, size="x-large", y=-0.1)
-
 ```
 
 ### What I tried
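
For convenience, here is the snippet that the added lines of this revision assemble into, as a sketch rather than a verified script: `subscription_key` must be set to a valid Face API key, and `import requests` plus the key assignment are assumed from the earlier, unchanged part of the question's code (outside this hunk).

```python
import requests
import matplotlib.pyplot as plt
from PIL import Image
from matplotlib import patches
from io import BytesIO

# Assumed from the unchanged part of the question's code:
subscription_key = "<Subscription Key>"  # replace with a valid Face API key

face_api_url = 'https://westcentralus.api.cognitive.microsoft.com/face/v1.0/detect'

# Set image_url to the URL of an image that you want to analyze.
image_url = 'https://how-old.net/Images/faces2/main007.jpg'

headers = {'Ocp-Apim-Subscription-Key': subscription_key}
params = {
    'returnFaceId': 'true',
    'returnFaceLandmarks': 'false',
    'returnFaceAttributes': 'age,gender,headPose,smile,facialHair,glasses,' +
    'emotion,hair,makeup,occlusion,accessories,blur,exposure,noise'
}
data = {'url': image_url}
response = requests.post(face_api_url, params=params, headers=headers, json=data)
faces = response.json()

# Display the original image and overlay it with the face information.
image = Image.open(BytesIO(requests.get(image_url).content))
plt.figure(figsize=(8, 8))
ax = plt.imshow(image, alpha=0.6)
for face in faces:
    fr = face["faceRectangle"]
    fa = face["faceAttributes"]
    origin = (fr["left"], fr["top"])
    p = patches.Rectangle(
        origin, fr["width"], fr["height"], fill=False, linewidth=2, color='b')
    ax.axes.add_patch(p)
    plt.text(origin[0], origin[1], "%s, %d" % (fa["gender"].capitalize(), fa["age"]),
             fontsize=20, weight="bold", va="bottom")
    _ = plt.axis("off")
plt.show()  # needed when running outside a notebook
```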