質問編集履歴
1
ソースコードの追加
test
CHANGED
File without changes
|
test
CHANGED
@@ -30,6 +30,388 @@
|
|
30
30
|
|
31
31
|
|
32
32
|
|
33
|
+
### ソースコード
|
34
|
+
|
35
|
+
```python
|
36
|
+
|
37
|
+
#!/usr/bin/env python
|
38
|
+
|
39
|
+
# -*- coding: utf-8 -*-
|
40
|
+
|
41
|
+
|
42
|
+
|
43
|
+
import numpy as np
|
44
|
+
|
45
|
+
import chainer
|
46
|
+
|
47
|
+
import chainer.functions as F
|
48
|
+
|
49
|
+
from chainer.links.caffe import CaffeFunction
|
50
|
+
|
51
|
+
import pickle
|
52
|
+
|
53
|
+
import models.Alex
|
54
|
+
|
55
|
+
from PIL import Image
|
56
|
+
|
57
|
+
|
58
|
+
|
59
|
+
from adv_models.fast_gradient import fast_gradient
|
60
|
+
|
61
|
+
from adv_models.iterative_gradient import iterative_gradient
|
62
|
+
|
63
|
+
from adv_models.iterative_least_likely import iterative_least_likely
|
64
|
+
|
65
|
+
|
66
|
+
|
67
|
+
|
68
|
+
|
69
|
+
IMAGENET_MEAN_FILE = "../data/ilsvrc_2012_mean.npy"
|
70
|
+
|
71
|
+
INPUT_IMAGE_SIZE = 227
|
72
|
+
|
73
|
+
|
74
|
+
|
75
|
+
def load_caffemodel(model_path):
    """Load a Caffe model file and wrap it as a Chainer ``CaffeFunction``.

    Args:
        model_path: path to a ``.caffemodel`` file.

    Returns:
        The wrapped ``CaffeFunction`` instance.
    """
    return CaffeFunction(model_path)
|
80
|
+
|
81
|
+
|
82
|
+
|
83
|
+
|
84
|
+
|
85
|
+
def save_models(caffe_model, save_model_path):
    """Serialize *caffe_model* to *save_model_path* using pickle.

    Args:
        caffe_model: any picklable object (here, a converted Caffe model).
        save_model_path: destination file path, opened in binary mode.
    """
    with open(save_model_path, 'wb') as out:
        pickle.dump(caffe_model, out)
|
90
|
+
|
91
|
+
|
92
|
+
|
93
|
+
|
94
|
+
|
95
|
+
def load_models(save_model_path):
    """Unpickle and return a model previously written by ``save_models``.

    SECURITY NOTE: ``pickle.load`` executes arbitrary code embedded in the
    file — only load model files you created yourself.

    Args:
        save_model_path: path of the pickled model file.

    Returns:
        The unpickled object.
    """
    with open(save_model_path, 'rb') as f:
        return pickle.load(f)
|
102
|
+
|
103
|
+
|
104
|
+
|
105
|
+
|
106
|
+
|
107
|
+
def copy_model(src, dst):
    """Recursively copy parameters from chain *src* into chain *dst*.

    Children are matched by name; a child is skipped when *dst* has no
    attribute of that name or the types differ.  Sub-chains are recursed
    into, and any Link whose parameter names/shapes all match has its
    parameter arrays copied by reference.  Mismatched links are reported
    and left untouched.
    """
    assert isinstance(src, chainer.Chain)
    assert isinstance(dst, chainer.Chain)
    for src_child in src.children():
        if src_child.name not in dst.__dict__:
            continue
        dst_child = dst[src_child.name]
        if type(src_child) != type(dst_child):
            continue
        if isinstance(src_child, chainer.Chain):
            # Recurse first; a Chain is also a Link, so its own direct
            # parameters (if any) are handled by the Link branch below.
            copy_model(src_child, dst_child)
        if not isinstance(src_child, chainer.Link):
            continue
        pairs = list(zip(src_child.namedparams(), dst_child.namedparams()))
        # Every corresponding parameter must agree in name and shape.
        compatible = all(
            sp[0] == dp[0] and sp[1].data.shape == dp[1].data.shape
            for sp, dp in pairs
        )
        if not compatible:
            print('Ignore %s because of parameter mismatch' % src_child.name)
            continue
        for sp, dp in pairs:
            dp[1].data = sp[1].data
        print('Copy %s' % src_child.name)
|
154
|
+
|
155
|
+
|
156
|
+
|
157
|
+
|
158
|
+
|
159
|
+
def create_mean_image_array(pic_mean_data_path, size_image):
    """Load a mean-image ``.npy`` file, resize it, and return it as float32.

    NOTE(review): ``Image.fromarray(..., 'RGB')`` requires an H x W x 3
    array — confirm the mean file is stored channel-last, not the CHW
    layout some ImageNet mean files use.

    Args:
        pic_mean_data_path: path of the ``.npy`` mean image.
        size_image: target width/height in pixels.

    Returns:
        ``numpy.float32`` array of the resized mean image.
    """
    raw = np.load(pic_mean_data_path)
    resized = Image.fromarray(raw.astype(np.uint8), 'RGB').resize(
        (size_image, size_image))
    return np.asarray(resized).astype(np.float32)
|
168
|
+
|
169
|
+
|
170
|
+
|
171
|
+
|
172
|
+
|
173
|
+
def substract_mean_image(target_array, mean_array):
    """Return *target_array* minus *mean_array*, elementwise.

    Reference per-channel mean values (BGR): B=104, G=117, R=123.
    """
    return target_array - mean_array
|
184
|
+
|
185
|
+
|
186
|
+
|
187
|
+
|
188
|
+
|
189
|
+
def add_mean_image(target_array, mean_array):
    """Return *target_array* plus *mean_array*, elementwise (undoes
    ``substract_mean_image``)."""
    return target_array + mean_array
|
194
|
+
|
195
|
+
|
196
|
+
|
197
|
+
|
198
|
+
|
199
|
+
def resize_image(original_image_path):
    """Open an image file and resize it to INPUT_IMAGE_SIZE square.

    Prints the size/mode before and after resizing.

    Returns:
        The resized ``PIL.Image`` object.
    """
    source = Image.open(original_image_path)
    print("original image format:{} {}".format(source.size, source.mode))

    resized = source.resize((INPUT_IMAGE_SIZE, INPUT_IMAGE_SIZE))
    print("resize image format:{} {}".format(resized.size, resized.mode))
    return resized
|
212
|
+
|
213
|
+
|
214
|
+
|
215
|
+
|
216
|
+
|
217
|
+
def format2chainer(img_data):
    """Convert an H x W x 3 RGB image to a 1 x 3 x H x W float32 BGR batch.

    Args:
        img_data: array-like RGB image, channel-last.

    Returns:
        4-D ``numpy.float32`` array ready for the Chainer model.
    """
    bgr = np.asarray(img_data).astype(np.float32)[:, :, ::-1]  # RGB -> BGR
    chw = bgr.transpose(2, 0, 1)                               # HWC -> CHW
    return chw[np.newaxis, ...]                                # prepend batch axis
|
232
|
+
|
233
|
+
|
234
|
+
|
235
|
+
|
236
|
+
|
237
|
+
def format2orig(chainer_img):
    """Convert a 3 x H x W BGR array back to H x W x 3 RGB (dtype unchanged).

    Inverse of the layout change done by ``format2chainer`` for a single
    (batch-stripped) image.
    """
    hwc = chainer_img.transpose(1, 2, 0)  # CHW -> HWC
    return hwc[:, :, ::-1]                # BGR -> RGB
|
250
|
+
|
251
|
+
|
252
|
+
|
253
|
+
|
254
|
+
|
255
|
+
def create_label_list(label_file_path):
    """Parse an ``index:name`` label file into ``{int_index: name}``.

    Lines that do not split into exactly two fields on ``:`` are skipped;
    surrounding spaces around the name are stripped.

    Args:
        label_file_path: path of the text label file.

    Returns:
        dict mapping integer class index to label string.
    """
    labels = {}
    with open(label_file_path, "r") as f:
        for raw in f:
            parts = raw.rstrip("\n").strip(" ").split(":")
            if len(parts) != 2:
                continue
            labels[int(parts[0])] = parts[1].strip(" ")
    return labels
|
270
|
+
|
271
|
+
|
272
|
+
|
273
|
+
|
274
|
+
|
275
|
+
def get_result(predict_result, label_d):
    """Return ``(top_prob, top_index, label)`` for a score array.

    Args:
        predict_result: numpy array of class scores/probabilities.
        label_d: ``{index: label}`` mapping from ``create_label_list``.

    Returns:
        Tuple of the maximum score, its (flattened) argmax index, and the
        corresponding label string.
    """
    top_index = np.argmax(predict_result)
    top_prob = np.max(predict_result)
    return top_prob, top_index, label_d[top_index]
|
284
|
+
|
285
|
+
|
286
|
+
|
287
|
+
|
288
|
+
|
289
|
+
if __name__ == '__main__':
    # One-time conversion from the original Caffe weights (kept for reference):
    #model = load_caffemodel("models/bvlc_alexnet.caffemodel")
    #save_models(model, "models/alexnet.chainermodel")

    # Load the converted AlexNet weights and an empty Chainer AlexNet.
    caffe_model = load_models("../models/alexnet.chainermodel")
    chainer_model = models.Alex.Alex()

    # Index -> label-name lookup for ImageNet classes.
    label_d = create_label_list("../data/imagenet_label.txt")

    # Transfer the pretrained W and b tensors into the Chainer model.
    copy_model(caffe_model, chainer_model)

    # Mean image used for input normalization.
    mean_image_array = create_mean_image_array(IMAGENET_MEAN_FILE, INPUT_IMAGE_SIZE)

    # Load and display the target image.
    target_img = resize_image("../data/panda2.jpeg")
    target_img.show()

    # Normalize and reformat for the network (1 x 3 x H x W, BGR, float32).
    input_array = substract_mean_image(np.asarray(target_img), mean_image_array)
    chainer_array = format2chainer(input_array)

    # Alternative attacks (kept for reference):
    #adv_array, adv_part_array, orig_result = fast_gradient(chainer_model, chainer_array, eps=8.0)
    #adv_array, adv_part_array, orig_result = iterative_gradient(chainer_model, chainer_array,
    #                                                            eps=8.0, alpha=1.0)

    # Iterative least-likely-class attack: returns the adversarial image,
    # the perturbation alone, and the clean-image prediction scores.
    adv_array, adv_part_array, orig_result = iterative_least_likely(chainer_model, chainer_array,
                                                                    eps=8.0, alpha=1.0)
    least_ind = np.argmin(orig_result)
    print("least likely category {}".format(label_d[least_ind]))

    # Prediction on the clean image.
    orig_prob, orig_ind, orig_label = get_result(orig_result, label_d)
    print("predict_original_image: {} predict_prob: {}".format(orig_label.strip(" "), orig_prob))

    # Prediction on the perturbation alone.
    predict_result = F.softmax(chainer_model(adv_part_array)).data
    part_prob, part_label_ind, part_label = get_result(predict_result, label_d)
    print("predict_adversarial_perturbations: {} predict_prob: {}".format(part_label, part_prob))

    # Prediction on the adversarial example.
    predict_result = F.softmax(chainer_model(adv_array)).data
    adv_prob, adv_label_ind, adv_label = get_result(predict_result, label_d)
    print("predict_adversarial_examples: {} predict_prob: {}".format(adv_label, adv_prob))
    print("original category prob with adv_images {}".format(predict_result[0][orig_ind]))

    # Convert back to displayable RGB uint8 and show the results.
    adv_array = format2orig(adv_array[0])
    adv_part_array = format2orig(adv_part_array[0])
    adv_array = add_mean_image(adv_array, mean_image_array)
    adv_array = np.clip(adv_array, 0, 255)
    adv_array = adv_array.astype(np.uint8)
    Image.fromarray(adv_array, 'RGB').show()
    # NOTE(review): adv_part_array is presumably still float here;
    # Image.fromarray(..., 'RGB') expects uint8 — confirm this call works.
    Image.fromarray(adv_part_array, 'RGB').show()
|
408
|
+
|
409
|
+
```
|
410
|
+
|
411
|
+
|
412
|
+
|
413
|
+
|
414
|
+
|
33
415
|
### 補足情報(FW/ツールのバージョンなど)
|
34
416
|
|
35
417
|
|