質問編集履歴
1
ソースコードの追加
title
CHANGED
File without changes
|
body
CHANGED
@@ -14,6 +14,197 @@
|
|
14
14
|
ImportError: No module named models.Alex
|
15
15
|
```
|
16
16
|
|
17
|
+
### ソースコード
|
18
|
+
```python
|
19
|
+
#!/usr/bin/env python
|
20
|
+
# -*- coding: utf-8 -*-
|
21
|
+
|
22
|
+
import numpy as np
|
23
|
+
import chainer
|
24
|
+
import chainer.functions as F
|
25
|
+
from chainer.links.caffe import CaffeFunction
|
26
|
+
import pickle
|
27
|
+
import models.Alex
|
28
|
+
from PIL import Image
|
29
|
+
|
30
|
+
from adv_models.fast_gradient import fast_gradient
|
31
|
+
from adv_models.iterative_gradient import iterative_gradient
|
32
|
+
from adv_models.iterative_least_likely import iterative_least_likely
|
33
|
+
|
34
|
+
|
35
|
+
IMAGENET_MEAN_FILE = "../data/ilsvrc_2012_mean.npy"
|
36
|
+
INPUT_IMAGE_SIZE = 227
|
37
|
+
|
38
|
+
def load_caffemodel(model_path):
|
39
|
+
caffe_model = CaffeFunction(model_path)
|
40
|
+
return caffe_model
|
41
|
+
|
42
|
+
|
43
|
+
def save_models(caffe_model, save_model_path):
|
44
|
+
with open(save_model_path, 'wb') as f:
|
45
|
+
pickle.dump(caffe_model, f)
|
46
|
+
|
47
|
+
|
48
|
+
def load_models(save_model_path):
|
49
|
+
with open(save_model_path, 'rb') as f:
|
50
|
+
model = pickle.load(f)
|
51
|
+
return model
|
52
|
+
|
53
|
+
|
54
|
+
def copy_model(src, dst):
|
55
|
+
assert isinstance(src, chainer.Chain)
|
56
|
+
assert isinstance(dst, chainer.Chain)
|
57
|
+
for child in src.children():
|
58
|
+
if child.name not in dst.__dict__: continue
|
59
|
+
dst_child = dst[child.name]
|
60
|
+
if type(child) != type(dst_child): continue
|
61
|
+
if isinstance(child, chainer.Chain):
|
62
|
+
copy_model(child, dst_child)
|
63
|
+
if isinstance(child, chainer.Link):
|
64
|
+
match = True
|
65
|
+
for a, b in zip(child.namedparams(), dst_child.namedparams()):
|
66
|
+
if a[0] != b[0]:
|
67
|
+
match = False
|
68
|
+
break
|
69
|
+
if a[1].data.shape != b[1].data.shape:
|
70
|
+
match = False
|
71
|
+
break
|
72
|
+
if not match:
|
73
|
+
print('Ignore %s because of parameter mismatch' % child.name)
|
74
|
+
continue
|
75
|
+
for a, b in zip(child.namedparams(), dst_child.namedparams()):
|
76
|
+
b[1].data = a[1].data
|
77
|
+
print('Copy %s' % child.name)
|
78
|
+
|
79
|
+
|
80
|
+
def create_mean_image_array(pic_mean_data_path, size_image):
|
81
|
+
mean_data = np.load(pic_mean_data_path)
|
82
|
+
mean_data = Image.fromarray(mean_data.astype(np.uint8), 'RGB').resize((size_image, size_image))
|
83
|
+
mean_data = np.asarray(mean_data).astype(np.float32)
|
84
|
+
return mean_data
|
85
|
+
|
86
|
+
|
87
|
+
def substract_mean_image(target_array, mean_array):
|
88
|
+
# mean_value: 104 B
|
89
|
+
# mean_value: 117 G
|
90
|
+
# mean_value: 123 R
|
91
|
+
result_array = target_array - mean_array
|
92
|
+
return result_array
|
93
|
+
|
94
|
+
|
95
|
+
def add_mean_image(target_array, mean_array):
|
96
|
+
result_array = target_array + mean_array
|
97
|
+
return result_array
|
98
|
+
|
99
|
+
|
100
|
+
def resize_image(original_image_path):
|
101
|
+
img = Image.open(original_image_path)
|
102
|
+
print("original image format:{} {}".format(img.size, img.mode))
|
103
|
+
|
104
|
+
img_resize = img.resize((INPUT_IMAGE_SIZE, INPUT_IMAGE_SIZE))
|
105
|
+
print("resize image format:{} {}".format(img_resize.size, img_resize.mode))
|
106
|
+
return img_resize
|
107
|
+
|
108
|
+
|
109
|
+
def format2chainer(img_data):
|
110
|
+
# RGB to BGR
|
111
|
+
arrayImg = np.asarray(img_data).astype(np.float32)[:, :, ::-1]
|
112
|
+
# HWC to CHW
|
113
|
+
arrayImg = arrayImg.transpose(2, 0, 1)
|
114
|
+
# 3-dimensions to 4-dimensions
|
115
|
+
arrayImg = arrayImg.reshape((1,) + arrayImg.shape)
|
116
|
+
return arrayImg
|
117
|
+
|
118
|
+
|
119
|
+
def format2orig(chainer_img):
|
120
|
+
# CHW to HWC
|
121
|
+
#orig_image = chainer_img.transpose(1, 2, 0).astype(np.uint8)
|
122
|
+
orig_image = chainer_img.transpose(1, 2, 0)
|
123
|
+
# BGR to RGB
|
124
|
+
orig_image = orig_image[:,:,::-1]
|
125
|
+
return orig_image
|
126
|
+
|
127
|
+
|
128
|
+
def create_label_list(label_file_path):
|
129
|
+
label_d = {}
|
130
|
+
with open(label_file_path, "r") as f:
|
131
|
+
for line in f:
|
132
|
+
line = line.rstrip("\n").strip(" ").split(":")
|
133
|
+
if len(line) == 2:
|
134
|
+
label_d[int(line[0])] = line[1].strip(" ")
|
135
|
+
return label_d
|
136
|
+
|
137
|
+
|
138
|
+
def get_result(predict_result, label_d):
|
139
|
+
prob = np.max(predict_result)
|
140
|
+
label_ind = np.argmax(predict_result)
|
141
|
+
label = label_d[label_ind]
|
142
|
+
return prob, label_ind, label
|
143
|
+
|
144
|
+
|
145
|
+
if __name__ == '__main__':
|
146
|
+
#model = load_caffemodel("models/bvlc_alexnet.caffemodel")
|
147
|
+
#save_models(model, "models/alexnet.chainermodel")
|
148
|
+
|
149
|
+
caffe_model = load_models("../models/alexnet.chainermodel")
|
150
|
+
chainer_model = models.Alex.Alex()
|
151
|
+
|
152
|
+
# get label dict
|
153
|
+
label_d = create_label_list("../data/imagenet_label.txt")
|
154
|
+
|
155
|
+
# copy caffe_model W, b to chainer_model
|
156
|
+
copy_model(caffe_model, chainer_model)
|
157
|
+
|
158
|
+
# create mean image array
|
159
|
+
mean_image_array = create_mean_image_array(IMAGENET_MEAN_FILE, INPUT_IMAGE_SIZE)
|
160
|
+
|
161
|
+
# predict target_image
|
162
|
+
orig_img = resize_image("../data/panda2.jpeg")
|
163
|
+
orig_img.show()
|
164
|
+
|
165
|
+
orig_array = np.asarray(orig_img)
|
166
|
+
orig_array = substract_mean_image(orig_array, mean_image_array)
|
167
|
+
|
168
|
+
chainer_array = format2chainer(orig_array)
|
169
|
+
|
170
|
+
# apply gradient sign method
|
171
|
+
#adv_array, adv_part_array, orig_result = fast_gradient(chainer_model, chainer_array, eps=8.0)
|
172
|
+
|
173
|
+
# apply iterative gradient sign method
|
174
|
+
#adv_array, adv_part_array, orig_result = iterative_gradient(chainer_model, chainer_array,
|
175
|
+
# eps=8.0, alpha=1.0)
|
176
|
+
|
177
|
+
# apply iterative least likely class method
|
178
|
+
adv_array, adv_part_array, orig_result = iterative_least_likely(chainer_model, chainer_array,
|
179
|
+
eps=8.0, alpha=1.0)
|
180
|
+
least_ind = np.argmin(orig_result)
|
181
|
+
print("least likely category {}".format(label_d[least_ind]))
|
182
|
+
|
183
|
+
# predict original image_result
|
184
|
+
orig_prob, orig_ind, orig_label = get_result(orig_result, label_d)
|
185
|
+
print("predict_original_image: {} predict_prob: {}".format(orig_label.strip(" "), orig_prob))
|
186
|
+
|
187
|
+
# predict adversarial_image
|
188
|
+
predict_result = F.softmax(chainer_model(adv_part_array)).data
|
189
|
+
part_prob, part_label_ind, part_label = get_result(predict_result, label_d)
|
190
|
+
print("predict_adversarial_perturbations: {} predict_prob: {}".format(part_label, part_prob))
|
191
|
+
|
192
|
+
predict_result = F.softmax(chainer_model(adv_array)).data
|
193
|
+
adv_prob, adv_label_ind, adv_label = get_result(predict_result, label_d)
|
194
|
+
print("predict_adversarial_examples: {} predict_prob: {}".format(adv_label, adv_prob))
|
195
|
+
print("original category prob with adv_images {}".format(predict_result[0][orig_ind]))
|
196
|
+
|
197
|
+
# show adv_image
|
198
|
+
adv_array = format2orig(adv_array[0])
|
199
|
+
adv_part_array = format2orig(adv_part_array[0])
|
200
|
+
adv_array = add_mean_image(adv_array, mean_image_array)
|
201
|
+
adv_array = np.clip(adv_array, 0, 255)
|
202
|
+
adv_array = adv_array.astype(np.uint8)
|
203
|
+
Image.fromarray(adv_array, 'RGB').show()
|
204
|
+
Image.fromarray(adv_part_array, 'RGB').show()
|
205
|
+
```
|
206
|
+
|
207
|
+
|
17
208
|
### 補足情報(FW/ツールのバージョンなど)
|
18
209
|
|
19
210
|
開発環境
|