Question edit history

2  Grammar fix

test CHANGED
@@ -1 +1 @@
- Error plotting an image python
+ Error when plotting an image with matplotlib python

test CHANGED
File without changes

1  Code improvement

test CHANGED
File without changes

test CHANGED
@@ -10,15 +10,15 @@
-   File "C:\Users\tSNE_CNN.py", line 24
+   File "C:\Users\tSNE_CNN.py", line 248, in <module>
      main()

-   File "C:\Users\tSNE_CNN.py", line 20
+   File "C:\Users\tSNE_CNN.py", line 209, in main
-     plt.scatter(X_embeddedsc[label==index,
+     plt.scatter(X_embeddedsc[label==index,0], X_embeddedsc[label==index,1], X_embeddedsc[label==index,2],X_embeddedsc[label==index,3],
@@ -28,7 +28,7 @@
- Below is the code I made from them for 8-class classification.
+ What kind of error is this? Below is the code I made from them for 8-class classification.

  Thank you in advance.
@@ -38,12 +38,358 @@
  ```ここに言語を入力

+ # -*- coding: utf-8 -*
+ import glob
+ import cv2
+ import numpy as np
+ import time
+ from tqdm import tqdm
+ from skimage import feature,exposure
+ from keras.models import model_from_json
+ from keras.utils.vis_utils import plot_model
+ from keras.models import Model
+ import pandas as pd
+ from matplotlib import pylab as plt
+ from sklearn.manifold import TSNE
+ from sklearn.preprocessing import StandardScaler
+ from sklearn.preprocessing import MinMaxScaler
+
+ # Feature extraction
+ def create_images_array(load_img_paths,model,layer_name):
+     imgs=[]
+     middle_layer_model = Model(inputs=model.input, outputs=model.get_layer(layer_name).output)
+     for load_img_path in tqdm(load_img_paths):
+         img = cv2.imread(load_img_path)
+         # Match the dtype; dividing by 255 normalizes the pixel values.
+         target = np.reshape(img, (1, img.shape[0], img.shape[1], img.shape[2])).astype('float') / 255.0
+         middle_output = middle_layer_model.predict(target)
+         imgs.append(middle_output)
+     # imgs holds the feature arrays of the 384 images, (384,1,7,7,2048); reshape it to (384,100352)
+     imgs = np.reshape(imgs, [np.shape(imgs)[0], -1])
+     print('\nimgs.shape')
+     print(imgs.shape)
+     return imgs
+
+ def tSNE(X,seikika):
+     print('正規化start')
+     # Normalize the data
+     #X_pre_emb = seikika.fit_transform(X)
+     # Apply t-SNE
+     print('t-SNE適用start')
+     tsne = TSNE(n_components=2, random_state=0)
+     X_embedded = tsne.fit_transform(X)
+     # Normalize the data reduced by t-SNE
+     X_2d= seikika.fit_transform(X_embedded)
+     print(X_2d.shape)
+     #print(X_2d)
+     return X_2d
+
+ def main():
+     t1 = time.time()
+
+     json_string = open('./preprocess_images_keras_2class/case3/model.json').read()
+     model = model_from_json(json_string)
+     model.load_weights('./preprocess_images_keras_2class/case3/weight.hdf5')
+     layer_name = 'conv5_block3_out'
+
+     # Locations of the training image files
+     LOAD_TRAIN_IMG1S_PATH = './preprocess_images_keras/case3/train/T1-FE/*'
+     LOAD_TRAIN_IMG2S_PATH = './preprocess_images_keras/case3/train/T2-FE/*'
+     LOAD_TRAIN_IMG3S_PATH = './preprocess_images_keras/case3/train/T3-FE/*'
+     LOAD_TRAIN_IMG4S_PATH = './preprocess_images_keras/case3/train/T4-FE/*'
+     LOAD_TRAIN_IMG5S_PATH = './preprocess_images_keras/case3/train/T5-FE/*'
+     LOAD_TRAIN_IMG6S_PATH = './preprocess_images_keras/case3/train/T6-FE/*'
+     LOAD_TRAIN_IMG7S_PATH = './preprocess_images_keras/case3/train/T7-FE/*'
+     LOAD_TRAIN_IMG8S_PATH = './preprocess_images_keras/case3/train/T8-FE/*'
+
+     LOAD_TRAIN_IMG9S_PATH = './preprocess_images_keras/case3/train/T1-W/*'
+     LOAD_TRAIN_IMG10S_PATH = './preprocess_images_keras/case3/train/T2-W/*'
+     LOAD_TRAIN_IMG11S_PATH = './preprocess_images_keras/case3/train/T3-W/*'
+     LOAD_TRAIN_IMG12S_PATH = './preprocess_images_keras/case3/train/T4-W/*'
+     LOAD_TRAIN_IMG13S_PATH = './preprocess_images_keras/case3/train/T5-W/*'
+     LOAD_TRAIN_IMG14S_PATH = './preprocess_images_keras/case3/train/T6-W/*'
+     LOAD_TRAIN_IMG15S_PATH = './preprocess_images_keras/case3/train/T7-W/*'
+     LOAD_TRAIN_IMG16S_PATH = './preprocess_images_keras/case3/train/T8-W/*'
+
+     # Get the paths of the training image files
+     load_img1_paths = glob.glob(LOAD_TRAIN_IMG1S_PATH)
+     load_img2_paths = glob.glob(LOAD_TRAIN_IMG2S_PATH)
+     load_img3_paths = glob.glob(LOAD_TRAIN_IMG3S_PATH)
+     load_img4_paths = glob.glob(LOAD_TRAIN_IMG4S_PATH)
+     load_img5_paths = glob.glob(LOAD_TRAIN_IMG5S_PATH)
+     load_img6_paths = glob.glob(LOAD_TRAIN_IMG6S_PATH)
+     load_img7_paths = glob.glob(LOAD_TRAIN_IMG7S_PATH)
+     load_img8_paths = glob.glob(LOAD_TRAIN_IMG8S_PATH)
+
+     load_img9_paths = glob.glob(LOAD_TRAIN_IMG9S_PATH)
+     load_img10_paths = glob.glob(LOAD_TRAIN_IMG10S_PATH)
+     load_img11_paths = glob.glob(LOAD_TRAIN_IMG11S_PATH)
+     load_img12_paths = glob.glob(LOAD_TRAIN_IMG12S_PATH)
+     load_img13_paths = glob.glob(LOAD_TRAIN_IMG13S_PATH)
+     load_img14_paths = glob.glob(LOAD_TRAIN_IMG14S_PATH)
+     load_img15_paths = glob.glob(LOAD_TRAIN_IMG15S_PATH)
+     load_img16_paths = glob.glob(LOAD_TRAIN_IMG16S_PATH)
+
+     # Load the training image files and extract features
+     imgs1 = create_images_array(load_img1_paths,model,layer_name)
+     imgs2 = create_images_array(load_img2_paths,model,layer_name)
+     imgs3 = create_images_array(load_img3_paths,model,layer_name)
+     imgs4 = create_images_array(load_img4_paths,model,layer_name)
+     imgs5 = create_images_array(load_img5_paths,model,layer_name)
+     imgs6 = create_images_array(load_img6_paths,model,layer_name)
+     imgs7 = create_images_array(load_img7_paths,model,layer_name)
+     imgs8 = create_images_array(load_img8_paths,model,layer_name)
+
+     imgs9 = create_images_array(load_img9_paths,model,layer_name)
+     imgs10 = create_images_array(load_img10_paths,model,layer_name)
+     imgs11 = create_images_array(load_img11_paths,model,layer_name)
+     imgs12 = create_images_array(load_img12_paths,model,layer_name)
+     imgs13 = create_images_array(load_img13_paths,model,layer_name)
+     imgs14 = create_images_array(load_img14_paths,model,layer_name)
+     imgs15 = create_images_array(load_img15_paths,model,layer_name)
+     imgs16 = create_images_array(load_img16_paths,model,layer_name)
+
+     X = np.r_[imgs1, imgs2, imgs3, imgs4, imgs5, imgs6, imgs7, imgs8, imgs9, imgs10, imgs11, imgs12, imgs13, imgs14, imgs15, imgs16]
+     print('X')
+     print(X)
+
+     # Generate the ground-truth labels; the imgs number determines the label
+     labels1 = np.full(len(load_img1_paths), 0, np.int32)
+     labels2 = np.full(len(load_img2_paths), 1, np.int32)
+     labels3 = np.full(len(load_img3_paths), 2, np.int32)
+     labels4 = np.full(len(load_img4_paths), 3, np.int32)
+     labels5 = np.full(len(load_img5_paths), 4, np.int32)
+     labels6 = np.full(len(load_img6_paths), 5, np.int32)
+     labels7 = np.full(len(load_img7_paths), 6, np.int32)
+     labels8 = np.full(len(load_img8_paths), 7, np.int32)
+
+     labels9 = np.full(len(load_img9_paths), 0, np.int32)
+     labels10 = np.full(len(load_img10_paths), 1, np.int32)
+     labels11 = np.full(len(load_img11_paths), 2, np.int32)
+     labels12 = np.full(len(load_img12_paths), 3, np.int32)
+     labels13 = np.full(len(load_img13_paths), 4, np.int32)
+     labels14 = np.full(len(load_img14_paths), 5, np.int32)
+     labels15 = np.full(len(load_img15_paths), 6, np.int32)
+     labels16 = np.full(len(load_img16_paths), 7, np.int32)
+
+     label = np.r_[labels1, labels2, labels3, labels4, labels5, labels6, labels7, labels8, labels9, labels10, labels11, labels12, labels13, labels14, labels15, labels16]
+     print('label')
+     print(label.shape)
+
+     # Normalization
+     seikika = StandardScaler()
+     #seikika = MinMaxScaler()
+     X_embeddedsc = tSNE(X,seikika)
+     print(X_embeddedsc.shape)
+
+     # Make the graph wide
+     from matplotlib.pylab import rcParams
+     rcParams['figure.figsize'] = 15, 6
+
+     import matplotlib as mpl
+     mpl.rcParams['font.family'] = ['serif']
+
-     # Plot for the 8-class classification
+     # Plot for the 8-class classification

      print("-----------------------------------------------------------")

      # Prepare the visualization
+     # Lists are read starting from index 0; a big difference from MATLAB
+
      color_list1 = ["blue","green", "red","cyan","magenta","yellow","black","white"]

      color_list2 = ["blue","green", "red","cyan","magenta","yellow","black","black"]
@@ -64,9 +410,9 @@

      for index in range(len(np.unique(label))):
-         plt.scatter(X_embeddedsc[label==index,
+         plt.scatter(X_embeddedsc[label==index,0], X_embeddedsc[label==index,1], X_embeddedsc[label==index,2],X_embeddedsc[label==index,3],
-         X_embeddedsc[label==index,
+         X_embeddedsc[label==index,4],X_embeddedsc[label==index,5],X_embeddedsc[label==index,6],X_embeddedsc[label==index,7],

          edgecolor=color_list2[index],color=color_list1[index], marker=marker_list[index], label=label_list[index])

@@ -78,6 +424,34 @@

      plt.show()

-     print("-----------------------------------------------------------\n\n")
+     print("-----------------------------------------------------------\n\n")
+
+     t2 = time.time()
+     elapsed_time = (t2-t1)/3600
+     print(f"経過時間:{elapsed_time}")
+
+
+ if __name__ == '__main__':
+     main()

  ```
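
For context when reading the diff above: TSNE(n_components=2) in the posted code yields a two-column embedding, so X_embeddedsc only has columns 0 and 1, and plt.scatter takes a single x array and a single y array per call (the third and fourth positional arguments are interpreted as marker sizes and colors). The following is a minimal editor's sketch of the conventional per-class scatter of a 2-component embedding, not the asker's code: plot_embedding, the synthetic demo data, marker_list, and label_list are illustrative assumptions, while the color lists are copied from the code in the diff.

```python
import numpy as np
from matplotlib import pylab as plt


def plot_embedding(X_embeddedsc, label, color_list1, color_list2, marker_list, label_list):
    # One scatter call per class: x is column 0, y is column 1 of the 2-D embedding.
    for index in range(len(np.unique(label))):
        plt.scatter(X_embeddedsc[label == index, 0],   # first t-SNE component
                    X_embeddedsc[label == index, 1],   # second t-SNE component
                    color=color_list1[index],
                    edgecolor=color_list2[index],
                    marker=marker_list[index],
                    label=label_list[index])
    plt.legend()
    plt.show()


if __name__ == "__main__":
    # Tiny synthetic demo: 80 random 2-D points in 8 classes (hypothetical data,
    # standing in for the t-SNE output of the question's feature arrays).
    rng = np.random.default_rng(0)
    X_demo = rng.normal(size=(80, 2))
    label_demo = np.repeat(np.arange(8), 10)
    color_list1 = ["blue", "green", "red", "cyan", "magenta", "yellow", "black", "white"]
    color_list2 = ["blue", "green", "red", "cyan", "magenta", "yellow", "black", "black"]
    marker_list = ["o", "s", "^", "v", "<", ">", "D", "*"]  # assumed; the question's marker_list is not shown
    label_list = [f"class {i}" for i in range(8)]           # assumed; the question's label_list is not shown
    plot_embedding(X_demo, label_demo, color_list1, color_list2, marker_list, label_list)
```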