前提・実現したいこと
Python(tensorflow,keras)を使用してディープラーニングによる画像のノイズ低減をしようと考えています。
最終的に実現したいことは、トレーニングデータとして低画質の画像と対になる高画質の画像を学習させて、低画質画像を入力して、高画質化を行いたいと考えています。できれば画像のフォーマットはDICOMで行いたいのですが現状ではPNGで行っています。(Python上でDICOMからPNGに変換もありと考えています。)画像の出力方法もわからず悩んでいます。
本やネットなどの情報を参考にU-Netのコードを書いてみたのですが、学習を実行するところで以下のエラーが発生しました。
"""Train a U-Net to map low-quality (noisy) PNG images to paired high-quality images.

Reads image pairs from ./train/{low,high} and ./test/{low,high} (extracted
from Segmentation.zip), builds a 4-level U-Net for 64x64 grayscale images,
and trains it with binary cross-entropy.

NOTE(review): the traceback path (Anaconda env, standalone keras package,
`tf.to_int32` / `tf.metrics.mean_iou`) indicates TensorFlow 1.x with the
old standalone Keras; this script keeps that API.
"""
import random
import glob
import numpy as np
import matplotlib.pyplot as plt
from skimage.io import imread, imshow
from keras.models import Model, load_model
from keras.layers import Input
from keras.layers.core import Lambda
from keras.layers import BatchNormalization
from keras.layers.convolutional import Conv2D, Conv2DTranspose
from keras.layers.pooling import MaxPooling2D
from keras.layers.merge import concatenate
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras import backend as K
# BUG FIX: the optimizer class is `Adam`; lowercase `adam` is not importable/callable here.
from keras.optimizers import Adam
from keras.layers.advanced_activations import LeakyReLU
import tensorflow as tf
from zipfile import ZipFile

# %matplotlib inline  # IPython magic -- only valid inside a notebook cell, not a .py file

# --- Extract the dataset -------------------------------------------------
file_name = './Segmentation.zip'
with ZipFile(file_name, 'r') as zip_file:
    zip_file.extractall()

IMG_WIDTH = 64
IMG_HEIGHT = 64
IMG_CHANNELS = 1

train_low_imgs = glob.glob("./train/low/*.png")
train_high_imgs = glob.glob("./train/high/*.png")
test_low_imgs = glob.glob("./test/low/*.png")
test_high_imgs = glob.glob("./test/high/*.png")

# "Train on 0 samples" in the reported traceback means the globs matched no
# files (wrong path / zip layout); fail fast with a clear message instead of
# letting Keras crash inside ProgbarLogger.
if not train_low_imgs or len(train_low_imgs) != len(train_high_imgs):
    raise FileNotFoundError(
        "No paired training images found under ./train/low and ./train/high")

# For denoising/regression the targets must keep their gray levels, so use
# float32 in [0, 1].  BUG FIX: the original used dtype=np.bool for Y_train
# (binarizes the targets) and np.uint8 buffers, which truncate skimage's
# float grayscale output (as_gray=True returns floats in [0, 1] -- verify
# against your PNGs) to all zeros.
X_train = np.zeros((len(train_low_imgs), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.float32)
Y_train = np.zeros((len(train_high_imgs), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.float32)
for n in range(len(train_low_imgs)):
    # BUG FIX: the original read `train_org_imgs`, which is never defined
    # (NameError); the list is named `train_high_imgs`.
    X_train[n] = imread(train_low_imgs[n], as_gray=True).reshape(IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS)
    Y_train[n] = imread(train_high_imgs[n], as_gray=True).reshape(IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS)

# BUG FIX: `test_org_imgs` was also undefined; use `test_high_imgs`.
X_test = np.zeros((len(test_low_imgs), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.float32)
Y_test = np.zeros((len(test_high_imgs), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.float32)
for n in range(len(test_low_imgs)):
    X_test[n] = imread(test_low_imgs[n], as_gray=True).reshape(IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS)
    Y_test[n] = imread(test_high_imgs[n], as_gray=True).reshape(IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS)


def mean_iou(y_true, y_pred):
    """Mean IoU averaged over prediction thresholds 0.50, 0.55, ..., 0.95.

    Uses the TF 1.x `tf.metrics.mean_iou` streaming metric with 2 classes.
    NOTE(review): IoU is a segmentation metric; for denoising a metric such
    as PSNR/MAE may be more meaningful -- confirm intent.
    """
    prec = []
    for t in np.arange(0.5, 1.0, 0.05):
        y_pred_ = tf.to_int32(y_pred > t)
        score, up_opt = tf.metrics.mean_iou(y_true, y_pred_, 2)
        K.get_session().run(tf.local_variables_initializer())
        with tf.control_dependencies([up_opt]):
            score = tf.identity(score)
        # BUG FIX: the original never appended, so K.stack(prec) stacked an
        # empty list.
        prec.append(score)
    return K.mean(K.stack(prec), axis=0)


def _conv_block(x, filters, bn_momentum=0.99):
    """Two 3x3 ReLU convolutions with BatchNorm after the first.

    `bn_momentum` defaults to Keras' BatchNormalization default (0.99); the
    original code used momentum=0.9 only for the final decoder block, so it
    is kept as a parameter to preserve that behavior.
    """
    c = Conv2D(filters, (3, 3), padding='same', activation='relu')(x)
    c = BatchNormalization(momentum=bn_momentum)(c)
    c = Conv2D(filters, (3, 3), padding='same', activation='relu')(c)
    return c


# --- Build the U-Net -----------------------------------------------------
inputs = Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))

# Encoder: 16 -> 32 -> 64 -> 128 filters, 2x2 max-pooling between levels.
c1 = _conv_block(inputs, 16)
p1 = MaxPooling2D((2, 2))(c1)
c2 = _conv_block(p1, 32)
p2 = MaxPooling2D((2, 2))(c2)
c3 = _conv_block(p2, 64)
p3 = MaxPooling2D((2, 2))(c3)
c4 = _conv_block(p3, 128)
p4 = MaxPooling2D((2, 2))(c4)

# Bottleneck.
c5 = _conv_block(p4, 256)

# Decoder: transpose-conv upsampling + skip connection from the encoder.
u6 = concatenate([Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(c5), c4])
c6 = _conv_block(u6, 128)
u7 = concatenate([Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(c6), c3])
c7 = _conv_block(u7, 64)
u8 = concatenate([Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(c7), c2])
c8 = _conv_block(u8, 32)
u9 = concatenate([Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same')(c8), c1], axis=3)
c9 = _conv_block(u9, 16, bn_momentum=0.9)

# Single-channel sigmoid output in [0, 1], matching the float32 targets.
outputs = Conv2D(1, (1, 1), activation='sigmoid')(c9)

model = Model(inputs=[inputs], outputs=[outputs])
model.compile(optimizer=Adam(lr=1e-4),
              loss='binary_crossentropy',
              metrics=['binary_accuracy', mean_iou])

model_checkpoint = ModelCheckpoint('UNet.hdf5', verbose=1, save_best_only=True)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=7)

# BUG FIX: the original called fit with undefined lowercase `x_train`/`y_train`;
# the arrays are `X_train`/`Y_train`.
results = model.fit(X_train,
                    Y_train,
                    validation_split=0.1,
                    batch_size=9,
                    epochs=100,
                    callbacks=[reduce_lr, model_checkpoint])
発生している問題・エラーメッセージ
Train on 0 samples, validate on 0 samples Epoch 1/10 --------------------------------------------------------------------------- AttributeError Traceback (most recent call last) <ipython-input-113-c5baae34f48a> in <module> 6 batch_size=1, 7 epochs=10, ----> 8 callbacks=[reduce_lr, modelCheckpoint]) ~\Anaconda3\envs\dls\lib\site-packages\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, **kwargs) 1037 initial_epoch=initial_epoch, 1038 steps_per_epoch=steps_per_epoch, -> 1039 validation_steps=validation_steps) 1040 1041 def evaluate(self, x=None, y=None, ~\Anaconda3\envs\dls\lib\site-packages\keras\engine\training_arrays.py in fit_loop(model, f, ins, out_labels, batch_size, epochs, verbose, callbacks, val_f, val_ins, shuffle, callback_metrics, initial_epoch, steps_per_epoch, validation_steps) 215 for l, o in zip(out_labels, val_outs): 216 epoch_logs['val_' + l] = o --> 217 callbacks.on_epoch_end(epoch, epoch_logs) 218 if callback_model.stop_training: 219 break ~\Anaconda3\envs\dls\lib\site-packages\keras\callbacks.py in on_epoch_end(self, epoch, logs) 77 logs = logs or {} 78 for callback in self.callbacks: ---> 79 callback.on_epoch_end(epoch, logs) 80 81 def on_batch_begin(self, batch, logs=None): ~\Anaconda3\envs\dls\lib\site-packages\keras\callbacks.py in on_epoch_end(self, epoch, logs) 336 self.log_values.append((k, logs[k])) 337 if self.verbose: --> 338 self.progbar.update(self.seen, self.log_values) 339 340 AttributeError: 'ProgbarLogger' object has no attribute 'log_values'
該当のソースコード
Python(tensorflow,keras)
# (Editor line numbers that were fused into the pasted code -- `1model...`,
# `3results = ...` -- have been removed; they are syntax errors in Python.)

# Save the best weights and lower the learning rate when val_loss plateaus.
modelCheckpoint = ModelCheckpoint('UNet.hdf5', verbose=1, save_best_only=True)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=7)

# NOTE(review): make sure the arrays passed here use the same capitalization
# as where they are defined (X_train/Y_train) -- the full script mixed
# x_train/y_train and X_train/Y_train.
results = model.fit(X_train,
                    Y_train,
                    validation_split=0.1,
                    batch_size=1,
                    epochs=10,
                    callbacks=[reduce_lr, modelCheckpoint])
試したこと
バッチサイズなどを変更してみましたが同じエラーが発生しました。
補足情報(FW/ツールのバージョンなど)
トレースバックのパス(~\Anaconda3\envs\dls\lib\site-packages\keras\...)より、Windows 上の Anaconda 仮想環境(dls)で standalone の Keras を使用しています。`tf.to_int32` や `tf.metrics.mean_iou` を使っているため TensorFlow は 1.x 系と思われます。
「*_high_imgs」と「*_org_imgs」は、どちらかの名前に統一しないと、未定義の変数を参照して NameError になります。また「Train on 0 samples」と表示されているため、glob が画像ファイルを 1 枚も見つけられていない(パスや zip の展開先が違う)ことが ProgbarLogger のエラーの直接の原因です。