現在autoencoderに画像を入力し、autoencoderから出力された画像で教師データどうかの判定を行いautoencoderを学習させるといったGANの開発を行なっています。
標準のGANならばランダムノイズから画像生成するコードの部分の改良を行なったため、エラーが起こってるのではないかと思います。
しかし実行しようとすると以下のようなエラーが発生しています。
Traceback (most recent call last): File "keras_dcgan_copy.py", line 192, in <module> main() File "keras_dcgan_copy.py", line 150, in main discriminator_on_generator = generator_containing_discriminator(generator, discriminator) File "keras_dcgan_copy.py", line 107, in generator_containing_discriminator model.add(discriminator) File "/usr/local/lib/python3.6/dist-packages/keras/engine/sequential.py", line 181, in add output_tensor = layer(self.outputs[0]) File "/usr/local/lib/python3.6/dist-packages/keras/engine/base_layer.py", line 457, in __call__ output = self.call(inputs, **kwargs) File "/usr/local/lib/python3.6/dist-packages/keras/engine/network.py", line 564, in call output_tensors, _, _ = self.run_internal_graph(inputs, masks) File "/usr/local/lib/python3.6/dist-packages/keras/engine/network.py", line 721, in run_internal_graph layer.call(computed_tensor, **kwargs)) File "/usr/local/lib/python3.6/dist-packages/keras/layers/convolutional.py", line 171, in call dilation_rate=self.dilation_rate) File "/usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py", line 3650, in conv2d data_format=tf_data_format) File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/nn_ops.py", line 779, in convolution data_format=data_format) File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/nn_ops.py", line 839, in __init__ filter_shape[num_spatial_dims])) ValueError: number of input channels does not match corresponding dimension of filter, 32 != 3
機械学習初心者で汚いコードかもしれませんが、ご教授お願いします。
以上がコードの全文です
python Keras
1 2from keras.models import Sequential 3from keras.layers import Dense 4from keras.layers import Reshape 5from keras.layers.core import Activation 6from keras.layers.normalization import BatchNormalization 7from keras.layers.convolutional import UpSampling2D 8from keras.layers.convolutional import Conv2D, MaxPooling2D 9from keras.layers.advanced_activations import LeakyReLU 10from keras.layers.core import Flatten,Dropout 11from keras.optimizers import Adam 12import numpy as np 13from PIL import Image 14import os 15import glob 16import random 17 18n_colors = 3 19 20''' 21def generator_model(): 22 model = Sequential() 23 24 model.add(Dense(1024, input_shape=(100,))) 25 model.add(Activation('tanh')) 26 27 model.add(Dense(128 * 16 * 16)) 28 model.add(BatchNormalization()) 29 model.add(Activation('tanh')) 30 31 model.add(Reshape((16, 16, 128))) 32 model.add(UpSampling2D(size=(2, 2))) 33 model.add(Conv2D(64, (5, 5), padding='same')) 34 model.add(Activation('tanh')) 35 36 model.add(UpSampling2D(size=(2, 2))) 37 model.add(Conv2D(n_colors, (5, 5), padding='same')) 38 model.add(Activation('tanh')) 39 return model 40''' 41 42def generator_model(): 43 model = Sequential() 44 #encode 45 model.add(Conv2D(32, (5, 5),input_shape=(64, 64, n_colors),activation='relu', padding='same')) 46 model.add(MaxPooling2D(pool_size=(2, 2),padding='same')) 47 48 model.add(Conv2D(16, (5, 5),activation='relu', padding='same')) 49 model.add(MaxPooling2D(pool_size=(2, 2),padding='same')) 50 51 model.add(Conv2D(16, (5, 5),activation='relu', padding='same')) 52 model.add(MaxPooling2D(pool_size=(2, 2),padding='same')) 53 #decode 54 model.add(Conv2D(16,(5,5),activation='relu',padding='same')) 55 model.add(UpSampling2D(size=(2, 2))) 56 57 model.add(Conv2D(16,(5,5),activation='relu',padding='same')) 58 model.add(UpSampling2D(size=(2, 2))) 59 60 model.add(Conv2D(32,(5,5),activation='relu')) 61 model.add(UpSampling2D(size=(2, 2))) 62 return model 63 64 65def discriminator_model(): 66 model = Sequential() 67 68 model.add(Conv2D(64, (5, 5), input_shape=(64, 64, n_colors), padding='same')) 69 model.add(LeakyReLU(alpha=0.2)) 70 model.add(MaxPooling2D(pool_size=(2, 2))) 71 72 model.add(Conv2D(128, (5, 5))) 73 model.add(LeakyReLU(alpha=0.2)) 74 model.add(MaxPooling2D(pool_size=(2, 2))) 75 model.add(Flatten()) 76 77 model.add(Dense(1024)) 78 model.add(LeakyReLU(alpha=0.2)) 79 80 model.add(Dense(1)) 81 model.add(Activation('sigmoid')) 82 return model 83 84''' 85def discriminator_model(): 86 model = Sequential() 87 88 model.add(Conv2D(64, (5,5), strides=(2, 2), input_shape=(64, 64, 3), padding="same")) 89 model.add(LeakyReLU(0.2)) 90 91 model.add(Conv2D(128, (5,5), strides=(2, 2))) 92 model.add(LeakyReLU(0.2)) 93 94 model.add(Flatten()) 95 model.add(Dense(256)) 96 97 model.add(LeakyReLU(0.2)) 98 model.add(Dropout(0.5)) 99 100 model.add(Dense(1)) 101 model.add(Activation('sigmoid')) 102 return model 103''' 104def generator_containing_discriminator(generator, discriminator): 105 model = Sequential() 106 model.add(generator) 107 model.add(discriminator) 108 return model 109 110def image_batch(batch_size): 111 files = glob.glob("./in_images/**/*.jpg", recursive=True) 112 files = random.sample(files, batch_size) 113 # print(files) 114 res = [] 115 for path in files: 116 img = Image.open(path) 117 img = img.resize((64, 64)) 118 arr = np.array(img) 119 arr = (arr - 127.5) / 127.5 120 arr.resize((64, 64, n_colors)) 121 res.append(arr) 122 return np.array(res) 123 124def combine_images(generated_images, cols=5, rows=5): 125 shape = generated_images.shape 126 h = shape[1] 127 w = shape[2] 128 image = np.zeros((rows * h, cols * w, n_colors)) 129 for index, img in enumerate(generated_images): 130 if index >= cols * rows: 131 break 132 i = index // cols 133 j = index % cols 134 image[i*h:(i+1)*h, j*w:(j+1)*w, :] = img[:, :, :] 135 image = image * 127.5 + 127.5 136 image = Image.fromarray(image.astype(np.uint8)) 137 return image 138 139def set_trainable(model, trainable): 140 model.trainable = trainable 141 for layer in model.layers: 142 layer.trainable = trainable 143 144def main(): 145 batch_size = 64 146 147 discriminator = discriminator_model() 148 generator = generator_model() 149 150 discriminator_on_generator = generator_containing_discriminator(generator, discriminator) 151 152 set_trainable(discriminator, False) 153 discriminator_on_generator.compile(loss='binary_crossentropy', optimizer=Adam(lr=0.0002, beta_1=0.5)) 154 155 print(generator.summary()) 156 print(discriminator_on_generator.summary()) 157 158 set_trainable(discriminator, True) 159 discriminator.compile(loss='binary_crossentropy', optimizer=Adam(lr=0.0002, beta_1=0.5)) 160 161 print(discriminator.summary()) 162 163 for i in range(30 * 1000): 164 batch_images = image_batch(batch_size) 165 166 # generator更新 167 noise = np.random.uniform(size=[batch_size, batch_images], low=-1.0, high=1.0) 168 169 generated_images = generator.predict(image_batch) 170 171 # discriminatorを更新 172 X = np.concatenate((batch_images, generated_images)) 173 # 訓練データのラベルが1、生成画像のラベルが0になるよう学習する 174 y = [1] * batch_size + [0] * batch_size 175 d_loss = discriminator.train_on_batch(X, y) 176 177 # generator更新 178 noise = np.random.uniform(size=[batch_size, batch_images], low=-1.0, high=1.0) 179 180 # 生成画像をdiscriminatorにいれたときに 181 # 出力が1に近くなる(訓練画像と識別される確率が高くなる)ように学習する 182 g_loss = discriminator_on_generator.train_on_batch(noise, [1] * batch_size) 183 184 if i % 100 == 0: 185 print("step %d d_loss, g_loss : %g %g" % (i, d_loss, g_loss)) 186 image = combine_images(generated_images) 187 os.system('mkdir -p ./gen_images') 188 image.save("./gen_images/gen%05d.jpg" % i) 189 generator.save_weights('generator.h5', True) 190 discriminator.save_weights('discriminator.h5', True) 191 192main() 193
回答1件
あなたの回答
tips
プレビュー
バッドをするには、ログインかつ
こちらの条件を満たす必要があります。
2019/01/18 19:33 編集
2019/01/18 19:37
2019/01/20 08:57