Question edit history

3 Corrected the code

SuzuAya (score 45)

Posted 2019/05/12 22:13

Python 3: "invalid syntax" error will not go away
### Background / what I want to achieve
I am writing VAE code in Python using a dataset of my own.
In the part that prepares the training images, the error message below appears.
Usually, when this error shows up, deleting the code and typing it in again clears it, but this time it persists no matter how many times I retype the code, so I am asking here.
If anyone knows a solution, I would be grateful for your advice.
### Problem / error message
```
File "<ipython-input-14-7dac0b4001f0>", line 111
   X = []
   ^
SyntaxError: invalid syntax
```
### Relevant source code
```
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Lambda, Input, Dense
from keras.models import Model
from keras.models import Sequential, model_from_json
from keras.losses import mse, binary_crossentropy
from keras.layers import Conv2D, Flatten, Lambda
from keras.layers import Reshape, Conv2DTranspose
from keras.utils import plot_model, np_utils
from keras.utils import plot_model
from keras.callbacks import Callback, EarlyStopping, TensorBoard, ModelCheckpoint
from keras import optimizers
from keras import backend as K
from keras.preprocessing.image import array_to_img, img_to_array,load_img
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
import matplotlib.pyplot as plt
import argparse
import os
import re
import glob
import random as rn
import tensorflow as tf
import cv2
import easydict
from PIL import Image
#from google.colab.patches import cv2_imshow
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline
def sampling(args):
   z_mean, z_log_var = args
   batch = K.shape(z_mean)[0]
   dim = K.int_shape(z_mean)[1]
   # by default, random_normal has mean=0 and std=1.0
   epsilon = K.random_normal(shape=(batch, dim))
   return z_mean + K.exp(0.5 * z_log_var) * epsilon
#def plot_results(models,
                #data,
                #batch_size=500,#128,
                #model_name="vae_OCT"):
   """Plots labels and MNIST digits as function of 2-dim latent vector
   # Arguments
       models (tuple): encoder and decoder models
       data (tuple): test data and label
       batch_size (int): prediction batch size
   encoder, decoder = models
   x_test = data #, y_test removed
   os.makedirs(model_name, exist_ok=True)
       model_name (string): which model is using this function
   """
   filename = os.path.join('.', "vae_mean.png")
   # display a 2D plot of the digit classes in the latent space
   z_mean, _, _ = encoder.predict(x_test,
                                  batch_size=batch_size)
   #plt.figure(figsize=(12, 10))
   #plt.scatter(z_mean[:, 0], z_mean[:, 1])#c=y_test削除
   #plt.colorbar()
   #plt.xlabel("z[0]")
   #plt.ylabel("z[1]")
   #plt.savefig(filename)
   #plt.show()
   #filename = os.path.join(model_name, "digits_over_latent.png")
   # display a 30x30 2D manifold of digits
   #n = 30
   #digit_size = 496  the plotting part is all commented out for now because it does not work
   #digit_size_width = 512 #width added
   #digit_size_height = 496 #height added
   #figure = np.zeros((digit_size_width * n, digit_size_height * n))#width, height added
   # linearly spaced coordinates corresponding to the 2D plot
   # of digit classes in the latent space
   #grid_x = np.linspace(-4, 4, n)
   #grid_y = np.linspace(-4, 4, n)[::-1]
   #for i, yi in enumerate(grid_y):
       #for j, xi in enumerate(grid_x):
           #z_sample = np.array([[xi, yi]])
           #x_decoded = decoder.predict(z_sample)
            #digit = x_decoded[0].reshape(digit_size_width, digit_size_height)#width, height added
            #figure[i * digit_size_width: (i + 1) * digit_size_height,#width, height added
                   #j * digit_size_width: (j + 1) * digit_size_height] = digit#width, height added
   #plt.figure(figsize=(10, 10))
   #start_range = digit_size // 2
   #end_range = n * digit_size + start_range + 1
   #pixel_range = np.arange(start_range, end_range, digit_size)
   #sample_range_x = np.round(grid_x, 1)
   #sample_range_y = np.round(grid_y, 1)
   #plt.xticks(pixel_range, sample_range_x)
   #plt.yticks(pixel_range, sample_range_y)
   #plt.xlabel("z[0]")
   #plt.ylabel("z[1]")
   #plt.imshow(figure, cmap='Greys_r')
   #plt.savefig(filename)
   #plt.show()
#original dataset
#train
filenames = glob.glob("./train/NORMAL_train_dataset/*.jpeg")
x_train = []
X = []
                                 
for filename in filenames:
   img = img_to_array(load_img(
   filename, color_mode = "grayscale", target_size=(512,496))
   x_train.append(img)
   X.append(img)
x_train = np.asarray(X)
#test
filenames = glob.glob("./validation/*.jpeg")
x_test = []
for filename in filenames:
   img = img_to_array(load_img(
   filename, color_mode = "grayscale", target_size=(512,496))
   x_test.append(img)
x_test = np.asarray(x_test)
image_size = x_train.shape[1]
original_dim = 512 * 496 *1 #3 removed
x_train = np.reshape(x_train, [-1, original_dim,1])# x_train = np.reshape(x_train, [-1, original_dim])
x_test = np.reshape(x_test, [-1, original_dim,1])# x_test = np.reshape(x_test, [-1, original_dim])
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
#train_generator = train_datagen.flow(x_train)#added: create generator
#test_generator = test_datagen.flow(x_test)#added: create generator
print(x_train.shape)
print(x_test.shape)
# network parameters
input_shape = (512, 496, 1)# (original_dim,)
kernel_size = 3
filters = 16
#intermediate_dim = 512
batch_size = 500#128
latent_dim = 2# Dimensionality of the latent space: a plane  https://fisproject.jp/2018/09/vae-with-python-keras/#vae-with-keras
epochs = 5#1#50
# build encoder model
inputs = Input(shape=input_shape, name='encoder_input')
x = inputs
for i in range(4):
   filters *= 2
   x = Conv2D(filters=filters,kernel_size=kernel_size,activation='relu',strides=2,padding='same')(x)
# shape info needed to build decoder model; handy e.g. when taking an image's shape and dividing by it. Note: this works only with the TensorFlow backend. https://www.mathgram.xyz/entry/keras/backend
shape = K.int_shape(x)
# use reparameterization trick to push the sampling out as input
# note that "output_shape" isn't necessary with the TensorFlow backend
z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])
# instantiate encoder model
encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')
encoder.summary()
plot_model(encoder, to_file='vae_mlp_encoder.png', show_shapes=True)
# build decoder model
latent_inputs = Input(shape=(latent_dim,), name='z_sampling')
x = Dense(shape[1] * shape[2] * shape[3], activation='relu')(latent_inputs)
x = Reshape((shape[1], shape[2], shape[3]))(x)
for i in range(4):
   x = Conv2DTranspose(filters=filters, kernel_size=kernel_size, activation='relu', strides=2, padding='same')(x)
   filters //= 2
outputs = Conv2DTranspose(filters=1, kernel_size=kernel_size, activation='sigmoid', padding='same', name='decoder_output')(x)
# instantiate decoder model
decoder = Model(latent_inputs, outputs, name='decoder')
decoder.summary()
plot_model(decoder, to_file='vae_mlp_decoder.png', show_shapes=True)
# instantiate VAE model
outputs = decoder(encoder(inputs)[2])
vae = Model(inputs, outputs, name='vae_mlp')
if __name__ == '__main__':
   args = easydict.EasyDict({
  "batchsize": 500,#40,
   "epoch": 1,#50,
   #"gpu": 0,
   "out": "result",
   "resume": False,
   #"unit": 1000
})
   #parser = argparse.ArgumentParser() removed because the parser does not work properly
   #help_ = "Load h5 model trained weights"
   #parser.add_argument("-w", "--weights", help=help_)
   #help_ = "Use mse loss instead of binary cross entropy (default)"
   #parser.add_argument("-m",
                       #"--mse",
                       #help=help_, action='store_true')
   #args = parser.parse_args()
   models = (encoder, decoder)
   data = (x_test)#, y_test removed
   # VAE loss = mse_loss or xent_loss + kl_loss
   #if args.mse:
       #reconstruction_loss = mse(inputs, outputs)
   #else:
   reconstruction_loss = binary_crossentropy(inputs,
                                                 outputs)
   reconstruction_loss *= original_dim
   kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
   kl_loss = K.sum(kl_loss, axis=-1)
   kl_loss *= -0.5
   vae_loss = K.mean(reconstruction_loss + kl_loss)
   vae.add_loss(vae_loss)
   vae.compile(optimizer='adam')
   vae.summary()
   plot_model(vae,
   to_file='vae_mlp.png',
   show_shapes=True)
         
   callbacks = []
   callbacks.append(ModelCheckpoint(filepath="model.ep{epoch:02d}.h5"))# save the model at each epoch
   callbacks.append(EarlyStopping(monitor='val_loss', patience=0, verbose=1))
   callbacks.append(LearningRateScheduler(lambda ep: float(1e-3 / 3 ** (ep * 4 // MAX_EPOCH))))
   callbacks.append(CSVLogger("history.csv"))
   
   #if args.weights:
       #vae.load_weights(args.weights)
   #else:
       # train the autoencoder
   history = vae.fit(x_train,
               epochs=epochs,
               batch_size=batch_size,
               validation_data=(x_test, None),
               callbacks=callbacks)
   
   score = model.evaluate(x_test, verbose=0)#y_test removed
   print('Test loss:', score[0])
   print('Test accuracy:', score[1])
   plt.plot(history.history["acc"], label="acc", ls="-", marker="o")
   plt.plot(history.history["val_acc"], label="val_acc", ls="-", marker="x")
   plt.ylabel("accuracy")
   plt.xlabel("epoch")
   plt.legend(loc="best")
   plt.show()
```
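For reference: `X = []` is itself valid Python, so the reported SyntaxError means the parser tripped over something before that point in the actual notebook cell (whose line numbering differs from this listing). Two syntax problems are visible in the listing either way: the body of the commented-out `plot_results` function is still indented at module level, and the `img_to_array(load_img(` calls in the train and validation loops are each missing one closing parenthesis, which Python only reports when it reaches the following statement. Below is a minimal sketch of the loading loop with balanced parentheses, keeping the paths and `target_size` from the question; it is an illustration, not the asker's final code.
```
filenames = glob.glob("./train/NORMAL_train_dataset/*.jpeg")

X = []
for filename in filenames:
    # load_img(...) is closed before img_to_array(...), so the parentheses balance
    img = img_to_array(load_img(filename,
                                color_mode="grayscale",
                                target_size=(512, 496)))
    X.append(img)
x_train = np.asarray(X)
```
Even with the syntax fixed, this listing still uses several names that are never defined or imported (`z_mean` and `z_log_var` before the `Lambda` layer, `LearningRateScheduler`, `CSVLogger`, `MAX_EPOCH`, and `model` in `model.evaluate`), so those would be the next errors to expect.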
### Additional information (framework/tool versions, etc.)
I am implementing this on Ubuntu, using an Anaconda virtual environment.
  • Python 3.x

2 Posted the entire code

SuzuAya (score 45)

Posted 2019/05/12 22:11

Python 3: "invalid syntax" error will not go away
### Background / what I want to achieve
I am writing VAE code in Python using a dataset of my own.
In the part that prepares the training images, the error message below appears.
Usually, when this error shows up, deleting the code and typing it in again clears it, but this time it persists no matter how many times I retype the code, so I am asking here.
If anyone knows a solution, I would be grateful for your advice.
### Problem / error message
```
File "<ipython-input-14-7dac0b4001f0>", line 111
   X = []
   ^
SyntaxError: invalid syntax
```
### Relevant source code
```
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Lambda, Input, Dense
from keras.models import Model
from keras.models import Sequential, model_from_json
from keras.losses import mse, binary_crossentropy
from keras.layers import Conv2D, Flatten, Lambda
from keras.layers import Reshape, Conv2DTranspose
from keras.utils import plot_model, np_utils
from keras.utils import plot_model
from keras.callbacks import Callback, EarlyStopping, TensorBoard, ModelCheckpoint
from keras import optimizers
from keras import backend as K
from keras.preprocessing.image import array_to_img, img_to_array,load_img
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
import matplotlib.pyplot as plt
import argparse
import os
import re
import glob
import random as rn
import tensorflow as tf
import cv2
import easydict
from PIL import Image
#from google.colab.patches import cv2_imshow  
 
import warnings  
warnings.filterwarnings('ignore')  
 
%matplotlib inline  
 
def sampling(args):  
   z_mean, z_log_var = args  
   batch = K.shape(z_mean)[0]  
   dim = K.int_shape(z_mean)[1]  
   # by default, random_normal has mean=0 and std=1.0  
   epsilon = K.random_normal(shape=(batch, dim))  
   return z_mean + K.exp(0.5 * z_log_var) * epsilon  
 
#def plot_results(models,  
                #data,  
                #batch_size=500,#128,  
                #model_name="vae_OCT"):  
   """Plots labels and MNIST digits as function of 2-dim latent vector  
 
   # Arguments  
       models (tuple): encoder and decoder models  
       data (tuple): test data and label  
       batch_size (int): prediction batch size  
   encoder, decoder = models  
   x_test = data #, y_test removed
   os.makedirs(model_name, exist_ok=True)  
       model_name (string): which model is using this function  
   """  
 
   filename = os.path.join('.', "vae_mean.png")  
   # display a 2D plot of the digit classes in the latent space  
   z_mean, _, _ = encoder.predict(x_test,  
                                  batch_size=batch_size)  
   #plt.figure(figsize=(12, 10))  
   #plt.scatter(z_mean[:, 0], z_mean[:, 1])#c=y_test削除  
   #plt.colorbar()  
   #plt.xlabel("z[0]")  
   #plt.ylabel("z[1]")  
   #plt.savefig(filename)  
   #plt.show()  
 
   #filename = os.path.join(model_name, "digits_over_latent.png")  
   # display a 30x30 2D manifold of digits  
   #n = 30  
   #digit_size = 496  the plotting part is all commented out for now because it does not work
   #digit_size_width = 512 #width added
   #digit_size_height = 496 #height added
   #figure = np.zeros((digit_size_width * n, digit_size_height * n))#width, height added
   # linearly spaced coordinates corresponding to the 2D plot  
   # of digit classes in the latent space  
   #grid_x = np.linspace(-4, 4, n)  
   #grid_y = np.linspace(-4, 4, n)[::-1]  
 
   #for i, yi in enumerate(grid_y):  
       #for j, xi in enumerate(grid_x):  
           #z_sample = np.array([[xi, yi]])  
           #x_decoded = decoder.predict(z_sample)  
            #digit = x_decoded[0].reshape(digit_size_width, digit_size_height)#width, height added
            #figure[i * digit_size_width: (i + 1) * digit_size_height,#width, height added
                   #j * digit_size_width: (j + 1) * digit_size_height] = digit#width, height added
 
   #plt.figure(figsize=(10, 10))  
   #start_range = digit_size // 2  
   #end_range = n * digit_size + start_range + 1  
   #pixel_range = np.arange(start_range, end_range, digit_size)  
   #sample_range_x = np.round(grid_x, 1)  
   #sample_range_y = np.round(grid_y, 1)  
   #plt.xticks(pixel_range, sample_range_x)  
   #plt.yticks(pixel_range, sample_range_y)  
   #plt.xlabel("z[0]")  
   #plt.ylabel("z[1]")  
   #plt.imshow(figure, cmap='Greys_r')  
   #plt.savefig(filename)  
   #plt.show()  
#original dataset
#train
filenames = glob.glob("./train/NORMAL_train_dataset/*.jpeg")
x_train = []  
                                 
X = []                         
 
for filename in filenames:
   img = img_to_array(load_img(
   filename, color_mode = "grayscale"
   , target_size=(512,496)))
   X.append(img)
   filename, color_mode = "grayscale", target_size=(512,496))
   x_train.append(img)
x_train = np.asarray(X)
#test
filenames = glob.glob("./validation/*.jpeg")
x_test = []
for filename in filenames:
   img = img_to_array(load_img(
   filename, color_mode = "grayscale", target_size=(512,496))
   x_test.append(img)
x_test = np.asarray(x_test)
image_size = x_train.shape[1]
original_dim = 512 * 496 *1 #3 removed
x_train = np.reshape(x_train, [-1, original_dim,1])# x_train = np.reshape(x_train, [-1, original_dim])
x_test = np.reshape(x_test, [-1, original_dim,1])# x_test = np.reshape(x_test, [-1, original_dim])
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
#train_generator = train_datagen.flow(x_train)#added: create generator
#test_generator = test_datagen.flow(x_test)#added: create generator
print(x_train.shape)
print(x_test.shape)
# network parameters
input_shape = (512, 496, 1)# (original_dim,)
kernel_size = 3
filters = 16
#intermediate_dim = 512
batch_size = 500#128
latent_dim = 2# Dimensionality of the latent space: a plane  https://fisproject.jp/2018/09/vae-with-python-keras/#vae-with-keras
epochs = 5#1#50
# build encoder model
inputs = Input(shape=input_shape, name='encoder_input')
x = inputs
for i in range(4):
   filters *= 2
   x = Conv2D(filters=filters,kernel_size=kernel_size,activation='relu',strides=2,padding='same')(x)
# shape info needed to build decoder model; handy e.g. when taking an image's shape and dividing by it. Note: this works only with the TensorFlow backend. https://www.mathgram.xyz/entry/keras/backend
shape = K.int_shape(x)
# use reparameterization trick to push the sampling out as input
# note that "output_shape" isn't necessary with the TensorFlow backend
z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])
# instantiate encoder model
encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')
encoder.summary()
plot_model(encoder, to_file='vae_mlp_encoder.png', show_shapes=True)
# build decoder model
latent_inputs = Input(shape=(latent_dim,), name='z_sampling')
x = Dense(shape[1] * shape[2] * shape[3], activation='relu')(latent_inputs)
x = Reshape((shape[1], shape[2], shape[3]))(x)
for i in range(4):
   x = Conv2DTranspose(filters=filters, kernel_size=kernel_size, activation='relu', strides=2, padding='same')(x)
   filters //= 2
outputs = Conv2DTranspose(filters=1, kernel_size=kernel_size, activation='sigmoid', padding='same', name='decoder_output')(x)
# instantiate decoder model
decoder = Model(latent_inputs, outputs, name='decoder')
decoder.summary()
plot_model(decoder, to_file='vae_mlp_decoder.png', show_shapes=True)
# instantiate VAE model
outputs = decoder(encoder(inputs)[2])
vae = Model(inputs, outputs, name='vae_mlp')
if __name__ == '__main__':
   args = easydict.EasyDict({
  "batchsize": 500,#40,
   "epoch": 1,#50,
   #"gpu": 0,
   "out": "result",
   "resume": False,
   #"unit": 1000
})
   #parser = argparse.ArgumentParser() removed because the parser does not work properly
   #help_ = "Load h5 model trained weights"
   #parser.add_argument("-w", "--weights", help=help_)
   #help_ = "Use mse loss instead of binary cross entropy (default)"
   #parser.add_argument("-m",
                       #"--mse",
                       #help=help_, action='store_true')
   #args = parser.parse_args()
   models = (encoder, decoder)
   data = (x_test)#, y_test removed
   # VAE loss = mse_loss or xent_loss + kl_loss
   #if args.mse:
       #reconstruction_loss = mse(inputs, outputs)
   #else:
   reconstruction_loss = binary_crossentropy(inputs,
                                                 outputs)
   reconstruction_loss *= original_dim
   kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
   kl_loss = K.sum(kl_loss, axis=-1)
   kl_loss *= -0.5
   vae_loss = K.mean(reconstruction_loss + kl_loss)
   vae.add_loss(vae_loss)
   vae.compile(optimizer='adam')
   vae.summary()
   plot_model(vae,
   to_file='vae_mlp.png',
   show_shapes=True)
         
   callbacks = []
   callbacks.append(ModelCheckpoint(filepath="model.ep{epoch:02d}.h5"))# save the model at each epoch
   callbacks.append(EarlyStopping(monitor='val_loss', patience=0, verbose=1))
   callbacks.append(LearningRateScheduler(lambda ep: float(1e-3 / 3 ** (ep * 4 // MAX_EPOCH))))
   callbacks.append(CSVLogger("history.csv"))
   
x_train = np.asarray(X)
   #if args.weights:
       #vae.load_weights(args.weights)
   #else:
       # train the autoencoder
   history = vae.fit(x_train,
               epochs=epochs,
               batch_size=batch_size,
               validation_data=(x_test, None),
               callbacks=callbacks)
   
   score = model.evaluate(x_test, verbose=0)#y_test removed
   print('Test loss:', score[0])
   print('Test accuracy:', score[1])
   plt.plot(history.history["acc"], label="acc", ls="-", marker="o")
   plt.plot(history.history["val_acc"], label="val_acc", ls="-", marker="x")
   plt.ylabel("accuracy")
   plt.xlabel("epoch")
   plt.legend(loc="best")
   plt.show()
```
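One more inconsistency in this revision's full listing: the arrays are reshaped to `[-1, original_dim, 1]`, but the encoder is built from `Input(shape=(512, 496, 1))` followed by `Conv2D` layers, which expect 4-D image batches. Below is a minimal sketch of preprocessing that matches the convolutional encoder, assuming the images are meant to stay in their `(512, 496, 1)` form as `input_shape` suggests.
```
# Sketch: keep the loaded images as (height, width, channels) tensors so they
# match Input(shape=(512, 496, 1)) and the Conv2D stack, instead of flattening them.
x_train = np.asarray(X).astype('float32') / 255.0       # shape: (n_train, 512, 496, 1)
x_test = np.asarray(x_test).astype('float32') / 255.0   # shape: (n_test, 512, 496, 1)
print(x_train.shape, x_test.shape)
```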
### Additional information (framework/tool versions, etc.)
I am implementing this on Ubuntu, using an Anaconda virtual environment.
  • Python 3.x

1 Corrected the code

SuzuAya (score 45)

Posted 2019/05/12 20:38

Python 3: "invalid syntax" error will not go away
### Background / what I want to achieve
I am writing VAE code in Python using a dataset of my own.
In the part that prepares the training images, the error message below appears.
Usually, when this error shows up, deleting the code and typing it in again clears it, but this time it persists no matter how many times I retype the code, so I am asking here.
If anyone knows a solution, I would be grateful for your advice.
### Problem / error message
```
File "<ipython-input-14-7dac0b4001f0>", line 111
   X = []
   ^
SyntaxError: invalid syntax
```
### Relevant source code
```
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Lambda, Input, Dense
from keras.models import Model
from keras.models import Sequential, model_from_json
from keras.losses import mse, binary_crossentropy
from keras.layers import Conv2D, Flatten, Lambda
from keras.layers import Reshape, Conv2DTranspose
from keras.utils import plot_model, np_utils
from keras.utils import plot_model
from keras.callbacks import Callback, EarlyStopping, TensorBoard, ModelCheckpoint
from keras import optimizers
from keras import backend as K
from keras.preprocessing.image import array_to_img, img_to_array,load_img
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
import matplotlib.pyplot as plt
import argparse
import os
import re
import glob
import random as rn
import tensorflow as tf
import cv2
import easydict
from PIL import Image
#original dataset
#train
filenames = glob.glob("./train/NORMAL_train_dataset/*.jpeg")
                                 
x_train = []                       
X = []                       
for filename in filenames:
   img = img_to_array(load_img(
   filename, color_mode = "grayscale"
   , target_size=(512,496)))
   x_train.append(img)
   X.append(img)
   
x_train = np.asarray(x_train)
x_train = np.asarray(X)
```
### Additional information (framework/tool versions, etc.)
I am implementing this on Ubuntu, using an Anaconda virtual environment.
  • Python 3.x
