'''
#Train a simple deep CNN on the CIFAR10 small images dataset.
It gets to 75% validation accuracy in 25 epochs, and 79% after 50 epochs.
(it's still underfitting at that point, though).
'''
import keras
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
import os

# Training hyperparameters.
batch_size = 32
num_classes = 10
epochs = 100
data_augmentation = True
num_predictions = 20
# save_dir = os.path.join(os.getcwd(), 'saved_models')
save_dir = "."
model_name = 'cnn_model.h5'
w_name = 'cnn_weights.h5'

# The data, split between train and test sets:
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

# Build the CNN: two conv blocks (32 then 64 filters), each ending in
# 2x2 max-pooling + dropout, followed by a dense classifier head.
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same',
                 input_shape=x_train.shape[1:]))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))

# initiate RMSprop optimizer
opt = keras.optimizers.RMSprop(lr=0.0001, decay=1e-6)

# Let's train the model using RMSprop
model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])

# Scale pixel values from [0, 255] to [0, 1].
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255

if not data_augmentation:
    print('Not using data augmentation.')
    model.fit(x_train, y_train,
              batch_size=batch_size,
              epochs=epochs,
              validation_data=(x_test, y_test),
              shuffle=True)
else:
    print('Using real-time data augmentation.')
    # This will do preprocessing and realtime data augmentation:
    datagen = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        zca_epsilon=1e-06,  # epsilon for ZCA whitening
        rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
        # randomly shift images horizontally (fraction of total width)
        width_shift_range=0.1,
        # randomly shift images vertically (fraction of total height)
        height_shift_range=0.1,
        shear_range=0.,  # set range for random shear
        zoom_range=0.,  # set range for random zoom
        channel_shift_range=0.,  # set range for random channel shifts
        # set mode for filling points outside the input boundaries
        fill_mode='nearest',
        cval=0.,  # value used for fill_mode = "constant"
        horizontal_flip=True,  # randomly flip images
        vertical_flip=False,  # randomly flip images
        # set rescaling factor (applied before any other transformation)
        rescale=None,
        # set function that will be applied on each input
        preprocessing_function=None,
        # image data format, either "channels_first" or "channels_last"
        data_format=None,
        # fraction of images reserved for validation (strictly between 0 and 1)
        validation_split=0.0)

    # Compute quantities required for feature-wise normalization
    # (std, mean, and principal components if ZCA whitening is applied).
    datagen.fit(x_train)

    # Fit the model on the batches generated by datagen.flow().
    # NOTE(review): with standalone `keras` on a TF 1.x backend, `workers=4`
    # runs the generator in threads that may touch the TF session/graph and
    # can raise "FailedPreconditionError: Error while reading resource
    # variable ... from Container: localhost" — if that occurs, try
    # `workers=1` (or switch to `tensorflow.keras` imports) — TODO confirm.
    model.fit_generator(datagen.flow(x_train, y_train,
                                     batch_size=batch_size),
                        epochs=epochs,
                        validation_data=(x_test, y_test),
                        workers=4)

# Save model and weights
if not os.path.isdir(save_dir):
    os.makedirs(save_dir)
model_path = os.path.join(save_dir, model_name)
model.save(model_path)
print('Saved trained model at %s ' % model_path)

w_path = os.path.join(save_dir, w_name)
model.save_weights(w_path)

# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
非常にご親切な回答、誠にありがとうございます。
ご助言を踏まえ、上記pyファイルを反映しましたところ、以下のエラーが発生しております。たびたび恐縮でございますが、解決策等、ございますでしょうか。
tensorflow.python.framework.errors_impl.FailedPreconditionError: Error while reading resource variable conv2d_10/kernel from Container: localhost. This could mean that the variable was uninitialized. Not found: Container localhost does not exist. (Could not find resource: localhost/conv2d_10/kernel)
[[{{node conv2d_10/convolution/ReadVariableOp}}]]
バッドをするには、ログインかつ
こちらの条件を満たす必要があります。
2020/02/16 02:23 編集
2020/02/16 02:42