Question edit history

3 Typo correction

SuzuAya

SuzuAya score 47

Posted 2019/04/18 21:13

Python3: How to resolve InvalidArgumentError (see above for traceback): Incompatible shapes
### Background / what I want to achieve
While implementing a Keras VAE with my own dataset, I got the following error. Even looking at the part of the code it refers to, I cannot tell how to fix it, so I am asking here.
### Problem / error message
InvalidArgumentError                     Traceback (most recent call last)
InvalidArgumentError: Incompatible shapes: [12800] vs. [450]
   
   
### Relevant source code
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Dense, Input
from keras.layers import Conv2D, Flatten, Lambda
from keras.layers import Reshape, Conv2DTranspose
from keras.models import Model
from keras.losses import mse, binary_crossentropy
from keras.utils import plot_model
from keras import backend as K
from keras import optimizers
import numpy as np
import matplotlib.pyplot as plt
import argparse
import tensorflow as tf
import random as rn
import os
import easydict
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline
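# reparameterization trick: z = z_mean + exp(0.5 * z_log_var) * epsilon, with epsilon ~ N(0, 1)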
def sampling(args):
   z_mean, z_log_var = args
   batch = K.shape(z_mean)[0]
   dim = K.int_shape(z_mean)[1]
   # by default, random_normal has mean=0 and std=1.0  
   epsilon = K.random_normal(shape=(batch, dim))
   return z_mean + K.exp(0.5 * z_log_var) * epsilon
def plot_results(models,
                data,
                batch_size=50,
                model_name="vae_mnist"):
 
   encoder, decoder = models
   os.makedirs(model_name, exist_ok=True)
   filename = os.path.join(model_name, "vae_mean.png")
   
   z_mean, _, _ = encoder.predict(x_test,
                                  batch_size=batch_size)
   plt.figure(figsize=(12, 10))
   plt.scatter(z_mean[:, 0], z_mean[:, 1])
   plt.colorbar()
   plt.xlabel("z[0]")
   plt.ylabel("z[1]")
   plt.savefig(filename)
   plt.show()
   filename = os.path.join(model_name, "digits_over_latent.png")
   
   n = 30
   digit_size = 224
   figure = np.zeros((digit_size * n, digit_size * n))
   
   grid_x = np.linspace(-4, 4, n)
   grid_y = np.linspace(-4, 4, n)[::-1]
   for i, yi in enumerate(grid_y):
       for j, xi in enumerate(grid_x):
           z_sample = np.array([[xi, yi]])
           x_decoded = decoder.predict(z_sample)
           digit = x_decoded[0].reshape(digit_size, digit_size)
           figure[i * digit_size: (i + 1) * digit_size,
                  j * digit_size: (j + 1) * digit_size] = digit
   plt.figure(figsize=(10, 10))
   start_range = digit_size // 2
   end_range = n * digit_size + start_range + 1
   pixel_range = np.arange(start_range, end_range, digit_size)
   sample_range_x = np.round(grid_x, 1)
   sample_range_y = np.round(grid_y, 1)
   plt.xticks(pixel_range, sample_range_x)
   plt.yticks(pixel_range, sample_range_y)
   plt.xlabel("z[0]")
   plt.ylabel("z[1]")
   plt.imshow(figure, cmap='Greys_r')
   plt.savefig(filename)
   plt.show() 
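# load the dataset saved as .npy arrays, reshape to (N, H, W, 1), and scale pixel values to [0, 1]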
x_train_tom = np.load('./folder_a.npy')
x_test_tom = np.load('./folder_b.npy')
image_size = x_train_tom.shape[1]
x_train = np.reshape(x_train_tom, [-1, image_size, image_size, 1])
x_test = np.reshape(x_test_tom, [-1, image_size, image_size, 1])
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
print(x_train.shape,x_test.shape)
input_shape = (image_size,image_size, 1)
batch_size = 50
kernel_size = 3
filters = 16
latent_dim = 2
epochs = 50
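# build encoder model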
inputs = Input(shape=input_shape, name='encoder_input')
x = inputs
for i in range(4):
   filters *= 2
   x = Conv2D(filters=filters,
              kernel_size=kernel_size,
              activation='relu',
              strides=2,
              padding='same')(x)
shape = K.int_shape(x)
x = Flatten()(x)
x = Dense(64, activation='relu')(x)
z_mean = Dense(latent_dim, name='z_mean')(x)
z_log_var = Dense(latent_dim, name='z_log_var')(x)
z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])
encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')
encoder.summary()
plot_model(encoder, to_file='vae_cnn_encoder.png', show_shapes=True)
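# build decoder model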
latent_inputs = Input(shape=(latent_dim,), name='z_sampling')
x = Dense(shape[1] * shape[2] * shape[3], activation='relu')(latent_inputs)
x = Reshape((shape[1], shape[2], shape[3]))(x)
for i in range(4):
   x = Conv2DTranspose(filters=filters,
                       kernel_size=kernel_size,
                       activation='relu',
                       strides=2,
                       padding='same')(x)
   filters //= 2
outputs = Conv2DTranspose(filters=1,
                         kernel_size=kernel_size,
                         activation='sigmoid',
                         padding='same',
                         name='decoder_output')(x)
decoder = Model(latent_inputs, outputs, name='decoder')
decoder.summary()
plot_model(decoder, to_file='vae_cnn_decoder.png', show_shapes=True)
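# instantiate VAE model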
outputs = decoder(encoder(inputs)[2])
vae = Model(inputs, outputs, name='vae')
def plot_history(history):
   
   plt.plot(history.history['loss'])
   plt.plot(history.history['val_loss'])
   plt.title('model accuracy')
   plt.xlabel('epoch')
   plt.ylabel('accuracy')
   plt.legend(['acc', 'val_acc'], loc='lower right')
   plt.show()
   plt.plot(history.history['loss'])
   plt.plot(history.history['val_loss'])
   plt.title('model loss')
   plt.xlabel('epoch')
   plt.ylabel('loss')
   plt.legend(['loss', 'val_loss'], loc='lower right')
   plt.savefig('loss.png') # -----(2)
   plt.show()
if __name__ == '__main__':
   args = easydict.EasyDict({
       "batchsize": 50,
       "epoch": 50,
       "gpu": 0,
       "out": "result",
       "resume": False,
       "unit": 1000
})
   models = (encoder, decoder)
   
   
   os.environ['PYTHONHASHSEED'] = '0'
   np.random.seed(5)
   rn.seed(5)
   config = tf.ConfigProto(
       gpu_options=tf.GPUOptions(
           visible_device_list="0,1", 
           allow_growth=True
       )
   )
   tf.set_random_seed(5)
   sess = tf.Session(graph=tf.get_default_graph(), config=config)
   K.set_session(sess)
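   # VAE loss = reconstruction loss (binary cross-entropy over all pixels) + KL divergence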
   reconstruction_loss = binary_crossentropy(K.flatten(inputs),
                                                 K.flatten(outputs))
   reconstruction_loss *= image_size * image_size
   kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
   kl_loss = K.sum(kl_loss, axis=-1)
   kl_loss *= -0.5
   vae_loss = K.mean(reconstruction_loss + kl_loss)
   vae.add_loss(vae_loss)
   Adam = optimizers.Adam(lr=0.0005)
   vae.compile(optimizer=Adam)
   vae.summary()
   plot_model(vae, to_file='vae_cnn.png', show_shapes=True)
   history = vae.fit(x_train,
             epochs=epochs,
             batch_size=batch_size,
             validation_data=(x_test, None))
   open('vae_cnn.json', "w").write(vae.to_json())
   vae.save('vae_cnn.h5')
   plot_results(models, data, batch_size=batch_size, model_name="vae_cnn")
   plot_history(history)
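
One place a size mismatch like the one in the error can surface is the reconstruction loss, which flattens `inputs` and `outputs` before computing the cross-entropy. As a rough check (a sketch reusing the variables defined above, not a fix), the static shapes of the data and of the model's input and reconstruction can be compared directly:

```python
# Rough diagnostic sketch reusing the variables defined above (not a fix):
# the reconstruction loss flattens `inputs` and `outputs`, so the input and
# the decoder's reconstruction must contain the same number of elements.
print('x_train:', x_train.shape, 'x_test:', x_test.shape)
print('encoder input :', K.int_shape(inputs))   # (None, image_size, image_size, 1)
print('reconstruction:', K.int_shape(outputs))  # should match the encoder input shape
```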
# Additional notes
It seems the keras / tensorflow versions are sometimes the cause of this kind of problem, but switching to the versions that worked for other people made no difference.
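For reference, the exact versions in use can be printed with a small snippet like the following:

```python
# Print the library versions in use, to make the environment explicit.
import keras
import tensorflow as tf
print('keras:', keras.__version__, 'tensorflow:', tf.__version__)
```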
I apologize for the length of the code; I would appreciate any advice. Thank you very much.
  • Python 3.x

2 Typo correction

SuzuAya

SuzuAya score 47

Posted 2019/04/18 21:13

Python3: How to resolve InvalidArgumentError (see above for traceback): Incompatible shapes
### Background / what I want to achieve
While implementing a Keras VAE with my own dataset, I got the following error. Even looking at the part of the code it refers to, I cannot tell how to fix it, so I am asking here.
### Problem / error message
InvalidArgumentError                     Traceback (most recent call last)
InvalidArgumentError: Incompatible shapes: [12800] vs. [450]
   
   
### Relevant source code
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Dense, Input
from keras.layers import Conv2D, Flatten, Lambda
from keras.layers import Reshape, Conv2DTranspose
from keras.models import Model
from keras.losses import mse, binary_crossentropy
from keras.utils import plot_model
from keras import backend as K
from keras import optimizers
import numpy as np
import matplotlib.pyplot as plt
import argparse
import tensorflow as tf
import random as rn
import os
import easydict
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline
def sampling(args):
   z_mean, z_log_var = args
   batch = K.shape(z_mean)[0]
   dim = K.int_shape(z_mean)[1]
   # by default, random_normal has mean=0 and std=1.0
   epsilon = K.random_normal(shape=(batch, dim))
   return z_mean + K.exp(0.5 * z_log_var) * epsilon
def plot_results(models,
                data,
                batch_size=50,
                model_name="vae_mnist"):
 
   encoder, decoder = models
   #x_test, y_test = data  
   os.makedirs(model_name, exist_ok=True)
   filename = os.path.join(model_name, "vae_mean.png")
   
   z_mean, _, _ = encoder.predict(x_test,
                                  batch_size=batch_size)
   plt.figure(figsize=(12, 10))
   plt.scatter(z_mean[:, 0], z_mean[:, 1])
   plt.colorbar()
   plt.xlabel("z[0]")
   plt.ylabel("z[1]")
   plt.savefig(filename)
   plt.show()
   filename = os.path.join(model_name, "digits_over_latent.png")
   
   n = 30
   digit_size = 224
   figure = np.zeros((digit_size * n, digit_size * n))
   
   grid_x = np.linspace(-4, 4, n)
   grid_y = np.linspace(-4, 4, n)[::-1]
   for i, yi in enumerate(grid_y):
       for j, xi in enumerate(grid_x):
           z_sample = np.array([[xi, yi]])
           x_decoded = decoder.predict(z_sample)
           digit = x_decoded[0].reshape(digit_size, digit_size)
           figure[i * digit_size: (i + 1) * digit_size,
                  j * digit_size: (j + 1) * digit_size] = digit
   plt.figure(figsize=(10, 10))
   start_range = digit_size // 2
   end_range = n * digit_size + start_range + 1
   pixel_range = np.arange(start_range, end_range, digit_size)
   sample_range_x = np.round(grid_x, 1)
   sample_range_y = np.round(grid_y, 1)
   plt.xticks(pixel_range, sample_range_x)
   plt.yticks(pixel_range, sample_range_y)
   plt.xlabel("z[0]")
   plt.ylabel("z[1]")
   plt.imshow(figure, cmap='Greys_r')
   plt.savefig(filename)
   plt.show() 
x_train_tom = np.load('./folder_a.npy')
x_test_tom = np.load('./folder_b.npy')
image_size = x_train_tom.shape[1]
x_train = np.reshape(x_train_tom, [-1, image_size, image_size, 1])
x_test = np.reshape(x_test_tom, [-1, image_size, image_size, 1])
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
print(x_train.shape,x_test.shape)
input_shape = (image_size,image_size, 1)
batch_size = 50
kernel_size = 3
filters = 16
latent_dim = 2
epochs = 50
inputs = Input(shape=input_shape, name='encoder_input')
x = inputs
for i in range(4):
   filters *= 2
   x = Conv2D(filters=filters,
              kernel_size=kernel_size,
              activation='relu',
              strides=2,
              padding='same')(x)
shape = K.int_shape(x)
x = Flatten()(x)
x = Dense(64, activation='relu')(x)
z_mean = Dense(latent_dim, name='z_mean')(x)
z_log_var = Dense(latent_dim, name='z_log_var')(x)
z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])
encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')
encoder.summary()
plot_model(encoder, to_file='vae_cnn_encoder.png', show_shapes=True)
latent_inputs = Input(shape=(latent_dim,), name='z_sampling')
x = Dense(shape[1] * shape[2] * shape[3], activation='relu')(latent_inputs)
x = Reshape((shape[1], shape[2], shape[3]))(x)
for i in range(4):
   x = Conv2DTranspose(filters=filters,
                       kernel_size=kernel_size,
                       activation='relu',
                       strides=2,
                       padding='same')(x)
   filters //= 2
outputs = Conv2DTranspose(filters=1,
                         kernel_size=kernel_size,
                         activation='sigmoid',
                         padding='same',
                         name='decoder_output')(x)
decoder = Model(latent_inputs, outputs, name='decoder')
decoder.summary()
plot_model(decoder, to_file='vae_cnn_decoder.png', show_shapes=True)
outputs = decoder(encoder(inputs)[2])
vae = Model(inputs, outputs, name='vae')
def plot_history(history):
   
   plt.plot(history.history['loss'])
   plt.plot(history.history['val_loss'])
   plt.title('model accuracy')
   plt.xlabel('epoch')
   plt.ylabel('accuracy')
   plt.legend(['acc', 'val_acc'], loc='lower right')
   plt.show()
   plt.plot(history.history['loss'])
   plt.plot(history.history['val_loss'])
   plt.title('model loss')
   plt.xlabel('epoch')
   plt.ylabel('loss')
   plt.legend(['loss', 'val_loss'], loc='lower right')
   plt.savefig('loss.png') # -----(2)
   plt.show()
if __name__ == '__main__':
   args = easydict.EasyDict({
       "batchsize": 50,
       "epoch": 50,
       "gpu": 0,
       "out": "result",
       "resume": False,
       "unit": 1000
})
   models = (encoder, decoder)
   
   
   os.environ['PYTHONHASHSEED'] = '0'
   np.random.seed(5)
   rn.seed(5)
   config = tf.ConfigProto(
       gpu_options=tf.GPUOptions(
           visible_device_list="0,1", 
           allow_growth=True
       )
   )
   tf.set_random_seed(5)
   sess = tf.Session(graph=tf.get_default_graph(), config=config)
   K.set_session(sess)
   reconstruction_loss = binary_crossentropy(K.flatten(inputs),
                                                 K.flatten(outputs))
   reconstruction_loss *= image_size * image_size
   kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
   kl_loss = K.sum(kl_loss, axis=-1)
   kl_loss *= -0.5
   vae_loss = K.mean(reconstruction_loss + kl_loss)
   vae.add_loss(vae_loss)
   Adam = optimizers.Adam(lr=0.0005)
   vae.compile(optimizer=Adam)
   vae.summary()
   plot_model(vae, to_file='vae_cnn.png', show_shapes=True)
   history = vae.fit(x_train,
             epochs=epochs,
             batch_size=batch_size,
             validation_data=(x_test, None))
   open('vae_cnn.json', "w").write(vae.to_json())
   vae.save('vae_cnn.h5')
   plot_results(models, data, batch_size=batch_size, model_name="vae_cnn")
   plot_history(history)
# Additional notes
It seems the keras / tensorflow versions are sometimes the cause of this kind of problem, but switching to the versions that worked for other people made no difference.
I apologize for the length of the code; I would appreciate any advice. Thank you very much.
  • Python 3.x

1 Changed the error details

SuzuAya

SuzuAya score 47

Posted 2019/04/18 21:12

Python3: How to resolve ValueError: Input 0 is incompatible with layer conv2d_61
Python3: How to resolve InvalidArgumentError (see above for traceback): Incompatible shapes
### Background / what I want to achieve
While implementing a Keras VAE with my own dataset, I got the following error. Even looking at the part of the code it refers to, I could not tell how to fix it, and since I could not find a similar question, I am asking here.
https://github.com/keras-team/keras/blob/master/examples/variational_autoencoder.py
While implementing a Keras VAE with my own dataset, I got the following error. Even looking at the part of the code it refers to, I cannot tell how to fix it, so I am asking here.
### Problem / error message
ValueError: Input 0 is incompatible with layer conv2d_61: expected ndim=4, found ndim=2
InvalidArgumentError                     Traceback (most recent call last)
InvalidArgumentError: Incompatible shapes: [12800] vs. [450]
   
   
### Relevant source code
# build encoder model
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Dense, Input
from keras.layers import Conv2D, Flatten, Lambda
from keras.layers import Reshape, Conv2DTranspose
from keras.models import Model
from keras.losses import mse, binary_crossentropy
from keras.utils import plot_model
from keras import backend as K
from keras import optimizers
import numpy as np
import matplotlib.pyplot as plt
import argparse
import tensorflow as tf
import random as rn
import os
import easydict
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline
def sampling(args):
   z_mean, z_log_var = args
   batch = K.shape(z_mean)[0]
   dim = K.int_shape(z_mean)[1]
   # by default, random_normal has mean=0 and std=1.0
   epsilon = K.random_normal(shape=(batch, dim))
   return z_mean + K.exp(0.5 * z_log_var) * epsilon
def plot_results(models,
                data,
                batch_size=50,
                model_name="vae_mnist"):
 
   encoder, decoder = models
   #x_test, y_test = data
   os.makedirs(model_name, exist_ok=True)
   filename = os.path.join(model_name, "vae_mean.png")
   
   z_mean, _, _ = encoder.predict(x_test,
                                  batch_size=batch_size)
   plt.figure(figsize=(12, 10))
   plt.scatter(z_mean[:, 0], z_mean[:, 1])
   plt.colorbar()
   plt.xlabel("z[0]")
   plt.ylabel("z[1]")
   plt.savefig(filename)
   plt.show()
   filename = os.path.join(model_name, "digits_over_latent.png")
   
   n = 30
   digit_size = 224
   figure = np.zeros((digit_size * n, digit_size * n))
   
   grid_x = np.linspace(-4, 4, n)
   grid_y = np.linspace(-4, 4, n)[::-1]
   for i, yi in enumerate(grid_y):
       for j, xi in enumerate(grid_x):
           z_sample = np.array([[xi, yi]])
           x_decoded = decoder.predict(z_sample)
           digit = x_decoded[0].reshape(digit_size, digit_size)
           figure[i * digit_size: (i + 1) * digit_size,
                  j * digit_size: (j + 1) * digit_size] = digit
   plt.figure(figsize=(10, 10))
   start_range = digit_size // 2
   end_range = n * digit_size + start_range + 1
   pixel_range = np.arange(start_range, end_range, digit_size)
   sample_range_x = np.round(grid_x, 1)
   sample_range_y = np.round(grid_y, 1)
   plt.xticks(pixel_range, sample_range_x)
   plt.yticks(pixel_range, sample_range_y)
   plt.xlabel("z[0]")
   plt.ylabel("z[1]")
   plt.imshow(figure, cmap='Greys_r')
   plt.savefig(filename)
   plt.show() 
x_train_tom = np.load('./folder_a.npy')
x_test_tom = np.load('./folder_b.npy')
image_size = x_train_tom.shape[1]
x_train = np.reshape(x_train_tom, [-1, image_size, image_size, 1])
x_test = np.reshape(x_test_tom, [-1, image_size, image_size, 1])
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
print(x_train.shape,x_test.shape)
input_shape = (image_size,image_size, 1)
batch_size = 50
kernel_size = 3
filters = 16
latent_dim = 2
epochs = 50
inputs = Input(shape=input_shape, name='encoder_input')
x = inputs
for i in range(4):
   filters *= 2
   x = Conv2D(filters=filters,kernel_size=kernel_size,activation='relu',strides=2,padding='same')(x)
   x = Conv2D(filters=filters,
              kernel_size=kernel_size,
              activation='relu',
              strides=2,
              padding='same')(x)
shape = K.int_shape(x)
x = Flatten()(x)
x = Dense(64, activation='relu')(x)
z_mean = Dense(latent_dim, name='z_mean')(x)
z_log_var = Dense(latent_dim, name='z_log_var')(x)
z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])
encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')
encoder.summary()
plot_model(encoder, to_file='vae_cnn_encoder.png', show_shapes=True)
latent_inputs = Input(shape=(latent_dim,), name='z_sampling')
x = Dense(shape[1] * shape[2] * shape[3], activation='relu')(latent_inputs)
x = Reshape((shape[1], shape[2], shape[3]))(x)
for i in range(4):
   x = Conv2DTranspose(filters=filters,
                       kernel_size=kernel_size,
                       activation='relu',
                       strides=2,
                       padding='same')(x)
   filters //= 2
outputs = Conv2DTranspose(filters=1,
                         kernel_size=kernel_size,
                         activation='sigmoid',
                         padding='same',
                         name='decoder_output')(x)
decoder = Model(latent_inputs, outputs, name='decoder')
decoder.summary()
plot_model(decoder, to_file='vae_cnn_decoder.png', show_shapes=True)
outputs = decoder(encoder(inputs)[2])
vae = Model(inputs, outputs, name='vae')
def plot_history(history):
   
   plt.plot(history.history['loss'])
   plt.plot(history.history['val_loss'])
   plt.title('model accuracy')
   plt.xlabel('epoch')
   plt.ylabel('accuracy')
   plt.legend(['acc', 'val_acc'], loc='lower right')
   plt.show()
   plt.plot(history.history['loss'])
   plt.plot(history.history['val_loss'])
   plt.title('model loss')
   plt.xlabel('epoch')
   plt.ylabel('loss')
   plt.legend(['loss', 'val_loss'], loc='lower right')
   plt.savefig('loss.png') # -----(2)
   plt.show()
if __name__ == '__main__':
   args = easydict.EasyDict({
       "batchsize": 50,
       "epoch": 50,
       "gpu": 0,
       "out": "result",
       "resume": False,
       "unit": 1000
})
   models = (encoder, decoder)
   
   
   os.environ['PYTHONHASHSEED'] = '0'
   np.random.seed(5)
   rn.seed(5)
   config = tf.ConfigProto(
       gpu_options=tf.GPUOptions(
           visible_device_list="0,1", 
           allow_growth=True
       )
   )
   tf.set_random_seed(5)
   sess = tf.Session(graph=tf.get_default_graph(), config=config)
   K.set_session(sess)
   reconstruction_loss = binary_crossentropy(K.flatten(inputs),
                                                 K.flatten(outputs))
   reconstruction_loss *= image_size * image_size
   kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
   kl_loss = K.sum(kl_loss, axis=-1)
   kl_loss *= -0.5
   vae_loss = K.mean(reconstruction_loss + kl_loss)
   vae.add_loss(vae_loss)
   Adam = optimizers.Adam(lr=0.0005)
   vae.compile(optimizer=Adam)
   vae.summary()
   plot_model(vae, to_file='vae_cnn.png', show_shapes=True)
   history = vae.fit(x_train,
             epochs=epochs,
             batch_size=batch_size,
             validation_data=(x_test, None))
   open('vae_cnn.json', "w").write(vae.to_json())
   vae.save('vae_cnn.h5')
   plot_results(models, data, batch_size=batch_size, model_name="vae_cnn")
   plot_history(history)
# Additional notes
It seems the keras / tensorflow versions are sometimes the cause of this kind of problem, but switching to the versions that worked for other people made no difference.
I apologize for the length of the code; I would appreciate any advice. Thank you very much.
  • Python 3.x
