Background / What I want to achieve
My environment is:
Python 3.8
CUDA 10.1
cuDNN 7.6
tensorflow-gpu 2.3
To be honest, I do not really understand what the error means. I tried adding code to limit GPU memory usage on my own, but it had no effect.
I would appreciate any help you can give.
Below are my code and the error message.
from pickle import load
from numpy import argmax
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.applications.vgg16 import preprocess_input
from tensorflow.keras.models import Model
from tensorflow.keras.models import load_model
from gtts import gTTS
from playsound import playsound

# load the tokenizer
tokenizer = load(open('tokenizer1.pkl', 'rb'))
# max sequence length pre-defined at training time
max_length = 43
# load the model
model = load_model('model-ep004-loss2.778-val_loss3.210.h5')
# path of the image used by text_reading
text_reading_image_path = 'image2.png'


class Image_captioning:  # class that generates a description for an image

    # extract features from each photo in the directory
    def extract_features(self, filename):
        print('a')
        # load the model
        model = VGG16()
        print('b')
        # re-structure the model
        model.layers.pop()
        print('c')
        model = Model(inputs=model.inputs, outputs=model.layers[-1].output)
        print('d')
        # load the photo
        image = load_img(filename, target_size=(224, 224))
        print('e')
        # convert the image pixels to a numpy array
        image = img_to_array(image)
        print('f')
        # reshape data for the model
        image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
        print('g')
        # prepare the image for the VGG model
        image = preprocess_input(image)
        print("h")
        # get features
        feature = model.predict(image, verbose=0)
        print('i')
        return feature

    # map an integer to a word
    def word_for_id(self, integer, tokenizer):
        for word, index in tokenizer.word_index.items():
            if index == integer:
                return word
        return None

    # generate a description for an image
    def generate_desc(self, model, tokenizer, photo, max_length):
        # seed the generation process
        in_text = 'startseq'
        # iterate over the whole length of the sequence
        for i in range(max_length):
            # integer encode input sequence
            sequence = tokenizer.texts_to_sequences([in_text])[0]
            # pad input
            sequence = pad_sequences([sequence], maxlen=max_length)
            # predict next word
            yhat = model.predict([photo, sequence], verbose=0)
            # convert probability to integer
            yhat = argmax(yhat)
            # map integer to word
            word = self.word_for_id(yhat, tokenizer)
            # stop if we cannot map the word
            if word is None:
                break
            # append as input for generating the next word
            in_text += ' ' + word
            # stop if we predict the end of the sequence
            if word == 'endseq':
                break
        return in_text

    def text_reading(self):
        photo = self.extract_features(text_reading_image_path)
        d = self.generate_desc(model, tokenizer, photo, max_length)
        description = d.replace('startseq', ' ', 1).replace('endseq', ' ', 1)
        print(description)
        sound = gTTS(text=description, lang='ja', slow=False)
        sound.save('/home/limlab/program/navigation/potential/voice/navigation.mp3')
        playsound('/home/limlab/program/voice/1.wav')
        playsound("/home/limlab/program/navigation/potential/voice/navigation.mp3")

From here on is the error message:

a
2021-11-21 11:58:17.479209: I tensorflow/stream_executor/cuda/cuda_driver.cc:775] failed to allocate 1.00G (1073741824 bytes) from device: CUDA_ERROR_OUT_OF_MEMORY: out of memory
2021-11-21 11:58:17.479540: I tensorflow/stream_executor/cuda/cuda_driver.cc:775] failed to allocate 921.60M (966367744 bytes) from device: CUDA_ERROR_OUT_OF_MEMORY: out of memory
2021-11-21 11:58:17.479817: I tensorflow/stream_executor/cuda/cuda_driver.cc:775] failed to allocate 829.44M (869731072 bytes) from device: CUDA_ERROR_OUT_OF_MEMORY: out of memory
2021-11-21 11:58:17.480066: I tensorflow/stream_executor/cuda/cuda_driver.cc:775] failed to allocate 746.50M (782758144 bytes) from device: CUDA_ERROR_OUT_OF_MEMORY: out of memory
2021-11-21 11:58:17.480313: I tensorflow/stream_executor/cuda/cuda_driver.cc:775] failed to allocate 671.85M (704482304 bytes) from device: CUDA_ERROR_OUT_OF_MEMORY: out of memory
2021-11-21 11:58:17.480559: I tensorflow/stream_executor/cuda/cuda_driver.cc:775] failed to allocate 604.66M (634034176 bytes) from device: CUDA_ERROR_OUT_OF_MEMORY: out of memory
2021-11-21 11:58:18.318427: W tensorflow/core/framework/cpu_allocator_impl.cc:81] Allocation of 411041792 exceeds 10% of free system memory.
b
c
d
e
f
g
h
2021-11-21 11:58:18.680658: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcublas.so.10
2021-11-21 11:58:18.779763: E tensorflow/stream_executor/cuda/cuda_blas.cc:225] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2021-11-21 11:58:18.781533: E tensorflow/stream_executor/cuda/cuda_blas.cc:225] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2021-11-21 11:58:18.782176: E tensorflow/stream_executor/cuda/cuda_blas.cc:225] failed to create cublas handle: CUBLAS_STATUS_NOT_INITIALIZED
2021-11-21 11:58:18.782845: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcudnn.so.7
2021-11-21 11:58:18.785736: E tensorflow/stream_executor/cuda/cuda_dnn.cc:328] Could not create cudnn handle: CUDNN_STATUS_INTERNAL_ERROR
2021-11-21 11:58:18.786488: E tensorflow/stream_executor/cuda/cuda_dnn.cc:328] Could not create cudnn handle: CUDNN_STATUS_INTERNAL_ERROR
2021-11-21 11:58:18.786504: W tensorflow/core/framework/op_kernel.cc:1767] OP_REQUIRES failed at conv_ops_fused_impl.h:642 : Unknown: Failed to get convolution algorithm. This is probably because cuDNN failed to initialize, so try looking to see if a warning log message was printed above.
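Supplementary note: extract_features builds a brand-new VGG16 on every call, on top of the captioning model that is already loaded, which may add to the GPU memory pressure seen in the log. Below is a minimal sketch of my own restructuring (not the original tutorial code; the names feature_extractor and extract_features_once are hypothetical) that builds the VGG16 feature extractor once and reuses it, taking the fc2 output just like the pop()-based version above:

from tensorflow.keras.applications.vgg16 import VGG16, preprocess_input
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from tensorflow.keras.models import Model

# Build the extractor once at module level instead of inside extract_features.
vgg = VGG16()
feature_extractor = Model(inputs=vgg.inputs, outputs=vgg.layers[-2].output)  # fc2 features

def extract_features_once(filename):
    image = load_img(filename, target_size=(224, 224))
    image = img_to_array(image)
    image = image.reshape((1,) + image.shape)  # add the batch dimension
    image = preprocess_input(image)
    return feature_extractor.predict(image, verbose=0)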
What I tried
I inserted the following code to try to improve the situation:
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
import os
os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
import torch  # note: these two lines are PyTorch, not TensorFlow
torch.rand(1).cuda()
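For reference, the TensorFlow 2 native way to request on-demand GPU memory allocation, as I understand it, is the sketch below (it has to run before any of the models above are loaded, otherwise it raises a RuntimeError):

import tensorflow as tf

# Request memory growth on every visible GPU before any model touches the GPU.
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
    try:
        tf.config.experimental.set_memory_growth(gpu, True)
    except RuntimeError as e:
        # set_memory_growth must be called before the GPU has been initialized
        print(e)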