Premise / What I want to achieve
I am implementing SSD (Single Shot MultiBox Detector) in Keras.
An error occurs in the code that loads the model.
I would like to know how to resolve this error.
Problem / error message
```
AttributeError                            Traceback (most recent call last)
<ipython-input-42-ce81430fdd80> in <module>
      1 model = SSD300(input_shape, num_classes=NUM_CLASSES)
----> 2 model.load_weights('weights_SSD300.hdf5', by_name=True)

AttributeError: 'NoneType' object has no attribute 'load_weights'
```
Relevant source code
```python
import cv2
from tensorflow import keras
from tensorflow.keras.applications.imagenet_utils import preprocess_input
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing import image
import matplotlib.pyplot as plt
import numpy as np
import pickle
from random import shuffle
from imageio import imread
from PIL import Image
import tensorflow as tf

from ssd import SSD300
from ssd_training import MultiboxLoss
from ssd_utils import BBoxUtility

%matplotlib inline
plt.rcParams['figure.figsize'] = (8, 8)
plt.rcParams['image.interpolation'] = 'nearest'

np.set_printoptions(suppress=True)

# config = tf.ConfigProto()
# config.gpu_options.per_process_gpu_memory_fraction = 0.9
# set_session(tf.Session(config=config))

from ssd import SSD300
from ssd_training import MultiboxLoss
from ssd_utils import BBoxUtility

# some constants
NUM_CLASSES = 21
input_shape = (300, 300, 3)

priors = pickle.load(open('prior_boxes_ssd300.pkl', 'rb'))
bbox_util = BBoxUtility(NUM_CLASSES, priors)

# Annotation
#gt = pickle.load(open('gt_pascal.pkl', 'rb'))
gt = pickle.load(open('PASCAL_VOC/VOC2012.pkl', 'rb'))
keys = sorted(gt.keys())
num_train = int(round(0.8 * len(keys)))
train_keys = keys[:num_train]
val_keys = keys[num_train:]
num_val = len(val_keys)

class Generator(object):
    def __init__(self, gt, bbox_util,
                 batch_size, path_prefix,
                 train_keys, val_keys, image_size,
                 saturation_var=0.5,
                 brightness_var=0.5,
                 contrast_var=0.5,
                 lighting_std=0.5,
                 hflip_prob=0.5,
                 vflip_prob=0.5,
                 do_crop=True,
                 crop_area_range=[0.75, 1.0],
                 aspect_ratio_range=[3./4., 4./3.]):
        self.gt = gt
        self.bbox_util = bbox_util
        self.batch_size = batch_size
        self.path_prefix = path_prefix
        self.train_keys = train_keys
        self.val_keys = val_keys
        self.train_batches = len(train_keys)
        self.val_batches = len(val_keys)
        self.image_size = image_size
        self.color_jitter = []
        if saturation_var:
            self.saturation_var = saturation_var
            self.color_jitter.append(self.saturation)
        if brightness_var:
            self.brightness_var = brightness_var
            self.color_jitter.append(self.brightness)
        if contrast_var:
            self.contrast_var = contrast_var
            self.color_jitter.append(self.contrast)
        self.lighting_std = lighting_std
        self.hflip_prob = hflip_prob
        self.vflip_prob = vflip_prob
        self.do_crop = do_crop
        self.crop_area_range = crop_area_range
        self.aspect_ratio_range = aspect_ratio_range

    def grayscale(self, rgb):
        return rgb.dot([0.299, 0.587, 0.114])

    def saturation(self, rgb):
        gs = self.grayscale(rgb)
        alpha = 2 * np.random.random() * self.saturation_var
        alpha += 1 - self.saturation_var
        rgb = rgb * alpha + (1 - alpha) * gs[:, :, None]
        return np.clip(rgb, 0, 255)

    def brightness(self, rgb):
        alpha = 2 * np.random.random() * self.brightness_var
        alpha += 1 - self.saturation_var
        rgb = rgb * alpha
        return np.clip(rgb, 0, 255)

    def contrast(self, rgb):
        gs = self.grayscale(rgb).mean() * np.ones_like(rgb)
        alpha = 2 * np.random.random() * self.contrast_var
        alpha += 1 - self.contrast_var
        rgb = rgb * alpha + (1 - alpha) * gs
        return np.clip(rgb, 0, 255)

    def lighting(self, img):
        cov = np.cov(img.reshape(-1, 3) / 255.0, rowvar=False)
        eigval, eigvec = np.linalg.eigh(cov)
        noise = np.random.randn(3) * self.lighting_std
        noise = eigvec.dot(eigval * noise) * 255
        img += noise
        return np.clip(img, 0, 255)

    def horizontal_flip(self, img, y):
        if np.random.random() < self.hflip_prob:
            img = img[:, ::-1]
            y[:, [0, 2]] = 1 - y[:, [2, 0]]
        return img, y

    def vertical_flip(self, img, y):
        if np.random.random() < self.vflip_prob:
            img = img[::-1]
            y[:, [1, 3]] = 1 - y[:, [3, 1]]
        return img, y

    def random_sized_crop(self, img, targets):
        img_w = img.shape[1]
        img_h = img.shape[0]
        img_area = img_w * img_h
        random_scale = np.random.random()
        random_scale *= (self.crop_area_range[1] -
                         self.crop_area_range[0])
        random_scale += self.crop_area_range[0]
        target_area = random_scale * img_area
        random_ratio = np.random.random()
        random_ratio *= (self.aspect_ratio_range[1] -
                         self.aspect_ratio_range[0])
        random_ratio += self.aspect_ratio_range[0]
        w = np.round(np.sqrt(target_area * random_ratio))
        h = np.round(np.sqrt(target_area / random_ratio))
        if np.random.random() < 0.5:
            w, h = h, w
        w = min(w, img_w)
        w_rel = w / img_w
        w = int(w)
        h = min(h, img_h)
        h_rel = h / img_h
        h = int(h)
        x = np.random.random() * (img_w - w)
        x_rel = x / img_w
        x = int(x)
        y = np.random.random() * (img_h - h)
        y_rel = y / img_h
        y = int(y)
        img = img[y:y+h, x:x+w]
        new_targets = []
        for box in targets:
            cx = 0.5 * (box[0] + box[2])
            cy = 0.5 * (box[1] + box[3])
            if (x_rel < cx < x_rel + w_rel and
                    y_rel < cy < y_rel + h_rel):
                xmin = (box[0] - x_rel) / w_rel
                ymin = (box[1] - y_rel) / h_rel
                xmax = (box[2] - x_rel) / w_rel
                ymax = (box[3] - y_rel) / h_rel
                xmin = max(0, xmin)
                ymin = max(0, ymin)
                xmax = min(1, xmax)
                ymax = min(1, ymax)
                box[:4] = [xmin, ymin, xmax, ymax]
                new_targets.append(box)
        new_targets = np.asarray(new_targets).reshape(-1, targets.shape[1])
        return img, new_targets

    def generate(self, train=True):
        while True:
            if train:
                shuffle(self.train_keys)
                keys = self.train_keys
            else:
                shuffle(self.val_keys)
                keys = self.val_keys
            inputs = []
            targets = []
            for key in keys:
                img_path = self.path_prefix + key
                img = imread(img_path).astype('float32')
                y = self.gt[key].copy()
                if train and self.do_crop:
                    img, y = self.random_sized_crop(img, y)
                #img = imresize(img, self.image_size).astype('float32')
                #img = np.array(Image.fromarray(arr).resize(self.image_size,img)).astype('float32')
                img = np.array(Image.fromarray((img * 255).astype(np.uint8)).resize(self.image_size)).astype('float32')
                if train:
                    shuffle(self.color_jitter)
                    for jitter in self.color_jitter:
                        img = jitter(img)
                    if self.lighting_std:
                        img = self.lighting(img)
                    if self.hflip_prob > 0:
                        img, y = self.horizontal_flip(img, y)
                    if self.vflip_prob > 0:
                        img, y = self.vertical_flip(img, y)
                y = self.bbox_util.assign_boxes(y)
                inputs.append(img)
                targets.append(y)
                if len(targets) == self.batch_size:
                    tmp_inp = np.array(inputs)
                    tmp_targets = np.array(targets)
                    inputs = []
                    targets = []
                    yield preprocess_input(tmp_inp), tmp_targets

# image path
path_prefix = 'VOCdevkit/VOC2012/JPEGImages/'
gen = Generator(gt, bbox_util, 4, path_prefix,
                train_keys, val_keys,
                (input_shape[0], input_shape[1]), do_crop=False)

model = SSD300(input_shape, num_classes=NUM_CLASSES)
model.load_weights('weights_SSD300.hdf5', by_name=True)

# error occurs here
model = SSD300(input_shape, num_classes=NUM_CLASSES)
model.load_weights('weights_SSD300.hdf5',
                   by_name=True)
```
What I tried
- Checked the path to the hdf5 file (it is in the same directory as the .ipynb file).
- Since `model` is None, checked the arguments passed to `model = SSD300(...)` (a minimal type check is sketched just below this list).
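For reference, here is a minimal sketch of how the return value of `SSD300` could be checked. It assumes that `ssd.py` defines `SSD300` as a function that builds the network and is supposed to end with `return model`, as in the referenced repository; it is not part of the original notebook.

```python
from tensorflow.keras.models import Model
from ssd import SSD300

# Build the detector and inspect what SSD300 actually returns.
# If this prints <class 'NoneType'>, the function ran but never reached
# `return model` (easy to lose while rewriting ssd.py for tf.keras),
# so the later load_weights() call is made on None, which is exactly
# the AttributeError shown above.
model = SSD300((300, 300, 3), num_classes=21)
print(type(model))
assert isinstance(model, Model), "SSD300() did not return a Keras Model"
```

If the assertion fails, the problem is inside `SSD300` itself (for example, a missing or unreached `return` statement after the TF2 rewrite) rather than in `load_weights`.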
Supplementary information (framework/tool versions, etc.)
Execution environment:
- Windows 10
- Python 3.7.6
- TensorFlow 2.2.0
This is my first question post.
If anything is missing or there is anything else I should include, I would appreciate your guidance.
Addendum
- Added the code (everything after the error location is omitted due to the character limit).
- Added the full error message.
The code was downloaded and set up with reference to the following article:
https://qiita.com/slowsingle/items/64cc927bb29a49a7af14
The code rewrites needed because Keras is now bundled with TensorFlow v2 were done with reference to the following article (an illustrative sketch of this kind of change appears at the end of this addendum):
https://techblog.cccmk.co.jp/entry/2020/05/11/094834
The dataset used is PASCAL VOC2012.
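For illustration only, the kind of rewrite that article describes is switching standalone-Keras imports over to the Keras bundled with TensorFlow 2. The layer names below are placeholders, not the actual contents of ssd.py:

```python
# Before (standalone Keras package):
# from keras.layers import Conv2D, Input
# from keras.models import Model

# After (Keras bundled with TensorFlow 2.x):
from tensorflow.keras.layers import Conv2D, Input
from tensorflow.keras.models import Model
```

The notebook imports shown above already follow this pattern.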