efficientnet実装を試みているものです。
ようやく学習のところまでたどり着いたのですが、下記のerrorが出力され対応に苦慮しています。
Errorから2つの要素の入力が要求されているのに、1つしか入力されていないといった意味だと理解していますが、それが何のことなのか判然としません。ご教示いただけますでしょうか。
発生している問題・エラーメッセージ
(envtest2) C:\Users\orala\Desktop\new>python efficientnet.py Using TensorFlow backend. 2021-05-23 18:12:00.241804: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library cudart64_101.dll 710 40 Found 710 images belonging to 2 classes. Found 40 images belonging to 2 classes. 0%| | 0/15 [00:00<?, ?it/s] Traceback (most recent call last): File "efficientnet.py", line 216, in <module> for img, label in tqdm.auto.tqdm(train_dir): ValueError: not enough values to unpack (expected 2, got 1)
該当のソースコード
python
1import os 2import random 3import glob 4from keras.preprocessing.image import ImageDataGenerator 5from keras.models import Sequential, Model 6from keras.layers import Input, Flatten, Dense 7from keras import optimizers 8from keras.callbacks import ModelCheckpoint, EarlyStopping 9from keras import backend 10import torch 11import torch.nn as nn 12import torch.nn.functional as F 13import torchvision 14import torchvision.transforms as transforms 15import torchvision.datasets as datasets 16import tqdm 17import matplotlib.pyplot as plt 18 19 20classes = ['cancer','normal'] 21nb_classes = len(classes) 22 23#train val dir 24train_dir = './dataset/train' 25val_dir = './dataset/valid' 26model_dir = './saved_model' 27 28#num samples 29train_samples = glob.glob(train_dir + '/*/*.jpg') 30val_samples = glob.glob(val_dir + '/*/*.jpg') 31train_samples = len(train_samples) 32val_samples = len(val_samples) 33print(train_samples) 34print(val_samples) 35 36#img size 37img_w, img_h = 224,224 38 39#keras image data generator 40train_datagen = ImageDataGenerator(rescale = 1.0/255, zoom_range=0.2,horizontal_flip=True) 41val_datagen = ImageDataGenerator(rescale=1.0 / 255) 42 43train_generator = train_datagen.flow_from_directory( 44 train_dir, 45 target_size=(img_w,img_h), 46 color_mode = 'rgb', 47 classes = classes, 48 class_mode = 'categorical', 49 batch_size = 5 50) 51 52val_generator = val_datagen.flow_from_directory( 53 val_dir, 54 target_size=(img_w,img_h), 55 color_mode = 'rgb', 56 classes = classes, 57 class_mode = 'categorical', 58 batch_size = 5 59) 60 61#TO save model 62checkpoint = ModelCheckpoint( 63 filepath = os.path.join( 64 model_dir, 65 'model_16.hdf5' 66 ), 67 save_best_only=True 68) 69 70#TO early stopping 71early_stopping = EarlyStopping(monitor='val_loss',patience=3,verbose=0,mode='auto') 72 73### model ### 74class Swish(nn.Module): 75 """x*sigmoid(x)""" 76 def __init__(self): 77 super().__init__() 78 def forward(self, x): 79 return x*torch.sigmoid(x) 80 81class DWBlock(nn.Module): 82 # 出力が同じサイズになるようにパディングはk//2 (ただしkは奇数) 83 def __init__(self, in_c, k, s, p, bias=False): 84 if p != k//2: 85 print("output may not be the same spatial size as input") 86 super().__init__() 87 self.dw = nn.Conv2d(in_c, in_c, kernel_size=k, stride=s, padding=p, groups=in_c, bias=bias) 88 self.bn = nn.BatchNorm2d(in_c) 89 self.act = Swish() 90 91 def forward(self, x): 92 out = self.act(self.bn(self.dw(x))) 93 return out 94 95class PWBlock(nn.Module): 96 def __init__(self, in_c, out_c, bias=False, act="swish"): 97 super().__init__() 98 self.dw = nn.Conv2d(in_c, out_c, kernel_size=1, bias=bias) 99 self.bn = nn.BatchNorm2d(out_c) 100 if act=='swish': 101 self.act = Swish() 102 elif act is None: 103 self.act = nn.Identity() 104 105 def forward(self, x): 106 out = self.act(self.bn(self.dw(x))) 107 return out 108 109class SEBlock(nn.Module): 110 def __init__(self, in_c, h=8): 111 super().__init__() 112 # Squeeze 113 self.gap = nn.AdaptiveAvgPool2d(1) 114 # Excitation 115 self.fc1 = nn.Linear(in_c, in_c//h, bias=False) 116 self.act1 = Swish() 117 self.fc2 = nn.Linear(in_c//h, in_c, bias=False) 118 self.act2 = nn.Sigmoid() 119 120 def forward(self, x): 121 out = self.gap(x).squeeze(-1).squeeze(-1) 122 out = self.act1(self.fc1(out)) 123 out = self.act2(self.fc2(out)).unsqueeze(-1).unsqueeze(-1) 124 return out*x 125 126class MBConv(nn.Module): 127 def __init__(self, in_c, out_c, k=5, s=1, expansion=1): 128 super().__init__() 129 self.s = s 130 self.in_c = in_c 131 self.out_c = out_c 132 self.pw1 = PWBlock(in_c, in_c*expansion, bias=False) 133 self.dw = DWBlock(in_c*expansion, k=k, s=s, p=k//2, bias=False) 134 self.se = SEBlock(in_c*expansion) 135 self.pw2 = PWBlock(in_c*expansion, out_c, bias=False, act=None) 136 137 def forward(self, x): 138 out = self.pw2(self.se(self.dw(self.pw1(x)))) 139 if self.s == 1 and self.in_c==self.out_c: 140 out = out+x 141 return out 142 143class EfficientNetB0(nn.Module): 144 def __init__(self, n_c=3, n_classes=10): 145 super().__init__() 146 self.first = nn.Sequential( 147 nn.Conv2d(n_c,32,3,1,1,bias=False), 148 nn.BatchNorm2d(32), 149 Swish() 150 ) 151 self.mb1 = MBConv(32, 16, 3, expansion=1) 152 self.mb6_1 = nn.Sequential( 153 MBConv(16, 24, k=3, s=1, expansion=6), 154 MBConv(24, 24, k=3, s=1, expansion=6) 155 ) 156 self.mb6_2 = nn.Sequential( 157 MBConv(24, 40, k=5, s=1, expansion=6), 158 MBConv(40, 40, k=5, s=1, expansion=6) 159 ) 160 #画像サイズ:32->16 161 self.mb6_3 = nn.Sequential( 162 MBConv(40, 80, k=3, s=2, expansion=6), 163 MBConv(80, 80, k=3, s=1, expansion=6), 164 MBConv(80, 80, k=3, s=1, expansion=6), 165 ) 166 self.mb6_4 = nn.Sequential( 167 MBConv(80, 112, k=5, s=1, expansion=6), 168 MBConv(112, 112, k=5, s=1, expansion=6), 169 MBConv(112, 112, k=5, s=1, expansion=6), 170 ) 171 #画像サイズ: 16->8 172 self.mb6_5 = nn.Sequential( 173 MBConv(112, 192, k=5, s=2, expansion=6), 174 MBConv(192, 192, k=5, s=1, expansion=6), 175 MBConv(192, 192, k=5, s=1, expansion=6), 176 MBConv(192, 192, k=5, s=1, expansion=6), 177 ) 178 179 self.mb6_6 = nn.Sequential( 180 MBConv(192, 320, k=3, s=1, expansion=6), 181 ) 182 self.pw = PWBlock(320, 1280) 183 self.gap = nn.AdaptiveAvgPool2d(1) 184 self.dropout = nn.Dropout(0.2) 185 self.fc = nn.Linear(1280, n_classes) 186 187 def forward(self, x): 188 out = self.first(x) 189 out = self.mb1(out) 190 out = self.mb6_1(out) 191 out = self.mb6_2(out) 192 out = self.mb6_3(out) 193 out = self.mb6_4(out) 194 out = self.mb6_5(out) 195 out = self.mb6_6(out) 196 out = self.pw(out) 197 out = self.gap(out).view(x.size(0), -1) 198 out = self.dropout(out) 199 out = self.fc(out) 200 return out 201 202net = EfficientNetB0(n_c=3, n_classes=10) 203criterion = nn.CrossEntropyLoss() 204optimizer = torch.optim.SGD(net.parameters(), lr=0.1, momentum=0.9, weight_decay=1e-4) 205scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[25, 40], gamma=0.1) 206 207#train 208history = {'loss':[], 'acc':[], 'val_loss':[], 'val_acc':[]} 209epochs = 50 210device ='cuda:0' if torch.cuda.is_available() else 'cpu' 211net = net.to(device) 212for epoch in range(epochs): 213 net.train() 214 epoch_acc, epoch_loss = 0., 0. 215 num_imgs = 0. 216 for img, label in tqdm.auto.tqdm(train_dir): 217 img, label = img.to(device), label.to(device) 218 optimizer.zero_grad() 219 output = net(img) 220 loss = criterion(output, label) 221 loss.backward() 222 optimizer.step() 223 224 epoch_loss += loss.data 225 epoch_acc += sum(label == output.argmax(0)) 226 num_imgs += img.size(0)*1.0 # floatにするため1.0を掛けている。 227 228 scheduler.step() 229 epoch_loss /= num_imgs 230 epoch_acc /= num_imgs 231 history['loss'].append(epoch_loss) 232 history['acc'].append(epoch_acc) 233 print(f'Epoch: {epoch}, loss:{epoch_loss}, acc:{epoch_acc}') 234 235 net.eval() 236 epoch_acc, epoch_loss = 0., 0. 237 num_imgs = 0. 238 for img, label in tqdm.auto.tqdm(test_dir): 239 img, label = img.to(device), label.to(device) 240 with torch.no_grad(): 241 output = net(img) 242 loss = criterion(output, label) 243 244 epoch_loss += loss.data 245 epoch_acc += sum(label == output.argmax(0)) 246 num_imgs += img.size(0)*1.0 # floatにするため1.0を掛けている。 247 248 epoch_loss /= num_imgs 249 epoch_acc /= num_imgs 250 history['val_loss'].append(epoch_loss) 251 history['val_acc'].append(epoch_acc) 252 print(f'Epoch: {epoch}, val_loss:{epoch_loss}, val_acc:{epoch_acc}') 253 254 255x = list(range(epochs)) 256plt.figure() 257plt.subplot(1,2,1) 258plt.title('Loss') 259plt.plot(x, history['loss'], 'bo') 260plt.plot(x, history['val_loss'], 'b') 261 262plt.subplot(1,2,2) 263plt.title('Acc') 264plt.plot(x, history['acc'], 'ro') 265plt.plot(x, history['val_acc'], 'r')
バッドをするには、ログインかつ
こちらの条件を満たす必要があります。
2021/05/24 02:24
2021/05/24 16:32
2021/05/27 06:51 編集