前提・実現したいこと
Python初心者です。
独自のデータを指定しデータを読み込みたいのですが、エラーの修正方法がわかりません。
"AlexNet.py"で実行しており、
データは"ADNI2"から読みこむことはできています。
欠けている情報がございましたら追加いたします。
間違っている箇所があれば、ご指摘いただけると幸いです。
発生している問題・エラーメッセージ
$ python AlexNet.py Traceback (most recent call last): File "AlexNet.py", line 91, in <module> for i, (images, labels) in enumerate(train_loader): File "/home/selen/.pyenv/versions/3.7.3/lib/python3.7/site-packages/torch/utils/data/dataloader.py", line 582, in __next__ return self._process_next_batch(batch) File "/home/selen/.pyenv/versions/3.7.3/lib/python3.7/site-packages/torch/utils/data/dataloader.py", line 608, in _process_next_batch raise batch.exc_type(batch.exc_msg) TypeError: Traceback (most recent call last): File "/home/selen/.pyenv/versions/3.7.3/lib/python3.7/site-packages/torch/utils/data/_utils/worker.py", line 99, in _worker_loop samples = collate_fn([dataset[i] for i in batch_indices]) File "/home/selen/.pyenv/versions/3.7.3/lib/python3.7/site-packages/torch/utils/data/_utils/collate.py", line 63, in default_collate return {key: default_collate([d[key] for d in batch]) for key in batch[0]} File "/home/selen/.pyenv/versions/3.7.3/lib/python3.7/site-packages/torch/utils/data/_utils/collate.py", line 63, in <dictcomp> return {key: default_collate([d[key] for d in batch]) for key in batch[0]} File "/home/selen/.pyenv/versions/3.7.3/lib/python3.7/site-packages/torch/utils/data/_utils/collate.py", line 70, in default_collate raise TypeError((error_msg_fmt.format(type(batch[0])))) TypeError: batch must contain tensors, numbers, dicts or lists; found <class 'pathlib.PosixPath'>
該当のソースコード
AlexNet.py
Python
import torch
import torchvision
import torch.nn as nn
import torch.nn.init as init
import torch.optim as optim
import torch.nn.functional as F
import torchvision.transforms as transforms
import numpy as np
from matplotlib import pyplot as plt

import dataset as dataset_lib  # aliased so the loaded data no longer shadows the module

# Load the ADNI2 data.
# NOTE(review): `transform` below is constructed but never handed to the
# dataset.  The reported TypeError ("batch must contain tensors ... found
# <class 'pathlib.PosixPath'>") means each sample the DataLoader collates
# still contains a raw file path -- the image is never loaded and converted
# to a tensor.  The loading + transform must happen inside the dataset's
# __getitem__; check dataset.load_data's signature and pass the transform
# through, e.g.:
#   full_dataset = dataset_lib.load_data(['ADNI2'], transform=transform)
full_dataset = dataset_lib.load_data(['ADNI2'])
transform = transforms.Compose([transforms.ToTensor()])

# 80/20 train/test split.
n_train = int(len(full_dataset) * 0.8)
n_test = len(full_dataset) - n_train  # remainder; no int() needed

train_dataset, test_dataset = torch.utils.data.random_split(
    full_dataset, [n_train, n_test]
)

# Data loaders.
train_loader = torch.utils.data.DataLoader(
    dataset=train_dataset,
    batch_size=64,
    shuffle=True,
    num_workers=4)

test_loader = torch.utils.data.DataLoader(
    dataset=test_dataset,
    batch_size=64,
    shuffle=False,
    num_workers=4)


class AlexNet(nn.Module):
    """AlexNet-style CNN classifier.

    The classifier's first Linear layer expects 256 * 4 * 4 features, so
    the input images are presumably 3 x 32 x 32 (three 2x2 max-pools:
    32 -> 16 -> 8 -> 4) -- TODO confirm against the actual ADNI2 image size.
    """

    def __init__(self, num_classes):
        super(AlexNet, self).__init__()
        # Convolutional feature extractor.
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        # Fully-connected classifier head.
        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(256 * 4 * 4, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes),
        )

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), 256 * 4 * 4)  # flatten for the Linear layers
        x = self.classifier(x)
        return x


# Select device and build the model.
num_classes = 4
device = 'cuda' if torch.cuda.is_available() else 'cpu'
net = AlexNet(num_classes).to(device)
# Loss and optimizer.
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)

# Training configuration and per-epoch history.
num_epochs = 20
train_loss_list, train_acc_list, val_loss_list, val_acc_list = [], [], [], []

### training
for epoch in range(num_epochs):
    train_loss, train_acc, val_loss, val_acc = 0, 0, 0, 0

    # ====== train_mode ======
    net.train()
    for images, labels in train_loader:
        images, labels = images.to(device), labels.to(device)
        optimizer.zero_grad()
        outputs = net(images)
        loss = criterion(outputs, labels)
        # loss.item() is the *mean* loss over the batch; weight it by the
        # batch size so dividing by the dataset size below yields a true
        # per-sample average.  (The original divided a sum of batch means
        # by the dataset size, understating the loss.)
        train_loss += loss.item() * labels.size(0)
        train_acc += (outputs.max(1)[1] == labels).sum().item()
        loss.backward()
        optimizer.step()

    avg_train_loss = train_loss / len(train_loader.dataset)
    avg_train_acc = train_acc / len(train_loader.dataset)

    # ====== val_mode ======
    net.eval()
    with torch.no_grad():  # no gradients needed for evaluation
        for images, labels in test_loader:
            images = images.to(device)
            labels = labels.to(device)
            outputs = net(images)
            loss = criterion(outputs, labels)
            val_loss += loss.item() * labels.size(0)  # same weighting as above
            val_acc += (outputs.max(1)[1] == labels).sum().item()
    avg_val_loss = val_loss / len(test_loader.dataset)
    avg_val_acc = val_acc / len(test_loader.dataset)

    # The original also passed a stray positional argument (i+1) that the
    # format string never consumed; it is dropped here.
    print('Epoch [{}/{}], Loss: {loss:.4f}, val_loss: {val_loss:.4f}, val_acc: {val_acc:.4f}'
          .format(epoch + 1, num_epochs,
                  loss=avg_train_loss, val_loss=avg_val_loss, val_acc=avg_val_acc))
    train_loss_list.append(avg_train_loss)
    train_acc_list.append(avg_train_acc)
    val_loss_list.append(avg_val_loss)
    val_acc_list.append(avg_val_acc)


# plot graph: loss curves
plt.figure()
plt.plot(range(num_epochs), train_loss_list, color='blue', linestyle='-', label='train_loss')
plt.plot(range(num_epochs), val_loss_list, color='green', linestyle='--', label='val_loss')
plt.legend()
plt.xlabel('epoch')
plt.ylabel('loss')
plt.title('Training and validation loss')
plt.grid()
plt.show()

# plot graph: accuracy curves
plt.figure()
plt.plot(range(num_epochs), train_acc_list, color='blue', linestyle='-', label='train_acc')
plt.plot(range(num_epochs), val_acc_list, color='green', linestyle='--', label='val_acc')
plt.legend()
plt.xlabel('epoch')
plt.ylabel('acc')
plt.title('Training and validation accuracy')
plt.grid()
plt.show()
あなたの回答
tips
プレビュー