### Background

I am training a CNN on the CIFAR-10 dataset and want to measure its accuracy. I followed an article I found online and ran the code, but it raises an error.

### What I want to achieve

I want the script to run without the error below.

### Problem / error message
```
Traceback (most recent call last):
  File "/home/workspace/username/VGG.py", line 107, in <module>
    plt.plot(accs, label='train acc')
  File "/home/anaconda3/envs//lib/python3.10/site-packages/matplotlib/pyplot.py", line 2730, in plot
    return gca().plot(
  File "/home/anaconda3/envs/lib/python3.10/site-packages/matplotlib/axes/_axes.py", line 1662, in plot
    lines = [*self._get_lines(*args, data=data, **kwargs)]
  File "/home/anaconda3/envs/lib/python3.10/site-packages/matplotlib/axes/_base.py", line 311, in __call__
    yield from self._plot_args(
  File "/home/anaconda3/envs/lib/python3.10/site-packages/matplotlib/axes/_base.py", line 496, in _plot_args
    x, y = index_of(xy[-1])
  File "/home/anaconda3/envs/lib/python3.10/site-packages/matplotlib/cbook/__init__.py", line 1690, in index_of
    y = _check_1d(y)
  File "/home/anaconda3/envs/lib/python3.10/site-packages/matplotlib/cbook/__init__.py", line 1382, in _check_1d
    return np.atleast_1d(x)
  File "<__array_function__ internals>", line 180, in atleast_1d
  File "/home/anaconda3/envs/lib/python3.10/site-packages/numpy/core/shape_base.py", line 65, in atleast_1d
    ary = asanyarray(ary)
  File "/home/anaconda3/envs/lib/python3.10/site-packages/torch/_tensor.py", line 757, in __array__
    return self.numpy()
TypeError: can't convert cuda:0 device type tensor to numpy. Use Tensor.cpu() to copy the tensor to host memory first.
```
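For reference, a minimal sketch of what this message is pointing at: `plt.plot()` converts its arguments to NumPy arrays, and that conversion fails for tensors that live on the GPU, so copying the values to host memory first (as the message itself suggests) avoids the error. The `accs` list below is only a stand-in for the one in the script; this is a sketch of one possible fix, not a confirmed solution.

```python
import torch
import matplotlib.pyplot as plt

device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Stand-in for the accs list built in the training loop:
# a list of 0-dim tensors that live on cuda:0 when a GPU is available
accs = [torch.tensor(0.1 * i, device=device) for i in range(5)]

# plt.plot(accs) reproduces the TypeError above on a CUDA device,
# because NumPy cannot read GPU memory directly.
accs_cpu = [a.cpu().item() for a in accs]  # copy to host, then unwrap to a float
plt.plot(accs_cpu, label='train acc')
plt.legend()
plt.show()
```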
### Relevant source code
```python
import torch
import torchvision
import torch.nn as nn
import torch.optim as optim
import numpy as np
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader

DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'

transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,))
])

train_dataset = datasets.CIFAR10(root="./data", train=True, download=True, transform=transform)
validation_dataset = datasets.CIFAR10(root="./data", train=False, download=True, transform=transform)

train_dataloader = DataLoader(train_dataset, batch_size=32, shuffle=True)
validation_dataloader = DataLoader(validation_dataset, batch_size=32, shuffle=False)


class CNN(nn.Module):
    def __init__(self, num_classes):
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=64, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(in_channels=256, out_channels=128, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
        )
        self.classifier = nn.Linear(in_features=4 * 4 * 128, out_features=num_classes)

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x


model = CNN(10)
model.to(DEVICE)

criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001, weight_decay=5e-4)

num_epocs = 15
losses = []
accs = []
val_losses = []
val_accs = []
for epoch in range(num_epocs):
    # Training
    running_loss = 0.0
    running_acc = 0.0
    for imgs, labels in train_dataloader:
        imgs = imgs.to(DEVICE)
        labels = labels.to(DEVICE)
        optimizer.zero_grad()
        output = model(imgs)
        loss = criterion(output, labels)
        loss.backward()
        running_loss += loss.item()
        pred = torch.argmax(output, dim=1)
        running_acc += torch.mean(pred.eq(labels).float())
        optimizer.step()
    running_loss /= len(train_dataloader)
    running_acc /= len(train_dataloader)
    losses.append(running_loss)
    accs.append(running_acc)

    # Validation
    val_running_loss = 0.0
    val_running_acc = 0.0
    for val_imgs, val_labels in validation_dataloader:
        val_imgs = val_imgs.to(DEVICE)
        val_labels = val_labels.to(DEVICE)
        val_output = model(val_imgs)
        val_loss = criterion(val_output, val_labels)
        val_running_loss += val_loss.item()
        val_pred = torch.argmax(val_output, dim=1)
        val_running_acc += torch.mean(val_pred.eq(val_labels).float())
    val_running_loss /= len(validation_dataloader)
    val_running_acc /= len(validation_dataloader)
    val_losses.append(val_running_loss)
    val_accs.append(val_running_acc)
    print("epoch: {}, loss: {}, acc: {} "
          "val_epoch: {}, val_loss: {}, val_acc: {}".format(epoch, running_loss, running_acc, epoch, val_running_loss, val_running_acc))


plt.style.use('ggplot')
plt.plot(losses, label='train loss')
plt.plot(val_losses, label='validation loss')
plt.legend()

plt.style.use('ggplot')
plt.plot(accs, label='train acc')
plt.plot(val_accs, label='validation acc')
plt.legend()
```

### What I tried

I looked into the error, but the suggested fixes differ from person to person, so I am not sure which one applies here (one candidate is sketched at the end of this post for reference).

### Supplementary information (FW/tool versions, etc.)
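For reference, a minimal sketch of the pattern mentioned above (an assumption about a possible fix, not a confirmed solution): accumulating the per-batch accuracy as a plain Python float via `Tensor.item()`, so that lists such as `accs` and `val_accs` hold numbers instead of CUDA tensors.

```python
import torch

device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Hypothetical predictions and labels standing in for one training batch
pred = torch.tensor([1, 0, 3, 2], device=device)
labels = torch.tensor([1, 0, 2, 2], device=device)

batch_acc = torch.mean(pred.eq(labels).float())  # 0-dim tensor on cuda:0 (when available)
batch_acc_value = batch_acc.item()               # plain Python float on the host

print(type(batch_acc), type(batch_acc_value))
# Appending batch_acc_value instead of batch_acc keeps the accuracy history
# NumPy-convertible, which is what plt.plot() needs.
```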
