### Background / what I want to achieve
I am a Python beginner.
I would like to fine-tune a model with PyTorch using my own dataset stored under '/home/selen/downloads/'.
### Problem / error message
```
Traceback (most recent call last):
  File "Arashi/arashi.py", line 52, in <module>
    datasets = torch.utils.data.TensorDataset(img_datas, labels)
  File "/home/selen/.pyenv/versions/3.7.3/lib/python3.7/site-packages/torch/utils/data/dataset.py", line 36, in __init__
    assert all(tensors[0].size(0) == tensor.size(0) for tensor in tensors)
  File "/home/selen/.pyenv/versions/3.7.3/lib/python3.7/site-packages/torch/utils/data/dataset.py", line 36, in <genexpr>
    assert all(tensors[0].size(0) == tensor.size(0) for tensor in tensors)
AttributeError: 'list' object has no attribute 'size'
```
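For context on the message: `TensorDataset` requires every argument to be a tensor with the same first dimension, and the assert shown in the traceback calls `.size(0)` on each argument, which a plain Python list does not have. Below is a minimal sketch of that check and of a conversion that satisfies it; the stand-in values and `dtype=torch.long` are illustrative assumptions (long is chosen because `F.cross_entropy`, used further down in the code, expects integer class-index targets).

```python
import torch
from torch.utils.data import TensorDataset

# Stand-ins for the question's variables; shapes and values are illustrative only.
img_datas = torch.zeros(4, 3, 224, 224)
labels = [0, 0, 1, 1]  # a plain Python list, as in the code below

# TensorDataset's __init__ calls .size(0) on every argument, so passing the
# list directly would raise the same AttributeError as in the traceback:
# TensorDataset(img_datas, labels)

# Converting the labels to a tensor first satisfies the size check.
labels_tensor = torch.tensor(labels, dtype=torch.long)  # dtype is an assumption
datasets = TensorDataset(img_datas, labels_tensor)
print(len(datasets))  # 4
```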
### Relevant source code
```python
import torch, torchvision
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
import pytorch_lightning as pl
from pytorch_lightning import Trainer
from PIL import Image
import glob

fold_path = '/home/selen/downloads/'

# Collect the image paths of each class sub-directory into one list per class.
imgs = []
for imgs_path in glob.glob(fold_path + '*'):
    imgs.append(glob.glob(imgs_path + '/*'))

from torchvision.models import resnet18
resnet = resnet18(pretrained=True)

transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

# Use the index of each sub-directory as the class label.
labels = []
img_datas = torch.tensor([])
for i, imgs_arr in enumerate(imgs):
    for img_path in imgs_arr:
        labels.append(i)
        img = Image.open(img_path)
        tensor_img = transform(img)
        tensor_img = tensor_img.unsqueeze(0)
        img_datas = torch.cat([img_datas, tensor_img], dim=0)

datasets = torch.utils.data.TensorDataset(img_datas, labels)
n_train = int(len(datasets) * 0.85)
n_val = len(datasets) - n_train
torch.manual_seed(0)
train, val = torch.utils.data.random_split(datasets, [n_train, n_val])

class TrainNet(pl.LightningModule):
    @pl.data_loader
    def train_dataloader(self):
        return torch.utils.data.DataLoader(train, self.batch_size, shuffle=True)

    def training_step(self, batch, batch_nb):
        x, t = batch
        y = self.forward(x)
        loss = self.lossfun(y, t)
        results = {'loss': loss}
        return results

class ValidationNet(pl.LightningModule):
    @pl.data_loader
    def val_dataloader(self):
        return torch.utils.data.DataLoader(val, self.batch_size)

    def validation_step(self, batch, batch_nb):
        x, t = batch
        y = self.forward(x)
        loss = self.lossfun(y, t)
        y_label = torch.argmax(y, dim=1)
        acc = torch.sum(t == y_label) * 1.0 / len(t)
        results = {'val_loss': loss, 'val_acc': acc}
        return results

    def validation_end(self, outputs):
        avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
        avg_acc = torch.stack([x['val_acc'] for x in outputs]).mean()
        results = {'val_loss': avg_loss, 'val_acc': avg_acc}
        return results

class FineTuningNet(TrainNet, ValidationNet):
    def __init__(self, batch_size=256):
        super().__init__()
        self.batch_size = batch_size
        self.conv = resnet18(pretrained=True)
        self.fc1 = nn.Linear(1000, 100)
        self.fc2 = nn.Linear(100, 5)
        # Freeze the pretrained backbone so only the new fully connected layers train.
        for param in self.conv.parameters():
            param.requires_grad = False

    def lossfun(self, y, t):
        return F.cross_entropy(y, t)

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=0.01)

    def forward(self, x):
        x = self.conv(x)
        x = self.fc1(x)
        x = F.relu(x)
        x = self.fc2(x)
        return x
```
### What I tried

I looked into this in various ways but still cannot figure it out. I would appreciate it if you could point out where the mistake is.