Question edit history

1

Revised based on requests for corrections/additions

2022/01/03 20:18

Posted

k_0fall

Score: 7

test CHANGED
@@ -14,338 +14,266 @@
 
  Traceback (most recent call last):
 
- File "/Users/○○○○○/Desktop/●●_data/cnn.py", line 150, in <module>
+ File "/Users/○○○/Desktop/△△△_data/cnn.py", line 125, in <module>
+
+ train_loss_list, test_loss_list = run(30, optimizer, criterion, device)
+
+ File "/Users/○○○/Desktop/△△△_data/cnn.py", line 73, in run
+
+ train_loss = train_epoch(model, optimizer, criterion, train_loader, device)
+
+ File "/Users/○○○/Desktop/△△△_data/cnn.py", line 46, in train_epoch
+
+ outputs = model(images)
+
+ File "/usr/local/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
+
+ return forward_call(*input, **kwargs)
+
+ File "/Users/○○○/Desktop/△△△_data/cnn.py", line 33, in forward
+
+ x = x.view(-1, 16*5*5)
+
+ RuntimeError: shape '[-1, 400]' is invalid for input of size 2080832
+
+ ```
+
+
+
+ ### Relevant source code
+
+
+
+ ```python3
+
+ from pathlib import Path
+
+
+
+ from torch.utils.data import DataLoader, Dataset
+
+ from torchvision import transforms
+
+ from PIL import Image
+
+
+
+ import torch
+
+ import torch.nn as nn
+
+ import torch.nn.functional as F
+
+ import torch.optim as optim
+
+ import torchvision.transforms as transforms
+
+
+
+ import matplotlib.pyplot as plt
+
+
+
+ from torchvision.datasets import ImageFolder
+
+
+
+ class Net(nn.Module):
+
+     def __init__(self):
+
+         super().__init__()
+
+         self.conv1 = nn.Conv2d(3, 6, 5) # Convolution layer: (input channels, number of filters, filter size)
+
+         # output image size: 28
+
+         self.pool = nn.MaxPool2d(2, 2) # Pooling layer: (window size, stride)
+
+         # output image size: 14
+
+         self.conv2 = nn.Conv2d(6, 16, 5)
+
+         # output image size: 5
+
+         self.fc1 = nn.Linear(16*5*5, 256) # Fully connected layer
+
+         self.dropout = nn.Dropout(p=0.5) # Dropout: (p = dropout rate)
+
+         self.fc2 = nn.Linear(256, 10)
+
+
+
+     def forward(self, x):
+
+         x = self.pool(F.relu(self.conv1(x)))
+
+         x = self.pool(F.relu(self.conv2(x)))
+
+         x = x.view(-1, 16*5*5)
+
+         x = F.relu(self.fc1(x))
+
+         x = self.dropout(x)
+
+         x = self.fc2(x)
+
+         return x
+
+
+
+
+
+ def train_epoch(model, optimizer, criterion, dataloader, device):
+
+     train_loss = 0
+
+     model.train()
+
+     for i, (images, labels) in enumerate(dataloader):
+
+         images, labels = images.to(device), labels.to(device)
+
+         optimizer.zero_grad()
+
+         outputs = model(images)
+
+         loss = criterion(outputs, labels)
+
+         loss.backward()
+
+         optimizer.step()
+
+         train_loss += loss.item()
+
+     train_loss = train_loss / len(train_loader.dataset)
+
+     return train_loss
+
+
+
+
+
+ def inference(model, optimizer, criterion, dataloader, device):
+
+     model.eval()
+
+     test_loss=0
+
+
+
+     with torch.no_grad():
+
+         for i, (images, labels) in enumerate(test_loader):
+
+             images, labels = images.to(device), labels.to(device)
+
+             outputs = model(images)
+
+             loss = criterion(outputs, labels)
+
+             test_loss += loss.item()
+
+     test_loss = test_loss / len(test_loader.dataset)
+
+     return test_loss
+
+
+
+
+
+ def run(num_epochs, optimizer, criterion, device):
+
+     train_loss_list = []
+
+     test_loss_list = []
+
+     for epoch in range(num_epochs):
+
+         train_loss = train_epoch(model, optimizer, criterion, train_loader, device)
+
+         test_loss = inference(model, optimizer, criterion, test_loader, device)
+
+
+
+         print(f'Epoch [{epoch+1}], train_Loss : {train_loss:.4f}, test_Loss : {test_loss:.4f}')
+
+         train_loss_list.append(train_loss)
+
+         test_loss_list.append(test_loss)
+
+     return train_loss_list, test_loss_list
+
+
+
+
+
+ if __name__ == '__main__':
+
+     # Create the Transform.
+
+     transform = transforms.Compose([transforms.Resize(256), transforms.ToTensor()])
+
+     # Create the Dataset.
+
+     dataset = ImageFolder('/Users/○○○/Desktop/△△△_data/□□□_dataset', transform)
+
+     # Create the DataLoader.
+
+     dataloader = DataLoader(dataset, batch_size=3)
+
+
+
+     #for batch in dataloader:
+
+     #    print(batch.shape)
+
+
+
+     # Set the plot style
+
+     plt.style.use('seaborn-darkgrid')
+
+
+
+     # Normalization
+
+     normalize = transforms.Normalize(mean=(0.0, 0.0, 0.0), std=(1.0, 1.0, 1.0))
+
+     # Convert to Tensor
+
+     to_tensor = transforms.ToTensor()
+
+
+
+     train_ratio = 0.8
+
+     train_size = int(train_ratio * len(dataset))
+
+     val_size = len(dataset) - train_size
+
+     data_size = {"train":train_size, "val":val_size}
+
+     train_dataset, test_dataset = torch.utils.data.random_split(dataset, [train_size, val_size])
+
+
+
+     transform_train = transforms.Compose([to_tensor, normalize])
+
+     transform_test = transforms.Compose([to_tensor, normalize])
+
+
+
+     batch_size = 64
 
  train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
 
- File "/usr/local/lib/python3.9/site-packages/torch/utils/data/dataloader.py", line 268, in __init__
-
- sampler = RandomSampler(dataset, generator=generator)
-
- File "/usr/local/lib/python3.9/site-packages/torch/utils/data/sampler.py", line 102, in __init__
-
- raise ValueError("num_samples should be a positive integer "
-
- ValueError: num_samples should be a positive integer value, but got num_samples=0
-
- ```
-
-
-
- ### Relevant source code
-
-
-
- ```python3
-
- from pathlib import Path
-
-
-
- from torch.utils.data import DataLoader, Dataset
-
- from torchvision import transforms
-
- from PIL import Image
-
-
-
- import torch
-
- import torch.nn as nn
-
- import torch.nn.functional as F
-
- import torch.optim as optim
-
- import torchvision
-
- import torchvision.transforms as transforms
-
-
-
- import numpy as np
-
- import matplotlib.pyplot as plt
-
-
-
-
-
- class ImageFolder(Dataset):
-
-     IMG_EXTENSIONS = [".jpg", ".jpeg", ".png", ".bmp"]
-
-
-
-     def __init__(self, img_dir, transform=None):
-
-         # Get the list of image file paths.
-
-         self.img_paths = self._get_img_paths(img_dir)
-
-         self.transform = transform
-
-
-
-     def __getitem__(self, index):
-
-         path = self.img_paths[index]
-
-
-
-         # Load the image.
-
-         img = Image.open(path)
-
-
-
-         if self.transform is not None:
-
-             # Apply preprocessing if specified.
-
-             img = self.transform(img)
-
-
-
-         return img
-
-
-
-     def _get_img_paths(self, img_dir):
-
-         """Get the list of image file paths in the given directory.
-
-         """
-
-         img_dir = Path(img_dir)
-
-         img_paths = [
-
-             p for p in img_dir.iterdir() if p.suffix in ImageFolder.IMG_EXTENSIONS
-
-         ]
-
-
-
-         return img_paths
-
-
-
-     def __len__(self):
-
-         """Return the number of image files in the directory.
-
-         """
-
-         return len(self.img_paths)
-
-
-
-
-
- class Net(nn.Module):
-
-     def __init__(self):
-
-         super().__init__()
-
-         self.conv1 = nn.Conv2d(3, 6, 5) # Convolution layer: (input channels, number of filters, filter size)
-
-         # output image size: 28
-
-         self.pool = nn.MaxPool2d(2, 2) # Pooling layer: (window size, stride)
-
-         # output image size: 14
-
-         self.conv2 = nn.Conv2d(6, 16, 5)
-
-         # output image size: 5
-
-         self.fc1 = nn.Linear(16*5*5, 256) # Fully connected layer
-
-         self.dropout = nn.Dropout(p=0.5) # Dropout: (p = dropout rate)
-
-         self.fc2 = nn.Linear(256, 10)
-
-
-
-     def forward(self, x):
-
-         x = self.pool(F.relu(self.conv1(x)))
-
-         x = self.pool(F.relu(self.conv2(x)))
-
-         x = x.view(-1, 16*5*5)
-
-         x = F.relu(self.fc1(x))
-
-         x = self.dropout(x)
-
-         x = self.fc2(x)
-
-         return x
-
-
-
-
-
- def train_epoch(model, optimizer, criterion, dataloader, device):
-
-     train_loss = 0
-
-     model.train()
-
-     for i, (images, labels) in enumerate(dataloader):
-
-         images, labels = images.to(device), labels.to(device)
-
-         optimizer.zero_grad()
-
-         outputs = model(images)
-
-         loss = criterion(outputs, labels)
-
-         loss.backward()
-
-         optimizer.step()
-
-         train_loss += loss.item()
-
-     train_loss = train_loss / len(train_loader.dataset)
-
-     return train_loss
-
-
-
-
-
- def inference(model, optimizer, criterion, dataloader, device):
-
-     model.eval()
-
-     test_loss=0
-
-
-
-     with torch.no_grad():
-
-         for i, (images, labels) in enumerate(test_loader):
-
-             images, labels = images.to(device), labels.to(device)
-
-             outputs = model(images)
-
-             loss = criterion(outputs, labels)
-
-             test_loss += loss.item()
-
-     test_loss = test_loss / len(test_loader.dataset)
-
-     return test_loss
-
-
-
-
-
- def run(num_epochs, optimizer, criterion, device):
-
-     train_loss_list = []
-
-     test_loss_list = []
-
-     for epoch in range(num_epochs):
-
-         train_loss = train_epoch(model, optimizer, criterion, train_loader, device)
-
-         test_loss = inference(model, optimizer, criterion, test_loader, device)
-
-
-
-         print(f'Epoch [{epoch+1}], train_Loss : {train_loss:.4f}, test_Loss : {test_loss:.4f}')
-
-         train_loss_list.append(train_loss)
-
-         test_loss_list.append(test_loss)
-
-     return train_loss_list, test_loss_list
-
-
-
-
-
- if __name__ == '__main__':
-
-     # Create the Transform.
-
-     transform = transforms.Compose([transforms.Resize(256), transforms.ToTensor()])
-
-     # Create the Dataset.
-
-     dataset = ImageFolder('/Users/○○○○○/Desktop/●●_data/△△△△△_dataset', transform)
-
-     # Create the DataLoader.
-
-     dataloader = DataLoader(dataset, batch_size=3)
-
-
-
-     for batch in dataloader:
-
-         print(batch.shape)
-
-
-
-
-
-     # Set the plot style
-
-     plt.style.use('seaborn-darkgrid')
-
-
-
-     # Normalization
-
-     normalize = transforms.Normalize(mean=(0.0, 0.0, 0.0), std=(1.0, 1.0, 1.0))
-
-     # Convert to Tensor
-
-     to_tensor = transforms.ToTensor()
-
-
-
-     train_ratio = 0.8
-
-     train_size = int(train_ratio * len(dataset))
-
-     # Cast to int with int().
-
-     val_size = len(dataset) - train_size
-
-     data_size = {"train":train_size, "val":val_size}
-
-     train_dataset, test_dataset = torch.utils.data.random_split(dataset, [train_size, val_size])
-
-
-
-     transform_train = transforms.Compose([to_tensor, normalize])
-
-     transform_test = transforms.Compose([to_tensor, normalize])
-
-
-
-     batch_size = 64
-
-     train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
-
      test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
 
      dataloaders = {"train":train_loader, "val":test_loader}
 
 
 
-
-
      model = Net()
 
 
@@ -412,6 +340,46 @@
 
 
 
-
-
- ```
+ ```
+
+
+
+ ### Relevant dataset
+
+ ```
+
+ ./□□□_dataset
+
+ ├── 0
+
+ │ ├── 0_0.png
+
+ │ ├── 0_1.png
+
+ │ ├── ・・・
+
+ │ ├── 0_10.png
+
+ ├── 1
+
+ │ ├── 1_0.png
+
+ │ ├── 1_1.png
+
+ │ ├── ・・・
+
+ │ ├── 1_10.png
+
+ ├── 2
+
+ │ ├── 2_0.png
+
+ │ ├── 2_1.png
+
+ │ ├── ・・・
+
+ │ ├── 2_10.png
+
+
+
+ ```
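For context on the revised traceback: the `# output image size: 28 / 14 / 5` comments in `Net` match a 32×32 input (conv → 28, pool → 14, conv → 10, pool → 5, hence `16*5*5`), but the posted pipeline resizes images with `transforms.Resize(256)`, so the tensor reaching `x.view(-1, 16*5*5)` is far larger than 400 elements per image. A minimal sketch of the mismatch, not part of the original question, assuming a square 3×256×256 input (an assumption — `Resize(256)` only fixes the shorter side for non-square images, and the real image sizes are masked in the post):

```python3
# Hypothetical check: trace the shape that actually reaches
# x.view(-1, 16*5*5) in Net.forward for a 256x256 input.
import torch
import torch.nn as nn
import torch.nn.functional as F

conv1 = nn.Conv2d(3, 6, 5)   # same layers as Net.__init__
pool = nn.MaxPool2d(2, 2)
conv2 = nn.Conv2d(6, 16, 5)

x = torch.zeros(1, 3, 256, 256)  # assumed square input after Resize(256)
x = pool(F.relu(conv1(x)))       # conv: 256 -> 252, pool: -> 126
x = pool(F.relu(conv2(x)))       # conv: 126 -> 122, pool: -> 61

print(x.shape)    # torch.Size([1, 16, 61, 61])
print(x.numel())  # 59536 per image, not the 400 (= 16*5*5) fc1 expects
```

Under that assumption, `fc1` would need `nn.Linear(16*61*61, 256)`, and writing the flatten as `x = x.view(x.size(0), -1)` keeps the batch dimension intact instead of folding the size mismatch into `-1`.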