質問編集履歴
2
errorとコードを編集しました。
test
CHANGED
File without changes
|
test
CHANGED
@@ -34,7 +34,7 @@
|
|
34
34
|
|
35
35
|
```
|
36
36
|
|
37
|
-
RuntimeError:
|
37
|
+
RuntimeError: Expected 4-dimensional input for 4-dimensional weight [16, 3, 3, 3], but got 2-dimensional input of size [32, 196608] instead
|
38
38
|
|
39
39
|
```
|
40
40
|
|
@@ -48,6 +48,36 @@
|
|
48
48
|
|
49
49
|
```python
|
50
50
|
|
51
|
+
import torch
|
52
|
+
|
53
|
+
import torch.nn as nn
|
54
|
+
|
55
|
+
import torch.nn.functional as F
|
56
|
+
|
57
|
+
|
58
|
+
|
59
|
+
import torch.optim as optim
|
60
|
+
|
61
|
+
import model,dataset
|
62
|
+
|
63
|
+
from model import *
|
64
|
+
|
65
|
+
|
66
|
+
|
67
|
+
from tqdm import tqdm
|
68
|
+
|
69
|
+
from torch.autograd import Variable
|
70
|
+
|
71
|
+
|
72
|
+
|
73
|
+
|
74
|
+
|
75
|
+
|
76
|
+
|
77
|
+
|
78
|
+
|
79
|
+
#一つの機能を作ったら→printで確認
|
80
|
+
|
51
81
|
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
52
82
|
|
53
83
|
|
@@ -122,7 +152,15 @@
|
|
122
152
|
|
123
153
|
|
124
154
|
|
125
|
-
|
155
|
+
# batch = len(next(iter(train_loader))) #2
|
156
|
+
|
157
|
+
# for i in train_loader:
|
158
|
+
|
159
|
+
# print(i)
|
160
|
+
|
161
|
+
# for i in valid_loader:
|
162
|
+
|
163
|
+
# print(i)
|
126
164
|
|
127
165
|
|
128
166
|
|
@@ -142,79 +180,135 @@
|
|
142
180
|
|
143
181
|
#start epoch
|
144
182
|
|
145
|
-
#
|
183
|
+
#2エポック
|
184
|
+
|
146
|
-
|
185
|
+
num_epochs = 2
|
186
|
+
|
187
|
+
|
188
|
+
|
189
|
+
#最後にlossとaccuracyのグラフをプロットするためのリスト
|
190
|
+
|
191
|
+
train_loss_list = []
|
192
|
+
|
193
|
+
train_acc_list = []
|
194
|
+
|
195
|
+
val_loss_list = []
|
196
|
+
|
197
|
+
val_acc_list = []
|
198
|
+
|
199
|
+
|
200
|
+
|
147
|
-
for epoch
|
201
|
+
for epoch in range(num_epochs):
|
148
|
-
|
149
|
-
|
150
|
-
|
202
|
+
|
151
|
-
#
|
203
|
+
#エポックごとに初期化
|
152
|
-
|
204
|
+
|
153
|
-
r
|
205
|
+
train_loss = 0
|
206
|
+
|
154
|
-
|
207
|
+
train_acc = 0
|
208
|
+
|
155
|
-
|
209
|
+
val_loss = 0
|
210
|
+
|
156
|
-
|
211
|
+
val_acc = 0
|
212
|
+
|
213
|
+
#train==============================
|
214
|
+
|
215
|
+
#訓練モードへ切り替え
|
216
|
+
|
217
|
+
model.train()
|
218
|
+
|
219
|
+
#ミニバッチで分割して読み込む
|
220
|
+
|
157
|
-
for i,
|
221
|
+
for i, (images, labels) in enumerate(train_loader):
|
222
|
+
|
158
|
-
|
223
|
+
#viewで縦横32ピクセルで3チャンネルの画像を1次元に変換
|
159
|
-
|
160
|
-
|
224
|
+
|
161
|
-
#
|
225
|
+
#toでgpuに転送
|
162
|
-
|
163
|
-
|
226
|
+
|
164
|
-
|
165
|
-
inputs, labels = data
|
166
|
-
|
167
|
-
# Variableに変形
|
168
|
-
|
169
|
-
# wrap them in Variable
|
170
|
-
|
171
|
-
i
|
227
|
+
images, labels = images.view(images.shape[0], -1).to(device), labels.to(device)
|
172
|
-
|
173
|
-
|
174
|
-
|
228
|
+
|
229
|
+
|
230
|
+
|
175
|
-
#
|
231
|
+
#勾配をリセット
|
176
|
-
|
177
|
-
# zero the parameter gradients
|
178
232
|
|
179
233
|
optimizer.zero_grad()
|
180
234
|
|
181
|
-
|
182
|
-
|
183
|
-
#
|
235
|
+
#順伝播の計算
|
184
|
-
|
185
|
-
|
236
|
+
|
186
|
-
|
187
|
-
outputs = model(i
|
237
|
+
outputs = model(images)
|
188
|
-
|
189
|
-
|
190
|
-
|
238
|
+
|
191
|
-
#
|
239
|
+
#lossの計算
|
192
240
|
|
193
241
|
loss = criterion(outputs, labels)
|
194
242
|
|
243
|
+
#lossのミニバッチ分を溜め込む
|
244
|
+
|
245
|
+
train_loss += loss.item()
|
246
|
+
|
247
|
+
#accuracyをミニバッチ分を溜め込む
|
248
|
+
|
249
|
+
#正解ラベル(labels)と予測値のtop1(outputs.max(1))が合っている場合に1が返ってきます。
|
250
|
+
|
251
|
+
train_acc += (outputs.max(1)[1] == labels).sum().item()
|
252
|
+
|
253
|
+
#逆伝播の計算
|
254
|
+
|
195
255
|
loss.backward()
|
196
256
|
|
257
|
+
#重みの更新
|
258
|
+
|
197
259
|
optimizer.step()
|
198
260
|
|
199
|
-
|
200
|
-
|
201
|
-
# ロスの表示
|
202
|
-
|
203
|
-
|
261
|
+
#平均lossと平均accuracyを計算
|
262
|
+
|
204
|
-
|
263
|
+
avg_train_loss = train_loss / len(train_loader.dataset)
|
264
|
+
|
265
|
+
avg_train_acc = train_acc / len(train_loader.dataset)
|
266
|
+
|
267
|
+
|
268
|
+
|
269
|
+
#val==============================
|
270
|
+
|
271
|
+
#評価モードへ切り替え
|
272
|
+
|
273
|
+
model.eval()
|
274
|
+
|
275
|
+
#評価するときに必要のない計算が走らないようにtorch.no_gradを使用しています。
|
276
|
+
|
277
|
+
with torch.no_grad():
|
278
|
+
|
279
|
+
for images, labels in valid_loader:
|
280
|
+
|
281
|
+
images, labels = images.view(-1, 32*32*3).to(device), labels.to(device)
|
282
|
+
|
283
|
+
outputs = model(images)
|
284
|
+
|
285
|
+
loss = criterion(outputs, labels)
|
286
|
+
|
205
|
-
|
287
|
+
val_loss += loss.item()
|
288
|
+
|
206
|
-
|
289
|
+
val_acc += (outputs.max(1)[1] == labels).sum().item()
|
290
|
+
|
291
|
+
avg_val_loss = val_loss / len(valid_loader.dataset)
|
292
|
+
|
207
|
-
|
293
|
+
avg_val_acc = val_acc / len(valid_loader.dataset)
|
208
|
-
|
294
|
+
|
295
|
+
|
296
|
+
|
209
|
-
|
297
|
+
#訓練データのlossと検証データのlossとaccuracyをログで出しています。
|
298
|
+
|
210
|
-
|
299
|
+
print ('Epoch [{}/{}], Loss: {loss:.4f}, val_loss: {val_loss:.4f}, val_acc: {val_acc:.4f}'
|
300
|
+
|
211
|
-
(epoch
|
301
|
+
.format(epoch+1, num_epochs, i+1, loss=avg_train_loss, val_loss=avg_val_loss, val_acc=avg_val_acc))
|
212
|
-
|
302
|
+
|
213
|
-
|
303
|
+
#最後にグラフをプロットするようにリストに格納
|
214
|
-
|
215
|
-
|
216
|
-
|
304
|
+
|
217
|
-
|
305
|
+
train_loss_list.append(avg_train_loss)
|
306
|
+
|
307
|
+
train_acc_list.append(avg_train_acc)
|
308
|
+
|
309
|
+
val_loss_list.append(avg_val_loss)
|
310
|
+
|
311
|
+
val_acc_list.append(avg_val_acc)
|
218
312
|
|
219
313
|
```
|
220
314
|
|
1
追記しました。
test
CHANGED
File without changes
|
test
CHANGED
@@ -368,6 +368,46 @@
|
|
368
368
|
|
369
369
|
```
|
370
370
|
|
371
|
+
model.pyの出力結果を示します。
|
372
|
+
|
373
|
+
ここでは最後のLinearでの数値と合っていないように思えます。
|
374
|
+
|
375
|
+
```ここに言語を入力
|
376
|
+
|
377
|
+
Net(
|
378
|
+
|
379
|
+
(relu): ReLU()
|
380
|
+
|
381
|
+
(conv1): Conv2d(3, 16, kernel_size=(3, 3), stride=(1, 1))
|
382
|
+
|
383
|
+
(pool1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
|
384
|
+
|
385
|
+
(conv2): Conv2d(16, 32, kernel_size=(3, 3), stride=(1, 1))
|
386
|
+
|
387
|
+
(pool2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
|
388
|
+
|
389
|
+
(conv3): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1))
|
390
|
+
|
391
|
+
(pool3): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
|
392
|
+
|
393
|
+
(conv4): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1))
|
394
|
+
|
395
|
+
(pool4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
|
396
|
+
|
397
|
+
(conv5): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1))
|
398
|
+
|
399
|
+
(pool5): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
|
400
|
+
|
401
|
+
(fc1): Linear(in_features=2304, out_features=120, bias=True)
|
402
|
+
|
403
|
+
(fc2): Linear(in_features=120, out_features=84, bias=True)
|
404
|
+
|
405
|
+
(fc3): Linear(in_features=84
|
406
|
+
|
407
|
+
)
|
408
|
+
|
409
|
+
```
|
410
|
+
|
371
411
|
|
372
412
|
|
373
413
|
### まとめ
|