Question edit history

1

Code fix

2022/10/11 05:51

Posted by

shinww

Score: 4

test CHANGED
File without changes
test CHANGED
@@ -24,137 +24,6 @@
 
  ### Relevant source code
 
- ```python
- Source code
- ``` train_imagenet.py
-
- from __future__ import print_function
- import argparse
- import datetime
- import json
- import multiprocessing
- import os
- import random
- import sys
- import threading
- import time
-
- import numpy as np
- from PIL import Image
- import six
- import six.moves.cPickle as pickle
- from six.moves import queue
-
- from chainer import computational_graph
- from chainer import cuda
- from chainer import optimizers
- from chainer import serializers
-
-
- parser = argparse.ArgumentParser(description='Learning convnet from ILSVRC2012 dataset')
-
- parser.add_argument('--train', help=r'D:\numadalab\2020\実験データ\20201114')
- parser.add_argument('--val', help=r'D:\numadalab\2020\実験データ\20201207_day')
-
- parser.add_argument('--mean', '-m', default='mean.npy',
-                     help='Path to the mean file (computed by compute_mean.py)')
-
- parser.add_argument('--arch', '-a', default='nin',
-                     help='Convnet architecture \
-                     (nin, alex, alexbn, googlenet, googlenetbn)')
- parser.add_argument('--batchsize', '-B', type=int, default=32,
-                     help='Learning minibatch size')
- parser.add_argument('--val_batchsize', '-b', type=int, default=250,
-                     help='Validation minibatch size')
- parser.add_argument('--epoch', '-E', default=50, type=int,
-                     help='Number of epochs to learn')
- parser.add_argument('--gpu', '-g', default=-1, type=int,
-                     help='GPU ID (negative value indicates CPU)')
- parser.add_argument('--loaderjob', '-j', default=20, type=int,
-                     help='Number of parallel data loading processes')
- parser.add_argument('--root', '-r', default='.',
-                     help='Root directory path of image files')
- parser.add_argument('--out', '-o', default='model',
-                     help='Path to save model on each validation')
- parser.add_argument('--outstate', '-s', default='state',
-                     help='Path to save optimizer state on each validation')
- parser.add_argument('--initmodel', default='',
-                     help='Initialize the model from given file')
- parser.add_argument('--resume', default='',
-                     help='Resume the optimization from snapshot')
- parser.add_argument('--test', dest='test', action='store_true')
- parser.set_defaults(test=False)
-
- #args = parser.parse_args(args=[])
- args = parser.parse_args()
-
- if args.gpu >= 0:
-     cuda.check_cuda_available()
- xp = cuda.cupy if args.gpu >= 0 else np
-
- assert 50000 % args.val_batchsize == 0
-
- if args.test:
-     denominator = 1
- else:
-     denominator = 100000
-
-
- def load_image_list(path, root):
-     tuples = []
-     for line in open(path):
-         pair = line.strip().split()
-         tuples.append((os.path.join(root, pair[0]), np.int32(pair[1])))
-     return tuples
-
- # Prepare dataset
- print(args.train)
- train_list = load_image_list(args.train, args.root)
-
- val_list = load_image_list(args.val, args.root)
-
-
- mean_image = np.load(args.mean, allow_pickle=True)
-
-
- # Prepare model
- if args.arch == 'nin':
-     import nin
-     model = nin.NIN()
- elif args.arch == 'i2vvgg':
-     import i2vvgg
-     model = i2vvgg.i2vVGG()
- elif args.arch == 'alex':
-     import alex
-     model = alex.Alex()
- elif args.arch == 'alexbn':
-     import alexbn
-     model = alexbn.AlexBN()
- elif args.arch == 'googlenet':
-     import googlenet
-     model = googlenet.GoogLeNet()
- elif args.arch == 'googlenetbn':
-     import googlenetbn
-     model = googlenetbn.GoogLeNetBN()
- else:
-     raise ValueError('Invalid architecture name')
-
- if args.gpu >= 0:
-     cuda.get_device(args.gpu).use()
-     model.to_gpu()
-
-
- # Setup optimizer
- optimizer = optimizers.MomentumSGD(lr=0.01, momentum=0.9)
- optimizer.setup(model)
-
- # Init/Resume
- if args.initmodel:
-     print('Load model from', args.initmodel)
-     serializers.load_npz(args.initmodel, model)
- if args.resume:
-     print('Load optimizer state from', args.resume)
-     serializers.load_npz(args.resume, optimizer)
 
  ```python
  Code nin.py
@@ -190,42 +59,4 @@
          self.loss = None
          self.accuracy = None
 
-     def __call__(self, x, t):
+
-         self.clear()
-         h = F.max_pooling_2d(F.relu(self.mlpconv1(x)), 3, stride=2)
-         h = F.max_pooling_2d(F.relu(self.mlpconv2(h)), 3, stride=2)
-         h = F.max_pooling_2d(F.relu(self.mlpconv3(h)), 3, stride=2)
-         h = self.mlpconv4(F.dropout(h, train=self.train))
-         h = F.reshape(F.average_pooling_2d(h, 6), (x.data.shape[0], 1000))
-
-         self.loss = F.softmax_cross_entropy(h, t)
-         self.accuracy = F.accuracy(h, t)
-         return self.loss
-
-     def predict(self, x_data, train=False):
-         x = chainer.Variable(x_data, volatile=True)
-
-         h = F.relu(self.conv1(x))
-         h = F.relu(self.conv1a(h))
-         h = F.relu(self.conv1b(h))
-         h = F.max_pooling_2d(h, 3, stride=2)
-         h = F.relu(self.conv2(h))
-         h = F.relu(self.conv2a(h))
-         h = F.relu(self.conv2b(h))
-         h = F.max_pooling_2d(h, 3, stride=2)
-         h = F.relu(self.conv3(h))
-         h = F.relu(self.conv3a(h))
-         h = F.relu(self.conv3b(h))
-         h = F.max_pooling_2d(h, 3, stride=2)
-         h = F.dropout(h, train=train)
-         h = F.relu(self.conv4(h))
-         h = F.relu(self.conv4a(h))
-         h = F.relu(self.conv4b(h))
-         h = F.reshape(F.average_pooling_2d(h, 6), (x_data.shape[0], 1000))
-         return F.softmax(h)
-
-
- ### Supplementary information (FW/tool versions, etc.)
-
- Please provide more detailed information here.
-