回答編集履歴
2
加筆
test
CHANGED
@@ -5,3 +5,183 @@
|
|
5
5
|
ハードウェアの助けを借りて力技でゴリ押すならマルチスレッド(SIMDもアリ)、
|
6
6
|
|
7
7
|
もっと欲張るならOpenCLとかCUDAとか。
|
8
|
+
|
9
|
+
|
10
|
+
|
11
|
+
[追記] ...ってことで、マルチスレッドとCUDAでやってみたわけよ。
|
12
|
+
|
13
|
+
|
14
|
+
|
15
|
+
```C++
|
16
|
+
|
17
|
+
#include <thread>
|
18
|
+
|
19
|
+
#include <vector>
|
20
|
+
|
21
|
+
#include <iostream>
|
22
|
+
|
23
|
+
#include <chrono>
|
24
|
+
|
25
|
+
#include <cassert>
|
26
|
+
|
27
|
+
#include <thread>
|
28
|
+
|
29
|
+
#include <cuda_runtime.h>
|
30
|
+
|
31
|
+
#include <device_launch_parameters.h>
|
32
|
+
|
33
|
+
|
34
|
+
|
35
|
+
// Measure the wall-clock run time of a callable, in milliseconds.
//
// @param f  zero-argument callable; invoked exactly once
// @return   elapsed time in milliseconds (fractional, microsecond resolution)
template<typename Function>
float time_call(Function&& f) {
    using namespace std::chrono;
    // steady_clock is guaranteed monotonic; high_resolution_clock may alias
    // a non-monotonic clock and is discouraged for interval measurement.
    const auto start = steady_clock::now();
    f();
    const auto stop = steady_clock::now();
    return duration_cast<microseconds>(stop - start).count() / 1000.0f;
}
|
50
|
+
|
51
|
+
|
52
|
+
|
53
|
+
// Single-threaded reference implementation: C[i] = A[i] + B[i] for the
// first N elements.  A, B and C must each hold at least N elements.
void Addition_single(std::vector<int>& C, const std::vector<int>& A, const std::vector<int>& B, int N) {
    std::transform(A.begin(), A.begin() + N, B.begin(), C.begin(), std::plus<int>{});
}
|
62
|
+
|
63
|
+
|
64
|
+
|
65
|
+
// Multi-threaded C[i] = A[i] + B[i] for the first N elements, using a
// fixed pool of 4 threads.  Result is identical to Addition_single.
//
// FIX: the original interleaved elements across threads with a stride of
// nthr (thread t wrote C[t], C[t+4], ...), so all four threads wrote into
// the same cache lines (false sharing) and streamed the whole of A/B/C.
// Each thread now owns one contiguous chunk, which keeps writes to a
// cache line on a single thread.
void Addition_multi(std::vector<int>& C, const std::vector<int>& A, const std::vector<int>& B, int N) {
    const int nthr = 4;
    std::thread threads[nthr];
    for ( int t = 0; t < nthr; ++t ) {
        // chunk boundaries: [N*t/nthr, N*(t+1)/nthr) — covers [0, N) exactly
        // once even when N is not a multiple of nthr (64-bit to avoid overflow)
        const int begin = static_cast<int>(static_cast<long long>(N) * t / nthr);
        const int end   = static_cast<int>(static_cast<long long>(N) * (t + 1) / nthr);
        threads[t] = std::thread(
            [&C, &A, &B](int b, int e) {
                for ( int i = b; i < e; ++i ) {
                    C[i] = A[i] + B[i];
                }
            },
            begin, end);
    }
    for ( auto& thr : threads ) thr.join();
}
|
92
|
+
|
93
|
+
|
94
|
+
|
95
|
+
// CUDA kernel: one thread per element, pC[i] = pA[i] + pB[i].
// The launch rounds the grid up to whole blocks, so the bounds check
// discards the surplus threads of the last block.
__global__ void kernel(int* pC, const int* pA, const int* pB, int size) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if ( idx >= size ) return;  // guard: grid may overshoot size
    pC[idx] = pA[idx] + pB[idx];
}
|
106
|
+
|
107
|
+
|
108
|
+
|
109
|
+
void Addition_cuda(std::vector<int>& C, const std::vector<int>& A, const std::vector<int>& B, int N) {
|
110
|
+
|
111
|
+
int* pA;
|
112
|
+
|
113
|
+
int* pB;
|
114
|
+
|
115
|
+
int* pC;
|
116
|
+
|
117
|
+
cudaMalloc(&pA, N*sizeof(int));
|
118
|
+
|
119
|
+
cudaMalloc(&pB, N*sizeof(int));
|
120
|
+
|
121
|
+
cudaMalloc(&pC, N*sizeof(int));
|
122
|
+
|
123
|
+
cudaMemcpy(pA, A.data(), N*sizeof(int), cudaMemcpyHostToDevice);
|
124
|
+
|
125
|
+
cudaMemcpy(pB, B.data(), N*sizeof(int), cudaMemcpyHostToDevice);
|
126
|
+
|
127
|
+
kernel<<<(N+255)/256,256>>>(pC, pA, pB, N);
|
128
|
+
|
129
|
+
cudaMemcpy(C.data(), pC, N*sizeof(int), cudaMemcpyDeviceToHost);
|
130
|
+
|
131
|
+
}
|
132
|
+
|
133
|
+
|
134
|
+
|
135
|
+
int main() {
|
136
|
+
|
137
|
+
int N = 2000000;
|
138
|
+
|
139
|
+
std::vector<int> A(N);
|
140
|
+
|
141
|
+
std::vector<int> B(N);
|
142
|
+
|
143
|
+
for ( int i = 0; i < N; ++i ) {
|
144
|
+
|
145
|
+
A[i] = i; B[i] = -i;
|
146
|
+
|
147
|
+
}
|
148
|
+
|
149
|
+
std::vector<int> C(N);
|
150
|
+
|
151
|
+
|
152
|
+
|
153
|
+
float single = time_call([&]() { Addition_single(C, A, B, N); });
|
154
|
+
|
155
|
+
for ( int i = 0; i < N; ++i ) assert( T[i] == 0 );
|
156
|
+
|
157
|
+
std::cout << "single = " << single << "[ms]\n";
|
158
|
+
|
159
|
+
|
160
|
+
|
161
|
+
float multi = time_call([&]() { Addition_multi(C, A, B, N); });
|
162
|
+
|
163
|
+
for ( int i = 0; i < N; ++i ) assert( T[i] == 0 );
|
164
|
+
|
165
|
+
std::cout << "multi = " << multi << "[ms]\n" ;
|
166
|
+
|
167
|
+
|
168
|
+
|
169
|
+
float cuda = time_call([&]() { Addition_cuda(C, A, B, N); });
|
170
|
+
|
171
|
+
for ( int i = 0; i < N; ++i ) assert( T[i] == 0 );
|
172
|
+
|
173
|
+
std::cout << "cuda = " << cuda << "[ms]\n" ;
|
174
|
+
|
175
|
+
}
|
176
|
+
|
177
|
+
```
|
178
|
+
|
179
|
+
|
180
|
+
|
181
|
+
結果: ぜーんぜん速くならんです。
|
182
|
+
|
183
|
+
thread/CUDAのオーバヘッドが速くなった分を食ってしまう。
|
184
|
+
|
185
|
+
時間計算量 O(N) なのを頑張って速くしようと考えん方がむしろ幸せなんじゃないでしょか。
|
186
|
+
|
187
|
+
|
1
加筆
test
CHANGED
@@ -2,6 +2,6 @@
|
|
2
2
|
|
3
3
|
ですがより速い"アルゴリズム"はなさそうです。
|
4
4
|
|
5
|
-
ハードウェアの助けを借りて力技でゴリ押すならマルチスレッド、
|
5
|
+
ハードウェアの助けを借りて力技でゴリ押すならマルチスレッド(SIMDもアリ)、
|
6
6
|
|
7
7
|
もっと欲張るならOpenCLとかCUDAとか。
|