批改娘 10103. Advanced Matrix Calculator (CUDA)

contents

  1. 1. 題目描述
    1. 1.1. sequence.c
  2. 2. 輸入格式
  3. 3. 輸出格式
  4. 4. 範例輸入 1
  5. 5. 範例輸出 1
  6. 6. 編譯參數
  7. 7. Solution

題目描述

小明的數學作業要計算方陣,現在請你幫幫他!

題目給定數個 $N \times N$ 的矩陣和 $Q$ 小題,每一小題只由加法和乘法構成。

sequence.c

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
#include <stdio.h>
#include <stdint.h>
// #define DEBUG
#define UINT uint32_t
#define MAXN 1024
// C = A * B over the leading N x N submatrices (rows are MAXN wide).
// All arithmetic is uint32_t; overflow wraps by design (matches the judge).
void multiply(int N, UINT A[][MAXN], UINT B[][MAXN], UINT C[][MAXN]) {
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++) {
            UINT acc = 0;
            for (int k = 0; k < N; k++)
                acc += A[row][k] * B[k][col];
            C[row][col] = acc;
        }
    }
}
// C = A + B elementwise over the N x N region (uint32_t wrap-around).
void add(int N, UINT A[][MAXN], UINT B[][MAXN], UINT C[][MAXN]) {
    for (int row = 0; row < N; row++)
        for (int col = 0; col < N; col++)
            C[row][col] = A[row][col] + B[row][col];
}
// Fill A's N x N region with a deterministic pseudo-random sequence seeded
// by c. The recurrence x = (x*x + c + i + j) mod (N*N) must stay exactly
// as written so the generated matrices match the judge's.
void rand_gen(UINT c, int N, UINT A[][MAXN]) {
    UINT x = 2;
    const UINT n = N * N;
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            x = (x * x + c + i + j) % n;
            A[i][j] = x;
        }
    }
}
// Debug aid: dump A's N x N region to stderr, one bracketed row per line.
void print_matrix(int N, UINT A[][MAXN]) {
    for (int i = 0; i < N; i++) {
        fputs("[", stderr);
        for (int j = 0; j < N; j++)
            fprintf(stderr, " %u", A[i][j]);
        fputs(" ]\n", stderr);
    }
}
// Fold the N x N region into a single 32-bit hash in row-major order,
// using the multiplicative constant 2654435761 (Knuth's 2^32 / phi).
UINT signature(int N, UINT A[][MAXN]) {
    UINT h = 0;
    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++)
            h = (h + A[i][j]) * 2654435761LU;
    return h;
}
UINT IN[6][MAXN][MAXN], TMP[6][MAXN][MAXN];
// Reads N and six seeds, generates matrices A..F, then evaluates the two
// fixed expressions AB+CD and ABE+CDF, printing the signature of each.
int main() {
    int N, S[6];
    scanf("%d", &N);
    for (int i = 0; i < 6; i++) {
        scanf("%d", &S[i]);
        rand_gen(S[i], N, IN[i]);
    }
    multiply(N, IN[0], IN[1], TMP[0]);   // AB
    multiply(N, IN[2], IN[3], TMP[1]);   // CD
    add(N, TMP[0], TMP[1], TMP[2]);      // AB + CD
    printf("%u\n", signature(N, TMP[2]));
    multiply(N, TMP[0], IN[4], TMP[3]);  // ABE (reuses AB)
    multiply(N, TMP[1], IN[5], TMP[4]);  // CDF (reuses CD)
    add(N, TMP[3], TMP[4], TMP[5]);      // ABE + CDF
    printf("%u\n", signature(N, TMP[5]));
    return 0;
}

輸入格式

測資只有一組,第一行會有兩個整數 $M, N$,表示題目給定 $M$ 個 $N \times N$ 矩陣;第二行上會有 $M$ 個整數 $S_i$,表示第 $i$ 個矩陣的生成種子。最後會有一行一個整數 $Q$,表示接下來有 $Q$ 行詢問,每一行上會有一個字串 $E$,表示接下來要處理的矩陣表達式,$E$ 只包含 A-Z 以及 +。

  • $1 \le M \le 26$
  • $1 \le N \le 1024$
  • $0 \le S_i \le 2^{31}$
  • $1 \le Q \le 100$
  • $|E| \le 26$

輸出格式

對於每一筆詢問輸出一行,內容為對應結果矩陣的簽章雜湊值。

範例輸入 1

1
2
3
4
5
6 2
0 1 2 3 4 5
2
AB+CD
ABE+CDF

範例輸出 1

1
2
2385860290
1374821695

編譯參數

1
2
$ nvcc -Xcompiler "-O2 -fopenmp" main.cu -o main
$ ./main

Solution

跟 OpenCL 的做法類似,讓三個 device 共同合作,每一個表達式都交給一個 device 完成,所以目標分配這些表達式使得計算最長時間最小化。處理手法都差不多,經過調校比 OpenCL 還要快上一些。

由於 CUDA 要藉由 cudaSetDevice(p); 設定計算裝置,推測此設定是以 __thread 保留字宣告的執行緒區域變數保存,因此用 OpenMP 建立三條 thread 分別設置便不會相互影響——__thread 變數在各自 thread 下是獨立的。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
#include <stdio.h>
#include <assert.h>
#include <inttypes.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>
#include <CL/cl.h>
#include <omp.h>
#define MAXGPU 3
#define MAXN 1024
#define MAXM 32
#define MAXMID 32
#define CheckErr(status) { gpuAssert((status), __FILE__, __LINE__); }
// Print a readable diagnostic when a CUDA API call fails and, unless
// abort is 0, terminate with the error code as exit status.
inline void gpuAssert(cudaError_t code, const char *file, int line, int abort=true) {
    if (code == cudaSuccess)
        return;
    fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// Host-side copies of the M input matrices, flattened row-major.
uint32_t hostMtx[MAXM][MAXN*MAXN];
// One host staging buffer per GPU for reading results back.
uint32_t hostMid[MAXGPU][MAXN*MAXN];
// Matrix dimension, matrix count, query count (set in onStart).
int N = MAXN, M, Q;
// Number of devices the expressions are distributed across.
int clNeedDevCnt = 3;
// One thread computes one element: C[x][y] = sum_k A[x][k] * B[k][y],
// with A, B, C flattened N x N row-major buffers in global memory.
// The host wrapper launches gridDim.x * blockDim.x == N*N threads; the
// guard below additionally protects against any over-covering launch.
__global__ void matrixMul(uint32_t A[], uint32_t B[], uint32_t C[], int N) {
    int r = blockIdx.x * blockDim.x + threadIdx.x;
    if (r >= N * N)
        return;  // stray thread past the matrix tail: do nothing
    int x = r / N, y = r % N;
    uint32_t sum = 0;
    for (int i = 0; i < N; i++)
        sum += A[x*N + i] * B[i*N + y];
    C[x * N + y] = sum;
}
// Elementwise C[r] = A[r] + B[r]; one thread per element.
// NOTE(review): no bounds guard — relies on the host wrapper (matrix_add)
// choosing block/grid sizes that divide N*N exactly, so the launch covers
// the buffers with no stray threads.
__global__ void matrixAdd(uint32_t A[], uint32_t B[], uint32_t C[]) {
int r = blockIdx.x * blockDim.x + threadIdx.x;
C[r] = A[r] + B[r];
}
// Launch matrixMul over N*N output elements on the current device, using
// the largest block size (<= 1024) that divides N*N so the grid covers
// the output exactly (the kernel itself also guards the tail).
void matrix_multiply(uint32_t *cuMtxA, uint32_t *cuMtxB, uint32_t *cuMtxC) {
    int threads = 1;
    for (int cand = 1024; cand >= 1; cand--) {
        if (N * N % cand == 0) {
            threads = cand;
            break;
        }
    }
    dim3 cuBlock(threads);
    dim3 cuGrid(N * N / threads);
    matrixMul<<<cuGrid, cuBlock>>>(cuMtxA, cuMtxB, cuMtxC, N);
    CheckErr(cudaGetLastError());
}
// Launch matrixAdd over N*N elements on the current device, with the
// largest block size (<= 1024) dividing N*N so each element gets exactly
// one thread (matrixAdd has no bounds guard).
void matrix_add(uint32_t *cuMtxA, uint32_t *cuMtxB, uint32_t *cuMtxC) {
    int threads = 1;
    for (int cand = 1024; cand >= 1; cand--) {
        if (N * N % cand == 0) {
            threads = cand;
            break;
        }
    }
    dim3 cuBlock(threads);
    dim3 cuGrid(N * N / threads);
    matrixAdd<<<cuGrid, cuBlock>>>(cuMtxA, cuMtxB, cuMtxC);
    CheckErr(cudaGetLastError());
}
// Raw text of the query expression currently being parsed.
char expr[1024];
// Expression-tree node: a leaf references an input matrix (mid); an inner
// node applies opcode ('+' or '*') to the subtrees l and r.
typedef struct Node {
struct Node *l, *r;
int opcode;
// hostV: optional host buffer; cuV: device buffer holding this node's value.
uint32_t *hostV, *cuV;
// regNeed: temp buffers needed to evaluate this subtree (set in updateNode);
// regId: which temp buffer receives this node's result.
int regNeed, regId;
// pid: index of the originating query; mid: input-matrix index (leaves only).
int pid, mid;
// h: estimated evaluation cost of this subtree, used for load balancing.
long long h;
} Node;
// Rename register id a to b throughout the inner nodes of subtree u.
// Leaves carry no register, so recursion stops at them; visiting order
// does not matter for a blanket rename.
void replaceReg(Node *u, int a, int b) {
    if (!u->l)
        return;  // leaf
    replaceReg(u->r, a, b);
    replaceReg(u->l, a, b);
    if (u->regId == a)
        u->regId = b;
}
// Link children l/r under u with the given opcode, accumulate the subtree
// cost estimate h, and assign temp-buffer "registers" (Sethi–Ullman-style
// numbering): regNeed = buffers the subtree needs, regId = buffer that
// will hold u's result. The replaceReg shuffles renumber a child's
// registers so the chosen target buffer is free when u executes.
void updateNode(Node *u, Node *l, Node *r, int opcode) {
u->l = l, u->r = r, u->opcode = opcode;
if (opcode == '+') {
// Addition is modeled as N cost units (cheap relative to a multiply).
u->h = u->l->h + u->r->h + N;
// -- register allocation
if (u->l->regNeed == u->r->regNeed) {
// Children tie: one extra buffer; the right child's result register is
// renamed to it so the add can accumulate there.
u->regNeed = u->l->regNeed + 1;
u->regId = u->regNeed;
replaceReg(u->r, u->r->regId, u->regId);
} else {
// Evaluate the needier child first; its buffer count suffices and the
// result lands in the max-need register.
u->regNeed = u->l->regNeed > u->r->regNeed ? u->l->regNeed : u->r->regNeed;
u->regId = u->regNeed;
}
} else if (opcode == '*') {
// A multiply is modeled as N*N cost units.
u->h = u->l->h + u->r->h + N*N;
// -- register allocation
// Unlike '+', a multiply cannot write in place over an operand, so the
// output must live in a buffer distinct from both children's results.
if (abs(u->l->regNeed - u->r->regNeed) == 1) {
u->regNeed = u->l->regNeed + 1;
u->regId = u->regNeed;
} else if (u->l->regNeed == u->r->regNeed) {
// Tie: two extra buffers; park the right child's result one below u's.
u->regNeed = u->l->regNeed + 2;
u->regId = u->regNeed;
replaceReg(u->r, u->r->regId, u->regId-1);
} else {
// Needs differ by >= 2: reuse the max, but swap the top two registers
// of whichever child currently owns u's target so the target is free.
u->regNeed = u->l->regNeed > u->r->regNeed ? u->l->regNeed : u->r->regNeed;
u->regId = u->regNeed;
int a, b;
if (u->l->regId == u->regId) {
// three-step rename (a<->b) via the sentinel id -1
a = u->l->regId, b = u->l->regId-1;
replaceReg(u->l, a, -1);
replaceReg(u->l, b, a);
replaceReg(u->l, -1, b);
} else {
a = u->r->regId, b = u->r->regId-1;
replaceReg(u->r, a, -1);
replaceReg(u->r, b, a);
replaceReg(u->r, -1, b);
}
}
}
// Register ids index into cuMemMid[device][...], so they must stay < MAXMID.
assert(u->regId < MAXMID);
}
// Recursive-descent parse of expr[l..r] into an expression tree.
// Precedence: split at a top-level '+' first (lowest), otherwise at an
// implicit-multiplication boundary (a top-level letter or '(' that is not
// the first token), otherwise strip one layer of parentheses.
// procId tags every node with the originating query index.
Node* parseExpr(int l, int r, char expr[], int procId) {
Node *u = (Node *) calloc(1, sizeof(Node));
u->pid = procId;
// Single character: a leaf referencing input matrix 'A' + idx.
if (l == r) {
int idx = expr[l] - 'A';
u->mid = idx;
u->h = 0;
return u;
}
// Pass 1: split at the rightmost-binding operator — a '+' at paren depth 0.
int cnt = 0;
for (int i = l; i <= r; i++) {
if (expr[i] == '(') {
cnt++;
} else if (expr[i] == ')') {
cnt--;
} else if (expr[i] == '+' && cnt == 0) {
updateNode(u, parseExpr(l, i-1, expr, procId), parseExpr(i+1, r, expr, procId), '+');
return u;
}
}
// Pass 2: no top-level '+'; split at the first implicit multiplication
// boundary (letter or '(' at depth 0, not at position l).
for (int i = l; i <= r; i++) {
if (expr[i] == '(') {
if (cnt == 0 && i != l) {
updateNode(u, parseExpr(l, i-1, expr, procId), parseExpr(i, r, expr, procId), '*');
return u;
}
cnt++;
} else if (expr[i] == ')') {
cnt--;
} else if (expr[i] >= 'A' && expr[i] <= 'Z' && cnt == 0 && i != l) {
updateNode(u, parseExpr(l, i-1, expr, procId), parseExpr(i, r, expr, procId), '*');
return u;
}
}
// Fully parenthesized: discard the placeholder and parse the interior.
free(u);
return parseExpr(l+1, r-1, expr, procId);
}
// Hash an N*N host result buffer with the same multiplicative scheme the
// judge uses (fold each element, multiply by 2654435761).
uint32_t writeOut(uint32_t *hostC) {
    uint32_t h = 0;
    const int total = N * N;
    for (int i = 0; i < total; i++)
        h = (h + hostC[i]) * 2654435761LU;
    return h;
}
// Per-device buffer pools: cuMemMid holds the temp "register" buffers for
// inner nodes; cuMemIn caches input matrices already uploaded to a device.
uint32_t *cuMemMid[MAXGPU][MAXMID];
uint32_t *cuMemIn[MAXGPU][MAXM];
// Flatten subtree u into nodes[] in evaluation order (child with the
// larger register need first, per the allocation done in updateNode),
// bind each inner node to its temp buffer on device did, and log the
// resulting instruction schedule to stderr.
// NOTE(review): cuMemMid[did][regId] may still be NULL here on the first
// query — executeGPU re-binds cuV after allocating; verify that ordering
// holds if this is ever called elsewhere.
void memRelocation(Node *u, int did, Node *nodes[], int *offset) {
if (u->l == NULL) {
// Leaf: just emit it; executeGPU uploads the matrix on first use.
nodes[*offset] = u, (*offset)++;
return ;
}
u->cuV = cuMemMid[did][u->regId];
// Evaluate the needier child first so register reuse stays valid.
if (u->l->regNeed > u->r->regNeed) {
memRelocation(u->l, did, nodes, offset);
memRelocation(u->r, did, nodes, offset);
} else {
memRelocation(u->r, did, nodes, offset);
memRelocation(u->l, did, nodes, offset);
}
// Log one line of pseudo-assembly: regX = add/mul <operand> <operand>.
fprintf(stderr, "reg%d = %s ", u->regId, u->opcode == '+' ? "add" : "mul");
if (u->l->l == NULL) fprintf(stderr, "%c ", u->l->mid + 'A');
else fprintf(stderr, "reg%d ", u->l->regId);
if (u->r->l == NULL) fprintf(stderr, "%c\n", u->r->mid + 'A');
else fprintf(stderr, "reg%d\n", u->r->regId);
nodes[*offset] = u, (*offset)++;
return ;
}
// Evaluate each device's work queue. For every expression tree: flatten it
// to evaluation order, lazily upload leaf matrices and allocate temp
// buffers, launch the mul/add kernels in order, then read the root back
// and store its hash into resultBuff[query id]. Returns 1 on completion.
// One OpenMP thread per device: cudaSetDevice state is per-thread, so the
// three threads drive their devices independently.
int executeGPU(Node *workQue[][128], int workQueSz[], uint32_t resultBuff[]) {
    Node* nodes[MAXGPU][128];
    int offset[MAXGPU] = {};
    uint32_t memSz = N*N*sizeof(uint32_t);
    int memDeploy[MAXGPU][MAXM] = {};   // input matrix m uploaded to device p?
    int regDeploy[MAXGPU][MAXMID] = {}; // temp buffer r allocated on device p?
    // -- execute multi-device
    #pragma omp parallel for
    for (int p = 0; p < clNeedDevCnt; p++) {
        cudaSetDevice(p);
        for (int q = 0; q < workQueSz[p]; q++) {
            // -- flatten binary tree into evaluation order
            offset[p] = 0;
            memRelocation(workQue[p][q], p, nodes[p], &offset[p]);
            // -- execute in order
            for (int i = 0; i < offset[p]; i++) {
                Node *u = nodes[p][i];
                // -- leaf: upload the input matrix once per device
                if (u->l == NULL) {
                    if (!memDeploy[p][u->mid]) {
                        CheckErr(cudaMalloc((void **) &cuMemIn[p][u->mid], memSz));
                        CheckErr(cudaMemcpy(cuMemIn[p][u->mid], hostMtx[u->mid], memSz, cudaMemcpyHostToDevice));
                        memDeploy[p][u->mid] = 1;
                    }
                    u->cuV = cuMemIn[p][u->mid];
                    continue;
                }
                // -- inner node: allocate its temp buffer on first use.
                // BUG FIX: the deploy flag was never set, so every visit
                // re-cudaMalloc'ed the register and leaked the old buffer.
                if (!regDeploy[p][u->regId]) {
                    CheckErr(cudaMalloc((void **) &cuMemMid[p][u->regId], memSz));
                    regDeploy[p][u->regId] = 1;
                }
                // memRelocation may have bound cuV before the buffer existed.
                if (u->cuV == NULL)
                    u->cuV = cuMemMid[p][u->regId];
                if (u->opcode == '*')
                    matrix_multiply(u->l->cuV, u->r->cuV, u->cuV);
                else
                    matrix_add(u->l->cuV, u->r->cuV, u->cuV);
            }
            // -- read back and store the answer under the original query id
            Node *root = workQue[p][q];
            fprintf(stderr, "register need %d\n", root->regNeed);
            CheckErr(cudaMemcpy(hostMid[p], root->cuV, memSz, cudaMemcpyDeviceToHost));
            resultBuff[root->pid] = writeOut(hostMid[p]);
            // -- free the tree nodes (device buffers are reused across queries)
            for (int i = 0; i < offset[p]; i++) {
                Node *u = nodes[p][i];
                if (u->l != NULL && u->hostV)
                    free(u->hostV);
                free(u);
            }
        }
        // -- release this device's buffers
        for (int i = 0; i < MAXMID; i++)
            cudaFree(cuMemMid[p][i]);
        for (int i = 0; i < M; i++)
            cudaFree(cuMemIn[p][i]);
    }
    return 1;
}
// Read the next expression token into the global buffer `expr`.
// Returns 1 on success, 0 on EOF/parse failure.
int readIn() {
    return scanf("%s", expr) == 1 ? 1 : 0;
}
// qsort comparator over Node*: descending by estimated cost h, so the
// heaviest expressions are handed to devices first (greedy balancing).
int balance_cmp(const void *a, const void *b) {
    long long ha = (*(Node **) a)->h;
    long long hb = (*(Node **) b)->h;
    if (ha < hb) return 1;
    if (ha > hb) return -1;
    return 0;
}
// Driver: read M, N and the seeds, generate the input matrices in parallel
// on the host, parse the Q query expressions, distribute them greedily
// across devices by estimated cost, execute, and print one hash per query
// in input order.
void onStart() {
    int S[64];
    assert(scanf("%d %d", &M, &N) == 2);
    for (int i = 0; i < M; i++)
        assert(scanf("%d", &S[i]) == 1);
    // -- generate each input matrix with the judge's recurrence
    #pragma omp parallel for
    for (int p = 0; p < M; p++) {
        uint32_t x = 2, n = N*N;
        uint32_t c = S[p];
        for (int i = 0; i < N; i++) {
            for (int j = 0; j < N; j++) {
                x = (x * x + c + i + j)%n;
                hostMtx[p][i*N+j] = x;
            }
        }
    }
    Node *procBuff[128];
    if (scanf("%d", &Q) != 1)
        return ;
    assert(Q <= 128);  // procBuff / workQue / resultBuff capacity
    for (int i = 0; i < Q; i++) {
        readIn();
        int expr_len = strlen(expr);
        procBuff[i] = parseExpr(0, expr_len-1, expr, i);
    }
    // -- heaviest expressions first, then greedily assign each to the
    //    device with the least weighted load (gpuSpeed scales the load of
    //    the slower devices upward so they receive less work)
    qsort(procBuff, Q, sizeof(Node*), balance_cmp);
    float gpuSpeed[MAXGPU] = {1.f, 1.8f, 2.0f};
    long long workload[MAXGPU] = {};
    int workQueSz[MAXGPU] = {};
    // BUG FIX: results are indexed by query id (0..Q-1, Q up to 100) in
    // executeGPU, so this must hold Q entries — it was sized MAXGPU (3),
    // a stack buffer overflow for any Q > 3.
    uint32_t resultBuff[128] = {};
    Node *workQue[MAXGPU][128];
    for (int i = 0; i < Q; i++) {
        int mn = 0;
        for (int j = 0; j < clNeedDevCnt; j++) {
            if (workload[j]*gpuSpeed[j] < workload[mn]*gpuSpeed[mn])
                mn = j;
        }
        workload[mn] += procBuff[i]->h;
        workQue[mn][workQueSz[mn]++] = procBuff[i];
    }
    executeGPU(workQue, workQueSz, resultBuff);
    for (int i = 0; i < Q; i++)
        printf("%u\n", resultBuff[i]);
}
// Entry point: all work happens in onStart(); arguments are unused.
int main(int argc, char *argv[]) {
    (void) argc;
    (void) argv;
    onStart();
    return 0;
}