1 | #include "ep_lib.hpp" |
---|
2 | |
---|
3 | #include <stdio.h> |
---|
4 | #include <assert.h> |
---|
5 | #include "ep_declaration.hpp" |
---|
6 | #include <omp.h> |
---|
7 | #include <time.h> /* time */ |
---|
8 | #include <ctime> |
---|
9 | #include <ratio> |
---|
10 | #include <chrono> |
---|
11 | |
---|
12 | using namespace ep_lib; |
---|
13 | using namespace std::chrono; |
---|
14 | |
---|
15 | |
---|
16 | |
---|
17 | int main(int argc, char **argv) |
---|
18 | { |
---|
19 | srand (time(NULL)); |
---|
20 | |
---|
21 | //printf("Testing ep_lib\n"); |
---|
22 | int required=3, provided; |
---|
23 | |
---|
24 | MPI_Init_thread(&argc, &argv, required, &provided); |
---|
25 | |
---|
26 | assert(required==provided); |
---|
27 | |
---|
28 | int mpi_rank; |
---|
29 | int mpi_size; |
---|
30 | |
---|
31 | MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); |
---|
32 | MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); |
---|
33 | |
---|
34 | #pragma omp parallel default(shared) |
---|
35 | { |
---|
36 | MPI_Comm_rank(MPI_COMM_WORLD , &mpi_rank); |
---|
37 | |
---|
38 | int num_ep = omp_get_num_threads(); |
---|
39 | MPI_Info info; |
---|
40 | |
---|
41 | //printf("mpi_rank = %d, thread_num = %d\n", mpi_rank, omp_get_thread_num()); |
---|
42 | |
---|
43 | MPI_Comm *ep_comm; |
---|
44 | #pragma omp master |
---|
45 | { |
---|
46 | MPI_Comm *ep_comm; |
---|
47 | MPI_Comm_create_endpoints(MPI_COMM_WORLD->mpi_comm, num_ep, info, ep_comm); |
---|
48 | passage = ep_comm; |
---|
49 | } |
---|
50 | |
---|
51 | #pragma omp barrier |
---|
52 | |
---|
53 | MPI_Comm comm; // this should act as EP_COMM_WORLD |
---|
54 | |
---|
55 | comm = passage[omp_get_thread_num()]; |
---|
56 | |
---|
57 | // TEST OF COMM_SPLIT |
---|
58 | { |
---|
59 | |
---|
60 | MPI_Barrier(comm); |
---|
61 | int rank, size; |
---|
62 | MPI_Comm_rank(comm, &rank); |
---|
63 | MPI_Comm_size(comm, &size); |
---|
64 | |
---|
65 | int tab_color[16] = {2, 2, 2, 1, 2, 1, 1, 2, 2, 2, 2, 2, 2, 1, 1, 1}; // used for config 1 |
---|
66 | int tab_key[16] = {2, 1, 4, 1, 0, 4, 3, 3, 4, 3, 4, 4, 4, 0, 0, 2}; // used for config 1 |
---|
67 | |
---|
68 | int color = tab_color[rank]; |
---|
69 | int key = tab_key[rank]; |
---|
70 | |
---|
71 | |
---|
72 | MPI_Comm split_comm; |
---|
73 | MPI_Comm_split(comm, color, key, &split_comm); |
---|
74 | |
---|
75 | |
---|
76 | |
---|
77 | int split_rank, split_size; |
---|
78 | MPI_Comm_rank(split_comm, &split_rank); |
---|
79 | MPI_Comm_size(split_comm, &split_size); |
---|
80 | |
---|
81 | #ifdef _Memory_check |
---|
82 | printf("rank = %d, color = %d, key = %d, split_rank = %d, local_rank=%d\n", rank, color, key, split_rank, split_comm->ep_comm_ptr->size_rank_info[1].first); |
---|
83 | #endif |
---|
84 | |
---|
85 | MPI_Barrier(comm); |
---|
86 | MPI_Barrier(comm); |
---|
87 | MPI_Barrier(comm); |
---|
88 | |
---|
89 | if(rank == 0) printf(" \t MPI_Comm_split \t OK\n"); |
---|
90 | |
---|
91 | |
---|
92 | int local_leader = 0; |
---|
93 | int remote_leader = color==2? 13: 4; // used for config 1 |
---|
94 | |
---|
95 | MPI_Comm peer_comm = comm; |
---|
96 | |
---|
97 | MPI_Comm inter_comm; |
---|
98 | MPI_Intercomm_create(split_comm, local_leader, peer_comm, remote_leader, 99, &inter_comm); |
---|
99 | |
---|
100 | int inter_rank, inter_size, remote_size; |
---|
101 | MPI_Comm_rank(inter_comm, &inter_rank); |
---|
102 | MPI_Comm_size(inter_comm, &inter_size); |
---|
103 | MPI_Comm_remote_size(inter_comm, &remote_size); |
---|
104 | |
---|
105 | |
---|
106 | MPI_Barrier(comm); |
---|
107 | MPI_Barrier(comm); |
---|
108 | |
---|
109 | if(rank == 0) printf(" \t MPI_Intercomm_create \t OK\n"); |
---|
110 | |
---|
111 | //printf("rank = %d, split_rank = %d, split_size = %d, inter_rank=%d, inter_size=%d, remote_size=%d\n", rank, split_rank, split_size, inter_rank, inter_size, remote_size); |
---|
112 | |
---|
113 | |
---|
114 | MPI_Comm_free(&split_comm); |
---|
115 | |
---|
116 | |
---|
117 | MPI_Barrier(comm); |
---|
118 | |
---|
119 | |
---|
120 | if(color==2 && split_rank==0) |
---|
121 | { |
---|
122 | double sendbuf[9]={1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9}; |
---|
123 | MPI_Request send_request; |
---|
124 | MPI_Status send_status; |
---|
125 | MPI_Isend(sendbuf, 9, MPI_DOUBLE, 0, 10, inter_comm, &send_request); |
---|
126 | MPI_Wait(&send_request, &send_status); |
---|
127 | } |
---|
128 | |
---|
129 | if(color==1 && split_rank==0) |
---|
130 | { |
---|
131 | double recvbuf[9]; |
---|
132 | MPI_Request recv_request; |
---|
133 | MPI_Status recv_status; |
---|
134 | MPI_Irecv(recvbuf, 9, MPI_DOUBLE, 0, 10, inter_comm, &recv_request); |
---|
135 | MPI_Wait(&recv_request, &recv_status); |
---|
136 | for(int i=0; i<9; i++) |
---|
137 | { |
---|
138 | printf("recvbuf[%d] = %lf\n", i, recvbuf[i]); |
---|
139 | } |
---|
140 | } |
---|
141 | |
---|
142 | MPI_Barrier(comm); |
---|
143 | MPI_Barrier(comm); |
---|
144 | |
---|
145 | if(rank == 0) printf(" \t Test iP2P for intercomm \t OK\n"); |
---|
146 | |
---|
147 | if(color==2 && split_rank==0) |
---|
148 | { |
---|
149 | double sendbuf[9]={1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9}; |
---|
150 | MPI_Request send_request; |
---|
151 | MPI_Status send_status; |
---|
152 | MPI_Isend(sendbuf, 9, MPI_DOUBLE, 0, 10, inter_comm, &send_request); |
---|
153 | MPI_Wait(&send_request, &send_status); |
---|
154 | } |
---|
155 | |
---|
156 | if(color==1 && split_rank==0) |
---|
157 | { |
---|
158 | double recvbuf[9]; |
---|
159 | MPI_Request recv_request; |
---|
160 | MPI_Status recv_status; |
---|
161 | int probed = false; |
---|
162 | MPI_Message message; |
---|
163 | while(!probed) |
---|
164 | { |
---|
165 | MPI_Improbe(0, 10, inter_comm, &probed, &message, &recv_status); |
---|
166 | } |
---|
167 | MPI_Mrecv(recvbuf, 9, MPI_DOUBLE, &message, &recv_status); |
---|
168 | for(int i=0; i<9; i++) |
---|
169 | { |
---|
170 | printf("==========recvbuf[%d] = %lf\n", i, recvbuf[i]); |
---|
171 | } |
---|
172 | } |
---|
173 | |
---|
174 | |
---|
175 | |
---|
176 | |
---|
177 | MPI_Comm inter_comm_dup; |
---|
178 | MPI_Comm_dup(inter_comm, &inter_comm_dup); |
---|
179 | int inter_dup_rank, inter_dup_size; |
---|
180 | MPI_Comm_rank(inter_comm_dup, &inter_dup_rank); |
---|
181 | MPI_Comm_size(inter_comm_dup, &inter_dup_size); |
---|
182 | |
---|
183 | //printf("rank = %d, split_rank = %d, split_size = %d, inter_dup_rank=%d, inter_dup_size=%d\n", rank, split_rank, split_size, inter_dup_rank, inter_dup_size); |
---|
184 | |
---|
185 | |
---|
186 | |
---|
187 | MPI_Comm inter_comm_dup_merged; |
---|
188 | bool high; |
---|
189 | if(color==2) high=true; |
---|
190 | else high = false; |
---|
191 | |
---|
192 | MPI_Intercomm_merge(inter_comm_dup, high, &inter_comm_dup_merged); |
---|
193 | int merged_rank, merged_size; |
---|
194 | MPI_Comm_rank(inter_comm_dup_merged, &merged_rank); |
---|
195 | MPI_Comm_size(inter_comm_dup_merged, &merged_size); |
---|
196 | |
---|
197 | //printf("merged_rank=%d, merged_rank_loc=%d, root_ep_loc = %d\n", merged_rank, inter_comm_dup_merged->ep_comm_ptr->size_rank_info[1].first, inter_comm_dup_merged->ep_rank_map->at(0).first); |
---|
198 | |
---|
199 | MPI_Barrier(comm); |
---|
200 | MPI_Barrier(comm); |
---|
201 | |
---|
202 | int sendbuf=0; |
---|
203 | if(merged_rank == 0) sendbuf = 99; |
---|
204 | MPI_Bcast(&sendbuf, 1, MPI_INT, 0, inter_comm_dup_merged); |
---|
205 | printf("merged_rank = %d, sendbuf = %d\n", merged_rank, sendbuf); |
---|
206 | |
---|
207 | |
---|
208 | |
---|
209 | MPI_Barrier(comm); |
---|
210 | |
---|
211 | MPI_Comm_free(&inter_comm_dup_merged); |
---|
212 | MPI_Comm_free(&inter_comm_dup); |
---|
213 | |
---|
214 | |
---|
215 | |
---|
216 | |
---|
217 | MPI_Comm_free(&inter_comm); |
---|
218 | |
---|
219 | } |
---|
220 | |
---|
221 | |
---|
222 | |
---|
223 | MPI_Barrier(comm); |
---|
224 | MPI_Comm_free(&comm); |
---|
225 | } |
---|
226 | |
---|
227 | //MPI_Finalize(); |
---|
228 | //return 0; |
---|
229 | |
---|
230 | |
---|
231 | |
---|
232 | |
---|
233 | |
---|
234 | |
---|
235 | #pragma omp parallel default(shared) |
---|
236 | { |
---|
237 | MPI_Comm_rank(MPI_COMM_WORLD , &mpi_rank); |
---|
238 | |
---|
239 | int num_ep = omp_get_num_threads(); |
---|
240 | MPI_Info info; |
---|
241 | |
---|
242 | //printf("mpi_rank = %d, thread_num = %d\n", mpi_rank, omp_get_thread_num()); |
---|
243 | |
---|
244 | MPI_Comm *ep_comm; |
---|
245 | #pragma omp master |
---|
246 | { |
---|
247 | MPI_Comm *ep_comm; |
---|
248 | MPI_Comm_create_endpoints(MPI_COMM_WORLD->mpi_comm, num_ep, info, ep_comm); |
---|
249 | passage = ep_comm; |
---|
250 | } |
---|
251 | |
---|
252 | #pragma omp barrier |
---|
253 | |
---|
254 | |
---|
255 | MPI_Comm comm_for_dup; // this should act as EP_COMM_WORLD |
---|
256 | MPI_Comm comm; // this should act as EP_COMM_WORLD |
---|
257 | |
---|
258 | comm_for_dup = passage[omp_get_thread_num()]; |
---|
259 | MPI_Comm_dup(comm_for_dup, &comm); |
---|
260 | |
---|
261 | MPI_Comm_free(&comm_for_dup); |
---|
262 | |
---|
263 | MPI_Barrier(comm); |
---|
264 | |
---|
265 | int rank, size; |
---|
266 | MPI_Comm_rank(comm, &rank); |
---|
267 | MPI_Comm_size(comm, &size); |
---|
268 | |
---|
269 | if(rank == 0) printf(" \t test MPI_Comm_dup \t OK \n"); |
---|
270 | |
---|
271 | // TIMING SYCHRONIZATION |
---|
272 | { |
---|
273 | int n=100000; |
---|
274 | |
---|
275 | MPI_Barrier(comm); |
---|
276 | |
---|
277 | high_resolution_clock::time_point t1 = high_resolution_clock::now(); |
---|
278 | |
---|
279 | for(int i=0; i<n; i++) |
---|
280 | MPI_Barrier_local(comm); |
---|
281 | |
---|
282 | high_resolution_clock::time_point t2 = high_resolution_clock::now(); |
---|
283 | duration<double> time_span = duration_cast<duration<double>>(t2 - t1); |
---|
284 | #pragma omp master |
---|
285 | std::cout << "proc "<< mpi_rank <<" ep_barrier "<< time_span.count() << " seconds."<<std::endl; |
---|
286 | |
---|
287 | t1 = high_resolution_clock::now(); |
---|
288 | |
---|
289 | for(int i=0; i<n; i++) |
---|
290 | { |
---|
291 | #pragma omp barrier |
---|
292 | } |
---|
293 | |
---|
294 | t2 = high_resolution_clock::now(); |
---|
295 | time_span = duration_cast<duration<double>>(t2 - t1); |
---|
296 | |
---|
297 | #pragma omp master |
---|
298 | std::cout << "proc "<< mpi_rank <<" omp_barrier "<< time_span.count() << " seconds."<<std::endl; |
---|
299 | |
---|
300 | t1 = high_resolution_clock::now(); |
---|
301 | |
---|
302 | for(int i=0; i<n; i++) |
---|
303 | { |
---|
304 | //#pragma omp barrier |
---|
305 | } |
---|
306 | |
---|
307 | t2 = high_resolution_clock::now(); |
---|
308 | time_span = duration_cast<duration<double>>(t2 - t1); |
---|
309 | |
---|
310 | MPI_Barrier(comm); |
---|
311 | |
---|
312 | #pragma omp master |
---|
313 | std::cout << "proc "<< mpi_rank <<" for_loop "<< time_span.count() << " seconds."<<std::endl; |
---|
314 | }// END TIMING SYCHRONIZATION |
---|
315 | |
---|
316 | // TEST of p2p blocking communication |
---|
317 | { |
---|
318 | MPI_Barrier(comm); |
---|
319 | MPI_Barrier(comm); |
---|
320 | |
---|
321 | MPI_Comm equal_comm = comm; |
---|
322 | |
---|
323 | |
---|
324 | |
---|
325 | |
---|
326 | double sendbuf[10]; |
---|
327 | double recvbuf[20]; |
---|
328 | |
---|
329 | int sender; |
---|
330 | if(rank == 0) sender = rand() % size; |
---|
331 | MPI_Bcast(&sender, 1, MPI_INT, 0, comm); |
---|
332 | |
---|
333 | int receiver = sender; |
---|
334 | if(rank == 0) while(sender == receiver) {receiver = rand() % size;} |
---|
335 | MPI_Bcast(&receiver, 1, MPI_INT, 0, comm); |
---|
336 | |
---|
337 | |
---|
338 | |
---|
339 | |
---|
340 | if(rank == sender) |
---|
341 | { |
---|
342 | for(int i=0; i<10; i++) sendbuf[i] = 99.99; |
---|
343 | MPI_Send(sendbuf, 10, MPI_DOUBLE, receiver, 99, equal_comm); |
---|
344 | for(int i=0; i<10; i++) sendbuf[i] = -99.99; |
---|
345 | MPI_Send(sendbuf, 10, MPI_DOUBLE, receiver, 11, equal_comm); |
---|
346 | } |
---|
347 | |
---|
348 | if(rank == receiver) |
---|
349 | { |
---|
350 | MPI_Status status; |
---|
351 | for(int i=0; i<20; i++) recvbuf[i] = 0.0; |
---|
352 | MPI_Recv(&recvbuf[10], 10, MPI_DOUBLE, sender, 99, comm, &status); |
---|
353 | MPI_Recv(recvbuf, 10, MPI_DOUBLE, sender, 11, comm, &status); |
---|
354 | |
---|
355 | for(int i=0; i<20; i++) std::cout << "recvbuf["<< i <<"] = "<< recvbuf[i] << std::endl; |
---|
356 | printf("sender = %d\nreceiver = %d \tTEST of p2p blocking communication\tOK\n", sender, receiver); |
---|
357 | } |
---|
358 | |
---|
359 | MPI_Barrier(comm); |
---|
360 | |
---|
361 | }//TEST of p2p blocking communication |
---|
362 | |
---|
363 | // TEST of p2p non-blocking communication |
---|
364 | { |
---|
365 | MPI_Barrier(comm); |
---|
366 | MPI_Barrier(comm); |
---|
367 | |
---|
368 | double sendbuf[10]; |
---|
369 | double recvbuf[20]; |
---|
370 | |
---|
371 | int sender; |
---|
372 | if(rank == 0) sender = rand() % size; |
---|
373 | MPI_Bcast(&sender, 1, MPI_INT, 0, comm); |
---|
374 | |
---|
375 | int receiver = sender; |
---|
376 | if(rank == 0) receiver = rand() % size; |
---|
377 | MPI_Bcast(&receiver, 1, MPI_INT, 0, comm); |
---|
378 | |
---|
379 | |
---|
380 | |
---|
381 | MPI_Request request[2]; |
---|
382 | |
---|
383 | if(rank == sender) |
---|
384 | { |
---|
385 | |
---|
386 | for(int i=0; i<10; i++) sendbuf[i] = 99.99; |
---|
387 | MPI_Isend(sendbuf, 10, MPI_DOUBLE, receiver, 99, comm, &request[0]); |
---|
388 | for(int i=0; i<10; i++) sendbuf[i] = -99.99; |
---|
389 | MPI_Isend(sendbuf, 10, MPI_DOUBLE, receiver, 11, comm, &request[1]); |
---|
390 | } |
---|
391 | |
---|
392 | if(rank == receiver) |
---|
393 | { |
---|
394 | for(int i=0; i<20; i++) recvbuf[i] = 0.0; |
---|
395 | MPI_Irecv(&recvbuf[10], 10, MPI_DOUBLE, sender, 11, comm, &request[0]); |
---|
396 | MPI_Irecv(recvbuf, 10, MPI_DOUBLE, sender, 99, comm, &request[1]); |
---|
397 | } |
---|
398 | |
---|
399 | MPI_Barrier(comm); |
---|
400 | |
---|
401 | if(rank == receiver || rank == sender) |
---|
402 | { |
---|
403 | MPI_Status status[2]; |
---|
404 | MPI_Waitall(2, request, status); |
---|
405 | } |
---|
406 | |
---|
407 | MPI_Barrier(comm); |
---|
408 | |
---|
409 | if(rank == receiver) |
---|
410 | { |
---|
411 | for(int i=0; i<20; i++) std::cout << "recvbuf["<< i <<"] = "<< recvbuf[i] << std::endl; |
---|
412 | printf("sender = %d\nreceiver = %d \tTEST of p2p non-blocking communication\tOK\n", sender, receiver); |
---|
413 | } |
---|
414 | |
---|
415 | }//TEST of p2p blocking communication |
---|
416 | |
---|
417 | |
---|
418 | // TEST OF BCAST FROM A RANDOM ROOT |
---|
419 | { |
---|
420 | int bcast_root; |
---|
421 | |
---|
422 | if(rank == 0) bcast_root = rand() % size; |
---|
423 | |
---|
424 | MPI_Bcast(&bcast_root, 1, MPI_INT, 0, comm); |
---|
425 | |
---|
426 | int sendbuf[2]; |
---|
427 | |
---|
428 | sendbuf[0] = rank; |
---|
429 | sendbuf[1] = size; |
---|
430 | |
---|
431 | MPI_Bcast(sendbuf, 2, MPI_INT, bcast_root, comm); |
---|
432 | |
---|
433 | int bcast_test = 0; |
---|
434 | if(sendbuf[0] == bcast_root && sendbuf[1] == size) bcast_test = 1; |
---|
435 | |
---|
436 | int bcast_result; |
---|
437 | |
---|
438 | MPI_Reduce(&bcast_test, &bcast_result, 1, MPI_INT, MPI_MIN, bcast_root, comm); |
---|
439 | |
---|
440 | if(bcast_result && rank == bcast_root) printf("root = %d : \t test MPI_Bcast \t OK\n", bcast_root); |
---|
441 | if(!bcast_result && rank == bcast_root) printf("root = %d : \t test MPI_Bcast \t FAILED %d\n", bcast_root, bcast_result); |
---|
442 | } |
---|
443 | |
---|
444 | MPI_Barrier(comm); |
---|
445 | |
---|
446 | // TEST OF GATHER FROM A RAMDOM ROOT |
---|
447 | { |
---|
448 | int gather_root; |
---|
449 | |
---|
450 | if(rank == 0) gather_root = rand() % size; |
---|
451 | |
---|
452 | MPI_Bcast(&gather_root, 1, MPI_INT, 0, comm); |
---|
453 | |
---|
454 | double sendbuf[2]; |
---|
455 | sendbuf[0] = rank * 1.0; |
---|
456 | sendbuf[1] = size * (-1.0); |
---|
457 | |
---|
458 | std::vector<double>recvbuf(2*size, 0); |
---|
459 | |
---|
460 | MPI_Gather(sendbuf, 2, MPI_DOUBLE, recvbuf.data(), 2, MPI_DOUBLE, gather_root, comm); |
---|
461 | |
---|
462 | bool gather_result = true; |
---|
463 | |
---|
464 | if(rank == gather_root) |
---|
465 | { |
---|
466 | for(int i=0; i<size; i++) |
---|
467 | { |
---|
468 | if(abs(recvbuf[2*i] - i) > 1.e-10 || abs(recvbuf[2*i+1] + size) > 1.e-10) |
---|
469 | { |
---|
470 | gather_result = false; |
---|
471 | break; |
---|
472 | } |
---|
473 | } |
---|
474 | |
---|
475 | if(gather_result) printf("root = %d : \t test MPI_Gather \t OK \n", gather_root); |
---|
476 | else printf("root = %d : \t test MPI_Gather \t FAILED\n", gather_root); |
---|
477 | } |
---|
478 | } |
---|
479 | |
---|
480 | MPI_Barrier(comm); |
---|
481 | |
---|
482 | // TEST OF GATHERV FROM A RAMDOM ROOT |
---|
483 | { |
---|
484 | int gatherv_root; |
---|
485 | |
---|
486 | if(rank == 0) gatherv_root = rand() % size; |
---|
487 | |
---|
488 | MPI_Bcast(&gatherv_root, 1, MPI_INT, 0, comm); |
---|
489 | |
---|
490 | int sendbuf[2]; |
---|
491 | sendbuf[0] = rank; |
---|
492 | sendbuf[1] = -size; |
---|
493 | |
---|
494 | std::vector<int>recvbuf(2*size, 0); |
---|
495 | |
---|
496 | std::vector<int>recvcounts(size, 2); |
---|
497 | std::vector<int>displs(size, 0); |
---|
498 | |
---|
499 | for(int i=0; i<size; i++) displs[i] = 2*(size-1-i); |
---|
500 | |
---|
501 | MPI_Gatherv(sendbuf, 2, MPI_INT, recvbuf.data(), recvcounts.data(), displs.data(), MPI_INT, gatherv_root, comm); |
---|
502 | |
---|
503 | bool gatherv_result = true; |
---|
504 | |
---|
505 | if(rank == gatherv_root) |
---|
506 | { |
---|
507 | for(int i=0; i<size; i++) |
---|
508 | { |
---|
509 | if(abs(recvbuf[2*i] - (size-1-i)) > 1.e-10 || abs(recvbuf[2*i+1] + size) > 1.e-10) |
---|
510 | { |
---|
511 | gatherv_result = false; printf("%lf %lf root = %d, i = %d\n", recvbuf[2*i], recvbuf[2*i+1], gatherv_root, i); |
---|
512 | break; |
---|
513 | } |
---|
514 | } |
---|
515 | |
---|
516 | //for(int i=0; i<size*2; i++) printf("%lf\t", recvbuf[i]); |
---|
517 | //printf("\n"); |
---|
518 | |
---|
519 | if(gatherv_result) printf("root = %d : \t test MPI_Gatherv \t OK\n", gatherv_root); |
---|
520 | else printf("root = %d : \t test MPI_Gatherv \t FAILED\n", gatherv_root); |
---|
521 | } |
---|
522 | } |
---|
523 | |
---|
524 | MPI_Barrier(comm); |
---|
525 | |
---|
526 | // TEST OF ALLGATHER |
---|
527 | { |
---|
528 | double sendbuf[2]; |
---|
529 | sendbuf[0] = rank * 1.0; |
---|
530 | sendbuf[1] = size * (-1.0); |
---|
531 | |
---|
532 | std::vector<double>recvbuf(2*size, 0); |
---|
533 | |
---|
534 | MPI_Allgather(sendbuf, 2, MPI_DOUBLE, recvbuf.data(), 2, MPI_DOUBLE, comm); |
---|
535 | |
---|
536 | int allgather_test = 1; |
---|
537 | |
---|
538 | for(int i=0; i<size; i++) |
---|
539 | { |
---|
540 | if(abs(recvbuf[2*i] - i) > 1.e-10 || abs(recvbuf[2*i+1] + size) > 1.e-10) |
---|
541 | { |
---|
542 | allgather_test = 0; |
---|
543 | break; |
---|
544 | } |
---|
545 | } |
---|
546 | |
---|
547 | int allgather_result; |
---|
548 | MPI_Reduce(&allgather_test, &allgather_result, 1, MPI_INT, MPI_MIN, 0, comm); |
---|
549 | |
---|
550 | if(rank == 0 && allgather_result) printf(" \t test MPI_Allgather \t OK \n"); |
---|
551 | if(rank == 0 && !allgather_result) printf(" \t test MPI_Allgather \t OK \n"); |
---|
552 | |
---|
553 | } |
---|
554 | |
---|
555 | MPI_Barrier(comm); |
---|
556 | |
---|
557 | // TEST OF ALLGATHERV |
---|
558 | { |
---|
559 | int sendbuf[2]; |
---|
560 | sendbuf[0] = rank; |
---|
561 | sendbuf[1] = -size; |
---|
562 | |
---|
563 | std::vector<int>recvbuf(2*size, 0); |
---|
564 | |
---|
565 | std::vector<int>recvcounts(size, 2); |
---|
566 | std::vector<int>displs(size, 0); |
---|
567 | |
---|
568 | for(int i=0; i<size; i++) displs[i] = 2*(size-1-i); |
---|
569 | |
---|
570 | MPI_Allgatherv(sendbuf, 2, MPI_INT, recvbuf.data(), recvcounts.data(), displs.data(), MPI_INT, comm); |
---|
571 | |
---|
572 | int allgatherv_test = 1; |
---|
573 | |
---|
574 | |
---|
575 | |
---|
576 | for(int i=0; i<size; i++) |
---|
577 | { |
---|
578 | if(abs(recvbuf[2*i] - (size-1-i)) > 1.e-10 || abs(recvbuf[2*i+1] + size) > 1.e-10) |
---|
579 | { |
---|
580 | allgatherv_test = 0; printf("ID : %d %d %d %d %d\n", rank, recvbuf[2*i], recvbuf[2*i+1] , recvbuf[2*i] - (size-1-i), recvbuf[2*i+1] + size); |
---|
581 | break; |
---|
582 | } |
---|
583 | } |
---|
584 | |
---|
585 | |
---|
586 | int allgatherv_result; |
---|
587 | MPI_Reduce(&allgatherv_test, &allgatherv_result, 1, MPI_INT, MPI_MIN, 0, comm); |
---|
588 | |
---|
589 | if(rank == 0 && allgatherv_result) printf(" \t test MPI_Allgatherv \t OK \n"); |
---|
590 | if(rank == 0 && !allgatherv_result) printf(" \t test MPI_Allgatherv \t FAILED %d\n", allgatherv_result); |
---|
591 | |
---|
592 | } |
---|
593 | |
---|
594 | MPI_Barrier(comm); |
---|
595 | |
---|
596 | // TEST OF REDUCE |
---|
597 | { |
---|
598 | int reduce_root; |
---|
599 | |
---|
600 | if(rank == 0) reduce_root = rand() % size; |
---|
601 | |
---|
602 | MPI_Bcast(&reduce_root, 1, MPI_INT, 0, comm); |
---|
603 | |
---|
604 | int sendbuf[2]; |
---|
605 | sendbuf[0] = rank; |
---|
606 | sendbuf[1] = -size; |
---|
607 | |
---|
608 | std::vector<int>recvbuf(2, 0); |
---|
609 | |
---|
610 | MPI_Op op = MPI_MIN; |
---|
611 | |
---|
612 | MPI_Reduce(sendbuf, recvbuf.data(), 2, MPI_INT, op, reduce_root, comm); |
---|
613 | |
---|
614 | |
---|
615 | bool reduce_result = true; |
---|
616 | |
---|
617 | if(rank == reduce_root) |
---|
618 | { |
---|
619 | for(int i=0; i<2; i++) |
---|
620 | { |
---|
621 | if((op == MPI_SUM && (abs(recvbuf[0]-(size-1)*size/2) > 1.e-10 || abs(recvbuf[1] + size * size) > 1.e-10) ) || |
---|
622 | (op == MPI_MAX && (abs(recvbuf[0]-(size-1)) > 1.e-10 || abs(recvbuf[1] + size) > 1.e-10) ) || |
---|
623 | (op == MPI_MIN && (abs(recvbuf[0]) > 1.e-10 || abs(recvbuf[1] + size) > 1.e-10) ) ) |
---|
624 | { |
---|
625 | reduce_result = false; printf("%d %d root = %d, i = %d\n", recvbuf[0], recvbuf[1], reduce_root, i); |
---|
626 | break; |
---|
627 | } |
---|
628 | } |
---|
629 | } |
---|
630 | |
---|
631 | if(rank == reduce_root && reduce_result) printf("root = %d : \t test MPI_Reduce \t OK\n", reduce_root); |
---|
632 | if(rank == reduce_root && !reduce_result) printf("root = %d : \t test MPI_Reduce \t FAILED\n", reduce_root); |
---|
633 | } |
---|
634 | |
---|
635 | |
---|
636 | MPI_Barrier(comm); |
---|
637 | |
---|
638 | // TEST OF ALLREDUCE |
---|
639 | { |
---|
640 | |
---|
641 | int sendbuf[2]; |
---|
642 | sendbuf[0] = rank; |
---|
643 | sendbuf[1] = -size; |
---|
644 | |
---|
645 | std::vector<int>recvbuf(2, 0); |
---|
646 | |
---|
647 | MPI_Op op = MPI_MIN; |
---|
648 | |
---|
649 | MPI_Allreduce(sendbuf, recvbuf.data(), 2, MPI_INT, op, comm); |
---|
650 | |
---|
651 | |
---|
652 | int allreduce_test = 1; |
---|
653 | |
---|
654 | |
---|
655 | if((op == MPI_SUM && (abs(recvbuf[0]-(size-1)*size/2) > 1.e-10 || abs(recvbuf[1] + size * size) > 1.e-10) ) || |
---|
656 | (op == MPI_MAX && (abs(recvbuf[0]-(size-1)) > 1.e-10 || abs(recvbuf[1] + size) > 1.e-10) ) || |
---|
657 | (op == MPI_MIN && (abs(recvbuf[0]) > 1.e-10 || abs(recvbuf[1] + size) > 1.e-10) ) ) |
---|
658 | { |
---|
659 | allreduce_test = 0; printf("%d %d\n", recvbuf[0], recvbuf[1]); |
---|
660 | } |
---|
661 | |
---|
662 | |
---|
663 | int allreduce_result; |
---|
664 | MPI_Reduce(&allreduce_test, &allreduce_result, 1, MPI_INT, MPI_MIN, 0, comm); |
---|
665 | |
---|
666 | if(rank == 0 && allreduce_result) printf(" \t test MPI_Allreduce \t OK\n"); |
---|
667 | if(rank == 0 && !allreduce_result) printf(" \t test MPI_Allreduce \t FAILED\n"); |
---|
668 | } |
---|
669 | |
---|
670 | |
---|
671 | MPI_Barrier(comm); |
---|
672 | |
---|
673 | // TEST OF REDUCE_SCATTER |
---|
674 | { |
---|
675 | |
---|
676 | std::vector<int>sendbuf(2*size, rank); |
---|
677 | std::vector<int>recvbuf(2, -1); |
---|
678 | std::vector<int>recvcounts(size, 2); |
---|
679 | |
---|
680 | MPI_Op op = MPI_MIN; |
---|
681 | |
---|
682 | MPI_Reduce_scatter(sendbuf.data(), recvbuf.data(), recvcounts.data(), MPI_INT, op, comm); |
---|
683 | |
---|
684 | |
---|
685 | int reduce_scatter_test = 1; |
---|
686 | |
---|
687 | |
---|
688 | if((op == MPI_SUM && (abs(recvbuf[0]-(size-1)*size/2) > 1.e-10 || abs(recvbuf[0]-(size-1)*size/2) > 1.e-10) ) || |
---|
689 | (op == MPI_MAX && (abs(recvbuf[0]-(size-1)) > 1.e-10 || abs(recvbuf[1]-(size-1)) > 1.e-10) ) || |
---|
690 | (op == MPI_MIN && (abs(recvbuf[0]) > 1.e-10 || abs(recvbuf[1] ) > 1.e-10) ) ) |
---|
691 | { |
---|
692 | reduce_scatter_test = 0; //printf("%d %d id = %d\n", recvbuf[0], recvbuf[1], rank); |
---|
693 | } |
---|
694 | |
---|
695 | |
---|
696 | int reduce_scatter_result; |
---|
697 | MPI_Reduce(&reduce_scatter_test, &reduce_scatter_result, 1, MPI_INT, MPI_MIN, 0, comm); |
---|
698 | |
---|
699 | if(rank == 0 && reduce_scatter_result) printf(" \t test MPI_Reduce_scatter OK\n"); |
---|
700 | if(rank == 0 && !reduce_scatter_result) printf(" \t test MPI_Reduce_scatter FAILED\n"); |
---|
701 | } |
---|
702 | |
---|
703 | MPI_Barrier(comm); |
---|
704 | |
---|
705 | // TEST OF SCATTER |
---|
706 | { |
---|
707 | |
---|
708 | int scatter_root; |
---|
709 | |
---|
710 | if(rank == 0) scatter_root = rand() % size; |
---|
711 | |
---|
712 | MPI_Bcast(&scatter_root, 1, MPI_INT, 0, comm); |
---|
713 | |
---|
714 | std::vector<int>sendbuf(2*size, rank); |
---|
715 | std::vector<int>recvbuf(2, -1); |
---|
716 | std::vector<int>recvcounts(size, 2); |
---|
717 | |
---|
718 | if(rank == scatter_root) |
---|
719 | { |
---|
720 | for(int i=0; i<size; i++) |
---|
721 | { |
---|
722 | sendbuf[2*i] = i; |
---|
723 | sendbuf[2*i+1] = size; |
---|
724 | } |
---|
725 | //for(int i=0; i<size*2; i++) printf("%d\t", sendbuf[i]); |
---|
726 | } |
---|
727 | |
---|
728 | |
---|
729 | MPI_Scatter(sendbuf.data(), 2, MPI_INT, recvbuf.data(), 2, MPI_INT, scatter_root, comm); |
---|
730 | |
---|
731 | //printf("ID = %d : %d %d\n", rank, recvbuf[0], recvbuf[1]); |
---|
732 | |
---|
733 | int scatter_test = 1; |
---|
734 | |
---|
735 | |
---|
736 | if( abs(recvbuf[0]-rank) > 1.e-10 || abs(recvbuf[1]-size) > 1.e-10 ) |
---|
737 | { |
---|
738 | scatter_test = 0; //printf("%d %d id = %d\n", recvbuf[0], recvbuf[1], rank); |
---|
739 | } |
---|
740 | |
---|
741 | |
---|
742 | int scatter_result; |
---|
743 | MPI_Reduce(&scatter_test, &scatter_result, 1, MPI_INT, MPI_MIN, scatter_root, comm); |
---|
744 | |
---|
745 | if(rank == scatter_root && scatter_result) printf("root = %d : \t test MPI_Scatter \t OK\n", scatter_root); |
---|
746 | if(rank == scatter_root && !scatter_result) printf("root = %d : \t test MPI_Scatter \t FAILED\n", scatter_root); |
---|
747 | } |
---|
748 | |
---|
749 | MPI_Barrier(comm); |
---|
750 | |
---|
751 | // TEST OF SCATTERV |
---|
752 | { |
---|
753 | |
---|
754 | int scatterv_root; |
---|
755 | |
---|
756 | if(rank == 0) scatterv_root = rand() % size; |
---|
757 | |
---|
758 | MPI_Bcast(&scatterv_root, 1, MPI_INT, 0, comm); |
---|
759 | |
---|
760 | std::vector<int>sendbuf(2*size, rank); |
---|
761 | std::vector<int>recvbuf(2, -1); |
---|
762 | std::vector<int>sendcounts(size, 2); |
---|
763 | std::vector<int>displs(size, 0); |
---|
764 | |
---|
765 | for(int i=0; i<size; i++) displs[i] = 2*(size-1-i); |
---|
766 | |
---|
767 | if(rank == scatterv_root) |
---|
768 | { |
---|
769 | for(int i=0; i<size; i++) |
---|
770 | { |
---|
771 | sendbuf[2*i] = i; |
---|
772 | sendbuf[2*i+1] = size; |
---|
773 | } |
---|
774 | } |
---|
775 | |
---|
776 | |
---|
777 | MPI_Scatterv(sendbuf.data(), sendcounts.data(), displs.data(), MPI_INT, recvbuf.data(), 2, MPI_INT, scatterv_root, comm); |
---|
778 | |
---|
779 | //printf("ID = %d : %d %d\n", rank, recvbuf[0], recvbuf[1]); |
---|
780 | |
---|
781 | int scatterv_test = 1; |
---|
782 | |
---|
783 | |
---|
784 | if( abs(recvbuf[0]-(size-1-rank)) > 1.e-10 || abs(recvbuf[1]-size) > 1.e-10 ) |
---|
785 | { |
---|
786 | scatterv_test = 0; printf("%d %d id = %d\n", recvbuf[0], recvbuf[1], rank); |
---|
787 | } |
---|
788 | |
---|
789 | |
---|
790 | int scatterv_result; |
---|
791 | MPI_Reduce(&scatterv_test, &scatterv_result, 1, MPI_INT, MPI_MIN, scatterv_root, comm); |
---|
792 | |
---|
793 | if(rank == scatterv_root && scatterv_result) printf("root = %d : \t test MPI_Scatterv \t OK\n", scatterv_root); |
---|
794 | if(rank == scatterv_root && !scatterv_result) printf("root = %d : \t test MPI_Scatterv \t FAILED\n", scatterv_root); |
---|
795 | } |
---|
796 | |
---|
797 | MPI_Barrier(comm); |
---|
798 | |
---|
799 | // TEST OF ALLTOALL |
---|
800 | { |
---|
801 | |
---|
802 | std::vector<int>sendbuf(size, rank); |
---|
803 | std::vector<int>recvbuf(size, -1); |
---|
804 | |
---|
805 | |
---|
806 | MPI_Alltoall(sendbuf.data(), 1, MPI_INT, recvbuf.data(), 1, MPI_INT, comm); |
---|
807 | |
---|
808 | int alltoall_result = 1; |
---|
809 | |
---|
810 | |
---|
811 | for(int i=0; i<size; i++) |
---|
812 | if( abs(recvbuf[i]-i) > 1.e-10 ) |
---|
813 | { |
---|
814 | alltoall_result = 0; printf("%d id = %d\n", recvbuf[i], rank); |
---|
815 | } |
---|
816 | |
---|
817 | if(rank == 0 && alltoall_result) printf(" \t test MPI_Alltoall \t OK\n"); |
---|
818 | if(rank == 0 && !alltoall_result) printf(" \t test MPI_Alltoall \t FAILED\n"); |
---|
819 | } |
---|
820 | |
---|
821 | // TEST OF SCAN |
---|
822 | { |
---|
823 | |
---|
824 | std::vector<int>sendbuf(2, rank); |
---|
825 | std::vector<int>recvbuf(2, -1); |
---|
826 | |
---|
827 | MPI_Op op = MPI_SUM; |
---|
828 | |
---|
829 | |
---|
830 | MPI_Scan(sendbuf.data(), recvbuf.data(), 2, MPI_INT, op, comm); |
---|
831 | |
---|
832 | int scan_test = 1; |
---|
833 | |
---|
834 | // printf(" ID=%d : %d %d \n", rank, recvbuf[0], recvbuf[1]); |
---|
835 | |
---|
836 | if((op == MPI_SUM && (abs(recvbuf[0]-rank*(rank+1)/2) > 1.e-10 || abs(recvbuf[1]-rank*(rank+1)/2) > 1.e-10) ) || |
---|
837 | (op == MPI_MIN && (abs(recvbuf[0]) > 1.e-10 || abs(recvbuf[1]) > 1.e-10) ) || |
---|
838 | (op == MPI_MAX && (abs(recvbuf[0] - rank) > 1.e-10 || abs(recvbuf[1] - rank) > 1.e-10) ) ) |
---|
839 | { |
---|
840 | scan_test = 0; //printf("%d id = %d\n", recvbuf[i], rank); |
---|
841 | } |
---|
842 | |
---|
843 | int scan_result; |
---|
844 | MPI_Reduce(&scan_test, &scan_result, 1, MPI_INT, MPI_MIN, 0, comm); |
---|
845 | |
---|
846 | if(rank == 0 && scan_result) printf(" \t test MPI_Scan \t\t OK\n"); |
---|
847 | if(rank == 0 && !scan_result) printf(" \t test MPI_Scan \t\t FAILED\n"); |
---|
848 | } |
---|
849 | |
---|
850 | |
---|
851 | // TEST OF EXSCAN |
---|
852 | { |
---|
853 | |
---|
854 | std::vector<int>sendbuf(2, rank); |
---|
855 | std::vector<int>recvbuf(2, -1); |
---|
856 | |
---|
857 | MPI_Op op = MPI_SUM; |
---|
858 | |
---|
859 | |
---|
860 | MPI_Exscan(sendbuf.data(), recvbuf.data(), 2, MPI_INT, op, comm); |
---|
861 | |
---|
862 | int exscan_test = 1; |
---|
863 | |
---|
864 | // printf(" ID=%d : %d %d \n", rank, recvbuf[0], recvbuf[1]); |
---|
865 | |
---|
866 | if(rank >0) |
---|
867 | if((op == MPI_SUM && (abs(recvbuf[0]-rank*(rank-1)/2) > 1.e-10 || abs(recvbuf[1]-rank*(rank-1)/2) > 1.e-10) ) || |
---|
868 | (op == MPI_MIN && (abs(recvbuf[0] ) > 1.e-10 || abs(recvbuf[1]) > 1.e-10) ) || |
---|
869 | (op == MPI_MAX && (abs(recvbuf[0] - rank+1) > 1.e-10 || abs(recvbuf[1] - rank+1) > 1.e-10) ) ) |
---|
870 | { |
---|
871 | exscan_test = 0; //printf("%d id = %d\n", recvbuf[i], rank); |
---|
872 | } |
---|
873 | |
---|
874 | int exscan_result; |
---|
875 | MPI_Reduce(&exscan_test, &exscan_result, 1, MPI_INT, MPI_MIN, 0, comm); |
---|
876 | |
---|
877 | if(rank == 0 && exscan_result) printf(" \t test MPI_Exscan \t OK\n"); |
---|
878 | if(rank == 0 && !exscan_result) printf(" \t test MPI_Exscan \t FAILED\n"); |
---|
879 | } |
---|
880 | |
---|
881 | |
---|
882 | |
---|
883 | // TEST OF COMM_SPLIT |
---|
884 | { |
---|
885 | |
---|
886 | MPI_Barrier(comm); // sync all endpoints before splitting |
---|
887 | int rank, size; // shadows outer rank/size on purpose: re-queried from the EP comm |
---|
888 | MPI_Comm_rank(comm, &rank); |
---|
889 | MPI_Comm_size(comm, &size); |
---|
890 | |
---|
891 | int color = rand()%3; // random color in [0,2]: which sub-communicator this endpoint joins |
---|
892 | int key = rand()%5; // random key in [0,4]: ordering hint inside the sub-communicator |
---|
893 | |
---|
894 | int color2 = rand()%3; // second, independent random split of the same comm |
---|
895 | int key2 = rand()%5; |
---|
896 | |
---|
897 | MPI_Comm split_comm; |
---|
898 | MPI_Comm_split(comm, color, key, &split_comm); |
---|
899 | |
---|
900 | |
---|
901 | MPI_Comm split_comm2; |
---|
902 | MPI_Comm_split(comm, color2, key2, &split_comm2); |
---|
903 | |
---|
904 | |
---|
905 | |
---|
906 | int split_rank, split_size; |
---|
907 | MPI_Comm_rank(split_comm, &split_rank); |
---|
908 | MPI_Comm_size(split_comm, &split_size); |
---|
909 | |
---|
910 | #ifdef _Memory_check |
---|
911 | printf("rank = %d, color = %d, key = %d, split_rank = %d, local_rank=%d\n", rank, color, key, split_rank, split_comm->ep_comm_ptr->size_rank_info[1].first); |
---|
912 | #endif |
---|
913 | |
---|
914 | MPI_Barrier(comm); |
---|
915 | MPI_Barrier(comm); |
---|
916 | MPI_Barrier(comm); |
---|
917 | |
---|
918 | if(rank == 0) printf(" \t MPI_Comm_split \t OK\n"); |
---|
919 | |
---|
920 | MPI_Barrier(comm); |
---|
921 | MPI_Barrier(comm); |
---|
922 | |
---|
923 | int bcast_buf_size=100; |
---|
924 | |
---|
925 | std::vector<int> bcast_buf(bcast_buf_size, 0); |
---|
926 | |
---|
927 | if(split_rank==0) bcast_buf.assign(bcast_buf_size, (color+1)*split_size); // root of each split comm seeds a payload that encodes its color and size |
---|
928 | |
---|
929 | MPI_Bcast(bcast_buf.data(), bcast_buf_size, MPI_INT, 0, split_comm); |
---|
930 | |
---|
931 | int bcast_test = 0; // 0 = pass; any element mismatch flips it to 1 |
---|
932 | for(int i=0; i<bcast_buf_size; i++) |
---|
933 | { |
---|
934 | if(bcast_buf[i] != (color+1)*split_size) |
---|
935 | bcast_test = 1; |
---|
936 | } |
---|
937 | |
---|
938 | int bcast_result; |
---|
939 | |
---|
940 | MPI_Reduce(&bcast_test, &bcast_result, 1, MPI_INT, MPI_SUM, 0, comm); // SUM over flags: 0 means every endpoint matched |
---|
941 | |
---|
942 | MPI_Barrier(split_comm); |
---|
943 | MPI_Comm_free(&split_comm); |
---|
944 | |
---|
945 | MPI_Barrier(split_comm2); // second split is only created and freed, never otherwise used |
---|
946 | MPI_Comm_free(&split_comm2); |
---|
947 | |
---|
948 | |
---|
949 | |
---|
950 | if(bcast_result == 0 && rank == 0) printf(" \t test MPI_Bcast for split comm\t OK\n"); |
---|
951 | if(bcast_result != 0 && rank == 0) printf(" \t test MPI_Bcast for split comm\t FAILED %d\n", bcast_result); |
---|
952 | |
---|
953 | } |
---|
954 | |
---|
955 | MPI_Barrier(comm); |
---|
956 | MPI_Barrier(comm); |
---|
957 | MPI_Barrier(comm); |
---|
958 | |
---|
959 | // TEST OF INTERCOMM_CREATE |
---|
960 | { |
---|
961 | MPI_Barrier(comm); |
---|
962 | |
---|
963 | int rank, size; |
---|
964 | MPI_Comm_rank(comm, &rank); |
---|
965 | MPI_Comm_size(comm, &size); |
---|
966 | |
---|
967 | int config = 1; |
---|
968 | if(config=1) |
---|
969 | { |
---|
970 | assert(size == 16 && omp_get_num_threads()==4); |
---|
971 | } |
---|
972 | |
---|
973 | int tab_color[16] = {2, 2, 2, 1, 2, 1, 1, 2, 2, 2, 2, 2, 2, 1, 1, 1}; // used for config 1 |
---|
974 | int tab_key[16] = {2, 1, 4, 1, 0, 4, 3, 3, 4, 3, 4, 4, 4, 0, 0, 2}; // used for config 1 |
---|
975 | |
---|
976 | int color = tab_color[rank]; |
---|
977 | int key = tab_key[rank]; |
---|
978 | |
---|
979 | |
---|
980 | MPI_Comm split_comm; |
---|
981 | MPI_Comm_split(comm, color, key, &split_comm); |
---|
982 | |
---|
983 | |
---|
984 | int split_rank, split_size; |
---|
985 | MPI_Comm_rank(split_comm, &split_rank); |
---|
986 | MPI_Comm_size(split_comm, &split_size); |
---|
987 | |
---|
988 | |
---|
989 | |
---|
990 | |
---|
991 | MPI_Barrier(comm); |
---|
992 | MPI_Barrier(comm); |
---|
993 | |
---|
994 | if(rank == 0) printf(" \t MPI_Comm_split \t OK\n"); |
---|
995 | |
---|
996 | MPI_Barrier(comm); |
---|
997 | MPI_Barrier(comm); |
---|
998 | |
---|
999 | |
---|
1000 | |
---|
1001 | |
---|
1002 | |
---|
1003 | int local_leader = 0; |
---|
1004 | int remote_leader = color==2? 13: 4; // used for config 1 |
---|
1005 | |
---|
1006 | MPI_Comm peer_comm = comm; |
---|
1007 | |
---|
1008 | MPI_Comm inter_comm; |
---|
1009 | MPI_Intercomm_create(split_comm, local_leader, peer_comm, remote_leader, 99, &inter_comm); |
---|
1010 | |
---|
1011 | int inter_rank, inter_size, remote_size; |
---|
1012 | MPI_Comm_rank(inter_comm, &inter_rank); |
---|
1013 | MPI_Comm_size(inter_comm, &inter_size); |
---|
1014 | MPI_Comm_remote_size(inter_comm, &remote_size); |
---|
1015 | |
---|
1016 | |
---|
1017 | MPI_Barrier(comm); |
---|
1018 | MPI_Barrier(comm); |
---|
1019 | |
---|
1020 | if(rank == 0) printf(" \t MPI_Intercomm_create \t OK\n"); |
---|
1021 | |
---|
1022 | printf("rank = %d, split_rank = %d, split_size = %d, inter_rank=%d, inter_size=%d, remote_size=%d\n", rank, split_rank, split_size, inter_rank, inter_size, remote_size); |
---|
1023 | |
---|
1024 | |
---|
1025 | MPI_Barrier(comm); |
---|
1026 | MPI_Barrier(comm); |
---|
1027 | |
---|
1028 | |
---|
1029 | if(color==2 && split_rank==0) |
---|
1030 | { |
---|
1031 | double sendbuf[9]={1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9}; |
---|
1032 | MPI_Request send_request; |
---|
1033 | MPI_Status send_status; |
---|
1034 | MPI_Isend(sendbuf, 9, MPI_DOUBLE, 0, 10, inter_comm, &send_request); |
---|
1035 | MPI_Wait(&send_request, &send_status); |
---|
1036 | } |
---|
1037 | |
---|
1038 | if(color==1 && split_rank==0) |
---|
1039 | { |
---|
1040 | double recvbuf[9]; |
---|
1041 | MPI_Request recv_request; |
---|
1042 | MPI_Status recv_status; |
---|
1043 | MPI_Irecv(recvbuf, 9, MPI_DOUBLE, 0, 10, inter_comm, &recv_request); |
---|
1044 | MPI_Wait(&recv_request, &recv_status); |
---|
1045 | for(int i=0; i<9; i++) |
---|
1046 | { |
---|
1047 | printf("recvbuf[%d] = %lf\n", i, recvbuf[i]); |
---|
1048 | } |
---|
1049 | } |
---|
1050 | |
---|
1051 | MPI_Barrier(comm); |
---|
1052 | MPI_Barrier(comm); |
---|
1053 | |
---|
1054 | if(rank == 0) printf(" \t Test iP2P for intercomm \t OK\n"); |
---|
1055 | |
---|
1056 | |
---|
1057 | |
---|
1058 | |
---|
1059 | MPI_Barrier(comm); |
---|
1060 | MPI_Barrier(comm); |
---|
1061 | |
---|
1062 | MPI_Barrier(split_comm); |
---|
1063 | MPI_Comm_free(&split_comm); |
---|
1064 | |
---|
1065 | |
---|
1066 | MPI_Barrier(comm); |
---|
1067 | MPI_Barrier(comm); |
---|
1068 | |
---|
1069 | MPI_Comm inter_comm_dup; |
---|
1070 | MPI_Comm_dup(inter_comm, &inter_comm_dup); |
---|
1071 | |
---|
1072 | int inter_comm_dup_size; |
---|
1073 | int inter_comm_dup_remote_size; |
---|
1074 | MPI_Comm_size(inter_comm_dup, &inter_comm_dup_size); |
---|
1075 | |
---|
1076 | MPI_Comm_remote_size(inter_comm_dup, &inter_comm_dup_remote_size); |
---|
1077 | |
---|
1078 | bool high = inter_comm_dup_size>inter_comm_dup_remote_size; |
---|
1079 | |
---|
1080 | |
---|
1081 | printf("rank = %d, high = %d, inter_comm_dup_size = %d, inter_comm_dup_remote_size = %d\n", rank, high, inter_comm_dup_size, inter_comm_dup_remote_size); |
---|
1082 | |
---|
1083 | MPI_Comm inter_comm_dup_merged; |
---|
1084 | |
---|
1085 | MPI_Intercomm_merge(inter_comm_dup, high, &inter_comm_dup_merged); |
---|
1086 | |
---|
1087 | int inter_comm_dup_merged_rank; |
---|
1088 | MPI_Comm_rank(inter_comm_dup_merged, &inter_comm_dup_merged_rank); |
---|
1089 | |
---|
1090 | int inter_comm_dup_merged_size; |
---|
1091 | MPI_Comm_size(inter_comm_dup_merged, &inter_comm_dup_merged_size); |
---|
1092 | |
---|
1093 | //printf("rank = %d, inter_comm_dup_merged_rank = %d, inter_comm_dup_merged_size = %d\n", rank, inter_comm_dup_merged_rank, inter_comm_dup_merged_size); |
---|
1094 | |
---|
1095 | |
---|
1096 | MPI_Barrier(inter_comm_dup_merged); |
---|
1097 | |
---|
1098 | // TEST OF GATHER |
---|
1099 | { |
---|
1100 | int inter_comm_dup_merged_rank; // shadows the outer variable: re-queried locally |
---|
1101 | MPI_Comm_rank(inter_comm_dup_merged, &inter_comm_dup_merged_rank); |
---|
1102 | |
---|
1103 | int inter_comm_dup_merged_size; |
---|
1104 | MPI_Comm_size(inter_comm_dup_merged, &inter_comm_dup_merged_size); |
---|
1105 | |
---|
1106 | |
---|
1107 | |
---|
1108 | int gather_root = 99; // placeholder; the real root is chosen by rank 0 below |
---|
1109 | |
---|
1110 | if(inter_comm_dup_merged_rank == 0) |
---|
1111 | { |
---|
1112 | gather_root = rand() % inter_comm_dup_merged_size; // random root so the test covers non-zero roots too |
---|
1113 | } |
---|
1114 | |
---|
1115 | |
---|
1116 | |
---|
1117 | MPI_Bcast(&gather_root, 1, MPI_INT, 0, inter_comm_dup_merged); // tell everyone which root was drawn |
---|
1118 | |
---|
1119 | printf("rank = %d, inter_comm_dup_merged_rank = %d, inter_comm_dup_merged_size = %d, gather_root = %d\n", rank, inter_comm_dup_merged_rank, inter_comm_dup_merged_size, gather_root); |
---|
1120 | |
---|
1121 | |
---|
1122 | double sendbuf[2]; // payload encodes (rank, -size) so the root can verify ordering |
---|
1123 | sendbuf[0] = inter_comm_dup_merged_rank * 1.0; |
---|
1124 | sendbuf[1] = inter_comm_dup_merged_size * (-1.0); |
---|
1125 | |
---|
1126 | std::vector<double>recvbuf(2*inter_comm_dup_merged_size, 0); |
---|
1127 | |
---|
1128 | MPI_Gather(sendbuf, 2, MPI_DOUBLE, recvbuf.data(), 2, MPI_DOUBLE, gather_root, inter_comm_dup_merged); |
---|
1129 | |
---|
1130 | bool gather_result = true; |
---|
1131 | |
---|
1132 | if(inter_comm_dup_merged_rank == gather_root) // only the root holds the gathered data |
---|
1133 | { |
---|
1134 | for(int i=0; i<inter_comm_dup_merged_size; i++) |
---|
1135 | { |
---|
1136 | if(abs(recvbuf[2*i] - i) > 1.e-10 || abs(recvbuf[2*i+1] + inter_comm_dup_merged_size) > 1.e-10) // slot i must hold rank i's payload |
---|
1137 | { |
---|
1138 | gather_result = false; |
---|
1139 | break; |
---|
1140 | } |
---|
1141 | } |
---|
1142 | |
---|
1143 | if(gather_result) printf("root = %d : \t test MPI_Gather for merged comm\t OK \n", gather_root); |
---|
1144 | else printf("root = %d : \t test MPI_Gather for merged comm\t FAILED\n", gather_root); |
---|
1145 | } |
---|
1146 | } |
---|
1147 | |
---|
1148 | MPI_Barrier(inter_comm_dup_merged); |
---|
1149 | |
---|
1150 | // TEST OF ALLREDUCE |
---|
1151 | { |
---|
1152 | |
---|
1153 | int sendbuf[2]; // payload (rank, -size): distinguishable under SUM/MIN/MAX |
---|
1154 | sendbuf[0] = inter_comm_dup_merged_rank; |
---|
1155 | sendbuf[1] = -inter_comm_dup_merged_size; |
---|
1156 | |
---|
1157 | std::vector<int>recvbuf(2, 0); |
---|
1158 | |
---|
1159 | MPI_Op op = MPI_MIN; // switch to MPI_SUM / MPI_MAX to exercise the other branches below |
---|
1160 | |
---|
1161 | MPI_Allreduce(sendbuf, recvbuf.data(), 2, MPI_INT, op, inter_comm_dup_merged); |
---|
1162 | |
---|
1163 | |
---|
1164 | int allreduce_test = 1; // 1 = pass until a mismatch is found |
---|
1165 | |
---|
1166 | |
---|
1167 | if((op == MPI_SUM && (abs(recvbuf[0]-(inter_comm_dup_merged_size-1)*inter_comm_dup_merged_size/2) > 1.e-10 || abs(recvbuf[1] + inter_comm_dup_merged_size * inter_comm_dup_merged_size) > 1.e-10) ) || // SUM of 0..size-1 and of size copies of -size |
---|
1168 | (op == MPI_MAX && (abs(recvbuf[0]-(inter_comm_dup_merged_size-1)) > 1.e-10 || abs(recvbuf[1] + inter_comm_dup_merged_size) > 1.e-10) ) || // MAX is size-1 and -size |
---|
1169 | (op == MPI_MIN && (abs(recvbuf[0]) > 1.e-10 || abs(recvbuf[1] + inter_comm_dup_merged_size) > 1.e-10) ) ) // MIN is 0 and -size |
---|
1170 | { |
---|
1171 | allreduce_test = 0; printf("%d %d\n", recvbuf[0], recvbuf[1]); |
---|
1172 | } |
---|
1173 | |
---|
1174 | |
---|
1175 | int allreduce_result; |
---|
1176 | MPI_Reduce(&allreduce_test, &allreduce_result, 1, MPI_INT, MPI_MIN, 0, inter_comm_dup_merged); // MIN over flags: every rank must pass |
---|
1177 | |
---|
1178 | if(inter_comm_dup_merged_rank == 0 && allreduce_result) printf(" \t test MPI_Allreduce for merged comm \t OK\n"); |
---|
1179 | if(inter_comm_dup_merged_rank == 0 && !allreduce_result) printf(" \t test MPI_Allreduce for merged comm \t FAILED\n"); |
---|
1180 | |
---|
1181 | } |
---|
1182 | |
---|
1183 | MPI_Barrier(inter_comm_dup_merged); |
---|
1184 | |
---|
1185 | // TEST OF EXSCAN |
---|
1186 | { |
---|
1187 | |
---|
1188 | std::vector<int>sendbuf(2, inter_comm_dup_merged_rank); // each rank contributes its own rank, twice |
---|
1189 | std::vector<int>recvbuf(2, -1); // -1 sentinel; Exscan leaves rank 0's recvbuf undefined |
---|
1190 | |
---|
1191 | MPI_Op op = MPI_SUM; // switch to MPI_MIN / MPI_MAX to exercise the other branches below |
---|
1192 | |
---|
1193 | |
---|
1194 | MPI_Exscan(sendbuf.data(), recvbuf.data(), 2, MPI_INT, op, inter_comm_dup_merged); // exclusive prefix reduction over the merged comm |
---|
1195 | |
---|
1196 | int exscan_test = 1; // 1 = pass until a mismatch is found |
---|
1197 | |
---|
1198 | if(inter_comm_dup_merged_rank >0) // rank 0 has no predecessors, so its result is skipped |
---|
1199 | if((op == MPI_SUM && (abs(recvbuf[0]-inter_comm_dup_merged_rank*(inter_comm_dup_merged_rank-1)/2) > 1.e-10 || abs(recvbuf[1]-inter_comm_dup_merged_rank*(inter_comm_dup_merged_rank-1)/2) > 1.e-10) ) || // SUM of 0..rank-1 |
---|
1200 | (op == MPI_MIN && (abs(recvbuf[0] ) > 1.e-10 || abs(recvbuf[1]) > 1.e-10) ) || // MIN of 0..rank-1 is 0 |
---|
1201 | (op == MPI_MAX && (abs(recvbuf[0] - inter_comm_dup_merged_rank+1) > 1.e-10 || abs(recvbuf[1] - inter_comm_dup_merged_rank+1) > 1.e-10) ) ) // MAX of 0..rank-1 is rank-1 |
---|
1202 | { |
---|
1203 | exscan_test = 0; |
---|
1204 | } |
---|
1205 | |
---|
1206 | int exscan_result; |
---|
1207 | MPI_Reduce(&exscan_test, &exscan_result, 1, MPI_INT, MPI_MIN, 0, inter_comm_dup_merged); // MIN over flags: every rank must pass |
---|
1208 | |
---|
1209 | if(inter_comm_dup_merged_rank == 0 && exscan_result) printf(" \t test MPI_Exscan for merged comm \t OK\n"); |
---|
1210 | if(inter_comm_dup_merged_rank == 0 && !exscan_result) printf(" \t test MPI_Exscan for merged comm \t FAILED %d\n", exscan_result); |
---|
1211 | } |
---|
1212 | |
---|
1213 | MPI_Barrier(inter_comm_dup_merged); |
---|
1214 | |
---|
1215 | // TEST OF SCATTERV |
---|
1216 | { |
---|
1217 | |
---|
1218 | int scatterv_root; |
---|
1219 | |
---|
1220 | if(inter_comm_dup_merged_rank == 0) scatterv_root = rand() % inter_comm_dup_merged_size; // rank 0 draws a random root |
---|
1221 | |
---|
1222 | MPI_Bcast(&scatterv_root, 1, MPI_INT, 0, inter_comm_dup_merged); // tell everyone which root was drawn |
---|
1223 | |
---|
1224 | std::vector<int>sendbuf(2*inter_comm_dup_merged_size, inter_comm_dup_merged_rank); |
---|
1225 | std::vector<int>recvbuf(2, -1); // -1 sentinel, overwritten by the scatter |
---|
1226 | std::vector<int>sendcounts(inter_comm_dup_merged_size, 2); // 2 ints per destination rank |
---|
1227 | std::vector<int>displs(inter_comm_dup_merged_size, 0); |
---|
1228 | |
---|
1229 | for(int i=0; i<inter_comm_dup_merged_size; i++) displs[i] = 2*(inter_comm_dup_merged_size-1-i); // reversed displacements: rank i receives the chunk prepared for slot size-1-i |
---|
1230 | |
---|
1231 | if(inter_comm_dup_merged_rank == scatterv_root) // only the root's sendbuf matters |
---|
1232 | { |
---|
1233 | for(int i=0; i<inter_comm_dup_merged_size; i++) |
---|
1234 | { |
---|
1235 | sendbuf[2*i] = i; |
---|
1236 | sendbuf[2*i+1] = inter_comm_dup_merged_size; |
---|
1237 | } |
---|
1238 | } |
---|
1239 | |
---|
1240 | |
---|
1241 | MPI_Scatterv(sendbuf.data(), sendcounts.data(), displs.data(), MPI_INT, recvbuf.data(), 2, MPI_INT, scatterv_root, inter_comm_dup_merged); |
---|
1242 | |
---|
1243 | int scatterv_test = 1; // 1 = pass until a mismatch is found |
---|
1244 | |
---|
1245 | |
---|
1246 | if( abs(recvbuf[0]-(inter_comm_dup_merged_size-1-inter_comm_dup_merged_rank)) > 1.e-10 || abs(recvbuf[1]-inter_comm_dup_merged_size) > 1.e-10 ) // expect the reversed slot index and the size |
---|
1247 | { |
---|
1248 | scatterv_test = 0; printf("%d %d id = %d\n", recvbuf[0], recvbuf[1], inter_comm_dup_merged_rank); |
---|
1249 | } |
---|
1250 | |
---|
1251 | |
---|
1252 | int scatterv_result; |
---|
1253 | MPI_Reduce(&scatterv_test, &scatterv_result, 1, MPI_INT, MPI_MIN, scatterv_root, inter_comm_dup_merged); // MIN over flags, reported at the scatter root |
---|
1254 | |
---|
1255 | if(inter_comm_dup_merged_rank == scatterv_root && scatterv_result) printf("root = %d : \t test MPI_Scatterv for merged comm \t OK\n", scatterv_root); |
---|
1256 | if(inter_comm_dup_merged_rank == scatterv_root && !scatterv_result) printf("root = %d : \t test MPI_Scatterv for merged comm \t FAILED\n", scatterv_root); |
---|
1257 | } |
---|
1258 | |
---|
1259 | MPI_Barrier(inter_comm_dup_merged); |
---|
1260 | |
---|
1261 | // TEST OF TESTALL |
---|
1262 | { |
---|
1263 | int merged_size, merged_rank; |
---|
1264 | MPI_Comm_rank(inter_comm_dup_merged, &merged_rank); |
---|
1265 | MPI_Comm_size(inter_comm_dup_merged, &merged_size); |
---|
1266 | |
---|
1267 | int left = merged_rank>0? merged_rank-1 : merged_size-1; // ring neighbours: receive from left, ... |
---|
1268 | int right = (merged_rank+1) % merged_size; // ... send to right |
---|
1269 | |
---|
1270 | int N=10000; |
---|
1271 | int *left_buf = new int[N]; |
---|
1272 | int *right_buf = new int[N]; |
---|
1273 | |
---|
1274 | for(int i=0; i<N; i++) |
---|
1275 | { |
---|
1276 | right_buf[i] = merged_rank*100000+i; // payload encodes (sender rank, index) |
---|
1277 | } |
---|
1278 | |
---|
1279 | MPI_Request request[2]; |
---|
1280 | MPI_Status status[2]; |
---|
1281 | |
---|
1282 | MPI_Irecv(left_buf, N, MPI_INT, left, 0, inter_comm_dup_merged, &request[0]); |
---|
1283 | MPI_Isend(right_buf, N, MPI_INT, right, 0, inter_comm_dup_merged, &request[1]); |
---|
1284 | |
---|
1285 | |
---|
1286 | int flag; |
---|
1287 | MPI_Testall(2, request, &flag, status); // first non-blocking probe (flag may be 0) |
---|
1288 | |
---|
1289 | printf("rank = %d, flag = %d\n", merged_rank, flag); |
---|
1290 | |
---|
1291 | while(!flag) MPI_Testall(2, request, &flag, status); // deliberate busy-wait: this test exercises repeated Testall polling |
---|
1292 | |
---|
1293 | int recv_count; |
---|
1294 | MPI_Get_count(&status[0], MPI_INT, &recv_count); // should report N ints received |
---|
1295 | |
---|
1296 | MPI_Barrier(inter_comm_dup_merged); |
---|
1297 | |
---|
1298 | printf("rank = %d, recv_count = %d, left_buf[5-10] = %d\t%d\t%d\t%d\t%d\t%d\n", merged_rank, recv_count, left_buf[5], left_buf[6], left_buf[7], left_buf[8], left_buf[9], left_buf[10]); |
---|
1299 | |
---|
1300 | delete[] left_buf; |
---|
1301 | delete[] right_buf; |
---|
1302 | } |
---|
1303 | |
---|
1304 | MPI_Barrier(inter_comm_dup_merged); |
---|
1305 | |
---|
1306 | // TEST WAITALL |
---|
1307 | { |
---|
1308 | int merged_rank; |
---|
1309 | int merged_size; |
---|
1310 | |
---|
1311 | MPI_Comm_rank(inter_comm_dup_merged, &merged_rank); |
---|
1312 | MPI_Comm_size(inter_comm_dup_merged, &merged_size); |
---|
1313 | |
---|
1314 | int left = merged_rank>0? merged_rank-1 : merged_size-1; // ring neighbours: receive from left, ... |
---|
1315 | int right = (merged_rank+1)%merged_size; // ... send to right |
---|
1316 | |
---|
1317 | printf("merged_rank = %d, left = %d, right = %d\n", merged_rank, left, right); |
---|
1318 | |
---|
1319 | int NN=10000; |
---|
1320 | |
---|
1321 | double *left_buf = new double[NN]; |
---|
1322 | double *right_buf = new double[NN]; |
---|
1323 | |
---|
1324 | for(int i=0; i<NN; i++) |
---|
1325 | { |
---|
1326 | right_buf[i] = merged_rank*1000000 + i; // payload encodes (sender rank, index) |
---|
1327 | } |
---|
1328 | |
---|
1329 | MPI_Request request[2]; |
---|
1330 | MPI_Status status[2]; |
---|
1331 | |
---|
1332 | MPI_Irecv(left_buf, NN, MPI_DOUBLE, left, 0, inter_comm_dup_merged, &request[0]); |
---|
1333 | MPI_Isend(right_buf, NN, MPI_DOUBLE, right, 0, inter_comm_dup_merged, &request[1]); |
---|
1334 | |
---|
1335 | |
---|
1336 | MPI_Waitall(2, request, status); // block until both the send and the receive complete |
---|
1337 | |
---|
1338 | printf("merged_rank = %d, left_buf[0-4] = %lf\t%lf\t%lf\t%lf\t%lf\n", merged_rank, left_buf[0], left_buf[1], left_buf[2], left_buf[3], left_buf[4]); |
---|
1339 | |
---|
1340 | |
---|
1341 | delete[] left_buf; // FIX: was `delete left_buf;` — arrays from new[] require delete[] (plain delete is UB) |
---|
1342 | delete[] right_buf; // FIX: same as above |
---|
1343 | } |
---|
1344 | |
---|
1345 | |
---|
1346 | MPI_Barrier(comm); |
---|
1347 | MPI_Barrier(comm); |
---|
1348 | |
---|
1349 | |
---|
1350 | MPI_Comm_free(&inter_comm_dup); |
---|
1351 | |
---|
1352 | MPI_Barrier(comm); |
---|
1353 | MPI_Barrier(comm); |
---|
1354 | |
---|
1355 | |
---|
1356 | MPI_Comm_free(&inter_comm_dup_merged); |
---|
1357 | |
---|
1358 | |
---|
1359 | |
---|
1360 | MPI_Barrier(comm); |
---|
1361 | MPI_Barrier(comm); |
---|
1362 | |
---|
1363 | MPI_Barrier(inter_comm); |
---|
1364 | MPI_Comm_free(&inter_comm); |
---|
1365 | |
---|
1366 | } |
---|
1367 | |
---|
1368 | // TEST OF INTERCOMM_CREATE |
---|
1369 | { |
---|
1370 | MPI_Barrier(comm); |
---|
1371 | |
---|
1372 | int rank, size; |
---|
1373 | MPI_Comm_rank(comm, &rank); |
---|
1374 | MPI_Comm_size(comm, &size); |
---|
1375 | |
---|
1376 | int config = 1; |
---|
1377 | if(config=1) |
---|
1378 | { |
---|
1379 | assert(size == 16 && omp_get_num_threads()==4); |
---|
1380 | } |
---|
1381 | |
---|
1382 | int tab_color[16] = {2, 2, 2, 1, 2, 1, 1, 2, 2, 2, 2, 2, 2, 1, 1, 1}; // used for config 1 |
---|
1383 | int tab_key[16] = {2, 1, 4, 1, 0, 4, 3, 3, 4, 3, 4, 4, 4, 0, 0, 2}; // used for config 1 |
---|
1384 | |
---|
1385 | int color = tab_color[rank]; |
---|
1386 | int key = tab_key[rank]; |
---|
1387 | |
---|
1388 | |
---|
1389 | MPI_Comm split_comm; |
---|
1390 | MPI_Comm_split(comm, color, key, &split_comm); |
---|
1391 | |
---|
1392 | |
---|
1393 | int split_rank, split_size; |
---|
1394 | MPI_Comm_rank(split_comm, &split_rank); |
---|
1395 | MPI_Comm_size(split_comm, &split_size); |
---|
1396 | |
---|
1397 | |
---|
1398 | |
---|
1399 | |
---|
1400 | MPI_Barrier(comm); |
---|
1401 | MPI_Barrier(comm); |
---|
1402 | |
---|
1403 | if(rank == 0) printf(" \t MPI_Comm_split \t OK\n"); |
---|
1404 | |
---|
1405 | MPI_Barrier(comm); |
---|
1406 | MPI_Barrier(comm); |
---|
1407 | |
---|
1408 | |
---|
1409 | int local_leader = 5; |
---|
1410 | int remote_leader = color==2? 5: 2; // used for config 1 |
---|
1411 | |
---|
1412 | MPI_Comm peer_comm = comm; |
---|
1413 | |
---|
1414 | MPI_Comm inter_comm; |
---|
1415 | MPI_Intercomm_create(split_comm, local_leader, peer_comm, remote_leader, 99, &inter_comm); |
---|
1416 | |
---|
1417 | int inter_rank; |
---|
1418 | int inter_size; |
---|
1419 | MPI_Comm_rank(inter_comm, &inter_rank); |
---|
1420 | MPI_Comm_size(inter_comm, &inter_size); |
---|
1421 | |
---|
1422 | printf("split_rank = %d, inter_rank = %d / %d\n", split_rank, inter_rank, inter_size); |
---|
1423 | |
---|
1424 | MPI_Barrier(split_comm); |
---|
1425 | MPI_Comm_free(&split_comm); |
---|
1426 | |
---|
1427 | MPI_Barrier(inter_comm); |
---|
1428 | MPI_Comm_free(&inter_comm); |
---|
1429 | |
---|
1430 | } |
---|
1431 | |
---|
1432 | MPI_Barrier(comm); |
---|
1433 | MPI_Comm_free(&comm); |
---|
1434 | } |
---|
1435 | |
---|
1436 | /* |
---|
1437 | int num_threads; |
---|
1438 | if(mpi_rank < mpi_size-2) |
---|
1439 | { |
---|
1440 | printf("Proc %d is client\n", mpi_rank); |
---|
1441 | num_threads = 2;//+mpi_rank; |
---|
1442 | } |
---|
1443 | else |
---|
1444 | { |
---|
1445 | printf("Proc %d is server\n", mpi_rank); |
---|
1446 | num_threads = 1; |
---|
1447 | } |
---|
1448 | |
---|
1449 | omp_set_num_threads(num_threads); |
---|
1450 | |
---|
1451 | #pragma omp parallel default(shared) firstprivate(num_threads) |
---|
1452 | { |
---|
1453 | int num_ep = num_threads; |
---|
1454 | MPI_Info info; |
---|
1455 | |
---|
1456 | //printf("omp_get_thread_num() = %d, omp_get_num_threads() = %d, num_threads = %d\n", omp_get_thread_num(), omp_get_num_threads(), num_threads); |
---|
1457 | MPI_Comm *ep_comm; |
---|
1458 | #pragma omp master |
---|
1459 | { |
---|
1460 | MPI_Comm *ep_comm; |
---|
1461 | MPI_Comm_create_endpoints(MPI_COMM_WORLD->mpi_comm, num_ep, info, ep_comm); |
---|
1462 | passage = ep_comm; |
---|
1463 | } |
---|
1464 | |
---|
1465 | #pragma omp barrier |
---|
1466 | |
---|
1467 | |
---|
1468 | MPI_Comm comm; // this should act as EP_COMM_WORLD |
---|
1469 | comm = passage[omp_get_thread_num()]; |
---|
1470 | |
---|
1471 | int rank, size; |
---|
1472 | MPI_Comm_rank(comm, &rank); |
---|
1473 | MPI_Comm_size(comm, &size); |
---|
1474 | |
---|
1475 | |
---|
1476 | |
---|
1477 | bool isClient = false; |
---|
1478 | bool isServer = false; |
---|
1479 | |
---|
1480 | if(omp_get_num_threads()>1) isClient = true; |
---|
1481 | else isServer = true; |
---|
1482 | |
---|
1483 | printf("mpi_rank = %d, ep_rank = %d, isClient = %d\n", mpi_rank, rank, isClient); |
---|
1484 | |
---|
1485 | MPI_Win ep_win; |
---|
1486 | MPI_Aint buf_size=1; |
---|
1487 | int buf = rank; |
---|
1488 | int local_buf = rank; |
---|
1489 | int result_buf = -1; |
---|
1490 | MPI_Win_create(&buf, buf_size, sizeof(int), info, comm, &ep_win); |
---|
1491 | MPI_Barrier(comm); |
---|
1492 | |
---|
1493 | // MPI_Win_fence(MPI_MODE_NOPRECEDE, ep_win); |
---|
1494 | |
---|
1495 | MPI_Barrier(comm); |
---|
1496 | sleep(0.2); |
---|
1497 | MPI_Barrier(comm); |
---|
1498 | |
---|
1499 | MPI_Win_fence(0, ep_win); |
---|
1500 | |
---|
1501 | if(rank == 0) |
---|
1502 | { |
---|
1503 | local_buf = 99; |
---|
1504 | MPI_Aint displs=0; |
---|
1505 | MPI_Put(&local_buf, 1, MPI_INT, size-1, displs, 1, MPI_INT, ep_win); |
---|
1506 | } |
---|
1507 | |
---|
1508 | if(rank == size-2) |
---|
1509 | { |
---|
1510 | MPI_Aint displs(0); |
---|
1511 | MPI_Get(&local_buf, 1, MPI_INT, 2, displs, 1, MPI_INT, ep_win); |
---|
1512 | } |
---|
1513 | |
---|
1514 | MPI_Win_fence(0, ep_win); |
---|
1515 | |
---|
1516 | if(rank == 1) |
---|
1517 | { |
---|
1518 | MPI_Aint displs=0; |
---|
1519 | MPI_Accumulate(&local_buf, 1, MPI_INT, size-1, displs, 1, MPI_INT, MPI_REPLACE, ep_win); |
---|
1520 | } |
---|
1521 | |
---|
1522 | |
---|
1523 | MPI_Barrier(comm); |
---|
1524 | |
---|
1525 | MPI_Win_fence(0, ep_win); |
---|
1526 | |
---|
1527 | if(rank == 2) |
---|
1528 | { |
---|
1529 | MPI_Aint displs = 0; |
---|
1530 | MPI_Get_accumulate(&local_buf, 1, MPI_INT, &result_buf, |
---|
1531 | 1, MPI_INT, size-2, displs, |
---|
1532 | 1, MPI_INT, MPI_SUM, ep_win); |
---|
1533 | } |
---|
1534 | |
---|
1535 | MPI_Win_fence(0, ep_win); |
---|
1536 | |
---|
1537 | if(rank == 6) |
---|
1538 | { |
---|
1539 | MPI_Aint displs = 0; |
---|
1540 | MPI_Fetch_and_op(&local_buf, &result_buf, MPI_INT, size-1, displs, |
---|
1541 | MPI_SUM, ep_win); |
---|
1542 | } |
---|
1543 | |
---|
1544 | MPI_Win_fence(0, ep_win); |
---|
1545 | |
---|
1546 | if(rank == 7) |
---|
1547 | { |
---|
1548 | MPI_Aint displs = 0; |
---|
1549 | MPI_Compare_and_swap(&local_buf, &buf, &result_buf, MPI_INT, size-1, displs, ep_win); |
---|
1550 | } |
---|
1551 | |
---|
1552 | MPI_Win_fence(0, ep_win); |
---|
1553 | |
---|
1554 | //::MPI_Compare_and_swap(origin_addr, compare_addr, result_addr, to_mpi_type(datatype), target_mpi_rank, to_mpi_aint(target_disp), to_mpi_win(win.server_win[target_local_rank])); |
---|
1555 | |
---|
1556 | MPI_Win ep_win_allocated; |
---|
1557 | int* baseptr = new int[10]; |
---|
1558 | MPI_Aint base_size = 4; |
---|
1559 | |
---|
1560 | MPI_Win_allocate (base_size, sizeof(int), info, comm, baseptr, &ep_win_allocated); |
---|
1561 | |
---|
1562 | MPI_Win_fence(0, ep_win_allocated); |
---|
1563 | |
---|
1564 | MPI_Win_free(&ep_win_allocated); |
---|
1565 | delete[] baseptr; |
---|
1566 | |
---|
1567 | MPI_Win_free(&ep_win); |
---|
1568 | printf("rank = %d, buf = %d, local_buf = %d, result_buf = %d\n", rank, buf, local_buf, result_buf); |
---|
1569 | |
---|
1570 | MPI_Comm_free(&comm); |
---|
1571 | |
---|
1572 | } |
---|
1573 | */ |
---|
1574 | MPI_Finalize(); |
---|
1575 | |
---|
1576 | } |
---|
1577 | |
---|