#include "ep_lib.hpp"
#include <mpi.h>
#include "ep_declaration.hpp"
#include "ep_mpi.hpp"

using namespace std;

extern std::map<std::pair<int, int>, MPI_Group* > * tag_group_map;

extern std::map<int, std::pair<ep_lib::MPI_Comm*, std::pair<int, int> > > * tag_comm_map;

extern MPI_Group MPI_GROUP_WORLD;

namespace ep_lib
{

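  // Entry point. If local_comm is a plain MPI communicator (no endpoints),
  // fall back to the MPI implementation; otherwise choose between the
  // endpoint algorithm and the (unimplemented) same-process case below.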
  int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader, MPI_Comm peer_comm, int remote_leader, int tag, MPI_Comm *newintercomm)
  {
    if(!local_comm->is_ep) return MPI_Intercomm_create_mpi(local_comm, local_leader, peer_comm, remote_leader, tag, newintercomm);

    int ep_rank = local_comm->ep_comm_ptr->size_rank_info[0].first;

    // check if local leaders are in the same mpi proc
    // by checking their mpi_rank in peer_comm

    int mpi_rank_of_leader[2];

    if(ep_rank == local_leader)
    {
      mpi_rank_of_leader[0] = peer_comm->ep_comm_ptr->size_rank_info[2].first;
      mpi_rank_of_leader[1] = peer_comm->ep_rank_map->at(remote_leader).second;
    }

    MPI_Bcast(mpi_rank_of_leader, 2, MPI_INT, local_leader, local_comm);

    if(mpi_rank_of_leader[0] != mpi_rank_of_leader[1])
    {
      Debug("calling MPI_Intercomm_create_endpoint\n");
      return MPI_Intercomm_create_endpoint(local_comm, local_leader, peer_comm, remote_leader, tag, newintercomm);
    }
    else
    {
      printf("Local leaders are in the same MPI process. Routine not yet implemented\n");
      MPI_Abort(local_comm, 0);
      return 0; // unreachable: MPI_Abort does not return
    }
  }

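  // Build an endpoint intercommunicator in seven steps:
  //   1. the two leaders exchange sizes and leader ranks, then broadcast them locally;
  //   2. gather the world ranks of both sides;
  //   3. decide priority and ownership of each endpoint;
  //   4. extract the owning MPI processes and create the MPI intercomm;
  //   5. count the endpoints each MPI process will host;
  //   6. create the endpoint communicators from the merged intracomm;
  //   7. build the inter_rank_map needed for endpoint addressing.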
  int MPI_Intercomm_create_endpoint(MPI_Comm local_comm, int local_leader, MPI_Comm peer_comm, int remote_leader, int tag, MPI_Comm *newintercomm)
  {
    int ep_rank, ep_rank_loc, mpi_rank;
    int ep_size, num_ep, mpi_size;

    ep_rank     = local_comm->ep_comm_ptr->size_rank_info[0].first;
    ep_rank_loc = local_comm->ep_comm_ptr->size_rank_info[1].first;
    mpi_rank    = local_comm->ep_comm_ptr->size_rank_info[2].first;
    ep_size     = local_comm->ep_comm_ptr->size_rank_info[0].second;
    num_ep      = local_comm->ep_comm_ptr->size_rank_info[1].second;
    mpi_size    = local_comm->ep_comm_ptr->size_rank_info[2].second;

    //////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // step 1 : local leaders exchange ep_size, leader_rank_in_peer, leader_rank_in_peer_mpi, leader_rank_in_world. //
    //          local leaders bcast results to all ep in local_comm                                                 //
    //////////////////////////////////////////////////////////////////////////////////////////////////////////////////

    bool is_local_leader = (ep_rank == local_leader);

    int local_leader_rank_in_peer;
    int local_leader_rank_in_peer_mpi;
    int local_leader_rank_in_world;

    int remote_ep_size;
    int remote_leader_rank_in_peer;
    int remote_leader_rank_in_peer_mpi;
    int remote_leader_rank_in_world;

    int send_quadruple[4];
    int recv_quadruple[4];

    if(is_local_leader)
    {
      MPI_Comm_rank(peer_comm, &local_leader_rank_in_peer);
      ::MPI_Comm_rank(to_mpi_comm(peer_comm->mpi_comm), &local_leader_rank_in_peer_mpi);
      ::MPI_Comm_rank(to_mpi_comm(MPI_COMM_WORLD->mpi_comm), &local_leader_rank_in_world);

      send_quadruple[0] = ep_size;
      send_quadruple[1] = local_leader_rank_in_peer;
      send_quadruple[2] = local_leader_rank_in_peer_mpi;
      send_quadruple[3] = local_leader_rank_in_world;

      MPI_Request request;
      MPI_Status status;

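      // Order the two transfers by rank in peer_comm so both leaders agree on
      // who sends first; this keeps the exchange deadlock-free even though
      // each nonblocking call is completed immediately by MPI_Wait.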
      if(remote_leader > local_leader_rank_in_peer)
      {
        MPI_Isend(send_quadruple, 4, MPI_INT, remote_leader, tag, peer_comm, &request);
        MPI_Wait(&request, &status);

        MPI_Irecv(recv_quadruple, 4, MPI_INT, remote_leader, tag, peer_comm, &request);
        MPI_Wait(&request, &status);
      }
      else
      {
        MPI_Irecv(recv_quadruple, 4, MPI_INT, remote_leader, tag, peer_comm, &request);
        MPI_Wait(&request, &status);

        MPI_Isend(send_quadruple, 4, MPI_INT, remote_leader, tag, peer_comm, &request);
        MPI_Wait(&request, &status);
      }

      remote_ep_size = recv_quadruple[0];
      remote_leader_rank_in_peer = recv_quadruple[1];
      remote_leader_rank_in_peer_mpi = recv_quadruple[2];
      remote_leader_rank_in_world = recv_quadruple[3];
#ifdef _showinfo
      printf("peer_rank = %d, packed exchange OK\n", local_leader_rank_in_peer);
#endif
    }

    MPI_Bcast(send_quadruple, 4, MPI_INT, local_leader, local_comm);
    MPI_Bcast(recv_quadruple, 4, MPI_INT, local_leader, local_comm);

    if(!is_local_leader)
    {
      local_leader_rank_in_peer = send_quadruple[1];
      local_leader_rank_in_peer_mpi = send_quadruple[2];
      local_leader_rank_in_world = send_quadruple[3];

      remote_ep_size = recv_quadruple[0];
      remote_leader_rank_in_peer = recv_quadruple[1];
      remote_leader_rank_in_peer_mpi = recv_quadruple[2];
      remote_leader_rank_in_world = recv_quadruple[3];
    }

#ifdef _showinfo
    MPI_Barrier(peer_comm);
    MPI_Barrier(peer_comm);
    printf("peer_rank = %d, ep_size = %d, remote_ep_size = %d\n", peer_comm->ep_comm_ptr->size_rank_info[0].first, ep_size, remote_ep_size);
    MPI_Barrier(peer_comm);
    MPI_Barrier(peer_comm);
#endif

    ///////////////////////////////////////////////////////////////////
    // step 2 : gather ranks in world for both local and remote comm //
    ///////////////////////////////////////////////////////////////////

    int rank_in_world;
    ::MPI_Comm_rank(to_mpi_comm(MPI_COMM_WORLD->mpi_comm), &rank_in_world);

    int *ranks_in_world_local  = new int[ep_size];
    int *ranks_in_world_remote = new int[remote_ep_size];

    MPI_Allgather(&rank_in_world, 1, MPI_INT, ranks_in_world_local, 1, MPI_INT, local_comm);

    if(is_local_leader)
    {
      MPI_Request request;
      MPI_Status status;

      if(remote_leader > local_leader_rank_in_peer)
      {
        MPI_Isend(ranks_in_world_local, ep_size, MPI_INT, remote_leader, tag, peer_comm, &request);
        MPI_Wait(&request, &status);

        MPI_Irecv(ranks_in_world_remote, remote_ep_size, MPI_INT, remote_leader, tag, peer_comm, &request);
        MPI_Wait(&request, &status);
      }
      else
      {
        MPI_Irecv(ranks_in_world_remote, remote_ep_size, MPI_INT, remote_leader, tag, peer_comm, &request);
        MPI_Wait(&request, &status);

        MPI_Isend(ranks_in_world_local, ep_size, MPI_INT, remote_leader, tag, peer_comm, &request);
        MPI_Wait(&request, &status);
      }
#ifdef _showinfo
      printf("peer_rank = %d, ranks_in_world exchange OK\n", local_leader_rank_in_peer);
#endif
    }

    MPI_Bcast(ranks_in_world_remote, remote_ep_size, MPI_INT, local_leader, local_comm);

#ifdef _showinfo

    MPI_Barrier(peer_comm);
    MPI_Barrier(peer_comm);

    if(remote_leader == 4)
    {
      for(int i=0; i<ep_size; i++)
      {
        if(ep_rank == i)
        {
          printf("peer_rank = %d, ranks_in_world_local = \n", peer_comm->ep_comm_ptr->size_rank_info[0].first);
          for(int j=0; j<ep_size; j++)
          {
            printf("%d\t", ranks_in_world_local[j]);
          }

          printf("\npeer_rank = %d, ranks_in_world_remote = \n", peer_comm->ep_comm_ptr->size_rank_info[0].first);
          for(int j=0; j<remote_ep_size; j++)
          {
            printf("%d\t", ranks_in_world_remote[j]);
          }
          printf("\n");
        }

        MPI_Barrier(local_comm);
        MPI_Barrier(local_comm);
        MPI_Barrier(local_comm);
      }
    }

    MPI_Barrier(peer_comm);
    MPI_Barrier(peer_comm);
    MPI_Barrier(peer_comm);

    if(remote_leader == 13)
    {
      for(int i=0; i<ep_size; i++)
      {
        if(ep_rank == i)
        {
          printf("peer_rank = %d, ranks_in_world_local = \n", peer_comm->ep_comm_ptr->size_rank_info[0].first);
          for(int j=0; j<ep_size; j++)
          {
            printf("%d\t", ranks_in_world_local[j]);
          }

          printf("\npeer_rank = %d, ranks_in_world_remote = \n", peer_comm->ep_comm_ptr->size_rank_info[0].first);
          for(int j=0; j<remote_ep_size; j++)
          {
            printf("%d\t", ranks_in_world_remote[j]);
          }
          printf("\n");
        }

        MPI_Barrier(local_comm);
        MPI_Barrier(local_comm);
        MPI_Barrier(local_comm);
      }
    }

    MPI_Barrier(peer_comm);
    MPI_Barrier(peer_comm);

#endif

    //////////////////////////////////////////////////////////////
    // step 3 : determine the priority and ownership of each ep //
    //////////////////////////////////////////////////////////////

    bool priority = (local_leader_rank_in_peer < remote_leader_rank_in_peer);

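    // Ownership decides which side hosts this MPI process's endpoints in the
    // merged communicator: by default the higher-priority side owns them, but
    // the local leader's process always owns and the remote leader's process
    // never does, so a process shared by both groups is claimed exactly once.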
    int ownership = priority;

    if(rank_in_world == ranks_in_world_local[local_leader]) ownership = 1;
    if(rank_in_world == remote_leader_rank_in_world)        ownership = 0;

#ifdef _showinfo
    MPI_Barrier(peer_comm);
    MPI_Barrier(peer_comm);
    printf("peer_rank = %d, priority = %d, local_leader_rank_in_peer = %d, remote_leader_rank_in_peer = %d\n", peer_comm->ep_comm_ptr->size_rank_info[0].first, priority, local_leader_rank_in_peer, remote_leader_rank_in_peer);
    MPI_Barrier(peer_comm);
    MPI_Barrier(peer_comm);
#endif

#ifdef _showinfo
    MPI_Barrier(peer_comm);
    MPI_Barrier(peer_comm);
    printf("peer_rank = %d, priority = %d, ownership = %d\n", peer_comm->ep_comm_ptr->size_rank_info[0].first, priority, ownership);
    MPI_Barrier(peer_comm);
    MPI_Barrier(peer_comm);
#endif

    //////////////////////////////////////////////////////
    // step 4 : extract local_comm and create intercomm //
    //////////////////////////////////////////////////////

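    // The extraction involves one endpoint per MPI process: the local leader,
    // plus the first endpoint (ep_rank_loc == 0) of every process other than
    // the one hosting the local leader.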
    bool is_involved = is_local_leader || (ep_rank_loc == 0 && rank_in_world != local_leader_rank_in_world);

#ifdef _showinfo

    MPI_Barrier(peer_comm);
    MPI_Barrier(peer_comm);
    printf("peer_rank = %d, is_involved = %d\n", peer_comm->ep_comm_ptr->size_rank_info[0].first, is_involved);
    MPI_Barrier(peer_comm);
    MPI_Barrier(peer_comm);

#endif

    if(is_involved)
    {
      ::MPI_Group local_group;
      ::MPI_Group extracted_group;
      ::MPI_Comm extracted_comm;

      ::MPI_Comm_group(to_mpi_comm(local_comm->mpi_comm), &local_group);

      int *ownership_list = new int[mpi_size];
      int *mpi_rank_list  = new int[mpi_size];

      ::MPI_Allgather(&ownership, 1, to_mpi_type(MPI_INT), ownership_list, 1, to_mpi_type(MPI_INT), to_mpi_comm(local_comm->mpi_comm));
      ::MPI_Allgather(&mpi_rank,  1, to_mpi_type(MPI_INT), mpi_rank_list,  1, to_mpi_type(MPI_INT), to_mpi_comm(local_comm->mpi_comm));

      int n=0;
      for(int i=0; i<mpi_size; i++)
      {
        n += ownership_list[i];
      }

      int *new_mpi_rank_list = new int[n];
      int j=0;
      for(int i=0; i<mpi_size; i++)
      {
        if(ownership_list[i] != 0)
        {
          new_mpi_rank_list[j++] = mpi_rank_list[i];
        }
      }

      ::MPI_Group_incl(local_group, n, new_mpi_rank_list, &extracted_group);

      ::MPI_Comm_create(to_mpi_comm(local_comm->mpi_comm), extracted_group, &extracted_comm);

      ::MPI_Comm mpi_inter_comm;

      int local_leader_rank_in_extracted_comm;

      if(is_local_leader)
      {
        ::MPI_Comm_rank(extracted_comm, &local_leader_rank_in_extracted_comm);
      }

      ::MPI_Bcast(&local_leader_rank_in_extracted_comm, 1, to_mpi_type(MPI_INT), local_comm->ep_rank_map->at(local_leader).second, to_mpi_comm(local_comm->mpi_comm));

      ::MPI_Comm *intracomm = new ::MPI_Comm;
      bool is_real_involved = ownership && extracted_comm != to_mpi_comm(MPI_COMM_NULL->mpi_comm);

      if(is_real_involved)
      {
        ::MPI_Intercomm_create(extracted_comm, local_leader_rank_in_extracted_comm, to_mpi_comm(peer_comm->mpi_comm), remote_leader_rank_in_peer_mpi, tag, &mpi_inter_comm);
        ::MPI_Intercomm_merge(mpi_inter_comm, !priority, intracomm);
      }

      /////////////////////////////////////
      // step 5 : determine new num_ep   //
      /////////////////////////////////////

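      // The merged intracomm spans both former groups, so this MPI process
      // must count the endpoints it hosts on both sides of the exchange.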
      int num_ep_count=0;

      for(int i=0; i<ep_size; i++)
      {
        if(rank_in_world == ranks_in_world_local[i])
          num_ep_count++;
      }

      for(int i=0; i<remote_ep_size; i++)
      {
        if(rank_in_world == ranks_in_world_remote[i])
          num_ep_count++;
      }

      ///////////////////////////////////////////////////
      // step 6 : create endpoints from extracted_comm //
      ///////////////////////////////////////////////////

      if(is_real_involved)
      {
        MPI_Comm *ep_comm;
        MPI_Info info;
        MPI_Comm_create_endpoints(intracomm, num_ep_count, info, ep_comm);

#ifdef _showinfo
        printf("new ep_comm->ep_comm_ptr->intercomm->mpi_inter_comm = %p\n", mpi_inter_comm);
#endif

#pragma omp critical (write_to_tag_list)
        intercomm_list.push_back(make_pair(make_pair(tag, min(local_leader_rank_in_world, remote_leader_rank_in_world)), make_pair(ep_comm, make_pair(num_ep_count, 0))));
#pragma omp flush
#ifdef _showinfo
        for(int i=0; i<num_ep_count; i++)
          printf("peer_rank = %d, ep_comm = %p, ep_comm[%d] -> new_ep_rank = %d\n", peer_comm->ep_comm_ptr->size_rank_info[0].first, ep_comm, i, ep_comm[i]->ep_comm_ptr->size_rank_info[0].first);
#endif
        ::MPI_Comm_free(intracomm);
        delete intracomm;
      }
      else
      {
        delete intracomm; // never merged into on this process; free the allocation
      }

      delete[] ownership_list;   // allocated with new[], so freed with delete[]
      delete[] mpi_rank_list;
      delete[] new_mpi_rank_list;
    }

    int repeated=0;
    for(int i=0; i<remote_ep_size; i++)
    {
      if(rank_in_world == ranks_in_world_remote[i])
        repeated++;
    }

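    // Compute this endpoint's local rank in the new communicator: when the
    // process is not the owner, its endpoints are placed after the remote
    // group's endpoints hosted on the same MPI process, hence the offset.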
    int new_ep_rank_loc = ownership==1 ? ep_rank_loc : ep_rank_loc + repeated;

#ifdef _showinfo

    MPI_Barrier(peer_comm);
    MPI_Barrier(peer_comm);
    printf("peer_rank = %d, ep_rank_loc = %d, ownership = %d, repeated = %d, new_ep_rank_loc = %d\n", peer_comm->ep_comm_ptr->size_rank_info[0].first, ep_rank_loc, ownership, repeated, new_ep_rank_loc);
    MPI_Barrier(peer_comm);
    MPI_Barrier(peer_comm);

#endif

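    // Every endpoint now polls intercomm_list until the creating thread has
    // published the entry matching (tag, leader pair), claims its own slot,
    // and the last reader erases the entry.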
#pragma omp flush
#pragma omp critical (read_from_intercomm_list)
    {
      bool flag=true;
      while(flag)
      {
        for(std::list<std::pair<std::pair<int, int>, std::pair<MPI_Comm*, std::pair<int, int> > > >::iterator iter = intercomm_list.begin(); iter != intercomm_list.end(); iter++)
        {
          if(iter->first == make_pair(tag, min(local_leader_rank_in_world, remote_leader_rank_in_world)))
          {
            *newintercomm = iter->second.first[new_ep_rank_loc];

            iter->second.second.second++;

            if(iter->second.second.first == iter->second.second.second)
              intercomm_list.erase(iter);

            flag = false;
            break;
          }
        }
      }
    }

#ifdef _showinfo

    MPI_Barrier(peer_comm);
    MPI_Barrier(peer_comm);
    printf("peer_rank = %d, test_rank = %d\n", peer_comm->ep_comm_ptr->size_rank_info[0].first, (*newintercomm)->ep_comm_ptr->size_rank_info[0].first);
    MPI_Barrier(peer_comm);
    MPI_Barrier(peer_comm);

#endif

    //////////////////////////////////////////////////////////
    // step 7 : create intercomm_rank_map for local leaders //
    //////////////////////////////////////////////////////////

    (*newintercomm)->is_intercomm = true;

    (*newintercomm)->inter_rank_map = new INTER_RANK_MAP;

    int rank_info[2];
    rank_info[0] = ep_rank;
    rank_info[1] = (*newintercomm)->ep_comm_ptr->size_rank_info[0].first;

#ifdef _showinfo
    printf("priority = %d, ep_rank = %d, new_ep_rank = %d\n", priority, rank_info[0], rank_info[1]);
#endif

    int *local_rank_info  = new int[2*ep_size];
    int *remote_rank_info = new int[2*remote_ep_size];

    MPI_Allgather(rank_info, 2, MPI_INT, local_rank_info, 2, MPI_INT, local_comm);

    if(is_local_leader)
    {
      MPI_Request request;
      MPI_Status status;

      if(priority)
      {
        MPI_Isend(local_rank_info, 2*ep_size, MPI_INT, remote_leader, tag, peer_comm, &request);
        MPI_Wait(&request, &status);

        MPI_Irecv(remote_rank_info, 2*remote_ep_size, MPI_INT, remote_leader, tag, peer_comm, &request);
        MPI_Wait(&request, &status);
      }
      else
      {
        MPI_Irecv(remote_rank_info, 2*remote_ep_size, MPI_INT, remote_leader, tag, peer_comm, &request);
        MPI_Wait(&request, &status);

        MPI_Isend(local_rank_info, 2*ep_size, MPI_INT, remote_leader, tag, peer_comm, &request);
        MPI_Wait(&request, &status);
      }
    }

    MPI_Bcast(remote_rank_info, 2*remote_ep_size, MPI_INT, local_leader, local_comm);

    for(int i=0; i<remote_ep_size; i++)
    {
      (*newintercomm)->inter_rank_map->insert(make_pair(remote_rank_info[2*i], remote_rank_info[2*i+1]));
    }

    (*newintercomm)->ep_comm_ptr->size_rank_info[0] = local_comm->ep_comm_ptr->size_rank_info[0];

    delete[] local_rank_info;
    delete[] remote_rank_info;
    delete[] ranks_in_world_local;
    delete[] ranks_in_world_remote;
    /*
    if((*newintercomm)->ep_comm_ptr->size_rank_info[0].second == 1)
    {
      for(INTER_RANK_MAP::iterator it = (*newintercomm)->inter_rank_map->begin(); it != (*newintercomm)->inter_rank_map->end(); it++)
      {
        printf("inter_rank_map[%d] = %d\n", it->first, it->second);
      }
    }
    */

    return MPI_SUCCESS;
  }

  int MPI_Intercomm_create_mpi(MPI_Comm local_comm, int local_leader, MPI_Comm peer_comm, int remote_leader, int tag, MPI_Comm *newintercomm)
  {
    printf("MPI_Intercomm_create_mpi not yet implemented\n");
    MPI_Abort(local_comm, 0);
    return 0; // unreachable: MPI_Abort does not return
  }

}
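
/* Usage sketch (illustrative only, not part of the library). Connects two
   endpoint groups through a peer communicator; the handles local_comm and
   peer_comm, the value of remote_leader, and the tag 9999 are assumptions
   standing in for whatever the caller has already set up:

     ep_lib::MPI_Comm newintercomm;
     // local leader is endpoint rank 0 of local_comm; remote_leader is the
     // remote leader's endpoint rank in peer_comm; 9999 is a matching tag
     // that must be identical on both sides.
     ep_lib::MPI_Intercomm_create(local_comm, 0, peer_comm, remote_leader,
                                  9999, &newintercomm);
*/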
---|