Changeset 1638 for XIOS/trunk/extern/remap/src/mapper.cpp
- Timestamp: 01/22/19 16:15:03
- File: XIOS/trunk/extern/remap/src/mapper.cpp (1 edited)
XIOS/trunk/extern/remap/src/mapper.cpp
--- XIOS/trunk/extern/remap/src/mapper.cpp (r1614)
+++ XIOS/trunk/extern/remap/src/mapper.cpp (r1638)

@@ lines 32-37 @@
 
   int mpiRank, mpiSize;
-  MPI_Comm_rank(communicator, &mpiRank);
-  MPI_Comm_size(communicator, &mpiSize);
+  ep_lib::MPI_Comm_rank(communicator, &mpiRank);
+  ep_lib::MPI_Comm_size(communicator, &mpiSize);
 
   sourceElements.reserve(nbCells);

@@ lines 43-47 @@
   long int offset ;
   long int nb=nbCells ;
-  MPI_Scan(&nb,&offset,1,MPI_LONG,MPI_SUM,communicator) ;
+  ep_lib::MPI_Scan(&nb,&offset,1,EP_LONG,EP_SUM,communicator) ;
   offset=offset-nb ;
   for(int i=0;i<nbCells;i++) sourceGlobalId[i]=offset+i ;

@@ lines 70-75 @@
 
   int mpiRank, mpiSize;
-  MPI_Comm_rank(communicator, &mpiRank);
-  MPI_Comm_size(communicator, &mpiSize);
+  ep_lib::MPI_Comm_rank(communicator, &mpiRank);
+  ep_lib::MPI_Comm_size(communicator, &mpiSize);
 
   targetElements.reserve(nbCells);

@@ lines 81-85 @@
   long int offset ;
   long int nb=nbCells ;
-  MPI_Scan(&nb,&offset,1,MPI_LONG,MPI_SUM,communicator) ;
+  ep_lib::MPI_Scan(&nb,&offset,1,EP_LONG,EP_SUM,communicator) ;
   offset=offset-nb ;
   for(int i=0;i<nbCells;i++) targetGlobalId[i]=offset+i ;

@@ lines 117-122 @@
   vector<double> timings;
   int mpiSize, mpiRank;
-  MPI_Comm_size(communicator, &mpiSize);
-  MPI_Comm_rank(communicator, &mpiRank);
+  ep_lib::MPI_Comm_size(communicator, &mpiSize);
+  ep_lib::MPI_Comm_rank(communicator, &mpiRank);
 
   this->buildSSTree(sourceMesh, targetMesh);

@@ lines 173-178 @@
 {
   int mpiSize, mpiRank;
-  MPI_Comm_size(communicator, &mpiSize);
-  MPI_Comm_rank(communicator, &mpiRank);
+  ep_lib::MPI_Comm_size(communicator, &mpiSize);
+  ep_lib::MPI_Comm_rank(communicator, &mpiRank);
 
   /* create list of intersections (super mesh elements) for each rank */

@@ lines 235-239 @@
   /* communicate sizes of source elements to be sent (index lists and later values and gradients) */
   int *nbRecvElement = new int[mpiSize];
-  MPI_Alltoall(nbSendElement, 1, MPI_INT, nbRecvElement, 1, MPI_INT, communicator);
+  ep_lib::MPI_Alltoall(nbSendElement, 1, EP_INT, nbRecvElement, 1, EP_INT, communicator);
 
   /* communicate indices of source elements on other ranks whoes value and gradient we need (since intersection) */

@@ lines 246-256 @@
   Coord **sendGrad = new Coord*[mpiSize];
   GloId **sendNeighIds = new GloId*[mpiSize];
-  MPI_Request *sendRequest = new MPI_Request[5*mpiSize];
-  MPI_Request *recvRequest = new MPI_Request[5*mpiSize];
+  ep_lib::MPI_Request *sendRequest = new ep_lib::MPI_Request[5*mpiSize];
+  ep_lib::MPI_Request *recvRequest = new ep_lib::MPI_Request[5*mpiSize];
   for (int rank = 0; rank < mpiSize; rank++)
   {
     if (nbSendElement[rank] > 0)
     {
-      MPI_Issend(sendElement[rank], nbSendElement[rank], MPI_INT, rank, 0, communicator, &sendRequest[nbSendRequest]);
+      ep_lib::MPI_Issend(sendElement[rank], nbSendElement[rank], EP_INT, rank, 0, communicator, &sendRequest[nbSendRequest]);
       nbSendRequest++;
     }

@@ lines 271-282 @@
         sendNeighIds[rank] = new GloId[nbRecvElement[rank]];
       }
-      MPI_Irecv(recvElement[rank], nbRecvElement[rank], MPI_INT, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+      ep_lib::MPI_Irecv(recvElement[rank], nbRecvElement[rank], EP_INT, rank, 0, communicator, &recvRequest[nbRecvRequest]);
       nbRecvRequest++;
     }
   }
-  MPI_Status *status = new MPI_Status[5*mpiSize];
+  ep_lib::MPI_Status *status = new ep_lib::MPI_Status[5*mpiSize];
 
-  MPI_Waitall(nbSendRequest, sendRequest, status);
-  MPI_Waitall(nbRecvRequest, recvRequest, status);
+  ep_lib::MPI_Waitall(nbSendRequest, sendRequest, status);
+  ep_lib::MPI_Waitall(nbRecvRequest, recvRequest, status);
 
   /* for all indices that have been received from requesting ranks: pack values and gradients, then send */

@@ lines 310-324 @@
         sendNeighIds[rank][j] = sstree.localElements[recvElement[rank][j]].src_id;
       }
-      MPI_Issend(sendValue[rank], nbRecvElement[rank], MPI_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
+      ep_lib::MPI_Issend(sendValue[rank], nbRecvElement[rank], EP_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
       nbSendRequest++;
-      MPI_Issend(sendArea[rank], nbRecvElement[rank], MPI_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
+      ep_lib::MPI_Issend(sendArea[rank], nbRecvElement[rank], EP_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
       nbSendRequest++;
-      MPI_Issend(sendGivenArea[rank], nbRecvElement[rank], MPI_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
+      ep_lib::MPI_Issend(sendGivenArea[rank], nbRecvElement[rank], EP_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
       nbSendRequest++;
       if (order == 2)
       {
-        MPI_Issend(sendGrad[rank], 3*nbRecvElement[rank]*(NMAX+1), MPI_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
+        ep_lib::MPI_Issend(sendGrad[rank], 3*nbRecvElement[rank]*(NMAX+1), EP_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
         nbSendRequest++;
-        MPI_Issend(sendNeighIds[rank], 4*nbRecvElement[rank]*(NMAX+1), MPI_INT, rank, 0, communicator, &sendRequest[nbSendRequest]);
+        ep_lib::MPI_Issend(sendNeighIds[rank], 4*nbRecvElement[rank]*(NMAX+1), EP_INT, rank, 0, communicator, &sendRequest[nbSendRequest]);
         //ym --> attention taille GloId
         nbSendRequest++;

@@ lines 326-330 @@
       else
       {
-        MPI_Issend(sendNeighIds[rank], 4*nbRecvElement[rank], MPI_INT, rank, 0, communicator, &sendRequest[nbSendRequest]);
+        ep_lib::MPI_Issend(sendNeighIds[rank], 4*nbRecvElement[rank], EP_INT, rank, 0, communicator, &sendRequest[nbSendRequest]);
         //ym --> attention taille GloId
         nbSendRequest++;

@@ lines 333-348 @@
     if (nbSendElement[rank] > 0)
     {
-      MPI_Irecv(recvValue[rank], nbSendElement[rank], MPI_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+      ep_lib::MPI_Irecv(recvValue[rank], nbSendElement[rank], EP_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]);
       nbRecvRequest++;
-      MPI_Irecv(recvArea[rank], nbSendElement[rank], MPI_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+      ep_lib::MPI_Irecv(recvArea[rank], nbSendElement[rank], EP_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]);
       nbRecvRequest++;
-      MPI_Irecv(recvGivenArea[rank], nbSendElement[rank], MPI_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+      ep_lib::MPI_Irecv(recvGivenArea[rank], nbSendElement[rank], EP_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]);
       nbRecvRequest++;
       if (order == 2)
       {
-        MPI_Irecv(recvGrad[rank], 3*nbSendElement[rank]*(NMAX+1),
-                  MPI_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+        ep_lib::MPI_Irecv(recvGrad[rank], 3*nbSendElement[rank]*(NMAX+1),
+                  EP_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]);
         nbRecvRequest++;
-        MPI_Irecv(recvNeighIds[rank], 4*nbSendElement[rank]*(NMAX+1), MPI_INT, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+        ep_lib::MPI_Irecv(recvNeighIds[rank], 4*nbSendElement[rank]*(NMAX+1), EP_INT, rank, 0, communicator, &recvRequest[nbRecvRequest]);
         //ym --> attention taille GloId
         nbRecvRequest++;

@@ lines 350-354 @@
       else
       {
-        MPI_Irecv(recvNeighIds[rank], 4*nbSendElement[rank], MPI_INT, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+        ep_lib::MPI_Irecv(recvNeighIds[rank], 4*nbSendElement[rank], EP_INT, rank, 0, communicator, &recvRequest[nbRecvRequest]);
         //ym --> attention taille GloId
         nbRecvRequest++;

@@ lines 357-362 @@
   }
 
-  MPI_Waitall(nbSendRequest, sendRequest, status);
-  MPI_Waitall(nbRecvRequest, recvRequest, status);
+  ep_lib::MPI_Waitall(nbSendRequest, sendRequest, status);
+  ep_lib::MPI_Waitall(nbRecvRequest, recvRequest, status);
 
 

@@ lines 487-492 @@
 {
   int mpiSize, mpiRank;
-  MPI_Comm_size(communicator, &mpiSize);
-  MPI_Comm_rank(communicator, &mpiRank);
+  ep_lib::MPI_Comm_size(communicator, &mpiSize);
+  ep_lib::MPI_Comm_rank(communicator, &mpiRank);
 
   vector<Node> *routingList = new vector<Node>[mpiSize];

@@ lines 522-527 @@
   }
 
-  MPI_Alltoall(nbSendNode, 1, MPI_INT, nbRecvNode, 1, MPI_INT, communicator);
-  MPI_Alltoall(sendMessageSize, 1, MPI_INT, recvMessageSize, 1, MPI_INT, communicator);
+  ep_lib::MPI_Alltoall(nbSendNode, 1, EP_INT, nbRecvNode, 1, EP_INT, communicator);
+  ep_lib::MPI_Alltoall(sendMessageSize, 1, EP_INT, recvMessageSize, 1, EP_INT, communicator);
 
   char **sendBuffer = new char*[mpiSize];

@@ lines 549-555 @@
   int nbSendRequest = 0;
   int nbRecvRequest = 0;
-  MPI_Request *sendRequest = new MPI_Request[mpiSize];
-  MPI_Request *recvRequest = new MPI_Request[mpiSize];
-  MPI_Status *status = new MPI_Status[mpiSize];
+  ep_lib::MPI_Request *sendRequest = new ep_lib::MPI_Request[mpiSize];
+  ep_lib::MPI_Request *recvRequest = new ep_lib::MPI_Request[mpiSize];
+  ep_lib::MPI_Status *status = new ep_lib::MPI_Status[mpiSize];
 
   for (int rank = 0; rank < mpiSize; rank++)

@@ lines 557-572 @@
     if (nbSendNode[rank] > 0)
     {
-      MPI_Issend(sendBuffer[rank], sendMessageSize[rank], MPI_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]);
+      ep_lib::MPI_Issend(sendBuffer[rank], sendMessageSize[rank], EP_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]);
       nbSendRequest++;
     }
     if (nbRecvNode[rank] > 0)
     {
-      MPI_Irecv(recvBuffer[rank], recvMessageSize[rank], MPI_CHAR, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+      ep_lib::MPI_Irecv(recvBuffer[rank], recvMessageSize[rank], EP_CHAR, rank, 0, communicator, &recvRequest[nbRecvRequest]);
       nbRecvRequest++;
     }
   }
 
-  MPI_Waitall(nbRecvRequest, recvRequest, status);
-  MPI_Waitall(nbSendRequest, sendRequest, status);
+  ep_lib::MPI_Waitall(nbRecvRequest, recvRequest, status);
+  ep_lib::MPI_Waitall(nbSendRequest, sendRequest, status);
 
   for (int rank = 0; rank < mpiSize; rank++)

@@ lines 615-621 @@
 
 
-  MPI_Barrier(communicator);
-  MPI_Alltoall(nbSendNode, 1, MPI_INT, nbRecvNode, 1, MPI_INT, communicator);
-  MPI_Alltoall(sendMessageSize, 1, MPI_INT, recvMessageSize, 1, MPI_INT, communicator);
+  ep_lib::MPI_Barrier(communicator);
+  ep_lib::MPI_Alltoall(nbSendNode, 1, EP_INT, nbRecvNode, 1, EP_INT, communicator);
+  ep_lib::MPI_Alltoall(sendMessageSize, 1, EP_INT, recvMessageSize, 1, EP_INT, communicator);
 
   for (int rank = 0; rank < mpiSize; rank++)

@@ lines 629-644 @@
     if (nbSendNode[rank] > 0)
     {
-      MPI_Issend(sendBuffer2[rank], sendMessageSize[rank], MPI_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]);
+      ep_lib::MPI_Issend(sendBuffer2[rank], sendMessageSize[rank], EP_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]);
       nbSendRequest++;
     }
     if (nbRecvNode[rank] > 0)
     {
-      MPI_Irecv(recvBuffer2[rank], recvMessageSize[rank], MPI_CHAR, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+      ep_lib::MPI_Irecv(recvBuffer2[rank], recvMessageSize[rank], EP_CHAR, rank, 0, communicator, &recvRequest[nbRecvRequest]);
       nbRecvRequest++;
     }
   }
 
-  MPI_Waitall(nbRecvRequest, recvRequest, status);
-  MPI_Waitall(nbSendRequest, sendRequest, status);
+  ep_lib::MPI_Waitall(nbRecvRequest, recvRequest, status);
+  ep_lib::MPI_Waitall(nbSendRequest, sendRequest, status);
 
   int nbNeighbourNodes = 0;

@@ lines 725-732 @@
 {
   int mpiSize, mpiRank;
-  MPI_Comm_size(communicator, &mpiSize);
-  MPI_Comm_rank(communicator, &mpiRank);
-
-  MPI_Barrier(communicator);
+  ep_lib::MPI_Comm_size(communicator, &mpiSize);
+  ep_lib::MPI_Comm_rank(communicator, &mpiRank);
+
+  ep_lib::MPI_Barrier(communicator);
 
   vector<Node> *routingList = new vector<Node>[mpiSize];

@@ lines 753-757 @@
     cout << endl;
   }
-  MPI_Barrier(communicator);
+  ep_lib::MPI_Barrier(communicator);
 
   int *nbSendNode = new int[mpiSize];

@@ lines 771-776 @@
   }
 
-  MPI_Alltoall(nbSendNode, 1, MPI_INT, nbRecvNode, 1, MPI_INT, communicator);
-  MPI_Alltoall(sentMessageSize, 1, MPI_INT, recvMessageSize, 1, MPI_INT, communicator);
+  ep_lib::MPI_Alltoall(nbSendNode, 1, EP_INT, nbRecvNode, 1, EP_INT, communicator);
+  ep_lib::MPI_Alltoall(sentMessageSize, 1, EP_INT, recvMessageSize, 1, EP_INT, communicator);
 
   int total = 0;

@@ lines 805-811 @@
   int nbSendRequest = 0;
   int nbRecvRequest = 0;
-  MPI_Request *sendRequest = new MPI_Request[mpiSize];
-  MPI_Request *recvRequest = new MPI_Request[mpiSize];
-  MPI_Status *status = new MPI_Status[mpiSize];
+  ep_lib::MPI_Request *sendRequest = new ep_lib::MPI_Request[mpiSize];
+  ep_lib::MPI_Request *recvRequest = new ep_lib::MPI_Request[mpiSize];
+  ep_lib::MPI_Status *status = new ep_lib::MPI_Status[mpiSize];
 
   for (int rank = 0; rank < mpiSize; rank++)

@@ lines 813-828 @@
     if (nbSendNode[rank] > 0)
     {
-      MPI_Issend(sendBuffer[rank], sentMessageSize[rank], MPI_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]);
+      ep_lib::MPI_Issend(sendBuffer[rank], sentMessageSize[rank], EP_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]);
       nbSendRequest++;
     }
     if (nbRecvNode[rank] > 0)
     {
-      MPI_Irecv(recvBuffer[rank], recvMessageSize[rank], MPI_CHAR, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+      ep_lib::MPI_Irecv(recvBuffer[rank], recvMessageSize[rank], EP_CHAR, rank, 0, communicator, &recvRequest[nbRecvRequest]);
       nbRecvRequest++;
     }
   }
 
-  MPI_Waitall(nbRecvRequest, recvRequest, status);
-  MPI_Waitall(nbSendRequest, sendRequest, status);
+  ep_lib::MPI_Waitall(nbRecvRequest, recvRequest, status);
+  ep_lib::MPI_Waitall(nbSendRequest, sendRequest, status);
   char **sendBuffer2 = new char*[mpiSize];
   char **recvBuffer2 = new char*[mpiSize];

@@ lines 883-887 @@
 
   if (verbose >= 2) cout << "Rank " << mpiRank << " Compute (internal) intersection " << cputime() - tic << " s" << endl;
-  MPI_Alltoall(sentMessageSize, 1, MPI_INT, recvMessageSize, 1, MPI_INT, communicator);
+  ep_lib::MPI_Alltoall(sentMessageSize, 1, EP_INT, recvMessageSize, 1, EP_INT, communicator);
 
   for (int rank = 0; rank < mpiSize; rank++)

@@ lines 896-911 @@
     if (sentMessageSize[rank] > 0)
     {
-      MPI_Issend(sendBuffer2[rank], sentMessageSize[rank], MPI_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]);
+      ep_lib::MPI_Issend(sendBuffer2[rank], sentMessageSize[rank], EP_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]);
       nbSendRequest++;
     }
     if (recvMessageSize[rank] > 0)
     {
-      MPI_Irecv(recvBuffer2[rank], recvMessageSize[rank], MPI_CHAR, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+      ep_lib::MPI_Irecv(recvBuffer2[rank], recvMessageSize[rank], EP_CHAR, rank, 0, communicator, &recvRequest[nbRecvRequest]);
       nbRecvRequest++;
     }
   }
 
-  MPI_Waitall(nbRecvRequest, recvRequest, status);
-  MPI_Waitall(nbSendRequest, sendRequest, status);
+  ep_lib::MPI_Waitall(nbRecvRequest, recvRequest, status);
+  ep_lib::MPI_Waitall(nbSendRequest, sendRequest, status);
 
   delete [] sendRequest;
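The changeset is mechanical throughout: every direct MPI call, request/status type, and datatype or reduction constant used by the remapper is routed through the ep_lib wrapper layer (ep_lib::MPI_* functions and types, EP_* constants) instead of the MPI library itself, presumably so the code can be built against XIOS's endpoint ("ep") communication layer. As a rough illustration only (the names below are assumptions, not the actual XIOS ep_lib headers), a wrapper of this kind can be a thin namespace that keeps the MPI signatures and forwards straight to the underlying library when no endpoint support is needed:

// Hypothetical pass-through sketch of an "ep_lib"-style wrapper namespace.
// The real XIOS ep_lib layer differs; this only shows the call-forwarding
// pattern implied by the diff above.
#include <mpi.h>

namespace ep_lib_sketch
{
  // Wrapped datatype/operation constants, mirroring EP_INT, EP_LONG, EP_SUM, ...
  const MPI_Datatype EP_INT    = MPI_INT;
  const MPI_Datatype EP_LONG   = MPI_LONG;
  const MPI_Datatype EP_DOUBLE = MPI_DOUBLE;
  const MPI_Datatype EP_CHAR   = MPI_CHAR;
  const MPI_Op       EP_SUM    = MPI_SUM;

  // Forwarding wrappers that keep the standard MPI signatures, so call sites
  // only need the namespace qualifier and the EP_* constants.
  inline int MPI_Comm_rank(MPI_Comm comm, int* rank) { return ::MPI_Comm_rank(comm, rank); }
  inline int MPI_Comm_size(MPI_Comm comm, int* size) { return ::MPI_Comm_size(comm, size); }
  inline int MPI_Scan(void* sendbuf, void* recvbuf, int count,
                      MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
  { return ::MPI_Scan(sendbuf, recvbuf, count, datatype, op, comm); }
}

Keeping the wrapper signatures identical to MPI is what makes the edit above a pure find-and-replace: only the namespace qualifier and the datatype/operation constants change, while buffer arguments, counts, tags, and request handling stay untouched.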