Changeset 1638 for XIOS/trunk/src/transformation

- Timestamp: 01/22/19 16:15:03
- Location: XIOS/trunk/src/transformation
- Files: 7 edited
Legend:
- unprefixed lines: unmodified context
- lines prefixed with "-": removed
- lines prefixed with "+": added in r1638
XIOS/trunk/src/transformation/axis_algorithm_interpolate.cpp (diff from r1622 to r1638)

      int* recvCount=new int[nbClient];
-     MPI_Allgather(&numValue,1,MPI_INT,recvCount,1,MPI_INT,client->intraComm);
+     ep_lib::MPI_Allgather(&numValue,1,EP_INT,recvCount,1,EP_INT,client->intraComm);

      int* displ=new int[nbClient];
…
      // Each client have enough global info of axis
-     MPI_Allgatherv(sendIndexBuff,numValue,MPI_INT,recvIndexBuff,recvCount,displ,MPI_INT,client->intraComm);
-     MPI_Allgatherv(sendValueBuff,numValue,MPI_DOUBLE,&(recvBuff[0]),recvCount,displ,MPI_DOUBLE,client->intraComm);
+     ep_lib::MPI_Allgatherv(sendIndexBuff,numValue,EP_INT,recvIndexBuff,recvCount,displ,EP_INT,client->intraComm);
+     ep_lib::MPI_Allgatherv(sendValueBuff,numValue,EP_DOUBLE,&(recvBuff[0]),recvCount,displ,EP_DOUBLE,client->intraComm);

      for (int idx = 0; idx < srcSize; ++idx)
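The same substitution recurs in every file of this changeset: direct MPI calls are routed through the ep_lib wrappers and the MPI_* datatype constants become their EP_* counterparts. A minimal sketch of the calling pattern, assuming a build of XIOS in which the ep_lib declarations are available through a header (the include name below is only illustrative) and the wrapper signatures mirror standard MPI:

    // Sketch only: "ep_lib.hpp" is a hypothetical include name standing in for
    // the XIOS headers that declare ep_lib::MPI_* and the EP_* datatype constants.
    #include "ep_lib.hpp"

    void gatherCounts(int numValue, int nbClient, ep_lib::MPI_Comm comm)
    {
      int* recvCount = new int[nbClient];
      // Before this changeset: MPI_Allgather(&numValue, 1, MPI_INT, recvCount, 1, MPI_INT, comm);
      ep_lib::MPI_Allgather(&numValue, 1, EP_INT, recvCount, 1, EP_INT, comm);
      delete [] recvCount;
    }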
XIOS/trunk/src/transformation/axis_algorithm_inverse.cpp (diff from r1622 to r1638)

        sendRankSizeMap[itIndex->first] = sendSize;
      }
-     MPI_Allgather(&connectedClient,1,MPI_INT,recvCount,1,MPI_INT,client->intraComm);
+     ep_lib::MPI_Allgather(&connectedClient,1,EP_INT,recvCount,1,EP_INT,client->intraComm);

      displ[0]=0 ;
…
      int* recvRankBuff=new int[recvSize];
      int* recvSizeBuff=new int[recvSize];
-     MPI_Allgatherv(sendRankBuff,connectedClient,MPI_INT,recvRankBuff,recvCount,displ,MPI_INT,client->intraComm);
-     MPI_Allgatherv(sendSizeBuff,connectedClient,MPI_INT,recvSizeBuff,recvCount,displ,MPI_INT,client->intraComm);
+     ep_lib::MPI_Allgatherv(sendRankBuff,connectedClient,EP_INT,recvRankBuff,recvCount,displ,EP_INT,client->intraComm);
+     ep_lib::MPI_Allgatherv(sendSizeBuff,connectedClient,EP_INT,recvSizeBuff,recvCount,displ,EP_INT,client->intraComm);
      for (int i = 0; i < nbClient; ++i)
      {
…

      // Sending global index of grid source to corresponding process as well as the corresponding mask
-     std::vector<MPI_Request> requests;
-     std::vector<MPI_Status> status;
+     std::vector<ep_lib::MPI_Request> requests;
+     std::vector<ep_lib::MPI_Status> status;
      std::unordered_map<int, unsigned long* > recvGlobalIndexSrc;
      std::unordered_map<int, double* > sendValueToDest;
…
        sendValueToDest[recvRank] = new double [recvSize];

-       requests.push_back(MPI_Request());
-       MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests.back());
+       requests.push_back(ep_lib::MPI_Request());
+       ep_lib::MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, EP_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests.back());
      }
…

        // Send global index source and mask
-       requests.push_back(MPI_Request());
-       MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests.back());
+       requests.push_back(ep_lib::MPI_Request());
+       ep_lib::MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, EP_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests.back());
      }

      status.resize(requests.size());
-     MPI_Waitall(requests.size(), &requests[0], &status[0]);
+     ep_lib::MPI_Waitall(requests.size(), &requests[0], &status[0]);

-     std::vector<MPI_Request>().swap(requests);
-     std::vector<MPI_Status>().swap(status);
+     std::vector<ep_lib::MPI_Request>().swap(requests);
+     std::vector<ep_lib::MPI_Status>().swap(status);

      // Okie, on destination side, we will wait for information of masked index of source
…
        int recvSize = itSend->second;

-       requests.push_back(MPI_Request());
-       MPI_Irecv(recvValueFromSrc[recvRank], recvSize, MPI_DOUBLE, recvRank, 48, client->intraComm, &requests.back());
+       requests.push_back(ep_lib::MPI_Request());
+       ep_lib::MPI_Irecv(recvValueFromSrc[recvRank], recvSize, EP_DOUBLE, recvRank, 48, client->intraComm, &requests.back());
      }
…
        }
        // Okie, now inform the destination which source index are masked
-       requests.push_back(MPI_Request());
-       MPI_Isend(sendValueToDest[recvRank], recvSize, MPI_DOUBLE, recvRank, 48, client->intraComm, &requests.back());
+       requests.push_back(ep_lib::MPI_Request());
+       ep_lib::MPI_Isend(sendValueToDest[recvRank], recvSize, EP_DOUBLE, recvRank, 48, client->intraComm, &requests.back());
…
      status.resize(requests.size());
-     MPI_Waitall(requests.size(), &requests[0], &status[0]);
+     ep_lib::MPI_Waitall(requests.size(), &requests[0], &status[0]);
XIOS/trunk/src/transformation/domain_algorithm_generate_rectilinear.cpp (diff from r1622 to r1638)

      StdSize hashValue = hashFunc.hashVec(globalAxisIndex);
      std::vector<StdSize> recvBuff(client->clientSize);
-     MPI_Gather(&hashValue, 1, MPI_UNSIGNED_LONG,
-                &recvBuff[0], 1, MPI_UNSIGNED_LONG,
+     ep_lib::MPI_Gather(&hashValue, 1, EP_UNSIGNED_LONG,
+                &recvBuff[0], 1, EP_UNSIGNED_LONG,
                 0,
                 client->intraComm);
…
      }

-     MPI_Bcast(&nbLocalAxis[0], nbAxis, MPI_INT,
+     ep_lib::MPI_Bcast(&nbLocalAxis[0], nbAxis, EP_INT,
                0, client->intraComm);
      }
XIOS/trunk/src/transformation/domain_algorithm_interpolate.cpp (diff from r1622 to r1638)

      CContextClient* client=context->client;

-     MPI_Comm poleComme(MPI_COMM_NULL);
+     ep_lib::MPI_Comm poleComme(EP_COMM_NULL);
+     #ifdef _usingMPI
      MPI_Comm_split(client->intraComm, interMapValuePole.empty() ? MPI_UNDEFINED : 1, 0, &poleComme);
-     if (MPI_COMM_NULL != poleComme)
+     #elif _usingEP
+     ep_lib::MPI_Comm_split(client->intraComm, interMapValuePole.empty() ? 0 : 1, 0, &poleComme);
+     #endif
+     if (EP_COMM_NULL != poleComme)
      {
        int nbClientPole;
-       MPI_Comm_size(poleComme, &nbClientPole);
+       ep_lib::MPI_Comm_size(poleComme, &nbClientPole);

        std::map<int,std::vector<std::pair<int,double> > >::iterator itePole = interMapValuePole.end(), itPole,
…
        std::vector<int> recvCount(nbClientPole,0);
        std::vector<int> displ(nbClientPole,0);
-       MPI_Allgather(&nbWeight,1,MPI_INT,&recvCount[0],1,MPI_INT,poleComme) ;
+       ep_lib::MPI_Allgather(&nbWeight,1,EP_INT,&recvCount[0],1,EP_INT,poleComme) ;

        displ[0]=0;
…

        // Gather all index and weight for pole
-       MPI_Allgatherv(&sendSourceIndexBuff[0],nbWeight,MPI_INT,&recvSourceIndexBuff[0],&recvCount[0],&displ[0],MPI_INT,poleComme);
-       MPI_Allgatherv(&sendSourceWeightBuff[0],nbWeight,MPI_DOUBLE,&recvSourceWeightBuff[0],&recvCount[0],&displ[0],MPI_DOUBLE,poleComme);
+       ep_lib::MPI_Allgatherv(&sendSourceIndexBuff[0],nbWeight,EP_INT,&recvSourceIndexBuff[0],&recvCount[0],&displ[0],EP_INT,poleComme);
+       ep_lib::MPI_Allgatherv(&sendSourceWeightBuff[0],nbWeight,EP_DOUBLE,&recvSourceWeightBuff[0],&recvCount[0],&displ[0],EP_DOUBLE,poleComme);

        std::map<int,double> recvTemp;
…

-     MPI_Allreduce(sendBuff, recvBuff, nbClient, MPI_INT, MPI_SUM, client->intraComm);
+     ep_lib::MPI_Allreduce(sendBuff, recvBuff, nbClient, EP_INT, EP_SUM, client->intraComm);

      int* sendIndexDestBuff = new int [sendBuffSize];
…
      double* sendWeightBuff = new double [sendBuffSize];

-     std::vector<MPI_Request> sendRequest;
+     std::vector<ep_lib::MPI_Request> sendRequest;

      int sendOffSet = 0, l = 0;
…
        }

-       sendRequest.push_back(MPI_Request());
-       MPI_Isend(sendIndexDestBuff + sendOffSet,
+       sendRequest.push_back(ep_lib::MPI_Request());
+       ep_lib::MPI_Isend(sendIndexDestBuff + sendOffSet,
                  k,
-                 MPI_INT,
+                 EP_INT,
                  itMap->first,
                  MPI_DOMAIN_INTERPOLATION_DEST_INDEX,
                  client->intraComm,
                  &sendRequest.back());
-       sendRequest.push_back(MPI_Request());
-       MPI_Isend(sendIndexSrcBuff + sendOffSet,
+       sendRequest.push_back(ep_lib::MPI_Request());
+       ep_lib::MPI_Isend(sendIndexSrcBuff + sendOffSet,
                  k,
-                 MPI_INT,
+                 EP_INT,
                  itMap->first,
                  MPI_DOMAIN_INTERPOLATION_SRC_INDEX,
                  client->intraComm,
                  &sendRequest.back());
-       sendRequest.push_back(MPI_Request());
-       MPI_Isend(sendWeightBuff + sendOffSet,
+       sendRequest.push_back(ep_lib::MPI_Request());
+       ep_lib::MPI_Isend(sendWeightBuff + sendOffSet,
                  k,
-                 MPI_DOUBLE,
+                 EP_DOUBLE,
                  itMap->first,
                  MPI_DOMAIN_INTERPOLATION_WEIGHT,
…
      while (receivedSize < recvBuffSize)
      {
-       MPI_Status recvStatus;
+       ep_lib::MPI_Status recvStatus;
+       #ifdef _usingMPI
        MPI_Recv((recvIndexDestBuff + receivedSize),
                 recvBuffSize,
-                MPI_INT,
+                EP_INT,
                 MPI_ANY_SOURCE,
                 MPI_DOMAIN_INTERPOLATION_DEST_INDEX,
                 client->intraComm,
                 &recvStatus);
+       #elif _usingEP
+       ep_lib::MPI_Recv((recvIndexDestBuff + receivedSize),
+                recvBuffSize,
+                EP_INT,
+                -2,
+                MPI_DOMAIN_INTERPOLATION_DEST_INDEX,
+                client->intraComm,
+                &recvStatus);
+       #endif

        int countBuff = 0;
-       MPI_Get_count(&recvStatus, MPI_INT, &countBuff);
+       ep_lib::MPI_Get_count(&recvStatus, EP_INT, &countBuff);
+       #ifdef _usingMPI
        clientSrcRank = recvStatus.MPI_SOURCE;
-
-       MPI_Recv((recvIndexSrcBuff + receivedSize),
+       #elif _usingEP
+       clientSrcRank = recvStatus.ep_src;
+       #endif
+
+       ep_lib::MPI_Recv((recvIndexSrcBuff + receivedSize),
                 recvBuffSize,
-                MPI_INT,
+                EP_INT,
                 clientSrcRank,
                 MPI_DOMAIN_INTERPOLATION_SRC_INDEX,
…
                 &recvStatus);

-       MPI_Recv((recvWeightBuff + receivedSize),
+       ep_lib::MPI_Recv((recvWeightBuff + receivedSize),
                 recvBuffSize,
-                MPI_DOUBLE,
+                EP_DOUBLE,
                 clientSrcRank,
                 MPI_DOMAIN_INTERPOLATION_WEIGHT,
…
      }

-     std::vector<MPI_Status> requestStatus(sendRequest.size());
+     std::vector<ep_lib::MPI_Status> requestStatus(sendRequest.size());
+     #ifdef _usingMPI
      MPI_Waitall(sendRequest.size(), &sendRequest[0], MPI_STATUS_IGNORE);
+     #elif _usingEP
+     std::vector<ep_lib::MPI_Status> waitstat(sendRequest.size());
+     ep_lib::MPI_Waitall(sendRequest.size(), &sendRequest[0], &waitstat[0]);
+     #endif

      delete [] sendIndexDestBuff;
…

      /*! Redefined some functions of CONetCDF4 to make use of them */
-     CDomainAlgorithmInterpolate::WriteNetCdf::WriteNetCdf(const StdString& filename, const MPI_Comm comm)
+     CDomainAlgorithmInterpolate::WriteNetCdf::WriteNetCdf(const StdString& filename, const ep_lib::MPI_Comm comm)
        : CNc4DataOutput(NULL, filename, false, false, true, comm, false, true) {}
      int CDomainAlgorithmInterpolate::WriteNetCdf::addDimensionWrite(const StdString& name,
…
      }

-     MPI_Allreduce(&localNbWeight, &globalNbWeight, 1, MPI_LONG, MPI_SUM, client->intraComm);
-     MPI_Scan(&localNbWeight, &startIndex, 1, MPI_LONG, MPI_SUM, client->intraComm);
+     ep_lib::MPI_Allreduce(&localNbWeight, &globalNbWeight, 1, EP_LONG, EP_SUM, client->intraComm);
+     ep_lib::MPI_Scan(&localNbWeight, &startIndex, 1, EP_LONG, EP_SUM, client->intraComm);

      if (0 == globalNbWeight)
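In domain_algorithm_interpolate.cpp the wrapping is not purely mechanical: where a receive must accept any sender, the code is now split between an _usingMPI branch that keeps MPI_ANY_SOURCE and recvStatus.MPI_SOURCE, and an _usingEP branch that uses ep_lib's conventions (-2 as the any-source value, ep_src as the status field). A stripped-down sketch of that pattern, with buffer, count, tag and comm as placeholder names:

    // Sketch, assuming the _usingMPI/_usingEP macros and ep_lib types used by XIOS.
    ep_lib::MPI_Status recvStatus;
    int srcRank;
    #ifdef _usingMPI
      // Standard MPI path: wildcard source, sender read back from MPI_SOURCE.
      MPI_Recv(buffer, count, EP_INT, MPI_ANY_SOURCE, tag, comm, &recvStatus);
      srcRank = recvStatus.MPI_SOURCE;
    #elif _usingEP
      // Endpoint path: -2 plays the role of "any source", sender is in ep_src.
      ep_lib::MPI_Recv(buffer, count, EP_INT, -2, tag, comm, &recvStatus);
      srcRank = recvStatus.ep_src;
    #endif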
XIOS/trunk/src/transformation/domain_algorithm_interpolate.hpp (diff from r1480 to r1638)

      {
      public:
-       WriteNetCdf(const StdString& filename, const MPI_Comm comm);
+       WriteNetCdf(const StdString& filename, const ep_lib::MPI_Comm comm);
        int addDimensionWrite(const StdString& name, const StdSize size = UNLIMITED_DIM);
        int addVariableWrite(const StdString& name, nc_type type,
XIOS/trunk/src/transformation/generic_algorithm_transformation.cpp (diff from r1637 to r1638)

      {
        distributed=domainListSrcP[elementPositionInGridSrc2DomainPosition_[elementPositionInGrid]]->isDistributed() ;
-       MPI_Allreduce(&distributed,&distributed_glo, 1, MPI_INT, MPI_LOR, client->intraComm) ;
+       ep_lib::MPI_Allreduce(&distributed,&distributed_glo, 1, EP_INT, EP_LOR, client->intraComm) ;

      }
…
      {
        distributed=axisListSrcP[elementPositionInGridSrc2AxisPosition_[elementPositionInGrid]]->isDistributed() ;
-       MPI_Allreduce(&distributed,&distributed_glo, 1, MPI_INT, MPI_LOR, client->intraComm) ;
+       ep_lib::MPI_Allreduce(&distributed,&distributed_glo, 1, EP_INT, EP_LOR, client->intraComm) ;
      }
      else //it's a scalar
…
      int sendValue = (computeGlobalIndexOnProc) ? 1 : 0;
      int recvValue = 0;
-     MPI_Allreduce(&sendValue, &recvValue, 1, MPI_INT, MPI_SUM, client->intraComm);
+     ep_lib::MPI_Allreduce(&sendValue, &recvValue, 1, EP_INT, EP_SUM, client->intraComm);
      computeGlobalIndexOnProc = (0 < recvValue);
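The EP_LOR reductions above combine one integer flag per process with a logical OR, so every process ends up knowing whether the source domain or axis is distributed on at least one of them; the later EP_SUM reduction counts how many processes set computeGlobalIndexOnProc. A tiny sketch of the logical-OR idiom, where isLocallyDistributed is a placeholder name:

    // Sketch: 0/1 flag per process, reduced with logical OR; the result is
    // non-zero on every process if any process contributed a 1.
    int distributed = isLocallyDistributed ? 1 : 0;
    int distributed_glo = 0;
    ep_lib::MPI_Allreduce(&distributed, &distributed_glo, 1, EP_INT, EP_LOR, client->intraComm);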
XIOS/trunk/src/transformation/grid_transformation.cpp (diff from r1637 to r1638)

        sendRankSizeMap[itIndex->first] = sendSize;
      }
-     MPI_Allgather(&connectedClient,1,MPI_INT,recvCount,1,MPI_INT,client->intraComm);
+     ep_lib::MPI_Allgather(&connectedClient,1,EP_INT,recvCount,1,EP_INT,client->intraComm);

      displ[0]=0 ;
…
      int* recvRankBuff=new int[recvSize];
      int* recvSizeBuff=new int[recvSize];
-     MPI_Allgatherv(sendRankBuff,connectedClient,MPI_INT,recvRankBuff,recvCount,displ,MPI_INT,client->intraComm);
-     MPI_Allgatherv(sendSizeBuff,connectedClient,MPI_INT,recvSizeBuff,recvCount,displ,MPI_INT,client->intraComm);
+     ep_lib::MPI_Allgatherv(sendRankBuff,connectedClient,EP_INT,recvRankBuff,recvCount,displ,EP_INT,client->intraComm);
+     ep_lib::MPI_Allgatherv(sendSizeBuff,connectedClient,EP_INT,recvSizeBuff,recvCount,displ,EP_INT,client->intraComm);
      for (int i = 0; i < nbClient; ++i)
      {
…

      // Sending global index of grid source to corresponding process as well as the corresponding mask
-     std::vector<MPI_Request> requests;
-     std::vector<MPI_Status> status;
+     std::vector<ep_lib::MPI_Request> requests(recvRankSizeMap.size()*2 + globaIndexWeightFromSrcToDst.size()*2);
+     std::vector<ep_lib::MPI_Status> status;
      std::unordered_map<int, unsigned char* > recvMaskDst;
      std::unordered_map<int, unsigned long* > recvGlobalIndexSrc;
+     int requests_position = 0;
      for (std::map<int,int>::const_iterator itRecv = recvRankSizeMap.begin(); itRecv != recvRankSizeMap.end(); ++itRecv)
      {
…
        recvGlobalIndexSrc[recvRank] = new unsigned long [recvSize];

-       requests.push_back(MPI_Request());
-       MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests.back());
-       requests.push_back(MPI_Request());
-       MPI_Irecv(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 47, client->intraComm, &requests.back());
+       ep_lib::MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, EP_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests[requests_position++]);
+       ep_lib::MPI_Irecv(recvMaskDst[recvRank], recvSize, EP_UNSIGNED_CHAR, recvRank, 47, client->intraComm, &requests[requests_position++]);
+
+       //requests.push_back(ep_lib::MPI_Request());
+       //ep_lib::MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, EP_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests.back());
+       //requests.push_back(ep_lib::MPI_Request());
+       //ep_lib::MPI_Irecv(recvMaskDst[recvRank], recvSize, EP_UNSIGNED_CHAR, recvRank, 47, client->intraComm, &requests.back());
      }
…

        // Send global index source and mask
-       requests.push_back(MPI_Request());
-       MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests.back());
-       requests.push_back(MPI_Request());
-       MPI_Isend(sendMaskDst[sendRank], sendSize, MPI_UNSIGNED_CHAR, sendRank, 47, client->intraComm, &requests.back());
+       ep_lib::MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, EP_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests[requests_position++]);
+       ep_lib::MPI_Isend(sendMaskDst[sendRank], sendSize, EP_UNSIGNED_CHAR, sendRank, 47, client->intraComm, &requests[requests_position++]);
+       //requests.push_back(ep_lib::MPI_Request());
+       //ep_lib::MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, EP_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests.back());
+       //requests.push_back(ep_lib::MPI_Request());
+       //ep_lib::MPI_Isend(sendMaskDst[sendRank], sendSize, EP_UNSIGNED_CHAR, sendRank, 47, client->intraComm, &requests.back());
      }

      status.resize(requests.size());
-     MPI_Waitall(requests.size(), &requests[0], &status[0]);
+     ep_lib::MPI_Waitall(requests.size(), &requests[0], &status[0]);

      // Okie, now use the mask to identify which index source we need to send, then also signal the destination which masked index we will return
-     std::vector<MPI_Request>().swap(requests);
-     std::vector<MPI_Status>().swap(status);
+     requests.resize(sendRankSizeMap.size() + recvRankSizeMap.size());
+     requests_position = 0;
+     std::vector<ep_lib::MPI_Status>().swap(status);
      // Okie, on destination side, we will wait for information of masked index of source
      for (std::map<int,int>::const_iterator itSend = sendRankSizeMap.begin(); itSend != sendRankSizeMap.end(); ++itSend)
…
        int recvSize = itSend->second;

-       requests.push_back(MPI_Request());
-       MPI_Irecv(sendMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back());
+       ep_lib::MPI_Irecv(sendMaskDst[recvRank], recvSize, EP_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests[requests_position++]);
+       //requests.push_back(ep_lib::MPI_Request());
+       //ep_lib::MPI_Irecv(sendMaskDst[recvRank], recvSize, EP_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back());
      }
…

        // Okie, now inform the destination which source index are masked
-       requests.push_back(MPI_Request());
-       MPI_Isend(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back());
+       ep_lib::MPI_Isend(recvMaskDst[recvRank], recvSize, EP_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests[requests_position++]);
+       //requests.push_back(ep_lib::MPI_Request());
+       //ep_lib::MPI_Isend(recvMaskDst[recvRank], recvSize, EP_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back());
      }
      status.resize(requests.size());
-     MPI_Waitall(requests.size(), &requests[0], &status[0]);
+     ep_lib::MPI_Waitall(requests.size(), &requests[0], &status[0]);

      // Cool, now we can fill in local index of grid destination (counted for masked index)
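Beyond the ep_lib renaming, grid_transformation.cpp also stops growing the request vector with push_back while non-blocking calls are in flight: the vector is sized up front (and resized between the two communication phases), filled through the running index requests_position, and the old push_back calls are kept only as comments, so each ep_lib::MPI_Request stays at a fixed address until the matching MPI_Waitall. A minimal sketch of the new pattern, with nRecv, nSend, the buffers and ranks as placeholder names:

    // Sketch, not the XIOS code: preallocate one slot per non-blocking call and
    // hand out &requests[requests_position++], so the vector never reallocates
    // while requests are outstanding.
    std::vector<ep_lib::MPI_Request> requests(nRecv + nSend);
    int requests_position = 0;
    for (int i = 0; i < nRecv; ++i)
      ep_lib::MPI_Irecv(recvBuf[i], recvCount[i], EP_INT, srcRank[i], 46, comm,
                        &requests[requests_position++]);
    for (int i = 0; i < nSend; ++i)
      ep_lib::MPI_Isend(sendBuf[i], sendCount[i], EP_INT, dstRank[i], 46, comm,
                        &requests[requests_position++]);
    std::vector<ep_lib::MPI_Status> status(requests.size());
    ep_lib::MPI_Waitall(requests.size(), &requests[0], &status[0]);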