- Timestamp:
- 01/22/19 16:43:32 (5 years ago)
- Files:
- 1 edited
Legend:
- Unmodified
- Added
- Removed
XIOS/trunk/src/transformation/domain_algorithm_interpolate.cpp
r1638 r1639 434 434 CContextClient* client=context->client; 435 435 436 ep_lib::MPI_Comm poleComme(EP_COMM_NULL); 437 #ifdef _usingMPI 436 MPI_Comm poleComme(MPI_COMM_NULL); 438 437 MPI_Comm_split(client->intraComm, interMapValuePole.empty() ? MPI_UNDEFINED : 1, 0, &poleComme); 439 #elif _usingEP 440 ep_lib::MPI_Comm_split(client->intraComm, interMapValuePole.empty() ? 0 : 1, 0, &poleComme); 441 #endif 442 if (EP_COMM_NULL != poleComme) 438 if (MPI_COMM_NULL != poleComme) 443 439 { 444 440 int nbClientPole; 445 ep_lib::MPI_Comm_size(poleComme, &nbClientPole);441 MPI_Comm_size(poleComme, &nbClientPole); 446 442 447 443 std::map<int,std::vector<std::pair<int,double> > >::iterator itePole = interMapValuePole.end(), itPole, … … 454 450 std::vector<int> recvCount(nbClientPole,0); 455 451 std::vector<int> displ(nbClientPole,0); 456 ep_lib::MPI_Allgather(&nbWeight,1,EP_INT,&recvCount[0],1,EP_INT,poleComme) ;452 MPI_Allgather(&nbWeight,1,MPI_INT,&recvCount[0],1,MPI_INT,poleComme) ; 457 453 458 454 displ[0]=0; … … 477 473 478 474 // Gather all index and weight for pole 479 ep_lib::MPI_Allgatherv(&sendSourceIndexBuff[0],nbWeight,EP_INT,&recvSourceIndexBuff[0],&recvCount[0],&displ[0],EP_INT,poleComme);480 ep_lib::MPI_Allgatherv(&sendSourceWeightBuff[0],nbWeight,EP_DOUBLE,&recvSourceWeightBuff[0],&recvCount[0],&displ[0],EP_DOUBLE,poleComme);475 MPI_Allgatherv(&sendSourceIndexBuff[0],nbWeight,MPI_INT,&recvSourceIndexBuff[0],&recvCount[0],&displ[0],MPI_INT,poleComme); 476 MPI_Allgatherv(&sendSourceWeightBuff[0],nbWeight,MPI_DOUBLE,&recvSourceWeightBuff[0],&recvCount[0],&displ[0],MPI_DOUBLE,poleComme); 481 477 482 478 std::map<int,double> recvTemp; … … 635 631 636 632 637 ep_lib::MPI_Allreduce(sendBuff, recvBuff, nbClient, EP_INT, EP_SUM, client->intraComm);633 MPI_Allreduce(sendBuff, recvBuff, nbClient, MPI_INT, MPI_SUM, client->intraComm); 638 634 639 635 int* sendIndexDestBuff = new int [sendBuffSize]; … … 641 637 double* sendWeightBuff = new double [sendBuffSize]; 642 638 643 
std::vector< ep_lib::MPI_Request> sendRequest;639 std::vector<MPI_Request> sendRequest; 644 640 645 641 int sendOffSet = 0, l = 0; … … 662 658 } 663 659 664 sendRequest.push_back( ep_lib::MPI_Request());665 ep_lib::MPI_Isend(sendIndexDestBuff + sendOffSet,660 sendRequest.push_back(MPI_Request()); 661 MPI_Isend(sendIndexDestBuff + sendOffSet, 666 662 k, 667 EP_INT,663 MPI_INT, 668 664 itMap->first, 669 665 MPI_DOMAIN_INTERPOLATION_DEST_INDEX, 670 666 client->intraComm, 671 667 &sendRequest.back()); 672 sendRequest.push_back( ep_lib::MPI_Request());673 ep_lib::MPI_Isend(sendIndexSrcBuff + sendOffSet,668 sendRequest.push_back(MPI_Request()); 669 MPI_Isend(sendIndexSrcBuff + sendOffSet, 674 670 k, 675 EP_INT,671 MPI_INT, 676 672 itMap->first, 677 673 MPI_DOMAIN_INTERPOLATION_SRC_INDEX, 678 674 client->intraComm, 679 675 &sendRequest.back()); 680 sendRequest.push_back( ep_lib::MPI_Request());681 ep_lib::MPI_Isend(sendWeightBuff + sendOffSet,676 sendRequest.push_back(MPI_Request()); 677 MPI_Isend(sendWeightBuff + sendOffSet, 682 678 k, 683 EP_DOUBLE,679 MPI_DOUBLE, 684 680 itMap->first, 685 681 MPI_DOMAIN_INTERPOLATION_WEIGHT, … … 697 693 while (receivedSize < recvBuffSize) 698 694 { 699 ep_lib::MPI_Status recvStatus; 700 #ifdef _usingMPI 695 MPI_Status recvStatus; 701 696 MPI_Recv((recvIndexDestBuff + receivedSize), 702 697 recvBuffSize, 703 EP_INT,698 MPI_INT, 704 699 MPI_ANY_SOURCE, 705 700 MPI_DOMAIN_INTERPOLATION_DEST_INDEX, 706 701 client->intraComm, 707 702 &recvStatus); 708 #elif _usingEP 709 ep_lib::MPI_Recv((recvIndexDestBuff + receivedSize), 703 704 int countBuff = 0; 705 MPI_Get_count(&recvStatus, MPI_INT, &countBuff); 706 clientSrcRank = recvStatus.MPI_SOURCE; 707 708 MPI_Recv((recvIndexSrcBuff + receivedSize), 710 709 recvBuffSize, 711 EP_INT, 712 -2, 713 MPI_DOMAIN_INTERPOLATION_DEST_INDEX, 714 client->intraComm, 715 &recvStatus); 716 #endif 717 718 int countBuff = 0; 719 ep_lib::MPI_Get_count(&recvStatus, EP_INT, &countBuff); 720 #ifdef _usingMPI 721 
clientSrcRank = recvStatus.MPI_SOURCE; 722 #elif _usingEP 723 clientSrcRank = recvStatus.ep_src; 724 #endif 725 726 ep_lib::MPI_Recv((recvIndexSrcBuff + receivedSize), 727 recvBuffSize, 728 EP_INT, 710 MPI_INT, 729 711 clientSrcRank, 730 712 MPI_DOMAIN_INTERPOLATION_SRC_INDEX, … … 732 714 &recvStatus); 733 715 734 ep_lib::MPI_Recv((recvWeightBuff + receivedSize),716 MPI_Recv((recvWeightBuff + receivedSize), 735 717 recvBuffSize, 736 EP_DOUBLE,718 MPI_DOUBLE, 737 719 clientSrcRank, 738 720 MPI_DOMAIN_INTERPOLATION_WEIGHT, … … 748 730 } 749 731 750 std::vector<ep_lib::MPI_Status> requestStatus(sendRequest.size()); 751 #ifdef _usingMPI 732 std::vector<MPI_Status> requestStatus(sendRequest.size()); 752 733 MPI_Waitall(sendRequest.size(), &sendRequest[0], MPI_STATUS_IGNORE); 753 #elif _usingEP754 std::vector<ep_lib::MPI_Status> waitstat(sendRequest.size());755 ep_lib::MPI_Waitall(sendRequest.size(), &sendRequest[0], &waitstat[0]);756 #endif757 734 758 735 delete [] sendIndexDestBuff; … … 768 745 769 746 /*! Redefined some functions of CONetCDF4 to make use of them */ 770 CDomainAlgorithmInterpolate::WriteNetCdf::WriteNetCdf(const StdString& filename, const ep_lib::MPI_Comm comm)747 CDomainAlgorithmInterpolate::WriteNetCdf::WriteNetCdf(const StdString& filename, const MPI_Comm comm) 771 748 : CNc4DataOutput(NULL, filename, false, false, true, comm, false, true) {} 772 749 int CDomainAlgorithmInterpolate::WriteNetCdf::addDimensionWrite(const StdString& name, … … 858 835 } 859 836 860 ep_lib::MPI_Allreduce(&localNbWeight, &globalNbWeight, 1, EP_LONG, EP_SUM, client->intraComm);861 ep_lib::MPI_Scan(&localNbWeight, &startIndex, 1, EP_LONG, EP_SUM, client->intraComm);837 MPI_Allreduce(&localNbWeight, &globalNbWeight, 1, MPI_LONG, MPI_SUM, client->intraComm); 838 MPI_Scan(&localNbWeight, &startIndex, 1, MPI_LONG, MPI_SUM, client->intraComm); 862 839 863 840 if (0 == globalNbWeight)
Note: See TracChangeset
for help on using the changeset viewer.