Changeset 1639 for XIOS/trunk/src/client_client_dht_template_impl.hpp
Timestamp: 01/22/19 16:43:32
File: 1 edited
XIOS/trunk/src/client_client_dht_template_impl.hpp
Changes from r1638 to r1639: the ep_lib:: MPI wrapper layer is dropped in favour of direct MPI calls (ep_lib::MPI_Comm becomes MPI_Comm; EP_UNSIGNED_LONG, EP_CHAR and EP_INT become MPI_UNSIGNED_LONG, MPI_CHAR and MPI_INT), and the pre-sized request arrays filled through a manual request_position cursor are replaced by request vectors grown via push_back inside the send/receive helpers. The MPI_Request*-based helper overloads, now redundant, are removed.

 {
   template<typename T, typename H>
-  CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const ep_lib::MPI_Comm& clientIntraComm)
+  CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const MPI_Comm& clientIntraComm)
     : H(clientIntraComm), index2InfoMapping_(), indexToInfoMappingLevel_(), nbClient_(0)
   {
-    ep_lib::MPI_Comm_size(clientIntraComm, &nbClient_);
+    MPI_Comm_size(clientIntraComm, &nbClient_);
     this->computeMPICommLevel();
     int nbLvl = this->getNbLevel();
…
   template<typename T, typename H>
   CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const Index2InfoTypeMap& indexInfoMap,
-                                                          const ep_lib::MPI_Comm& clientIntraComm)
+                                                          const MPI_Comm& clientIntraComm)
     : H(clientIntraComm), index2InfoMapping_(), indexToInfoMappingLevel_(), nbClient_(0)
   {
-    ep_lib::MPI_Comm_size(clientIntraComm, &nbClient_);
+    MPI_Comm_size(clientIntraComm, &nbClient_);
     this->computeMPICommLevel();
     int nbLvl = this->getNbLevel();
…
   template<typename T, typename H>
   CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const Index2VectorInfoTypeMap& indexInfoMap,
-                                                          const ep_lib::MPI_Comm& clientIntraComm)
+                                                          const MPI_Comm& clientIntraComm)
     : H(clientIntraComm), index2InfoMapping_(), indexToInfoMappingLevel_(), nbClient_(0)
   {
-    ep_lib::MPI_Comm_size(clientIntraComm, &nbClient_);
+    MPI_Comm_size(clientIntraComm, &nbClient_);
     this->computeMPICommLevel();
     int nbLvl = this->getNbLevel();
…
   template<typename T, typename H>
   void CClientClientDHTTemplate<T,H>::computeIndexInfoMappingLevel(const CArray<size_t,1>& indices,
-                                                                   const ep_lib::MPI_Comm& commLevel,
+                                                                   const MPI_Comm& commLevel,
                                                                    int level)
   {
     int clientRank;
-    ep_lib::MPI_Comm_rank(commLevel,&clientRank);
+    MPI_Comm_rank(commLevel,&clientRank);
     int groupRankBegin = this->getGroupBegin()[level];
     int nbClient = this->getNbInGroup()[level];
…
     recvIndexBuff = new unsigned long[recvNbIndexCount];

-    int request_size = 0;
-    for (int idx = 0; idx < recvRankClient.size(); ++idx)
-    {
-      if (0 != recvNbIndexClientCount[idx])
-        request_size ++;
-    }
-
-    request_size += client2ClientIndex.size();
-
-    std::vector<ep_lib::MPI_Request> request(request_size);
+    std::vector<MPI_Request> request;
     std::vector<int>::iterator itbRecvIndex = recvRankClient.begin(), itRecvIndex,
                                iteRecvIndex = recvRankClient.end(),
…
     int currentIndex = 0;
     int nbRecvClient = recvRankClient.size();
-    int request_position = 0;
     for (int idx = 0; idx < nbRecvClient; ++idx)
     {
       if (0 != recvNbIndexClientCount[idx])
-        recvIndexFromClients(recvRankClient[idx], recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], commLevel, &request[request_position++]);
+        recvIndexFromClients(recvRankClient[idx], recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], commLevel, request);
       currentIndex += recvNbIndexClientCount[idx];
     }
…
       iteIndex = client2ClientIndex.end();
     for (itIndex = itbIndex; itIndex != iteIndex; ++itIndex)
-      sendIndexToClients(itIndex->first, (itIndex->second), sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, &request[request_position++]);
+      sendIndexToClients(itIndex->first, (itIndex->second), sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, request);

-    std::vector<ep_lib::MPI_Status> status(request.size());
-    ep_lib::MPI_Waitall(request.size(), &request[0], &status[0]);
+    std::vector<MPI_Status> status(request.size());
+    MPI_Waitall(request.size(), &request[0], &status[0]);

     CArray<size_t,1>* tmpGlobalIndex;
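The hunk above is the recurring shape of this changeset: rather than pre-counting the nonblocking operations and filling a pre-sized array through a request_position cursor, the request vector starts empty and every helper appends its own handle. Below is a minimal, self-contained sketch of that pattern in plain MPI, not XIOS code: the helper name isendIndices and the all-to-all traffic are invented for illustration, and size_t is sent as MPI_UNSIGNED_LONG just as the changed code does (assuming size_t is unsigned long, as XIOS itself assumes).

#include <mpi.h>
#include <cstddef>
#include <vector>

// Append-style helper with the same shape as the new sendIndexToClients:
// the caller owns the request vector, the helper pushes one handle per call.
void isendIndices(size_t* indices, int count, int destRank,
                  MPI_Comm comm, std::vector<MPI_Request>& requests)
{
  MPI_Request request = MPI_REQUEST_NULL;
  requests.push_back(request);
  // &requests.back() is consumed by MPI_Isend before any later push_back
  // can reallocate the vector; afterwards only the handle value matters.
  MPI_Isend(indices, count, MPI_UNSIGNED_LONG,
            destRank, /*tag*/ 0, comm, &(requests.back()));
}

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int rank, nbClient;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &nbClient);

  std::vector<size_t> sendBuf(1, rank), recvBuf(nbClient);
  std::vector<MPI_Request> requests;   // starts empty: no pre-counting

  for (int r = 0; r < nbClient; ++r)   // post all receives first
  {
    requests.push_back(MPI_REQUEST_NULL);
    MPI_Irecv(&recvBuf[r], 1, MPI_UNSIGNED_LONG, r, 0, MPI_COMM_WORLD,
              &(requests.back()));
  }
  for (int r = 0; r < nbClient; ++r)   // then the matching sends
    isendIndices(&sendBuf[0], 1, r, MPI_COMM_WORLD, requests);

  // A single wait over exactly as many requests as were actually posted.
  std::vector<MPI_Status> status(requests.size());
  MPI_Waitall(requests.size(), &requests[0], &status[0]);

  MPI_Finalize();
  return 0;
}

The one subtlety: the address of requests.back() must be handed to MPI_Isend or MPI_Irecv before the next push_back, since a reallocation would invalidate the pointer. The handles themselves are plain values and survive later reallocations, so the final MPI_Waitall over &requests[0] is safe.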
…
   }

-    int requestOnReturn_size=0;
-    for (int idx = 0; idx < recvRankOnReturn.size(); ++idx)
-    {
-      if (0 != recvNbIndexOnReturn[idx])
-      {
-        requestOnReturn_size += 2;
-      }
-    }
-
-    for (int idx = 0; idx < nbRecvClient; ++idx)
-    {
-      if (0 != sendNbIndexOnReturn[idx])
-      {
-        requestOnReturn_size += 2;
-      }
-    }
-
-    int requestOnReturn_position=0;
-
-    std::vector<ep_lib::MPI_Request> requestOnReturn(requestOnReturn_size);
+    std::vector<MPI_Request> requestOnReturn;
     currentIndex = 0;
     for (int idx = 0; idx < recvRankOnReturn.size(); ++idx)
…
       if (0 != recvNbIndexOnReturn[idx])
       {
-        recvIndexFromClients(recvRankOnReturn[idx], recvIndexBuffOnReturn+currentIndex, recvNbIndexOnReturn[idx], commLevel, &requestOnReturn[requestOnReturn_position++]);
+        recvIndexFromClients(recvRankOnReturn[idx], recvIndexBuffOnReturn+currentIndex, recvNbIndexOnReturn[idx], commLevel, requestOnReturn);
         recvInfoFromClients(recvRankOnReturn[idx],
                             recvInfoBuffOnReturn+currentIndex*ProcessDHTElement<InfoType>::typeSize(),
                             recvNbIndexOnReturn[idx]*ProcessDHTElement<InfoType>::typeSize(),
-                            commLevel, &requestOnReturn[requestOnReturn_position++]);
+                            commLevel, requestOnReturn);
       }
       currentIndex += recvNbIndexOnReturn[idx];
…

         sendIndexToClients(rank, client2ClientIndexOnReturn[rank],
-                           sendNbIndexOnReturn[idx], commLevel, &requestOnReturn[requestOnReturn_position++]);
+                           sendNbIndexOnReturn[idx], commLevel, requestOnReturn);
         sendInfoToClients(rank, client2ClientInfoOnReturn[rank],
-                          sendNbIndexOnReturn[idx]*ProcessDHTElement<InfoType>::typeSize(), commLevel, &requestOnReturn[requestOnReturn_position++]);
+                          sendNbIndexOnReturn[idx]*ProcessDHTElement<InfoType>::typeSize(), commLevel, requestOnReturn);
       }
       currentIndex += recvNbIndexClientCount[idx];
     }

-    std::vector<ep_lib::MPI_Status> statusOnReturn(requestOnReturn.size());
-    ep_lib::MPI_Waitall(requestOnReturn.size(), &requestOnReturn[0], &statusOnReturn[0]);
+    std::vector<MPI_Status> statusOnReturn(requestOnReturn.size());
+    MPI_Waitall(requestOnReturn.size(), &requestOnReturn[0], &statusOnReturn[0]);

     Index2VectorInfoTypeMap indexToInfoMapping;
…
   template<typename T, typename H>
   void CClientClientDHTTemplate<T,H>::computeDistributedIndex(const Index2VectorInfoTypeMap& indexInfoMap,
-                                                              const ep_lib::MPI_Comm& commLevel,
+                                                              const MPI_Comm& commLevel,
                                                               int level)
   {
     int clientRank;
-    ep_lib::MPI_Comm_rank(commLevel,&clientRank);
+    MPI_Comm_rank(commLevel,&clientRank);
     computeSendRecvRank(level, clientRank);
…
     // it will send a message to the correct clients.
     // Contents of the message are index and its corresponding information
-    int request_size = 0;
-    for (int idx = 0; idx < recvRankClient.size(); ++idx)
-    {
-      if (0 != recvNbIndexClientCount[idx])
-      {
-        request_size += 2;
-      }
-    }
-
-    request_size += client2ClientIndex.size();
-    request_size += client2ClientInfo.size();
-
-    std::vector<ep_lib::MPI_Request> request(request_size);
-
+    std::vector<MPI_Request> request;
     int currentIndex = 0;
     int nbRecvClient = recvRankClient.size();
-    int request_position=0;
     for (int idx = 0; idx < nbRecvClient; ++idx)
     {
       if (0 != recvNbIndexClientCount[idx])
       {
-        recvIndexFromClients(recvRankClient[idx], recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], commLevel, &request[request_position++]);
-        //if(clientRank==0) printf("recv index request = %p, inserted = %p, request = %d\n", &request[0], request.back(), *static_cast< int*>(request.back()->mpi_request));
+        recvIndexFromClients(recvRankClient[idx], recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], commLevel, request);
         recvInfoFromClients(recvRankClient[idx],
-                            recvInfoBuff+currentIndex*ProcessDHTElement<InfoType>::typeSize(),
-                            recvNbIndexClientCount[idx]*ProcessDHTElement<InfoType>::typeSize(),
-                            commLevel, &request[request_position++]);
-        //if(clientRank==0) printf("recv info request = %p, inserted = %p, request = %d\n", &request[0], request.back(), *static_cast< int*>(request.back()->mpi_request));
+                            recvInfoBuff+currentIndex*ProcessDHTElement<InfoType>::typeSize(),
+                            recvNbIndexClientCount[idx]*ProcessDHTElement<InfoType>::typeSize(),
+                            commLevel, request);
       }
       currentIndex += recvNbIndexClientCount[idx];
…
       iteIndex = client2ClientIndex.end();
     for (itIndex = itbIndex; itIndex != iteIndex; ++itIndex)
-    { sendIndexToClients(itIndex->first, itIndex->second, sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, &request[request_position++]);
-    } //if(clientRank==0) printf("send index request = %p, inserted = %p, request = %d\n", &request[0], request.back(), *static_cast< int*>(request.back()->mpi_request));}
+      sendIndexToClients(itIndex->first, itIndex->second, sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, request);
     std::unordered_map<int, unsigned char*>::iterator itbInfo = client2ClientInfo.begin(), itInfo,
                                                       iteInfo = client2ClientInfo.end();
     for (itInfo = itbInfo; itInfo != iteInfo; ++itInfo)
-    { sendInfoToClients(itInfo->first, itInfo->second, sendNbInfo[itInfo->first-groupRankBegin], commLevel, &request[request_position++]);
-    }// if(clientRank==0) printf("send info request = %p, inserted = %p, request = %d\n", &request[0], request.back(), *static_cast< int*>(request.back()->mpi_request));}
+      sendInfoToClients(itInfo->first, itInfo->second, sendNbInfo[itInfo->first-groupRankBegin], commLevel, request);

-    std::vector<ep_lib::MPI_Status> status(request.size());
-
-    ep_lib::MPI_Waitall(request.size(), &request[0], &status[0]);
+    std::vector<MPI_Status> status(request.size());
+    MPI_Waitall(request.size(), &request[0], &status[0]);

     Index2VectorInfoTypeMap indexToInfoMapping;
…
   template<typename T, typename H>
   void CClientClientDHTTemplate<T,H>::sendIndexToClients(int clientDestRank, size_t* indices, size_t indiceSize,
-                                                         const ep_lib::MPI_Comm& clientIntraComm,
-                                                         std::vector<ep_lib::MPI_Request>& requestSendIndex)
+                                                         const MPI_Comm& clientIntraComm,
+                                                         std::vector<MPI_Request>& requestSendIndex)
   {
-    ep_lib::MPI_Request request;
+    MPI_Request request;
     requestSendIndex.push_back(request);
-    ep_lib::MPI_Isend(indices, indiceSize, EP_UNSIGNED_LONG,
+    MPI_Isend(indices, indiceSize, MPI_UNSIGNED_LONG,
               clientDestRank, MPI_DHT_INDEX, clientIntraComm, &(requestSendIndex.back()));
-  }
-
-  /*!
-    Send message containing index to clients
-    \param [in] clientDestRank rank of destination client
-    \param [in] indices index to send
-    \param [in] indiceSize size of index array to send
-    \param [in] clientIntraComm communication group of client
-    \param [in] requestSendIndex sending request
-  */
-  template<typename T, typename H>
-  void CClientClientDHTTemplate<T,H>::sendIndexToClients(int clientDestRank, size_t* indices, size_t indiceSize,
-                                                         const ep_lib::MPI_Comm& clientIntraComm,
-                                                         ep_lib::MPI_Request* requestSendIndex)
-  {
-    ep_lib::MPI_Isend(indices, indiceSize, EP_UNSIGNED_LONG,
-              clientDestRank, MPI_DHT_INDEX, clientIntraComm, requestSendIndex);
   }
…
   template<typename T, typename H>
   void CClientClientDHTTemplate<T,H>::recvIndexFromClients(int clientSrcRank, size_t* indices, size_t indiceSize,
-                                                           const ep_lib::MPI_Comm& clientIntraComm,
-                                                           std::vector<ep_lib::MPI_Request>& requestRecvIndex)
+                                                           const MPI_Comm& clientIntraComm,
+                                                           std::vector<MPI_Request>& requestRecvIndex)
   {
-    ep_lib::MPI_Request request;
+    MPI_Request request;
     requestRecvIndex.push_back(request);
-    ep_lib::MPI_Irecv(indices, indiceSize, EP_UNSIGNED_LONG,
+    MPI_Irecv(indices, indiceSize, MPI_UNSIGNED_LONG,
               clientSrcRank, MPI_DHT_INDEX, clientIntraComm, &(requestRecvIndex.back()));
-  }
-
-  /*!
-    Receive message containing index to clients
-    \param [in] clientDestRank rank of destination client
-    \param [in] indices index to send
-    \param [in] clientIntraComm communication group of client
-    \param [in] requestRecvIndex receiving request
-  */
-  template<typename T, typename H>
-  void CClientClientDHTTemplate<T,H>::recvIndexFromClients(int clientSrcRank, size_t* indices, size_t indiceSize,
-                                                           const ep_lib::MPI_Comm& clientIntraComm,
-                                                           ep_lib::MPI_Request *requestRecvIndex)
-  {
-    ep_lib::MPI_Irecv(indices, indiceSize, EP_UNSIGNED_LONG,
-              clientSrcRank, MPI_DHT_INDEX, clientIntraComm, requestRecvIndex);
   }
…
   template<typename T, typename H>
   void CClientClientDHTTemplate<T,H>::sendInfoToClients(int clientDestRank, unsigned char* info, int infoSize,
-                                                        const ep_lib::MPI_Comm& clientIntraComm,
-                                                        std::vector<ep_lib::MPI_Request>& requestSendInfo)
+                                                        const MPI_Comm& clientIntraComm,
+                                                        std::vector<MPI_Request>& requestSendInfo)
   {
-    ep_lib::MPI_Request request;
+    MPI_Request request;
     requestSendInfo.push_back(request);

-    ep_lib::MPI_Isend(info, infoSize, EP_CHAR,
+    MPI_Isend(info, infoSize, MPI_CHAR,
               clientDestRank, MPI_DHT_INFO, clientIntraComm, &(requestSendInfo.back()));
-  }
-
-  /*!
-    Send message containing information to clients
-    \param [in] clientDestRank rank of destination client
-    \param [in] info info array to send
-    \param [in] infoSize info array size to send
-    \param [in] clientIntraComm communication group of client
-    \param [in] requestSendInfo sending request
-  */
-  template<typename T, typename H>
-  void CClientClientDHTTemplate<T,H>::sendInfoToClients(int clientDestRank, unsigned char* info, int infoSize,
-                                                        const ep_lib::MPI_Comm& clientIntraComm,
-                                                        ep_lib::MPI_Request *requestSendInfo)
-  {
-    ep_lib::MPI_Isend(info, infoSize, EP_CHAR,
-              clientDestRank, MPI_DHT_INFO, clientIntraComm, requestSendInfo);
   }
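In all four helpers the info payload travels as an opaque byte array: buffers are sized via ProcessDHTElement<InfoType>::typeSize() and the wire type is MPI_CHAR. A short sketch of that idea follows, with a hypothetical POD Info and a plain memcpy standing in for XIOS's serialization machinery (illustrative assumptions, not the library's API):

#include <mpi.h>
#include <cstring>
#include <vector>

// Hypothetical fixed-size payload standing in for InfoType; in XIOS the
// true byte count comes from ProcessDHTElement<InfoType>::typeSize().
struct Info { int rank; double weight; };

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  const int infoSize = sizeof(Info);           // analogue of typeSize()
  Info info = { rank, 0.5 };

  // Serialize to a byte buffer: this is the form sendInfoToClients transmits.
  std::vector<unsigned char> sendBuff(infoSize), recvBuff(infoSize);
  std::memcpy(&sendBuff[0], &info, infoSize);

  // Self-exchange keeps the sketch runnable on any number of ranks; the
  // receive is posted before the send, as in the changed code paths.
  MPI_Request requests[2];
  MPI_Irecv(&recvBuff[0], infoSize, MPI_CHAR, rank, 0, MPI_COMM_WORLD, &requests[0]);
  MPI_Isend(&sendBuff[0], infoSize, MPI_CHAR, rank, 0, MPI_COMM_WORLD, &requests[1]);
  MPI_Status status[2];
  MPI_Waitall(2, requests, status);

  Info received;                               // deserialize on arrival
  std::memcpy(&received, &recvBuff[0], infoSize);

  MPI_Finalize();
  return 0;
}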
…
   template<typename T, typename H>
   void CClientClientDHTTemplate<T,H>::recvInfoFromClients(int clientSrcRank, unsigned char* info, int infoSize,
-                                                          const ep_lib::MPI_Comm& clientIntraComm,
-                                                          std::vector<ep_lib::MPI_Request>& requestRecvInfo)
+                                                          const MPI_Comm& clientIntraComm,
+                                                          std::vector<MPI_Request>& requestRecvInfo)
   {
-    ep_lib::MPI_Request request;
+    MPI_Request request;
     requestRecvInfo.push_back(request);

-    ep_lib::MPI_Irecv(info, infoSize, EP_CHAR,
+    MPI_Irecv(info, infoSize, MPI_CHAR,
               clientSrcRank, MPI_DHT_INFO, clientIntraComm, &(requestRecvInfo.back()));
-  }
-
-  /*!
-    Receive message containing information from other clients
-    \param [in] clientDestRank rank of destination client
-    \param [in] info info array to receive
-    \param [in] infoSize info array size to receive
-    \param [in] clientIntraComm communication group of client
-    \param [in] requestRecvInfo list of receiving request
-  */
-  template<typename T, typename H>
-  void CClientClientDHTTemplate<T,H>::recvInfoFromClients(int clientSrcRank, unsigned char* info, int infoSize,
-                                                          const ep_lib::MPI_Comm& clientIntraComm,
-                                                          ep_lib::MPI_Request* requestRecvInfo)
-  {
-    ep_lib::MPI_Irecv(info, infoSize, EP_CHAR,
-              clientSrcRank, MPI_DHT_INFO, clientIntraComm, requestRecvInfo);
   }
…
   {
     recvNbElements.resize(recvNbRank.size());
-    std::vector<ep_lib::MPI_Request> request(sendNbRank.size()+recvNbRank.size());
-    std::vector<ep_lib::MPI_Status> requestStatus(sendNbRank.size()+recvNbRank.size());
+    std::vector<MPI_Request> request(sendNbRank.size()+recvNbRank.size());
+    std::vector<MPI_Status> requestStatus(sendNbRank.size()+recvNbRank.size());

     int nRequest = 0;
     for (int idx = 0; idx < recvNbRank.size(); ++idx)
     {
-      ep_lib::MPI_Irecv(&recvNbElements[0]+idx, 1, EP_INT,
+      MPI_Irecv(&recvNbElements[0]+idx, 1, MPI_INT,
                 recvNbRank[idx], MPI_DHT_INDEX_1, this->internalComm_, &request[nRequest]);
       ++nRequest;
…
     for (int idx = 0; idx < sendNbRank.size(); ++idx)
     {
-      ep_lib::MPI_Isend(&sendNbElements[0]+idx, 1, EP_INT,
+      MPI_Isend(&sendNbElements[0]+idx, 1, MPI_INT,
                 sendNbRank[idx], MPI_DHT_INDEX_1, this->internalComm_, &request[nRequest]);
       ++nRequest;
     }

-    ep_lib::MPI_Waitall(sendNbRank.size()+recvNbRank.size(), &request[0], &requestStatus[0]);
+    MPI_Waitall(sendNbRank.size()+recvNbRank.size(), &request[0], &requestStatus[0]);
   }
…
     std::vector<int> recvBuff(recvBuffSize*2,0);

-    std::vector<ep_lib::MPI_Request> request(sendBuffSize+recvBuffSize);
-    std::vector<ep_lib::MPI_Status> requestStatus(sendBuffSize+recvBuffSize);
+    std::vector<MPI_Request> request(sendBuffSize+recvBuffSize);
+    std::vector<MPI_Status> requestStatus(sendBuffSize+recvBuffSize);

     int nRequest = 0;
     for (int idx = 0; idx < recvBuffSize; ++idx)
     {
-      ep_lib::MPI_Irecv(&recvBuff[0]+2*idx, 2, EP_INT,
+      MPI_Irecv(&recvBuff[0]+2*idx, 2, MPI_INT,
                 recvRank[idx], MPI_DHT_INDEX_0, this->internalComm_, &request[nRequest]);
       ++nRequest;
…
     for (int idx = 0; idx < sendBuffSize; ++idx)
     {
-      ep_lib::MPI_Isend(&sendBuff[idx*2], 2, EP_INT,
+      MPI_Isend(&sendBuff[idx*2], 2, MPI_INT,
                 sendRank[idx], MPI_DHT_INDEX_0, this->internalComm_, &request[nRequest]);
       ++nRequest;
     }

-    ep_lib::MPI_Waitall(sendBuffSize+recvBuffSize, &request[0], &requestStatus[0]);
+    MPI_Waitall(sendBuffSize+recvBuffSize, &request[0], &requestStatus[0]);
     int nbRecvRank = 0, nbRecvElements = 0;
     recvNbRank.clear();
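Unlike the earlier hunks, these last two routines keep their pre-sized request arrays: the operation count is known up front (one receive per receiving partner plus one send per sending partner), so only the ep_lib prefix changes. A self-contained sketch of this fixed-size count handshake, using an illustrative all-to-all partner set in place of XIOS's computed rank lists:

#include <mpi.h>
#include <vector>

// Fixed-size count handshake in the style of the hunks above: the number
// of operations is known in advance, so the request array is sized exactly
// and filled through an nRequest cursor rather than push_back.
int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  // Every rank exchanges one element count with every rank (self included).
  std::vector<int> sendNbElements(size, rank + 1), recvNbElements(size);
  std::vector<MPI_Request> request(2 * size);
  std::vector<MPI_Status> requestStatus(2 * size);

  int nRequest = 0;
  for (int idx = 0; idx < size; ++idx)          // one receive per partner
  {
    MPI_Irecv(&recvNbElements[0] + idx, 1, MPI_INT,
              idx, /*tag*/ 1, MPI_COMM_WORLD, &request[nRequest]);
    ++nRequest;
  }
  for (int idx = 0; idx < size; ++idx)          // one send per partner
  {
    MPI_Isend(&sendNbElements[0] + idx, 1, MPI_INT,
              idx, /*tag*/ 1, MPI_COMM_WORLD, &request[nRequest]);
    ++nRequest;
  }

  MPI_Waitall(nRequest, &request[0], &requestStatus[0]);
  MPI_Finalize();
  return 0;
}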