Changeset 2397 for XIOS3/trunk/src/distribution/grid_remote_connector.cpp
Timestamp: 08/31/22 17:23:56
File: 1 edited
Legend: unchanged lines are shown with a leading space, added lines with '+', removed lines with '-'.
XIOS3/trunk/src/distribution/grid_remote_connector.cpp
Diff from r2296 to r2397:

 #include "leader_process.hpp"
 #include "mpi.hpp"
+#include "element.hpp"
...
  * redundantly to the remote servers
  */
-void CGridRemoteConnector::computeConnectorMethods(void)
+void CGridRemoteConnector::computeConnectorMethods(bool reverse)
 {
   vector<shared_ptr<CLocalView>> srcView ;
...
   for(int i=0;i<dstView_.size();i++) dstViewsNonDistributed = dstViewsNonDistributed && !isDstViewDistributed_[i] ;

-  if (srcViewsNonDistributed)
+  //*****************************************************
+  if (srcViewsNonDistributed && dstViewsNonDistributed)
+  {
+    int commRank, commSize ;
+    MPI_Comm_rank(localComm_,&commRank) ;
+    MPI_Comm_size(localComm_,&commSize) ;
+
+    map<int,bool> ranks ;
+    if (reverse)
+    {
+      int leaderRank=getLeaderRank(remoteSize_, commSize, commRank) ;
+      ranks[leaderRank] = true ;
+    }
+    else
+    {
+      list<int> remoteRanks;
+      list<int> notUsed ;
+      computeLeaderProcess(commRank, commSize, remoteSize_, remoteRanks, notUsed) ;
+      for(int rank : remoteRanks) ranks[rank]=true ;
+    }
+    for(int i=0; i<srcView_.size(); i++) computeSrcDstNonDistributed(i,ranks) ;
+  }
+
+  //*****************************************************
+  else if (srcViewsNonDistributed)
   {
     int commRank, commSize ;
...
     list<int> remoteRanks;
     list<int> notUsed ;
-    map<int,bool> ranks ;
-    computeLeaderProcess(commRank, commSize, remoteSize_, remoteRanks, notUsed) ;
-    for(int rank : remoteRanks) ranks[rank]=true ;
+    map<int,bool> ranks ;

-    for(int i=0; i<srcView_.size(); i++)
-    {
-      if (isDstViewDistributed_[i]) computeSrcNonDistributed(i) ;
-      else computeSrcDstNonDistributed(i, ranks) ;
-    }
+    if (reverse)
+    {
+      shared_ptr<CLocalElement> voidElement = make_shared<CLocalElement>(commRank, 0, CArray<size_t,1>()) ;
+      shared_ptr<CLocalView> voidView = make_shared<CLocalView>(voidElement, CElementView::FULL, CArray<int,1>()) ;
+
+      for(int i=0;i<srcView_.size();i++)
+        if (isDstViewDistributed_[i])
+        {
+          if (commRank==0) srcView.push_back(srcView_[i]) ;
+          else srcView.push_back(make_shared<CLocalView>(make_shared<CLocalElement>(commRank, srcView_[i]->getGlobalSize(), CArray<size_t,1>()),
+                                                         CElementView::FULL, CArray<int,1>())) ; // void view
+          dstView.push_back(dstView_[i]) ;
+          indElements.push_back(i) ;
+        }
+
+      computeGenericMethod(srcView, dstView, indElements) ;
+
+      for(int i=0;i<srcView_.size();i++)
+        if (isDstViewDistributed_[i])
+        {
+          size_t sizeElement ;
+          int nRank ;
+          if (commRank==0) nRank = elements_[i].size() ;
+          MPI_Bcast(&nRank, 1, MPI_INT, 0, localComm_) ;
+
+          auto it=elements_[i].begin() ;
+          for(int j=0;j<nRank;j++)
+          {
+            int rank ;
+            size_t sizeElement ;
+            if (commRank==0) { rank = it->first ; sizeElement=it->second.numElements(); }
+            MPI_Bcast(&rank, 1, MPI_INT, 0, localComm_) ;
+            MPI_Bcast(&sizeElement, 1, MPI_SIZE_T, 0, localComm_) ;
+            if (commRank!=0) elements_[i][rank].resize(sizeElement) ;
+            MPI_Bcast(elements_[i][rank].dataFirst(), sizeElement, MPI_SIZE_T, 0, localComm_) ;
+            if (commRank==0) ++it ;
+          }
+        }
+
+      for(auto& it : elements_[indElements[0]])
+      {
+        if (it.second.numElements()==0) ranks[it.first] = false ;
+        else ranks[it.first] = true ;
+      }
+
+      for(int i=0;i<srcView_.size();i++)
+        if (!isDstViewDistributed_[i]) computeSrcDstNonDistributed(i, ranks) ;
+    }
+    else
+    {
+      computeLeaderProcess(commRank, commSize, remoteSize_, remoteRanks, notUsed) ;
+      for(int rank : remoteRanks) ranks[rank]=true ;
+
+      for(int i=0; i<srcView_.size(); i++)
+      {
+        if (isDstViewDistributed_[i]) computeSrcNonDistributed(i) ;
+        else computeSrcDstNonDistributed(i, ranks) ;
+      }
+    }
   }
+  //*****************************************************
   else if (dstViewsNonDistributed)
   {
+    int commRank, commSize ;
+    MPI_Comm_rank(localComm_,&commRank) ;
+    MPI_Comm_size(localComm_,&commSize) ;
+
     map<int,bool> ranks ;
-    for(int i=0;i<remoteSize_;i++) ranks[i]=true ;
+    if (reverse)
+    {
+      int leaderRank=getLeaderRank(remoteSize_, commSize, commRank) ;
+      ranks[leaderRank] = true ;
+    }
+    else for(int i=0;i<remoteSize_;i++) ranks[i]=true ;
+
     for(int i=0; i<srcView_.size(); i++)
     {
...
     }
   }
+  //*****************************************************
   else
   {
...
   }
 }
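For orientation: the new reverse branches reduce the target set to a single leader server per local process (getLeaderRank), while the forward branches elect leader clients for each remote server (computeLeaderProcess). The snippet below is a minimal, self-contained sketch of one plausible proportional leader mapping between commSize local processes and remoteSize remote processes; the real functions in leader_process.hpp may distribute ranks differently, so the formulas here are assumptions for illustration only.

    // Hypothetical sketch of a proportional leader mapping; the actual
    // getLeaderRank/computeLeaderProcess in leader_process.hpp may differ.
    #include <cstdio>
    #include <list>

    // Reverse direction: which remote (server) rank leads local rank commRank?
    int getLeaderRankSketch(int remoteSize, int commSize, int commRank)
    {
      // proportional block mapping: local ranks divided evenly among remote ranks
      return (int)((long long)commRank * remoteSize / commSize);
    }

    // Forward direction: which remote ranks does local rank commRank feed?
    void computeLeaderProcessSketch(int commRank, int commSize, int remoteSize,
                                    std::list<int>& remoteRanks)
    {
      // remote rank r is fed by the smallest local rank mapping onto r
      for (int r = 0; r < remoteSize; r++)
      {
        int first = (r * commSize + remoteSize - 1) / remoteSize; // ceiling
        if (first == commRank) remoteRanks.push_back(r);
      }
    }

    int main()
    {
      const int commSize = 8, remoteSize = 3;
      for (int rank = 0; rank < commSize; rank++)
      {
        std::list<int> leads;
        computeLeaderProcessSketch(rank, commSize, remoteSize, leads);
        std::printf("local %d -> leader server %d, feeds:", rank,
                    getLeaderRankSketch(remoteSize, commSize, rank));
        for (int r : leads) std::printf(" %d", r);
        std::printf("\n");
      }
      return 0;
    }

With 8 clients and 3 servers this prints that clients 0, 3 and 6 are the leaders feeding servers 0, 1 and 2 respectively, which is the kind of assignment the forward/reverse branches above rely on.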
+
+/**
+ * \brief Compute the connector for the element \b i when the source view is not distributed.
+ *        After the call element_[i] is defined.
+ * \param i Index of the element composing the source grid.
+ */
+void CGridRemoteConnector::computeSrcNonDistributedReverse(int i)
+{
+  auto& element = elements_[i] ;
+  map<int,CArray<size_t,1>> globalIndexView ;
+  dstView_[i]->getGlobalIndexView(globalIndexView) ;
+
+  CClientClientDHTTemplate<int>::Index2InfoTypeMap dataInfo;
+
+  for(auto& it : globalIndexView)
+  {
+    auto& globalIndex=it.second ;
+    for(size_t ind : globalIndex) dataInfo[ind]=it.first ;
+  }
+
+  // first we feed the distributed hash map with keys (remote global indices)
+  // associated with the value of the remote rank
+  CClientClientDHTTemplate<int> DHT(dataInfo, localComm_) ;
+  // then we feed the DHT with the local global indices of the source view
+
+  int commRank, commSize ;
+  MPI_Comm_rank(localComm_,&commRank) ;
+  MPI_Comm_size(localComm_,&commSize) ;
+  CArray<size_t,1> srcIndex ;
+  // since the source view is not distributed, only rank 0 needs to feed the DHT
+  if (commRank==0) srcView_[i]->getGlobalIndexView(srcIndex) ;
+
+  // compute the mapping
+  DHT.computeIndexInfoMapping(srcIndex) ;
+  auto& returnInfo = DHT.getInfoIndexMap() ;
+
+  // returnInfo now maps each global index to send to a list of remote ranks,
+  // but only on rank 0 because it is the one that fed the DHT. So rank 0 must
+  // send the list to each server leader, i.e. the local process that
+  // specifically handles one or more servers.
+
+  // rankIndGlo[rank] : list of global indices to send to the remote server of rank "rank"
+  vector<vector<size_t>> rankIndGlo(remoteSize_) ;
+  if (commRank==0)
+    for(auto& it1 : returnInfo)
+      for(auto& it2 : it1.second) rankIndGlo[it2].push_back(it1.first) ;
+
+  // broadcast the same data to each client
+  for(int remoteRank=0 ; remoteRank<remoteSize_ ; remoteRank++)
+  {
+    int remoteDataSize ;
+    if (commRank==0) remoteDataSize = rankIndGlo[remoteRank].size() ;
+    MPI_Bcast(&remoteDataSize, 1, MPI_INT, 0, localComm_) ;
+
+    auto& element = elements_[i][remoteRank] ;
+    element.resize(remoteDataSize) ;
+    if (commRank==0) for(int j=0 ; j<remoteDataSize; j++) element(j)=rankIndGlo[remoteRank][j] ;
+    MPI_Bcast(element.dataFirst(), remoteDataSize, MPI_SIZE_T, 0, localComm_) ;
+  }
+}
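The broadcast loop above follows a standard pattern: only rank 0 holds the result, so for each remote rank it broadcasts the size first, every process allocates, then the payload follows. A stripped-down sketch of that pattern, using std::vector instead of CArray and MPI_UNSIGNED_LONG as a stand-in for the MPI_SIZE_T mapping XIOS defines (both substitutions are assumptions here):

    // Minimal sketch of the size-then-data broadcast pattern used above.
    // Assumes MPI is already initialized; MPI_UNSIGNED_LONG is assumed to
    // match size_t on this platform (XIOS uses its own MPI_SIZE_T mapping).
    #include <mpi.h>
    #include <vector>
    #include <cstddef>

    void bcastFromRoot(std::vector<std::size_t>& data, MPI_Comm comm)
    {
      int rank;
      MPI_Comm_rank(comm, &rank);

      // 1) everyone learns the size from rank 0
      int n = (rank == 0) ? (int)data.size() : 0;
      MPI_Bcast(&n, 1, MPI_INT, 0, comm);

      // 2) non-root ranks allocate, then the payload is broadcast
      if (rank != 0) data.resize(n);
      MPI_Bcast(data.data(), n, MPI_UNSIGNED_LONG, 0, comm);
    }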
...
 /**
...
       for(auto& it2 : it1.second) indGlo.push_back(it1.first) ;

-  // now local rank 0 know which indices to se ed to remote rank 0, but all the server
+  // now local rank 0 know which indices to send to remote rank 0, but all the server
   // must receive the same information. So only the leader rank will sent this.
   // So local rank 0 must broadcast the information to all leader.
...
  * Then we compare hashes between local ranks and remove redundant data corresponding to the same hash.
  */
-void CGridRemoteConnector::computeRedondantRanks(void)
+void CGridRemoteConnector::computeRedondantRanks(bool reverse)
 {
   int commRank ;
...
     }
   }
-  // a hash is now computed for the data block I will send to the server.
-
-  CClientClientDHTTemplate<int>::Index2InfoTypeMap info ;
-
-  map<size_t,int> hashRank ;
-  HashXIOS<int> hashGlobalIndexRank;
-  for(auto& it : hashRanks)
-  {
-    it.second = hashGlobalIndexRank.hashCombine(it.first,it.second) ;
-    info[it.second]=commRank ;
-    hashRank[it.second]=it.first ;
-  }
-
-  // we feed a DHT map with key : hash, value : myrank
-  CClientClientDHTTemplate<int> dataHash(info, localComm_) ;
-  CArray<size_t,1> hashList(hashRank.size()) ;
-
-  int i=0 ;
-  for(auto& it : hashRank) { hashList(i)=it.first ; i++; }
-
-  // now who are the ranks that have the same hash : feed the DHT with my list of hashes
-  dataHash.computeIndexInfoMapping(hashList) ;
-  auto& hashRankList = dataHash.getInfoIndexMap() ;
-
-  for(auto& it : hashRankList)
-  {
-    size_t hash = it.first ;
-    auto& ranks = it.second ;
-
-    bool first=true ;
-    // only the process with the lowest rank gets in charge of sending data to the remote server
-    for(int rank : ranks) if (commRank>rank) first=false ;
-    if (!first) rankToRemove_.insert(hashRank[hash]) ;
-  }
+
+  if (reverse)
+  {
+    set<size_t> hashs ;
+    // easy because local
+    for(auto& hashRank : hashRanks)
+    {
+      if (hashs.count(hashRank.second)==0) hashs.insert(hashRank.second) ;
+      else rankToRemove_.insert(hashRank.first) ;
+    }
+  }
+  else
+  {
+    // a hash is now computed for the data block I will send to the server.
+
+    CClientClientDHTTemplate<int>::Index2InfoTypeMap info ;
+
+    map<size_t,int> hashRank ;
+    HashXIOS<int> hashGlobalIndexRank;
+    for(auto& it : hashRanks)
+    {
+      it.second = hashGlobalIndexRank.hashCombine(it.first,it.second) ;
+      info[it.second]=commRank ;
+      hashRank[it.second]=it.first ;
+    }
+
+    // we feed a DHT map with key : hash, value : myrank
+    CClientClientDHTTemplate<int> dataHash(info, localComm_) ;
+    CArray<size_t,1> hashList(hashRank.size()) ;
+
+    int i=0 ;
+    for(auto& it : hashRank) { hashList(i)=it.first ; i++; }
+
+    // now who are the ranks that have the same hash : feed the DHT with my list of hashes
+    dataHash.computeIndexInfoMapping(hashList) ;
+    auto& hashRankList = dataHash.getInfoIndexMap() ;
+
+    for(auto& it : hashRankList)
+    {
+      size_t hash = it.first ;
+      auto& ranks = it.second ;
+
+      bool first=true ;
+      // only the process with the lowest rank gets in charge of sending data to the remote server
+      for(int rank : ranks) if (commRank>rank) first=false ;
+      if (!first) rankToRemove_.insert(hashRank[hash]) ;
+    }
+  }
 }
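Several of the methods in this changeset funnel through CClientClientDHTTemplate: feed it key-to-info pairs, query it with a list of keys, and read back a map from each key to the list of infos attached to it across all processes. Ignoring the MPI distribution entirely, a single-process stand-in for that contract might look like this (toy data, hypothetical names, illustration only):

    // Toy, single-process stand-in for CClientClientDHTTemplate's
    // index -> info mapping; the real class distributes both the table
    // and the lookup over MPI, this sketch only shows the semantics.
    #include <cstdio>
    #include <cstddef>
    #include <map>
    #include <vector>

    int main()
    {
      // dataInfo: global index -> owning remote rank (the "info")
      std::map<std::size_t, int> dataInfo = {{10, 0}, {11, 0}, {20, 1}};

      // query: which remote ranks own these source indices?
      std::vector<std::size_t> srcIndex = {10, 20};
      std::map<std::size_t, std::vector<int>> returnInfo;
      for (std::size_t ind : srcIndex)
      {
        auto it = dataInfo.find(ind);
        if (it != dataInfo.end()) returnInfo[ind].push_back(it->second);
      }

      for (auto& kv : returnInfo)
        for (int r : kv.second)
          std::printf("global index %zu -> remote rank %d\n", kv.first, r);
      return 0;
    }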
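Finally, the redundancy pass: in reverse mode each process deduplicates locally with a set (the first rank seen for a given hash wins), while in forward mode the DHT gathers, per hash, every local rank holding an identical block, and only the lowest such rank keeps the send. A standalone illustration of the forward "lowest rank wins" rule, with made-up sample data:

    // Standalone illustration of the "lowest rank wins" rule applied in the
    // forward (non-reverse) branch; the DHT only serves to gather, for each
    // hash, the list of local ranks sharing it. Sample data is invented.
    #include <cstdio>
    #include <cstddef>
    #include <map>
    #include <set>
    #include <vector>

    int main()
    {
      int commRank = 2; // pretend we are local rank 2

      // hash -> local ranks holding an identical data block (DHT result)
      std::map<std::size_t, std::vector<int>> hashRankList = {{42, {0, 2}}, {7, {2}}};

      // hash -> remote rank this block was destined to (local bookkeeping)
      std::map<std::size_t, int> hashRank = {{42, 5}, {7, 6}};

      std::set<int> rankToRemove;
      for (auto& it : hashRankList)
      {
        bool first = true; // am I the lowest local rank sharing this hash?
        for (int rank : it.second) if (commRank > rank) first = false;
        if (!first) rankToRemove.insert(hashRank[it.first]); // someone else sends it
      }

      for (int r : rankToRemove) std::printf("drop remote rank %d\n", r); // drops 5
      return 0;
    }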