#include "xios_spl.hpp"
#include "context_client.hpp"
#include "context_server.hpp"
#include "event_client.hpp"
#include "buffer_out.hpp"
#include "buffer_client.hpp"
#include "type.hpp"
#include "context.hpp"
#include "mpi.hpp"
#include "timer.hpp"
#include "cxios.hpp"
#include "server.hpp"
#include "services.hpp"
#include <boost/functional/hash.hpp>
#include <random>
#include <chrono>

namespace xios
{
  /*!
  \param [in] parent Pointer to context on client side
  \param [in] intraComm_ communicator of the client group
  \param [in] interComm_ communicator of the server group
  \param [in] cxtSer Pointer to context on server side (only used in attached mode)
  */
  CContextClient::CContextClient(CContext* parent, MPI_Comm intraComm_, MPI_Comm interComm_, CContext* cxtSer)
    : mapBufferSize_(), parentServer(cxtSer), maxBufferedEvents(4), associatedServer_(nullptr)
  {
    context_ = parent;
    intraComm = intraComm_;
    interComm = interComm_;
    MPI_Comm_rank(intraComm, &clientRank);
    MPI_Comm_size(intraComm, &clientSize);

    int flag;
    MPI_Comm_test_inter(interComm, &flag); // true if interComm is an intercommunicator, i.e. server mode
    if (flag) isAttached_ = false;
    else isAttached_ = true;

    pureOneSided = CXios::getin<bool>("pure_one_sided", false); // pure one-sided communication (for tests)
    if (isAttachedModeEnabled()) pureOneSided = false; // no one-sided communication in attached mode

    if (flag) MPI_Comm_remote_size(interComm, &serverSize);
    else MPI_Comm_size(interComm, &serverSize);

    computeLeader(clientRank, clientSize, serverSize, ranksServerLeader, ranksServerNotLeader);

    if (flag) MPI_Intercomm_merge(interComm_, false, &interCommMerged_);

    MPI_Comm_split(intraComm_, clientRank, clientRank, &commSelf_); // single-process communicator, used for the windows

    auto time = chrono::system_clock::now().time_since_epoch().count();
    std::default_random_engine rd(time); // not reproducible from one run to another
    std::uniform_int_distribution<size_t> dist;
    hashId_ = dist(rd);
    MPI_Bcast(&hashId_, 1, MPI_SIZE_T, 0, intraComm); // broadcast so that all clients of the context share the same hash id

    timeLine = 1;
  }

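  /*!
  Compute the servers for which this client acts as a leader (i.e. sends the event header)
  and those for which it does not. Clients and servers are mapped onto each other in
  contiguous blocks. For example, with clientSize = 5 and serverSize = 2: server 0 is served
  by clients {0,1,2} with client 0 as leader, and server 1 by clients {3,4} with client 3 as leader.
  \param [in] clientRank rank of this client in the client group
  \param [in] clientSize number of clients
  \param [in] serverSize number of servers
  \param [out] rankRecvLeader ranks of the servers for which this client is the leader
  \param [out] rankRecvNotLeader ranks of the servers connected to this client as non-leader
  */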
  void CContextClient::computeLeader(int clientRank, int clientSize, int serverSize,
                                     std::list<int>& rankRecvLeader,
                                     std::list<int>& rankRecvNotLeader)
  {
    if ((0 == clientSize) || (0 == serverSize)) return;

    if (clientSize < serverSize)
    {
      int serverByClient = serverSize / clientSize;
      int remain = serverSize % clientSize;
      int rankStart = serverByClient * clientRank;

      if (clientRank < remain)
      {
        serverByClient++;
        rankStart += clientRank;
      }
      else
        rankStart += remain;

      for (int i = 0; i < serverByClient; i++)
        rankRecvLeader.push_back(rankStart + i);

      rankRecvNotLeader.resize(0);
    }
    else
    {
      int clientByServer = clientSize / serverSize;
      int remain = clientSize % serverSize;

      if (clientRank < (clientByServer + 1) * remain)
      {
        if (clientRank % (clientByServer + 1) == 0)
          rankRecvLeader.push_back(clientRank / (clientByServer + 1));
        else
          rankRecvNotLeader.push_back(clientRank / (clientByServer + 1));
      }
      else
      {
        int rank = clientRank - (clientByServer + 1) * remain;
        if (rank % clientByServer == 0)
          rankRecvLeader.push_back(remain + rank / clientByServer);
        else
          rankRecvNotLeader.push_back(remain + rank / clientByServer);
      }
    }
  }

  /*!
  In attached mode, the current context must be reset to the client context.
  \param [in] event Event sent to the server
  */
  void CContextClient::sendEvent(CEventClient& event)
  {
    list<int> ranks = event.getRanks();

    // ostringstream str ;
    // for(auto& rank : ranks) str<<rank<<" ; " ;
    // info(100)<<"Event "<<timeLine<<" of context "<<context_->getId()<<" for ranks : "<<str.str()<<endl ;

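    // Optional sanity check: all clients must be sending the same event at the same timeline.
    // Each client contributes its typeId/classId/timeLine to an MPI_Allreduce sum; if all
    // clients agree, the sum divided by clientSize gives back the local value, otherwise
    // the events have diverged between clients and an error is raised.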
    if (CXios::checkEventSync)
    {
      int typeId, classId, typeId_in, classId_in;
      long long timeLine_out;
      long long timeLine_in(timeLine);
      typeId_in = event.getTypeId();
      classId_in = event.getClassId();
//      MPI_Allreduce(&timeLine,&timeLine_out, 1, MPI_UINT64_T, MPI_SUM, intraComm) ; // MPI_UINT64_T standardized by MPI 3
      MPI_Allreduce(&timeLine_in, &timeLine_out, 1, MPI_LONG_LONG_INT, MPI_SUM, intraComm);
      MPI_Allreduce(&typeId_in, &typeId, 1, MPI_INT, MPI_SUM, intraComm);
      MPI_Allreduce(&classId_in, &classId, 1, MPI_INT, MPI_SUM, intraComm);
      if (typeId / clientSize != event.getTypeId() || classId / clientSize != event.getClassId() || timeLine_out / clientSize != timeLine)
      {
        ERROR("void CContextClient::sendEvent(CEventClient& event)",
              << "Events are not coherent between clients for timeline = " << timeLine);
      }

      vector<int> servers(serverSize, 0);
      auto ranks = event.getRanks();
      for (auto& rank : ranks) servers[rank] = 1;
      MPI_Allreduce(MPI_IN_PLACE, servers.data(), serverSize, MPI_INT, MPI_SUM, intraComm);
      ostringstream osstr;
      for (int i = 0; i < serverSize; i++) if (servers[i] == 0) osstr << i << " , ";
      if (!osstr.str().empty())
      {
        ERROR("void CContextClient::sendEvent(CEventClient& event)",
              << " Some servers will not receive the message for timeline = " << timeLine << endl
              << "Servers are : " << osstr.str());
      }
    }

    if (!event.isEmpty())
    {
      list<int> sizes = event.getSizes();

      // We force the getBuffers call to be non-blocking on classical servers
      list<CBufferOut*> buffList;
      getBuffers(timeLine, ranks, sizes, buffList);

      event.send(timeLine, sizes, buffList);

      //for (auto itRank = ranks.begin(); itRank != ranks.end(); itRank++) buffers[*itRank]->infoBuffer() ;

      unlockBuffers(ranks);
      checkBuffers(ranks);
    }

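    // In attached mode the client and the server run in the same process, so after the event
    // is buffered the server side must be given a chance to consume it: drain the buffers
    // through the global event loop, then schedule this context with the daemons manager and
    // spin the global event loop until the scheduling has been handled.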
    if (isAttachedModeEnabled()) // couldBuffer is always true in attached mode
    {
      while (checkBuffers(ranks)) callGlobalEventLoop();

      CXios::getDaemonsManager()->scheduleContext(hashId_);
      while (CXios::getDaemonsManager()->isScheduledContext(hashId_)) callGlobalEventLoop();
    }

    timeLine++;
  }

  /*!
  If the client is also a server (attached mode), it should process the incoming event
  right away after sending an event.
  \param [in] ranks list of ranks of the servers connected to this client
  */
  void CContextClient::waitEvent(list<int>& ranks)
  {
    while (checkBuffers(ranks))
    {
      context_->eventLoop();
    }

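    // Synchronize all clients with a non-blocking barrier: keep servicing the daemons
    // manager event loop while polling the barrier request, so that no client blocks
    // in the barrier while others are still processing events.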
    MPI_Request req;
    MPI_Status status;

    MPI_Ibarrier(intraComm, &req);
    int flag = false;

    do
    {
      CXios::getDaemonsManager()->eventLoop();
      MPI_Test(&req, &flag, &status);
    } while (!flag);
  }


  void CContextClient::waitEvent_old(list<int>& ranks)
  {
    parentServer->server->setPendingEvent();
    while (checkBuffers(ranks))
    {
      parentServer->server->listen();
      parentServer->server->checkPendingRequest();
    }

    while (parentServer->server->hasPendingEvent())
    {
      parentServer->server->eventLoop();
    }
  }

  /*!
   * Get buffers for each connection to the servers. This function blocks until there is enough room
   * in the buffers unless it is explicitly requested to be non-blocking.
   *
   * \param [in] timeLine time line of the event which will be sent to servers
   * \param [in] serverList list of ranks of the connected servers
   * \param [in] sizeList size of the message corresponding to each connection
   * \param [out] retBuffers list of buffers that can be used to store an event
   * \param [in] nonBlocking whether this function should be non-blocking
   * \return whether the already allocated buffers could be used
   */
  bool CContextClient::getBuffers(const size_t timeLine, const list<int>& serverList, const list<int>& sizeList, list<CBufferOut*>& retBuffers,
                                  bool nonBlocking /*= false*/)
  {
    list<int>::const_iterator itServer, itSize;
    list<CClientBuffer*> bufferList;
    map<int,CClientBuffer*>::const_iterator it;
    list<CClientBuffer*>::iterator itBuffer;
    bool areBuffersFree;

    for (itServer = serverList.begin(); itServer != serverList.end(); itServer++)
    {
      it = buffers.find(*itServer);
      if (it == buffers.end())
      {
        newBuffer(*itServer);
        it = buffers.find(*itServer);
      }
      bufferList.push_back(it->second);
    }

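    // Poll until every requested buffer has enough free room. The free-space check is
    // throttled: once a check fails, the buffers are not probed again until latency_
    // seconds have elapsed; in between, the event loop keeps running so that the servers
    // can drain the buffers.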
    double lastTimeBuffersNotFree = 0.;
    double time;
    bool doUnlockBuffers;
    CTimer::get("Blocking time").resume();
    do
    {
      areBuffersFree = true;
      doUnlockBuffers = false;
      time = MPI_Wtime();
      if (time - lastTimeBuffersNotFree > latency_)
      {
        for (itBuffer = bufferList.begin(), itSize = sizeList.begin(); itBuffer != bufferList.end(); itBuffer++, itSize++)
        {
          areBuffersFree &= (*itBuffer)->isBufferFree(*itSize);
        }
        if (!areBuffersFree)
        {
          lastTimeBuffersNotFree = time;
          doUnlockBuffers = true;
        }
      }
      else areBuffersFree = false;

      if (!areBuffersFree)
      {
        if (doUnlockBuffers) for (itBuffer = bufferList.begin(); itBuffer != bufferList.end(); itBuffer++) (*itBuffer)->unlockBuffer();
        checkBuffers();

        callGlobalEventLoop();
      }

    } while (!areBuffersFree && !nonBlocking);
    CTimer::get("Blocking time").suspend();

    if (areBuffersFree)
    {
      for (itBuffer = bufferList.begin(), itSize = sizeList.begin(); itBuffer != bufferList.end(); itBuffer++, itSize++)
        retBuffers.push_back((*itBuffer)->getBuffer(timeLine, *itSize));
    }
    return areBuffersFree;
  }

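  // locked_ is a re-entrancy guard: callGlobalEventLoop() sets it while the global event
  // loop is running, so that eventLoop() does not call checkBuffers() again from inside
  // that loop.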
  void CContextClient::eventLoop(void)
  {
    if (!locked_) checkBuffers();
  }

  void CContextClient::callGlobalEventLoop(void)
  {
    locked_ = true;
    context_->globalEventLoop();
    locked_ = false;
  }
  /*!
  Make a new buffer for a certain connection to a server with a specific rank
  \param [in] rank rank of the connected server
  */
  void CContextClient::newBuffer(int rank)
  {
    if (!mapBufferSize_.count(rank))
    {
      error(0) << "WARNING: Unexpected request for buffer to communicate with server " << rank << std::endl;
      mapBufferSize_[rank] = CXios::minBufferSize;
      maxEventSizes[rank] = CXios::minBufferSize;
    }

    CClientBuffer* buffer = buffers[rank] = new CClientBuffer(interComm, rank, mapBufferSize_[rank], maxEventSizes[rank]);
    if (isGrowableBuffer_) buffer->setGrowableBuffer(1.2);
    else buffer->fixBuffer();
    // Notify the server
    CBufferOut* bufOut = buffer->getBuffer(0, 4 * sizeof(MPI_Aint));
    MPI_Aint sendBuff[4];
    sendBuff[0] = hashId_;
    sendBuff[1] = mapBufferSize_[rank];
    sendBuff[2] = buffers[rank]->getWinAddress(0);
    sendBuff[3] = buffers[rank]->getWinAddress(1);
    info(100) << "CContextClient::newBuffer : rank " << rank << " winAddress[0] " << buffers[rank]->getWinAddress(0) << " winAddress[1] " << buffers[rank]->getWinAddress(1) << endl;
    bufOut->put(sendBuff, 4);
    buffer->checkBuffer(true);

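    // For one-sided transfers, build a dedicated 2-process communicator between this client
    // and the target server: an intercommunicator is created over the merged communicator
    // (the server processes follow the clients in interCommMerged_, hence clientSize+rank as
    // remote leader), then merged into winComm_[rank]. Two dynamic RMA windows are created
    // on that communicator and attached to the buffer.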
    // create windows dynamically for one-sided
    if (!isAttachedModeEnabled())
    {
      CTimer::get("create Windows").resume();
      MPI_Comm interComm;
      MPI_Intercomm_create(commSelf_, 0, interCommMerged_, clientSize + rank, 0, &interComm);
      MPI_Intercomm_merge(interComm, false, &winComm_[rank]);
      CXios::getMpiGarbageCollector().registerCommunicator(winComm_[rank]);
      MPI_Comm_free(&interComm);
      windows_[rank].resize(2);

      MPI_Win_create_dynamic(MPI_INFO_NULL, winComm_[rank], &windows_[rank][0]);
      CXios::getMpiGarbageCollector().registerWindow(windows_[rank][0]);

      MPI_Win_create_dynamic(MPI_INFO_NULL, winComm_[rank], &windows_[rank][1]);
      CXios::getMpiGarbageCollector().registerWindow(windows_[rank][1]);

      CTimer::get("create Windows").suspend();
    }
    else
    {
      winComm_[rank] = MPI_COMM_NULL;
      windows_[rank].resize(2);
      windows_[rank][0] = MPI_WIN_NULL;
      windows_[rank][1] = MPI_WIN_NULL;
    }
    buffer->attachWindows(windows_[rank]);
    if (!isAttachedModeEnabled()) MPI_Barrier(winComm_[rank]);

  }

  /*!
  Verify the state of the buffers. A buffer is pending as long as it still holds data
  that has not been delivered to the server.
  \return state of the buffers: pending (true), ready (false)
  */
  bool CContextClient::checkBuffers(void)
  {
    map<int,CClientBuffer*>::iterator itBuff;
    bool pending = false;
    for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++)
      pending |= itBuff->second->checkBuffer(!pureOneSided);
    return pending;
  }

  //! Release all buffers
  void CContextClient::releaseBuffers()
  {
    map<int,CClientBuffer*>::iterator itBuff;
    for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++)
    {
      delete itBuff->second;
    }
    buffers.clear();

    // TODO: it is not yet clear when the windows can be released safely
    //if (!isAttachedModeEnabled())
    //{
    //  for(auto& it : winComm_)
    //  {
    //    int rank = it.first ;
    //    MPI_Win_free(&windows_[rank][0]);
    //    MPI_Win_free(&windows_[rank][1]);
    //    MPI_Comm_free(&winComm_[rank]) ;
    //  }
    //}
  }

  /*!
  Lock the buffers for one-sided communications
  \param [in] ranks list of ranks of the servers to which the client is connected
  */
  void CContextClient::lockBuffers(list<int>& ranks)
  {
    list<int>::iterator it;
    for (it = ranks.begin(); it != ranks.end(); it++) buffers[*it]->lockBuffer();
  }

  /*!
  Unlock the buffers for one-sided communications
  \param [in] ranks list of ranks of the servers to which the client is connected
  */
  void CContextClient::unlockBuffers(list<int>& ranks)
  {
    list<int>::iterator it;
    for (it = ranks.begin(); it != ranks.end(); it++) buffers[*it]->unlockBuffer();
  }

  /*!
  Verify the state of the buffers corresponding to a connection
  \param [in] ranks list of ranks of the servers to which the client is connected
  \return state of the buffers: pending (true), ready (false)
  */
  bool CContextClient::checkBuffers(list<int>& ranks)
  {
    list<int>::iterator it;
    bool pending = false;
    for (it = ranks.begin(); it != ranks.end(); it++) pending |= buffers[*it]->checkBuffer(!pureOneSided);
    return pending;
  }

  /*!
   * Set the buffer size for each connection. Warning: this function is collective.
   *
   * \param [in] mapSize maps the rank of each connected server to the size of the corresponding buffer
   */
  void CContextClient::setBufferSize(const std::map<int,StdSize>& mapSize)
  {
    setFixedBuffer();
    for (auto& it : mapSize)
    {
      // scale the requested size by bufferSizeFactor (plus a 1% margin) and clamp it to [minBufferSize, maxBufferSize]
      size_t size = std::max(CXios::minBufferSize * 1.0, std::min(it.second * CXios::bufferSizeFactor * 1.01, CXios::maxBufferSize * 1.0));
      mapBufferSize_[it.first] = size;
      if (buffers.count(it.first) > 0) buffers[it.first]->fixBufferSize(size);
    }
  }

  /*!
  Get the servers connected to this client for which it is not the leader
  \return ranks of the non-leading servers
  */
  const std::list<int>& CContextClient::getRanksServerNotLeader(void) const
  {
    return ranksServerNotLeader;
  }

  /*!
  Check if this client connects to some server as a non-leader
  \return connected (true), not connected (false)
  */
  bool CContextClient::isServerNotLeader(void) const
  {
    return !ranksServerNotLeader.empty();
  }

  /*!
  Get the leading servers in the group of connected servers
  \return ranks of the leading servers
  */
  const std::list<int>& CContextClient::getRanksServerLeader(void) const
  {
    return ranksServerLeader;
  }

  /*!
  Check if this client connects to some server as a leader
  \return connected (true), not connected (false)
  */
  bool CContextClient::isServerLeader(void) const
  {
    return !ranksServerLeader.empty();
  }

  /*!
   * Finalize the context client and report buffer usage. This function is non-blocking.
   */
  void CContextClient::finalize(void)
  {
    map<int,CClientBuffer*>::iterator itBuff;
    std::list<int>::iterator ItServerLeader;

    bool stop = false;

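    // Count, per server, how many clients are connected to it: each client marks the servers
    // it talks to (including those it leads), and the MPI_Allreduce sums these marks over all
    // clients. The total is pushed with the finalize event so that each server knows how many
    // finalize notifications to expect.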
    int* nbServerConnectionLocal = new int[serverSize];
    int* nbServerConnectionGlobal = new int[serverSize];
    for (int i = 0; i < serverSize; ++i) nbServerConnectionLocal[i] = 0;
    for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++) nbServerConnectionLocal[itBuff->first] = 1;
    for (ItServerLeader = ranksServerLeader.begin(); ItServerLeader != ranksServerLeader.end(); ItServerLeader++) nbServerConnectionLocal[*ItServerLeader] = 1;

    MPI_Allreduce(nbServerConnectionLocal, nbServerConnectionGlobal, serverSize, MPI_INT, MPI_SUM, intraComm);

    CEventClient event(CContext::GetType(), CContext::EVENT_ID_CONTEXT_FINALIZE);
    CMessage msg;

    for (int i = 0; i < serverSize; ++i) if (nbServerConnectionLocal[i] == 1) event.push(i, nbServerConnectionGlobal[i], msg);
    sendEvent(event);

    delete[] nbServerConnectionLocal;
    delete[] nbServerConnectionGlobal;

    CTimer::get("Blocking time").resume();
    checkBuffers();
    CTimer::get("Blocking time").suspend();

    std::map<int,StdSize>::const_iterator itbMap = mapBufferSize_.begin(),
                                          iteMap = mapBufferSize_.end(), itMap;

    StdSize totalBuf = 0;
    for (itMap = itbMap; itMap != iteMap; ++itMap)
    {
      report(10) << " Memory report : Context <" << context_->getId() << "> : client side : memory used for buffer of each connection to server" << endl
                 << " +) To server with rank " << itMap->first << " : " << itMap->second << " bytes " << endl;
      totalBuf += itMap->second;
    }
    report(0) << " Memory report : Context <" << context_->getId() << "> : client side : total memory used for buffer " << totalBuf << " bytes" << endl;
  }

  /*!
  Check whether any buffer still has pending MPI requests
  \return pending (true), none (false)
  */
  bool CContextClient::havePendingRequests(void)
  {
    bool pending = false;
    map<int,CClientBuffer*>::iterator itBuff;
    for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++)
      pending |= itBuff->second->hasPendingRequest();
    return pending;
  }

  bool CContextClient::havePendingRequests(list<int>& ranks)
  {
    list<int>::iterator it;
    bool pending = false;
    for (it = ranks.begin(); it != ranks.end(); it++) pending |= buffers[*it]->hasPendingRequest();
    return pending;
  }

  bool CContextClient::isNotifiedFinalized(void)
  {
    if (isAttachedModeEnabled()) return true;

    bool finalized = true;
    map<int,CClientBuffer*>::iterator itBuff;
    for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++)
      finalized &= itBuff->second->isNotifiedFinalized();
    return finalized;
  }

}