#include "context_server.hpp"
#include "buffer_in.hpp"
#include "type.hpp"
#include "context.hpp"
#include "object_template.hpp"
#include "group_template.hpp"
#include "attribute_template.hpp"
#include "domain.hpp"
#include "field.hpp"
#include "file.hpp"
#include "grid.hpp"
#include "mpi.hpp"
#include "tracer.hpp"
#include "timer.hpp"
#include "cxios.hpp"
#include "event_scheduler.hpp"
#include "server.hpp"
#include "servers_ressource.hpp"
#include "pool_ressource.hpp"
#include "services.hpp"
#include "contexts_manager.hpp"
#include "timeline_events.hpp"

#include <boost/functional/hash.hpp>
#include <random>
#include <chrono>


namespace xios
{
  using namespace std ;

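  //! Build the server-side end of a context: detect attached mode, retrieve the
  //! event scheduler of the associated service, and initialise the communication state.
  //! \param parent     context owning this server
  //! \param intraComm_ communicator gathering the servers of this context
  //! \param interComm_ communicator towards the clients (inter-communicator in server mode, intra-communicator in attached mode)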
  CContextServer::CContextServer(CContext* parent, MPI_Comm intraComm_, MPI_Comm interComm_)
    : eventScheduler_(nullptr), isProcessingEvent_(false), associatedClient_(nullptr)
  {
    context=parent;
    intraComm=intraComm_;
    MPI_Comm_size(intraComm,&intraCommSize);
    MPI_Comm_rank(intraComm,&intraCommRank);

    interComm=interComm_;
    int flag;
    MPI_Comm_test_inter(interComm,&flag);

    if (flag) attachedMode=false ;
    else attachedMode=true ;

    if (flag) MPI_Comm_remote_size(interComm,&clientSize_);
    else MPI_Comm_size(interComm,&clientSize_);


    SRegisterContextInfo contextInfo ;
    CXios::getContextsManager()->getContextInfo(context->getId(), contextInfo, intraComm) ;

//    if (contextInfo.serviceType != CServicesManager::CLIENT) // we must have an event scheduler => to be retrieved from the associated services
//    {
      //if (!isAttachedModeEnabled()) eventScheduler_=CXios::getPoolRessource()->getService(contextInfo.serviceId,contextInfo.partitionId)->getEventScheduler() ;
    eventScheduler_=CXios::getPoolRessource()->getService(contextInfo.serviceId,contextInfo.partitionId)->getEventScheduler() ;
    MPI_Comm_dup(intraComm, &processEventBarrier_) ;
//    }


    currentTimeLine=1;
    scheduled=false;
    finished=false;

    // generate a unique hash for the server
    auto time=chrono::system_clock::now().time_since_epoch().count() ;
    std::default_random_engine rd(time); // not reproducible from one run to another
    std::uniform_int_distribution<size_t> dist;
    hashId=dist(rd) ;
    MPI_Bcast(&hashId,1,MPI_SIZE_T,0,intraComm) ; // broadcast to all servers of the context


    if (!isAttachedModeEnabled()) MPI_Intercomm_merge(interComm_,true,&interCommMerged_) ;
    MPI_Comm_split(intraComm_, intraCommRank, intraCommRank, &commSelf_) ; // self communicator, used for the one-sided windows

    itLastTimeLine=lastTimeLine.begin() ;

    pureOneSided=CXios::getin<bool>("pure_one_sided",false); // pure one-sided communication (for testing)
    if (isAttachedModeEnabled()) pureOneSided=false ; // no one-sided communication in attached mode

  }


  //! Is attached mode in use?
  //! \return true if attached mode is used, false otherwise
  bool CContextServer::isAttachedModeEnabled() const
  {
    return attachedMode ;
  }

  void CContextServer::setPendingEvent(void)
  {
    pendingEvent=true;
  }

  bool CContextServer::hasPendingEvent(void)
  {
    return pendingEvent;
  }

  bool CContextServer::hasFinished(void)
  {
    return finished;
  }

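  //! Main server loop: probe for incoming client messages, progress pending
  //! receives, and optionally process complete events.
  //! \param enableEventsProcessing when false, only listening and request checking are done
  //! \return true once the context has been finalized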
  bool CContextServer::eventLoop(bool enableEventsProcessing /*= true*/)
  {
    CTimer::get("listen request").resume();
    listen();
    CTimer::get("listen request").suspend();
    CTimer::get("check pending request").resume();
    checkPendingRequest();
    checkPendingProbe() ;
    CTimer::get("check pending request").suspend();
    CTimer::get("check event process").resume();
    if (enableEventsProcessing) processEvents();
    CTimer::get("check event process").suspend();
    return finished;
  }

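  //! Non-blocking probe for a message from any client on tag 20; if one is found,
  //! hand it over to listenPendingRequest().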
  void CContextServer::listen(void)
  {
    int rank;
    int flag;
    int count;
    char * addr;
    MPI_Status status;
    MPI_Message message ;
    map<int,CServerBuffer*>::iterator it;
    bool okLoop;

    traceOff();
    MPI_Improbe(MPI_ANY_SOURCE, 20,interComm,&flag,&message, &status);
    traceOn();
    if (flag==true) listenPendingRequest(message, status) ;
  }

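  //! Handle a probed client message. The first message from a client carries its
  //! buffer size and the addresses of its two one-sided windows: the corresponding
  //! CServerBuffer is allocated here. Later messages are queued in pendingProbe
  //! until buffer space becomes available.
  //! \return true if the message initialised a new buffer, false if it was queued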
  bool CContextServer::listenPendingRequest(MPI_Message &message, MPI_Status& status)
  {
    int count;
    char * addr;
    map<int,CServerBuffer*>::iterator it;
    int rank=status.MPI_SOURCE ;

    it=buffers.find(rank);
    if (it==buffers.end()) // Receive the buffer size and allocate the buffer
    {
      MPI_Aint recvBuff[4] ;
      MPI_Mrecv(recvBuff, 4, MPI_AINT, &message, &status);
      remoteHashId_ = recvBuff[0] ;
      StdSize buffSize = recvBuff[1];
      vector<MPI_Aint> winAdress(2) ;
      winAdress[0]=recvBuff[2] ; winAdress[1]=recvBuff[3] ;
      mapBufferSize_.insert(std::make_pair(rank, buffSize));

      // create windows dynamically for one-sided communication
      if (!isAttachedModeEnabled())
      {
        CTimer::get("create Windows").resume() ;
        MPI_Comm interComm ;
        MPI_Intercomm_create(commSelf_, 0, interCommMerged_, rank, 0 , &interComm) ;
        MPI_Intercomm_merge(interComm, true, &winComm_[rank]) ;
        windows_[rank].resize(2) ;
        MPI_Win_create_dynamic(MPI_INFO_NULL, winComm_[rank], &windows_[rank][0]);
        MPI_Win_create_dynamic(MPI_INFO_NULL, winComm_[rank], &windows_[rank][1]);
        CTimer::get("create Windows").suspend() ;
        MPI_Barrier(winComm_[rank]) ;
      }
      else
      {
        winComm_[rank] = MPI_COMM_NULL ;
        windows_[rank].resize(2) ;
        windows_[rank][0] = MPI_WIN_NULL ;
        windows_[rank][1] = MPI_WIN_NULL ;
      }

      it=(buffers.insert(pair<int,CServerBuffer*>(rank,new CServerBuffer(windows_[rank], winAdress, 0, buffSize)))).first;
      lastTimeLine[rank]=0 ;
      itLastTimeLine=lastTimeLine.begin() ;

      return true;
    }
    else
    {
      std::pair<MPI_Message,MPI_Status> mypair(message,status) ;
      pendingProbe[rank].push_back(mypair) ;
      return false;
    }
  }

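  //! For each client with queued probed messages and no receive already in flight,
  //! post a non-blocking receive (MPI_Imrecv) into its server buffer as soon as the
  //! buffer has enough free space.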
  void CContextServer::checkPendingProbe(void)
  {

    list<int> recvProbe ;
    list<int>::iterator itRecv ;
    map<int, list<std::pair<MPI_Message,MPI_Status> > >::iterator itProbe;

    for(itProbe=pendingProbe.begin();itProbe!=pendingProbe.end();itProbe++)
    {
      int rank=itProbe->first ;
      if (pendingRequest.count(rank)==0)
      {
        MPI_Message& message = itProbe->second.front().first ;
        MPI_Status& status = itProbe->second.front().second ;
        int count ;
        MPI_Get_count(&status,MPI_CHAR,&count);
        map<int,CServerBuffer*>::iterator it = buffers.find(rank);
        if (it->second->isBufferFree(count))
        {
          char * addr;
          addr=(char*)it->second->getBuffer(count);
          MPI_Imrecv(addr,count,MPI_CHAR, &message, &pendingRequest[rank]);
          bufferRequest[rank]=addr;
          recvProbe.push_back(rank) ;
          itProbe->second.pop_front() ;
        }
      }
    }

    for(itRecv=recvProbe.begin(); itRecv!=recvProbe.end(); itRecv++) if (pendingProbe[*itRecv].empty()) pendingProbe.erase(*itRecv) ;
  }


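  //! Test the pending non-blocking receives; for each completed one, update the
  //! buffer windows and process the received request, then clear its bookkeeping.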
  void CContextServer::checkPendingRequest(void)
  {
    map<int,MPI_Request>::iterator it;
    list<int> recvRequest;
    list<int>::iterator itRecv;
    int rank;
    int flag;
    int count;
    MPI_Status status;

    if (!pendingRequest.empty()) CTimer::get("receiving requests").resume();
    else CTimer::get("receiving requests").suspend();

    for(it=pendingRequest.begin();it!=pendingRequest.end();it++)
    {
      rank=it->first;
      traceOff();
      MPI_Test(& it->second, &flag, &status);
      traceOn();
      if (flag==true)
      {
        buffers[rank]->updateCurrentWindows() ;
        recvRequest.push_back(rank);
        MPI_Get_count(&status,MPI_CHAR,&count);
        processRequest(rank,bufferRequest[rank],count);
      }
    }

    for(itRecv=recvRequest.begin();itRecv!=recvRequest.end();itRecv++)
    {
      pendingRequest.erase(*itRecv);
      bufferRequest.erase(*itRecv);
    }
  }

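  //! In one-sided (server-pull) mode, try to fetch the next event directly from a
  //! client window when that client is behind the requested time line and has no
  //! receive in flight. Does nothing in attached mode.
  //! \param timeLine time line of the event currently awaited by the server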
  void CContextServer::getBufferFromClient(size_t timeLine)
  {
    CTimer::get("CContextServer::getBufferFromClient").resume() ;
    if (!isAttachedModeEnabled()) // one-sided communication is deactivated in attached mode
    {
      int rank ;
      char *buffer ;
      size_t count ;

      if (itLastTimeLine==lastTimeLine.end()) itLastTimeLine=lastTimeLine.begin() ;
      for(;itLastTimeLine!=lastTimeLine.end();++itLastTimeLine)
      {
        rank=itLastTimeLine->first ;
        if (itLastTimeLine->second < timeLine && pendingRequest.count(rank)==0 && buffers[rank]->isBufferEmpty())
        {
          if (buffers[rank]->getBufferFromClient(timeLine, buffer, count)) processRequest(rank, buffer, count);
          if (count >= 0) ++itLastTimeLine ;
          break ;
        }
      }
    }
    CTimer::get("CContextServer::getBufferFromClient").suspend() ;
  }


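  //! Unpack a received buffer: each record carries its size and a time-line id.
  //! Special time-line ids signal buffer resizing; regular records are pushed into
  //! the event matching their time line.
  //! \param rank  rank of the sending client
  //! \param buff  address of the received data
  //! \param count number of bytes received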
  void CContextServer::processRequest(int rank, char* buff, int count)
  {

    CBufferIn buffer(buff,count);
    char *startBuffer, *endBuffer;
    int size, offset;
    size_t timeLine=0;
    map<size_t,CEventServer*>::iterator it;


    CTimer::get("Process request").resume();
    while(count>0)
    {
      char* startBuffer=(char*)buffer.ptr();
      CBufferIn newBuffer(startBuffer,buffer.remain());
      newBuffer>>size>>timeLine;

      if (timeLine==timelineEventNotifyChangeBufferSize)
      {
        buffers[rank]->notifyBufferResizing() ;
        buffers[rank]->updateCurrentWindows() ;
        info(100)<<"Receive NotifyChangeBufferSize from client rank "<<rank<<endl ;
      }
      else if (timeLine==timelineEventChangeBufferSize)
      {
        size_t newSize ;
        vector<MPI_Aint> winAdress(2) ;
        newBuffer>>newSize>>winAdress[0]>>winAdress[1] ;
        buffers.erase(rank) ;
        buffers.insert(pair<int,CServerBuffer*>(rank,new CServerBuffer(windows_[rank], winAdress, 0, newSize)));
        info(100)<<"Receive ChangeBufferSize from client rank "<<rank<<" newSize : "<<newSize<<" Address : "<<winAdress[0]<<" & "<<winAdress[1]<<endl ;
      }
      else
      {
        info(100)<<"Receive standard event from client rank "<<rank<<" with timeLine : "<<timeLine<<endl ;
        it=events.find(timeLine);
        if (it==events.end()) it=events.insert(pair<int,CEventServer*>(timeLine,new CEventServer(this))).first;
        it->second->push(rank,buffers[rank],startBuffer,size);
        if (timeLine>0) lastTimeLine[rank]=timeLine ;
      }
      buffer.advance(size);
      count=buffer.remain();
    }

    CTimer::get("Process request").suspend();
  }

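  //! Process the event associated with currentTimeLine once it is complete: the
  //! event is first registered with the event scheduler (except in attached mode),
  //! then all servers of the context synchronise through a non-blocking barrier
  //! before the event is dispatched. When no receive is in flight, missing data may
  //! be pulled directly from the client windows.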
  void CContextServer::processEvents(void)
  {
    map<size_t,CEventServer*>::iterator it;
    CEventServer* event;

//    if (context->isProcessingEvent()) return ;
    if (isProcessingEvent_) return ;
    if (isAttachedModeEnabled())
      if (!CXios::getDaemonsManager()->isScheduledContext(remoteHashId_)) return ;

    it=events.find(currentTimeLine);
    if (it!=events.end())
    {
      event=it->second;

      if (event->isFull())
      {
        if (!scheduled && !isAttachedModeEnabled()) // Skip event scheduling for attached mode and reception on client side
        {
          eventScheduler_->registerEvent(currentTimeLine,hashId);
          scheduled=true;
        }
        else if (isAttachedModeEnabled() || eventScheduler_->queryEvent(currentTimeLine,hashId) )
        {

          if (!eventScheduled_)
          {
            MPI_Ibarrier(processEventBarrier_,&processEventRequest_) ;
            eventScheduled_=true ;
            return ;
          }
          else
          {
            MPI_Status status ;
            int flag ;
            MPI_Test(&processEventRequest_, &flag, &status) ;
            if (!flag) return ;
            eventScheduled_=false ;
          }

          if (!isAttachedModeEnabled()) eventScheduler_->popEvent() ;
          //MPI_Barrier(intraComm) ;
          // When using attached mode, synchronise the processes to avoid different events being scheduled by different processes
          // The best way to properly solve this problem would be to use the event scheduler also in attached mode
          // for now just set up an MPI barrier
          //ym to be checked later
//          if (!eventScheduler_ && CXios::isServer) MPI_Barrier(intraComm) ;

//          context->setProcessingEvent() ;
          isProcessingEvent_=true ;
          CTimer::get("Process events").resume();
          info(100)<<"Received Event "<<currentTimeLine<<" of class "<<event->classId<<" of type "<<event->type<<endl ;
          dispatchEvent(*event);
          CTimer::get("Process events").suspend();
          isProcessingEvent_=false ;
//          context->unsetProcessingEvent() ;
          pendingEvent=false;
          delete event;
          events.erase(it);
          currentTimeLine++;
          scheduled = false;
          if (isAttachedModeEnabled()) CXios::getDaemonsManager()->unscheduleContext() ;
        }
      }
      else if (pendingRequest.empty()) getBufferFromClient(currentTimeLine) ;
    }
    else if (pendingRequest.empty()) getBufferFromClient(currentTimeLine) ; // if pure one-sided, check the buffer even if no event is recorded at the current time line
  }

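  //! Destructor: free the per-client server buffers.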
  CContextServer::~CContextServer()
  {
    map<int,CServerBuffer*>::iterator it;
    for(it=buffers.begin();it!=buffers.end();++it) delete it->second;
  }

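  //! Delete all per-client buffers and free the associated one-sided windows.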
  void CContextServer::releaseBuffers()
  {
    for(auto it=buffers.begin();it!=buffers.end();++it) delete it->second ;
    buffers.clear() ;
    freeWindows() ;
  }

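  //! Free the MPI windows and communicators created for one-sided transfers
  //! (nothing to free in attached mode, where no window was created).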
  void CContextServer::freeWindows()
  {
    if (!isAttachedModeEnabled())
    {
      for(auto& it : winComm_)
      {
        int rank = it.first ;
        MPI_Win_free(&windows_[rank][0]);
        MPI_Win_free(&windows_[rank][1]);
        MPI_Comm_free(&winComm_[rank]) ;
      }
    }
  }

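  //! Notify every connected client, through its server buffer, of the context finalization.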
  void CContextServer::notifyClientsFinalize(void)
  {
    for(auto it=buffers.begin();it!=buffers.end();++it)
    {
      it->second->notifyClientFinalize() ;
    }
  }

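  //! Dispatch a complete event to the static handler of the class identified by
  //! event.classId. A CContext finalize event additionally triggers the context
  //! finalization, the window release, and the buffer-memory report.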
  void CContextServer::dispatchEvent(CEventServer& event)
  {
    string contextName;
    string buff;
    int MsgSize;
    int rank;
    list<CEventServer::SSubEvent>::iterator it;
    StdString ctxId = context->getId();
    CContext::setCurrent(ctxId);
    StdSize totalBuf = 0;

    if (event.classId==CContext::GetType() && event.type==CContext::EVENT_ID_CONTEXT_FINALIZE)
    {
      finished=true;
      info(20)<<" CContextServer: Receive context <"<<context->getId()<<"> finalize."<<endl;
      notifyClientsFinalize() ;
      CTimer::get("receiving requests").suspend();
      context->finalize();
      freeWindows() ;

      std::map<int, StdSize>::const_iterator itbMap = mapBufferSize_.begin(),
                                             iteMap = mapBufferSize_.end(), itMap;
      for (itMap = itbMap; itMap != iteMap; ++itMap)
      {
        rank = itMap->first;
        report(10)<< " Memory report : Context <"<<ctxId<<"> : server side : memory used for buffer of each connection to client" << endl
                  << "  +) With client of rank " << rank << " : " << itMap->second << " bytes " << endl;
        totalBuf += itMap->second;
      }
      report(0)<< " Memory report : Context <"<<ctxId<<"> : server side : total memory used for buffer "<<totalBuf<<" bytes"<<endl;
    }
    else if (event.classId==CContext::GetType()) CContext::dispatchEvent(event);
    else if (event.classId==CContextGroup::GetType()) CContextGroup::dispatchEvent(event);
    else if (event.classId==CCalendarWrapper::GetType()) CCalendarWrapper::dispatchEvent(event);
    else if (event.classId==CDomain::GetType()) CDomain::dispatchEvent(event);
    else if (event.classId==CDomainGroup::GetType()) CDomainGroup::dispatchEvent(event);
    else if (event.classId==CAxis::GetType()) CAxis::dispatchEvent(event);
    else if (event.classId==CAxisGroup::GetType()) CAxisGroup::dispatchEvent(event);
    else if (event.classId==CScalar::GetType()) CScalar::dispatchEvent(event);
    else if (event.classId==CScalarGroup::GetType()) CScalarGroup::dispatchEvent(event);
    else if (event.classId==CGrid::GetType()) CGrid::dispatchEvent(event);
    else if (event.classId==CGridGroup::GetType()) CGridGroup::dispatchEvent(event);
    else if (event.classId==CField::GetType()) CField::dispatchEvent(event);
    else if (event.classId==CFieldGroup::GetType()) CFieldGroup::dispatchEvent(event);
    else if (event.classId==CFile::GetType()) CFile::dispatchEvent(event);
    else if (event.classId==CFileGroup::GetType()) CFileGroup::dispatchEvent(event);
    else if (event.classId==CVariable::GetType()) CVariable::dispatchEvent(event);
    else
    {
      ERROR("void CContextServer::dispatchEvent(CEventServer& event)",<<" Bad event class Id"<<endl);
    }
  }

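  //! Return whether the event must be processed collectively by all servers of the
  //! context; only field events can be non-collective.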
  bool CContextServer::isCollectiveEvent(CEventServer& event)
  {
    if (event.classId==CField::GetType()) return CField::isCollectiveEvent(event);
    else return true ;
  }
}