Changeset 2671


Timestamp:
10/29/24 15:25:33
Author:
ymipsl
Message:

Suffix all data members of the CContextServer/CContextClient classes with "_"

YM

Location:
XIOS3/dev/XIOS_NOTIFICATIONS_MANAGER/src/transport
Files:
16 edited
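
The change is a pure renaming pass: every data member of the context client/server classes gains a trailing underscore so that members stand out from local variables and parameters. A minimal sketch of the resulting convention (an illustrative class, not an excerpt from the XIOS sources; the XIOS StdSize type is replaced by std::size_t):

    // Illustrative only: mirrors some of the member names touched by r2671,
    // with XIOS-specific types replaced by standard ones.
    #include <cstddef>
    #include <map>

    class CLegacyContextClientSketch
    {
    private:
      std::size_t timeLine_ = 1;                  // was: timeLine
      bool pureOneSided_ = false;                 // was: pureOneSided
      std::map<int, std::size_t> maxEventSizes_;  // was: maxEventSizes
      std::size_t maxBufferedEvents_ = 4;         // was: maxBufferedEvents
    };

    int main() { return 0; }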

  • XIOS3/dev/XIOS_NOTIFICATIONS_MANAGER/src/transport/legacy_context_client.cpp

    r2669 r2671  
    2929    CLegacyContextClient::CLegacyContextClient(CContext* parent, MPI_Comm intraComm, MPI_Comm interComm, CContext* cxtSer) 
    3030                         : CContextClient(parent, intraComm, interComm, cxtSer), 
    31                            mapBufferSize_(),  maxBufferedEvents(4) 
     31                           mapBufferSize_(),  maxBufferedEvents_(4) 
    3232    { 
    33       pureOneSided=CXios::getin<bool>("pure_one_sided",false); // pure one sided communication (for test) 
     33      pureOneSided_=CXios::getin<bool>("pure_one_sided",false); // pure one sided communication (for test) 
    3434      xios::MPI_Intercomm_merge(interComm_,false, &interCommMerged_) ; 
    3535      CXios::getMpiGarbageCollector().registerCommunicator(interCommMerged_) ; 
     
    3737      CXios::getMpiGarbageCollector().registerCommunicator(commSelf_) ; 
    3838      eventScheduler_ = parent->getEventScheduler() ;   
    39       timeLine = 1; 
     39      timeLine_ = 1; 
    4040    } 
    4141 
     
    4949//      ostringstream str ; 
    5050//      for(auto& rank : ranks) str<<rank<<" ; " ; 
    51 //      info(100)<<"Event "<<timeLine<<" of context "<<context_->getId()<<"  for ranks : "<<str.str()<<endl ; 
     51//      info(100)<<"Event "<<timeLine_<<" of context "<<context_->getId()<<"  for ranks : "<<str.str()<<endl ; 
    5252 
    5353      if (CXios::checkEventSync) 
     
    5555        int typeId, classId, typeId_in, classId_in; 
    5656        long long timeLine_out; 
    57         long long timeLine_in( timeLine ); 
     57        long long timeLine_in( timeLine_ ); 
    5858        typeId_in=event.getTypeId() ; 
    5959        classId_in=event.getClassId() ; 
    60 //        MPI_Allreduce(&timeLine,&timeLine_out, 1, MPI_UINT64_T, MPI_SUM, intraComm_) ; // MPI_UINT64_T standardized by MPI 3 
     60//        MPI_Allreduce(&timeLine_,&timeLine_out, 1, MPI_UINT64_T, MPI_SUM, intraComm_) ; // MPI_UINT64_T standardized by MPI 3 
    6161        MPI_Allreduce(&timeLine_in,&timeLine_out, 1, MPI_LONG_LONG_INT, MPI_SUM, intraComm_) ;  
    6262        MPI_Allreduce(&typeId_in,&typeId, 1, MPI_INT, MPI_SUM, intraComm_) ; 
    6363        MPI_Allreduce(&classId_in,&classId, 1, MPI_INT, MPI_SUM, intraComm_) ; 
    64         if (typeId/clientSize_!=event.getTypeId() || classId/clientSize_!=event.getClassId() || timeLine_out/clientSize_!=timeLine) 
     64        if (typeId/clientSize_!=event.getTypeId() || classId/clientSize_!=event.getClassId() || timeLine_out/clientSize_!=timeLine_) 
    6565        { 
    6666           ERROR("void CLegacyContextClient::sendEvent(CEventClient& event)", 
    67                << "Event are not coherent between client for timeline = "<<timeLine); 
     67               << "Event are not coherent between client for timeline = "<<timeLine_); 
    6868        } 
    6969         
     
    7777        { 
    7878          ERROR("void CLegacyContextClient::sendEvent(CEventClient& event)", 
    79                  <<" Some servers will not receive the message for timeline = "<<timeLine<<endl 
     79                 <<" Some servers will not receive the message for timeline = "<<timeLine_<<endl 
    8080                 <<"Servers are : "<<osstr.str()) ; 
    8181        } 
     
    9090         // We force the getBuffers call to be non-blocking on classical servers 
    9191        list<CBufferOut*> buffList; 
    92         getBuffers(timeLine, ranks, sizes, buffList) ; 
    93  
    94         event.send(timeLine, sizes, buffList); 
     92        getBuffers(timeLine_, ranks, sizes, buffList) ; 
     93 
     94        event.send(timeLine_, sizes, buffList); 
    9595        
    9696        //for (auto itRank = ranks.begin(); itRank != ranks.end(); itRank++) buffers[*itRank]->infoBuffer() ; 
     
    102102       
    103103      synchronize() ; 
    104       timeLine++; 
     104      timeLine_++; 
    105105    } 
    106106 
     
    297297        error(0) << "WARNING: Unexpected request for buffer to communicate with server " << rank << std::endl; 
    298298        mapBufferSize_[rank] = CXios::minBufferSize; 
    299         maxEventSizes[rank] = CXios::minBufferSize; 
     299        maxEventSizes_[rank] = CXios::minBufferSize; 
    300300      } 
    301301      bool hasWindows = true ; 
     
    328328      bool pending = false; 
    329329      for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++) 
    330         pending |= itBuff->second->checkBuffer(!pureOneSided); 
     330        pending |= itBuff->second->checkBuffer(!pureOneSided_); 
    331331      return pending; 
    332332   } 
     
    378378      list<int>::iterator it; 
    379379      bool pending = false; 
    380       for (it = ranks.begin(); it != ranks.end(); it++) pending |= buffers[*it]->checkBuffer(!pureOneSided); 
     380      for (it = ranks.begin(); it != ranks.end(); it++) pending |= buffers[*it]->checkBuffer(!pureOneSided_); 
    381381      return pending; 
    382382   } 
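
The checkEventSync block in sendEvent() above relies on a sum-and-divide consistency test: each client contributes its local timeline, type id and class id, the values are summed over intraComm_ with MPI_Allreduce, and the sum divided by clientSize_ reproduces the local value only when all clients passed the same event. A minimal standalone sketch of that idea (assuming an MPI environment; the variable names are illustrative, not taken from XIOS):

    // Sum-and-divide coherence check, reduced to a single value.
    #include <mpi.h>
    #include <iostream>

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      int size, rank;
      MPI_Comm_size(MPI_COMM_WORLD, &size);
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);

      long long timeLineLocal = 42;   // stands in for timeLine_ on this rank
      long long timeLineSum = 0;
      MPI_Allreduce(&timeLineLocal, &timeLineSum, 1, MPI_LONG_LONG_INT, MPI_SUM, MPI_COMM_WORLD);

      if (timeLineSum / size != timeLineLocal)
        std::cerr << "Rank " << rank << ": event timelines are not coherent" << std::endl;

      MPI_Finalize();
      return 0;
    }

Note that this is a cheap necessary condition rather than a proof of equality: in principle, differing per-rank values could still sum to size times the local value.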
  • XIOS3/dev/XIOS_NOTIFICATIONS_MANAGER/src/transport/legacy_context_client.hpp

    r2547 r2671  
    6262 
    6363 
    64       size_t timeLine; //!< Timeline of each event 
     64      size_t timeLine_; //!< Timeline of each event 
    6565 
    6666      MPI_Comm interCommMerged_; //!< Communicator of the client group + server group (intraCommunicator) needed for one sided communication. 
     
    6969      map<int,CClientBuffer*> buffers; //!< Buffers for connection to servers 
    7070 
    71       bool pureOneSided ; //!< if true, client will communicated with servers only trough one sided communication. Otherwise the hybrid mode P2P /One sided is used. 
     71      bool pureOneSided_ ; //!< if true, client will communicated with servers only trough one sided communication. Otherwise the hybrid mode P2P /One sided is used. 
    7272       
    7373      //! Mapping of server and buffer size for each connection to server 
    7474      std::map<int,StdSize> mapBufferSize_; 
    7575      //! Maximum event sizes estimated for each connection to server 
    76       std::map<int,StdSize> maxEventSizes; 
     76      std::map<int,StdSize> maxEventSizes_; 
    7777      //! Maximum number of events that can be buffered 
    78       StdSize maxBufferedEvents; 
     78      StdSize maxBufferedEvents_; 
    7979 
    8080      std::map<int, MPI_Comm> winComm_ ; //! Window communicators 
  • XIOS3/dev/XIOS_NOTIFICATIONS_MANAGER/src/transport/legacy_context_client_v2.cpp

    r2669 r2671  
    2929    CLegacyContextClientV2::CLegacyContextClientV2(CContext* parent, MPI_Comm intraComm, MPI_Comm interComm, CContext* cxtSer) 
    3030                         : CContextClient(parent, intraComm, interComm, cxtSer), 
    31                            mapBufferSize_(),  maxBufferedEvents(4) 
     31                           mapBufferSize_(),  maxBufferedEvents_(4) 
    3232    { 
    33       pureOneSided=CXios::getin<bool>("pure_one_sided",false); // pure one sided communication (for test) 
     33      pureOneSided_=CXios::getin<bool>("pure_one_sided",false); // pure one sided communication (for test) 
    3434      xios::MPI_Intercomm_merge(interComm_,false, &interCommMerged_) ; 
    3535      CXios::getMpiGarbageCollector().registerCommunicator(interCommMerged_) ; 
     
    4949 
    5050      eventScheduler_ = parent->getEventScheduler() ;   
    51       timeLine = 1; 
     51      timeLine_ = 1; 
    5252    } 
    5353 
     
    6363        int typeId, classId, typeId_in, classId_in; 
    6464        long long timeLine_out; 
    65         long long timeLine_in( timeLine ); 
     65        long long timeLine_in( timeLine_ ); 
    6666        typeId_in=event.getTypeId() ; 
    6767        classId_in=event.getClassId() ; 
    68 //        MPI_Allreduce(&timeLine,&timeLine_out, 1, MPI_UINT64_T, MPI_SUM, intraComm_) ; // MPI_UINT64_T standardized by MPI 3 
     68//        MPI_Allreduce(&timeLine_,&timeLine_out, 1, MPI_UINT64_T, MPI_SUM, intraComm_) ; // MPI_UINT64_T standardized by MPI 3 
    6969        MPI_Allreduce(&timeLine_in,&timeLine_out, 1, MPI_LONG_LONG_INT, MPI_SUM, intraComm_) ;  
    7070        MPI_Allreduce(&typeId_in,&typeId, 1, MPI_INT, MPI_SUM, intraComm_) ; 
    7171        MPI_Allreduce(&classId_in,&classId, 1, MPI_INT, MPI_SUM, intraComm_) ; 
    72         if (typeId/clientSize_!=event.getTypeId() || classId/clientSize_!=event.getClassId() || timeLine_out/clientSize_!=timeLine) 
     72        if (typeId/clientSize_!=event.getTypeId() || classId/clientSize_!=event.getClassId() || timeLine_out/clientSize_!=timeLine_) 
    7373        { 
    7474           ERROR("void CLegacyContextClientV2::sendEvent(CEventClient& event)", 
    75                << "Event are not coherent between client for timeline = "<<timeLine); 
     75               << "Event are not coherent between client for timeline = "<<timeLine_); 
    7676        } 
    7777         
     
    8585        { 
    8686          ERROR("void CLegacyContextClientV2::sendEvent(CEventClient& event)", 
    87                  <<" Some servers will not receive the message for timeline = "<<timeLine<<endl 
     87                 <<" Some servers will not receive the message for timeline = "<<timeLine_<<endl 
    8888                 <<"Servers are : "<<osstr.str()) ; 
    8989        } 
     
    9898         // We force the getBuffers call to be non-blocking on classical servers 
    9999        list<CBufferOut*> buffList; 
    100         getBuffers(timeLine, ranks, sizes, buffList) ; 
    101  
    102         event.send(timeLine, sizes, buffList); 
     100        getBuffers(timeLine_, ranks, sizes, buffList) ; 
     101 
     102        event.send(timeLine_, sizes, buffList); 
    103103        
    104104        unlockBuffers(ranks) ; 
     
    108108       
    109109      synchronize() ; 
    110       timeLine++; 
     110      timeLine_++; 
    111111    } 
    112112 
     
    221221        error(0) << "WARNING: Unexpected request for buffer to communicate with server " << rank << std::endl; 
    222222        mapBufferSize_[rank] = CXios::minBufferSize; 
    223         maxEventSizes[rank] = CXios::minBufferSize; 
     223        maxEventSizes_[rank] = CXios::minBufferSize; 
    224224      } 
    225225      bool hasWindows = true ; 
     
    256256      bool pending = false; 
    257257      for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++) 
    258         pending |= itBuff->second->checkBuffer(!pureOneSided); 
     258        pending |= itBuff->second->checkBuffer(!pureOneSided_); 
    259259      return pending; 
    260260   } 
     
    306306      list<int>::iterator it; 
    307307      bool pending = false; 
    308       for (it = ranks.begin(); it != ranks.end(); it++) pending |= buffers[*it]->checkBuffer(!pureOneSided); 
     308      for (it = ranks.begin(); it != ranks.end(); it++) pending |= buffers[*it]->checkBuffer(!pureOneSided_); 
    309309      return pending; 
    310310   } 
  • XIOS3/dev/XIOS_NOTIFICATIONS_MANAGER/src/transport/legacy_context_client_v2.hpp

    r2667 r2671  
    6262 
    6363 
    64       size_t timeLine; //!< Timeline of each event 
     64      size_t timeLine_; //!< Timeline of each event 
    6565 
    6666      MPI_Comm interCommMerged_; //!< Communicator of the client group + server group (intraCommunicator) needed for one sided communication. 
     
    6969      map<int,CLegacyClientBufferV2*> buffers; //!< Buffers for connection to servers 
    7070 
    71       bool pureOneSided ; //!< if true, client will communicated with servers only trough one sided communication. Otherwise the hybrid mode P2P /One sided is used. 
     71      bool pureOneSided_ ; //!< if true, client will communicated with servers only trough one sided communication. Otherwise the hybrid mode P2P /One sided is used. 
    7272       
    7373      //! Mapping of server and buffer size for each connection to server 
    7474      std::map<int,StdSize> mapBufferSize_; 
    7575      //! Maximum event sizes estimated for each connection to server 
    76       std::map<int,StdSize> maxEventSizes; 
     76      std::map<int,StdSize> maxEventSizes_; 
    7777      //! Maximum number of events that can be buffered 
    78       StdSize maxBufferedEvents; 
     78      StdSize maxBufferedEvents_; 
    7979 
    8080      std::map<int, MPI_Comm> winComm_ ; //! Window communicators 
  • XIOS3/dev/XIOS_NOTIFICATIONS_MANAGER/src/transport/legacy_context_server.cpp

    r2670 r2671  
    4040    CXios::getMpiGarbageCollector().registerCommunicator(processEventBarrier_) ; 
    4141 
    42     currentTimeLine=1; 
    43     scheduled=false; 
    44     finished=false; 
     42    currentTimeLine_=1; 
     43    scheduled_=false; 
     44    finished_=false; 
    4545 
    4646    xios::MPI_Intercomm_merge(interComm_,true,&interCommMerged_) ; 
     
    4949    CXios::getMpiGarbageCollector().registerCommunicator(commSelf_) ; 
    5050   
    51     itLastTimeLine=lastTimeLine.begin() ; 
    52  
    53     pureOneSided=CXios::getin<bool>("pure_one_sided",false); // pure one sided communication (for test) 
     51    itLastTimeLine_=lastTimeLine_.begin() ; 
     52 
     53    pureOneSided_=CXios::getin<bool>("pure_one_sided",false); // pure one sided communication (for test) 
    5454  } 
    5555  
    5656  void CLegacyContextServer::setPendingEvent(void) 
    5757  { 
    58     pendingEvent=true; 
     58    pendingEvent_=true; 
    5959  } 
    6060 
    6161  bool CLegacyContextServer::hasPendingEvent(void) 
    6262  { 
    63     return (pendingRequest.size()!=0); 
     63    return (pendingRequest_.size()!=0); 
    6464  } 
    6565 
    6666  bool CLegacyContextServer::hasFinished(void) 
    6767  { 
    68     return finished; 
     68    return finished_; 
    6969  } 
    7070 
     
    8383    if (info.isActive(logTimers)) CTimer::get("check event process").suspend(); 
    8484    if (info.isActive(logProfile)) CTimer::get("Recv event loop (legacy)").suspend(); 
    85     return finished; 
     85    return finished_; 
    8686  } 
    8787 
     
    110110    int rank=status.MPI_SOURCE ; 
    111111 
    112     it=buffers.find(rank); 
    113     if (it==buffers.end()) // Receive the buffer size and allocate the buffer 
     112    it=buffers_.find(rank); 
     113    if (it==buffers_.end()) // Receive the buffer size and allocate the buffer 
    114114    { 
    115115      MPI_Aint recvBuff[4] ; 
     
    148148      MPI_Barrier(winComm_[rank]) ; 
    149149 
    150       it=(buffers.insert(pair<int,CServerBuffer*>(rank, new CServerBuffer(rank, windows_[rank], winBufferAddress, 0, buffSize)))).first; 
    151       lastTimeLine[rank]=0 ; 
    152       itLastTimeLine=lastTimeLine.begin() ; 
     150      it=(buffers_.insert(pair<int,CServerBuffer*>(rank, new CServerBuffer(rank, windows_[rank], winBufferAddress, 0, buffSize)))).first; 
     151      lastTimeLine_[rank]=0 ; 
     152      itLastTimeLine_=lastTimeLine_.begin() ; 
    153153 
    154154      return true; 
     
    157157    { 
    158158        std::pair<MPI_Message,MPI_Status> mypair(message,status) ; 
    159         pendingProbe[rank].push_back(mypair) ; 
     159        pendingProbe_[rank].push_back(mypair) ; 
    160160        return false; 
    161161    } 
     
    169169    map<int, list<std::pair<MPI_Message,MPI_Status> > >::iterator itProbe; 
    170170 
    171     for(itProbe=pendingProbe.begin();itProbe!=pendingProbe.end();itProbe++) 
     171    for(itProbe=pendingProbe_.begin();itProbe!=pendingProbe_.end();itProbe++) 
    172172    { 
    173173      int rank=itProbe->first ; 
    174       if (pendingRequest.count(rank)==0) 
     174      if (pendingRequest_.count(rank)==0) 
    175175      { 
    176176        MPI_Message& message = itProbe->second.front().first ; 
     
    178178        int count ; 
    179179        MPI_Get_count(&status,MPI_CHAR,&count); 
    180         map<int,CServerBuffer*>::iterator it = buffers.find(rank); 
     180        map<int,CServerBuffer*>::iterator it = buffers_.find(rank); 
    181181        if ( (it->second->isBufferFree(count) && !it->second->isResizing()) // accept new request if buffer is free 
    182182          || (it->second->isResizing() && it->second->isBufferEmpty()) )    // or if resizing wait for buffer is empty 
     
    184184          char * addr; 
    185185          addr=(char*)it->second->getBuffer(count); 
    186           MPI_Imrecv(addr,count,MPI_CHAR, &message, &pendingRequest[rank]); 
    187           bufferRequest[rank]=addr; 
     186          MPI_Imrecv(addr,count,MPI_CHAR, &message, &pendingRequest_[rank]); 
     187          bufferRequest_[rank]=addr; 
    188188          recvProbe.push_back(rank) ; 
    189189          itProbe->second.pop_front() ; 
     
    192192    } 
    193193 
    194     for(itRecv=recvProbe.begin(); itRecv!=recvProbe.end(); itRecv++) if (pendingProbe[*itRecv].empty()) pendingProbe.erase(*itRecv) ; 
     194    for(itRecv=recvProbe.begin(); itRecv!=recvProbe.end(); itRecv++) if (pendingProbe_[*itRecv].empty()) pendingProbe_.erase(*itRecv) ; 
    195195  } 
    196196 
     
    206206    MPI_Status status; 
    207207    
    208     if (!pendingRequest.empty()) if (info.isActive(logTimers)) CTimer::get("receiving requests").resume(); 
     208    if (!pendingRequest_.empty()) if (info.isActive(logTimers)) CTimer::get("receiving requests").resume(); 
    209209    else if (info.isActive(logTimers)) CTimer::get("receiving requests").suspend(); 
    210210 
    211     for(it=pendingRequest.begin();it!=pendingRequest.end();it++) 
     211    for(it=pendingRequest_.begin();it!=pendingRequest_.end();it++) 
    212212    { 
    213213      rank=it->first; 
     
    217217      if (flag==true) 
    218218      { 
    219         buffers[rank]->updateCurrentWindows() ; 
     219        buffers_[rank]->updateCurrentWindows() ; 
    220220        recvRequest.push_back(rank); 
    221221        MPI_Get_count(&status,MPI_CHAR,&count); 
    222         processRequest(rank,bufferRequest[rank],count); 
     222        processRequest(rank,bufferRequest_[rank],count); 
    223223      } 
    224224    } 
     
    226226    for(itRecv=recvRequest.begin();itRecv!=recvRequest.end();itRecv++) 
    227227    { 
    228       pendingRequest.erase(*itRecv); 
    229       bufferRequest.erase(*itRecv); 
     228      pendingRequest_.erase(*itRecv); 
     229      bufferRequest_.erase(*itRecv); 
    230230    } 
    231231  } 
     
    239239    size_t count ;  
    240240 
    241     if (itLastTimeLine==lastTimeLine.end()) itLastTimeLine=lastTimeLine.begin() ; 
    242     for(;itLastTimeLine!=lastTimeLine.end();++itLastTimeLine) 
    243     { 
    244       rank=itLastTimeLine->first ; 
    245       if (itLastTimeLine->second < timeLine &&  pendingRequest.count(rank)==0 && buffers[rank]->isBufferEmpty()) 
    246       { 
    247         if (buffers[rank]->getBufferFromClient(timeLine, buffer, count)) processRequest(rank, buffer, count); 
    248         if (count >= 0) ++itLastTimeLine ; 
     241    if (itLastTimeLine_==lastTimeLine_.end()) itLastTimeLine_=lastTimeLine_.begin() ; 
     242    for(;itLastTimeLine_!=lastTimeLine_.end();++itLastTimeLine_) 
     243    { 
     244      rank=itLastTimeLine_->first ; 
     245      if (itLastTimeLine_->second < timeLine &&  pendingRequest_.count(rank)==0 && buffers_[rank]->isBufferEmpty()) 
     246      { 
     247        if (buffers_[rank]->getBufferFromClient(timeLine, buffer, count)) processRequest(rank, buffer, count); 
     248        if (count >= 0) ++itLastTimeLine_ ; 
    249249        break ; 
    250250      } 
     
    273273      if (timeLine==timelineEventNotifyChangeBufferSize) 
    274274      { 
    275         buffers[rank]->notifyBufferResizing() ; 
    276         buffers[rank]->updateCurrentWindows() ; 
    277         buffers[rank]->popBuffer(count) ; 
     275        buffers_[rank]->notifyBufferResizing() ; 
     276        buffers_[rank]->updateCurrentWindows() ; 
     277        buffers_[rank]->popBuffer(count) ; 
    278278        info(100)<<"Context id "<<context_->getId()<<" : Receive NotifyChangeBufferSize from client rank "<<rank<<endl 
    279                  <<"isBufferEmpty ? "<<buffers[rank]->isBufferEmpty()<<"  remaining count : "<<buffers[rank]->getUsed()<<endl; 
     279                 <<"isBufferEmpty ? "<<buffers_[rank]->isBufferEmpty()<<"  remaining count : "<<buffers_[rank]->getUsed()<<endl; 
    280280      }  
    281281      else if (timeLine==timelineEventChangeBufferSize) 
     
    284284        vector<MPI_Aint> winBufferAdress(2) ; 
    285285        newBuffer>>newSize>>winBufferAdress[0]>>winBufferAdress[1] ; 
    286         buffers[rank]->freeBuffer(count) ; 
    287         delete buffers[rank] ; 
     286        buffers_[rank]->freeBuffer(count) ; 
     287        delete buffers_[rank] ; 
    288288        windows_[rank][0] -> setWinBufferAddress(winBufferAdress[0],0) ; 
    289289        windows_[rank][1] -> setWinBufferAddress(winBufferAdress[1],0) ; 
    290         buffers[rank] = new CServerBuffer(rank, windows_[rank], winBufferAdress, 0, newSize) ; 
     290        buffers_[rank] = new CServerBuffer(rank, windows_[rank], winBufferAdress, 0, newSize) ; 
    291291        info(100)<<"Context id "<<context_->getId()<<" : Receive ChangeBufferSize from client rank "<<rank 
    292292                 <<"  newSize : "<<newSize<<" Address : "<<winBufferAdress[0]<<" & "<<winBufferAdress[1]<<endl ; 
     
    295295      { 
    296296        info(100)<<"Context id "<<context_->getId()<<" : Receive standard event from client rank "<<rank<<"  with timeLine : "<<timeLine<<endl ; 
    297         it=events.find(timeLine); 
     297        it=events_.find(timeLine); 
    298298        
    299         if (it==events.end()) it=events.insert(pair<int,CEventServer*>(timeLine,new CEventServer(this))).first; 
    300         it->second->push(rank,buffers[rank],startBuffer,size); 
    301         if (timeLine>0) lastTimeLine[rank]=timeLine ; 
     299        if (it==events_.end()) it=events_.insert(pair<int,CEventServer*>(timeLine,new CEventServer(this))).first; 
     300        it->second->push(rank,buffers_[rank],startBuffer,size); 
     301        if (timeLine>0) lastTimeLine_[rank]=timeLine ; 
    302302      } 
    303303      buffer.advance(size); 
     
    315315    if (isProcessingEvent_) return ; 
    316316 
    317     it=events.find(currentTimeLine); 
    318     if (it!=events.end()) 
     317    it=events_.find(currentTimeLine_); 
     318    if (it!=events_.end()) 
    319319    { 
    320320      event=it->second; 
     
    322322      if (event->isFull()) 
    323323      { 
    324         if (!scheduled) 
     324        if (!scheduled_) 
    325325        { 
    326           eventScheduler_->registerEvent(currentTimeLine,hashId_); 
    327           info(100)<<"Context id "<<context_->getId()<<"Schedule event : "<< currentTimeLine <<"  "<<hashId_<<endl ; 
    328           scheduled=true; 
     326          eventScheduler_->registerEvent(currentTimeLine_,hashId_); 
     327          info(100)<<"Context id "<<context_->getId()<<"Schedule event : "<< currentTimeLine_ <<"  "<<hashId_<<endl ; 
     328          scheduled_=true; 
    329329        } 
    330         else if (eventScheduler_->queryEvent(currentTimeLine,hashId_) ) 
     330        else if (eventScheduler_->queryEvent(currentTimeLine_,hashId_) ) 
    331331        { 
    332332          if (!enableEventsProcessing && isCollectiveEvent(*event)) return ; 
     
    351351            int typeId, classId, typeId_in, classId_in; 
    352352            long long timeLine_out; 
    353             long long timeLine_in( currentTimeLine ); 
     353            long long timeLine_in( currentTimeLine_ ); 
    354354            typeId_in=event->type ; 
    355355            classId_in=event->classId ; 
     
    358358            MPI_Allreduce(&typeId_in,&typeId, 1, MPI_INT, MPI_SUM, intraComm_) ; 
    359359            MPI_Allreduce(&classId_in,&classId, 1, MPI_INT, MPI_SUM, intraComm_) ; 
    360             if (typeId/intraCommSize_!=event->type || classId/intraCommSize_!=event->classId || timeLine_out/intraCommSize_!=currentTimeLine) 
     360            if (typeId/intraCommSize_!=event->type || classId/intraCommSize_!=event->classId || timeLine_out/intraCommSize_!=currentTimeLine_) 
    361361            { 
    362362               ERROR("void CLegacyContextClient::sendEvent(CEventClient& event)", 
    363                   << "Event are not coherent between client for timeline = "<<currentTimeLine); 
     363                  << "Event are not coherent between client for timeline = "<<currentTimeLine_); 
    364364            } 
    365365          } 
     
    367367          isProcessingEvent_=true ; 
    368368          CTimer::get("Process events").resume(); 
    369           info(100)<<"Context id "<<context_->getId()<<" : Process Event "<<currentTimeLine<<" of class "<<event->classId<<" of type "<<event->type<<endl ; 
     369          info(100)<<"Context id "<<context_->getId()<<" : Process Event "<<currentTimeLine_<<" of class "<<event->classId<<" of type "<<event->type<<endl ; 
    370370          eventScheduler_->popEvent() ; 
    371371          dispatchEvent(*event); 
    372372          CTimer::get("Process events").suspend(); 
    373373          isProcessingEvent_=false ; 
    374           pendingEvent=false; 
     374          pendingEvent_=false; 
    375375          delete event; 
    376           events.erase(it); 
    377           currentTimeLine++; 
    378           scheduled = false; 
     376          events_.erase(it); 
     377          currentTimeLine_++; 
     378          scheduled_ = false; 
    379379        } 
    380380      } 
    381       else if (pendingRequest.empty()) getBufferFromClient(currentTimeLine) ; 
    382     } 
    383     else if (pendingRequest.empty()) getBufferFromClient(currentTimeLine) ; // if pure one sided check buffer even if no event recorded at current time line 
     381      else if (pendingRequest_.empty()) getBufferFromClient(currentTimeLine_) ; 
     382    } 
     383    else if (pendingRequest_.empty()) getBufferFromClient(currentTimeLine_) ; // if pure one sided check buffer even if no event recorded at current time line 
    384384  } 
    385385 
     
    387387  { 
    388388    map<int,CServerBuffer*>::iterator it; 
    389     for(it=buffers.begin();it!=buffers.end();++it) delete it->second; 
    390     buffers.clear() ; 
     389    for(it=buffers_.begin();it!=buffers_.end();++it) delete it->second; 
     390    buffers_.clear() ; 
    391391  } 
    392392 
    393393  void CLegacyContextServer::releaseBuffers() 
    394394  { 
    395     //for(auto it=buffers.begin();it!=buffers.end();++it) delete it->second ; 
    396     //buffers.clear() ;  
     395    //for(auto it=buffers_.begin();it!=buffers_.end();++it) delete it->second ; 
     396    //buffers_.clear() ;  
    397397    freeWindows() ; 
    398398  } 
     
    412412  void CLegacyContextServer::notifyClientsFinalize(void) 
    413413  { 
    414     for(auto it=buffers.begin();it!=buffers.end();++it) 
     414    for(auto it=buffers_.begin();it!=buffers_.end();++it) 
    415415    { 
    416416      it->second->notifyClientFinalize() ; 
     
    432432    { 
    433433      if (info.isActive(logProfile)) CTimer::get("Context finalize").resume(); 
    434       finished=true; 
     434      finished_=true; 
    435435      info(20)<<" CLegacyContextServer: Receive context <"<<context_->getId()<<"> finalize."<<endl; 
    436436      notifyClientsFinalize() ; 
  • XIOS3/dev/XIOS_NOTIFICATIONS_MANAGER/src/transport/legacy_context_server.hpp

    r2667 r2671  
    4242    MPI_Comm commSelf_ ; //!< Communicator for proc alone from interCommMerged  
    4343 
    44     map<int,CServerBuffer*> buffers ; 
    45     map<int,size_t> lastTimeLine ; //!< last event time line for a processed request 
    46     map<int,size_t>::iterator itLastTimeLine ; //!< iterator on lastTimeLine 
    47     map<int, list<std::pair<MPI_Message,MPI_Status> > > pendingProbe; 
    48     map<int,MPI_Request> pendingRequest ; 
    49     map<int,char*> bufferRequest ; 
     44    map<int,CServerBuffer*> buffers_ ; 
     45    map<int,size_t> lastTimeLine_ ; //!< last event time line for a processed request 
     46    map<int,size_t>::iterator itLastTimeLine_ ; //!< iterator on lastTimeLine 
     47    map<int, list<std::pair<MPI_Message,MPI_Status> > > pendingProbe_; 
     48    map<int,MPI_Request> pendingRequest_ ; 
     49    map<int,char*> bufferRequest_ ; 
    5050 
    51     map<size_t,CEventServer*> events ; 
    52     size_t currentTimeLine ; 
     51    map<size_t,CEventServer*> events_ ; 
     52    size_t currentTimeLine_ ; 
    5353       
    54     bool finished ; 
    55     bool pendingEvent ; 
    56     bool scheduled  ;    /*!< event of current timeline is alreading scheduled ? */ 
    57     bool pureOneSided ; //!< if true, client will communicated with servers only trough one sided communication. Otherwise the hybrid mode P2P /One sided is used. 
     54    bool finished_ ; 
     55    bool pendingEvent_ ; 
     56    bool scheduled_  ;    /*!< event of current timeline is alreading scheduled ? */ 
     57    bool pureOneSided_ ; //!< if true, client will communicated with servers only trough one sided communication. Otherwise the hybrid mode P2P /One sided is used. 
    5858 
    5959    ~CLegacyContextServer() ; 
  • XIOS3/dev/XIOS_NOTIFICATIONS_MANAGER/src/transport/legacy_context_server_v2.cpp

    r2670 r2671  
    4040    CXios::getMpiGarbageCollector().registerCommunicator(processEventBarrier_) ; 
    4141 
    42     currentTimeLine=1; 
    43     scheduled=false; 
    44     finished=false; 
     42    currentTimeLine_=1; 
     43    scheduled_=false; 
     44    finished_=false; 
    4545 
    4646    xios::MPI_Intercomm_merge(interComm_,true,&interCommMerged_) ; 
     
    5959    } 
    6060   
    61     itLastTimeLine=lastTimeLine.begin() ; 
    62  
    63     pureOneSided=CXios::getin<bool>("pure_one_sided",false); // pure one sided communication (for test) 
     61    itLastTimeLine_=lastTimeLine_.begin() ; 
     62 
     63    pureOneSided_=CXios::getin<bool>("pure_one_sided",false); // pure one sided communication (for test) 
    6464  } 
    6565  
    6666  void CLegacyContextServerV2::setPendingEvent(void) 
    6767  { 
    68     pendingEvent=true; 
     68    pendingEvent_=true; 
    6969  } 
    7070 
    7171  bool CLegacyContextServerV2::hasPendingEvent(void) 
    7272  { 
    73     return (pendingRequest.size()!=0); 
     73    return (pendingRequest_.size()!=0); 
    7474  } 
    7575 
    7676  bool CLegacyContextServerV2::hasFinished(void) 
    7777  { 
    78     return finished; 
     78    return finished_; 
    7979  } 
    8080 
     
    9393    if (info.isActive(logTimers)) CTimer::get("check event process").suspend(); 
    9494    if (info.isActive(logProfile)) CTimer::get("Recv event loop (legacy)").suspend(); 
    95     return finished; 
     95    return finished_; 
    9696  } 
    9797 
     
    120120    int rank=status.MPI_SOURCE ; 
    121121 
    122     it=buffers.find(rank); 
    123     if (it==buffers.end()) // Receive the buffer size and allocate the buffer 
     122    it=buffers_.find(rank); 
     123    if (it==buffers_.end()) // Receive the buffer size and allocate the buffer 
    124124    { 
    125125      MPI_Aint recvBuff[4] ; 
     
    148148      if (info.isActive(logTimers)) CTimer::get("create Windows").suspend() ; 
    149149 
    150       it=(buffers.insert(pair<int,CServerBuffer*>(rank, new CServerBuffer(rank, windows_[rank], winBufferAddress, rank, buffSize)))).first; 
    151       lastTimeLine[rank]=0 ; 
    152       itLastTimeLine=lastTimeLine.begin() ; 
     150      it=(buffers_.insert(pair<int,CServerBuffer*>(rank, new CServerBuffer(rank, windows_[rank], winBufferAddress, rank, buffSize)))).first; 
     151      lastTimeLine_[rank]=0 ; 
     152      itLastTimeLine_=lastTimeLine_.begin() ; 
    153153 
    154154      return true; 
     
    157157    { 
    158158        std::pair<MPI_Message,MPI_Status> mypair(message,status) ; 
    159         pendingProbe[rank].push_back(mypair) ; 
     159        pendingProbe_[rank].push_back(mypair) ; 
    160160        return false; 
    161161    } 
     
    169169    map<int, list<std::pair<MPI_Message,MPI_Status> > >::iterator itProbe; 
    170170 
    171     for(itProbe=pendingProbe.begin();itProbe!=pendingProbe.end();itProbe++) 
     171    for(itProbe=pendingProbe_.begin();itProbe!=pendingProbe_.end();itProbe++) 
    172172    { 
    173173      int rank=itProbe->first ; 
    174       if (pendingRequest.count(rank)==0) 
     174      if (pendingRequest_.count(rank)==0) 
    175175      { 
    176176        MPI_Message& message = itProbe->second.front().first ; 
     
    178178        int count ; 
    179179        MPI_Get_count(&status,MPI_CHAR,&count); 
    180         map<int,CServerBuffer*>::iterator it = buffers.find(rank); 
     180        map<int,CServerBuffer*>::iterator it = buffers_.find(rank); 
    181181        if ( (it->second->isBufferFree(count) && !it->second->isResizing()) // accept new request if buffer is free 
    182182          || (it->second->isResizing() && it->second->isBufferEmpty()) )    // or if resizing wait for buffer is empty 
     
    184184          char * addr; 
    185185          addr=(char*)it->second->getBuffer(count); 
    186           MPI_Imrecv(addr,count,MPI_CHAR, &message, &pendingRequest[rank]); 
    187           bufferRequest[rank]=addr; 
     186          MPI_Imrecv(addr,count,MPI_CHAR, &message, &pendingRequest_[rank]); 
     187          bufferRequest_[rank]=addr; 
    188188          recvProbe.push_back(rank) ; 
    189189          itProbe->second.pop_front() ; 
     
    192192    } 
    193193 
    194     for(itRecv=recvProbe.begin(); itRecv!=recvProbe.end(); itRecv++) if (pendingProbe[*itRecv].empty()) pendingProbe.erase(*itRecv) ; 
     194    for(itRecv=recvProbe.begin(); itRecv!=recvProbe.end(); itRecv++) if (pendingProbe_[*itRecv].empty()) pendingProbe_.erase(*itRecv) ; 
    195195  } 
    196196 
     
    206206    MPI_Status status; 
    207207    
    208     if (!pendingRequest.empty()) if (info.isActive(logTimers)) CTimer::get("receiving requests").resume(); 
     208    if (!pendingRequest_.empty()) if (info.isActive(logTimers)) CTimer::get("receiving requests").resume(); 
    209209    else if (info.isActive(logTimers)) CTimer::get("receiving requests").suspend(); 
    210210 
    211     for(it=pendingRequest.begin();it!=pendingRequest.end();it++) 
     211    for(it=pendingRequest_.begin();it!=pendingRequest_.end();it++) 
    212212    { 
    213213      rank=it->first; 
     
    217217      if (flag==true) 
    218218      { 
    219         buffers[rank]->updateCurrentWindows() ; 
     219        buffers_[rank]->updateCurrentWindows() ; 
    220220        recvRequest.push_back(rank); 
    221221        MPI_Get_count(&status,MPI_CHAR,&count); 
    222         processRequest(rank,bufferRequest[rank],count); 
     222        processRequest(rank,bufferRequest_[rank],count); 
    223223      } 
    224224    } 
     
    226226    for(itRecv=recvRequest.begin();itRecv!=recvRequest.end();itRecv++) 
    227227    { 
    228       pendingRequest.erase(*itRecv); 
    229       bufferRequest.erase(*itRecv); 
     228      pendingRequest_.erase(*itRecv); 
     229      bufferRequest_.erase(*itRecv); 
    230230    } 
    231231  } 
     
    239239    size_t count ;  
    240240 
    241     if (itLastTimeLine==lastTimeLine.end()) itLastTimeLine=lastTimeLine.begin() ; 
    242     for(;itLastTimeLine!=lastTimeLine.end();++itLastTimeLine) 
    243     { 
    244       rank=itLastTimeLine->first ; 
    245       if (itLastTimeLine->second < timeLine &&  pendingRequest.count(rank)==0 && buffers[rank]->isBufferEmpty()) 
    246       { 
    247         if (buffers[rank]->getBufferFromClient(timeLine, buffer, count)) processRequest(rank, buffer, count); 
    248         if (count >= 0) ++itLastTimeLine ; 
     241    if (itLastTimeLine_==lastTimeLine_.end()) itLastTimeLine_=lastTimeLine_.begin() ; 
     242    for(;itLastTimeLine_!=lastTimeLine_.end();++itLastTimeLine_) 
     243    { 
     244      rank=itLastTimeLine_->first ; 
     245      if (itLastTimeLine_->second < timeLine &&  pendingRequest_.count(rank)==0 && buffers_[rank]->isBufferEmpty()) 
     246      { 
     247        if (buffers_[rank]->getBufferFromClient(timeLine, buffer, count)) processRequest(rank, buffer, count); 
     248        if (count >= 0) ++itLastTimeLine_ ; 
    249249        break ; 
    250250      } 
     
    273273      if (timeLine==timelineEventNotifyChangeBufferSize) 
    274274      { 
    275         buffers[rank]->notifyBufferResizing() ; 
    276         buffers[rank]->updateCurrentWindows() ; 
    277         buffers[rank]->popBuffer(count) ; 
     275        buffers_[rank]->notifyBufferResizing() ; 
     276        buffers_[rank]->updateCurrentWindows() ; 
     277        buffers_[rank]->popBuffer(count) ; 
    278278        info(100)<<"Context id "<<context_->getId()<<" : Receive NotifyChangeBufferSize from client rank "<<rank<<endl 
    279                  <<"isBufferEmpty ? "<<buffers[rank]->isBufferEmpty()<<"  remaining count : "<<buffers[rank]->getUsed()<<endl; 
     279                 <<"isBufferEmpty ? "<<buffers_[rank]->isBufferEmpty()<<"  remaining count : "<<buffers_[rank]->getUsed()<<endl; 
    280280      }  
    281281      else if (timeLine==timelineEventChangeBufferSize) 
     
    284284        vector<MPI_Aint> winBufferAdress(2) ; 
    285285        newBuffer>>newSize>>winBufferAdress[0]>>winBufferAdress[1] ; 
    286         buffers[rank]->freeBuffer(count) ; 
    287         delete buffers[rank] ; 
     286        buffers_[rank]->freeBuffer(count) ; 
     287        delete buffers_[rank] ; 
    288288 
    289289        int serverRank, commSize ; 
    290290        windows_[rank][0] -> setWinBufferAddress(winBufferAdress[0], rank) ; 
    291291        windows_[rank][1] -> setWinBufferAddress(winBufferAdress[1], rank) ; 
    292         buffers[rank] = new CServerBuffer(rank, windows_[rank], winBufferAdress, rank, newSize) ; 
     292        buffers_[rank] = new CServerBuffer(rank, windows_[rank], winBufferAdress, rank, newSize) ; 
    293293        info(100)<<"Context id "<<context_->getId()<<" : Receive ChangeBufferSize from client rank "<<rank 
    294294                 <<"  newSize : "<<newSize<<" Address : "<<winBufferAdress[0]<<" & "<<winBufferAdress[1]<<endl ; 
     
    297297      { 
    298298        info(100)<<"Context id "<<context_->getId()<<" : Receive standard event from client rank "<<rank<<"  with timeLine : "<<timeLine<<endl ; 
    299         it=events.find(timeLine); 
     299        it=events_.find(timeLine); 
    300300        
    301         if (it==events.end()) it=events.insert(pair<int,CEventServer*>(timeLine,new CEventServer(this))).first; 
    302         it->second->push(rank,buffers[rank],startBuffer,size); 
    303         if (timeLine>0) lastTimeLine[rank]=timeLine ; 
     301        if (it==events_.end()) it=events_.insert(pair<int,CEventServer*>(timeLine,new CEventServer(this))).first; 
     302        it->second->push(rank,buffers_[rank],startBuffer,size); 
     303        if (timeLine>0) lastTimeLine_[rank]=timeLine ; 
    304304      } 
    305305      buffer.advance(size); 
     
    317317    if (isProcessingEvent_) return ; 
    318318 
    319     it=events.find(currentTimeLine); 
    320     if (it!=events.end()) 
     319    it=events_.find(currentTimeLine_); 
     320    if (it!=events_.end()) 
    321321    { 
    322322      event=it->second; 
     
    324324      if (event->isFull()) 
    325325      { 
    326         if (!scheduled) 
     326        if (!scheduled_) 
    327327        { 
    328           eventScheduler_->registerEvent(currentTimeLine,hashId_); 
    329           info(100)<<"Context id "<<context_->getId()<<"Schedule event : "<< currentTimeLine <<"  "<<hashId_<<endl ; 
    330           scheduled=true; 
     328          eventScheduler_->registerEvent(currentTimeLine_,hashId_); 
     329          info(100)<<"Context id "<<context_->getId()<<"Schedule event : "<< currentTimeLine_ <<"  "<<hashId_<<endl ; 
     330          scheduled_=true; 
    331331        } 
    332         else if (eventScheduler_->queryEvent(currentTimeLine,hashId_) ) 
     332        else if (eventScheduler_->queryEvent(currentTimeLine_,hashId_) ) 
    333333        { 
    334334          if (!enableEventsProcessing && isCollectiveEvent(*event)) return ; 
     
    353353            int typeId, classId, typeId_in, classId_in; 
    354354            long long timeLine_out; 
    355             long long timeLine_in( currentTimeLine ); 
     355            long long timeLine_in( currentTimeLine_ ); 
    356356            typeId_in=event->type ; 
    357357            classId_in=event->classId ; 
     
    360360            MPI_Allreduce(&typeId_in,&typeId, 1, MPI_INT, MPI_SUM, intraComm_) ; 
    361361            MPI_Allreduce(&classId_in,&classId, 1, MPI_INT, MPI_SUM, intraComm_) ; 
    362             if (typeId/intraCommSize_!=event->type || classId/intraCommSize_!=event->classId || timeLine_out/intraCommSize_!=currentTimeLine) 
     362            if (typeId/intraCommSize_!=event->type || classId/intraCommSize_!=event->classId || timeLine_out/intraCommSize_!=currentTimeLine_) 
    363363            { 
    364364               ERROR("void CLegacyContextClient::sendEvent(CEventClient& event)", 
    365                   << "Event are not coherent between client for timeline = "<<currentTimeLine); 
     365                  << "Event are not coherent between client for timeline = "<<currentTimeLine_); 
    366366            } 
    367367          } 
     
    369369          isProcessingEvent_=true ; 
    370370          CTimer::get("Process events").resume(); 
    371           info(100)<<"Context id "<<context_->getId()<<" : Process Event "<<currentTimeLine<<" of class "<<event->classId<<" of type "<<event->type<<endl ; 
     371          info(100)<<"Context id "<<context_->getId()<<" : Process Event "<<currentTimeLine_<<" of class "<<event->classId<<" of type "<<event->type<<endl ; 
    372372          eventScheduler_->popEvent() ; 
    373373          dispatchEvent(*event); 
    374374          CTimer::get("Process events").suspend(); 
    375375          isProcessingEvent_=false ; 
    376           pendingEvent=false; 
     376          pendingEvent_=false; 
    377377          delete event; 
    378           events.erase(it); 
    379           currentTimeLine++; 
    380           scheduled = false; 
     378          events_.erase(it); 
     379          currentTimeLine_++; 
     380          scheduled_ = false; 
    381381        } 
    382382      } 
    383       else if (pendingRequest.empty()) getBufferFromClient(currentTimeLine) ; 
    384     } 
    385     else if (pendingRequest.empty()) getBufferFromClient(currentTimeLine) ; // if pure one sided check buffer even if no event recorded at current time line 
     383      else if (pendingRequest_.empty()) getBufferFromClient(currentTimeLine_) ; 
     384    } 
     385    else if (pendingRequest_.empty()) getBufferFromClient(currentTimeLine_) ; // if pure one sided check buffer even if no event recorded at current time line 
    386386  } 
    387387 
     
    389389  { 
    390390    map<int,CServerBuffer*>::iterator it; 
    391     for(it=buffers.begin();it!=buffers.end();++it) delete it->second; 
    392     buffers.clear() ; 
     391    for(it=buffers_.begin();it!=buffers_.end();++it) delete it->second; 
     392    buffers_.clear() ; 
    393393  } 
    394394 
    395395  void CLegacyContextServerV2::releaseBuffers() 
    396396  { 
    397     //for(auto it=buffers.begin();it!=buffers.end();++it) delete it->second ; 
    398     //buffers.clear() ;  
     397    //for(auto it=buffers_.begin();it!=buffers_.end();++it) delete it->second ; 
     398    //buffers_.clear() ;  
    399399    freeWindows() ; 
    400400  } 
     
    412412  void CLegacyContextServerV2::notifyClientsFinalize(void) 
    413413  { 
    414     for(auto it=buffers.begin();it!=buffers.end();++it) 
     414    for(auto it=buffers_.begin();it!=buffers_.end();++it) 
    415415    { 
    416416      it->second->notifyClientFinalize() ; 
     
    432432    { 
    433433      if (info.isActive(logProfile)) CTimer::get("Context finalize").resume(); 
    434       finished=true; 
     434      finished_=true; 
    435435      info(20)<<" CLegacyContextServerV2: Receive context <"<<context_->getId()<<"> finalize."<<endl; 
    436436      notifyClientsFinalize() ; 
  • XIOS3/dev/XIOS_NOTIFICATIONS_MANAGER/src/transport/legacy_context_server_v2.hpp

    r2667 r2671  
    4242    MPI_Comm commSelf_ ; //!< Communicator for proc alone from interCommMerged  
    4343 
    44     map<int,CServerBuffer*> buffers ; 
    45     map<int,size_t> lastTimeLine ; //!< last event time line for a processed request 
    46     map<int,size_t>::iterator itLastTimeLine ; //!< iterator on lastTimeLine 
    47     map<int, list<std::pair<MPI_Message,MPI_Status> > > pendingProbe; 
    48     map<int,MPI_Request> pendingRequest ; 
    49     map<int,char*> bufferRequest ; 
     44    map<int,CServerBuffer*> buffers_ ; 
     45    map<int,size_t> lastTimeLine_ ; //!< last event time line for a processed request 
     46    map<int,size_t>::iterator itLastTimeLine_ ; //!< iterator on lastTimeLine 
     47    map<int, list<std::pair<MPI_Message,MPI_Status> > > pendingProbe_; 
     48    map<int,MPI_Request> pendingRequest_ ; 
     49    map<int,char*> bufferRequest_ ; 
    5050 
    51     map<size_t,CEventServer*> events ; 
    52     size_t currentTimeLine ; 
     51    map<size_t,CEventServer*> events_ ; 
     52    size_t currentTimeLine_ ; 
    5353       
    54     bool finished ; 
    55     bool pendingEvent ; 
    56     bool scheduled  ;    /*!< event of current timeline is alreading scheduled ? */ 
    57     bool pureOneSided ; //!< if true, client will communicated with servers only trough one sided communication. Otherwise the hybrid mode P2P /One sided is used. 
     54    bool finished_ ; 
     55    bool pendingEvent_ ; 
     56    bool scheduled_  ;    /*!< event of current timeline is alreading scheduled ? */ 
     57    bool pureOneSided_ ; //!< if true, client will communicated with servers only trough one sided communication. Otherwise the hybrid mode P2P /One sided is used. 
    5858 
    5959    ~CLegacyContextServerV2() ; 
  • XIOS3/dev/XIOS_NOTIFICATIONS_MANAGER/src/transport/one_sided_context_client.cpp

    r2669 r2671  
    2525    COneSidedContextClient::COneSidedContextClient(CContext* parent, MPI_Comm intraComm, MPI_Comm interComm, CContext* cxtSer) 
    2626     : CContextClient(parent, intraComm, interComm, cxtSer), 
    27        mapBufferSize_(), maxBufferedEvents(4) 
     27       mapBufferSize_(), maxBufferedEvents_(4) 
    2828    { 
    2929       
    30       pureOneSided=CXios::getin<bool>("pure_one_sided",false); // pure one sided communication (for test) 
     30      pureOneSided_=CXios::getin<bool>("pure_one_sided",false); // pure one sided communication (for test) 
    3131 
    3232      xios::MPI_Intercomm_merge(interComm_,false, &interCommMerged_) ; 
     
    3636      CXios::getMpiGarbageCollector().registerCommunicator(commSelf_) ; 
    3737      eventScheduler_ = parent->getEventScheduler() ;   
    38       timeLine = 1; 
     38      timeLine_ = 1; 
    3939    } 
    4040 
     
    5050//      ostringstream str ; 
    5151//      for(auto& rank : ranks) str<<rank<<" ; " ; 
    52 //      info(100)<<"Event "<<timeLine<<" of context "<<context_->getId()<<"  for ranks : "<<str.str()<<endl ; 
     52//      info(100)<<"Event "<<timeLine_<<" of context "<<context_->getId()<<"  for ranks : "<<str.str()<<endl ; 
    5353 
    5454      if (CXios::checkEventSync) 
     
    5656        int typeId, classId, typeId_in, classId_in; 
    5757        long long timeLine_out; 
    58         long long timeLine_in( timeLine ); 
     58        long long timeLine_in( timeLine_ ); 
    5959        typeId_in=event.getTypeId() ; 
    6060        classId_in=event.getClassId() ; 
    61 //        MPI_Allreduce(&timeLine,&timeLine_out, 1, MPI_UINT64_T, MPI_SUM, intraComm_) ; // MPI_UINT64_T standardized by MPI 3 
     61//        MPI_Allreduce(&timeLine_,&timeLine_out, 1, MPI_UINT64_T, MPI_SUM, intraComm_) ; // MPI_UINT64_T standardized by MPI 3 
    6262        MPI_Allreduce(&timeLine_in,&timeLine_out, 1, MPI_LONG_LONG_INT, MPI_SUM, intraComm_) ;  
    6363        MPI_Allreduce(&typeId_in,&typeId, 1, MPI_INT, MPI_SUM, intraComm_) ; 
    6464        MPI_Allreduce(&classId_in,&classId, 1, MPI_INT, MPI_SUM, intraComm_) ; 
    65         if (typeId/clientSize_!=event.getTypeId() || classId/clientSize_!=event.getClassId() || timeLine_out/clientSize_!=timeLine) 
     65        if (typeId/clientSize_!=event.getTypeId() || classId/clientSize_!=event.getClassId() || timeLine_out/clientSize_!=timeLine_) 
    6666        { 
    6767           ERROR("void COneSidedContextClient::sendEvent(CEventClient& event)", 
    68                << "Event are not coherent between client for timeline = "<<timeLine); 
     68               << "Event are not coherent between client for timeline = "<<timeLine_); 
    6969        } 
    7070         
     
    7878        { 
    7979          ERROR("void COneSidedContextClient::sendEvent(CEventClient& event)", 
    80                  <<" Some servers will not receive the message for timeline = "<<timeLine<<endl 
     80                 <<" Some servers will not receive the message for timeline = "<<timeLine_<<endl 
    8181                 <<"Servers are : "<<osstr.str()) ; 
    8282        } 
     
    9797        itBuffer->second->eventLoop() ; 
    9898        double time=CTimer::getTime() ; 
    99         bool succed = itBuffer->second->writeEvent(timeLine, event)  ; 
     99        bool succed = itBuffer->second->writeEvent(timeLine_, event)  ; 
    100100        if (succed)  
    101101        { 
     
    117117      synchronize() ; 
    118118       
    119       timeLine++; 
     119      timeLine_++; 
    120120    } 
    121121 
     
    160160        error(0) << "WARNING: Unexpected request for buffer to communicate with server " << rank << std::endl; 
    161161        mapBufferSize_[rank] = CXios::minBufferSize; 
    162         maxEventSizes[rank] = CXios::minBufferSize; 
     162        maxEventSizes_[rank] = CXios::minBufferSize; 
    163163      } 
    164164 
  • XIOS3/dev/XIOS_NOTIFICATIONS_MANAGER/src/transport/one_sided_context_client.hpp

    r2547 r2671  
    5757      void setFixedBuffer(void) { isGrowableBuffer_=false;} 
    5858 
    59       size_t timeLine; //!< Timeline of each event 
     59      size_t timeLine_; //!< Timeline of each event 
    6060 
    6161      MPI_Comm interCommMerged_; //!< Communicator of the client group + server group (intraCommunicator) needed for one sided communication. 
     
    6363      map<int,COneSidedClientBuffer*> buffers; //!< Buffers for connection to servers 
    6464 
    65       bool pureOneSided ; //!< if true, client will communicated with servers only trough one sided communication. Otherwise the hybrid mode P2P /One sided is used. 
     65      bool pureOneSided_ ; //!< if true, client will communicated with servers only trough one sided communication. Otherwise the hybrid mode P2P /One sided is used. 
    6666 
    6767    private: 
     
    7070      std::map<int,StdSize> mapBufferSize_; 
    7171      //! Maximum event sizes estimated for each connection to server 
    72       std::map<int,StdSize> maxEventSizes; 
     72      std::map<int,StdSize> maxEventSizes_; 
    7373      //! Maximum number of events that can be buffered 
    74       StdSize maxBufferedEvents; 
     74      StdSize maxBufferedEvents_; 
    7575 
    7676      std::map<int, MPI_Comm> winComm_ ; //! Window communicators 
  • XIOS3/dev/XIOS_NOTIFICATIONS_MANAGER/src/transport/one_sided_context_server.cpp

    r2670 r2671  
    4040    CXios::getMpiGarbageCollector().registerCommunicator(processEventBarrier_) ; 
    4141     
    42     currentTimeLine=1; 
    43     scheduled=false; 
    44     finished=false; 
     42    currentTimeLine_=1; 
     43    scheduled_=false; 
     44    finished_=false; 
    4545 
    4646    xios::MPI_Intercomm_merge(interComm_,true,&interCommMerged_) ; 
     
    4949    CXios::getMpiGarbageCollector().registerCommunicator(commSelf_) ; 
    5050 
    51     itLastTimeLine=lastTimeLine.begin() ; 
    52  
    53     pureOneSided=CXios::getin<bool>("pure_one_sided",false); // pure one sided communication (for test) 
     51    itLastTimeLine_=lastTimeLine_.begin() ; 
     52 
     53    pureOneSided_=CXios::getin<bool>("pure_one_sided",false); // pure one sided communication (for test) 
    5454       
    5555  } 
     
    5757  void COneSidedContextServer::setPendingEvent(void) 
    5858  { 
    59     pendingEvent=true; 
     59    pendingEvent_=true; 
    6060  } 
    6161 
     
    6767  bool COneSidedContextServer::hasFinished(void) 
    6868  { 
    69     return finished; 
     69    return finished_; 
    7070  } 
    7171 
     
    8787    processEvents(enableEventsProcessing); 
    8888    if (info.isActive(logTimers)) CTimer::get("check event process").suspend(); 
    89     return finished; 
     89    return finished_; 
    9090 
    9191  } 
     
    170170    if (isProcessingEvent_) return ; 
    171171 
    172     auto it=completedEvents_.find(currentTimeLine); 
     172    auto it=completedEvents_.find(currentTimeLine_); 
    173173 
    174174    if (it!=completedEvents_.end()) 
     
    176176      if (it->second.nbSenders == it->second.currentNbSenders) 
    177177      { 
    178         if (!scheduled)  
     178        if (!scheduled_)  
    179179        { 
    180           eventScheduler_->registerEvent(currentTimeLine,hashId_); 
    181           scheduled=true; 
     180          eventScheduler_->registerEvent(currentTimeLine_,hashId_); 
     181          scheduled_=true; 
    182182        } 
    183         else if (eventScheduler_->queryEvent(currentTimeLine,hashId_) ) 
     183        else if (eventScheduler_->queryEvent(currentTimeLine_,hashId_) ) 
    184184        { 
    185185          //if (!enableEventsProcessing && isCollectiveEvent(event)) return ; 
     
    204204          isProcessingEvent_=true ; 
    205205          CEventServer event(this) ; 
    206           for(auto& buffer : it->second.buffers) buffer->fillEventServer(currentTimeLine, event) ; 
     206          for(auto& buffer : it->second.buffers) buffer->fillEventServer(currentTimeLine_, event) ; 
    207207//          MPI_Barrier(intraComm) ; 
    208208          CTimer::get("Process events").resume(); 
    209           info(100)<<"Context id "<<context_->getId()<<" : Process Event "<<currentTimeLine<<" of class "<<event.classId<<" of type "<<event.type<<endl ; 
     209          info(100)<<"Context id "<<context_->getId()<<" : Process Event "<<currentTimeLine_<<" of class "<<event.classId<<" of type "<<event.type<<endl ; 
    210210          dispatchEvent(event); 
    211211          CTimer::get("Process events").suspend(); 
    212212          isProcessingEvent_=false ; 
    213213//         context_->unsetProcessingEvent() ; 
    214           pendingEvent=false; 
     214          pendingEvent_=false; 
    215215          completedEvents_.erase(it); 
    216           currentTimeLine++; 
    217           scheduled = false; 
     216          currentTimeLine_++; 
     217          scheduled_ = false; 
    218218        } 
    219219      } 
     
    267267    { 
    268268      if (info.isActive(logProfile)) CTimer::get("Context finalize").resume(); 
    269       finished=true; 
     269      finished_=true; 
    270270      info(20)<<" COneSidedContextServer: Receive context <"<<context_->getId()<<"> finalize."<<endl; 
    271271      notifyClientsFinalize() ; 
  • XIOS3/dev/XIOS_NOTIFICATIONS_MANAGER/src/transport/one_sided_context_server.hpp

    r2561 r2671  
    8080 
    8181    map<int,COneSidedServerBuffer*> buffers_ ; 
    82     map<int,size_t> lastTimeLine ; //!< last event time line for a processed request 
    83     map<int,size_t>::iterator itLastTimeLine ; //!< iterator on lastTimeLine 
    84     map<int, list<std::pair<MPI_Message,MPI_Status> > > pendingProbe; 
    85     map<int,MPI_Request> pendingRequest ; 
    86     map<int,char*> bufferRequest ; 
     82    map<int,size_t> lastTimeLine_ ; //!< last event time line for a processed request 
     83    map<int,size_t>::iterator itLastTimeLine_ ; //!< iterator on lastTimeLine 
     84    map<int, list<std::pair<MPI_Message,MPI_Status> > > pendingProbe_; 
     85    map<int,MPI_Request> pendingRequest_ ; 
     86    map<int,char*> bufferRequest_ ; 
    8787 
    88     map<size_t,CEventServer*> events ; 
    89     size_t currentTimeLine ; 
     88    map<size_t,CEventServer*> events_ ; 
     89    size_t currentTimeLine_ ; 
    9090       
    91     bool finished ; 
    92     bool pendingEvent ; 
    93     bool scheduled  ;    /*!< event of current timeline is already scheduled? */ 
    94     bool pureOneSided ; //!< if true, the client will communicate with servers only through one-sided communication. Otherwise the hybrid P2P / one-sided mode is used. 
     91    bool finished_ ; 
     92    bool pendingEvent_ ; 
     93    bool scheduled_  ;    /*!< event of current timeline is already scheduled? */ 
     94    bool pureOneSided_ ; //!< if true, the client will communicate with servers only through one-sided communication. Otherwise the hybrid P2P / one-sided mode is used. 
    9595 
    9696    private: 
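
The header hunks above show the convention this changeset applies throughout: every data member of the context server and client classes receives a trailing "_" suffix. The fragment below is a minimal, purely illustrative sketch (a hypothetical class, not taken from the XIOS sources) of the same convention; the suffix keeps members such as currentTimeLine_ visually distinct from parameters and locals such as timeLine.

    #include <cstddef>
    #include <map>

    // Illustrative only: a hypothetical class following the trailing-underscore
    // convention applied to the CContextServer/CContextClient data members.
    class MiniContextServer
    {
      public:
        explicit MiniContextServer(std::size_t startTimeLine)
          : currentTimeLine_(startTimeLine), finished_(false), pendingEvent_(false), scheduled_(false) {}

        // Parameter "timeLine" versus member "currentTimeLine_": the suffix removes the ambiguity.
        void setTimeLine(std::size_t timeLine) { currentTimeLine_ = timeLine; }
        bool hasFinished() const { return finished_; }

      private:
        std::map<int, std::size_t> lastTimeLine_; // last event timeline per processed request
        std::size_t currentTimeLine_;             // timeline of the next event to process
        bool finished_;                           // context finalize received?
        bool pendingEvent_;                       // an event is waiting to be processed?
        bool scheduled_;                          // event of the current timeline already scheduled?
    };

    int main()
    {
      MiniContextServer server(1);
      server.setTimeLine(2);
      return server.hasFinished() ? 1 : 0;
    }
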
  • XIOS3/dev/XIOS_NOTIFICATIONS_MANAGER/src/transport/p2p_context_client.cpp

    r2669 r2671  
    2525    CP2pContextClient::CP2pContextClient(CContext* parent, MPI_Comm intraComm, MPI_Comm interComm, CContext* cxtSer) 
    2626     : CContextClient(parent, intraComm, interComm, cxtSer), 
    27        mapBufferSize_(), maxBufferedEvents(4) 
     27       mapBufferSize_(), maxBufferedEvents_(4) 
    2828    { 
    2929       
    30       pureOneSided=CXios::getin<bool>("pure_one_sided",false); // pure one sided communication (for test) 
     30      pureOneSided_=CXios::getin<bool>("pure_one_sided",false); // pure one sided communication (for test) 
    3131 
    3232      xios::MPI_Intercomm_merge(interComm_,false, &interCommMerged_) ; 
     
    3636      CXios::getMpiGarbageCollector().registerCommunicator(commSelf_) ; 
    3737      eventScheduler_ = parent->getEventScheduler() ;   
    38       timeLine = 1; 
     38      timeLine_ = 1; 
    3939    } 
    4040 
     
    5050//      ostringstream str ; 
    5151//      for(auto& rank : ranks) str<<rank<<" ; " ; 
    52 //      info(100)<<"Event "<<timeLine<<" of context "<<context_->getId()<<"  for ranks : "<<str.str()<<endl ; 
     52//      info(100)<<"Event "<<timeLine_<<" of context "<<context_->getId()<<"  for ranks : "<<str.str()<<endl ; 
    5353 
    5454      if (CXios::checkEventSync) 
     
    5656        int typeId, classId, typeId_in, classId_in; 
    5757        long long timeLine_out; 
    58         long long timeLine_in( timeLine ); 
     58        long long timeLine_in( timeLine_ ); 
    5959        typeId_in=event.getTypeId() ; 
    6060        classId_in=event.getClassId() ; 
    61 //        MPI_Allreduce(&timeLine,&timeLine_out, 1, MPI_UINT64_T, MPI_SUM, intraComm_) ; // MPI_UINT64_T standardized by MPI 3 
     61//        MPI_Allreduce(&timeLine_,&timeLine_out, 1, MPI_UINT64_T, MPI_SUM, intraComm_) ; // MPI_UINT64_T standardized by MPI 3 
    6262        MPI_Allreduce(&timeLine_in,&timeLine_out, 1, MPI_LONG_LONG_INT, MPI_SUM, intraComm_) ;  
    6363        MPI_Allreduce(&typeId_in,&typeId, 1, MPI_INT, MPI_SUM, intraComm_) ; 
    6464        MPI_Allreduce(&classId_in,&classId, 1, MPI_INT, MPI_SUM, intraComm_) ; 
    65         if (typeId/clientSize_!=event.getTypeId() || classId/clientSize_!=event.getClassId() || timeLine_out/clientSize_!=timeLine) 
     65        if (typeId/clientSize_!=event.getTypeId() || classId/clientSize_!=event.getClassId() || timeLine_out/clientSize_!=timeLine_) 
    6666        { 
    6767           ERROR("void COneSidedContextClient::sendEvent(CEventClient& event)", 
    68                << "Event are not coherent between client for timeline = "<<timeLine); 
     68               << "Event are not coherent between client for timeline = "<<timeLine_); 
    6969        } 
    7070         
     
    7878        { 
    7979          ERROR("void COneSidedContextClient::sendEvent(CEventClient& event)", 
    80                  <<" Some servers will not receive the message for timeline = "<<timeLine<<endl 
     80                 <<" Some servers will not receive the message for timeline = "<<timeLine_<<endl 
    8181                 <<"Servers are : "<<osstr.str()) ; 
    8282        } 
     
    8989      { 
    9090        int rank=event.getRank() ; 
    91         auto itBuffer=buffers.find(rank) ; 
    92         if (itBuffer==buffers.end())  
     91        auto itBuffer=buffers_.find(rank) ; 
     92        if (itBuffer==buffers_.end())  
    9393        {   
    9494          newBuffer(rank) ; 
    95           itBuffer=buffers.find(rank) ; 
     95          itBuffer=buffers_.find(rank) ; 
    9696        } 
    9797        itBuffer->second->eventLoop() ; 
    9898        double time=CTimer::getTime() ; 
    99         bool succed = itBuffer->second->writeEvent(timeLine, event)  ; 
     99        bool succed = itBuffer->second->writeEvent(timeLine_, event)  ; 
    100100        if (succed)  
    101101        { 
     
    117117      synchronize() ; 
    118118       
    119       timeLine++; 
     119      timeLine_++; 
    120120    } 
    121121 
     
    163163      } 
    164164 
    165       CP2pClientBuffer* buffer = buffers[rank] = new CP2pClientBuffer(interComm_, rank, commSelf_, interCommMerged_, clientSize_+rank ); 
     165      CP2pClientBuffer* buffer = buffers_[rank] = new CP2pClientBuffer(interComm_, rank, commSelf_, interCommMerged_, clientSize_+rank ); 
    166166      if (isGrowableBuffer_) { buffer->setGrowable(growingFactor_) ; } 
    167167      else buffer->setFixed(mapBufferSize_[rank]) ; 
     
    176176   { 
    177177      bool pending = false; 
    178       for (auto itBuff : buffers) 
     178      for (auto itBuff : buffers_) 
    179179      { 
    180180        itBuff.second->eventLoop() ; 
     
    187187   void CP2pContextClient::releaseBuffers() 
    188188   { 
    189       for (auto& itBuff : buffers) delete itBuff.second; 
    190       buffers.clear(); 
     189      for (auto& itBuff : buffers_) delete itBuff.second; 
     190      buffers_.clear(); 
    191191   } 
    192192 
     
    202202      for (auto& rank : ranks)  
    203203      { 
    204         buffers[rank]->eventLoop() ; 
    205         pending |= !(buffers[rank]->isEmpty()) ; 
     204        buffers_[rank]->eventLoop() ; 
     205        pending |= !(buffers_[rank]->isEmpty()) ; 
    206206      } 
    207207      return pending; 
     
    221221      size_t size=std::max(CXios::minBufferSize*1.0,std::min(it.second*CXios::bufferSizeFactor*1.01,CXios::maxBufferSize*1.0)) ; 
    222222      mapBufferSize_[it.first]=size ; 
    223       if (buffers.count(it.first)>0) buffers[it.first]->setFixed(size); 
     223      if (buffers_.count(it.first)>0) buffers_[it.first]->setFixed(size); 
    224224     } 
    225225   } 
     
    236236    int* nbServerConnectionGlobal  = new int[serverSize_] ; 
    237237    for(int i=0;i<serverSize_;++i) nbServerConnectionLocal[i]=0 ; 
    238     for (auto itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++)  nbServerConnectionLocal[itBuff->first]=1 ; 
     238    for (auto itBuff = buffers_.begin(); itBuff != buffers_.end(); itBuff++)  nbServerConnectionLocal[itBuff->first]=1 ; 
    239239    for (auto ItServerLeader = ranksServerLeader_.begin(); ItServerLeader != ranksServerLeader_.end(); ItServerLeader++)  nbServerConnectionLocal[*ItServerLeader]=1 ; 
    240240     
     
    286286 
    287287    bool finalized = true; 
    288     for (auto& it : buffers ) finalized &= it.second->isNotifiedFinalized(); 
     288    for (auto& it : buffers_ ) finalized &= it.second->isNotifiedFinalized(); 
    289289    return finalized; 
    290290  } 
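
The sendEvent() hunks above also carry the optional event-synchronisation check driven by CXios::checkEventSync: each client sums its local timeline, event type id and class id over the intra-communicator and compares each sum divided by the number of clients against its own values, raising an error when they differ. The standalone MPI program below is a minimal sketch of that idea (assumed local values, MPI_COMM_WORLD in place of the XIOS intra-communicator); divergences that happen to cancel in the sum would go unnoticed, but in practice the check catches desynchronised clients cheaply.

    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      int clientSize, rank;
      MPI_Comm_size(MPI_COMM_WORLD, &clientSize);
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);

      long long timeLine = 1;        // local event timeline (identical on all ranks when coherent)
      int typeId = 3, classId = 7;   // local event identifiers (assumed values)

      long long timeLineSum; int typeIdSum, classIdSum;
      MPI_Allreduce(&timeLine, &timeLineSum, 1, MPI_LONG_LONG_INT, MPI_SUM, MPI_COMM_WORLD);
      MPI_Allreduce(&typeId,   &typeIdSum,   1, MPI_INT,           MPI_SUM, MPI_COMM_WORLD);
      MPI_Allreduce(&classId,  &classIdSum,  1, MPI_INT,           MPI_SUM, MPI_COMM_WORLD);

      // If every rank holds the same values, each sum equals the local value times clientSize.
      bool coherent = (typeIdSum  / clientSize == typeId)
                   && (classIdSum / clientSize == classId)
                   && (timeLineSum / clientSize == timeLine);
      if (rank == 0) std::printf("event coherent across clients: %s\n", coherent ? "yes" : "no");

      MPI_Finalize();
      return coherent ? 0 : 1;
    }
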
  • XIOS3/dev/XIOS_NOTIFICATIONS_MANAGER/src/transport/p2p_context_client.hpp

    r2556 r2671  
    5757      void setFixedBuffer(void) { isGrowableBuffer_=false;} 
    5858 
    59       size_t timeLine; //!< Timeline of each event 
     59      size_t timeLine_; //!< Timeline of each event 
    6060 
    6161      MPI_Comm interCommMerged_; //!< Communicator of the client group + server group (intraCommunicator) needed for one sided communication. 
    6262      MPI_Comm commSelf_ ; //!< Communicator for proc alone from interCommMerged  
    63       map<int,CP2pClientBuffer*> buffers; //!< Buffers for connection to servers 
     63      map<int,CP2pClientBuffer*> buffers_; //!< Buffers for connection to servers 
    6464 
    65       bool pureOneSided ; //!< if true, the client will communicate with servers only through one-sided communication. Otherwise the hybrid P2P / one-sided mode is used. 
     65      bool pureOneSided_ ; //!< if true, the client will communicate with servers only through one-sided communication. Otherwise the hybrid P2P / one-sided mode is used. 
    6666 
    6767    private: 
     
    7272      std::map<int,StdSize> maxEventSizes; 
    7373      //! Maximum number of events that can be buffered 
    74       StdSize maxBufferedEvents; 
     74      StdSize maxBufferedEvents_; 
    7575 
    7676      std::map<int, MPI_Comm> winComm_ ; //! Window communicators 
  • XIOS3/dev/XIOS_NOTIFICATIONS_MANAGER/src/transport/p2p_context_server.cpp

    r2670 r2671  
    4040    CXios::getMpiGarbageCollector().registerCommunicator(processEventBarrier_) ; 
    4141     
    42     currentTimeLine=1; 
    43     scheduled=false; 
    44     finished=false; 
     42    currentTimeLine_=1; 
     43    scheduled_=false; 
     44    finished_=false; 
    4545 
    4646    xios::MPI_Intercomm_merge(interComm_,true,&interCommMerged_) ; 
     
    4949    CXios::getMpiGarbageCollector().registerCommunicator(commSelf_) ; 
    5050     
    51     itLastTimeLine=lastTimeLine.begin() ; 
    52  
    53     pureOneSided=CXios::getin<bool>("pure_one_sided",false); // pure one sided communication (for test) 
     51    itLastTimeLine_=lastTimeLine_.begin() ; 
     52 
     53    pureOneSided_=CXios::getin<bool>("pure_one_sided",false); // pure one sided communication (for test) 
    5454       
    5555  } 
     
    5757  void CP2pContextServer::setPendingEvent(void) 
    5858  { 
    59     pendingEvent=true; 
     59    pendingEvent_=true; 
    6060  } 
    6161 
     
    6767  bool CP2pContextServer::hasFinished(void) 
    6868  { 
    69     return finished; 
     69    return finished_; 
    7070  } 
    7171 
     
    8989    if (info.isActive(logTimers)) CTimer::get("check event process").suspend(); 
    9090    if (info.isActive(logProfile)) CTimer::get("Recv event loop (p2p)").suspend(); 
    91     return finished; 
     91    return finished_; 
    9292 
    9393  } 
     
    176176    if (isProcessingEvent_) return ; 
    177177 
    178     auto it=completedEvents_.find(currentTimeLine); 
     178    auto it=completedEvents_.find(currentTimeLine_); 
    179179 
    180180    if (it!=completedEvents_.end()) 
     
    182182      if (it->second.nbSenders == it->second.currentNbSenders) 
    183183      { 
    184         if (!scheduled)  
     184        if (!scheduled_)  
    185185        { 
    186           eventScheduler_->registerEvent(currentTimeLine,hashId_); 
    187           scheduled=true; 
     186          eventScheduler_->registerEvent(currentTimeLine_,hashId_); 
     187          scheduled_=true; 
    188188        } 
    189         else if (eventScheduler_->queryEvent(currentTimeLine,hashId_) ) 
     189        else if (eventScheduler_->queryEvent(currentTimeLine_,hashId_) ) 
    190190        { 
    191191          //if (!enableEventsProcessing && isCollectiveEvent(event)) return ; 
     
    210210          isProcessingEvent_=true ; 
    211211          CEventServer event(this) ; 
    212           for(auto& buffer : it->second.buffers) buffer->fillEventServer(currentTimeLine, event) ; 
     212          for(auto& buffer : it->second.buffers) buffer->fillEventServer(currentTimeLine_, event) ; 
    213213//          MPI_Barrier(intraComm) ; 
    214214          CTimer::get("Process events").resume(); 
    215           info(100)<<"Context id "<<context_->getId()<<" : Process Event "<<currentTimeLine<<" of class "<<event.classId<<" of type "<<event.type<<endl ; 
     215          info(100)<<"Context id "<<context_->getId()<<" : Process Event "<<currentTimeLine_<<" of class "<<event.classId<<" of type "<<event.type<<endl ; 
    216216          dispatchEvent(event); 
    217217          CTimer::get("Process events").suspend(); 
    218218          isProcessingEvent_=false ; 
    219219//         context_->unsetProcessingEvent() ; 
    220           pendingEvent=false; 
     220          pendingEvent_=false; 
    221221          completedEvents_.erase(it); 
    222           currentTimeLine++; 
    223           scheduled = false; 
     222          currentTimeLine_++; 
     223          scheduled_ = false; 
    224224        } 
    225225      } 
     
    273273    { 
    274274      CTimer::get("Context finalize").resume(); 
    275       finished=true; 
     275      finished_=true; 
    276276      info(20)<<" CP2pContextServer: Receive context <"<<context_->getId()<<"> finalize."<<endl; 
    277277      notifyClientsFinalize() ; 
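
The processEvents() hunks above follow a two-phase pattern: once all senders of the event for currentTimeLine_ have completed, the event is first registered with the event scheduler (scheduled_ set to true) and is dispatched only after the scheduler confirms it via queryEvent(); the buffers then fill a CEventServer, dispatchEvent() runs, the completed entry is erased, currentTimeLine_ advances and scheduled_ is reset. The sketch below reproduces that control flow with hypothetical stand-in types (not the XIOS classes) to make the state transitions explicit.

    #include <cstddef>
    #include <cstdio>
    #include <map>
    #include <set>

    struct EventScheduler                          // stand-in for the real event scheduler
    {
      std::set<std::size_t> registered_;
      void registerEvent(std::size_t timeLine, std::size_t /*hashId*/) { registered_.insert(timeLine); }
      bool queryEvent(std::size_t timeLine, std::size_t /*hashId*/)    { return registered_.count(timeLine) != 0; }
    };

    struct MiniContextServer
    {
      std::size_t currentTimeLine_ = 1;
      bool        scheduled_       = false;
      bool        pendingEvent_    = false;
      std::size_t hashId_          = 42;
      EventScheduler scheduler_;
      std::map<std::size_t, int> completedEvents_; // timeline -> event payload (simplified)

      void processEvents()
      {
        auto it = completedEvents_.find(currentTimeLine_);
        if (it == completedEvents_.end()) return;

        if (!scheduled_)                            // first pass: register the event
        {
          scheduler_.registerEvent(currentTimeLine_, hashId_);
          scheduled_ = true;
        }
        else if (scheduler_.queryEvent(currentTimeLine_, hashId_))  // later pass: dispatch it
        {
          std::printf("dispatch event for timeline %zu\n", currentTimeLine_);
          completedEvents_.erase(it);
          pendingEvent_ = false;
          currentTimeLine_++;                       // move on to the next timeline
          scheduled_ = false;
        }
      }
    };

    int main()
    {
      MiniContextServer server;
      server.completedEvents_[1] = 0;
      server.processEvents();                       // registers timeline 1 with the scheduler
      server.processEvents();                       // dispatches timeline 1, advances to timeline 2
      return 0;
    }
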
  • XIOS3/dev/XIOS_NOTIFICATIONS_MANAGER/src/transport/p2p_context_server.hpp

    r2563 r2671  
    8080 
    8181    map<int,CP2pServerBuffer*> buffers_ ; 
    82     map<int,size_t> lastTimeLine ; //!< last event time line for a processed request 
    83     map<int,size_t>::iterator itLastTimeLine ; //!< iterator on lastTimeLine 
    84     map<int, list<std::pair<MPI_Message,MPI_Status> > > pendingProbe; 
    85     map<int,MPI_Request> pendingRequest ; 
    86     map<int,char*> bufferRequest ; 
     82    map<int,size_t> lastTimeLine_ ; //!< last event time line for a processed request 
     83    map<int,size_t>::iterator itLastTimeLine_ ; //!< iterator on lastTimeLine 
     84    map<int, list<std::pair<MPI_Message,MPI_Status> > > pendingProbe_; 
     85    map<int,MPI_Request> pendingRequest_ ; 
     86    map<int,char*> bufferRequest_ ; 
    8787 
    88     map<size_t,CEventServer*> events ; 
    89     size_t currentTimeLine ; 
     88    map<size_t,CEventServer*> events_ ; 
     89    size_t currentTimeLine_ ; 
    9090       
    91     bool finished ; 
    92     bool pendingEvent ; 
    93     bool scheduled  ;    /*!< event of current timeline is already scheduled? */ 
    94     bool pureOneSided ; //!< if true, the client will communicate with servers only through one-sided communication. Otherwise the hybrid P2P / one-sided mode is used. 
     91    bool finished_ ; 
     92    bool pendingEvent_ ; 
     93    bool scheduled_  ;    /*!< event of current timeline is already scheduled? */ 
     94    bool pureOneSided_ ; //!< if true, the client will communicate with servers only through one-sided communication. Otherwise the hybrid P2P / one-sided mode is used. 
    9595 
    9696    private: 