Ignore:
Timestamp:
06/23/23 14:46:51 (12 months ago)
Author:
ymipsl
Message:

Adaptation to new hyper event scheduler.
YM

File:
1 edited

Legend:

Unmodified
Added
Removed
  • XIOS3/trunk/src/manager/pool_ressource.cpp

    r2517 r2523  
    77#include "cxios.hpp" 
    88#include "timer.hpp" 
     9#include "event_scheduler.hpp" 
    910 
    1011namespace xios 
    1112{ 
    12   CPoolRessource::CPoolRessource(MPI_Comm poolComm, const std::string& Id, bool isServer) : Id_(Id), finalizeSignal_(false) 
     13  CPoolRessource::CPoolRessource(MPI_Comm poolComm, shared_ptr<CEventScheduler> eventScheduler, const std::string& Id, bool isServer) : Id_(Id), finalizeSignal_(false) 
    1314  { 
    1415    int commRank, commSize ; 
     
    2930    winNotify_->updateToExclusiveWindow(commRank, this, &CPoolRessource::notificationsDumpOut) ; 
    3031    MPI_Barrier(poolComm_) ; 
     32    if (eventScheduler) eventScheduler_=eventScheduler ; 
     33    else eventScheduler_= make_shared<CEventScheduler>(poolComm) ; 
     34    freeRessourceEventScheduler_ = eventScheduler_ ; 
    3135  } 
    3236 
     
    3640     
    3741    auto it=occupancy_.begin() ; 
     42 
     44    // ym obsolete, service cannot overlap, only created on separate ressource or matching exactly existing service 
     44    // occupancy management must not be used anymore => simplification 
     46    // for now raise an error message when no ressources are available 
     46     
    3847    int commSize ; 
    3948    MPI_Comm_size(poolComm_, &commSize) ; 
     
    4352    for(int i=0; i<size; i++)  
    4453    { 
     54      if (it->first != 0) ERROR("void CPoolRessource::createService(const std::string& serviceId, int type, int size, int nbPartitions)", 
     55                                 << "Not enough free ressources on pool id="<<getId()<<" to launch service id="<<serviceId); 
    4556      procs_in[it->second]=true ; 
    4657      procs_update.push_back(std::pair<int,int>(it->first+1,it->second)) ; 
    4758      ++it ; 
    4859    } 
    49      
     60 
     61 
    5062    occupancy_.erase(occupancy_.begin(),it) ; 
    5163    occupancy_.insert(procs_update.begin(),procs_update.end()) ; 
     
    247259  { 
    248260      
    249      info(40)<<"CPoolRessource::createNewService  : receive createService notification ; serviceId : "<<serviceId<<endl ; 
    250      MPI_Comm serviceComm, newServiceComm ; 
    251      int commRank ; 
    252      MPI_Comm_rank(poolComm_,&commRank) ; 
    253      MPI_Comm_split(poolComm_, in, commRank, &serviceComm) ; 
    254      if (in) 
    255      { 
    256        int serviceCommSize ; 
    257        int serviceCommRank ; 
    258        MPI_Comm_size(serviceComm,&serviceCommSize) ; 
    259        MPI_Comm_rank(serviceComm,&serviceCommRank) ; 
    260  
    261        info(10)<<"Service  "<<serviceId<<" created "<<"  service size : "<<serviceCommSize<< "   service rank : "<<serviceCommRank  
    262                             <<" on rank pool "<<commRank<<endl ; 
     261    info(40)<<"CPoolRessource::createNewService  : receive createService notification ; serviceId : "<<serviceId<<endl ; 
     262    MPI_Comm serviceComm, newServiceComm, freeComm ; 
     263    int commRank ; 
     264      
     265    int color; 
     266    if (!services_.empty()) color = 0 ; 
     267    else color=1 ;  
     268    MPI_Comm_rank(poolComm_,&commRank) ; 
     269    MPI_Comm_split(poolComm_, color, commRank, &freeComm) ;  // workaround 
     270     
     271    if (services_.empty())  
     272    { 
     273      MPI_Comm_rank(freeComm,&commRank) ; 
     274      MPI_Comm_split(freeComm, in, commRank, &serviceComm) ; 
     275 
     276      // temporary for event scheduler: we must use hierarchical splitting of the free ressources communicator. 
     277      // we hope for now that splitting using occupancy makes this match 
     278 
     279      if (in) 
     280      { 
     281        int serviceCommSize ; 
     282        int serviceCommRank ; 
     283        MPI_Comm_size(serviceComm,&serviceCommSize) ; 
     284        MPI_Comm_rank(serviceComm,&serviceCommRank) ; 
     285 
     286        info(10)<<"Service  "<<serviceId<<" created "<<"  service size : "<<serviceCommSize<< "   service rank : "<<serviceCommRank  
     287                              <<" on rank pool "<<commRank<<endl ; 
    263288        
    264        int partitionId ;  
    265        if ( serviceCommRank >= (serviceCommSize/nbPartitions+1)*(serviceCommSize%nbPartitions) ) 
    266        { 
    267          int rank =  serviceCommRank - (serviceCommSize/nbPartitions+1)*(serviceCommSize%nbPartitions) ; 
    268          partitionId = serviceCommSize%nbPartitions +  rank / (serviceCommSize/nbPartitions) ; 
    269        } 
    270        else  partitionId = serviceCommRank / (serviceCommSize/nbPartitions + 1) ; 
    271  
    272        MPI_Comm_split(serviceComm, partitionId, commRank, &newServiceComm) ; 
     289        int partitionId ;  
     290        if ( serviceCommRank >= (serviceCommSize/nbPartitions+1)*(serviceCommSize%nbPartitions) ) 
     291        { 
     292          int rank =  serviceCommRank - (serviceCommSize/nbPartitions+1)*(serviceCommSize%nbPartitions) ; 
     293          partitionId = serviceCommSize%nbPartitions +  rank / (serviceCommSize/nbPartitions) ; 
     294        } 
     295        else  partitionId = serviceCommRank / (serviceCommSize/nbPartitions + 1) ; 
     296 
     297        MPI_Comm_split(serviceComm, partitionId, commRank, &newServiceComm) ; 
     298 
     299        MPI_Comm_size(newServiceComm,&serviceCommSize) ; 
     300        MPI_Comm_rank(newServiceComm,&serviceCommRank) ; 
     301        info(10)<<"Service  "<<serviceId<<" created "<<"  partition : " <<partitionId<<" service size : "<<serviceCommSize 
     302                << " service rank : "<<serviceCommRank <<" on rank pool "<<commRank<<endl ; 
     303       
     304        shared_ptr<CEventScheduler> parentScheduler, childScheduler ; 
     305        freeRessourceEventScheduler_->splitScheduler(newServiceComm, parentScheduler, childScheduler) ; 
     306        if (isFirstSplit_) eventScheduler_ = parentScheduler ; 
     307        isFirstSplit_=false ; 
     308 
     309        services_[std::make_tuple(serviceId,partitionId)] = new CService(newServiceComm, childScheduler, Id_, serviceId, partitionId, type, nbPartitions) ; 
    273310        
    274        MPI_Comm_size(newServiceComm,&serviceCommSize) ; 
    275        MPI_Comm_rank(newServiceComm,&serviceCommRank) ; 
    276        info(10)<<"Service  "<<serviceId<<" created "<<"  partition : " <<partitionId<<" service size : "<<serviceCommSize 
    277                << " service rank : "<<serviceCommRank <<" on rank pool "<<commRank<<endl ; 
    278        
    279        services_[std::make_tuple(serviceId,partitionId)] = new CService(newServiceComm, Id_, serviceId, partitionId, type, nbPartitions) ; 
    280         
    281        MPI_Comm_free(&newServiceComm) ; 
    282      } 
    283      MPI_Comm_free(&serviceComm) ; 
     311        MPI_Comm_free(&newServiceComm) ; 
     312      } 
     313      else 
     314      { 
     315        shared_ptr<CEventScheduler> parentScheduler, childScheduler ; 
     316        freeRessourceEventScheduler_->splitScheduler(serviceComm, parentScheduler, childScheduler) ; 
     317        if (isFirstSplit_) eventScheduler_ = parentScheduler ; 
     318        freeRessourceEventScheduler_ = childScheduler ; 
     319        isFirstSplit_=false ; 
     320      } 
     321      MPI_Comm_free(&serviceComm) ; 
     322    } 
     323    MPI_Comm_free(&freeComm) ; 
    284324  } 
    285325   
     
    300340        shared_ptr<CEventScheduler>  eventScheduler = service.second->getEventScheduler() ; 
    301341        info(40)<<"CPoolRessource::createNewServiceOnto ; found onServiceId : "<<onServiceId<<endl  ; 
    302         services_[std::make_tuple(serviceId,partitionId)] = new CService(newServiceComm, Id_, serviceId, partitionId, type, 
    303                                                                          nbPartitions, eventScheduler) ;        
    304       } 
    305     } 
    306      
    307   } 
    308  
    309   void CPoolRessource::createService(MPI_Comm serviceComm, const std::string& serviceId, int partitionId, int type, int nbPartitions) // for clients & attached 
    310   { 
    311     services_[std::make_tuple(serviceId,partitionId)] = new CService(serviceComm, Id_, serviceId, partitionId, type, nbPartitions) ; 
     342        services_[std::make_tuple(serviceId,partitionId)] = new CService(newServiceComm, eventScheduler, Id_, serviceId, partitionId, type, 
     343                                                                         nbPartitions) ;        
     344      } 
     345    } 
     346     
     347  } 
     348 
     349  void CPoolRessource::createService(MPI_Comm serviceComm, shared_ptr<CEventScheduler> eventScheduler, const std::string& serviceId, int partitionId, int type, int nbPartitions) // for clients & attached 
     350  { 
     351    services_[std::make_tuple(serviceId,partitionId)] = new CService(serviceComm, eventScheduler, Id_, serviceId, partitionId, type, nbPartitions) ; 
    312352  } 
    313353 
Note: See TracChangeset for help on using the changeset viewer.