Changeset 2518


Timestamp: 06/12/23 15:01:16
Author: ymipsl
Message:

Make data members conform to the XIOS framework development convention, i.e. an "_" suffix is appended to each data member.

YM
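
Concretely, every data member gains a trailing "_". A condensed before/after sketch based on the constructor hunk below (illustrative only; access specifiers are assumed and the member list is abbreviated, the full class is in event_scheduler.hpp):

    #include <mpi.h>

    class CEventScheduler
    {
      public:
        CEventScheduler(const MPI_Comm& comm)
        {
          MPI_Comm_dup(comm, &communicator_);       // was: &communicator
          MPI_Comm_size(communicator_, &mpiSize_);  // was: &mpiSize
          MPI_Comm_rank(communicator_, &mpiRank_);  // was: &mpiRank
        }

      private:
        MPI_Comm communicator_;  // suffix marks this as a data member
        int mpiRank_;
        int mpiSize_;
    };

The trailing underscore lets member accesses stand out from locals and parameters (e.g. comm above), which is the usual motivation for this convention.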

Location: XIOS3/trunk/src
Files: 2 edited

Legend: lines prefixed with "-" were removed (left revision), lines prefixed with "+" were added (right revision); unprefixed lines are unmodified context.
  • XIOS3/trunk/src/event_scheduler.cpp (r2274 → r2518)
  CEventScheduler::CEventScheduler(const MPI_Comm& comm)
  {
-   MPI_Comm_dup(comm, &communicator) ;
-   MPI_Comm_size(communicator,&mpiSize) ;
-   MPI_Comm_rank(communicator,&mpiRank);
+   MPI_Comm_dup(comm, &communicator_) ;
+   MPI_Comm_size(communicator_,&mpiSize_) ;
+   MPI_Comm_rank(communicator_,&mpiRank_);
…
      maxChild=maxChild+1 ;
      for(int i=0;i<maxChild;i++) m=m*maxChild ;
-   } while(m<mpiSize) ;
+   } while(m<mpiSize_) ;

    int maxLevel=0 ;
-   for(int size=1; size<=mpiSize; size*=maxChild) maxLevel++ ;
+   for(int size=1; size<=mpiSize_; size*=maxChild) maxLevel++ ;

    int begin, end, nb ;
    int pos, n ;

-   parent=vector<int>(maxLevel+1) ;
-   child=vector<vector<int> >(maxLevel+1,vector<int>(maxChild)) ;
-   nbChild=vector<int> (maxLevel+1) ;
+   parent_=vector<int>(maxLevel+1) ;
+   child_=vector<vector<int> >(maxLevel+1,vector<int>(maxChild)) ;
+   nbChild_=vector<int> (maxLevel+1) ;

-   level=0 ;
+   level_=0 ;
    begin=0 ;
-   end=mpiSize-1 ;
+   end=mpiSize_-1 ;
    nb=end-begin+1 ;
…
      n=0 ;
      pos=begin ;
-     nbChild[level]=0 ;
-     parent[level+1]=begin ;
+     nbChild_[level_]=0 ;
+     parent_[level_+1]=begin ;
      for(int i=0;i<maxChild && i<nb ;i++)
      {
…
        else n = nb/maxChild ;

-       if (mpiRank>=pos && mpiRank<pos+n)
+       if (mpiRank_>=pos && mpiRank_<pos+n)
        {
          begin=pos ;
          end=pos+n-1 ;
        }
-       child[level][i]=pos ;
+       child_[level_][i]=pos ;
        pos=pos+n ;
-       nbChild[level]++ ;
+       nbChild_[level_]++ ;
      }
      nb=end-begin+1 ;
-     level=level+1 ;
+     level_=level_+1 ;
    } while (nb>1) ;
…
  CEventScheduler::~CEventScheduler()
  {
-   while (!pendingSentParentRequest.empty() || !pendingRecvParentRequest.empty() || !pendingRecvChildRequest.empty() || !pendingSentChildRequest.empty())
+   while (!pendingSentParentRequest_.empty() || !pendingRecvParentRequest_.empty() || !pendingRecvChildRequest_.empty() || !pendingSentChildRequest_.empty())
    {
      checkEvent() ;
…
  void CEventScheduler::registerEvent(const size_t timeLine, const size_t contextHashId)
  {
-   registerEvent(timeLine, contextHashId, level) ;
+   registerEvent(timeLine, contextHashId, level_) ;
    checkEvent() ;
  }
…
    sentRequest->buffer[2]=lev-1 ;

-   pendingSentParentRequest.push(sentRequest) ;
-   MPI_Isend(sentRequest->buffer,3, MPI_UNSIGNED_LONG, parent[lev], 0, communicator, &sentRequest->request) ;
+   pendingSentParentRequest_.push(sentRequest) ;
+   MPI_Isend(sentRequest->buffer,3, MPI_UNSIGNED_LONG, parent_[lev], 0, communicator_, &sentRequest->request) ;
    traceOn() ;
  }
…
  {
    checkEvent() ;
-   if (! eventStack.empty() && eventStack.front().first==timeLine && eventStack.front().second==contextHashId)
-   {
-     //eventStack.pop() ;
+   if (! eventStack_.empty() && eventStack_.front().first==timeLine && eventStack_.front().second==contextHashId)
+   {
+     //eventStack_.pop() ;
      return true ;
    }
…
    // check sent request to parent
-   while (! pendingSentParentRequest.empty() && completed)
-   {
-     MPI_Test( & pendingSentParentRequest.front()->request, &completed, &status) ;
+   while (! pendingSentParentRequest_.empty() && completed)
+   {
+     MPI_Test( & pendingSentParentRequest_.front()->request, &completed, &status) ;
      if (completed)
      {
-       delete pendingSentParentRequest.front() ;
-       pendingSentParentRequest.pop() ;
+       delete pendingSentParentRequest_.front() ;
+       pendingSentParentRequest_.pop() ;
      }
    }
…
    while(received)
    {
-     MPI_Iprobe(MPI_ANY_SOURCE,1,communicator,&received, &status) ;
+     MPI_Iprobe(MPI_ANY_SOURCE,1,communicator_,&received, &status) ;
      if (received)
      {
        recvRequest=new SPendingRequest ;
-       MPI_Irecv(recvRequest->buffer, 3, MPI_UNSIGNED_LONG, MPI_ANY_SOURCE, 1, communicator, &(recvRequest->request)) ;
-       pendingRecvParentRequest.push(recvRequest) ;
+       MPI_Irecv(recvRequest->buffer, 3, MPI_UNSIGNED_LONG, MPI_ANY_SOURCE, 1, communicator_, &(recvRequest->request)) ;
+       pendingRecvParentRequest_.push(recvRequest) ;
      }
    }
…
    // check sent request from parent
    completed=true ;
-   while (! pendingRecvParentRequest.empty() && completed)
-   {
-     recvRequest=pendingRecvParentRequest.front() ;
+   while (! pendingRecvParentRequest_.empty() && completed)
+   {
+     recvRequest=pendingRecvParentRequest_.front() ;
      MPI_Test( &(recvRequest->request), &completed, &status) ;
      if (completed)
…
        size_t lev=recvRequest->buffer[2] ;
        delete recvRequest ;
-       pendingRecvParentRequest.pop() ;
-
-       if (lev==level) eventStack.push(pair<size_t,size_t>(timeLine,hashId)) ;
+       pendingRecvParentRequest_.pop() ;
+
+       if (lev==level_) eventStack_.push(pair<size_t,size_t>(timeLine,hashId)) ;
        else  bcastEvent(timeLine, hashId, lev) ;
      }
…
    while(received)
    {
-     MPI_Iprobe(MPI_ANY_SOURCE,0,communicator,&received, &status) ;
+     MPI_Iprobe(MPI_ANY_SOURCE,0,communicator_,&received, &status) ;
      if (received)
      {
        recvRequest=new SPendingRequest ;
-       MPI_Irecv(recvRequest->buffer, 3, MPI_UNSIGNED_LONG, MPI_ANY_SOURCE, 0, communicator, &recvRequest->request) ;
-       pendingRecvChildRequest.push_back(recvRequest) ;
+       MPI_Irecv(recvRequest->buffer, 3, MPI_UNSIGNED_LONG, MPI_ANY_SOURCE, 0, communicator_, &recvRequest->request) ;
+       pendingRecvChildRequest_.push_back(recvRequest) ;
      }
    }
…
    // check if receive request is achieved

-   for(list<SPendingRequest*>::iterator it=pendingRecvChildRequest.begin(); it!=pendingRecvChildRequest.end() ; )
+   for(list<SPendingRequest*>::iterator it=pendingRecvChildRequest_.begin(); it!=pendingRecvChildRequest_.end() ; )
    {
      MPI_Test(&((*it)->request),&received,&status) ;
…
        SEvent event={timeLine,hashId,lev} ;
        delete *it ; // free mem
-       it=pendingRecvChildRequest.erase(it) ; // get out of the list
+       it=pendingRecvChildRequest_.erase(it) ; // get out of the list

-       map< SEvent,int>::iterator itEvent=recvEvent.find(event) ;
-       if (itEvent==recvEvent.end())
+       map< SEvent,int>::iterator itEvent=recvEvent_.find(event) ;
+       if (itEvent==recvEvent_.end())
        {
-         itEvent=(recvEvent.insert(pair< SEvent ,int > (event,1))).first ;
+         itEvent=(recvEvent_.insert(pair< SEvent ,int > (event,1))).first ;

        }
        else (itEvent->second)++ ;
-       if (itEvent->second==nbChild[lev])
+       if (itEvent->second==nbChild_[lev])
        {
          if (lev==0)
          {
            bcastEvent(timeLine,hashId,lev) ;
-           recvEvent.erase(itEvent) ;
+           recvEvent_.erase(itEvent) ;
          }
          else
          {
            registerEvent( timeLine,hashId,lev) ;
-           recvEvent.erase(itEvent) ;
+           recvEvent_.erase(itEvent) ;
          }
        }
…
    // check if bcast request is achieved

-   for(list<SPendingRequest*>::iterator it=pendingSentChildRequest.begin(); it!=pendingSentChildRequest.end() ; )
+   for(list<SPendingRequest*>::iterator it=pendingSentChildRequest_.begin(); it!=pendingSentChildRequest_.end() ; )
    {
      MPI_Test(&(*it)->request,&received,&status) ;
…
      {
        delete *it ;    // free memory
-       it = pendingSentChildRequest.erase(it) ;          // get out of the list
+       it = pendingSentChildRequest_.erase(it) ;          // get out of the list

      }
…
-   for(int i=0; i<nbChild[lev];i++)
+   for(int i=0; i<nbChild_[lev];i++)
    {
      sentRequest=new SPendingRequest ;
…
      sentRequest->buffer[1]=contextHashId ;
      sentRequest->buffer[2]=lev+1 ;
-     MPI_Isend(sentRequest->buffer,3, MPI_UNSIGNED_LONG, child[lev][i], 1, communicator, & sentRequest->request) ;
-     pendingSentChildRequest.push_back(sentRequest) ;
+     MPI_Isend(sentRequest->buffer,3, MPI_UNSIGNED_LONG, child_[lev][i], 1, communicator_, & sentRequest->request) ;
+     pendingSentChildRequest_.push_back(sentRequest) ;
    }
  }
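
As background for the checkEvent() hunks above: each nonblocking send is wrapped in an SPendingRequest and queued, and the queue is drained from the front whenever MPI_Test reports completion. A minimal self-contained demo of that pattern (hypothetical program, not part of XIOS; the SPendingRequest layout matches the buffer[3]/request usage visible in the diff; compile with mpicxx and run with at least 2 ranks):

    #include <mpi.h>
    #include <queue>
    #include <cstddef>

    // Same layout as the scheduler's pending requests: a 3-word message
    // (timeLine, hashId, level) plus the MPI request handle tracking it.
    struct SPendingRequest
    {
      std::size_t buffer[3];
      MPI_Request request;
    };

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      int rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);

      if (rank == 0)
      {
        std::queue<SPendingRequest*> pendingSentParentRequest_;

        SPendingRequest* sentRequest = new SPendingRequest;
        sentRequest->buffer[0] = 1;   // timeLine
        sentRequest->buffer[1] = 42;  // contextHashId
        sentRequest->buffer[2] = 0;   // level
        // MPI_UNSIGNED_LONG for size_t assumes an LP64 platform, as the
        // original code does.
        MPI_Isend(sentRequest->buffer, 3, MPI_UNSIGNED_LONG, 1, 0,
                  MPI_COMM_WORLD, &sentRequest->request);
        pendingSentParentRequest_.push(sentRequest);

        // Front-of-queue polling as in checkEvent(); XIOS retries on the
        // next checkEvent() call, here we simply spin until the queue empties.
        MPI_Status status;
        while (!pendingSentParentRequest_.empty())
        {
          int completed = 0;
          MPI_Test(&pendingSentParentRequest_.front()->request, &completed, &status);
          if (completed)
          {
            delete pendingSentParentRequest_.front();
            pendingSentParentRequest_.pop();
          }
        }
      }
      else if (rank == 1)
      {
        std::size_t buf[3];
        MPI_Recv(buf, 3, MPI_UNSIGNED_LONG, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
      }

      MPI_Finalize();
      return 0;
    }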
  • XIOS3/trunk/src/event_scheduler.hpp (r2230 → r2518)
       */
      bool queryEvent(const size_t timeLine, const size_t contextHashId) ;
-     void popEvent() { eventStack.pop() ; }
+     void popEvent() { eventStack_.pop() ; }
…
      } ;

-     MPI_Comm communicator ;  /*!< Internal MPI communicator */
-     int mpiRank ;            /*!< Rank in the communicator */
-     int mpiSize ;            /*!< Size of the communicator */
+     MPI_Comm communicator_ ;  /*!< Internal MPI communicator */
+     int mpiRank_ ;            /*!< Rank in the communicator */
+     int mpiSize_ ;            /*!< Size of the communicator */

-     queue< pair<size_t, size_t> > eventStack ;
-     queue<SPendingRequest* > pendingSentParentRequest ;   /*!< Pending request sent to parent   */
-     queue<SPendingRequest*>  pendingRecvParentRequest ;   /*!< Pending request recv from parent */
-     list<SPendingRequest* >  pendingRecvChildRequest ;    /*!< Pending request recv from child  */
-     list<SPendingRequest*>   pendingSentChildRequest ;    /*!< Pending request sent to child    */
-     map< SEvent, int > recvEvent ;                        /*!< list of event received from children. Contains the currnet number children that have already post the same event */
+     queue< pair<size_t, size_t> > eventStack_ ;
+     queue<SPendingRequest* > pendingSentParentRequest_ ;   /*!< Pending request sent to parent   */
+     queue<SPendingRequest*>  pendingRecvParentRequest_ ;   /*!< Pending request recv from parent */
+     list<SPendingRequest* >  pendingRecvChildRequest_ ;    /*!< Pending request recv from child  */
+     list<SPendingRequest*>   pendingSentChildRequest_ ;    /*!< Pending request sent to child    */
+     map< SEvent, int > recvEvent_ ;                        /*!< list of event received from children. Contains the currnet number children that have already post the same event */

-     int level ;                   /*!< Number of hierachical level for communication */
-     vector<int> parent ;          /*!< Parent rank for each level */
-     vector<vector<int> >  child ; /*!< List of child rank for each level */
-     vector<int> nbChild ;         /*!< Number of child for each level */
+     int level_ ;                   /*!< Number of hierachical level for communication */
+     vector<int> parent_ ;          /*!< Parent rank for each level */
+     vector<vector<int> >  child_ ; /*!< List of child rank for each level */
+     vector<int> nbChild_ ;         /*!< Number of child for each level */

    } ;
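
The level_/parent_/child_/nbChild_ members above describe the scheduler's hierarchical communication tree. A hypothetical standalone sketch of the sizing logic from the constructor (it mirrors the for-loop over size in event_scheduler.cpp above; the function name and example values are illustrative):

    #include <iostream>

    // Counts the iterations of
    //   for(int size=1; size<=mpiSize_; size*=maxChild) maxLevel++ ;
    // i.e. how many tree levels are needed before the fan-out exceeds the
    // communicator size; parent_/child_/nbChild_ are then sized maxLevel+1.
    int treeLevels(int mpiSize, int maxChild)
    {
      int maxLevel = 0;
      for (int size = 1; size <= mpiSize; size *= maxChild) maxLevel++;
      return maxLevel;
    }

    int main()
    {
      // Example: 100 ranks with fan-out 4 -> size takes 1, 4, 16, 64, so
      // maxLevel = 4 and the per-level member vectors get 5 entries.
      std::cout << treeLevels(100, 4) << std::endl;
      return 0;
    }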