Changeset 1642 for XIOS/dev/branch_openmp/src/server.cpp
- Timestamp: 01/23/19 10:31:44
- File: XIOS/dev/branch_openmp/src/server.cpp (1 edited)
Legend:
- Unmodified lines are shown without a prefix
- Added lines are prefixed with +
- Removed lines are prefixed with -
XIOS/dev/branch_openmp/src/server.cpp
r1545 → r1642

  #include "timer.hpp"
  #include "event_scheduler.hpp"
- using namespace ep_lib;
+ #include "string_tools.hpp"

  namespace xios
  {
-   MPI_Comm CServer::intraComm ;
-   std::list<MPI_Comm> CServer::interCommLeft ;
-   std::list<MPI_Comm> CServer::interCommRight ;
-   std::list<MPI_Comm> CServer::contextInterComms;
-   std::list<MPI_Comm> CServer::contextIntraComms;
+   ep_lib::MPI_Comm CServer::intraComm ;
+   std::list<ep_lib::MPI_Comm> CServer::interCommLeft ;
+   std::list<ep_lib::MPI_Comm> CServer::interCommRight ;
+   std::list<ep_lib::MPI_Comm> CServer::contextInterComms;
+   std::list<ep_lib::MPI_Comm> CServer::contextIntraComms;
    int CServer::serverLevel = 0 ;
    int CServer::nbContexts = 0;
…
    void CServer::initialize(void)
    {
-     //int initialized ;
-     //MPI_Initialized(&initialized) ;
-     //if (initialized) is_MPI_Initialized=true ;
-     //else is_MPI_Initialized=false ;
+     int initialized ;
+     ep_lib::MPI_Initialized(&initialized) ;
+     if (initialized) is_MPI_Initialized=true ;
+     else is_MPI_Initialized=false ;
      int rank ;

…
      {

-       //if (!is_MPI_Initialized)
-       //{
-       //  MPI_Init(NULL, NULL);
-       //}
+       if (!is_MPI_Initialized)
+       {
+         ep_lib::MPI_Init(NULL, NULL);
+       }
        CTimer::get("XIOS").resume() ;

…
        int myColor ;
        int i,c ;
-       MPI_Comm newComm;
-       MPI_Comm_size(CXios::globalComm, &size) ;
-       MPI_Comm_rank(CXios::globalComm, &rank_);
+       ep_lib::MPI_Comm newComm;
+       ep_lib::MPI_Comm_size(CXios::globalComm, &size) ;
+       ep_lib::MPI_Comm_rank(CXios::globalComm, &rank_);

        hashAll=new unsigned long[size] ;
-       MPI_Allgather(&hashServer, 1, MPI_LONG, hashAll, 1, MPI_LONG, CXios::globalComm) ;
+       ep_lib::MPI_Allgather(&hashServer, 1, EP_LONG, hashAll, 1, EP_LONG, CXios::globalComm) ;

        map<unsigned long, int> colors ;
…
        if (serverLevel==2)
        {
-         #pragma omp critical (_output)
          info(50)<<"The number of secondary server pools is "<< sndServerGlobalRanks.size() <<endl ;
          for (i=0; i<sndServerGlobalRanks.size(); i++)
…
        // (2) Create intraComm
        if (serverLevel != 2) myColor=colors[hashServer];
-       MPI_Comm_split(CXios::globalComm, myColor, rank_, &intraComm) ;
+       ep_lib::MPI_Comm_split(CXios::globalComm, myColor, rank_, &intraComm) ;

        // (3) Create interComm
…
          clientLeader=it->second ;
          int intraCommSize, intraCommRank ;
-         MPI_Comm_size(intraComm,&intraCommSize) ;
-         MPI_Comm_rank(intraComm,&intraCommRank) ;
-         MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ;
-         #pragma omp critical (_output)
-         {
-           info(50)<<"intercommCreate::server (classical mode) "<<rank_<<" intraCommSize : "<<intraCommSize
+         ep_lib::MPI_Comm_size(intraComm,&intraCommSize) ;
+         ep_lib::MPI_Comm_rank(intraComm,&intraCommRank) ;
+         info(50)<<"intercommCreate::server (classical mode) "<<rank_<<" intraCommSize : "<<intraCommSize
                  <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ;
-         }
+         ep_lib::MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ;
          interCommLeft.push_back(newComm) ;
        }
…
          clientLeader=it->second ;
          int intraCommSize, intraCommRank ;
-         MPI_Comm_size(intraComm, &intraCommSize) ;
-         MPI_Comm_rank(intraComm, &intraCommRank) ;
-         MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ;
-         #pragma omp critical (_output)
-         {
-           info(50)<<"intercommCreate::server (server level 1) "<<rank_<<" intraCommSize : "<<intraCommSize
+         ep_lib::MPI_Comm_size(intraComm, &intraCommSize) ;
+         ep_lib::MPI_Comm_rank(intraComm, &intraCommRank) ;
+         info(50)<<"intercommCreate::server (server level 1) "<<rank_<<" intraCommSize : "<<intraCommSize
                  <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ;
-         }
+         ep_lib::MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ;
          interCommLeft.push_back(newComm) ;
        }
…
        {
          int intraCommSize, intraCommRank ;
-         MPI_Comm_size(intraComm, &intraCommSize) ;
-         MPI_Comm_rank(intraComm, &intraCommRank) ;
-         MPI_Intercomm_create(intraComm, 0, CXios::globalComm, sndServerGlobalRanks[i], 1, &newComm) ;
-         #pragma omp critical (_output)
-         {
-           info(50)<<"intercommCreate::client (server level 1) "<<rank_<<" intraCommSize : "<<intraCommSize
+         ep_lib::MPI_Comm_size(intraComm, &intraCommSize) ;
+         ep_lib::MPI_Comm_rank(intraComm, &intraCommRank) ;
+         info(50)<<"intercommCreate::client (server level 1) "<<rank_<<" intraCommSize : "<<intraCommSize
                  <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< sndServerGlobalRanks[i]<<endl ;
-         }
+         ep_lib::MPI_Intercomm_create(intraComm, 0, CXios::globalComm, sndServerGlobalRanks[i], 1, &newComm) ;
          interCommRight.push_back(newComm) ;
        }
…
          clientLeader = leaders[hashString(CXios::xiosCodeId)];
          int intraCommSize, intraCommRank ;
-         MPI_Comm_size(intraComm, &intraCommSize) ;
-         MPI_Comm_rank(intraComm, &intraCommRank) ;
-         MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 1, &newComm) ;
-         #pragma omp critical (_output)
-         {
-           info(50)<<"intercommCreate::server (server level 2) "<<rank_<<" intraCommSize : "<<intraCommSize
+         ep_lib::MPI_Comm_size(intraComm, &intraCommSize) ;
+         ep_lib::MPI_Comm_rank(intraComm, &intraCommRank) ;
+         info(50)<<"intercommCreate::server (server level 2) "<<rank_<<" intraCommSize : "<<intraCommSize
                  <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ;
-         }
+         ep_lib::MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 1, &newComm) ;
          interCommLeft.push_back(newComm) ;
        }
…

      CTimer::get("XIOS").resume() ;
-     MPI_Comm localComm;
+     ep_lib::MPI_Comm localComm;
      oasis_get_localcomm(localComm);
-     MPI_Comm_rank(localComm,&rank_) ;
+     ep_lib::MPI_Comm_rank(localComm,&rank_) ;

      // (1) Create server intraComm
      if (!CXios::usingServer2)
      {
-       MPI_Comm_dup(localComm, &intraComm);
+       ep_lib::MPI_Comm_dup(localComm, &intraComm);
      }
      else
      {
        int globalRank;
-       MPI_Comm_size(localComm,&size) ;
-       MPI_Comm_rank(CXios::globalComm,&globalRank) ;
+       ep_lib::MPI_Comm_size(localComm,&size) ;
+       ep_lib::MPI_Comm_rank(CXios::globalComm,&globalRank) ;
        srvGlobalRanks = new int[size] ;
-       MPI_Allgather(&globalRank, 1, MPI_INT, srvGlobalRanks, 1, MPI_INT, localComm) ;
+       ep_lib::MPI_Allgather(&globalRank, 1, EP_INT, srvGlobalRanks, 1, EP_INT, localComm) ;

        int reqNbProc = size*CXios::ratioServer2/100.;
…
              << "It is impossible to dedicate the requested number of processes = "<<reqNbProc
              <<" to secondary server. XIOS will run in the classical server mode."<<endl;
-         MPI_Comm_dup(localComm, &intraComm);
+         ep_lib::MPI_Comm_dup(localComm, &intraComm);
        }
        else
…
        }
        if (serverLevel != 2) myColor=0;
-       MPI_Comm_split(localComm, myColor, rank_, &intraComm) ;
+       ep_lib::MPI_Comm_split(localComm, myColor, rank_, &intraComm) ;
      }
    }

    string codesId=CXios::getin<string>("oasis_codes_id") ;
-   vector<string> splitted;
-   boost::split( splitted, codesId, boost::is_any_of(","), boost::token_compress_on ) ;
+   vector<string> oasisCodeId=splitRegex(codesId,"\\s*,\\s*") ;

    vector<string>::iterator it ;

-   MPI_Comm newComm ;
+   ep_lib::MPI_Comm newComm ;
    int globalRank ;
-   MPI_Comm_rank(CXios::globalComm,&globalRank);
+   ep_lib::MPI_Comm_rank(CXios::globalComm,&globalRank);

    // (2) Create interComms with models
-   for(it=splitted.begin();it!=splitted.end();it++)
+   for(it=oasisCodeId.begin();it!=oasisCodeId.end();it++)
    {
      oasis_get_intercomm(newComm,*it) ;
…
      {
        interCommLeft.push_back(newComm) ;
-       if (rank_==0) MPI_Send(&globalRank,1,MPI_INT,0,0,newComm) ;
+       if (rank_==0) ep_lib::MPI_Send(&globalRank,1,EP_INT,0,0,newComm) ;
      }
    }
…
    // (3) Create interComms between primary and secondary servers
    int intraCommSize, intraCommRank ;
-   MPI_Comm_size(intraComm,&intraCommSize) ;
-   MPI_Comm_rank(intraComm, &intraCommRank) ;
+   ep_lib::MPI_Comm_size(intraComm,&intraCommSize) ;
+   ep_lib::MPI_Comm_rank(intraComm, &intraCommRank) ;

    if (serverLevel == 1)
…
      info(50)<<"intercommCreate::client (server level 1) "<<globalRank<<" intraCommSize : "<<intraCommSize
              <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< srvSndLeader<<endl ;
-     MPI_Intercomm_create(intraComm, 0, CXios::globalComm, srvSndLeader, 0, &newComm) ;
+     ep_lib::MPI_Intercomm_create(intraComm, 0, CXios::globalComm, srvSndLeader, 0, &newComm) ;
      interCommRight.push_back(newComm) ;
    }
…
      info(50)<<"intercommCreate::server (server level 2)"<<globalRank<<" intraCommSize : "<<intraCommSize
              <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< srvGlobalRanks[0] <<endl ;
-     MPI_Intercomm_create(intraComm, 0, CXios::globalComm, srvGlobalRanks[0], 0, &newComm) ;
+     ep_lib::MPI_Intercomm_create(intraComm, 0, CXios::globalComm, srvGlobalRanks[0], 0, &newComm) ;
      interCommLeft.push_back(newComm) ;
    }
    if (CXios::usingServer2) delete [] srvGlobalRanks ;
-   oasis_enddef() ;
+
+   bool oasisEnddef=CXios::getin<bool>("call_oasis_enddef",true) ;
+   if (!oasisEnddef) oasis_enddef() ;
  }


-   MPI_Comm_rank(intraComm, &rank) ;
+   ep_lib::MPI_Comm_rank(intraComm, &rank) ;
    if (rank==0) isRoot=true;
    else isRoot=false;
…
      delete eventScheduler ;

-     for (std::list<MPI_Comm>::iterator it = contextInterComms.begin(); it != contextInterComms.end(); it++)
-       MPI_Comm_free(&(*it));
-
-     for (std::list<MPI_Comm>::iterator it = contextIntraComms.begin(); it != contextIntraComms.end(); it++)
-       MPI_Comm_free(&(*it));
+     for (std::list<ep_lib::MPI_Comm>::iterator it = contextInterComms.begin(); it != contextInterComms.end(); it++)
+       ep_lib::MPI_Comm_free(&(*it));
+
+     for (std::list<ep_lib::MPI_Comm>::iterator it = contextIntraComms.begin(); it != contextIntraComms.end(); it++)
+       ep_lib::MPI_Comm_free(&(*it));

      // for (std::list<MPI_Comm>::iterator it = interComm.begin(); it != interComm.end(); it++)
…
      //   MPI_Comm_free(&(*it));

-     for (std::list<MPI_Comm>::iterator it = interCommRight.begin(); it != interCommRight.end(); it++)
-       MPI_Comm_free(&(*it));
-
-     MPI_Comm_free(&intraComm);
+     for (std::list<ep_lib::MPI_Comm>::iterator it = interCommRight.begin(); it != interCommRight.end(); it++)
+       ep_lib::MPI_Comm_free(&(*it));
+
+     ep_lib::MPI_Comm_free(&intraComm);

      if (!is_MPI_Initialized)
      {
        if (CXios::usingOasis) oasis_finalize();
-       //else MPI_Finalize() ;
+       else ep_lib::MPI_Finalize() ;
      }
-
      report(0)<<"Performance report : Time spent for XIOS : "<<CTimer::get("XIOS server").getCumulatedTime()<<endl ;
      report(0)<<"Performance report : Time spent in processing events : "<<CTimer::get("Process events").getCumulatedTime()<<endl ;
…
        listenContext();
        listenRootContext();
+       listenOasisEnddef() ;
+       listenRootOasisEnddef() ;
        if (!finished) listenFinalize() ;
      }
…
      {
        listenRootContext();
+       listenRootOasisEnddef() ;
        if (!finished) listenRootFinalize() ;
      }
…
    void CServer::listenFinalize(void)
    {
-     list<MPI_Comm>::iterator it, itr;
+     list<ep_lib::MPI_Comm>::iterator it, itr;
      int msg ;
      int flag ;
…
      for(it=interCommLeft.begin();it!=interCommLeft.end();it++)
      {
-       MPI_Status status ;
+       ep_lib::MPI_Status status ;
        traceOff() ;
-       MPI_Iprobe(0,0,*it,&flag,&status) ;
+       ep_lib::MPI_Iprobe(0,0,*it,&flag,&status) ;
        traceOn() ;
        if (flag==true)
        {
-         MPI_Recv(&msg,1,MPI_INT,0,0,*it,&status) ;
+         ep_lib::MPI_Recv(&msg,1,EP_INT,0,0,*it,&status) ;
          info(20)<<" CServer : Receive client finalize"<<endl ;
          // Sending server finalize message to secondary servers (if any)
          for(itr=interCommRight.begin();itr!=interCommRight.end();itr++)
          {
-           MPI_Send(&msg,1,MPI_INT,0,0,*itr) ;
+           ep_lib::MPI_Send(&msg,1,EP_INT,0,0,*itr) ;
          }
-         MPI_Comm_free(&(*it));
+         ep_lib::MPI_Comm_free(&(*it));
          interCommLeft.erase(it) ;
          break ;
…
      {
        int i,size ;
-       MPI_Comm_size(intraComm,&size) ;
-       MPI_Request* requests= new MPI_Request[size-1] ;
-       MPI_Status* status= new MPI_Status[size-1] ;
-
-       for(int i=1;i<size;i++) MPI_Isend(&msg,1,MPI_INT,i,4,intraComm,&requests[i-1]) ;
-       MPI_Waitall(size-1,requests,status) ;
+       ep_lib::MPI_Comm_size(intraComm,&size) ;
+       ep_lib::MPI_Request* requests= new ep_lib::MPI_Request[size-1] ;
+       ep_lib::MPI_Status* status= new ep_lib::MPI_Status[size-1] ;
+
+       for(int i=1;i<size;i++) ep_lib::MPI_Isend(&msg,1,EP_INT,i,4,intraComm,&requests[i-1]) ;
+       ep_lib::MPI_Waitall(size-1,requests,status) ;

        finished=true ;
…
      {
        int flag ;
-       MPI_Status status ;
+       ep_lib::MPI_Status status ;
        int msg ;

        traceOff() ;
-       MPI_Iprobe(0,4,intraComm, &flag, &status) ;
+       ep_lib::MPI_Iprobe(0,4,intraComm, &flag, &status) ;
        traceOn() ;
        if (flag==true)
        {
-         MPI_Recv(&msg,1,MPI_INT,0,4,intraComm,&status) ;
+         ep_lib::MPI_Recv(&msg,1,EP_INT,0,4,intraComm,&status) ;
          finished=true ;
        }
      }

+     /*!
+      * Root process is listening for an order sent by client to call "oasis_enddef".
+      * The root client of a compound send the order (tag 5). It is probed and received.
+      * When the order has been received from each coumpound, the server root process ping the order to the root processes of the secondary levels of servers (if any).
+      * After, it also inform (asynchronous call) other processes of the communicator that the oasis_enddef call must be done
+      */
+     void CServer::listenOasisEnddef(void)
+     {
+        int flag ;
+        ep_lib::MPI_Status status ;
+        list<ep_lib::MPI_Comm>::iterator it;
+        int msg ;
+        static int nbCompound=0 ;
+        int size ;
+        static bool sent=false ;
+        static ep_lib::MPI_Request* allRequests ;
+        static ep_lib::MPI_Status* allStatus ;
+
+        if (sent)
+        {
+          ep_lib::MPI_Comm_size(intraComm,&size) ;
+          ep_lib::MPI_Testall(size,allRequests, &flag, allStatus) ;
+          if (flag==true)
+          {
+            delete [] allRequests ;
+            delete [] allStatus ;
+            sent=false ;
+          }
+        }
+
+        for(it=interCommLeft.begin();it!=interCommLeft.end();it++)
+        {
+          ep_lib::MPI_Status status ;
+          traceOff() ;
+          ep_lib::MPI_Iprobe(0,5,*it,&flag,&status) ; // tags oasis_endded = 5
+          traceOn() ;
+          if (flag==true)
+          {
+            ep_lib::MPI_Recv(&msg,1,EP_INT,0,5,*it,&status) ; // tags oasis_endded = 5
+            nbCompound++ ;
+            if (nbCompound==interCommLeft.size())
+            {
+              for (std::list<ep_lib::MPI_Comm>::iterator it = interCommRight.begin(); it != interCommRight.end(); it++)
+              {
+                ep_lib::MPI_Send(&msg,1,EP_INT,0,5,*it) ; // tags oasis_endded = 5
+              }
+              ep_lib::MPI_Comm_size(intraComm,&size) ;
+              allRequests= new ep_lib::MPI_Request[size] ;
+              allStatus= new ep_lib::MPI_Status[size] ;
+              for(int i=0;i<size;i++) ep_lib::MPI_Isend(&msg,1,EP_INT,i,5,intraComm,&allRequests[i]) ; // tags oasis_endded = 5
+              sent=true ;
+            }
+          }
+        }
+     }
+
+     /*!
+      * Processes probes message from root process if oasis_enddef call must be done.
+      * When the order is received it is scheduled to be treated in a synchronized way by all server processes of the communicator
+      */
+     void CServer::listenRootOasisEnddef(void)
+     {
+        int flag ;
+        ep_lib::MPI_Status status ;
+        const int root=0 ;
+        int msg ;
+        static bool eventSent=false ;
+
+        if (eventSent)
+        {
+          boost::hash<string> hashString;
+          size_t hashId = hashString("oasis_enddef");
+          if (eventScheduler->queryEvent(0,hashId))
+          {
+            oasis_enddef() ;
+            eventSent=false ;
+          }
+        }
+
+        traceOff() ;
+        ep_lib::MPI_Iprobe(root,5,intraComm, &flag, &status) ;
+        traceOn() ;
+        if (flag==true)
+        {
+          ep_lib::MPI_Recv(&msg,1,EP_INT,root,5,intraComm,&status) ; // tags oasis_endded = 5
+          boost::hash<string> hashString;
+          size_t hashId = hashString("oasis_enddef");
+          eventScheduler->registerEvent(0,hashId);
+          eventSent=true ;
+        }
+     }
+
    void CServer::listenContext(void)
    {

-     MPI_Status status ;
+     ep_lib::MPI_Status status ;
      int flag ;
      static char* buffer ;
-     static MPI_Request request ;
+     static ep_lib::MPI_Request request ;
      static bool recept=false ;
      int rank ;
…
      {
        traceOff() ;
-       MPI_Iprobe(-2,1,CXios::globalComm, &flag, &status) ;
+       #ifdef _usingMPI
+       ep_lib::MPI_Iprobe(MPI_ANY_SOURCE,1,CXios::globalComm, &flag, &status) ;
+       #elif _usingEP
+       ep_lib::MPI_Iprobe(-2,1,CXios::globalComm, &flag, &status) ;
+       #endif
        traceOn() ;
        if (flag==true)
…
          rank=status.MPI_SOURCE ;
        #elif _usingEP
-         rank=status.ep_src ;
+         rank=status.ep_src ;
        #endif
-         MPI_Get_count(&status,MPI_CHAR,&count) ;
+         ep_lib::MPI_Get_count(&status,EP_CHAR,&count) ;
          buffer=new char[count] ;
-         MPI_Irecv((void*)buffer,count,MPI_CHAR,rank,1,CXios::globalComm,&request) ;
+         ep_lib::MPI_Irecv((void*)buffer,count,EP_CHAR,rank,1,CXios::globalComm,&request) ;
          recept=true ;
        }
…
      {
        traceOff() ;
-       MPI_Test(&request,&flag,&status) ;
+       ep_lib::MPI_Test(&request,&flag,&status) ;
        traceOn() ;
        if (flag==true)
…
          rank=status.MPI_SOURCE ;
        #elif _usingEP
-         rank=status.ep_src ;
+         rank=status.ep_src ;
        #endif
-         MPI_Get_count(&status,MPI_CHAR,&count) ;
+         ep_lib::MPI_Get_count(&status,EP_CHAR,&count) ;
          recvContextMessage((void*)buffer,count) ;
          delete [] buffer ;
…
      {
        int size ;
-       MPI_Comm_size(intraComm,&size) ;
+       ep_lib::MPI_Comm_size(intraComm,&size) ;
        // MPI_Request* requests= new MPI_Request[size-1] ;
        // MPI_Status* status= new MPI_Status[size-1] ;
-       MPI_Request* requests= new MPI_Request[size] ;
-       MPI_Status* status= new MPI_Status[size] ;
+       ep_lib::MPI_Request* requests= new ep_lib::MPI_Request[size] ;
+       ep_lib::MPI_Status* status= new ep_lib::MPI_Status[size] ;

        CMessage msg ;
…
        for(int i=0; i<size; i++)
        {
-         MPI_Isend(sendBuff,sendBuffer.count(),MPI_CHAR,i,2,intraComm,&requests[i]) ;
+         ep_lib::MPI_Isend(sendBuff,sendBuffer.count(),EP_CHAR,i,2,intraComm,&requests[i]) ;
        }
…
    void CServer::listenRootContext(void)
    {
-     MPI_Status status ;
+     ep_lib::MPI_Status status ;
      int flag ;
      static std::vector<void*> buffers;
-     static std::vector<MPI_Request> requests ;
+     static std::vector<ep_lib::MPI_Request> requests ;
      static std::vector<int> counts ;
      static std::vector<bool> isEventRegistered ;
      static std::vector<bool> isEventQueued ;
-     MPI_Request request;
+     ep_lib::MPI_Request request;

      int rank ;
…
      // (1) Receive context id from the root, save it into a buffer
      traceOff() ;
-     MPI_Iprobe(root,2,intraComm, &flag, &status) ;
+     ep_lib::MPI_Iprobe(root,2,intraComm, &flag, &status) ;
      traceOn() ;
      if (flag==true)
      {
        counts.push_back(0);
-       MPI_Get_count(&status,MPI_CHAR,&(counts.back())) ;
+       ep_lib::MPI_Get_count(&status,EP_CHAR,&(counts.back())) ;
        buffers.push_back(new char[counts.back()]) ;
-       MPI_Irecv((void*)(buffers.back()),counts.back(),MPI_CHAR,root,2,intraComm,&request) ;
        requests.push_back(request);
+       ep_lib::MPI_Irecv((void*)(buffers.back()),counts.back(),EP_CHAR,root,2,intraComm,&(requests.back())) ;
        isEventRegistered.push_back(false);
        isEventQueued.push_back(false);
…
      {
        // (2) If context id is received, register an event
-       if(!isEventRegistered[ctxNb]) MPI_Test(&requests[ctxNb],&flag,&status) ;
+       ep_lib::MPI_Test(&requests[ctxNb],&flag,&status) ;
        if (flag==true && !isEventRegistered[ctxNb])
        {
…
      // (1) create interComm (with a client)
      // (2) initialize client and server (contextClient and contextServer)
-     MPI_Comm inter;
+     ep_lib::MPI_Comm inter;
      if (serverLevel < 2)
      {
-       MPI_Comm contextInterComm;
-       MPI_Intercomm_create(intraComm, 0, CXios::globalComm, leaderRank, 10+leaderRank, &contextInterComm);
-       MPI_Intercomm_merge(contextInterComm,1,&inter);
-       MPI_Barrier(inter);
+       ep_lib::MPI_Comm contextInterComm;
+       ep_lib::MPI_Intercomm_create(intraComm, 0, CXios::globalComm, leaderRank, 10+leaderRank, &contextInterComm);
+       ep_lib::MPI_Intercomm_merge(contextInterComm,1,&inter);
+       ep_lib::MPI_Barrier(inter);
+       ep_lib::MPI_Comm_free(&inter);
        context->initServer(intraComm,contextInterComm);
        contextInterComms.push_back(contextInterComm);

-       MPI_Comm_free(&inter);
      }
      // Secondary server: create communication channel with a primary server
…
      else if (serverLevel == 2)
      {
-       MPI_Comm_dup(interCommLeft.front(), &inter);
+       ep_lib::MPI_Comm_dup(interCommLeft.front(), &inter);
        contextInterComms.push_back(inter);
        context->initServer(intraComm, contextInterComms.back());
…
      {
        int i = 0, size;
-       MPI_Comm_size(intraComm, &size) ;
-       for (std::list<MPI_Comm>::iterator it = interCommRight.begin(); it != interCommRight.end(); it++, ++i)
+       ep_lib::MPI_Comm_size(intraComm, &size) ;
+       for (std::list<ep_lib::MPI_Comm>::iterator it = interCommRight.begin(); it != interCommRight.end(); it++, ++i)
        {
          StdString str = contextId +"_server_" + boost::lexical_cast<string>(i);
…
          CBufferOut buffer(buff,messageSize) ;
          buffer<<msg ;
-         MPI_Send(buff, buffer.count(), MPI_CHAR, sndServerGlobalRanks[i], 1, CXios::globalComm) ;
-         MPI_Comm_dup(*it, &inter);
+         ep_lib::MPI_Send(buff, buffer.count(), EP_CHAR, sndServerGlobalRanks[i], 1, CXios::globalComm) ;
+         ep_lib::MPI_Comm_dup(*it, &inter);
          contextInterComms.push_back(inter);
-         MPI_Comm_dup(intraComm, &inter);
+         ep_lib::MPI_Comm_dup(intraComm, &inter);
          contextIntraComms.push_back(inter);
          context->initClient(contextIntraComms.back(), contextInterComms.back()) ;
…
    {
      int rank;
-     MPI_Comm_rank(intraComm,&rank);
+     ep_lib::MPI_Comm_rank(intraComm,&rank);
      return rank;
    }
…
      int size = 0;
      int id;
-     MPI_Comm_size(CXios::globalComm, &size);
+     ep_lib::MPI_Comm_size(CXios::globalComm, &size);
      while (size)
      {
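Note on the recurring pattern. The bulk of this changeset is mechanical: every MPI symbol used by server.cpp is routed through the ep_lib namespace, and the MPI datatype constants are replaced by the EP_* aliases (EP_INT, EP_CHAR, EP_LONG), so the same call sites can be compiled either against plain MPI or against the endpoint (EP) library of the OpenMP branch, selected by the _usingMPI / _usingEP macros. The sketch below is not XIOS code; it only illustrates the call-site pattern, and the forwarding declarations inside the ep_lib namespace are assumptions made for the example (in XIOS they are provided by the EP headers).

#include <mpi.h>

// Hypothetical forwarding layer, for illustration only.
namespace ep_lib
{
  typedef ::MPI_Comm   MPI_Comm;
  typedef ::MPI_Status MPI_Status;

  inline int MPI_Init(int* argc, char*** argv)       { return ::MPI_Init(argc, argv); }
  inline int MPI_Comm_rank(MPI_Comm comm, int* rank) { return ::MPI_Comm_rank(comm, rank); }
  inline int MPI_Finalize()                          { return ::MPI_Finalize(); }
}
#define EP_INT MPI_INT   // stand-in for the EP_* datatype aliases used throughout the changeset

int main(int argc, char** argv)
{
  ep_lib::MPI_Init(&argc, &argv);
  int rank = 0;
  // Call sites now read exactly like the converted server.cpp lines,
  // e.g. ep_lib::MPI_Send(&msg, 1, EP_INT, 0, 0, comm).
  ep_lib::MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  ep_lib::MPI_Finalize();
  return 0;
}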
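The changeset also drops boost::split in favour of a splitRegex helper from the newly included string_tools.hpp when parsing the oasis_codes_id list. The snippet below is only a guess at the intended behaviour (splitting on commas and swallowing the surrounding whitespace), written with std::regex purely for illustration; the real helper in string_tools.hpp may be implemented differently.

#include <iostream>
#include <regex>
#include <string>
#include <vector>

// Illustrative splitRegex: returns the substrings between matches of `pattern`.
std::vector<std::string> splitRegex(const std::string& in, const std::string& pattern)
{
  std::regex re(pattern);
  std::sregex_token_iterator first(in.begin(), in.end(), re, -1), last;
  return std::vector<std::string>(first, last);
}

int main()
{
  // "ocean , atmosphere,ice" -> "ocean", "atmosphere", "ice"
  for (const auto& id : splitRegex("ocean , atmosphere,ice", "\\s*,\\s*"))
    std::cout << id << "\n";
}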
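Finally, the unconditional oasis_enddef() call in the OASIS initialisation path becomes optional (controlled by the new call_oasis_enddef parameter) and, when deferred, is driven by a tag-5 handshake: clients send the order, the root server forwards it to the secondary servers and to its own intracommunicator, and each server process only calls oasis_enddef() once the event scheduler reports that the whole communicator has registered the event. The fragment below is a much simplified, non-XIOS illustration of that two-phase pattern; EventScheduler is a stand-in for the real CEventScheduler and the function signatures are invented for the example.

#include <cstddef>
#include <functional>
#include <string>

// Stub scheduler: the real CEventScheduler only reports an event as ready
// once every process of the communicator has registered it.
struct EventScheduler
{
  void registerEvent(int /*timeline*/, std::size_t /*hashId*/) { pending = true; }
  bool queryEvent(int /*timeline*/, std::size_t /*hashId*/)    { return pending; }
  bool pending = false;
};

void oasis_enddef() { /* collective call into OASIS, made by every process */ }

void listenRootOasisEnddef(EventScheduler& scheduler, bool orderReceived)
{
  static bool eventSent = false;
  const std::size_t hashId = std::hash<std::string>{}("oasis_enddef");

  // Phase 2: the event registered earlier is now ready everywhere, so make the call.
  if (eventSent && scheduler.queryEvent(0, hashId))
  {
    oasis_enddef();
    eventSent = false;
  }

  // Phase 1: the tag-5 order arrived; schedule the call instead of making it directly.
  if (orderReceived)
  {
    scheduler.registerEvent(0, hashId);
    eventSent = true;
  }
}

int main()
{
  EventScheduler s;
  listenRootOasisEnddef(s, true);   // order arrives, event is registered
  listenRootOasisEnddef(s, false);  // event ready, oasis_enddef() is called
}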