Changeset 1642 for XIOS/dev/branch_openmp/src/context_server.cpp
- Timestamp:
- 01/23/19 10:31:44 (5 years ago)
- File:
- XIOS/dev/branch_openmp/src/context_server.cpp (1 edited)
Legend:
- Unmodified
- Added
- Removed
-
XIOS/dev/branch_openmp/src/context_server.cpp
r1545 r1642 18 18 #include <boost/functional/hash.hpp> 19 19 20 using namespace ep_lib; 20 21 21 22 22 namespace xios 23 23 { 24 24 25 CContextServer::CContextServer(CContext* parent, MPI_Comm intraComm_,MPI_Comm interComm_)25 CContextServer::CContextServer(CContext* parent,ep_lib::MPI_Comm intraComm_,ep_lib::MPI_Comm interComm_) 26 26 { 27 27 context=parent; 28 28 intraComm=intraComm_; 29 MPI_Comm_size(intraComm,&intraCommSize);30 MPI_Comm_rank(intraComm,&intraCommRank);29 ep_lib::MPI_Comm_size(intraComm,&intraCommSize); 30 ep_lib::MPI_Comm_rank(intraComm,&intraCommRank); 31 31 32 32 interComm=interComm_; 33 33 int flag; 34 MPI_Comm_test_inter(interComm,&flag);35 if (flag) MPI_Comm_remote_size(interComm,&commSize);36 else MPI_Comm_size(interComm,&commSize);34 ep_lib::MPI_Comm_test_inter(interComm,&flag); 35 if (flag) ep_lib::MPI_Comm_remote_size(interComm,&commSize); 36 else ep_lib::MPI_Comm_size(interComm,&commSize); 37 37 38 38 currentTimeLine=0; … … 76 76 int count; 77 77 char * addr; 78 MPI_Status status;78 ep_lib::MPI_Status status; 79 79 map<int,CServerBuffer*>::iterator it; 80 80 bool okLoop; 81 81 82 82 traceOff(); 83 MPI_Iprobe(-2, 20,interComm,&flag,&status); 83 #ifdef _usingMPI 84 MPI_Iprobe(MPI_ANY_SOURCE, 20,interComm,&flag,&status); 85 #elif _usingEP 86 ep_lib::MPI_Iprobe(-2, 20,interComm,&flag,&status); 87 #endif 84 88 traceOn(); 85 89 … … 102 106 103 107 traceOff(); 104 MPI_Iprobe(rank, 20,interComm,&flag,&status);108 ep_lib::MPI_Iprobe(rank, 20,interComm,&flag,&status); 105 109 traceOn(); 106 110 if (flag==true) listenPendingRequest(status) ; … … 111 115 } 112 116 113 bool CContextServer::listenPendingRequest( MPI_Status& status)117 bool CContextServer::listenPendingRequest(ep_lib::MPI_Status& status) 114 118 { 115 119 int count; … … 119 123 int rank=status.MPI_SOURCE ; 120 124 #elif _usingEP 121 int rank=status.ep_src ;122 #endif 125 int rank=status.ep_src ; 126 #endif 123 127 124 128 it=buffers.find(rank); … … 126 130 { 127 131 StdSize buffSize 
= 0; 128 MPI_Request request; 129 130 MPI_Irecv(&buffSize, 1, MPI_LONG, rank, 20, interComm, &request); 131 MPI_Wait(&request, &status); 132 ep_lib::MPI_Recv(&buffSize, 1, EP_LONG, rank, 20, interComm, &status); 132 133 mapBufferSize_.insert(std::make_pair(rank, buffSize)); 133 134 it=(buffers.insert(pair<int,CServerBuffer*>(rank,new CServerBuffer(buffSize)))).first; … … 136 137 else 137 138 { 138 MPI_Get_count(&status,MPI_CHAR,&count);139 ep_lib::MPI_Get_count(&status,EP_CHAR,&count); 139 140 if (it->second->isBufferFree(count)) 140 141 { 141 142 addr=(char*)it->second->getBuffer(count); 142 MPI_Irecv(addr,count,MPI_CHAR,rank,20,interComm,&pendingRequest[rank]);143 ep_lib::MPI_Irecv(addr,count,EP_CHAR,rank,20,interComm,&pendingRequest[rank]); 143 144 bufferRequest[rank]=addr; 144 145 return true; 145 }146 } 146 147 else 147 148 return false; … … 152 153 void CContextServer::checkPendingRequest(void) 153 154 { 154 map<int, MPI_Request>::iterator it;155 map<int,ep_lib::MPI_Request>::iterator it; 155 156 list<int> recvRequest; 156 157 list<int>::iterator itRecv; … … 158 159 int flag; 159 160 int count; 160 MPI_Status status;161 ep_lib::MPI_Status status; 161 162 162 163 for(it=pendingRequest.begin();it!=pendingRequest.end();it++) … … 164 165 rank=it->first; 165 166 traceOff(); 166 MPI_Test(& it->second, &flag, &status);167 ep_lib::MPI_Test(& it->second, &flag, &status); 167 168 traceOn(); 168 169 if (flag==true) 169 170 { 170 171 recvRequest.push_back(rank); 171 MPI_Get_count(&status,MPI_CHAR,&count);172 ep_lib::MPI_Get_count(&status,EP_CHAR,&count); 172 173 processRequest(rank,bufferRequest[rank],count); 173 174 } … … 229 230 // The best way to properly solve this problem will be to use the event scheduler also in attached mode 230 231 // for now just set up a MPI barrier 231 if (!CServer::eventScheduler && CXios::isServer) MPI_Barrier(intraComm) ;232 if (!CServer::eventScheduler && CXios::isServer) ep_lib::MPI_Barrier(intraComm) ; 232 233 233 234 
CTimer::get("Process events").resume(); … … 264 265 { 265 266 finished=true; 266 #pragma omp critical (_output)267 267 info(20)<<" CContextServer: Receive context <"<<context->getId()<<"> finalize."<<endl; 268 268 context->finalize(); … … 272 272 { 273 273 rank = itMap->first; 274 #pragma omp critical (_output)275 274 report(10)<< " Memory report : Context <"<<ctxId<<"> : server side : memory used for buffer of each connection to client" << endl 276 275 << " +) With client of rank " << rank << " : " << itMap->second << " bytes " << endl; 277 276 totalBuf += itMap->second; 278 277 } 279 #pragma omp critical (_output)280 278 report(0)<< " Memory report : Context <"<<ctxId<<"> : server side : total memory used for buffer "<<totalBuf<<" bytes"<<endl; 281 279 }
Note: See TracChangeset for help on using the changeset viewer.