#include "globalScopeData.hpp"
#include "xios_spl.hpp"
#include "cxios.hpp"
#include "server.hpp"
#include "client.hpp"
#include "type.hpp"
#include "context.hpp"
#include "object_template.hpp"
#include "oasis_cinterface.hpp"
#include <boost/functional/hash.hpp>
#include <boost/algorithm/string.hpp>
#include "mpi.hpp"
#include "tracer.hpp"
#include "timer.hpp"
#include "event_scheduler.hpp"
#include "string_tools.hpp"
#ifdef _usingEP
using namespace ep_lib;
#endif

namespace xios
{
  MPI_Comm CServer::intraComm ;
  std::list<MPI_Comm> CServer::interCommLeft ;
  std::list<MPI_Comm> CServer::interCommRight ;
  std::list<MPI_Comm> CServer::contextInterComms;
  std::list<MPI_Comm> CServer::contextIntraComms;
  int CServer::serverLevel = 0 ;
  int CServer::nbContexts = 0;
  bool CServer::isRoot = false ;
  int CServer::rank_ = INVALID_RANK;
  StdOFStream CServer::m_infoStream;
  StdOFStream CServer::m_errorStream;
  map<string,CContext*> CServer::contextList ;
  vector<int> CServer::sndServerGlobalRanks;
  bool CServer::finished=false ;
  bool CServer::is_MPI_Initialized ;
  CEventScheduler* CServer::eventScheduler = 0;

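  // serverLevel = 0: classical single-level server,
  // serverLevel = 1: primary (level-1) server,
  // serverLevel = 2: secondary (level-2) server.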
  //---------------------------------------------------------------
  /*!
   * \fn void CServer::initialize(void)
   * Creates intraComm for each possible type of server (classical, primary or secondary).
   * Creates interComm and stores them into the following lists:
   *   classical server -- interCommLeft
   *   primary server -- interCommLeft and interCommRight
   *   secondary server -- interCommLeft for each pool.
   * IMPORTANT: CXios::usingServer2 should NOT be used beyond this function. Use CServer::serverLevel instead.
   */
  void CServer::initialize(void)
  {
    int rank ;

    // Not using OASIS
    if (!CXios::usingOasis)
    {

      CTimer::get("XIOS").resume() ;

      boost::hash<string> hashString ;
      unsigned long hashServer = hashString(CXios::xiosCodeId);

      unsigned long* hashAll ;
      unsigned long* srvLevelAll ;

      int size ;
      int myColor ;
      int i,c ;
      MPI_Comm newComm;

      MPI_Comm_size(CXios::globalComm, &size) ;
      MPI_Comm_rank(CXios::globalComm, &rank_);

      hashAll=new unsigned long[size] ;
      MPI_Allgather(&hashServer, 1, MPI_LONG, hashAll, 1, MPI_LONG, CXios::globalComm) ;

      map<unsigned long, int> colors ;
      map<unsigned long, int> leaders ;
      map<unsigned long, int>::iterator it ;

      // (1) Establish client leaders, distribute processes between two server levels
      std::vector<int> srvRanks;
      for(i=0,c=0;i<size;i++)
      {
        if (colors.find(hashAll[i])==colors.end())
        {
          colors[hashAll[i]]=c ;
          leaders[hashAll[i]]=i ;
          c++ ;
        }
        if (CXios::usingServer2)
          if (hashAll[i] == hashServer)
            srvRanks.push_back(i);
      }

      if (CXios::usingServer2)
      {
        int reqNbProc = srvRanks.size()*CXios::ratioServer2/100.;
        if (reqNbProc<1 || reqNbProc==srvRanks.size())
        {
          error(0)<<"WARNING: void CServer::initialize(void)"<<endl
                  << "It is impossible to dedicate the requested number of processes = "<<reqNbProc
                  <<" to secondary server. XIOS will run in the classical server mode."<<endl;
        }
        else
        {
          if (CXios::nbPoolsServer2 == 0) CXios::nbPoolsServer2 = reqNbProc;
          int firstSndSrvRank = srvRanks.size()*(100.-CXios::ratioServer2)/100. ;
          int poolLeader = firstSndSrvRank;
          //*********** (1) Comment out the line below to set one process per pool
          sndServerGlobalRanks.push_back(srvRanks[poolLeader]);
          int nbPools = CXios::nbPoolsServer2;
          if ( nbPools > reqNbProc || nbPools < 1)
          {
            error(0)<<"WARNING: void CServer::initialize(void)"<<endl
                    << "It is impossible to allocate the requested number of pools = "<<nbPools
                    <<" on the secondary server. It will be set so that there is one process per pool."<<endl;
            nbPools = reqNbProc;
          }
          int remainder = ((int) (srvRanks.size()*CXios::ratioServer2/100.)) % nbPools;
          int procsPerPool = ((int) (srvRanks.size()*CXios::ratioServer2/100.)) / nbPools;
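          // Illustrative example (assumed values): with 100 server processes and
          // CXios::ratioServer2 = 40, reqNbProc = 40; with nbPools = 3 this gives
          // procsPerPool = 13 and remainder = 1, so the pools receive 14, 13 and 13
          // processes, the extra process going to the first pool(s).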
          for (i=0; i<srvRanks.size(); i++)
          {
            if (i >= firstSndSrvRank)
            {
              if (rank_ == srvRanks[i])
              {
                serverLevel=2;
              }
              poolLeader += procsPerPool;
              if (remainder != 0)
              {
                ++poolLeader;
                --remainder;
              }
              //*********** (2) Comment out the two lines below to set one process per pool
              if (poolLeader < srvRanks.size())
                sndServerGlobalRanks.push_back(srvRanks[poolLeader]);
              //*********** (3) Uncomment the line below to set one process per pool
              // sndServerGlobalRanks.push_back(srvRanks[i]);
            }
            else
            {
              if (rank_ == srvRanks[i]) serverLevel=1;
            }
          }
          if (serverLevel==2)
          {
#pragma omp critical (_output)
            {
              info(50)<<"The number of secondary server pools is "<< sndServerGlobalRanks.size() <<endl ;
            }
            for (i=0; i<sndServerGlobalRanks.size(); i++)
            {
              if (rank_>= sndServerGlobalRanks[i])
              {
                if ( i == sndServerGlobalRanks.size()-1)
                {
                  myColor = colors.size() + sndServerGlobalRanks[i];
                }
                else if (rank_< sndServerGlobalRanks[i+1])
                {
                  myColor = colors.size() + sndServerGlobalRanks[i];
                  break;
                }
              }
            }
          }
        }
      }

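      // Colors used for MPI_Comm_split below: clients and classical/level-1 server
      // processes use the index assigned to their code hash in 'colors'; level-2 server
      // processes use colors.size() + the global rank of their pool leader, which cannot
      // collide with a client color and yields one intraComm per secondary-server pool.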
      // (2) Create intraComm
      if (serverLevel != 2) myColor=colors[hashServer];
      MPI_Comm_split(CXios::globalComm, myColor, rank_, &intraComm) ;

      // (3) Create interComm
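      // Tag convention for the intercomms created below: tag 0 links clients with
      // classical/level-1 servers, tag 1 links level-1 servers with level-2 servers.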
      if (serverLevel == 0)
      {
        int clientLeader;
        for(it=leaders.begin();it!=leaders.end();it++)
        {
          if (it->first!=hashServer)
          {
            clientLeader=it->second ;
            int intraCommSize, intraCommRank ;
            MPI_Comm_size(intraComm,&intraCommSize) ;
            MPI_Comm_rank(intraComm,&intraCommRank) ;

            MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ;
#pragma omp critical (_output)
            {
              info(50)<<"intercommCreate::server (classical mode) "<<rank_<<" intraCommSize : "<<intraCommSize
                      <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ;
            }

            interCommLeft.push_back(newComm) ;
          }
        }
      }
      else if (serverLevel == 1)
      {
        int clientLeader, srvSndLeader;
        int srvPrmLeader ;

        for (it=leaders.begin();it!=leaders.end();it++)
        {
          if (it->first != hashServer)
          {
            clientLeader=it->second ;
            int intraCommSize, intraCommRank ;
            MPI_Comm_size(intraComm, &intraCommSize) ;
            MPI_Comm_rank(intraComm, &intraCommRank) ;

            MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ;
#pragma omp critical (_output)
            {
              info(50)<<"intercommCreate::server (server level 1) "<<rank_<<" intraCommSize : "<<intraCommSize
                      <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ;
            }
            interCommLeft.push_back(newComm) ;
          }
        }

        for (int i = 0; i < sndServerGlobalRanks.size(); ++i)
        {
          int intraCommSize, intraCommRank ;
          MPI_Comm_size(intraComm, &intraCommSize) ;
          MPI_Comm_rank(intraComm, &intraCommRank) ;

          MPI_Intercomm_create(intraComm, 0, CXios::globalComm, sndServerGlobalRanks[i], 1, &newComm) ;
#pragma omp critical (_output)
          {
            info(50)<<"intercommCreate::client (server level 1) "<<rank_<<" intraCommSize : "<<intraCommSize
                    <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< sndServerGlobalRanks[i]<<endl ;
          }
          interCommRight.push_back(newComm) ;
        }
      }
      else
      {
        int clientLeader;
        clientLeader = leaders[hashString(CXios::xiosCodeId)];
        int intraCommSize, intraCommRank ;
        MPI_Comm_size(intraComm, &intraCommSize) ;
        MPI_Comm_rank(intraComm, &intraCommRank) ;

        MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 1, &newComm) ;
#pragma omp critical (_output)
        {
          info(50)<<"intercommCreate::server (server level 2) "<<rank_<<" intraCommSize : "<<intraCommSize
                  <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ;
        }

        interCommLeft.push_back(newComm) ;
      }

      delete [] hashAll ;

    }
    // using OASIS
    else
    {
      int size;
      int myColor;
      int* srvGlobalRanks;
      if (!is_MPI_Initialized) oasis_init(CXios::xiosCodeId);

      CTimer::get("XIOS").resume() ;
      MPI_Comm localComm;
      oasis_get_localcomm(localComm);
      MPI_Comm_rank(localComm,&rank_) ;

      // (1) Create server intraComm
      if (!CXios::usingServer2)
      {
        MPI_Comm_dup(localComm, &intraComm);
      }
      else
      {
        int globalRank;
        MPI_Comm_size(localComm,&size) ;
        MPI_Comm_rank(CXios::globalComm,&globalRank) ;
        srvGlobalRanks = new int[size] ;
        MPI_Allgather(&globalRank, 1, MPI_INT, srvGlobalRanks, 1, MPI_INT, localComm) ;

        int reqNbProc = size*CXios::ratioServer2/100.;
        if (reqNbProc < 1 || reqNbProc == size)
        {
          error(0)<<"WARNING: void CServer::initialize(void)"<<endl
                  << "It is impossible to dedicate the requested number of processes = "<<reqNbProc
                  <<" to secondary server. XIOS will run in the classical server mode."<<endl;
          MPI_Comm_dup(localComm, &intraComm);
        }
        else
        {
          int firstSndSrvRank = size*(100.-CXios::ratioServer2)/100. ;
          int poolLeader = firstSndSrvRank;
          //*********** (1) Comment out the line below to set one process per pool
          // sndServerGlobalRanks.push_back(srvGlobalRanks[poolLeader]);
          int nbPools = CXios::nbPoolsServer2;
          if ( nbPools > reqNbProc || nbPools < 1)
          {
            error(0)<<"WARNING: void CServer::initialize(void)"<<endl
                    << "It is impossible to allocate the requested number of pools = "<<nbPools
                    <<" on the secondary server. It will be set so that there is one process per pool."<<endl;
            nbPools = reqNbProc;
          }
          int remainder = ((int) (size*CXios::ratioServer2/100.)) % nbPools;
          int procsPerPool = ((int) (size*CXios::ratioServer2/100.)) / nbPools;
          for (int i=0; i<size; i++)
          {
            if (i >= firstSndSrvRank)
            {
              if (globalRank == srvGlobalRanks[i])
              {
                serverLevel=2;
              }
              poolLeader += procsPerPool;
              if (remainder != 0)
              {
                ++poolLeader;
                --remainder;
              }
              //*********** (2) Comment out the two lines below to set one process per pool
              // if (poolLeader < size)
              //   sndServerGlobalRanks.push_back(srvGlobalRanks[poolLeader]);
              //*********** (3) Uncomment the line below to set one process per pool
              sndServerGlobalRanks.push_back(srvGlobalRanks[i]);
            }
            else
            {
              if (globalRank == srvGlobalRanks[i]) serverLevel=1;
            }
          }
          if (serverLevel==2)
          {
#pragma omp critical (_output)
            {
              info(50)<<"The number of secondary server pools is "<< sndServerGlobalRanks.size() <<endl ;
            }
            for (int i=0; i<sndServerGlobalRanks.size(); i++)
            {
              if (globalRank>= sndServerGlobalRanks[i])
              {
                if (i == sndServerGlobalRanks.size()-1)
                {
                  myColor = sndServerGlobalRanks[i];
                }
                else if (globalRank< sndServerGlobalRanks[i+1])
                {
                  myColor = sndServerGlobalRanks[i];
                  break;
                }
              }
            }
          }
          if (serverLevel != 2) myColor=0;
          MPI_Comm_split(localComm, myColor, rank_, &intraComm) ;
        }
      }

      string codesId=CXios::getin<string>("oasis_codes_id") ;
      vector<string> oasisCodeId=splitRegex(codesId,"\\s*,\\s*") ;

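      // Example (illustrative values): with oasis_codes_id = "oceanx, toyatm" in the
      // configuration, oasisCodeId contains {"oceanx", "toyatm"} and one intercomm is
      // created towards each coupled model below.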
      vector<string>::iterator it ;

      MPI_Comm newComm ;
      int globalRank ;
      MPI_Comm_rank(CXios::globalComm,&globalRank);

      // (2) Create interComms with models
      for(it=oasisCodeId.begin();it!=oasisCodeId.end();it++)
      {
        oasis_get_intercomm(newComm,*it) ;
        if ( serverLevel == 0 || serverLevel == 1)
        {
          interCommLeft.push_back(newComm) ;
          if (rank_==0) MPI_Send(&globalRank,1,MPI_INT,0,0,newComm) ;
        }
      }

      // (3) Create interComms between primary and secondary servers
      int intraCommSize, intraCommRank ;
      MPI_Comm_size(intraComm,&intraCommSize) ;
      MPI_Comm_rank(intraComm, &intraCommRank) ;

      if (serverLevel == 1)
      {
        for (int i = 0; i < sndServerGlobalRanks.size(); ++i)
        {
          int srvSndLeader = sndServerGlobalRanks[i];
#pragma omp critical (_output)
          {
            info(50)<<"intercommCreate::client (server level 1) "<<globalRank<<" intraCommSize : "<<intraCommSize
                    <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< srvSndLeader<<endl ;
          }
          MPI_Intercomm_create(intraComm, 0, CXios::globalComm, srvSndLeader, 0, &newComm) ;
          interCommRight.push_back(newComm) ;
        }
      }
      else if (serverLevel == 2)
      {
#pragma omp critical (_output)
        {
          info(50)<<"intercommCreate::server (server level 2)"<<globalRank<<" intraCommSize : "<<intraCommSize
                  <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< srvGlobalRanks[0] <<endl ;
        }
        MPI_Intercomm_create(intraComm, 0, CXios::globalComm, srvGlobalRanks[0], 0, &newComm) ;
        interCommLeft.push_back(newComm) ;
      }
      if (CXios::usingServer2) delete [] srvGlobalRanks ;

      bool oasisEnddef=CXios::getin<bool>("call_oasis_enddef",true) ;
      if (!oasisEnddef) oasis_enddef() ;
    }


    MPI_Comm_rank(intraComm, &rank) ;
    if (rank==0) isRoot=true;
    else isRoot=false;

    eventScheduler = new CEventScheduler(intraComm) ;
  }

  void CServer::finalize(void)
  {
    //CTimer::get("XIOS").suspend() ;

    delete eventScheduler ;

    for (std::list<MPI_Comm>::iterator it = contextInterComms.begin(); it != contextInterComms.end(); it++)
      MPI_Comm_free(&(*it));

    for (std::list<MPI_Comm>::iterator it = contextIntraComms.begin(); it != contextIntraComms.end(); it++)
      MPI_Comm_free(&(*it));

    for (std::list<MPI_Comm>::iterator it = interCommRight.begin(); it != interCommRight.end(); it++)
      MPI_Comm_free(&(*it));

    MPI_Comm_free(&intraComm);

    if (!is_MPI_Initialized)
    {
      if (CXios::usingOasis) oasis_finalize();
      //else MPI_Finalize() ;
    }
#pragma omp critical (_output)
    {
      report(0)<<"Performance report : Time spent for XIOS : "<<CTimer::get("XIOS server").getCumulatedTime()<<endl ;
      report(0)<<"Performance report : Time spent in processing events : "<<CTimer::get("Process events").getCumulatedTime()<<endl ;
      report(0)<<"Performance report : Ratio : "<<CTimer::get("Process events").getCumulatedTime()/CTimer::get("XIOS server").getCumulatedTime()*100.<<"%"<<endl ;
      report(100)<<CTimer::getAllCumulatedTime()<<endl ;
    }
  }


  void CServer::eventLoop(void)
  {
    bool stop=false ;

    CTimer::get("XIOS server").resume() ;
    while(!stop)
    {
      if (isRoot)
      {
        listenContext();
        listenRootContext();
        listenOasisEnddef() ;
        listenRootOasisEnddef() ;
        if (!finished) listenFinalize() ;
      }
      else
      {
        listenRootContext();
        listenRootOasisEnddef() ;
        if (!finished) listenRootFinalize() ;
      }

      contextEventLoop() ;
      if (finished && contextList.empty()) stop=true ;
      eventScheduler->checkEvent() ;
    }
    CTimer::get("XIOS server").suspend() ;
  }

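  // Root server process only: probe each client intercomm (interCommLeft) for the
  // finalize message (tag 0). The message is forwarded to the secondary-server leaders
  // (interCommRight, if any) and the intercomm is freed. Once every client intercomm has
  // been closed, the other processes of intraComm are notified with tag 4.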
  void CServer::listenFinalize(void)
  {
    list<MPI_Comm>::iterator it, itr;
    int msg ;
    int flag ;

    for(it=interCommLeft.begin();it!=interCommLeft.end();it++)
    {
      MPI_Status status ;
      traceOff() ;
      MPI_Iprobe(0,0,*it,&flag,&status) ;
      traceOn() ;
      if (flag==true)
      {
        MPI_Recv(&msg,1,MPI_INT,0,0,*it,&status) ;
#pragma omp critical (_output)
        {
          info(20)<<" CServer : Receive client finalize"<<endl ;
        }
        // Sending server finalize message to secondary servers (if any)
        for(itr=interCommRight.begin();itr!=interCommRight.end();itr++)
        {
          MPI_Send(&msg,1,MPI_INT,0,0,*itr) ;
        }
        MPI_Comm_free(&(*it));
        interCommLeft.erase(it) ;
        break ;
      }
    }

    if (interCommLeft.empty())
    {
      int i,size ;
      MPI_Comm_size(intraComm,&size) ;
      MPI_Request* requests= new MPI_Request[size-1] ;
      MPI_Status* status= new MPI_Status[size-1] ;

      for(int i=1;i<size;i++) MPI_Isend(&msg,1,MPI_INT,i,4,intraComm,&requests[i-1]) ;
      MPI_Waitall(size-1,requests,status) ;

      finished=true ;
      delete [] requests ;
      delete [] status ;
    }
  }


  void CServer::listenRootFinalize()
  {
    int flag ;
    MPI_Status status ;
    int msg ;

    traceOff() ;
    MPI_Iprobe(0,4,intraComm, &flag, &status) ;
    traceOn() ;
    if (flag==true)
    {
      MPI_Recv(&msg,1,MPI_INT,0,4,intraComm,&status) ;
      finished=true ;
    }
  }

  /*!
   * The root process listens for an order sent by the clients to call "oasis_enddef".
   * The root client of each compound sends the order (tag 5); it is probed and received.
   * Once the order has been received from every compound, the server root process forwards it to the root processes of the secondary server levels (if any).
   * Afterwards, it also informs (with an asynchronous call) the other processes of the communicator that the oasis_enddef call must be done.
   */

  void CServer::listenOasisEnddef(void)
  {
    int flag ;
    MPI_Status status ;
    list<MPI_Comm>::iterator it;
    int msg ;
    static int nbCompound=0 ;
    int size ;
    static bool sent=false ;
    static MPI_Request* allRequests ;
    static MPI_Status* allStatus ;


    if (sent)
    {
      MPI_Comm_size(intraComm,&size) ;
      MPI_Testall(size,allRequests, &flag, allStatus) ;
      if (flag==true)
      {
        delete [] allRequests ;
        delete [] allStatus ;
        sent=false ;
      }
    }


    for(it=interCommLeft.begin();it!=interCommLeft.end();it++)
    {
      MPI_Status status ;
      traceOff() ;
      MPI_Iprobe(0,5,*it,&flag,&status) ;  // tag oasis_enddef = 5
      traceOn() ;
      if (flag==true)
      {
        MPI_Recv(&msg,1,MPI_INT,0,5,*it,&status) ;  // tag oasis_enddef = 5
        nbCompound++ ;
        if (nbCompound==interCommLeft.size())
        {
          for (std::list<MPI_Comm>::iterator it = interCommRight.begin(); it != interCommRight.end(); it++)
          {
            MPI_Send(&msg,1,MPI_INT,0,5,*it) ;  // tag oasis_enddef = 5
          }
          MPI_Comm_size(intraComm,&size) ;
          allRequests= new MPI_Request[size] ;
          allStatus= new MPI_Status[size] ;
          for(int i=0;i<size;i++) MPI_Isend(&msg,1,MPI_INT,i,5,intraComm,&allRequests[i]) ;  // tag oasis_enddef = 5
          sent=true ;
        }
      }
    }
  }

  /*!
   * Every process probes for a message from the root process telling whether the oasis_enddef call must be done.
   * When the order is received, it is scheduled so that it is treated in a synchronized way by all server processes of the communicator.
   */
  void CServer::listenRootOasisEnddef(void)
  {
    int flag ;
    MPI_Status status ;
    const int root=0 ;
    int msg ;
    static bool eventSent=false ;

    if (eventSent)
    {
      boost::hash<string> hashString;
      size_t hashId = hashString("oasis_enddef");
      if (eventScheduler->queryEvent(0,hashId))
      {
        oasis_enddef() ;
        eventSent=false ;
      }
    }

    traceOff() ;
    MPI_Iprobe(root,5,intraComm, &flag, &status) ;
    traceOn() ;
    if (flag==true)
    {
      MPI_Recv(&msg,1,MPI_INT,root,5,intraComm,&status) ;  // tag oasis_enddef = 5
      boost::hash<string> hashString;
      size_t hashId = hashString("oasis_enddef");
      eventScheduler->registerEvent(0,hashId);
      eventSent=true ;
    }
  }


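  // Reception of context registration messages from client leaders (tag 1 on globalComm),
  // done in two non-blocking phases: first probe the message and post an MPI_Irecv,
  // then test the pending request for completion on subsequent calls.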
  void CServer::listenContext(void)
  {

    MPI_Status status ;
    int flag ;
    static char* buffer ;
    static MPI_Request request ;
    static bool recept=false ;
    int rank ;
    int count ;

    if (recept==false)
    {
      traceOff() ;
#ifdef _usingEP
      MPI_Iprobe(-2,1,CXios::globalComm, &flag, &status) ;
#else
      MPI_Iprobe(MPI_ANY_SOURCE,1,CXios::globalComm, &flag, &status) ;
#endif
      traceOn() ;
      if (flag==true)
      {

#ifdef _usingEP
        rank=status.ep_src;
#else
        rank=status.MPI_SOURCE ;
#endif
        MPI_Get_count(&status,MPI_CHAR,&count) ;
        buffer=new char[count] ;
        MPI_Irecv((void*)buffer,count,MPI_CHAR,rank,1,CXios::globalComm,&request) ;
        recept=true ;
      }
    }
    else
    {
      traceOff() ;
      MPI_Test(&request,&flag,&status) ;
      traceOn() ;
      if (flag==true)
      {

#ifdef _usingEP
        rank=status.ep_src;
#else
        rank=status.MPI_SOURCE ;
#endif
        MPI_Get_count(&status,MPI_CHAR,&count) ;
        recvContextMessage((void*)buffer,count) ;
        delete [] buffer ;
        recept=false ;
      }
    }
  }
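  // One registration message arrives per client-leader process of a given context. When the
  // expected number of messages (nbMessage) has been received for a context id, the root
  // sends the context id and the client leader rank to every process of intraComm with
  // tag 2, root included, so that listenRootContext() behaves identically everywhere.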
  void CServer::recvContextMessage(void* buff,int count)
  {
    static map<string,contextMessage> recvContextId;
    map<string,contextMessage>::iterator it ;
    CBufferIn buffer(buff,count) ;
    string id ;
    int clientLeader ;
    int nbMessage ;

    buffer>>id>>nbMessage>>clientLeader ;

    it=recvContextId.find(id) ;
    if (it==recvContextId.end())
    {
      contextMessage msg={0,0} ;
      pair<map<string,contextMessage>::iterator,bool> ret ;
      ret=recvContextId.insert(pair<string,contextMessage>(id,msg)) ;
      it=ret.first ;
    }
    it->second.nbRecv+=1 ;
    it->second.leaderRank+=clientLeader ;

    if (it->second.nbRecv==nbMessage)
    {
      int size ;
      MPI_Comm_size(intraComm,&size) ;
//      MPI_Request* requests= new MPI_Request[size-1] ;
//      MPI_Status* status= new MPI_Status[size-1] ;
      MPI_Request* requests= new MPI_Request[size] ;
      MPI_Status* status= new MPI_Status[size] ;

      CMessage msg ;
      msg<<id<<it->second.leaderRank;
      int messageSize=msg.size() ;
      void * sendBuff = new char[messageSize] ;
      CBufferOut sendBuffer(sendBuff,messageSize) ;
      sendBuffer<<msg ;

      // Include root itself in order not to have a divergence
      for(int i=0; i<size; i++)
      {
        MPI_Isend(sendBuff,sendBuffer.count(),MPI_CHAR,i,2,intraComm,&requests[i]) ;
      }

      recvContextId.erase(it) ;
      delete [] requests ;
      delete [] status ;

    }
  }
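  // Every server process (including the root) receives the broadcast registration message
  // (tag 2 on intraComm). Context creation is funnelled through the event scheduler so that
  // all processes of intraComm register the contexts in the same, synchronized order.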
  void CServer::listenRootContext(void)
  {
    MPI_Status status ;
    int flag ;
    static std::vector<void*> buffers;
    static std::vector<MPI_Request> requests ;
    static std::vector<int> counts ;
    static std::vector<bool> isEventRegistered ;
    static std::vector<bool> isEventQueued ;
    MPI_Request request;

    int rank ;
    const int root=0 ;
    boost::hash<string> hashString;
    size_t hashId = hashString("RegisterContext");

    // (1) Receive context id from the root, save it into a buffer
    traceOff() ;
    MPI_Iprobe(root,2,intraComm, &flag, &status) ;
    traceOn() ;
    if (flag==true)
    {
      counts.push_back(0);
      MPI_Get_count(&status,MPI_CHAR,&(counts.back())) ;
      buffers.push_back(new char[counts.back()]) ;
      MPI_Irecv((void*)(buffers.back()),counts.back(),MPI_CHAR,root,2,intraComm,&request) ;
      requests.push_back(request);
      isEventRegistered.push_back(false);
      isEventQueued.push_back(false);
      nbContexts++;
    }

    for (int ctxNb = 0; ctxNb < nbContexts; ctxNb++ )
    {
      // (2) If context id is received, register an event
      if(!isEventRegistered[ctxNb]) MPI_Test(&requests[ctxNb],&flag,&status) ;
      if (flag==true && !isEventRegistered[ctxNb])
      {
        eventScheduler->registerEvent(ctxNb,hashId);
        isEventRegistered[ctxNb] = true;
      }
      // (3) If event has been scheduled, call register context
      if (eventScheduler->queryEvent(ctxNb,hashId) && !isEventQueued[ctxNb])
      {
        registerContext(buffers[ctxNb],counts[ctxNb]) ;
        isEventQueued[ctxNb] = true;
        delete [] buffers[ctxNb] ;
      }
    }

  }

  void CServer::registerContext(void* buff, int count, int leaderRank)
  {
    string contextId;
    CBufferIn buffer(buff, count);
//    buffer >> contextId;
    buffer >> contextId>>leaderRank;
    CContext* context;

#pragma omp critical (_output)
    {
      info(20) << "CServer : Register new Context : " << contextId << endl;
    }

    if (contextList.find(contextId) != contextList.end())
      ERROR("void CServer::registerContext(void* buff, int count, int leaderRank)",
            << "Context '" << contextId << "' has already been registered");

    context=CContext::create(contextId);
    contextList[contextId]=context;

    // Primary or classical server: create communication channel with a client
    // (1) create interComm (with a client)
    // (2) initialize client and server (contextClient and contextServer)
    MPI_Comm inter;
    if (serverLevel < 2)
    {
      MPI_Comm contextInterComm;
      MPI_Intercomm_create(intraComm, 0, CXios::globalComm, leaderRank, 10+leaderRank, &contextInterComm);
      MPI_Intercomm_merge(contextInterComm,1,&inter);
      MPI_Barrier(inter);
      context->initServer(intraComm,contextInterComm);
      contextInterComms.push_back(contextInterComm);

      MPI_Comm_free(&inter);
    }
    // Secondary server: create communication channel with a primary server
    // (1) duplicate interComm with a primary server
    // (2) initialize client and server (contextClient and contextServer)
    // Remark: in the case of the secondary server there is no need to create an interComm calling MPI_Intercomm_create,
    //         because interComm of CContext is defined on the same processes as the interComm of CServer.
    //         So just duplicate it.
    else if (serverLevel == 2)
    {
      MPI_Comm_dup(interCommLeft.front(), &inter);
      contextInterComms.push_back(inter);
      context->initServer(intraComm, contextInterComms.back());
    }

    // Primary server:
    // (1) send create context message to secondary servers
    // (2) initialize communication channels with secondary servers (create contextClient and contextServer)
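    // On a level-1 server each secondary-server pool receives its own copy of the context,
    // registered under a derived id: e.g. a context "atmosphere" (illustrative name) is sent
    // as "atmosphere_server_0", "atmosphere_server_1", ... to the successive pools.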
    if (serverLevel == 1)
    {
      int i = 0, size;
      MPI_Comm_size(intraComm, &size) ;
      for (std::list<MPI_Comm>::iterator it = interCommRight.begin(); it != interCommRight.end(); it++, ++i)
      {
        StdString str = contextId +"_server_" + boost::lexical_cast<string>(i);
        CMessage msg;
        int messageSize;
        msg<<str<<size<<rank_ ;
        messageSize = msg.size() ;
        buff = new char[messageSize] ;
        CBufferOut buffer(buff,messageSize) ;
        buffer<<msg ;
        MPI_Send(buff, buffer.count(), MPI_CHAR, sndServerGlobalRanks[i], 1, CXios::globalComm) ;
        MPI_Comm_dup(*it, &inter);
        contextInterComms.push_back(inter);
        MPI_Comm_dup(intraComm, &inter);
        contextIntraComms.push_back(inter);
        context->initClient(contextIntraComms.back(), contextInterComms.back()) ;
        delete [] buff ;
      }
    }
  }

  void CServer::contextEventLoop(bool enableEventsProcessing /*= true*/)
  {
    bool isFinalized ;
    map<string,CContext*>::iterator it ;

    for(it=contextList.begin();it!=contextList.end();it++)
    {
      isFinalized=it->second->isFinalized();
      if (isFinalized)
      {
        contextList.erase(it) ;
        break ;
      }
      else
        it->second->checkBuffersAndListen(enableEventsProcessing);
    }
  }

  //! Get rank of the current process in the intraComm
  int CServer::getRank()
  {
    int rank;
    MPI_Comm_rank(intraComm,&rank);
    return rank;
  }

  vector<int>& CServer::getSecondaryServerGlobalRanks()
  {
    return sndServerGlobalRanks;
  }

  /*!
   * Open a file specified by a suffix and an extension and use it for the given file buffer.
   * The file name will be suffix+rank+extension.
   *
   * \param fileName [in] prototype file name
   * \param ext [in] extension of the file
   * \param fb [in/out] the file buffer
   */
  void CServer::openStream(const StdString& fileName, const StdString& ext, std::filebuf* fb)
  {
    StdStringStream fileNameClient;
    int numDigit = 0;
    int size = 0;
    int id;
    MPI_Comm_size(CXios::globalComm, &size);
    while (size)
    {
      size /= 10;
      ++numDigit;
    }
    id = rank_; //getRank();

    fileNameClient << fileName << "_" << std::setfill('0') << std::setw(numDigit) << id << ext;
    fb->open(fileNameClient.str().c_str(), std::ios::out);
    if (!fb->is_open())
      ERROR("void CServer::openStream(const StdString& fileName, const StdString& ext, std::filebuf* fb)",
            << std::endl << "Cannot open <" << fileNameClient.str() << "> file to write the server log(s).");
  }
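  // Illustrative example (assumed values): with 128 processes on CXios::globalComm,
  // numDigit = 3, so rank 5 with fileName "xios_server" and ext ".out" opens
  // "xios_server_005.out" (the rank is zero-padded to numDigit digits).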

  /*!
   * \brief Open a file stream to write the info logs
   * Open a file stream with a specific file name suffix+rank
   * to write the info logs.
   * \param fileName [in] prototype file name
   */
  void CServer::openInfoStream(const StdString& fileName)
  {
    std::filebuf* fb = m_infoStream.rdbuf();
    openStream(fileName, ".out", fb);

    info.write2File(fb);
    report.write2File(fb);
  }

  //! Write the info logs to standard output
  void CServer::openInfoStream()
  {
    info.write2StdOut();
    report.write2StdOut();
  }

  //! Close the info logs file if it is open
  void CServer::closeInfoStream()
  {
    if (m_infoStream.is_open()) m_infoStream.close();
  }

  /*!
   * \brief Open a file stream to write the error log
   * Open a file stream with a specific file name suffix+rank
   * to write the error log.
   * \param fileName [in] prototype file name
   */
  void CServer::openErrorStream(const StdString& fileName)
  {
    std::filebuf* fb = m_errorStream.rdbuf();
    openStream(fileName, ".err", fb);

    error.write2File(fb);
  }

  //! Write the error log to standard error output
  void CServer::openErrorStream()
  {
    error.write2StdErr();
  }

  //! Close the error log file if it is open
  void CServer::closeErrorStream()
  {
    if (m_errorStream.is_open()) m_errorStream.close();
  }
}