Changeset 2019 for XIOS/dev/dev_trunk_graph/src/client.cpp
- Timestamp: 01/22/21 12:00:29 (3 years ago)
- File: 1 edited
Legend:
- Unmodified: no prefix
- Added: prefixed with +
- Removed: prefixed with -
XIOS/dev/dev_trunk_graph/src/client.cpp
(changes from r1590 to r2019)

    #include "buffer_client.hpp"
    #include "string_tools.hpp"
+   #include "ressources_manager.hpp"
+   #include "services_manager.hpp"
+   #include <functional>
+   #include <cstdio>
+   #include "workflow_graph.hpp"

    namespace xios
    {

+     const double serverPublishDefaultTimeout=10;
+
      MPI_Comm CClient::intraComm ;
      MPI_Comm CClient::interComm ;
+     MPI_Comm CClient::clientsComm_ ;
+
      std::list<MPI_Comm> CClient::contextInterComms;
      int CClient::serverLeader ;
…
      StdOFStream CClient::m_infoStream;
      StdOFStream CClient::m_errorStream;
+     CPoolRessource* CClient::poolRessource_=nullptr ;
+
      MPI_Comm& CClient::getInterComm(void) { return (interComm); }
…
      void CClient::initialize(const string& codeId, MPI_Comm& localComm, MPI_Comm& returnComm)
      {
+       MPI_Comm clientComm ;
+       // initialize MPI if not initialized
        int initialized ;
        MPI_Initialized(&initialized) ;
        if (initialized) is_MPI_Initialized=true ;
        else is_MPI_Initialized=false ;
-       int rank ;
-
-       // don't use OASIS
-       if (!CXios::usingOasis)
-       {
-         // localComm isn't given
-         if (localComm == MPI_COMM_NULL)
-         {
+
+       MPI_Comm globalComm=CXios::getGlobalComm() ;
+
+       /////////////////////////////////////////
+       ///////////// PART 1 ////////////////////
+       /////////////////////////////////////////
+
+       // localComm isn't given
+       if (localComm == MPI_COMM_NULL)
+       {
+         // don't use OASIS
+         if (!CXios::usingOasis)
+         {
            if (!is_MPI_Initialized)
            {
…
            }
            CTimer::get("XIOS").resume() ;
-           CTimer::get("XIOS init/finalize").resume() ;
-           boost::hash<string> hashString ;
-
-           unsigned long hashClient=hashString(codeId) ;
-           unsigned long hashServer=hashString(CXios::xiosCodeId) ;
-           unsigned long* hashAll ;
-           int size ;
-           int myColor ;
-           int i,c ;
-           MPI_Comm newComm ;
-
-           MPI_Comm_size(CXios::globalComm,&size) ;
-           MPI_Comm_rank(CXios::globalComm,&rank_);
-
-           hashAll=new unsigned long[size] ;
-
-           MPI_Allgather(&hashClient,1,MPI_LONG,hashAll,1,MPI_LONG,CXios::globalComm) ;
-
-           map<unsigned long, int> colors ;
-           map<unsigned long, int> leaders ;
-
-           for(i=0,c=0;i<size;i++)
-           {
-             if (colors.find(hashAll[i])==colors.end())
-             {
-               colors[hashAll[i]] =c ;
-               leaders[hashAll[i]]=i ;
-               c++ ;
-             }
-           }
-
-           // Verify whether we are on server mode or not
-           CXios::setNotUsingServer();
-           for (i=0; i < size; ++i)
-           {
-             if ( hashServer == hashAll[i])
-             {
-               CXios::setUsingServer();
-               break;
-             }
-           }
-
-           myColor=colors[hashClient];
-           MPI_Comm_split(CXios::globalComm,myColor,rank_,&intraComm) ;
-
-           if (CXios::usingServer)
-           {
-             int clientLeader=leaders[hashClient] ;
-             serverLeader=leaders[hashServer] ;
-             int intraCommSize, intraCommRank ;
-             MPI_Comm_size(intraComm,&intraCommSize) ;
-             MPI_Comm_rank(intraComm,&intraCommRank) ;
-             info(50)<<"intercommCreate::client "<<rank_<<" intraCommSize : "<<intraCommSize
-                     <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< serverLeader<<endl ;
-             MPI_Intercomm_create(intraComm, 0, CXios::globalComm, serverLeader, 0, &interComm) ;
-             //rank_ = intraCommRank;
-           }
-           else
-           {
-             MPI_Comm_dup(intraComm,&interComm) ;
-           }
-           delete [] hashAll ;
-         }
-         // localComm argument is given
-         else
-         {
-           if (CXios::usingServer)
-           {
-             //ERROR("void CClient::initialize(const string& codeId,MPI_Comm& localComm,MPI_Comm& returnComm)", << " giving a local communictor is not compatible with using server mode") ;
-           }
-           else
-           {
-             MPI_Comm_dup(localComm,&intraComm) ;
-             MPI_Comm_dup(intraComm,&interComm) ;
-           }
-         }
-       }
-       // using OASIS
-       else
-       {
-         // localComm isn't given
-         if (localComm == MPI_COMM_NULL)
-         {
-           if (!is_MPI_Initialized) oasis_init(codeId) ;
-           oasis_get_localcomm(localComm) ;
-         }
-         MPI_Comm_dup(localComm,&intraComm) ;
-
-         CTimer::get("XIOS").resume() ;
-         CTimer::get("XIOS init/finalize").resume() ;
-
-         if (CXios::usingServer)
-         {
-           MPI_Status status ;
-           MPI_Comm_rank(intraComm,&rank_) ;
-
-           oasis_get_intercomm(interComm,CXios::xiosCodeId) ;
-           if (rank_==0) MPI_Recv(&serverLeader,1, MPI_INT, 0, 0, interComm, &status) ;
-           MPI_Bcast(&serverLeader,1,MPI_INT,0,intraComm) ;
-         }
-         else MPI_Comm_dup(intraComm,&interComm) ;
-       }
-
-       MPI_Comm_dup(intraComm,&returnComm) ;
-     }
+           CTimer::get("XIOS init/finalize",false).resume() ;
+
+           // split the global communicator
+           // get hash from all model to attribute a unique color (int) and then split to get client communicator
+           // every mpi process of globalComm (MPI_COMM_WORLD) must participate
+
+           int commRank, commSize ;
+           MPI_Comm_rank(globalComm,&commRank) ;
+           MPI_Comm_size(globalComm,&commSize) ;
+
+           std::hash<string> hashString ;
+           size_t hashClient=hashString(codeId) ;
+
+           size_t* hashAll = new size_t[commSize] ;
+           MPI_Allgather(&hashClient,1,MPI_UNSIGNED_LONG,hashAll,1,MPI_LONG,globalComm) ;
+
+           int color=0 ;
+           set<size_t> listHash ;
+           for(int i=0 ; i<=commRank ; i++)
+             if (listHash.count(hashAll[i])==0)
+             {
+               listHash.insert(hashAll[i]) ;
+               color=color+1 ;
+             }
+           delete[] hashAll ;
+
+           MPI_Comm_split(globalComm, color, commRank, &clientComm) ;
+         }
+         else // using oasis to split communicator
+         {
+           if (!is_MPI_Initialized) oasis_init(codeId) ;
+           oasis_get_localcomm(clientComm) ;
+         }
+       }
+       else // localComm is given
+       {
+         MPI_Comm_dup(localComm,&clientComm) ;
+       }
+
+       /////////////////////////////////////////
+       ///////////// PART 2 ////////////////////
+       /////////////////////////////////////////
+
+       // Create the XIOS communicator for every process which is related
+       // to XIOS, as well on client side as on server side
+
+       MPI_Comm xiosGlobalComm ;
+       string strIds=CXios::getin<string>("clients_code_id","") ;
+       vector<string> clientsCodeId=splitRegex(strIds,"\\s*,\\s*") ;
+       if (strIds.empty())
+       {
+         // no code Ids given, suppose XIOS initialisation is global
+         int commRank, commGlobalRank, serverLeader, clientLeader,serverRemoteLeader,clientRemoteLeader ;
+         MPI_Comm splitComm,interComm ;
+         MPI_Comm_rank(globalComm,&commGlobalRank) ;
+         MPI_Comm_split(globalComm, 0, commGlobalRank, &splitComm) ;
+         int splitCommSize, globalCommSize ;
+
+         MPI_Comm_size(splitComm,&splitCommSize) ;
+         MPI_Comm_size(globalComm,&globalCommSize) ;
+         if (splitCommSize==globalCommSize) // no server
+         {
+           MPI_Comm_dup(globalComm,&xiosGlobalComm) ;
+           CXios::setXiosComm(xiosGlobalComm) ;
+         }
+         else
+         {
+           MPI_Comm_rank(splitComm,&commRank) ;
+           if (commRank==0) clientLeader=commGlobalRank ;
+           else clientLeader=0 ;
+           serverLeader=0 ;
+           MPI_Allreduce(&clientLeader,&clientRemoteLeader,1,MPI_INT,MPI_SUM,globalComm) ;
+           MPI_Allreduce(&serverLeader,&serverRemoteLeader,1,MPI_INT,MPI_SUM,globalComm) ;
+           MPI_Intercomm_create(splitComm, 0, globalComm, serverRemoteLeader,1341,&interComm) ;
+           MPI_Intercomm_merge(interComm,true,&xiosGlobalComm) ;
+           CXios::setXiosComm(xiosGlobalComm) ;
+         }
+       }
+       else
+       {
+         xiosGlobalCommByFileExchange(clientComm, codeId) ;
+       }
+
+       int commRank ;
+       MPI_Comm_rank(CXios::getXiosComm(), &commRank) ;
+       MPI_Comm_split(CXios::getXiosComm(),false,commRank, &clientsComm_) ;
+
+       // is using server or not ?
+       int xiosCommSize, clientsCommSize ;
+       MPI_Comm_size(CXios::getXiosComm(), &xiosCommSize) ;
+       MPI_Comm_size(clientsComm_, &clientsCommSize) ;
+       if (xiosCommSize==clientsCommSize) CXios::setUsingServer() ;
+       else CXios::setNotUsingServer() ;
+
+       CXios::setGlobalRegistry(new CRegistry(clientsComm_)) ;
+
+       /////////////////////////////////////////
+       ///////////// PART 3 ////////////////////
+       /////////////////////////////////////////
+
+       CXios::launchDaemonsManager(false) ;
+       poolRessource_ = new CPoolRessource(clientComm, codeId) ;
+
+       /////////////////////////////////////////
+       ///////////// PART 4 ////////////////////
+       /////////////////////////////////////////
+
+       returnComm = clientComm ;
+     }
+
+     void CClient::xiosGlobalCommByFileExchange(MPI_Comm clientComm, const string& codeId)
+     {
+       MPI_Comm globalComm=CXios::getGlobalComm() ;
+       MPI_Comm xiosGlobalComm ;
+
+       string strIds=CXios::getin<string>("clients_code_id","") ;
+       vector<string> clientsCodeId=splitRegex(strIds,"\\s*,\\s*") ;
+
+       int commRank, globalRank, clientRank, serverRank ;
+       MPI_Comm_rank(clientComm, &commRank) ;
+       MPI_Comm_rank(globalComm, &globalRank) ;
+       string clientFileName("__xios_publisher::"+codeId+"__to_remove__") ;
+
+       int error ;
+
+       if (commRank==0) // if root process publish name
+       {
+         std::ofstream ofs (clientFileName, std::ofstream::out);
+         ofs<<globalRank ;
+         ofs.close();
+
+         // get server root rank
+
+         std::ifstream ifs ;
+         string fileName=("__xios_publisher::"+CXios::xiosCodeId+"__to_remove__") ;
+
+         double timeout = CXios::getin<double>("server_puplish_timeout",serverPublishDefaultTimeout) ;
+         double time ;
+
+         do
+         {
+           CTimer::get("server_publish_timeout").resume() ;
+           ifs.clear() ;
+           ifs.open(fileName, std::ifstream::in) ;
+           CTimer::get("server_publish_timeout").suspend() ;
+         } while (ifs.fail() && CTimer::get("server_publish_timeout").getCumulatedTime()<timeout) ;
+
+         if (CTimer::get("server_publish_timeout").getCumulatedTime()>=timeout || ifs.fail())
+         {
+           ifs.clear() ;
+           ifs.close() ;
+           ifs.clear() ;
+           error=true ;
+         }
+         else
+         {
+           ifs>>serverRank ;
+           ifs.close() ;
+           error=false ;
+         }
+       }
+
+       MPI_Bcast(&error,1,MPI_INT,0,clientComm) ;
+
+       if (error==false) // you have a server
+       {
+         MPI_Comm intraComm ;
+         MPI_Comm_dup(clientComm,&intraComm) ;
+         MPI_Comm interComm ;
+
+         int pos=0 ;
+         for(int i=0 ; codeId!=clientsCodeId[i]; i++) pos=pos+1 ;
+
+         bool high=true ;
+         for(int i=pos ; i<clientsCodeId.size(); i++)
+         {
+           MPI_Intercomm_create(intraComm, 0, globalComm, serverRank, 3141, &interComm);
+           MPI_Comm_free(&intraComm) ;
+           MPI_Intercomm_merge(interComm,high, &intraComm ) ;
+           high=false ;
+         }
+         xiosGlobalComm=intraComm ;
+       }
+       else // no server detected
+       {
+         vector<int> clientsRank(clientsCodeId.size()) ;
+
+         if (commRank==0)
+         {
+           for(int i=0;i<clientsRank.size();i++)
+           {
+             std::ifstream ifs ;
+             string fileName=("__xios_publisher::"+clientsCodeId[i]+"__to_remove__") ;
+             do
+             {
+               ifs.clear() ;
+               ifs.open(fileName, std::ifstream::in) ;
+             } while (ifs.fail()) ;
+             ifs>>clientsRank[i] ;
+             ifs.close() ;
+           }
+         }
+
+         int client ;
+         MPI_Comm intraComm ;
+         MPI_Comm_dup(clientComm,&intraComm) ;
+         MPI_Comm interComm ;
+
+         int pos=0 ;
+         for(int i=0 ; codeId!=clientsCodeId[i]; i++) pos=pos+1 ;
+
+         bool high=true ;
+         for(int i=pos+1 ; i<clientsCodeId.size(); i++)
+         {
+           if (codeId==clientsCodeId[0]) // first model play the server rule
+           {
+             MPI_Intercomm_create(intraComm, 0, globalComm, clientsRank[i], 3141, &interComm);
+             MPI_Intercomm_merge(interComm,false, &intraComm ) ;
+           }
+           else
+           {
+             MPI_Intercomm_create(intraComm, 0, globalComm, clientsRank[0], 3141, &interComm);
+             MPI_Intercomm_merge(interComm,high, &intraComm ) ;
+             high=false ;
+           }
+         }
+         xiosGlobalComm=intraComm ;
+       }
+
+       MPI_Barrier(xiosGlobalComm);
+       if (commRank==0) std::remove(clientFileName.c_str()) ;
+       MPI_Barrier(xiosGlobalComm);
+
+       CXios::setXiosComm(xiosGlobalComm) ;
+
+       MPI_Comm commUnfree ;
+       MPI_Comm_dup(clientComm, &commUnfree ) ;
+     }
+
+     // to check on other architecture
+     void CClient::xiosGlobalCommByPublishing(MPI_Comm clientComm, const string& codeId)
+     {
+       // untested. need to be developped an a true MPI compliant library
+
+ /*
+       // try to discover other client/server
+       // do you have a xios server ?
+       char portName[MPI_MAX_PORT_NAME];
+       int ierr ;
+       int commRank ;
+       MPI_Comm_rank(clientComm,&commRank) ;
+
+       MPI_Barrier(globalComm) ;
+       if (commRank==0)
+       {
+         MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN );
+         const char* serviceName=CXios::xiosCodeId.c_str() ;
+         ierr=MPI_Lookup_name(CXios::xiosCodeId.c_str(), MPI_INFO_NULL, portName);
+         MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_ARE_FATAL );
+       }
+       ierr=MPI_SUCCESS ;
+       MPI_Bcast(&ierr,1,MPI_INT,0,clientComm) ;
+
+       if (ierr==MPI_SUCCESS) // you have a server
+       {
+         MPI_Comm intraComm=clientComm ;
+         MPI_Comm interComm ;
+         for(int i=0 ; i<clientsCodeId.size(); i++)
+         {
+           MPI_Comm_connect(portName, MPI_INFO_NULL, 0, intraComm, &interComm);
+           MPI_Intercomm_merge(interComm, true, &intraComm ) ;
+         }
+         xiosGlobalComm=intraComm ;
+       }
+       else // you don't have any server
+       {
+         if (codeId==clientsCodeId[0]) // first code will publish his name
+         {
+           if (commRank==0) // if root process publish name
+           {
+             MPI_Open_port(MPI_INFO_NULL, portName);
+             MPI_Publish_name(CXios::xiosCodeId.c_str(), MPI_INFO_NULL, portName);
+           }
+
+           MPI_Comm intraComm=clientComm ;
+           MPI_Comm interComm ;
+           for(int i=0 ; i<clientsCodeId.size()-1; i++)
+           {
+             MPI_Comm_accept(portName, MPI_INFO_NULL, 0, intraComm, &interComm);
+             MPI_Intercomm_merge(interComm,false, &intraComm ) ;
+           }
+         }
+         else // other clients are connecting to the first one
+         {
+           if (commRank==0)
+           {
+             MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN );
+             ierr=MPI_Lookup_name(CXios::xiosCodeId.c_str(), MPI_INFO_NULL, portName);
+             MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_ARE_FATAL );
+           }
+
+           MPI_Bcast(&ierr,1,MPI_INT,0,clientComm) ;
+
+           if (ierr==MPI_SUCCESS) // you can connect
+           {
+             MPI_Comm intraComm=clientComm ;
+             MPI_Comm interComm ;
+             for(int i=0 ; i<clientsCodeId.size()-1; i++)
+             {
+               MPI_Comm_connect(portName, MPI_INFO_NULL, 0, intraComm, &interComm);
+               MPI_Intercomm_merge(interComm, true, &intraComm ) ;
+             }
+             xiosGlobalComm=intraComm ;
+           }
+         }
+       }
+ */
+     }

      ///---------------------------------------------------------------
…
      void CClient::registerContext(const string& id, MPI_Comm contextComm)
      {
-       CContext::setCurrent(id) ;
-       CContext* context=CContext::create(id);
-       StdString idServer(id);
-       idServer += "_server";
-
-       if (CXios::isServer && !context->hasServer)
-       // Attached mode
-       {
-         MPI_Comm contextInterComm ;
-         MPI_Comm_dup(contextComm,&contextInterComm) ;
-         CContext* contextServer = CContext::create(idServer);
-
-         // Firstly, initialize context on client side
-         context->initClient(contextComm,contextInterComm, contextServer);
-
-         // Secondly, initialize context on server side
-         contextServer->initServer(contextComm,contextInterComm, context);
-
-         // Finally, we should return current context to context client
-         CContext::setCurrent(id);
-
-         contextInterComms.push_back(contextInterComm);
-       }
-       else
-       {
-         int size,rank,globalRank ;
-         size_t message_size ;
-         int leaderRank ;
-         MPI_Comm contextInterComm ;
-
-         MPI_Comm_size(contextComm,&size) ;
-         MPI_Comm_rank(contextComm,&rank) ;
-         MPI_Comm_rank(CXios::globalComm,&globalRank) ;
-         if (rank!=0) globalRank=0 ;
-
-         CMessage msg ;
-         msg<<idServer<<size<<globalRank ;
-         // msg<<id<<size<<globalRank ;
-
-         int messageSize=msg.size() ;
-         char * buff = new char[messageSize] ;
-         CBufferOut buffer((void*)buff,messageSize) ;
-         buffer<<msg ;
-
-         MPI_Send((void*)buff,buffer.count(),MPI_CHAR,serverLeader,1,CXios::globalComm) ;
-
-         MPI_Intercomm_create(contextComm,0,CXios::globalComm,serverLeader,10+globalRank,&contextInterComm) ;
-         info(10)<<"Register new Context : "<<id<<endl ;
-         MPI_Comm inter ;
-         MPI_Intercomm_merge(contextInterComm,0,&inter) ;
-         MPI_Barrier(inter) ;
-
-         context->initClient(contextComm,contextInterComm) ;
-
-         contextInterComms.push_back(contextInterComm);
-         MPI_Comm_free(&inter);
-         delete [] buff ;
-       }
-     }
+       int commRank, commSize ;
+       MPI_Comm_rank(contextComm,&commRank) ;
+       MPI_Comm_size(contextComm,&commSize) ;
+
+       getPoolRessource()->createService(contextComm, id, 0, CServicesManager::CLIENT, 1) ;
+       getPoolRessource()->createService(contextComm, CXios::defaultServerId, 0, CServicesManager::IO_SERVER, 1) ;
+
+       if (commRank==0) while (!CXios::getServicesManager()->hasService(getPoolRessource()->getId(), id, 0)) { CXios::getDaemonsManager()->eventLoop();}
+
+       if (commRank==0) CXios::getContextsManager()->createServerContext(getPoolRessource()->getId(), id, 0, id) ;
+       int type=CServicesManager::CLIENT ;
+       string name = CXios::getContextsManager()->getServerContextName(getPoolRessource()->getId(), id, 0, type, id) ;
+       while (!CXios::getContextsManager()->hasContext(name, contextComm) )
+       {
+         CXios::getDaemonsManager()->eventLoop() ;
+       }
+     }

      /*!
…
      }

      void CClient::finalize(void)
      {
-       int rank ;
-       int msg=0 ;
-
-       MPI_Comm_rank(intraComm,&rank) ;
-
-       if (!CXios::isServer)
-       {
-         MPI_Comm_rank(intraComm,&rank) ;
-         if (rank==0)
-         {
-           MPI_Send(&msg,1,MPI_INT,0,0,interComm) ;
-         }
-       }
-
-       for (std::list<MPI_Comm>::iterator it = contextInterComms.begin(); it != contextInterComms.end(); it++)
-         MPI_Comm_free(&(*it));
-       MPI_Comm_free(&interComm);
-       MPI_Comm_free(&intraComm);
-
-       CTimer::get("XIOS init/finalize").suspend() ;
+       MPI_Barrier(clientsComm_) ;
+       int commRank ;
+       MPI_Comm_rank(clientsComm_, &commRank) ;
+       if (commRank==0) CXios::getRessourcesManager()->finalize() ;
+
+       auto globalRegistry=CXios::getGlobalRegistry() ;
+       globalRegistry->hierarchicalGatherRegistry() ;
+
+       if (commRank==0)
+       {
+         info(80)<<"Write data base Registry"<<endl<<globalRegistry->toString()<<endl ;
+         globalRegistry->toFile("xios_registry.bin") ;
+       }
+       delete globalRegistry ;
+
+       CTimer::get("XIOS init/finalize",false).suspend() ;
        CTimer::get("XIOS").suspend() ;
+
+       CXios::finalizeDaemonsManager() ;

        if (!is_MPI_Initialized)
…
        report(0)<< " Memory report : increasing it by a factor will increase performance, depending of the volume of data wrote in file at each time step of the file"<<endl ;
        report(100)<<CTimer::getAllCumulatedTime()<<endl ;
+
+       CWorkflowGraph::drawWorkFlowGraph_client();
      }

      /*!
…
        int size = 0;
        int rank;
-       MPI_Comm_size(CXios::globalComm, &size);
+       MPI_Comm_size(CXios::getGlobalComm(), &size);
+       MPI_Comm_rank(CXios::getGlobalComm(),&rank);
        while (size)
        {
…
        }

-       if (CXios::usingOasis)
-       {
-         MPI_Comm_rank(CXios::globalComm,&rank);
-         fileNameClient << fileName << "_" << std::setfill('0') << std::setw(numDigit) << rank << ext;
-       }
-       else
-         fileNameClient << fileName << "_" << std::setfill('0') << std::setw(numDigit) << getRank() << ext;
+       fileNameClient << fileName << "_" << std::setfill('0') << std::setw(numDigit) << rank << ext;

        fb->open(fileNameClient.str().c_str(), std::ios::out);
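For orientation, here is a minimal, hypothetical driver sketching the calling sequence into the three CClient entry points this changeset reworks (initialize, registerContext, finalize). It is an illustration only: the code id "ocean", the context id "ocean_context" and the include path are invented for the example, and a real model would normally reach these routines through the XIOS Fortran/C bindings rather than calling CClient directly.

// Sketch of a client-side driver for the CClient interface shown above.
// Assumptions: XIOS headers and libraries are available at build time;
// "ocean" and "ocean_context" are placeholder identifiers.
#include <mpi.h>
#include <string>
#include "client.hpp"   // xios::CClient (header path assumed)

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);               // CClient then sees MPI as already initialized

    MPI_Comm localComm  = MPI_COMM_NULL;  // let XIOS split the global communicator itself
    MPI_Comm returnComm;                  // communicator handed back to the model

    // Splits MPI_COMM_WORLD by code id, builds the XIOS communicator and,
    // in r2019, launches the daemons manager and the pool resource.
    xios::CClient::initialize("ocean", localComm, returnComm);

    // Declare one context on the communicator returned to the model;
    // in r2019 this goes through the services and contexts managers.
    xios::CClient::registerContext("ocean_context", returnComm);

    // ... model time stepping and XIOS field output would happen here ...

    xios::CClient::finalize();            // gathers the registry, stops the daemons manager
    MPI_Finalize();                       // MPI was initialized by the model, so the model ends it
    return 0;
}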