#include "globalScopeData.hpp"
#include "xios_spl.hpp"
#include "cxios.hpp"
#include "client.hpp"
#include <boost/functional/hash.hpp>
#include "type.hpp"
#include "context.hpp"
#include "context_client.hpp"
#include "oasis_cinterface.hpp"
#include "mpi.hpp"
#include "timer.hpp"
#include "buffer_client.hpp"
#include "string_tools.hpp"
#include "ressources_manager.hpp"
#include "services_manager.hpp"
#include <functional>
#include <cstdio>


namespace xios
{

  const double serverPublishDefaultTimeout=10;

  MPI_Comm CClient::intraComm ;
  MPI_Comm CClient::interComm ;
  MPI_Comm CClient::clientsComm_ ;

  std::list<MPI_Comm> CClient::contextInterComms;
  int CClient::serverLeader ;
  bool CClient::is_MPI_Initialized ;
  int CClient::rank_ = INVALID_RANK;
  StdOFStream CClient::m_infoStream;
  StdOFStream CClient::m_errorStream;
  CPoolRessource* CClient::poolRessource_=nullptr ;

  MPI_Comm& CClient::getInterComm(void) { return (interComm); }

  ///---------------------------------------------------------------
  /*!
   * \fn void CClient::initialize(const string& codeId, MPI_Comm& localComm, MPI_Comm& returnComm)
   * Creates the intracommunicator (CClient::intraComm) for the client group with id=codeId
   * and the intercommunicator (CClient::interComm) between the client and server groups.
   * \param [in] codeId identity of the calling code (context).
   * \param [in,out] localComm local communicator.
   * \param [in,out] returnComm (intra)communicator of the client group.
   */

  void CClient::initialize(const string& codeId, MPI_Comm& localComm, MPI_Comm& returnComm)
  {
    MPI_Comm clientComm ;

    // initialize MPI if it is not yet initialized
    int initialized ;
    MPI_Initialized(&initialized) ;
    if (initialized) is_MPI_Initialized=true ;
    else is_MPI_Initialized=false ;

    MPI_Comm globalComm=CXios::getGlobalComm() ;

    /////////////////////////////////////////
    ///////////// PART 1 ////////////////////
    /////////////////////////////////////////

    // localComm is not given
    if (localComm == MPI_COMM_NULL)
    {
      // OASIS is not used
      if (!CXios::usingOasis)
      {
        if (!is_MPI_Initialized)
        {
          MPI_Init(NULL, NULL);
        }
        CTimer::get("XIOS").resume() ;
        CTimer::get("XIOS init/finalize",false).resume() ;

        // split the global communicator:
        // get a hash from every model to attribute a unique color (int), then split to get the client communicator;
        // every MPI process of globalComm (MPI_COMM_WORLD) must participate
        int commRank, commSize ;
        MPI_Comm_rank(globalComm,&commRank) ;
        MPI_Comm_size(globalComm,&commSize) ;

        std::hash<string> hashString ;
        size_t hashClient=hashString(codeId) ;

        size_t* hashAll = new size_t[commSize] ;
        // use MPI_UNSIGNED_LONG on both sides so the send and receive types of the size_t hashes match
        MPI_Allgather(&hashClient,1,MPI_UNSIGNED_LONG,hashAll,1,MPI_UNSIGNED_LONG,globalComm) ;
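        // Each rank counts the number of distinct code hashes seen among ranks 0..commRank.
        // All processes belonging to the same code therefore compute the same color, and the
        // split below groups them into one intracommunicator per model code.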
        int color=0 ;
        set<size_t> listHash ;
        for(int i=0 ; i<=commRank ; i++)
          if (listHash.count(hashAll[i])==0)
          {
            listHash.insert(hashAll[i]) ;
            color=color+1 ;
          }
        delete[] hashAll ;

        MPI_Comm_split(globalComm, color, commRank, &clientComm) ;
      }
      else // use OASIS to split the communicator
      {
        if (!is_MPI_Initialized) oasis_init(codeId) ;
        oasis_get_localcomm(clientComm) ;
      }
    }
    else // localComm is given
    {
      MPI_Comm_dup(localComm,&clientComm) ;
    }


    /////////////////////////////////////////
    ///////////// PART 2 ////////////////////
    /////////////////////////////////////////

    // Create the XIOS communicator for every process related to XIOS,
    // on the client side as well as on the server side

    MPI_Comm xiosGlobalComm ;
    string strIds=CXios::getin<string>("clients_code_id","") ;
    vector<string> clientsCodeId=splitRegex(strIds,"\\s*,\\s*") ;
    if (strIds.empty())
    {
      // no code ids given: suppose the XIOS initialisation is global
      int commRank, commGlobalRank, serverLeader, clientLeader, serverRemoteLeader, clientRemoteLeader ;
      MPI_Comm splitComm, interComm ;
      MPI_Comm_rank(globalComm,&commGlobalRank) ;
      MPI_Comm_split(globalComm, 0, commGlobalRank, &splitComm) ;
      int splitCommSize, globalCommSize ;

      MPI_Comm_size(splitComm,&splitCommSize) ;
      MPI_Comm_size(globalComm,&globalCommSize) ;
      if (splitCommSize==globalCommSize) // no server
      {
        MPI_Comm_dup(globalComm,&xiosGlobalComm) ;
        CXios::setXiosComm(xiosGlobalComm) ;
      }
      else
      {
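        // Only the rank-0 process of each group contributes its global rank (the others contribute 0),
        // so the MPI_SUM reductions over globalComm give every process the global rank of the client
        // and server leaders; the server side is assumed to perform the matching calls.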
        MPI_Comm_rank(splitComm,&commRank) ;
        if (commRank==0) clientLeader=commGlobalRank ;
        else clientLeader=0 ;
        serverLeader=0 ;
        MPI_Allreduce(&clientLeader,&clientRemoteLeader,1,MPI_INT,MPI_SUM,globalComm) ;
        MPI_Allreduce(&serverLeader,&serverRemoteLeader,1,MPI_INT,MPI_SUM,globalComm) ;
        MPI_Intercomm_create(splitComm, 0, globalComm, serverRemoteLeader, 1341, &interComm) ;
        MPI_Intercomm_merge(interComm, true, &xiosGlobalComm) ;
        CXios::setXiosComm(xiosGlobalComm) ;
      }
    }
    else
    {
      xiosGlobalCommByFileExchange(clientComm, codeId) ;
    }

    int commRank ;
    MPI_Comm_rank(CXios::getXiosComm(), &commRank) ;
    MPI_Comm_split(CXios::getXiosComm(), false, commRank, &clientsComm_) ;

    // using a server or not?
    int xiosCommSize, clientsCommSize ;
    MPI_Comm_size(CXios::getXiosComm(), &xiosCommSize) ;
    MPI_Comm_size(clientsComm_, &clientsCommSize) ;
    if (xiosCommSize==clientsCommSize) CXios::setUsingServer() ;
    else CXios::setNotUsingServer() ;

    CXios::setGlobalRegistry(new CRegistry(clientsComm_)) ;

    /////////////////////////////////////////
    ///////////// PART 3 ////////////////////
    /////////////////////////////////////////

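    // Start the daemons manager on the client side (the "false" argument is assumed to mean
    // "not a server-side process") and create the pool of resources attached to this code id.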
    CXios::launchDaemonsManager(false) ;
    poolRessource_ = new CPoolRessource(clientComm, codeId) ;

    /////////////////////////////////////////
    ///////////// PART 4 ////////////////////
    /////////////////////////////////////////

    returnComm = clientComm ;
  }


  void CClient::xiosGlobalCommByFileExchange(MPI_Comm clientComm, const string& codeId)
  {
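    // Rendezvous by file exchange: the root of each code publishes its global rank in a small
    // file named after its code id, reads back the rank published by the XIOS server (with a
    // timeout), and then builds the global XIOS communicator by successive intercommunicator
    // creations and merges.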
    MPI_Comm globalComm=CXios::getGlobalComm() ;
    MPI_Comm xiosGlobalComm ;

    string strIds=CXios::getin<string>("clients_code_id","") ;
    vector<string> clientsCodeId=splitRegex(strIds,"\\s*,\\s*") ;

    int commRank, globalRank, clientRank, serverRank ;
    MPI_Comm_rank(clientComm, &commRank) ;
    MPI_Comm_rank(globalComm, &globalRank) ;
    string clientFileName("__xios_publisher::"+codeId+"__to_remove__") ;

    int error ;

    if (commRank==0) // the root process publishes its global rank
    {
      std::ofstream ofs (clientFileName, std::ofstream::out);
      ofs<<globalRank ;
      ofs.close();

      // get the server root rank
      std::ifstream ifs ;
      string fileName=("__xios_publisher::"+CXios::xiosCodeId+"__to_remove__") ;

      double timeout = CXios::getin<double>("server_puplish_timeout",serverPublishDefaultTimeout) ;
      double time ;

      do
      {
        CTimer::get("server_publish_timeout").resume() ;
        ifs.clear() ;
        ifs.open(fileName, std::ifstream::in) ;
        CTimer::get("server_publish_timeout").suspend() ;
      } while (ifs.fail() && CTimer::get("server_publish_timeout").getCumulatedTime()<timeout) ;

      if (CTimer::get("server_publish_timeout").getCumulatedTime()>=timeout || ifs.fail())
      {
        ifs.clear() ;
        ifs.close() ;
        ifs.clear() ;
        error=true ;
      }
      else
      {
        ifs>>serverRank ;
        ifs.close() ;
        error=false ;
      }
    }

    MPI_Bcast(&error,1,MPI_INT,0,clientComm) ;
    if (error==false) // a server is present
    {
      MPI_Comm intraComm ;
      MPI_Comm_dup(clientComm,&intraComm) ;
      MPI_Comm interComm ;

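      // Find this code's position in the ordered clients_code_id list; the loop below then
      // repeatedly creates an intercommunicator with the server leader and merges it, building
      // up the global communicator code by code (the matching calls are assumed to be performed
      // on the server side).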
      int pos=0 ;
      for(int i=0 ; codeId!=clientsCodeId[i]; i++) pos=pos+1 ;

      bool high=true ;
      for(int i=pos ; i<clientsCodeId.size(); i++)
      {
        MPI_Intercomm_create(intraComm, 0, globalComm, serverRank, 3141, &interComm);
        MPI_Comm_free(&intraComm) ;
        MPI_Intercomm_merge(interComm, high, &intraComm ) ;
        high=false ;
      }
      xiosGlobalComm=intraComm ;
    }
    else // no server detected
    {
      vector<int> clientsRank(clientsCodeId.size()) ;

      if (commRank==0)
      {
        for(int i=0;i<clientsRank.size();i++)
        {
          std::ifstream ifs ;
          string fileName=("__xios_publisher::"+clientsCodeId[i]+"__to_remove__") ;
          do
          {
            ifs.clear() ;
            ifs.open(fileName, std::ifstream::in) ;
          } while (ifs.fail()) ;
          ifs>>clientsRank[i] ;
          ifs.close() ;
        }
      }

      int client ;
      MPI_Comm intraComm ;
      MPI_Comm_dup(clientComm,&intraComm) ;
      MPI_Comm interComm ;

      int pos=0 ;
      for(int i=0 ; codeId!=clientsCodeId[i]; i++) pos=pos+1 ;

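      // Without a server, the first code listed in clients_code_id acts as a hub: it creates an
      // intercommunicator with each of the other codes in turn and merges it, while the other
      // codes create their intercommunicators with the root of that first code; in the end all
      // processes share one merged intracommunicator used as the XIOS global communicator.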
      bool high=true ;
      for(int i=pos+1 ; i<clientsCodeId.size(); i++)
      {
        if (codeId==clientsCodeId[0]) // the first model plays the server role
        {
          MPI_Intercomm_create(intraComm, 0, globalComm, clientsRank[i], 3141, &interComm);
          MPI_Intercomm_merge(interComm, false, &intraComm ) ;
        }
        else
        {
          MPI_Intercomm_create(intraComm, 0, globalComm, clientsRank[0], 3141, &interComm);
          MPI_Intercomm_merge(interComm, high, &intraComm ) ;
          high=false ;
        }
      }
      xiosGlobalComm=intraComm ;
    }

    MPI_Barrier(xiosGlobalComm);
    if (commRank==0) std::remove(clientFileName.c_str()) ;
    MPI_Barrier(xiosGlobalComm);

    CXios::setXiosComm(xiosGlobalComm) ;

    MPI_Comm commUnfree ;
    MPI_Comm_dup(clientComm, &commUnfree ) ;
  }
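
  // Alternative rendezvous based on the MPI name-publishing interface (MPI_Open_port /
  // MPI_Publish_name / MPI_Lookup_name); kept commented out because it relies on dynamic
  // process support that is not available on every MPI implementation.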
  // to be checked on other architectures
  void CClient::xiosGlobalCommByPublishing(MPI_Comm clientComm, const string& codeId)
  {
    // untested: needs to be developed on a fully MPI-compliant library

    /*
    // try to discover the other clients/servers
    // is there an XIOS server?
    char portName[MPI_MAX_PORT_NAME];
    int ierr ;
    int commRank ;
    MPI_Comm_rank(clientComm,&commRank) ;

    MPI_Barrier(globalComm) ;
    if (commRank==0)
    {
      MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN );
      const char* serviceName=CXios::xiosCodeId.c_str() ;
      ierr=MPI_Lookup_name(CXios::xiosCodeId.c_str(), MPI_INFO_NULL, portName);
      MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_ARE_FATAL );
    }
    ierr=MPI_SUCCESS ;
    MPI_Bcast(&ierr,1,MPI_INT,0,clientComm) ;

    if (ierr==MPI_SUCCESS) // a server is present
    {
      MPI_Comm intraComm=clientComm ;
      MPI_Comm interComm ;
      for(int i=0 ; i<clientsCodeId.size(); i++)
      {
        MPI_Comm_connect(portName, MPI_INFO_NULL, 0, intraComm, &interComm);
        MPI_Intercomm_merge(interComm, true, &intraComm ) ;
      }
      xiosGlobalComm=intraComm ;
    }
    else // there is no server at all
    {
      if (codeId==clientsCodeId[0]) // the first code publishes its name
      {
        if (commRank==0) // the root process publishes the name
        {
          MPI_Open_port(MPI_INFO_NULL, portName);
          MPI_Publish_name(CXios::xiosCodeId.c_str(), MPI_INFO_NULL, portName);
        }

        MPI_Comm intraComm=clientComm ;
        MPI_Comm interComm ;
        for(int i=0 ; i<clientsCodeId.size()-1; i++)
        {
          MPI_Comm_accept(portName, MPI_INFO_NULL, 0, intraComm, &interComm);
          MPI_Intercomm_merge(interComm, false, &intraComm ) ;
        }
      }
      else // the other clients connect to the first one
      {
        if (commRank==0)
        {
          MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN );
          ierr=MPI_Lookup_name(CXios::xiosCodeId.c_str(), MPI_INFO_NULL, portName);
          MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_ARE_FATAL );
        }

        MPI_Bcast(&ierr,1,MPI_INT,0,clientComm) ;

        if (ierr==MPI_SUCCESS) // the connection is possible
        {
          MPI_Comm intraComm=clientComm ;
          MPI_Comm interComm ;
          for(int i=0 ; i<clientsCodeId.size()-1; i++)
          {
            MPI_Comm_connect(portName, MPI_INFO_NULL, 0, intraComm, &interComm);
            MPI_Intercomm_merge(interComm, true, &intraComm ) ;
          }
          xiosGlobalComm=intraComm ;
        }
      }
    }
    */
  }


  ///---------------------------------------------------------------
  /*!
   * \fn void CClient::registerContext(const string& id, MPI_Comm contextComm)
   * \brief Sends a request to the server to create a context. Creates the client/server contexts.
   * \param [in] id id of the context.
   * \param [in] contextComm communicator of the context.
   * This function is only called by the client.
   */
  void CClient::registerContext(const string& id, MPI_Comm contextComm)
  {
    int commRank, commSize ;
    MPI_Comm_rank(contextComm,&commRank) ;
    MPI_Comm_size(contextComm,&commSize) ;

    getPoolRessource()->createService(contextComm, id, 0, CServicesManager::CLIENT, 1) ;
    getPoolRessource()->createService(contextComm, CXios::defaultServerId, 0, CServicesManager::IO_SERVER, 1) ;

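    // Rank 0 drives the daemons manager event loop until the services manager reports that the
    // client service exists, then asks the contexts manager to create the matching server-side
    // context; all ranks then poll until that context is visible.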
    if (commRank==0) while (!CXios::getServicesManager()->hasService(getPoolRessource()->getId(), id, 0)) { CXios::getDaemonsManager()->eventLoop(); }

    if (commRank==0) CXios::getContextsManager()->createServerContext(getPoolRessource()->getId(), id, 0, id) ;
    int type=CServicesManager::CLIENT ;
    string name = CXios::getContextsManager()->getServerContextName(getPoolRessource()->getId(), id, 0, type, id) ;
    while (!CXios::getContextsManager()->hasContext(name, contextComm) )
    {
      CXios::getDaemonsManager()->eventLoop() ;
    }
  }


  /*!
   * \fn void CClient::callOasisEnddef(void)
   * \brief Sends the order to the servers to call "oasis_enddef". It must be done by every model
   * component before calling oasis_enddef on the client side.
   * This function is only called by the client.
   */
  void CClient::callOasisEnddef(void)
  {
    bool oasisEnddef=CXios::getin<bool>("call_oasis_enddef",true) ;
    if (!oasisEnddef) ERROR("void CClient::callOasisEnddef(void)", <<"Function xios_oasis_enddef called but variable <call_oasis_enddef> is set to false."<<endl
                                                                   <<"Variable <call_oasis_enddef> must be set to true"<<endl) ;
    if (CXios::isServer)
    // attached mode
    {
      // nothing to do
    }
    else
    {
      int rank ;
      int msg=0 ;

      MPI_Comm_rank(intraComm,&rank) ;
      if (rank==0)
      {
        MPI_Send(&msg,1,MPI_INT,0,5,interComm) ; // tag oasis_enddef = 5
      }
    }
  }

  void CClient::finalize(void)
  {
    MPI_Barrier(clientsComm_) ;
    int commRank ;
    MPI_Comm_rank(clientsComm_, &commRank) ;
    if (commRank==0) CXios::getRessourcesManager()->finalize() ;

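    // Gather the registries of all clients hierarchically onto rank 0, which dumps the merged
    // registry to xios_registry.bin before the MPI/OASIS finalisation below.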
    auto globalRegistry=CXios::getGlobalRegistry() ;
    globalRegistry->hierarchicalGatherRegistry() ;

    if (commRank==0)
    {
      info(80)<<"Write database registry"<<endl<<globalRegistry->toString()<<endl ;
      globalRegistry->toFile("xios_registry.bin") ;
    }
    delete globalRegistry ;

    CTimer::get("XIOS init/finalize",false).suspend() ;
    CTimer::get("XIOS").suspend() ;

    CXios::finalizeDaemonsManager() ;

    if (!is_MPI_Initialized)
    {
      if (CXios::usingOasis) oasis_finalize();
      else MPI_Finalize() ;
    }

    info(20) << "Client side context is finalized"<<endl ;
    report(0) <<" Performance report : Whole time from XIOS init and finalize: "<< CTimer::get("XIOS init/finalize").getCumulatedTime()<<" s"<<endl ;
    report(0) <<" Performance report : total time spent for XIOS : "<< CTimer::get("XIOS").getCumulatedTime()<<" s"<<endl ;
    report(0) <<" Performance report : time spent waiting for a free buffer : "<< CTimer::get("Blocking time").getCumulatedTime()<<" s"<<endl ;
    report(0) <<" Performance report : Ratio : "<< CTimer::get("Blocking time").getCumulatedTime()/CTimer::get("XIOS init/finalize").getCumulatedTime()*100.<<" %"<<endl ;
    report(0) <<" Performance report : This ratio must be close to zero. Otherwise it may be useful to increase the buffer size or the number of servers"<<endl ;
//  report(0) <<" Memory report : Current buffer_size : "<<CXios::bufferSize<<endl ;
    report(0) <<" Memory report : Minimum buffer size required : " << CClientBuffer::maxRequestSize << " bytes" << endl ;
    report(0) <<" Memory report : Increasing it will improve performance, depending on the volume of data written to file at each time step"<<endl ;
    report(100)<<CTimer::getAllCumulatedTime()<<endl ;
  }


  /*!
   * Returns the global rank when OASIS is not used, or the current rank in the model intraComm when OASIS is used.
   */
  int CClient::getRank()
  {
    return rank_;
  }

  /*!
   * Open a file specified by a suffix and an extension and use it for the given file buffer.
   * The file name will be suffix+rank+extension.
   *
   * \param fileName [in] prototype file name
   * \param ext [in] extension of the file
   * \param fb [in,out] the file buffer
   */
  void CClient::openStream(const StdString& fileName, const StdString& ext, std::filebuf* fb)
  {
    StdStringStream fileNameClient;
    int numDigit = 0;
    int size = 0;
    int rank;
    MPI_Comm_size(CXios::getGlobalComm(), &size);
    MPI_Comm_rank(CXios::getGlobalComm(),&rank);
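    // count the number of decimal digits of the communicator size, used to zero-pad the rank in the file name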
    while (size)
    {
      size /= 10;
      ++numDigit;
    }

    fileNameClient << fileName << "_" << std::setfill('0') << std::setw(numDigit) << rank << ext;

    fb->open(fileNameClient.str().c_str(), std::ios::out);
    if (!fb->is_open())
      ERROR("void CClient::openStream(const StdString& fileName, const StdString& ext, std::filebuf* fb)",
            << std::endl << "Cannot open <" << fileNameClient.str() << "> file to write the client log(s).");
  }

  /*!
   * \brief Open a file stream to write the info logs
   * Open a file stream with a specific file name suffix+rank
   * to write the info logs.
   * \param fileName [in] prototype file name
   */
  void CClient::openInfoStream(const StdString& fileName)
  {
    std::filebuf* fb = m_infoStream.rdbuf();
    openStream(fileName, ".out", fb);

    info.write2File(fb);
    report.write2File(fb);
  }

  //! Write the info logs to standard output
  void CClient::openInfoStream()
  {
    info.write2StdOut();
    report.write2StdOut();
  }

  //! Close the info log file if it is open
  void CClient::closeInfoStream()
  {
    if (m_infoStream.is_open()) m_infoStream.close();
  }

  /*!
   * \brief Open a file stream to write the error log
   * Open a file stream with a specific file name suffix+rank
   * to write the error log.
   * \param fileName [in] prototype file name
   */
  void CClient::openErrorStream(const StdString& fileName)
  {
    std::filebuf* fb = m_errorStream.rdbuf();
    openStream(fileName, ".err", fb);

    error.write2File(fb);
  }

  //! Write the error log to standard error output
  void CClient::openErrorStream()
  {
    error.write2StdErr();
  }

  //! Close the error log file if it is open
  void CClient::closeErrorStream()
  {
    if (m_errorStream.is_open()) m_errorStream.close();
  }
}