1 | #include "xios_spl.hpp" |
---|
2 | #include "context_client.hpp" |
---|
3 | #include "context_server.hpp" |
---|
4 | #include "event_client.hpp" |
---|
5 | #include "buffer_out.hpp" |
---|
6 | #include "buffer_client.hpp" |
---|
7 | #include "type.hpp" |
---|
8 | #include "event_client.hpp" |
---|
9 | #include "context.hpp" |
---|
10 | #include "mpi.hpp" |
---|
11 | #include "timer.hpp" |
---|
12 | #include "cxios.hpp" |
---|
13 | #include "server.hpp" |
---|
14 | |
---|
15 | namespace xios |
---|
16 | { |
---|
17 | /*! |
---|
18 | \param [in] parent Pointer to context on client side |
---|
19 | \param [in] intraComm_ communicator of group client |
---|
20 | \param [in] interComm_ communicator of group server |
---|
21 | \cxtSer [in] cxtSer Pointer to context of server side. (It is only used in case of attached mode). |
---|
22 | */ |
---|
  CContextClient::CContextClient(CContext* parent, MPI_Comm intraComm_, MPI_Comm interComm_, CContext* cxtSer)
    : mapBufferSize_(), parentServer(cxtSer), maxBufferedEvents(4)
  {
    context = parent;
    intraComm = intraComm_;
    interComm = interComm_;
    MPI_Comm_rank(intraComm, &clientRank);
    MPI_Comm_size(intraComm, &clientSize);

    int flag;
    MPI_Comm_test_inter(interComm, &flag);
    if (flag) MPI_Comm_remote_size(interComm, &serverSize);
    else MPI_Comm_size(interComm, &serverSize);

    computeLeader(clientRank, clientSize, serverSize, ranksServerLeader, ranksServerNotLeader);

    timeLine = 0;
  }

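  /*!
  Distribute the servers among the clients and determine, for each client, the servers for which
  it acts as the leader (description and example worked out from the code below). When there are
  more servers than clients, each client leads a contiguous block of servers and the "not leader"
  list stays empty; otherwise each server is assigned a contiguous block of clients and only the
  first client of each block is its leader, the others being reported as "not leader".
  For example, with clientSize = 8 and serverSize = 3, clients 0, 3 and 6 lead servers 0, 1 and 2,
  while clients 1-2, 4-5 and 7 are "not leader" of those same servers. With clientSize = 3 and
  serverSize = 8, client 0 leads servers 0-2, client 1 leads servers 3-5 and client 2 leads
  servers 6-7.
  \param [in] clientRank rank of this client in the client group
  \param [in] clientSize number of clients
  \param [in] serverSize number of servers
  \param [out] rankRecvLeader ranks of the servers for which this client is the leader
  \param [out] rankRecvNotLeader ranks of the servers to which this client is connected without being the leader
  */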
  void CContextClient::computeLeader(int clientRank, int clientSize, int serverSize,
                                     std::list<int>& rankRecvLeader,
                                     std::list<int>& rankRecvNotLeader)
  {
    if ((0 == clientSize) || (0 == serverSize)) return;

    if (clientSize < serverSize)
    {
      int serverByClient = serverSize / clientSize;
      int remain = serverSize % clientSize;
      int rankStart = serverByClient * clientRank;

      if (clientRank < remain)
      {
        serverByClient++;
        rankStart += clientRank;
      }
      else
        rankStart += remain;

      for (int i = 0; i < serverByClient; i++)
        rankRecvLeader.push_back(rankStart + i);

      rankRecvNotLeader.resize(0);
    }
    else
    {
      int clientByServer = clientSize / serverSize;
      int remain = clientSize % serverSize;

      if (clientRank < (clientByServer + 1) * remain)
      {
        if (clientRank % (clientByServer + 1) == 0)
          rankRecvLeader.push_back(clientRank / (clientByServer + 1));
        else
          rankRecvNotLeader.push_back(clientRank / (clientByServer + 1));
      }
      else
      {
        int rank = clientRank - (clientByServer + 1) * remain;
        if (rank % clientByServer == 0)
          rankRecvLeader.push_back(remain + rank / clientByServer);
        else
          rankRecvNotLeader.push_back(remain + rank / clientByServer);
      }
    }
  }

  /*!
  In attached mode, the current context must be reset to the client context after the event is sent.
  \param [in] event Event sent to the server
  */
  void CContextClient::sendEvent(CEventClient& event)
  {
    list<int> ranks = event.getRanks();

    if (CXios::checkEventSync)
    {
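      // Coherence check: every client contributes its event type, class id and time line to an
      // MPI_Allreduce sum over intraComm. If all clients are sending the same event at the same
      // time line, each sum divided by clientSize equals the local value; any mismatch means the
      // clients have diverged and an error is raised.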
      int typeId, classId, typeId_in, classId_in;
      size_t timeLine_out;
      typeId_in = event.getTypeId();
      classId_in = event.getClassId();
//    MPI_Allreduce(&timeLine, &timeLine_out, 1, MPI_UINT64_T, MPI_SUM, intraComm); // MPI_UINT64_T standardized by MPI 3
      MPI_Allreduce(&timeLine, &timeLine_out, 1, MPI_UNSIGNED_LONG_LONG, MPI_SUM, intraComm);
      MPI_Allreduce(&typeId_in, &typeId, 1, MPI_INT, MPI_SUM, intraComm);
      MPI_Allreduce(&classId_in, &classId, 1, MPI_INT, MPI_SUM, intraComm);
      if (typeId / clientSize != event.getTypeId() || classId / clientSize != event.getClassId() || timeLine_out / clientSize != timeLine)
      {
        ERROR("void CContextClient::sendEvent(CEventClient& event)",
              << "Events are not coherent between clients.");
      }
    }

    if (!event.isEmpty())
    {
      list<int> sizes = event.getSizes();

      // We force the getBuffers call to be non-blocking on classical servers
      list<CBufferOut*> buffList;
      bool couldBuffer = getBuffers(ranks, sizes, buffList, (!CXios::isClient && (CServer::serverLevel == 0)));
//    bool couldBuffer = getBuffers(ranks, sizes, buffList, CXios::isServer);

      if (couldBuffer)
      {
        event.send(timeLine, sizes, buffList);

        checkBuffers(ranks);

        if (isAttachedModeEnabled()) // couldBuffer is always true in attached mode
        {
          waitEvent(ranks);
          CContext::setCurrent(context->getId());
        }
      }
      else
      {
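        // Not enough room in the client buffers: copy the event into temporary buffers instead.
        // The copy will be flushed to the real buffers later by sendTemporarilyBufferedEvent().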
        tmpBufferedEvent.ranks = ranks;
        tmpBufferedEvent.sizes = sizes;

        for (list<int>::const_iterator it = sizes.begin(); it != sizes.end(); it++)
          tmpBufferedEvent.buffers.push_back(new CBufferOut(*it));
        info(100) << "DEBUG : temporary event created : timeline " << timeLine << endl;
        event.send(timeLine, tmpBufferedEvent.sizes, tmpBufferedEvent.buffers);
      }
    }

    timeLine++;
  }

  /*!
   * Send the temporarily buffered event (if any).
   *
   * \return true if a temporarily buffered event could be sent, false otherwise
   */
  bool CContextClient::sendTemporarilyBufferedEvent()
  {
    bool couldSendTmpBufferedEvent = false;

    if (hasTemporarilyBufferedEvent())
    {
      list<CBufferOut*> buffList;
      if (getBuffers(tmpBufferedEvent.ranks, tmpBufferedEvent.sizes, buffList, true)) // Non-blocking call
      {
        list<CBufferOut*>::iterator it, itBuffer;

        for (it = tmpBufferedEvent.buffers.begin(), itBuffer = buffList.begin(); it != tmpBufferedEvent.buffers.end(); it++, itBuffer++)
          (*itBuffer)->put((char*)(*it)->start(), (*it)->count());

        info(100) << "DEBUG : temporary event sent" << endl;
        checkBuffers(tmpBufferedEvent.ranks);

        tmpBufferedEvent.clear();

        couldSendTmpBufferedEvent = true;
      }
    }

    return couldSendTmpBufferedEvent;
  }

  /*!
  If the client is also a server (attached mode), it should process the incoming events right
  away after sending an event.
  \param [in] ranks list of ranks of the servers connected to this client
  */
  void CContextClient::waitEvent(list<int>& ranks)
  {
    parentServer->server->setPendingEvent();
    while (checkBuffers(ranks))
    {
      parentServer->server->listen();
      parentServer->server->checkPendingRequest();
    }

    while (parentServer->server->hasPendingEvent())
    {
      parentServer->server->eventLoop();
    }
  }

  /*!
   * Get buffers for each connection to the servers. This function blocks until there is enough
   * room in the buffers unless it is explicitly requested to be non-blocking.
   *
   * \param [in] serverList list of ranks of the connected servers
   * \param [in] sizeList size of the message corresponding to each connection
   * \param [out] retBuffers list of buffers that can be used to store an event
   * \param [in] nonBlocking whether this function should be non-blocking
   * \return whether the already allocated buffers could be used
   */
  bool CContextClient::getBuffers(const list<int>& serverList, const list<int>& sizeList, list<CBufferOut*>& retBuffers,
                                  bool nonBlocking /*= false*/)
  {
    list<int>::const_iterator itServer, itSize;
    list<CClientBuffer*> bufferList;
    map<int,CClientBuffer*>::const_iterator it;
    list<CClientBuffer*>::iterator itBuffer;
    bool areBuffersFree;

    for (itServer = serverList.begin(); itServer != serverList.end(); itServer++)
    {
      it = buffers.find(*itServer);
      if (it == buffers.end())
      {
        newBuffer(*itServer);
        it = buffers.find(*itServer);
      }
      bufferList.push_back(it->second);
    }

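    // Wait until every requested buffer has enough free space. While waiting, keep servicing the
    // server side of this process (context->server, and the primary-server contexts on level 1)
    // so that incoming messages are still consumed and the wait cannot deadlock.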
    CTimer::get("Blocking time").resume();
    do
    {
      areBuffersFree = true;
      for (itBuffer = bufferList.begin(), itSize = sizeList.begin(); itBuffer != bufferList.end(); itBuffer++, itSize++)
        areBuffersFree &= (*itBuffer)->isBufferFree(*itSize);

      if (!areBuffersFree)
      {
        checkBuffers();
        if (CServer::serverLevel == 0)
          context->server->listen();

        else if (CServer::serverLevel == 1)
        {
          context->server->listen();
          for (int i = 0; i < context->serverPrimServer.size(); ++i)
            context->serverPrimServer[i]->listen();
          CServer::contextEventLoop(false); // avoid dead-lock at finalize...
        }

        else if (CServer::serverLevel == 2)
          context->server->listen();

      }
    } while (!areBuffersFree && !nonBlocking);

    CTimer::get("Blocking time").suspend();

    if (areBuffersFree)
    {
      for (itBuffer = bufferList.begin(), itSize = sizeList.begin(); itBuffer != bufferList.end(); itBuffer++, itSize++)
        retBuffers.push_back((*itBuffer)->getBuffer(*itSize));
    }

    return areBuffersFree;
  }

  /*!
  Create a new buffer for the connection to the server with the given rank.
  \param [in] rank rank of the connected server
  */
  void CContextClient::newBuffer(int rank)
  {
    if (!mapBufferSize_.count(rank))
    {
      error(0) << "WARNING: Unexpected request for buffer to communicate with server " << rank << std::endl;
      mapBufferSize_[rank] = CXios::minBufferSize;
      maxEventSizes[rank] = CXios::minBufferSize;
    }
    CClientBuffer* buffer = buffers[rank] = new CClientBuffer(interComm, rank, mapBufferSize_[rank], maxEventSizes[rank], maxBufferedEvents);
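    // The first message written into a freshly created buffer is the buffer size itself,
    // presumably so that the server can allocate a matching receive buffer on its side.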
    // Notify the server
    CBufferOut* bufOut = buffer->getBuffer(sizeof(StdSize));
    bufOut->put(mapBufferSize_[rank]); // Stupid C++
    buffer->checkBuffer();
  }

  /*!
  Check the state of the buffers. A buffer is pending as long as it still holds data that has not
  been fully sent to the server.
  \return state of the buffers: pending (true), ready (false)
  */
  bool CContextClient::checkBuffers(void)
  {
    map<int,CClientBuffer*>::iterator itBuff;
    bool pending = false;
    for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++)
      pending |= itBuff->second->checkBuffer();
    return pending;
  }

  //! Release all buffers
  void CContextClient::releaseBuffers()
  {
    map<int,CClientBuffer*>::iterator itBuff;
    for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++)
    {
      delete itBuff->second;
    }
    buffers.clear();
  }

  /*!
  Verify the state of the buffers corresponding to a set of connections
  \param [in] ranks list of ranks of the servers to which the client is connected
  \return state of the buffers: pending (true), ready (false)
  */
  bool CContextClient::checkBuffers(list<int>& ranks)
  {
    list<int>::iterator it;
    bool pending = false;
    for (it = ranks.begin(); it != ranks.end(); it++) pending |= buffers[*it]->checkBuffer();
    return pending;
  }

  /*!
   * Set the buffer size for each connection. Warning: this function is collective.
   *
   * \param [in] mapSize maps the rank of each connected server to the size of the corresponding buffer
   * \param [in] maxEventSize maps the rank of each connected server to the size of the biggest event
   */
  void CContextClient::setBufferSize(const std::map<int,StdSize>& mapSize, const std::map<int,StdSize>& maxEventSize)
  {
    mapBufferSize_ = mapSize;
    maxEventSizes = maxEventSize;

    // Compute the maximum number of events that can be safely buffered.
    double minBufferSizeEventSizeRatio = std::numeric_limits<double>::max();
    for (std::map<int,StdSize>::const_iterator it = mapSize.begin(), ite = mapSize.end(); it != ite; ++it)
    {
      double ratio = double(it->second) / maxEventSizes[it->first];
      if (ratio < minBufferSizeEventSizeRatio) minBufferSizeEventSizeRatio = ratio;
    }
    MPI_Allreduce(MPI_IN_PLACE, &minBufferSizeEventSizeRatio, 1, MPI_DOUBLE, MPI_MIN, intraComm);

    if (minBufferSizeEventSizeRatio < 1.0)
    {
      ERROR("void CContextClient::setBufferSize(const std::map<int,StdSize>& mapSize, const std::map<int,StdSize>& maxEventSize)",
            << "The buffer sizes and the maximum events sizes are incoherent.");
    }
    else if (minBufferSizeEventSizeRatio == std::numeric_limits<double>::max())
      minBufferSizeEventSizeRatio = 1.0; // In this case, maxBufferedEvents will never be used but we want to avoid any floating point exception

    maxBufferedEvents = size_t(2 * minBufferSizeEventSizeRatio) // there is room for two local buffers on the server
                      + size_t(minBufferSizeEventSizeRatio)     // one local buffer can always be fully used
                      + 1;                                      // the other local buffer might contain only one event
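    // For example, if every buffer is at least four times as large as the biggest event sent
    // through it, minBufferSizeEventSizeRatio is 4 and maxBufferedEvents = 2*4 + 4 + 1 = 13.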
  }

  /*!
  Get the servers in the group of connected servers for which this client is not the leader
  \return ranks of the non-leading servers
  */
  const std::list<int>& CContextClient::getRanksServerNotLeader(void) const
  {
    return ranksServerNotLeader;
  }

  /*!
  Check if this client is connected to a server for which it is not the leader
  \return true if there is at least one such server, false otherwise
  */
  bool CContextClient::isServerNotLeader(void) const
  {
    return !ranksServerNotLeader.empty();
  }

  /*!
  Get the leading servers in the group of connected servers
  \return ranks of the leading servers
  */
  const std::list<int>& CContextClient::getRanksServerLeader(void) const
  {
    return ranksServerLeader;
  }

  /*!
  Check if this client is the leader of at least one connected server
  \return true if the client is a leader, false otherwise
  */
  bool CContextClient::isServerLeader(void) const
  {
    return !ranksServerLeader.empty();
  }

  /*!
   * Check if the attached mode is used.
   *
   * \return true if and only if attached mode is used
   */
  bool CContextClient::isAttachedModeEnabled() const
  {
    return (parentServer != 0);
  }

  /*!
   * Finalize the context client and report buffer usage. This function is non-blocking.
   */
  void CContextClient::finalize(void)
  {
    map<int,CClientBuffer*>::iterator itBuff;
    bool stop = false;

    CTimer::get("Blocking time").resume();
    while (hasTemporarilyBufferedEvent())
    {
      checkBuffers();
      sendTemporarilyBufferedEvent();
    }
    CTimer::get("Blocking time").suspend();

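    // Send the CONTEXT_FINALIZE event: only the leader ranks push a message to their servers,
    // but every rank calls sendEvent so that the event time line stays in step on all clients.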
    CEventClient event(CContext::GetType(), CContext::EVENT_ID_CONTEXT_FINALIZE);
    if (isServerLeader())
    {
      CMessage msg;
      const std::list<int>& ranks = getRanksServerLeader();
      for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank)
      {
        info(100) << "DEBUG : Sending context finalize event to rank " << *itRank << endl;
        event.push(*itRank, 1, msg);
      }
      sendEvent(event);
    }
    else sendEvent(event);

    CTimer::get("Blocking time").resume();
//  while (!stop)
    {
      checkBuffers();
      if (hasTemporarilyBufferedEvent())
        sendTemporarilyBufferedEvent();

      stop = true;
//    for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++) stop &= !itBuff->second->hasPendingRequest();
    }
    CTimer::get("Blocking time").suspend();

    std::map<int,StdSize>::const_iterator itbMap = mapBufferSize_.begin(),
                                          iteMap = mapBufferSize_.end(), itMap;

    StdSize totalBuf = 0;
    for (itMap = itbMap; itMap != iteMap; ++itMap)
    {
      report(10) << " Memory report : Context <" << context->getId() << "> : client side : memory used for buffer of each connection to server" << endl
                 << " +) To server with rank " << itMap->first << " : " << itMap->second << " bytes " << endl;
      totalBuf += itMap->second;
    }
    report(0) << " Memory report : Context <" << context->getId() << "> : client side : total memory used for buffer " << totalBuf << " bytes" << endl;

//  releaseBuffers(); // moved to CContext::finalize()
  }


  /*!
   * Check whether any buffer still has a pending request.
   * \return true if at least one buffer has a pending request, false otherwise
   */
  bool CContextClient::havePendingRequests(void)
  {
    bool pending = false;
    map<int,CClientBuffer*>::iterator itBuff;
    for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++)
      pending |= itBuff->second->hasPendingRequest();
    return pending;
  }


}