source: XIOS/trunk/src/context_client.cpp @ 1031

Last change on this file since 1031 was 988, checked in by mhnguyen, 8 years ago

Correcting a bug on reading non-distributed data in server mode.

+) Non-distributed data read by all servers then transferred back to ALL clients

Test
+) On Curie
+) Work

  • Property copyright set to
    Software name : XIOS (Xml I/O Server)
    http://forge.ipsl.jussieu.fr/ioserver
    Creation date : January 2009
    Licence : CeCILL version 2
    see license file in root directory : Licence_CeCILL_V2-en.txt
    or http://www.cecill.info/licences/Licence_CeCILL_V2-en.html
    Holder : CEA/LSCE (Laboratoire des Sciences du CLimat et de l'Environnement)
    CNRS/IPSL (Institut Pierre Simon Laplace)
    Project Manager : Yann Meurdesoif
    yann.meurdesoif@cea.fr
  • Property svn:eol-style set to native
File size: 11.1 KB
RevLine 
[591]1#include "xios_spl.hpp"
[300]2#include "context_client.hpp"
3#include "context_server.hpp"
4#include "event_client.hpp"
5#include "buffer_out.hpp"
6#include "buffer_client.hpp"
7#include "type.hpp"
8#include "event_client.hpp"
9#include "context.hpp"
[382]10#include "mpi.hpp"
[347]11#include "timer.hpp"
[401]12#include "cxios.hpp"
[300]13
[335]14namespace xios
[300]15{
    /*!
    Build a client-side context and compute, once and for all, which of the connected
    servers this client process "leads" (i.e. is responsible for sending collective
    events such as finalize to).
    \param [in] parent Pointer to context on client side
    \param [in] intraComm_ communicator of group client
    \param [in] interComm_ communicator of group server
    \param [in] cxtSer Pointer to context of server side (only used in case of attached mode)
    */
    CContextClient::CContextClient(CContext* parent, MPI_Comm intraComm_, MPI_Comm interComm_, CContext* cxtSer)
     : mapBufferSize_(), parentServer(cxtSer), maxBufferedEvents(4)
    {
      context = parent;
      intraComm = intraComm_;
      interComm = interComm_;
      MPI_Comm_rank(intraComm, &clientRank);
      MPI_Comm_size(intraComm, &clientSize);

      // interComm is a genuine inter-communicator only in server mode; in attached
      // mode it is an intra-communicator, hence the runtime test before sizing it.
      int flag;
      MPI_Comm_test_inter(interComm, &flag);
      if (flag) MPI_Comm_remote_size(interComm, &serverSize);
      else  MPI_Comm_size(interComm, &serverSize);

      if (clientSize < serverSize)
      {
        // Fewer clients than servers: every client leads a contiguous slice of
        // serverSize/clientSize servers, the first (serverSize % clientSize)
        // clients taking one extra server each. No non-leader connections exist.
        int serverByClient = serverSize / clientSize;
        int remain = serverSize % clientSize;
        int rankStart = serverByClient * clientRank;

        if (clientRank < remain)
        {
          serverByClient++;
          rankStart += clientRank;
        }
        else
          rankStart += remain;

        for (int i = 0; i < serverByClient; i++)
          ranksServerLeader.push_back(rankStart + i);

        ranksServerNotLeader.resize(0);
      }
      else
      {
        // At least as many clients as servers: clients are grouped per server,
        // the first (clientSize % serverSize) servers receiving one extra client.
        // Only the first client of each group is the leader for that server; the
        // remaining clients of the group record it as a "not leader" connection.
        int clientByServer = clientSize / serverSize;
        int remain = clientSize % serverSize;

        if (clientRank < (clientByServer + 1) * remain)
        {
          if (clientRank % (clientByServer + 1) == 0)
            ranksServerLeader.push_back(clientRank / (clientByServer + 1));
          else
            ranksServerNotLeader.push_back(clientRank / (clientByServer + 1));
        }
        else
        {
          int rank = clientRank - (clientByServer + 1) * remain;
          if (rank % clientByServer == 0)
            ranksServerLeader.push_back(remain + rank / clientByServer);
          else
            ranksServerNotLeader.push_back(remain + rank / clientByServer);
        }
      }

      timeLine = 0;
    }
79
[512]80    /*!
81    In case of attached mode, the current context must be reset to context for client
82    \param [in] event Event sent to server
83    */
[300]84    void CContextClient::sendEvent(CEventClient& event)
85    {
[731]86      list<int> ranks = event.getRanks();
[595]87      if (!event.isEmpty())
[300]88      {
[731]89        list<int> sizes = event.getSizes();
[300]90
[595]91        list<CBufferOut*> buffList = getBuffers(ranks, sizes);
[509]92
[731]93        event.send(timeLine, sizes, buffList);
94
[595]95        checkBuffers(ranks);
[300]96      }
97
[704]98      if (isAttachedModeEnabled())
[511]99      {
100        waitEvent(ranks);
101        CContext::setCurrent(context->getId());
102      }
103
[595]104      timeLine++;
[300]105    }
[509]106
    /*!
    If client is also server (attached mode), after sending event, it should process right away
    the incoming event: drain the outgoing buffers while letting the embedded server
    listen, then run the server event loop until no event remains pending.
    \param [in] ranks list rank of server connected this client
    */
    void CContextClient::waitEvent(list<int>& ranks)
    {
      parentServer->server->setPendingEvent();
      // As long as our buffers still hold unsent messages, keep the server side
      // listening so it can consume them (client and server share this process).
      while (checkBuffers(ranks))
      {
        parentServer->server->listen();
        parentServer->server->checkPendingRequest();
      }

      // Then process every event the server has accumulated.
      while (parentServer->server->hasPendingEvent())
      {
       parentServer->server->eventLoop();
      }
    }
126
    /*!
    Setup buffer for each connection to server and verify their state to put content into them.
    Blocks until every requested buffer has enough free room for its message.
    \param [in] serverList list of rank of connected server
    \param [in] sizeList size of message corresponding to each connection
    \return List of buffer input which event can be placed
    */
    list<CBufferOut*> CContextClient::getBuffers(list<int>& serverList, list<int>& sizeList)
    {
      list<int>::iterator itServer, itSize;
      list<CClientBuffer*> bufferList;
      map<int,CClientBuffer*>::iterator it;
      list<CClientBuffer*>::iterator itBuffer;
      list<CBufferOut*>  retBuffer;
      bool areBuffersFree;

      // Lazily create a buffer for any server we have not talked to yet.
      for (itServer = serverList.begin(); itServer != serverList.end(); itServer++)
      {
        it = buffers.find(*itServer);
        if (it == buffers.end())
        {
          newBuffer(*itServer);
          it = buffers.find(*itServer);
        }
        bufferList.push_back(it->second);
      }

      // Busy-wait until all buffers can take their message at once; while waiting,
      // flush pending sends and let the server listen so progress can be made
      // (this avoids deadlock in attached mode).
      CTimer::get("Blocking time").resume();
      do
      {
        areBuffersFree = true;
        for (itBuffer = bufferList.begin(), itSize = sizeList.begin(); itBuffer != bufferList.end(); itBuffer++, itSize++)
          areBuffersFree &= (*itBuffer)->isBufferFree(*itSize);

        if (!areBuffersFree)
        {
          checkBuffers();
          context->server->listen();
        }
      } while (!areBuffersFree);
      CTimer::get("Blocking time").suspend();

      // All buffers have room: hand out one output view per requested size.
      for (itBuffer = bufferList.begin(), itSize = sizeList.begin(); itBuffer != bufferList.end(); itBuffer++, itSize++)
      {
        retBuffer.push_back((*itBuffer)->getBuffer(*itSize));
      }
      return retBuffer;
   }
[509]174
   /*!
   Make a new buffer for a certain connection to server with specific rank,
   and immediately notify that server of the buffer size it should expect.
   \param [in] rank rank of connected server
   */
   void CContextClient::newBuffer(int rank)
   {
      // Fall back to the minimum size if setBufferSize() was never called for
      // this rank; that situation is unexpected, hence the warning.
      if (!mapBufferSize_.count(rank))
      {
        error(0) << "WARNING: Unexpected request for buffer to communicate with server " << rank << std::endl;
        mapBufferSize_[rank] = CXios::minBufferSize;
      }
      CClientBuffer* buffer = buffers[rank] = new CClientBuffer(interComm, rank, mapBufferSize_[rank], maxBufferedEvents);
      // Notify the server: the first message carries the buffer size.
      // NOTE(review): bufOut appears to be owned/recycled by the CClientBuffer
      // (no delete here) — confirm against CClientBuffer::getBuffer.
      CBufferOut* bufOut = buffer->getBuffer(sizeof(StdSize));
      bufOut->put(mapBufferSize_[rank]); // Stupid C++
      buffer->checkBuffer();
   }
[300]192
[512]193   /*!
194   Verify state of buffers. Buffer is under pending state if there is no message on it
195   \return state of buffers, pending(true), ready(false)
196   */
[300]197   bool CContextClient::checkBuffers(void)
198   {
[595]199      map<int,CClientBuffer*>::iterator itBuff;
200      bool pending = false;
201      for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++) pending |= itBuff->second->checkBuffer();
202      return pending;
[509]203   }
[300]204
[512]205   //! Release all buffers
[300]206   void CContextClient::releaseBuffers(void)
207   {
[595]208      map<int,CClientBuffer*>::iterator itBuff;
209      for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++) delete itBuff->second;
[509]210   }
[300]211
[512]212   /*!
213   Verify state of buffers corresponding to a connection
214   \param [in] ranks list rank of server to which client connects to
215   \return state of buffers, pending(true), ready(false)
216   */
[300]217   bool CContextClient::checkBuffers(list<int>& ranks)
218   {
[595]219      list<int>::iterator it;
220      bool pending = false;
221      for (it = ranks.begin(); it != ranks.end(); it++) pending |= buffers[*it]->checkBuffer();
222      return pending;
[509]223   }
[300]224
   /*!
    * Set the buffer size for each connection. Warning: This function is collective
    * (every client rank must call it, as it performs an MPI_Allreduce on intraComm).
    *
    * \param [in] mapSize maps the rank of the connected servers to the size of the correspoinding buffer
    * \param [in] maxEventSize maps the rank of the connected servers to the size of the biggest event
   */
   void CContextClient::setBufferSize(const std::map<int,StdSize>& mapSize, const std::map<int,StdSize>& maxEventSize)
   {
     mapBufferSize_ = mapSize;

     // Compute the maximum number of events that can be safely buffered:
     // take the worst (smallest) buffer-size / max-event-size ratio over all
     // connections, then over all client ranks.
     double minBufferSizeEventSizeRatio = std::numeric_limits<double>::max();
     for (std::map<int,StdSize>::const_iterator it = mapSize.begin(), ite = mapSize.end(); it != ite; ++it)
     {
       double ratio = double(it->second) / maxEventSize.at(it->first);
       if (ratio < minBufferSizeEventSizeRatio) minBufferSizeEventSizeRatio = ratio;
     }
     MPI_Allreduce(MPI_IN_PLACE, &minBufferSizeEventSizeRatio, 1, MPI_DOUBLE, MPI_MIN, intraComm);

     // A ratio below 1 means some event cannot fit into its buffer at all.
     if (minBufferSizeEventSizeRatio < 1.0)
       ERROR("void CContextClient::setBufferSize(const std::map<int,StdSize>& mapSize, const std::map<int,StdSize>& maxEventSize)",
             << "The buffer sizes and the maximum events sizes are incoherent.");

     maxBufferedEvents = size_t(2 * minBufferSizeEventSizeRatio) // there is room for two local buffers on the server
                          + size_t(minBufferSizeEventSizeRatio)  // one local buffer can always be fully used
                          + 1;                                   // the other local buffer might contain only one event
   }
252
  /*!
  Get the ranks of the connected servers for which this client is NOT the leader.
  (The previous comment was copy-pasted from the leader getter.)
  \return ranks of the non-leader servers
  */
  const std::list<int>& CContextClient::getRanksServerNotLeader(void) const
  {
    return ranksServerNotLeader;
  }
261
  /*!
  Check if this client is connected to at least one server for which it is NOT
  the leader. (The previous comment was copy-pasted from isServerLeader.)
  \return true if there is at least one non-leader connection, false otherwise
  */
  bool CContextClient::isServerNotLeader(void) const
  {
    return !ranksServerNotLeader.empty();
  }
270
  /*!
  Get the ranks of the connected servers for which this client is the leader.
  \return ranks of the leading servers
  */
  const std::list<int>& CContextClient::getRanksServerLeader(void) const
  {
    return ranksServerLeader;
  }
[509]279
  /*!
  Check if this client is the leader of at least one connected server.
  \return true if there is at least one leader connection, false otherwise
  */
  bool CContextClient::isServerLeader(void) const
  {
    return !ranksServerLeader.empty();
  }
[300]288
[704]289  /*!
290   * Check if the attached mode is used.
291   *
292   * \return true if and only if attached mode is used
293   */
294  bool CContextClient::isAttachedModeEnabled() const
295  {
296    return (parentServer != 0);
297  }
[697]298
   /*!
   Finalize context client: send the finalize event to the servers this client
   leads, wait for all outgoing buffers to drain, report per-connection memory
   usage, and release all buffers.
   */
   void CContextClient::finalize(void)
   {
     map<int,CClientBuffer*>::iterator itBuff;
     bool stop = true;

     // Only leader clients actually carry the finalize message; the others
     // still call sendEvent with an empty event (collective protocol).
     CEventClient event(CContext::GetType(), CContext::EVENT_ID_CONTEXT_FINALIZE);
     if (isServerLeader())
     {
       CMessage msg;
       const std::list<int>& ranks = getRanksServerLeader();
       for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank)
         event.push(*itRank, 1, msg);
       sendEvent(event);
     }
     else sendEvent(event);

     // Block until no buffer has an MPI request still in flight.
     CTimer::get("Blocking time").resume();
     while (stop)
     {
       checkBuffers();
       stop = false;
       for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++) stop |= itBuff->second->hasPendingRequest();
     }
     CTimer::get("Blocking time").suspend();

     // Memory report: one line per server connection, then the total.
     std::map<int,StdSize>::const_iterator itbMap = mapBufferSize_.begin(),
                                           iteMap = mapBufferSize_.end(), itMap;
     StdSize totalBuf = 0;
     for (itMap = itbMap; itMap != iteMap; ++itMap)
     {
       report(10) << " Memory report : Context <" << context->getId() << "> : client side : memory used for buffer of each connection to server" << endl
                  << "  +) To server with rank " << itMap->first << " : " << itMap->second << " bytes " << endl;
       totalBuf += itMap->second;
     }
     report(0) << " Memory report : Context <" << context->getId() << "> : client side : total memory used for buffer " << totalBuf << " bytes" << endl;

     releaseBuffers();
   }
[509]340}
Note: See TracBrowser for help on using the repository browser.