source: XIOS/trunk/src/node/distribution_client.cpp @ 551

Last change on this file since 551 was 551, checked in by mhnguyen, 9 years ago

Redesigning grid structure

+) Add an intermediate class to calculate the distribution on clients and servers
+) Change all attribute indexes to start from zero (0) instead of one (1)

Test
+) On Curie
+) Tests of the new features pass, but some data are still shifted

File size: 25.0 KB
#include "distribution_client.hpp"

namespace xios {

CDistributionClient::CDistributionClient(int rank, int dims, CArray<size_t,1>* globalIndex)
   : CDistribution(rank, dims, globalIndex),
   axisDomainOrder_(), indexGlobalOnServer_(), isConnectedServerComputed_(false)
{
}

CDistributionClient::CDistributionClient(int rank, CGrid* grid)
   : CDistribution(rank, 0, 0), isConnectedServerComputed_(false)
{
  readDistributionInfo(grid);
  createGlobalIndex();
}

CDistributionClient::~CDistributionClient()
{
  if (0 != this->globalIndex_) delete globalIndex_;
  if (0 != localDataIndex_) delete localDataIndex_;
}

/*!
  Read the information of a grid to generate its distribution.
  Every grid is composed of one or several axes and/or domains. Their information is
processed and stored, then used to calculate the index distribution between client and server.
  \param [in] grid Grid to read
*/
void CDistributionClient::readDistributionInfo(CGrid* grid)
{
  std::vector<CDomain*> domList = grid->getDomains();
  std::vector<CAxis*> axisList = grid->getAxis();
  CArray<bool,1>& axisDomainOrder = grid->axisDomainOrder;

  std::vector<CDomain*>::iterator itbDom, iteDom, itDom;
  std::vector<CAxis*>::iterator itbAxis, iteAxis, itAxis;

  itbDom  = itDom  = domList.begin();  iteDom  = domList.end();
  itbAxis = itAxis = axisList.begin(); iteAxis = axisList.end();

  // First of all, every attribute of domain and axis should be checked
  for (;itDom != iteDom; ++itDom) (*itDom)->checkAttributesOnClient();
  for (;itAxis != iteAxis; ++itAxis) (*itAxis)->checkAttributes();

  // Then check mask of grid
  grid->checkMask();
  CArray<bool,3>& gridMask = grid->mask;

  ////////////////////////////////////////////////////////

  int gridDim = domList.size()*2 + axisList.size();

  // For now, just suppose that gridMask is all true, but we need to cope with this problem
  //  std::vector<std::vector<bool> > gridMask(gridDim);
//  int idxDomain = 0, idxAxis = 0;
//  for (int i = 0; i < axisDomainOrder.size(); ++i)
//  {
//    if (axisDomainOrder(i))
//    {
//      gridMask[idxDomain*2+i].resize(domList[idxDomain]->ni);
//      gridMask[idxDomain*2+i+1].resize(domList[idxDomain]->nj);
//      ++idxDomain;
//    }
//    else
//    {
//      gridMask[i].resize(axisList[idxAxis]->ni);
//      ++idxAxis;
//    }
//  }

  readDistributionInfo(domList, axisList, axisDomainOrder, gridMask);
}
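
// Illustrative sketch (not part of the original file): the dimensionality rule
// used above, in isolation. Each domain contributes two dimensions (i and j)
// and each axis contributes one; the helper name is hypothetical.
static int sketchGridDim(int nbDomain, int nbAxis)
{
  return nbDomain*2 + nbAxis;
}
// For example, a grid made of one (i,j) domain plus one vertical axis is 3D:
// sketchGridDim(1, 1) == 3.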

/*!
  Read information from the domain(s) and axis(es) to generate the distribution.
  All information related to a domain (e.g. ibegin, jbegin, ni, nj, ni_glo, nj_glo)
as well as to an axis (e.g. dataNIndex, dataIndex) is stored and used to compute
the distribution between clients and servers. For now, every data structure of the
domain is kept as before (e.g. data_n_index) to ensure compatibility; however, this should probably change.
  \param [in] domList List of domains of the grid
  \param [in] axisList List of axes of the grid
  \param [in] axisDomainOrder Order of the axes and domains inside the grid: true for a domain, false for an axis
  \param [in] gridMask Mask of the grid; for now it is kept three-dimensional, but this needs changing
*/
void CDistributionClient::readDistributionInfo(const std::vector<CDomain*>& domList,
                                               const std::vector<CAxis*>& axisList,
                                               const CArray<bool,1>& axisDomainOrder,
                                               const CArray<bool,3>& gridMask)
{
  numElement_ = axisDomainOrder.numElements(); // Number of elements, e.g. axis, domain

  axisDomainOrder_.resize(numElement_);
  axisDomainOrder_ = axisDomainOrder;

  // Each domain or axis has its own mask
  domainMasks_.resize(domList.size());
  for (int i = 0; i < domainMasks_.size(); ++i)
  {
    domainMasks_[i].resize(domList[i]->mask.extent(0), domList[i]->mask.extent(1));
    domainMasks_[i] = domList[i]->mask;
  }

  axisMasks_.resize(axisList.size());
  for (int i = 0; i < axisMasks_.size(); ++i)
  {
    axisMasks_[i].resize(axisList[i]->mask.numElements());
    axisMasks_[i] = axisList[i]->mask;
  }

  gridMask_.resize(gridMask.extent(0), gridMask.extent(1), gridMask.extent(2));
  gridMask_ = gridMask;

  // Because domains and axes can appear in any order (axis1, domain1, axis2, axis3, ...),
  // their positions must be recorded. In axisDomainOrder, domain == true, axis == false.
  int idx = 0;
  indexMap_.resize(numElement_);
  this->dims_ = numElement_;
  for (int i = 0; i < numElement_; ++i)
  {
    indexMap_[i] = idx;
    if (true == axisDomainOrder(i))
    {
      ++(this->dims_);
      idx += 2;
    }
    else ++idx; // an axis occupies a single flat slot
  }

  // Size of each dimension (local and global)
  nLocal_.resize(this->dims_);
  nGlob_.resize(this->dims_);
  nBeginLocal_.resize(this->dims_,0);
  nBeginGlobal_.resize(this->dims_,0);
  nZoomBegin_.resize(this->dims_);
  nZoomEnd_.resize(this->dims_);

  // Data_n_index of domain or axis (for now, an axis uses its size as data_n_index)
  dataNIndex_.resize(numElement_);
  dataDims_.resize(numElement_);
  dataBegin_.resize(this->dims_);

  // Data_*_index of each dimension
  dataIndex_.resize(this->dims_);

  // A trick to determine the position of each domain in domainList
  int domIndex = 0, axisIndex = 0;
  idx = 0;

  // Update all the vectors above
  while (idx < numElement_)
  {
    bool isDomain = axisDomainOrder(idx);

    // If this is a domain
    if (isDomain)
    {
      // On the j axis
      nLocal_.at(indexMap_[idx]+1) = domList[domIndex]->nj.getValue();
      nGlob_.at(indexMap_[idx]+1)  = domList[domIndex]->nj_glo.getValue();
      nBeginLocal_.at(indexMap_[idx]+1) = 0;
      nBeginGlobal_.at(indexMap_[idx]+1) = domList[domIndex]->jbegin;
      nZoomBegin_.at((indexMap_[idx]+1)) = domList[domIndex]->zoom_jbegin;
      nZoomEnd_.at((indexMap_[idx]+1))   = domList[domIndex]->zoom_jbegin + domList[domIndex]->zoom_nj-1;

      dataBegin_.at(indexMap_[idx]+1) = (2 == domList[domIndex]->data_dim) ? domList[domIndex]->data_jbegin.getValue() : -1;
      dataIndex_.at(indexMap_[idx]+1).resize(domList[domIndex]->data_j_index.numElements());
      dataIndex_.at(indexMap_[idx]+1) = domList[domIndex]->data_j_index;

      // On the i axis
      nLocal_.at(indexMap_[idx]) = domList[domIndex]->ni.getValue();
      nGlob_.at(indexMap_[idx]) = domList[domIndex]->ni_glo.getValue();
      nBeginLocal_.at(indexMap_[idx]) = 0;
      nBeginGlobal_.at(indexMap_[idx]) = domList[domIndex]->ibegin;
      nZoomBegin_.at((indexMap_[idx])) = domList[domIndex]->zoom_ibegin;
      nZoomEnd_.at((indexMap_[idx]))   = domList[domIndex]->zoom_ibegin + domList[domIndex]->zoom_ni-1;

      dataBegin_.at(indexMap_[idx]) = domList[domIndex]->data_ibegin.getValue();
      dataIndex_.at(indexMap_[idx]).resize(domList[domIndex]->data_i_index.numElements());
      dataIndex_.at(indexMap_[idx]) = domList[domIndex]->data_i_index;

      dataNIndex_.at(idx) = domList[domIndex]->data_n_index.getValue();
      dataDims_.at(idx) = domList[domIndex]->data_dim.getValue();
      ++domIndex;
    }
    else // So it's an axis
    {
      nLocal_.at(indexMap_[idx]) = axisList[axisIndex]->zoom_size.getValue();
      nGlob_.at(indexMap_[idx]) = axisList[axisIndex]->size.getValue();
      nBeginLocal_.at(indexMap_[idx]) = axisList[axisIndex]->zoom_begin.getValue(); //ibegin.getValue();
      nBeginGlobal_.at(indexMap_[idx]) = axisList[axisIndex]->ibegin.getValue();
      nZoomBegin_.at((indexMap_[idx])) = axisList[axisIndex]->zoom_begin;
      nZoomEnd_.at((indexMap_[idx])) = axisList[axisIndex]->zoom_begin + axisList[axisIndex]->zoom_size-1;

      dataBegin_.at(indexMap_[idx]) = axisList[axisIndex]->data_begin.getValue();
      dataIndex_.at(indexMap_[idx]).resize(axisList[axisIndex]->data_index.numElements());
      dataIndex_.at(indexMap_[idx]) = axisList[axisIndex]->data_index;
      dataNIndex_.at(idx) = axisList[axisIndex]->data_index.numElements();
      dataDims_.at(idx) = 1;
      ++axisIndex;
    }
    ++idx;
  }
}
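
// Illustrative sketch (not part of the original file): how indexMap_ and dims_
// are laid out by the loop above, using std::vector<bool> instead of CArray so
// the example is self-contained. A domain occupies two consecutive flat slots
// (i and j); an axis occupies one. The helper name is hypothetical.
static std::vector<int> sketchIndexMap(const std::vector<bool>& axisDomainOrder, int& dims)
{
  std::vector<int> indexMap(axisDomainOrder.size());
  int idx = 0;
  dims = static_cast<int>(axisDomainOrder.size());
  for (size_t i = 0; i < axisDomainOrder.size(); ++i)
  {
    indexMap[i] = idx;
    if (axisDomainOrder[i]) { ++dims; idx += 2; }
    else ++idx;
  }
  return indexMap;
}
// For the order (domain, axis), i.e. {true, false}, this yields
// indexMap = {0, 2} and dims = 3: the domain fills slots 0 and 1, the axis slot 2.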

/*!
  Create the local index of the domain(s).
  The data index of a domain may also contain "ghost" points, which very often
surround the true data. In order to send the correct data to a server,
a client needs to know the indexes of the true data.
*/
void CDistributionClient::createLocalDomainDataIndex()
{
  int numDomain = 0;
  for (int i = 0; i < axisDomainOrder_.numElements(); ++i)
    if (axisDomainOrder_(i)) ++numDomain;

  localDomainIndex_.resize(numDomain*2);
  indexDomainData_.resize(numDomain);

  int idxDomain = 0;
  for (int i = 0; i < axisDomainOrder_.numElements(); ++i)
  {
    if (axisDomainOrder_(i))
    {
      int iIdx, jIdx = 0, count = 0;
      indexDomainData_[idxDomain].resize(dataNIndex_[i], false);
      for (int j = 0; j < dataNIndex_[i]; ++j)
      {
        iIdx = getDomainIndex(dataIndex_[indexMap_[i]](j), dataIndex_[indexMap_[i]+1](j),
                              dataBegin_[indexMap_[i]], dataBegin_[indexMap_[i]+1],
                              dataDims_[i], nLocal_[indexMap_[i]], jIdx);

        if ((iIdx >= nBeginLocal_[indexMap_[i]]) && (iIdx < nLocal_[indexMap_[i]]) &&
           (jIdx >= nBeginLocal_[indexMap_[i]+1]) && (jIdx < nLocal_[indexMap_[i]+1]) &&
           (domainMasks_[idxDomain](iIdx, jIdx)))
        {
          (localDomainIndex_[idxDomain*2]).push_back(iIdx);   // i-components live at slot 2*idxDomain
          (localDomainIndex_[idxDomain*2+1]).push_back(jIdx); // j-components at slot 2*idxDomain+1
          indexDomainData_[idxDomain][j] = true;
        }
      }
      ++idxDomain;
    }
  }
}
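
// Illustrative sketch (not part of the original file): the filtering rule the
// loop above applies to each data point of a domain. A point is "true data"
// only if its (i,j) falls inside the local extent and the domain mask holds
// there; everything else is treated as ghost data. Names are hypothetical.
static bool sketchIsTrueDomainData(int iIdx, int jIdx, int ni, int nj,
                                   const std::vector<std::vector<bool> >& mask)
{
  return (iIdx >= 0) && (iIdx < ni) &&
         (jIdx >= 0) && (jIdx < nj) &&
         mask[iIdx][jIdx];
}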

/*!
  Create the local index of the axes.
*/
void CDistributionClient::createLocalAxisDataIndex()
{
  int numAxis = 0;
  for (int i = 0; i < axisDomainOrder_.numElements(); ++i)
    if (!axisDomainOrder_(i)) ++numAxis;

  localAxisIndex_.resize(numAxis);

  int idxAxis = 0;
  for (int i = 0; i < axisDomainOrder_.numElements(); ++i)
  {
    if (!axisDomainOrder_(i))
    {
      int iIdx = 0;
      for (int j = 0; j < dataNIndex_[i]; ++j)
      {
        iIdx = getAxisIndex(dataIndex_[indexMap_[i]](j), dataBegin_[indexMap_[i]], nLocal_[indexMap_[i]]);
        if ((iIdx >= nBeginLocal_[indexMap_[i]]) &&
           (iIdx < nLocal_[indexMap_[i]]) && (axisMasks_[idxAxis](iIdx)))
        {
          localAxisIndex_[idxAxis].push_back(iIdx);
        }
      }
      ++idxAxis;
    }
  }
}

/*!
   Create the global index on the client.
   In order to do the client-server mapping, each client creates its own
global index of the data it sends. This global index is then used to calculate to which
server the client needs to send its data, as well as which part of the data belongs to that server.
So that clients and servers stay coherent in index order, the global index is calculated
following the C convention: the rightmost dimension varies fastest.
*/
void CDistributionClient::createGlobalIndex()
{
  createLocalDomainDataIndex();
  createLocalAxisDataIndex();

  int idxDomain = 0, idxAxis = 0;
  std::vector<int> eachElementSize(numElement_);

  // Precompute size of the loop
  for (int i = 0; i < numElement_; ++i)
  {
    if (axisDomainOrder_(i))
    {
      eachElementSize[i] = localDomainIndex_[idxDomain].size();
      idxDomain += 2;
    }
    else
    {
      eachElementSize[i] = localAxisIndex_[idxAxis].size();
      ++idxAxis;
    }
  }

  // Compute the size of the global index on the client
  std::vector<int> idxLoop(numElement_,0);
  std::vector<int> currentIndex(this->dims_);
  int innerLoopSize = eachElementSize[0];
  size_t idx = 0, indexLocalDataOnClientCount = 0, indexSend2ServerCount = 0;
  size_t ssize = 1;
  for (int i = 0; i < numElement_; ++i) ssize *= eachElementSize[i];
  while (idx < ssize)
  {
    for (int i = 0; i < numElement_-1; ++i)
    {
      if (idxLoop[i] == eachElementSize[i])
      {
        idxLoop[i] = 0;
        ++idxLoop[i+1];
      }
    }

    // Outer index
    idxDomain = idxAxis = 0;
    for (int i = 1; i < numElement_; ++i)
    {
      if (axisDomainOrder_(i))
      {
        currentIndex[indexMap_[i]]   = localDomainIndex_[idxDomain][idxLoop[i]];
        currentIndex[indexMap_[i]+1] = localDomainIndex_[idxDomain+1][idxLoop[i]];
        idxDomain += 2;
      }
      else
      {
        currentIndex[indexMap_[i]]   = localAxisIndex_[idxAxis][idxLoop[i]];
        ++idxAxis;
      }
    }

    // Innermost index
    idxDomain = idxAxis = 0;
    for (int i = 0; i < innerLoopSize; ++i)
    {
      if (axisDomainOrder_(0))
      {
        currentIndex[0] = localDomainIndex_[idxDomain][i];
        currentIndex[1] = localDomainIndex_[idxDomain+1][i];
      }
      else currentIndex[0]   = localAxisIndex_[idxAxis][i];

      if (gridMask_(currentIndex[0], currentIndex[1], currentIndex[2]))
      {
        ++indexLocalDataOnClientCount;
        bool isIndexOnServer = true;
        for (int j = 0; j < this->dims_; ++j)
          isIndexOnServer = isIndexOnServer && ((currentIndex[j]+nBeginGlobal_[j]) <= nZoomEnd_[j])
                                            && (nZoomBegin_[j] <= (currentIndex[j]+nBeginGlobal_[j]));
        if (isIndexOnServer) ++indexSend2ServerCount;
      }
    }
    idxLoop[0] += innerLoopSize;
    idx += innerLoopSize;
  }

  // Fill in the global index
  this->globalIndex_ = new CArray<size_t,1>(indexSend2ServerCount);
  localDataIndex_ = new CArray<int,1>(indexLocalDataOnClientCount);

  eachElementSize = dataNIndex_;
  innerLoopSize = eachElementSize[0];
  ssize = 1; for (int i = 0; i < numElement_; ++i) ssize *= eachElementSize[i];
  idxLoop.assign(numElement_,0);
  idx = indexLocalDataOnClientCount = indexSend2ServerCount = 0;
  int count = 0;
  while (idx < ssize)
  {
    for (int i = 0; i < numElement_-1; ++i)
    {
      if (idxLoop[i] == eachElementSize[i])
      {
        idxLoop[i] = 0;
        ++idxLoop[i+1];
      }
    }

    // Outer index
    idxDomain = idxAxis = 0;
    bool isIndexDataCorrect = false;
    for (int i = 1; i < numElement_; ++i)
    {
      if (axisDomainOrder_(i))
      {
        if (indexDomainData_[idxDomain][idxLoop[i]])
        {
          currentIndex[indexMap_[i]]   = localDomainIndex_[idxDomain*2][idxLoop[i]];   // i-components live at slot 2*idxDomain
          currentIndex[indexMap_[i]+1] = localDomainIndex_[idxDomain*2+1][idxLoop[i]];
          isIndexDataCorrect = true;
        }
        ++idxDomain;
      }
      else
      {
        currentIndex[indexMap_[i]]   = localAxisIndex_[idxAxis][idxLoop[i]];
        ++idxAxis;
      }
    }

    // Innermost index
    idxDomain = idxAxis = 0;
    int correctIndexDomain = 0;
    for (int i = 0; i < innerLoopSize; ++i)
    {
      if (axisDomainOrder_(0))
      {
        if (indexDomainData_[idxDomain][i])
        {
          currentIndex[0] = localDomainIndex_[idxDomain][correctIndexDomain];
          currentIndex[1] = localDomainIndex_[idxDomain+1][correctIndexDomain];
          isIndexDataCorrect = true;
          ++correctIndexDomain;
        } else isIndexDataCorrect = false;
      }
      else
      {
        currentIndex[0]   = localAxisIndex_[idxAxis][i];
      }

      if (isIndexDataCorrect && gridMask_(currentIndex[0], currentIndex[1], currentIndex[2]))
      {
        (*localDataIndex_)(indexLocalDataOnClientCount) = count;
        ++indexLocalDataOnClientCount;

        bool isIndexOnServer = true;
        for (int j = 0; j < this->dims_; ++j)
          isIndexOnServer = isIndexOnServer && ((currentIndex[j]+nBeginGlobal_[j]) <= nZoomEnd_[j])
                                            && (nZoomBegin_[j] <= (currentIndex[j]+nBeginGlobal_[j]));
        if (isIndexOnServer)
        {
          size_t mulDim = 1;
          size_t globalIndex = currentIndex[0] + nBeginGlobal_[0];
          for (int k = 1; k < this->dims_; ++k)
          {
            mulDim *= nGlob_[k-1];
            globalIndex += (currentIndex[k] + nBeginGlobal_[k])*mulDim;
          }
          (*this->globalIndex_)(indexSend2ServerCount) = globalIndex;
          ++indexSend2ServerCount;
        }
      }
      ++count;
    }
    idxLoop[0] += innerLoopSize;
    idx += innerLoopSize;
  }

//  std::cout << "global index " << *this->globalIndex_ << std::endl;
//  std::cout << "local index " << *localDataIndex_ << std::endl;
}
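
// Illustrative sketch (not part of the original file): the flattening formula
// used in the loop above, in isolation. With xk = currentIndex[k] + nBeginGlobal_[k],
// the global index is
//   g = x0 + x1*n0 + x2*n0*n1 + ...
// i.e. dimension 0 is contiguous and each further dimension is weighted by the
// product of the global sizes of the dimensions before it. Names are hypothetical.
static size_t sketchFlattenIndex(const std::vector<int>& x, const std::vector<int>& nGlob)
{
  size_t mulDim = 1;
  size_t globalIndex = x[0];
  for (size_t k = 1; k < x.size(); ++k)
  {
    mulDim *= nGlob[k-1];
    globalIndex += x[k]*mulDim;
  }
  return globalIndex;
}
// For example, with nGlob = {4, 3}, the point x = {2, 1} maps to 2 + 1*4 = 6.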

/*!
  Retrieve the i and j indexes of a domain from its data index.
  Data contains not only the true data, which is sent to servers, but also ghost data,
which very often forms a border around the local data, and the data index covers both.
Because the data of a domain can be one- or two-dimensional, the data index needs to be
converted into a domain index.
  \param [in] dataIIndex i index of data
  \param [in] dataJIndex j index of data
  \param [in] dataIBegin begin index of i data
  \param [in] dataJBegin begin index of j data
  \param [in] dataDim dimension of data (1 or 2)
  \param [in] ni local size ni of domain
  \param [out] j j index of domain
  \return i index of domain
*/
int CDistributionClient::getDomainIndex(const int& dataIIndex, const int& dataJIndex,
                                        const int& dataIBegin, const int& dataJBegin,
                                        const int& dataDim, const int& ni, int& j)
{
  int tempI = dataIIndex + dataIBegin,
      tempJ = (1 == dataDim) ? -1
                             : (dataJIndex + dataJBegin);
  int i = (dataDim == 1) ? (tempI - 1) % ni
                         : (tempI - 1);
  j = (dataDim == 1) ? (tempI - 1) / ni
                     : (tempJ - 1);

  return i;
}
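
// Worked example (not part of the original file), following the one-based
// convention used above: for a domain with local size ni = 4 and a
// one-dimensional data point with dataIIndex + dataIBegin == 7,
//   i = (7 - 1) % 4 = 2   and   j = (7 - 1) / 4 = 1,
// i.e. the seventh point of the flattened (i,j) plane sits at column 2, row 1.
// For two-dimensional data, i and j are simply the begin-shifted indexes
// decremented by one.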

/*!
  Retrieve the index of an axis from its data index.
  \param [in] dataIndex index of data
  \param [in] dataBegin begin index of data
  \param [in] ni local size of axis
  \return index of the axis
*/
int CDistributionClient::getAxisIndex(const int& dataIndex, const int& dataBegin, const int& ni)
{
   int tempI = dataIndex + dataBegin;
   return ((tempI-1)%ni);
//   return ((tempI)%ni);
}
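
// Worked example (not part of the original file): with a local axis size
// ni = 5 and dataIndex + dataBegin == 3 (one-based, as above), the local axis
// index is (3 - 1) % 5 == 2.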

/*!
  Compute the global index of each server, distributed by band.
  The classic distribution among servers: each server takes charge of writing one of the
blocks into which the grid is divided along its second dimension. If the grid contains a domain,
this second dimension is nj.
  \param [in] nServer number of servers
  \return vector of pointers to the arrays of global indexes of the servers
*/
std::vector<CArray<size_t,1>* > CDistributionClient::computeServerBandDistribution(int nServer)
{
  // It is not wise to allocate dynamic memory inside one function and deallocate it in another,
  // but it is a way to free a large amount of unnecessary memory.
  // This function must NEVER be made public.
  size_t ssize = 1, idx = 0;
  for (int i = 0; i < nGlob_.size(); ++i) ssize *= nGlob_[i];
  std::vector<int> idxLoop(this->dims_,0);
  std::vector<int> indexServer(nServer,0);
  int njRangeSize;
  std::vector<int> njRangeBegin(nServer,0);
  std::vector<int> njRangeEnd(nServer,0);
  std::vector<CArray<size_t,1>* > globalIndexServer(nServer);

  int innerLoopSize = nGlob_[0], idxServer;
  if (1<nGlob_.size())
  {
    for (int i = 0; i < nServer; ++i)
    {
      if (0 < i) njRangeBegin[i] = njRangeEnd[i-1];
      njRangeSize = nGlob_[1] / nServer;
      if (i < nGlob_[1]%nServer) ++njRangeSize;
      njRangeEnd[i] = njRangeSize + njRangeBegin[i];
    }
    njRangeEnd[nServer-1] = nGlob_[1];

    // Compute size of each global index server array
    while (idx < ssize)
    {
      for (int i = 0; i < this->dims_-1; ++i)
      {
        if (idxLoop[i] == nGlob_[i])
        {
          idxLoop[i] = 0;
          ++idxLoop[i+1];
        }
      }

      for (int i = 0; i < nServer; ++i)
        if ((njRangeBegin[i]<=idxLoop[1]) && (idxLoop[1] < njRangeEnd[i]))
        {
          idxServer = i;
          break;
        }

      indexServer[idxServer] += innerLoopSize;
      idxLoop[0] += innerLoopSize;
      idx += innerLoopSize;
    }

    for (int i = 0; i < nServer; ++i) globalIndexServer[i] = new CArray<size_t,1>(indexServer[i]);

    // Fill in each global index server array
    idx = 0;
    idxLoop.assign(this->dims_,0);
    indexServer.assign(nServer,0);
    size_t globalIndex = 0;
    while (idx < ssize)
    {
      for (int i = 0; i < this->dims_-1; ++i)
      {
        if (idxLoop[i] == nGlob_[i])
        {
          idxLoop[i] = 0;
          ++idxLoop[i+1];
        }
      }

      for (int i = 0; i < nServer; ++i)
        if ((njRangeBegin[i]<=idxLoop[1]) && (idxLoop[1] < njRangeEnd[i]))
        {
          idxServer = i;
          break;
        }

      for (int i = 0; i < innerLoopSize; ++i)
      {
        (*globalIndexServer[idxServer])(indexServer[idxServer]) = globalIndex;
        ++indexServer[idxServer];
        ++globalIndex;
      }
      idxLoop[0] += innerLoopSize;
      idx += innerLoopSize;
    }
  }

  return globalIndexServer;
}
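
// Illustrative sketch (not part of the original file): the band splitting used
// above, in isolation. nGlob_[1] rows are divided into nServer contiguous
// bands, and the remainder rows go one by one to the lowest-ranked servers.
// The helper name is hypothetical.
static std::vector<int> sketchBandBegins(int njGlo, int nServer)
{
  std::vector<int> njRangeBegin(nServer, 0);
  for (int i = 1; i < nServer; ++i)
  {
    int njRangeSize = njGlo / nServer;
    if ((i-1) < njGlo % nServer) ++njRangeSize;
    njRangeBegin[i] = njRangeBegin[i-1] + njRangeSize;
  }
  return njRangeBegin;
}
// For example, sketchBandBegins(10, 3) gives begins {0, 4, 7}, i.e. the bands
// [0,4), [4,7) and [7,10).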

/*!
  Compute the index mapping between clients and servers.
  Using the global index of the data on clients and servers, each client calculates which
part of its data will be sent to the corresponding server. After this function is called, a
client can use all the computed information to send the correct data to the servers.
  \param [in] nServer number of servers
  \param [in] distributionType type of distribution, e.g. band or plane
*/
void CDistributionClient::computeServerIndexMapping(int nServer, ServerDistributionType distributionType)
{
  std::vector<CArray<size_t,1>* > globalIndexServer;

  switch (distributionType)
  {
    case BAND_DISTRIBUTION:
      globalIndexServer = computeServerBandDistribution(nServer);
      break;
    default:
      break;
  }

  std::vector<CArray<size_t,1>::const_iterator> itBegin(nServer), itEnd(nServer), it(nServer);

  for (int i = 0; i < nServer; ++i)
  {
    itBegin[i] = globalIndexServer[i]->begin();
    itEnd[i]   = globalIndexServer[i]->end();
  }

  size_t ssize = (this->globalIndex_)->numElements();
  for (int i = 0; i < ssize; ++i)
  {
    for (int j = 0; j < nServer; ++j)
    {
      // Temporary linear search; this is very inefficient.
//      if (std::binary_search(itBegin[j], itEnd[j], (*this->globalIndex_)(i)))
//      if (itEnd[j] != std::find(itBegin[j], itEnd[j], (*this->globalIndex_)(i)))
      it[j] = std::find(itBegin[j], itEnd[j], (*this->globalIndex_)(i));
      if (itEnd[j] != it[j])
      {
//        (indexGlobalOnServer_[j]).push_back((*this->globalIndex_)(i));
        // Tentatively compute the server-local index on the client side
        (indexGlobalOnServer_[j]).push_back(std::distance(itBegin[j], it[j]));
        (localIndexSend2Server_[j]).push_back(i);
        continue;
      }
    }
  }

  for (int i = 0; i < nServer; ++i)
  {
    if (indexGlobalOnServer_[i].empty()) indexGlobalOnServer_.erase(i);
    if (localIndexSend2Server_[i].empty()) localIndexSend2Server_.erase(i);
  }

  for (int i = 0; i < nServer; ++i)
    if (0 != globalIndexServer[i]) delete globalIndexServer[i];
}
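
// Possible optimisation (not part of the original file): the arrays returned
// by computeServerBandDistribution are filled in increasing order of global
// index, so the linear std::find above could be replaced by a binary search,
// which the commented-out std::binary_search already hints at. A sketch:
//
//   it[j] = std::lower_bound(itBegin[j], itEnd[j], (*this->globalIndex_)(i));
//   if (itEnd[j] != it[j] && *it[j] == (*this->globalIndex_)(i)) { /* found */ }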

/*!
  Compute how many clients each server will receive data from.
  One client can send data to several servers, just as one server can receive data originating
from several clients. In order to write the data correctly, each server must know from how many
clients it receives data.
  \param [in] nbServer number of servers
  \param [in] nbClient number of clients
  \param [in] clientIntraComm MPI communicator of the clients
  \return mapping of server rank to the number of connected clients
*/
std::map<int,int> CDistributionClient::computeConnectedClients(int nbServer, int nbClient, MPI_Comm& clientIntraComm)
{
  if (isConnectedServerComputed_) return connectedClients_;
  std::map<int, std::vector<size_t> >::const_iterator itbMap, iteMap, it;
  itbMap = it = indexGlobalOnServer_.begin();
  iteMap = indexGlobalOnServer_.end();

  std::vector<int> connectedServer;
  std::vector<bool> isConnected(nbServer,false);

  for (it = itbMap; it != iteMap; ++it)
  {
    for (int serverNum = 0; serverNum < nbServer; ++serverNum)
      if (it->first == serverNum) isConnected[serverNum] = true;
  }

  for(int serverNum = 0; serverNum<nbServer; ++serverNum)
    if (isConnected[serverNum])
      connectedServer.push_back(serverNum);

  int nbConnectedServer=connectedServer.size();
  int* recvCount=new int[nbClient];
  int* displ=new int[nbClient];
  int* sendBuff=new int[nbConnectedServer];
  valarray<int> clientRes(0,nbServer);

  for(int n=0;n<nbConnectedServer;n++) sendBuff[n]=connectedServer[n] ;

  // get connected server for everybody
  MPI_Allgather(&nbConnectedServer,1,MPI_INT,recvCount,1,MPI_INT,clientIntraComm) ;

  displ[0]=0 ;
  for(int n=1;n<nbClient;n++) displ[n]=displ[n-1]+recvCount[n-1] ;
  int recvSize=displ[nbClient-1]+recvCount[nbClient-1] ;
  int* recvBuff=new int[recvSize] ;

  MPI_Allgatherv(sendBuff,nbConnectedServer,MPI_INT,recvBuff,recvCount,displ,MPI_INT,clientIntraComm) ;
  for(int n=0;n<recvSize;n++) clientRes[recvBuff[n]]++ ;

//  std::map<int,int> nbSenders;
  for(int n=0;n<nbConnectedServer;n++)
  {
    connectedClients_[connectedServer[n]] = clientRes[connectedServer[n]];
  }

  isConnectedServerComputed_ = true;

  delete [] recvCount ;
  delete [] displ ;
  delete [] sendBuff ;
  delete [] recvBuff ;

  return connectedClients_;
}
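
// Summary sketch (not part of the original file) of the collective pattern
// used above, step by step:
//   1. MPI_Allgather : recvCount[c] = number of servers client c talks to
//   2. prefix sum    : displ[c]     = offset of client c's list in recvBuff
//   3. MPI_Allgatherv: recvBuff     = concatenation of all clients' server ranks
//   4. tally         : clientRes[s] = how many clients mentioned server s
// Each client then keeps, for every server it is connected to, the number of
// clients connected to that same server.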

/*!
  Return the local indexes of the data sent to each server.
  \return mapping of server rank to the local indexes of the data sent by the client
*/
const std::map<int, std::vector<int> >& CDistributionClient::getLocalIndexSendToServer() const
{
  return localIndexSend2Server_;
}

/*!
  Return the local data index of the client.
*/
const CArray<int,1>& CDistributionClient::getLocalDataIndexOnClient() const
{
  return (*localDataIndex_);
}

/*!
  Return the global index of the data on each connected server.
  On receiving data sent from the client(s), each server uses this global index to
know where the data should be written.
  \return mapping of server rank to its global index
*/
const std::map<int, std::vector<size_t> >& CDistributionClient::getGlobalIndexOnServer() const
{
  return indexGlobalOnServer_;
}

} // namespace xios