source: XIOS/trunk/src/filter/transformation_mapping.cpp @ 622

Last change on this file since 622 was 622, checked in by mhnguyen, 9 years ago

Final testing of the transformation algorithm: inverse axis (local commit)

+) Make some minor changes to ensure that one element (axis or domain) can have several similar transformations

Test
+) On Curie
+) test_new_feature: test passed with correct data written

File size: 6.7 KB
#include "transformation_mapping.hpp"
#include <boost/unordered_map.hpp>
#include "context.hpp"
#include "context_client.hpp"
#include "distribution_client.hpp"

namespace xios {

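// Rough usage sketch (illustrative only; the names gridDest, gridSrc and destToSrcMap are
// placeholders, not part of XIOS): a transformation algorithm builds the destination->source
// global index map, then asks this class which clients it must exchange indexes with:
//   CTransformationMapping mapping(gridDest, gridSrc);
//   mapping.computeTransformationMapping(destToSrcMap);
//   const std::map<int,std::vector<std::vector<size_t> > >& toReceive = mapping.getGlobalIndexReceivedOnGridDestMapping();
//   const std::map<int,std::vector<size_t> >& toSend = mapping.getGlobalIndexSendToGridDestMapping();
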
CTransformationMapping::CTransformationMapping(CGrid* destination, CGrid* source)
  : gridSource_(source), gridDestination_(destination)
{
  CContext* context = CContext::getCurrent();
  CContextClient* client=context->client;
  int clientRank = client->clientRank;

  CDistributionClient distributionClientDest(client->clientRank, gridSource_);

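  // Register every global index of the source grid held by this client under this client's rank;
  // the distributed mapping built below lets any client find out which rank holds a given source index.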
  const CArray<size_t,1>& globalIndexGridSrc = distributionClientDest.getGlobalDataIndexSendToServer(); //gridSource_->getDistributionClient()->getGlobalDataIndexSendToServer();
  boost::unordered_map<size_t,int> globalIndexOfServer;
  int globalIndexSize = globalIndexGridSrc.numElements();
  for (int idx = 0; idx < globalIndexSize; ++idx)
  {
    globalIndexOfServer[globalIndexGridSrc(idx)] = clientRank;
  }

  gridIndexClientClientMapping_ = new CClientServerMappingDistributed(globalIndexOfServer,
                                                                      client->intraComm,
                                                                      true);
}

CTransformationMapping::~CTransformationMapping()
{
  if (0 != gridIndexClientClientMapping_) delete gridIndexClientClientMapping_;
}

/*!
  Suppose that we have transformations between two grids, represented as a mapping between the global indexes of these two grids.
This function determines to which clients a client needs to send, and from which clients it needs to receive, these global indexes in order to accomplish the transformations.
  The grid destination is the grid whose global indexes request global indexes from the grid source.
  Grid destination and grid source are both distributed among the clients, but in different manners.
  \param [in] globaIndexMapFromDestToSource mapping representing the transformations (destination global index -> requested source global indexes)
*/
void CTransformationMapping::computeTransformationMapping(const std::map<size_t, std::set<size_t> >& globaIndexMapFromDestToSource)
{
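  // Illustrative example (assumed values, not taken from an actual run): for an inverted axis of
  // four points, globaIndexMapFromDestToSource could be {0 -> {3}, 1 -> {2}, 2 -> {1}, 3 -> {0}},
  // i.e. each destination index requests exactly one source index.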
  CContext* context = CContext::getCurrent();
  CContextClient* client=context->client;

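  // Count the total number of (destination index, source index) pairs involved in the transformations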
  int numMappingPoints = 0;
  std::map<size_t, std::set<size_t> >::const_iterator itbMap = globaIndexMapFromDestToSource.begin(), itMap,
                                                      iteMap = globaIndexMapFromDestToSource.end();
  for (itMap = itbMap; itMap != iteMap; ++itMap)
  {
    numMappingPoints += (itMap->second).size();
  }

  // All global indexes of the grid source requested by this client for its part of the grid destination
  CArray<size_t,1> globalIndexMap(numMappingPoints);
  // Not only can one index on the grid destination request several indexes from the grid source,
  // but one index on the grid source may also have to be sent to several indexes of the grid destination
  std::map<size_t, std::vector<size_t> > globalIndexMapFromSrcToDest;
  std::set<size_t>::const_iterator itbSet, itSet, iteSet;
  int idx = 0;
  for (itMap = itbMap; itMap != iteMap; ++itMap)
  {
    itbSet = (itMap->second).begin();
    iteSet = (itMap->second).end();
    for (itSet = itbSet; itSet != iteSet; ++itSet)
    {
      globalIndexMap(idx) = *itSet;
      globalIndexMapFromSrcToDest[*itSet].push_back(itMap->first);
      ++idx;
    }
  }

  // Find out on which clients the requested global indexes of the grid source are located.
  gridIndexClientClientMapping_->computeServerIndexMapping(globalIndexMap);
  const std::map<int, std::vector<size_t> >& globalIndexSentFromGridSource = gridIndexClientClientMapping_->getGlobalIndexOnServer();
  std::map<int, std::vector<size_t> >::const_iterator itbMapSrc = globalIndexSentFromGridSource.begin(), itMapSrc,
                                                      iteMapSrc = globalIndexSentFromGridSource.end();
  std::vector<size_t>::const_iterator itbVec, itVec, iteVec;
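  // For each "source client" rank, record which destination indexes are fed by every source index
  // that will be received from that rank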
  for (itMapSrc = itbMapSrc; itMapSrc != iteMapSrc; ++itMapSrc)
  {
    int sourceRank = itMapSrc->first;
    itbVec = (itMapSrc->second).begin();
    iteVec = (itMapSrc->second).end();
    for (itVec = itbVec; itVec != iteVec; ++itVec)
    {
       (globalIndexReceivedOnGridDestMapping_[sourceRank]).push_back(globalIndexMapFromSrcToDest[*itVec]);
    }
  }

  // Inform each client of the destinations to which it needs to send global indexes
  int nbClient = client->clientSize;
  int* sendBuff = new int[nbClient];
  int* recvBuff = new int[nbClient];
  for (int i = 0; i < nbClient; ++i) sendBuff[i] = 0;

  // First of all, let each client know from how many clients it will receive lists of requested indexes
  for (itMapSrc = itbMapSrc; itMapSrc != iteMapSrc; ++itMapSrc) sendBuff[itMapSrc->first] = 1;
  MPI_Allreduce(sendBuff, recvBuff, nbClient, MPI_INT, MPI_SUM, client->intraComm);
  int numClientToReceive = recvBuff[client->clientRank];

  // Then determine the size of the receive buffer; since the messages are received one by one, only the maximum message size is needed
  for (itMapSrc = itbMapSrc; itMapSrc != iteMapSrc; ++itMapSrc) sendBuff[itMapSrc->first] = (itMapSrc->second).size();
  MPI_Allreduce(sendBuff, recvBuff, nbClient, MPI_INT, MPI_MAX, client->intraComm);

  int buffSize = recvBuff[client->clientRank];
  unsigned long* recvBuffGlobalIndex = 0;
  if (0 != buffSize) recvBuffGlobalIndex = new unsigned long [buffSize];

  // Inform all the "source clients" of the indexes they need to send
  for (itMapSrc = itbMapSrc; itMapSrc != iteMapSrc; ++itMapSrc)
  {
    MPI_Request request;
    unsigned long* sendPtr = const_cast<unsigned long*>(&(itMapSrc->second)[0]);
    MPI_Isend(sendPtr,
              (itMapSrc->second).size(),
              MPI_UNSIGNED_LONG,
              itMapSrc->first,
              11,
              client->intraComm,
              &request);
  }

  // Now all the "source clients" listen for messages from the "destination clients"
  int numClientReceived = 0;  // number of clients from which a request has already been received
  int countBuff;
  while (numClientReceived < numClientToReceive)
  {
    MPI_Status status;
    MPI_Recv(recvBuffGlobalIndex,
             buffSize,
             MPI_UNSIGNED_LONG,
             MPI_ANY_SOURCE,
             11,
             client->intraComm,
             &status);

    MPI_Get_count(&status, MPI_UNSIGNED_LONG, &countBuff);
    int clientDestRank = status.MPI_SOURCE;
    for (int idx = 0; idx < countBuff; ++idx)
    {
      globalIndexSendToGridDestMapping_[clientDestRank].push_back(recvBuffGlobalIndex[idx]);
    }
    ++numClientReceived;
  }

  delete [] sendBuff;
  delete [] recvBuff;
  if (0 != buffSize) delete [] recvBuffGlobalIndex;
}

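/*!
  Return, for each rank of a "source client", the destination global indexes to which each value received from that rank contributes.
*/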
const std::map<int,std::vector<std::vector<size_t> > >& CTransformationMapping::getGlobalIndexReceivedOnGridDestMapping() const
{
  return globalIndexReceivedOnGridDestMapping_;
}

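/*!
  Return, for each rank of a "destination client", the source global indexes that this client has to send to that rank.
*/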
const std::map<int,std::vector<size_t> >& CTransformationMapping::getGlobalIndexSendToGridDestMapping() const
{
  return globalIndexSendToGridDestMapping_;
}

}