source: XIOS3/trunk/src/policy.cpp @ 2622

Last change on this file since 2622 was 2589, checked in by jderouillat, 10 months ago

Specify the usage of the xios namespace to overload the MPI functions
/*!
   \file policy.cpp
   \author Ha NGUYEN
   \since 06 Oct 2015
   \date 06 Oct 2015

   \brief Some useful policies for templated classes
*/

#include "policy.hpp"
#include <cmath>

namespace xios
{
///*!
//  Calculate MPI communicator for each level of hierarchy.
//  \param[in] mpiCommRoot MPI communicator of the level 0 (usually communicator of all clients)
//  \param[in] levels number of levels in the hierarchy
//*/
//void DivideCommByTwo::computeMPICommLevel(const MPI_Comm& mpiCommRoot, int levels)
//{
//  int nbProc;
//  MPI_Comm_size(mpiCommRoot,&nbProc);
//  if (levels > nbProc) levels = std::log10(nbProc) * 3.3219; // log2(x) = log2(10) * log10(x); stupid C++98
//  else if (1 > levels) levels = 1;
//
//  commLevel_.push_back(mpiCommRoot);
//  divideMPICommLevel(mpiCommRoot, levels);
//}
//
///*!
//  Divide each MPI communicator into sub-communicators. Recursive function.
//  \param [in] mpiCommLevel MPI communicator of current level
//  \param [in] level current level
//*/
//void DivideCommByTwo::divideMPICommLevel(const MPI_Comm& mpiCommLevel, int level)
//{
//  int clientRank;
//  MPI_Comm_rank(mpiCommLevel,&clientRank);
//
//  --level;
//  if (0 < level)
//  {
//    int color = clientRank % 2;
//    commLevel_.push_back(MPI_Comm());
//    xios::MPI_Comm_split(mpiCommLevel, color, 0, &(commLevel_.back()));
//    divideMPICommLevel(commLevel_.back(), level);
//  }
//}

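/*!
  Build the division policy on top of an MPI communicator.
  The hierarchy of groups is only computed on the first call to computeMPICommLevel().
  \param [in] mpiComm MPI communicator to divide (usually the communicator of all clients)
*/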
DivideAdaptiveComm::DivideAdaptiveComm(const MPI_Comm& mpiComm)
  : internalComm_(mpiComm), level_(0), groupBegin_(), nbInGroup_(), computed_(false)
{

}

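/*!
  Compute, once, a hierarchy of rank groups over the internal communicator.
  Each level splits the previous group into at most maxChild child groups of
  near-equal size; only offsets and sizes are stored, no sub-communicator is
  actually created.
*/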
void DivideAdaptiveComm::computeMPICommLevel()
{
  if (computed_) return;
  computed_ = true;

  int mpiSize, mpiRank;
  MPI_Comm_size(internalComm_,&mpiSize);
  MPI_Comm_rank(internalComm_,&mpiRank);

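  // Choose the fan-out: the smallest maxChild >= 2 such that
  // maxChild^maxChild >= mpiSize (computed by brute force, no std::pow).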
  int maxChild=1;
  int m;
  do
  {
    m=1;
    ++maxChild;
    for(int i=0;i<maxChild;++i) m *= maxChild;
  } while(m<mpiSize);

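  // maxLevel is the smallest L such that maxChild^L > mpiSize, i.e. an upper
  // bound on the depth of the hierarchy when dividing by maxChild at each level.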
  int maxLevel=0;
  for(int size=1; size<=mpiSize; size*=maxChild) ++maxLevel;

  int pos, n, idx;
  level_=0;
  int begin=0;
  int end=mpiSize-1;
  int nb=end-begin+1;

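  // Storage layout: groupBegin_[l]/nbInGroup_[l] describe the group containing
  // this rank at level l; groupParentsBegin_[l][i]/nbInGroupParents_[l][i]
  // describe the i-th child group into which that group is split.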
  nbInGroup_ = groupBegin_ = std::vector<int>(maxLevel);
  nbInGroupParents_ = groupParentsBegin_ = std::vector<std::vector<int> >(maxLevel, std::vector<int>(maxChild));

  groupBegin_[level_] = begin;
  nbInGroup_[level_] = nb;
  ++level_;
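  // Descend the hierarchy: split the current group [begin, end] into
  // min(maxChild, nb) child groups (the first nb%maxChild ones get one extra
  // rank), record every child's offset and size, then continue with the child
  // group containing this rank.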
  while (nb>2 && (level_<maxLevel))
  {
    n = 0; idx = 0;
    pos = begin;
    for(int i=0;i<maxChild && i<nb;i++)
    {
      if (i<nb%maxChild) n = nb/maxChild + 1;
      else n = nb/maxChild;

      if (mpiRank>=pos && mpiRank<pos+n)
      {
        begin=pos;
        end=pos+n-1;
      }
      groupParentsBegin_[level_-1][idx] = pos;
      nbInGroupParents_[level_-1][idx] = n;
      ++idx;
      pos=pos+n;
    }
    groupBegin_[level_] = begin;
    nbInGroup_[level_] = nb = end-begin+1;

    ++level_;
  }

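  // Deepest level: each rank of the final group forms its own singleton group.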
  for (int i = 0; i < nbInGroup_[level_-1]; ++i)
  {
    groupParentsBegin_[level_-1][i] = groupBegin_[level_-1]+i;
    nbInGroupParents_[level_-1][i] = 1;
  }

//  parent=vector<int>(maxLevel+1);
//  child=vector<vector<int> >(maxLevel+1,vector<int>(maxChild));
//  nbChild=vector<int>(maxLevel+1);

//  do
//  {
//    n=0;
//    pos=begin;
//    nbChild[level_]=0;
//    parent[level_+1]=begin;
//    for(int i=0;i<maxChild && i<nb;i++)
//    {
//      if (i<nb%maxChild) n = nb/maxChild + 1;
//      else n = nb/maxChild;
//
//      if (mpiRank>=pos && mpiRank<pos+n)
//      {
//        begin=pos;
//        end=pos+n-1;
//      }
//      child[level_][i]=pos;
//      pos=pos+n;
//      nbChild[level_]++;
//    }
//    nb=end-begin+1;
//    level_=level_+1;
//  } while (nb>1);
}
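
// Illustrative usage, a sketch only: a client class could inherit the policy
// and trigger the (idempotent) computation once. HierarchicalClient is
// hypothetical; the real users of this policy live elsewhere in XIOS.
//
//   class HierarchicalClient : public DivideAdaptiveComm
//   {
//   public:
//     HierarchicalClient(const MPI_Comm& comm) : DivideAdaptiveComm(comm)
//     {
//       computeMPICommLevel(); // safe to call again: guarded by computed_
//     }
//   };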

//void DivideAdaptiveComm::divideMPICommLevel(const MPI_Comm& mpiCommLevel, int color, int level)
//{
////  int clientRank;
////  MPI_Comm_rank(mpiCommLevel,&clientRank);
//
//  --level;
//  if (0 < level)
//  {
//    int color = clientRank % 2;
//    commLevel_.push_back(MPI_Comm());
//    xios::MPI_Comm_split(mpiCommLevel, color, 0, &(commLevel_.back()));
//    divideMPICommLevel(commLevel_.back(), level);
//  }
//}

}