#include "xmlioserver_spl.hpp"
#include "cxios.hpp"
#include "client.hpp"
#include <boost/functional/hash.hpp>
#include "type.hpp"
#include "context.hpp"
#include "context_client.hpp"
#include "oasis_cinterface.hpp"
#include "mpi.hpp"
#include "timer.hpp"
#include "buffer_client.hpp"

namespace xios
{

  MPI_Comm CClient::intraComm ;
  MPI_Comm CClient::interComm ;
  int CClient::serverLeader ;
  bool CClient::is_MPI_Initialized ;

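  // Initialize the client side of XIOS: build the client intra-communicator
  // and the client<->server inter-communicator, either directly with MPI or
  // through the OASIS coupler, and return a duplicate of the intra-communicator
  // to the calling model in returnComm.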
  void CClient::initialize(const string& codeId, MPI_Comm& localComm, MPI_Comm& returnComm)
  {
    int initialized ;
    MPI_Initialized(&initialized) ;
    is_MPI_Initialized = (initialized != 0) ;

    // not using OASIS
    if (!CXios::usingOasis)
    {
      // localComm is not given
      if (localComm == MPI_COMM_NULL)
      {
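        // initialize MPI ourselves if the calling model has not already done it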
        if (!is_MPI_Initialized)
        {
          int argc = 0 ;
          char** argv = NULL ;
          MPI_Init(&argc, &argv) ;
        }
        CTimer::get("XIOS").resume() ;
        CTimer::get("XIOS init").resume() ;
        boost::hash<string> hashString ;
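        // hash the code identifiers: processes sharing the same identifier
        // will end up in the same intra-communicator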

        unsigned long hashClient = hashString(codeId) ;
        unsigned long hashServer = hashString(CXios::xiosCodeId) ;
        unsigned long* hashAll ;
        int rank ;
        int size ;
        int myColor ;
        int i, c ;

        MPI_Comm_size(CXios::globalComm, &size) ;
        MPI_Comm_rank(CXios::globalComm, &rank) ;
        hashAll = new unsigned long[size] ;

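        // gather every process's hash so that each rank can see which
        // executable every other rank belongs to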
        MPI_Allgather(&hashClient, 1, MPI_UNSIGNED_LONG, hashAll, 1, MPI_UNSIGNED_LONG, CXios::globalComm) ;

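        // assign a colour to each distinct hash; the first rank carrying a
        // given hash becomes the leader of that group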
        map<unsigned long, int> colors ;
        map<unsigned long, int> leaders ;

        for (i = 0, c = 0; i < size; i++)
        {
          if (colors.find(hashAll[i]) == colors.end())
          {
            colors[hashAll[i]] = c ;
            leaders[hashAll[i]] = i ;
            c++ ;
          }
        }

        myColor = colors[hashClient] ;

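        // split the global communicator into one intra-communicator per executable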
        MPI_Comm_split(CXios::globalComm, myColor, rank, &intraComm) ;

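        // with a server, create an inter-communicator towards the server group,
        // whose leader is the lowest global rank carrying the server hash;
        // without one, interComm is just a duplicate of intraComm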
        if (CXios::usingServer)
        {
          int clientLeader = leaders[hashClient] ;
          serverLeader = leaders[hashServer] ;
          MPI_Intercomm_create(intraComm, 0, CXios::globalComm, serverLeader, 0, &interComm) ;
        }
        else
        {
          MPI_Comm_dup(intraComm, &interComm) ;
        }
        delete [] hashAll ;
      }
      // localComm argument is given
      else
      {
        if (CXios::usingServer)
        {
          //ERROR("void CClient::initialize(const string& codeId,MPI_Comm& localComm,MPI_Comm& returnComm)",
          //      << " giving a local communicator is not compatible with using server mode") ;
        }
        else
        {
          MPI_Comm_dup(localComm, &intraComm) ;
          MPI_Comm_dup(intraComm, &interComm) ;
        }
      }
    }
    // using OASIS
    else
    {
      // localComm is not given
      if (localComm == MPI_COMM_NULL)
      {
        if (!is_MPI_Initialized) oasis_init(codeId) ;
        oasis_get_localcomm(intraComm) ;
      }
      else MPI_Comm_dup(localComm, &intraComm) ;
      CTimer::get("XIOS").resume() ;
      CTimer::get("XIOS init").resume() ;

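      // retrieve the inter-communicator from OASIS; rank 0 receives the
      // server leader's rank from the server and broadcasts it to the
      // other client processes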
      if (CXios::usingServer)
      {
        MPI_Status status ;
        int rank ;
        MPI_Comm_rank(intraComm, &rank) ;

        oasis_get_intercomm(interComm, CXios::xiosCodeId) ;
        if (rank == 0) MPI_Recv(&serverLeader, 1, MPI_INT, 0, 0, interComm, &status) ;
        MPI_Bcast(&serverLeader, 1, MPI_INT, 0, intraComm) ;
      }
      else MPI_Comm_dup(intraComm, &interComm) ;
    }
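    // hand a duplicate of the intra-communicator back to the calling model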
    MPI_Comm_dup(intraComm, &returnComm) ;
  }
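  // Register a new context on the client side: declare it to the server
  // (when one is running) and set up the context inter-communicator used
  // for all subsequent exchanges about this context.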
  void CClient::registerContext(const string& id, MPI_Comm contextComm)
  {
    CContext::setCurrent(id) ;
    CContext* context = CContext::create(id) ;

    if (!CXios::isServer)
    {
      int size, rank, globalRank ;
      MPI_Comm contextInterComm ;

      MPI_Comm_size(contextComm, &size) ;
      MPI_Comm_rank(contextComm, &rank) ;
      MPI_Comm_rank(CXios::globalComm, &globalRank) ;
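      // only the context root keeps its true global rank; it acts as the
      // local leader when the inter-communicator is created below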
      if (rank != 0) globalRank = 0 ;

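      // build the registration message (context id, number of client
      // processes, leader's global rank) and send it to the server leader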
      CMessage msg ;
      msg << id << size << globalRank ;

      int messageSize = msg.size() ;
      void* buff = new char[messageSize] ;
      CBufferOut buffer(buff, messageSize) ;
      buffer << msg ;

      MPI_Send(buff, buffer.count(), MPI_CHAR, serverLeader, 1, CXios::globalComm) ;
      delete [] buff ;

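      // create the context inter-communicator with the server; the tag
      // 10+globalRank keeps simultaneous registrations from different
      // context leaders distinct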
      MPI_Intercomm_create(contextComm, 0, CXios::globalComm, serverLeader, 10+globalRank, &contextInterComm) ;
      info(10) << "Register new Context : " << id << endl ;

      MPI_Comm inter ;
      MPI_Intercomm_merge(contextInterComm, 0, &inter) ;
      MPI_Barrier(inter) ;

      context->initClient(contextComm, contextInterComm) ;
    }
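    // client and server are attached within the same processes: the context
    // communicator is simply duplicated and both sides are initialized locally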
    else
    {
      MPI_Comm contextInterComm ;
      MPI_Comm_dup(contextComm, &contextInterComm) ;
      context->initClient(contextComm, contextInterComm) ;
      context->initServer(contextComm, contextInterComm) ;
    }
  }
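  // Finalize the client: notify the server that this client is done, stop
  // the timers, finalize MPI or OASIS when XIOS initialized them itself,
  // and print the performance and memory reports.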
  void CClient::finalize(void)
  {
    int rank ;
    int msg = 0 ;
    if (!CXios::isServer)
    {
      MPI_Comm_rank(intraComm, &rank) ;
      if (rank == 0)
      {
        MPI_Send(&msg, 1, MPI_INT, 0, 0, interComm) ;
      }
    }

    CTimer::get("XIOS finalize").suspend() ;
    CTimer::get("XIOS").suspend() ;

    if (!is_MPI_Initialized)
    {
      if (CXios::usingOasis) oasis_finalize() ;
      else MPI_Finalize() ;
    }
    info(20) << "Client side context is finalized" << endl ;
    report(0) << " Performance report : total time spent in XIOS : " << CTimer::get("XIOS").getCumulatedTime() << " s" << endl ;
    report(0) << " Performance report : time spent waiting for a free buffer : " << CTimer::get("Blocking time").getCumulatedTime() << " s" << endl ;
    report(0) << " Performance report : ratio : " << CTimer::get("Blocking time").getCumulatedTime()/CTimer::get("XIOS").getCumulatedTime()*100. << " %" << endl ;
    report(0) << " Performance report : this ratio should be close to zero; otherwise it may be useful to increase the buffer size or the number of servers" << endl ;
    report(0) << " Memory report : current buffer_size : " << CXios::bufferSize << endl ;
    report(0) << " Memory report : minimum buffer size required : " << maxRequestSize*2 << endl ;
    report(0) << " Memory report : increasing it may improve performance, depending on the volume of data written to file at each time step" << endl ;
  }
}