1 | #include "context_server.hpp" |
---|
2 | #include "buffer_in.hpp" |
---|
3 | #include "type.hpp" |
---|
4 | #include "context.hpp" |
---|
5 | #include "object_template.hpp" |
---|
6 | #include "group_template.hpp" |
---|
7 | #include "attribute_template.hpp" |
---|
8 | #include "domain.hpp" |
---|
9 | #include "field.hpp" |
---|
10 | #include "file.hpp" |
---|
11 | #include "grid.hpp" |
---|
12 | #include "mpi.hpp" |
---|
13 | #include "tracer.hpp" |
---|
14 | #include "timer.hpp" |
---|
15 | #include "cxios.hpp" |
---|
16 | #include "event_scheduler.hpp" |
---|
17 | #include "server.hpp" |
---|
18 | #include <boost/functional/hash.hpp> |
---|
19 | |
---|
20 | |
---|
21 | |
---|
22 | namespace xios |
---|
23 | { |
---|
24 | |
---|
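  /*!
   * Initialise the server side of a context: keep the intra- and inter-communicators,
   * count the connected clients (remote size when interComm is a true inter-communicator)
   * and build the hash identifying this context in the event scheduler. On a level-1
   * server, clientPrimServer.size() is mixed into the hash to keep it unique.
   */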
  CContextServer::CContextServer(CContext* parent,ep_lib::MPI_Comm intraComm_,ep_lib::MPI_Comm interComm_)
  {
    context=parent;
    intraComm=intraComm_;
    ep_lib::MPI_Comm_size(intraComm,&intraCommSize);
    ep_lib::MPI_Comm_rank(intraComm,&intraCommRank);

    interComm=interComm_;
    int flag;
    ep_lib::MPI_Comm_test_inter(interComm,&flag);
    if (flag) ep_lib::MPI_Comm_remote_size(interComm,&commSize);
    else ep_lib::MPI_Comm_size(interComm,&commSize);

    currentTimeLine=0;
    scheduled=false;
    finished=false;
    boost::hash<string> hashString;
    if (CServer::serverLevel == 1)
      hashId=hashString(context->getId() + boost::lexical_cast<string>(context->clientPrimServer.size()));
    else
      hashId=hashString(context->getId());
  }

  void CContextServer::setPendingEvent(void)
  {
    pendingEvent=true;
  }

  bool CContextServer::hasPendingEvent(void)
  {
    return pendingEvent;
  }

  bool CContextServer::hasFinished(void)
  {
    return finished;
  }

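  //! Run one iteration of the server loop: probe for new client messages, test the
  //! pending receives and, unless disabled, process complete events.
  //! \return true once the context has been finalized.
  //! A hypothetical driver (sketch only, not part of this file) would call it as:
  //!   while (!contextServer->eventLoop()) { /* service other contexts */ }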
  bool CContextServer::eventLoop(bool enableEventsProcessing /*= true*/)
  {
    listen();
    checkPendingRequest();
    if (enableEventsProcessing)
      processEvents();
    return finished;
  }

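  /*!
   * Probe the inter-communicator (tag 20) for incoming client messages. The first probed
   * message is handled directly; if it cannot be received yet (its source already has a
   * pending request, or its buffer is full), every rank without a pending request is
   * probed once so that no client is left waiting.
   */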
  void CContextServer::listen(void)
  {
    int rank;
    int flag;
    ep_lib::MPI_Status status;
    bool okLoop;

    traceOff();
#ifdef _usingMPI
    MPI_Iprobe(MPI_ANY_SOURCE, 20,interComm,&flag,&status);
#elif _usingEP
    ep_lib::MPI_Iprobe(-2, 20,interComm,&flag,&status);
#endif
    traceOn();

    if (flag)
    {
#ifdef _usingMPI
      rank=status.MPI_SOURCE;
#elif _usingEP
      rank=status.ep_src;
#endif
      okLoop = true;
      if (pendingRequest.find(rank)==pendingRequest.end())
        okLoop = !listenPendingRequest(status);
      if (okLoop)
      {
        for(rank=0;rank<commSize;rank++)
        {
          if (pendingRequest.find(rank)==pendingRequest.end())
          {
            traceOff();
            ep_lib::MPI_Iprobe(rank, 20,interComm,&flag,&status);
            traceOn();
            if (flag) listenPendingRequest(status);
          }
        }
      }
    }
  }

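  /*!
   * Handle a probed message from a client. The first message received from a rank carries
   * the buffer size, used to allocate that rank's CServerBuffer. Subsequent messages are
   * received asynchronously into the buffer when enough space is free.
   * \return false when the message must wait because the buffer is currently full.
   */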
  bool CContextServer::listenPendingRequest(ep_lib::MPI_Status& status)
  {
    int count;
    char* addr;
    map<int,CServerBuffer*>::iterator it;
#ifdef _usingMPI
    int rank=status.MPI_SOURCE;
#elif _usingEP
    int rank=status.ep_src;
#endif

    it=buffers.find(rank);
    if (it==buffers.end()) // Receive the buffer size and allocate the buffer
    {
      StdSize buffSize = 0;
      ep_lib::MPI_Recv(&buffSize, 1, EP_LONG, rank, 20, interComm, &status);
      mapBufferSize_.insert(std::make_pair(rank, buffSize));
      it=(buffers.insert(pair<int,CServerBuffer*>(rank,new CServerBuffer(buffSize)))).first;
      return true;
    }
    else
    {
      ep_lib::MPI_Get_count(&status,EP_CHAR,&count);
      if (it->second->isBufferFree(count))
      {
        addr=(char*)it->second->getBuffer(count);
        ep_lib::MPI_Irecv(addr,count,EP_CHAR,rank,20,interComm,&pendingRequest[rank]);
        bufferRequest[rank]=addr;
        return true;
      }
      else
        return false;
    }
  }

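  //! Test all outstanding receive requests; each completed receive is handed to
  //! processRequest and then removed from the pending maps.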
  void CContextServer::checkPendingRequest(void)
  {
    map<int,ep_lib::MPI_Request>::iterator it;
    list<int> recvRequest;
    list<int>::iterator itRecv;
    int rank;
    int flag;
    int count;
    ep_lib::MPI_Status status;

    for(it=pendingRequest.begin();it!=pendingRequest.end();it++)
    {
      rank=it->first;
      traceOff();
      ep_lib::MPI_Test(&it->second, &flag, &status);
      traceOn();
      if (flag)
      {
        recvRequest.push_back(rank);
        ep_lib::MPI_Get_count(&status,EP_CHAR,&count);
        processRequest(rank,bufferRequest[rank],count);
      }
    }

    for(itRecv=recvRequest.begin();itRecv!=recvRequest.end();itRecv++)
    {
      pendingRequest.erase(*itRecv);
      bufferRequest.erase(*itRecv);
    }
  }

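  /*!
   * Split a received buffer into individual messages. Each message begins with its size
   * and its timeline; the message is pushed onto the CEventServer collecting the
   * sub-events of that timeline, creating it on first use.
   */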
  void CContextServer::processRequest(int rank, char* buff, int count)
  {
    CBufferIn buffer(buff,count);
    int size;
    size_t timeLine;
    map<size_t,CEventServer*>::iterator it;

    CTimer::get("Process request").resume();
    while(count>0)
    {
      char* startBuffer=(char*)buffer.ptr();
      CBufferIn newBuffer(startBuffer,buffer.remain());
      newBuffer>>size>>timeLine;

      it=events.find(timeLine);
      if (it==events.end()) it=events.insert(pair<size_t,CEventServer*>(timeLine,new CEventServer)).first;
      it->second->push(rank,buffers[rank],startBuffer,size);

      buffer.advance(size);
      count=buffer.remain();
    }
    CTimer::get("Process request").suspend();
  }

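  /*!
   * Process the event of the current timeline once all its sub-events have arrived.
   * When an event scheduler is available, registerEvent/queryEvent are used so that
   * every server process dispatches collective events in the same order.
   */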
  void CContextServer::processEvents(void)
  {
    map<size_t,CEventServer*>::iterator it;
    CEventServer* event;

    it=events.find(currentTimeLine);
    if (it!=events.end())
    {
      event=it->second;

      if (event->isFull())
      {
        if (!scheduled && CServer::eventScheduler) // Skip event scheduling for attached mode and reception on client side
        {
          CServer::eventScheduler->registerEvent(currentTimeLine,hashId);
          scheduled=true;
        }
        else if (!CServer::eventScheduler || CServer::eventScheduler->queryEvent(currentTimeLine,hashId))
        {
          // In attached mode, synchronise the processes so that different events are not
          // scheduled by different processes. The proper fix would be to use the event
          // scheduler in attached mode as well; for now an MPI barrier is used.
          if (!CServer::eventScheduler && CXios::isServer) ep_lib::MPI_Barrier(intraComm);

          CTimer::get("Process events").resume();
          dispatchEvent(*event);
          CTimer::get("Process events").suspend();
          pendingEvent=false;
          delete event;
          events.erase(it);
          currentTimeLine++;
          scheduled = false;
        }
      }
    }
  }

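  //! Free the receive buffer allocated for each connected client.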
  CContextServer::~CContextServer()
  {
    map<int,CServerBuffer*>::iterator it;
    for(it=buffers.begin();it!=buffers.end();++it) delete it->second;
  }

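  /*!
   * Dispatch an event to the class identified by its classId. The context finalize event
   * is handled here directly: it marks the server as finished, finalizes the context and
   * reports the memory used by the per-client buffers.
   */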
  void CContextServer::dispatchEvent(CEventServer& event)
  {
    int rank;
    StdString ctxId = context->getId();
    CContext::setCurrent(ctxId);
    StdSize totalBuf = 0;

    if (event.classId==CContext::GetType() && event.type==CContext::EVENT_ID_CONTEXT_FINALIZE)
    {
      finished=true;
      info(20)<<" CContextServer: Receive context <"<<context->getId()<<"> finalize."<<endl;
      context->finalize();
      std::map<int, StdSize>::const_iterator itbMap = mapBufferSize_.begin(),
                                             iteMap = mapBufferSize_.end(), itMap;
      for (itMap = itbMap; itMap != iteMap; ++itMap)
      {
        rank = itMap->first;
        report(10)<< " Memory report : Context <"<<ctxId<<"> : server side : memory used for buffer of each connection to client" << endl
                  << "  +) With client of rank " << rank << " : " << itMap->second << " bytes " << endl;
        totalBuf += itMap->second;
      }
      report(0)<< " Memory report : Context <"<<ctxId<<"> : server side : total memory used for buffer "<<totalBuf<<" bytes"<<endl;
    }
    else if (event.classId==CContext::GetType()) CContext::dispatchEvent(event);
    else if (event.classId==CContextGroup::GetType()) CContextGroup::dispatchEvent(event);
    else if (event.classId==CCalendarWrapper::GetType()) CCalendarWrapper::dispatchEvent(event);
    else if (event.classId==CDomain::GetType()) CDomain::dispatchEvent(event);
    else if (event.classId==CDomainGroup::GetType()) CDomainGroup::dispatchEvent(event);
    else if (event.classId==CAxis::GetType()) CAxis::dispatchEvent(event);
    else if (event.classId==CAxisGroup::GetType()) CAxisGroup::dispatchEvent(event);
    else if (event.classId==CScalar::GetType()) CScalar::dispatchEvent(event);
    else if (event.classId==CScalarGroup::GetType()) CScalarGroup::dispatchEvent(event);
    else if (event.classId==CGrid::GetType()) CGrid::dispatchEvent(event);
    else if (event.classId==CGridGroup::GetType()) CGridGroup::dispatchEvent(event);
    else if (event.classId==CField::GetType()) CField::dispatchEvent(event);
    else if (event.classId==CFieldGroup::GetType()) CFieldGroup::dispatchEvent(event);
    else if (event.classId==CFile::GetType()) CFile::dispatchEvent(event);
    else if (event.classId==CFileGroup::GetType()) CFileGroup::dispatchEvent(event);
    else if (event.classId==CVariable::GetType()) CVariable::dispatchEvent(event);
    else
    {
      ERROR("void CContextServer::dispatchEvent(CEventServer& event)",<<" Bad event class Id"<<endl);
    }
  }
}