#ifndef __LEGACY_CONTEXT_SERVER_HPP__
#define __LEGACY_CONTEXT_SERVER_HPP__

#include "xios_spl.hpp"
#include "event_server.hpp"
#include "buffer_server.hpp"
#include "mpi.hpp"
#include "event_scheduler.hpp"
#include "context_server.hpp"

namespace xios
{
  class CContext ;
  class CContextClient;

15 | class CLegacyContextServer : public CContextServer |
---|
16 | { |
---|
17 | public: |
---|
18 | |
---|
19 | CLegacyContextServer(CContext* parent,MPI_Comm intraComm,MPI_Comm interComm) ; |
---|
20 | bool eventLoop(bool enableEventsProcessing = true); |
---|
21 | void releaseBuffers(void) ; |
---|
22 | |
---|
23 | private: |
---|
24 | |
---|
25 | void listen(void) ; |
---|
26 | // bool listenPendingRequest(MPI_Status& status) ; |
---|
27 | bool listenPendingRequest(MPI_Message &message, MPI_Status& status) ; |
---|
28 | void checkPendingProbe(void) ; |
---|
29 | void checkPendingRequest(void) ; |
---|
30 | void getBufferFromClient(size_t timeLine) ; |
---|
31 | void processRequest(int rank, char* buff,int count) ; |
---|
32 | void processEvents(bool enableEventsProcessing) ; |
---|
33 | bool hasFinished(void); |
---|
34 | void dispatchEvent(CEventServer& event) ; |
---|
35 | bool isCollectiveEvent(CEventServer& event) ; |
---|
36 | void setPendingEvent(void) ; |
---|
37 | bool hasPendingEvent(void) ; |
---|
38 | void notifyClientsFinalize(void) ; |
---|
39 | void freeWindows(void) ; // !<< free Windows for one sided communication |
---|
40 | |
---|
41 | MPI_Comm interCommMerged_; //!< Communicator of the client group + server group (intraCommunicator) needed for one sided communication. |
---|
42 | MPI_Comm commSelf_ ; //!< Communicator for proc alone from interCommMerged |
---|
43 | |
---|
44 | map<int,CServerBuffer*> buffers ; |
---|
45 | map<int,size_t> lastTimeLine ; //!< last event time line for a processed request |
---|
46 | map<int,size_t>::iterator itLastTimeLine ; //!< iterator on lastTimeLine |
---|
47 | map<int, list<std::pair<MPI_Message,MPI_Status> > > pendingProbe; |
---|
48 | map<int,MPI_Request> pendingRequest ; |
---|
49 | map<int,char*> bufferRequest ; |
---|
50 | |
---|
51 | map<size_t,CEventServer*> events ; |
---|
52 | size_t currentTimeLine ; |
---|
53 | |
---|
54 | bool finished ; |
---|
55 | bool pendingEvent ; |
---|
56 | bool scheduled ; /*!< event of current timeline is alreading scheduled ? */ |
---|
57 | bool pureOneSided ; //!< if true, client will communicated with servers only trough one sided communication. Otherwise the hybrid mode P2P /One sided is used. |
---|
58 | |
---|
59 | ~CLegacyContextServer() ; |
---|
60 | |
---|
61 | std::map<int, StdSize> mapBufferSize_; |
---|
62 | std::map<int,MPI_Comm> winComm_ ; //! Window communicators |
---|
63 | std::map<int,std::vector<MPI_Win> >windows_ ; //! one sided mpi windows to expose client buffers to servers ; No memory will be attached on server side. |
---|
64 | bool isProcessingEvent_ ; |
---|
65 | size_t remoteHashId_; //!< the hash is of the calling context client |
---|
66 | |
---|
67 | MPI_Comm processEventBarrier_ ; |
---|
68 | bool eventScheduled_=false; |
---|
69 | MPI_Request processEventRequest_ ; |
---|
70 | } ; |

}

#endif