[300] | 1 | #ifndef __CONTEXT_SERVER_HPP__ |
---|
[327] | 2 | #define __CONTEXT_SERVER_HPP__ |
---|
[591] | 3 | #include "xios_spl.hpp" |
---|
[300] | 4 | #include "event_server.hpp" |
---|
| 5 | #include "buffer_server.hpp" |
---|
[382] | 6 | #include "mpi.hpp" |
---|
[1761] | 7 | #include "event_scheduler.hpp" |
---|
[300] | 8 | |
---|
namespace xios
{
  class CContext ;

  /*!
   * \class CContextServer
   * Server-side handler of one client/server context.
   * Listens for incoming client requests on the inter-communicator, buffers
   * them per client rank, assembles them into time-line-ordered events and
   * dispatches those events to the owning CContext.
   * NOTE(review): most members are public data — external code appears to
   * access them directly, so none of them can be safely renamed or hidden.
   */
  class CContextServer
  {
    public:

    //! Build a server for \p parent using the servers' intra-communicator and the client/server inter-communicator.
    CContextServer(CContext* parent,MPI_Comm intraComm,MPI_Comm interComm) ;
    //! Run one iteration of the server loop (listen / check requests / process events); returns the finished state. Event processing can be skipped via the flag.
    bool eventLoop(bool enableEventsProcessing = true);
    //! Probe for new incoming client messages.
    void listen(void) ;
    //! Probe-and-receive step for a single pending request; \p status is the probe result. Return value presumably indicates whether a request was accepted — TODO confirm.
    bool listenPendingRequest(MPI_Status& status) ;
    //! Test outstanding MPI receive requests and hand completed buffers to processRequest.
    void checkPendingRequest(void) ;
    //! Retrieve from the client the buffer holding event \p timeLine (one-sided path) — assumption based on name; verify against implementation.
    void getBufferFromClient(size_t timeLine) ;
    //! Parse \p count bytes received from client \p rank in \p buff and append them to the matching events.
    void processRequest(int rank, char* buff,int count) ;
    //! Process complete events in time-line order and dispatch them.
    void processEvents(void) ;
    //! True once the context has received its finalize event.
    bool hasFinished(void);
    //! Route a fully assembled event to the context handler.
    void dispatchEvent(CEventServer& event) ;
    //! Mark that an event is pending.
    void setPendingEvent(void) ;
    //! Whether an event is currently pending.
    bool hasPendingEvent(void) ;
    //! Whether client and server run in attached (same-process) mode.
    bool isAttachedModeEnabled() const;
    //! Free the per-client communication buffers.
    void releaseBuffers(void) ;
    //! Notify the clients that the server side is finalizing.
    void notifyClientsFinalize(void) ;

    MPI_Comm intraComm ;       //!< Communicator of the server group
    int intraCommSize ;        //!< Size of intraComm
    int intraCommRank ;        //!< Rank of this server in intraComm

    MPI_Comm interComm ;       //!< Client/server inter-communicator
    int commSize ;             //!< Number of client processes on the remote side

    MPI_Comm interCommMerged; //!< Communicator of the client group + server group (intraCommunicator) needed for one sided communication.

    MPI_Comm commSelf; //!< Communicator of the server alone. Needed to create a new communicator between 1 proc client and 1 proc server for one sided communication

    map<int,CServerBuffer*> buffers ;          //!< Per-client-rank receive buffers
    map<int,size_t> lastTimeLine ; //!< last event time line for a processed request
    map<int,size_t>::iterator itLastTimeLine ; //!< iterator on lastTimeLine
    map<int,MPI_Request> pendingRequest ;      //!< Outstanding MPI receive request per client rank
    map<int,char*> bufferRequest ;             //!< Raw receive buffer per client rank, paired with pendingRequest

    map<size_t,CEventServer*> events ;         //!< Events being assembled, keyed by time line
    size_t currentTimeLine ;                   //!< Time line of the next event to process
    CContext* context ;                        //!< Owning context (non-owning pointer)

    bool finished ;      //!< True after the finalize event has been handled
    bool pendingEvent ;  //!< True while an event is pending
    bool scheduled ;     /*!< event of current timeline is alreading scheduled ? */
    bool attachedMode ;  //! true if attached mode is enabled otherwise false
    bool pureOneSided ; //!< if true, client will communicated with servers only trough one sided communication. Otherwise the hybrid mode P2P /One sided is used.

    size_t hashId ;      //!< Hash identifying this context for event scheduling — assumption from name; confirm in implementation

    ~CContextServer() ;

    private:
      std::map<int, StdSize> mapBufferSize_;   //!< Buffer size per client rank
      vector<MPI_Win> windows ; //! one sided mpi windows to expose client buffers to servers ; No memory will be attached on server side.
      CEventScheduler* eventScheduler_ ;       //!< Scheduler ordering events across contexts (non-owning? — TODO confirm ownership)
      bool isProcessingEvent_ ;                //!< Re-entrancy guard flag for event processing — presumably set while processEvents runs; verify
  } ;

}
---|
| 72 | |
---|
| 73 | #endif |
---|