source: XIOS3/trunk/src/transport/p2p_context_server.hpp @ 2562

Last change on this file since 2562 was 2562, checked in by jderouillat, 10 months ago

Add the missing delete tokenManager_

  • Property svn:executable set to *
File size: 4.2 KB
#ifndef __P2P_CONTEXT_SERVER_HPP__
#define __P2P_CONTEXT_SERVER_HPP__
#include "xios_spl.hpp"
#include "event_server.hpp"
#include "buffer_server.hpp"
#include "p2p_server_buffer.hpp"
#include "p2p_server_base.hpp"
#include "mpi.hpp"
#include "event_scheduler.hpp"
#include "context_server.hpp"

namespace xios
{
  class CContext ;
  class CContextClient;

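  // Server-side context endpoint for the point-to-point (P2P) transport.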
  class CP2pContextServer : public CContextServer, public CP2pServerBase
  {

    public:

      CP2pContextServer(CContext* parent, MPI_Comm intraComm, MPI_Comm interComm) ;
      ~CP2pContextServer() ;
      bool eventLoop(bool enableEventsProcessing = true) ;
      void releaseBuffers(void) ;

    private:

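      // Wraps one non-blocking receive of a client message: the source rank and
      // byte count come from a previously probed MPI_Status, the receive is
      // posted with MPI_Irecv, and completion is polled via test().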
      class CRequest
      {
        public:
          CRequest(MPI_Comm& interComm, MPI_Status& status)
          {
            rank_ = status.MPI_SOURCE ;
            MPI_Get_count(&status, MPI_CHAR, &count_) ;
            buffer_.resize(count_) ;
            info(logProtocol) << "DBG : Request created from " << rank_ << ", of size " << count_ << endl ;
            MPI_Irecv(buffer_.data(), count_, MPI_CHAR, rank_, 20, interComm, &request_) ;
          }

          ~CRequest() { info(logProtocol) << "DBG : Request deleted " << rank_ << ", of size " << count_ << endl ; }

          bool test(void)
          {
            int flag ;
            MPI_Status status ;
            MPI_Test(&request_, &flag, &status) ;
            if (flag)
            {
              info(logProtocol) << "DBG : Request completed " << rank_ << ", of size " << count_ << endl ;
              return true ;
            }
            else return false ;
          }

          int getCount(void) { return count_ ; }
          int getRank(void) { return rank_ ; }
          vector<char>& getBuffer(void) { return buffer_ ; }

        private:
          int rank_ ;            //!< source rank of the incoming message
          int count_ ;           //!< message size in bytes
          vector<char> buffer_ ; //!< receive buffer, sized from the probed status
          MPI_Request request_ ; //!< handle of the posted MPI_Irecv
      };
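    // Event-loop internals: listen for incoming client messages, turn completed
    // requests into events, and process events in timeline order.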
    void listen(void) ;
    void listenPendingRequest(void) ;
    void processRequest(CRequest& request) ;
    void checkBuffers(void) ;
    void processEvents(bool enableEventsProcessing) ;

    bool hasFinished(void) ;
    void dispatchEvent(CEventServer& event) ;
    bool isCollectiveEvent(CEventServer& event) ;
    void setPendingEvent(void) ;
    bool hasPendingEvent(void) ;

    void notifyClientsFinalize(void) ;
    void freeWindows(void) ; //!< Free MPI windows used for one-sided communication.

    MPI_Comm interCommMerged_ ; //!< Communicator merging the client group and the server group (intra-communicator), needed for one-sided communication.
    MPI_Comm commSelf_ ; //!< Self communicator for this process, derived from interCommMerged_.

    map<int,CP2pServerBuffer*> buffers_ ;
    map<int,size_t> lastTimeLine ; //!< last event timeline processed for each client rank
    map<int,size_t>::iterator itLastTimeLine ; //!< iterator on lastTimeLine
    map<int, list<std::pair<MPI_Message,MPI_Status> > > pendingProbe ;
    map<int,MPI_Request> pendingRequest ;
    map<int,char*> bufferRequest ;

    map<size_t,CEventServer*> events ;
    size_t currentTimeLine ;

    bool finished ;
    bool pendingEvent ;
    bool scheduled ;    //!< is the event of the current timeline already scheduled?
    bool pureOneSided ; //!< if true, clients communicate with servers only through one-sided communication; otherwise the hybrid P2P/one-sided mode is used

    private:

      std::map<int, StdSize> mapBufferSize_ ;
      std::map<int,MPI_Comm> winComm_ ; //!< window communicators
      std::map<int,std::vector<MPI_Win> > windows_ ; //!< one-sided MPI windows exposing client buffers to servers; no memory is attached on the server side
      bool isProcessingEvent_ ;
      size_t remoteHashId_ ; //!< hash id of the calling context client

      MPI_Comm processEventBarrier_ ;
      bool eventScheduled_ = false ;
      MPI_Request processEventRequest_ ;

      std::map<int,std::list<CRequest*> > requests_ ;

      std::map<size_t, SPendingEvent> pendingEvents_ ;
      std::map<size_t, SPendingEvent> completedEvents_ ;

  } ;

}

#endif
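
For reference, below is a minimal, self-contained sketch (not part of XIOS) of the non-blocking receive pattern that CRequest encapsulates: probe for an incoming message to learn its source and size, post a matching MPI_Irecv, and poll for completion with MPI_Test. All names in the sketch are illustrative; only the MPI calls and the tag value 20 mirror the header above.

// Sketch of the probe / Irecv / Test pattern used by CRequest (illustrative only).
#include <mpi.h>
#include <vector>
#include <cstdio>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  const int tag = 20; // same tag CRequest uses for its receives

  if (rank == 0)
  {
    std::vector<char> msg(64, 'x');
    MPI_Send(msg.data(), (int)msg.size(), MPI_CHAR, 1, tag, MPI_COMM_WORLD);
  }
  else if (rank == 1)
  {
    MPI_Status status;
    MPI_Probe(0, tag, MPI_COMM_WORLD, &status);   // learn source and size first
    int count;
    MPI_Get_count(&status, MPI_CHAR, &count);     // as in the CRequest constructor
    std::vector<char> buffer(count);              // size the buffer from the probe

    MPI_Request request;
    MPI_Irecv(buffer.data(), count, MPI_CHAR, status.MPI_SOURCE, tag,
              MPI_COMM_WORLD, &request);          // non-blocking receive

    int flag = 0;
    while (!flag)
      MPI_Test(&request, &flag, &status);         // poll, as CRequest::test() does

    std::printf("received %d bytes from rank %d\n", count, status.MPI_SOURCE);
  }

  MPI_Finalize();
  return 0;
}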