source: XIOS/trunk/src/client_server_mapping_distributed.hpp @ 726

Last change on this file since 726 was 721, checked in by mhnguyen, 9 years ago

Templated version of the distributed hash table

+) Implement the DHT in a more generic way, so that it works with different types of information
+) Some old DHT code is kept as a reference (it will be deleted soon)

Test
+) Local, attached mode, 8 processes
+) test_remap passes and the result is correct

File size: 4.6 KB
/*!
   \file client_server_mapping_distributed.hpp
   \author Ha NGUYEN
   \since 27 Feb 2015
   \date 09 March 2015

   \brief Mapping between client indices and servers.
   Clients pre-calculate all information about the server distribution.
*/

#ifndef __XIOS_CLIENT_SERVER_MAPPING_DISTRIBUTED_HPP__
#define __XIOS_CLIENT_SERVER_MAPPING_DISTRIBUTED_HPP__

#include "client_server_mapping.hpp"
#include "xios_spl.hpp"
#include "array_new.hpp"
#include "mpi.hpp"
#include <boost/unordered_map.hpp>
#include "client_client_dht_template.hpp"

namespace xios
{
/*!
  \class CClientServerMappingDistributed
  This class computes the indices of the data sent to servers, as well as the indices of
that data on the server side, with a distributed algorithm. Each client holds only a piece of
information about the server distribution. To discover the complete picture, all clients first join a
discovery process in which each client announces the information it holds and requests the information
it lacks. After this process, each client knows which client it must query to obtain the server
corresponding to a given global index. The algorithm relies on hashed indices.
*/
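/*
  Illustrative sketch (an assumption for illustration, not part of XIOS): with a
  hash-partitioned index space, the client holding a given (global index, server)
  pair can be found with a simple range lookup. `hashGlobalIndex` and `bounds` are
  hypothetical names.

    size_t h = hashGlobalIndex(globalIndex);        // hash of the queried global index
    int owner = 0;
    while (!(bounds[owner] <= h && h < bounds[owner + 1]))
      ++owner;                                      // client `owner` stores the pair

  The demand for the server rank of `globalIndex` is then sent to client `owner`,
  which answers from its local piece of the mapping.
*/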
class CClientServerMappingDistributed : public CClientServerMapping
{
  public:
    /** Default constructor */
    CClientServerMappingDistributed(const boost::unordered_map<size_t,int>& globalIndexOfServer,
                                    const MPI_Comm& clientIntraComm, bool isDataDistributed = true);

    virtual void computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClientSendToServer);

    std::vector<int> computeConnectedServerRank(const CArray<size_t,1> globalIndexClient);

    /** Default destructor */
    virtual ~CClientServerMappingDistributed();

  protected:
    // Redistribute global indices and server indices among clients
    void computeDistributedServerIndex(const boost::unordered_map<size_t,int>& globalIndexOfServer,
                                       const MPI_Comm& clientIntraComm);

    // Send server indices to clients
    void sendIndexServerToClients(int clientDestRank, std::vector<int>& indexServer,
                                  const MPI_Comm& clientIntraComm, std::list<MPI_Request>& requestSendIndexServer);

    // Send global indices to clients
    void sendIndexGlobalToClients(int clientDestRank, std::vector<size_t>& indexGlobal,
                                  const MPI_Comm& clientIntraComm, std::list<MPI_Request>& requestSendIndexGlobal);

    // Check the status of pending send requests
    void testSendRequest(std::list<MPI_Request>& sendRequest);

    // Process a received request
    void processReceivedRequest(unsigned long* buffIndexGlobal, int* buffIndexServer, int count);

    // Probe for and receive messages carrying global indices
    void probeIndexGlobalMessageFromClients(unsigned long* recvIndexGlobalBuff, int recvNbIndexCount);

    // Probe for and receive messages carrying server indices
    void probeIndexServerMessageFromClients(int* recvIndexServerBuff, int recvNbIndexCount);

    // Compute the bounds of the hash ranges
    void computeHashIndex();
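    // (Assumption for clarity, not stated in the original comments: the bounds
    //  presumably split the hash space into nbClient_ contiguous ranges, one per
    //  client, stored in indexClientHash_.)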

    // Compute the size of the receiving buffer for global indices
    int computeBuffCountIndexGlobal(MPI_Request& requestRecv);

    // Compute the size of the receiving buffer for server indices
    int computeBuffCountIndexServer(MPI_Request& requestRecv);

    // Reset the request maps and buffer counts
    void resetReceivingRequestAndCount();

  protected:
    //! Mapping of each global index to its corresponding server
    boost::unordered_map<size_t,int> globalIndexToServerMapping_;

    //! Bounds of the hash ranges
    std::vector<size_t> indexClientHash_;

    //! Number of clients
    int nbClient_;

    //! Rank of this client
    int clientRank_;

    //! Element count of the receiving buffer for global indices
    int countIndexGlobal_;

    //! Element count of the receiving buffer for server indices
    int countIndexServer_;

    //! Intracommunicator of the clients
    MPI_Comm clientIntraComm_;

    //! Requests returned by MPI_Irecv for global indices
    std::map<int, MPI_Request> requestRecvIndexGlobal_;

    //! Requests returned by MPI_Irecv for server indices
    std::map<int, MPI_Request> requestRecvIndexServer_;

    //! Mapping of a client rank to the beginning of the receiving buffer for global-index messages from that client
    std::map<int, unsigned long*> indexGlobalBuffBegin_;

    //! Mapping of a client rank to the beginning of the receiving buffer for server-index messages from that client
    std::map<int, int*> indexServerBuffBegin_;

    //! Flag specifying whether the data is distributed or not
    bool isDataDistributed_;

//    CClientClientDHTTemplate<int>* ccDHT_;
    CClientClientDHTInt* ccDHT_;
};
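
/*
  Minimal usage sketch (illustrative only; variable names such as `myPiece` and
  `globalIdx` are assumptions, not part of XIOS): each client constructs the mapping
  from the piece of the (global index -> server) table it owns, then resolves the
  servers for the global indices it actually sends.

    boost::unordered_map<size_t,int> myPiece;       // locally known (index, server) pairs
    CClientServerMappingDistributed mapping(myPiece, clientIntraComm);
    CArray<size_t,1> globalIdx;                     // indices this client sends to servers
    mapping.computeServerIndexMapping(globalIdx);   // collective over clientIntraComm
*/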

} // namespace xios
#endif // __XIOS_CLIENT_SERVER_MAPPING_DISTRIBUTED_HPP__