source: XIOS/trunk/src/client_client_dht_template.hpp @ 833

Last change on this file since 833 was 833, checked in by mhnguyen, 8 years ago

Improvements for dht

+) Implement adaptive hierarchy for dht, level of hierarchy depends on number of processes
+) Remove some redundant codes

Test
+) On Curie
+) All tests are correct

File size: 4.6 KB
RevLine 
/*!
   \file client_client_dht_template.hpp
   \author Ha NGUYEN
   \since 01 Oct 2015
   \date 06 Oct 2015

   \brief Distributed hashed table implementation.
 */
9
10#ifndef __XIOS_CLIENT_CLIENT_DHT_TEMPLATE_HPP__
11#define __XIOS_CLIENT_CLIENT_DHT_TEMPLATE_HPP__
12
13#include "xios_spl.hpp"
14#include "array_new.hpp"
15#include "mpi.hpp"
16#include "policy.hpp"
17#include <boost/unordered_map.hpp>
[829]18#include "dht_data_types.hpp"
[721]19
20namespace xios
21{
[833]22template<typename T, class HierarchyPolicy = DivideAdaptiveComm> class CClientClientDHTTemplate;
[721]23
24/*!
25  \class CClientClientDHTTemplate
26  This class provides the similar features like \class CClientServerMappingDistributed,
27which implements a simple distributed hashed table; Moreover, by extending with hierarchical structure,
28it allows to reduce greatly the number of communication among processes.
29*/
30template<typename T, typename HierarchyPolicy>
31class CClientClientDHTTemplate: public HierarchyPolicy
32{
33  public:
34    typedef T InfoType;
[727]35    static const int infoTypeSize = sizeof(InfoType);
[829]36    typedef typename boost::unordered_map<InfoType, std::vector<size_t> > InfoType2IndexMap;
37    typedef typename boost::unordered_map<size_t,InfoType> Index2InfoTypeMap;
[721]38
39  public:
[829]40    CClientClientDHTTemplate(const Index2InfoTypeMap& indexInfoInitMap,
[721]41                             const MPI_Comm& clientIntraComm,
42                             int hierarLvl = 2);
43
44    void computeIndexInfoMapping(const CArray<size_t,1>& indices);
45
[830]46    const Index2InfoTypeMap& getInfoIndexMap() const {return indexToInfoMappingLevel_; }
[721]47
48    /** Default destructor */
49    virtual ~CClientClientDHTTemplate();
50
51  protected:
52    // Redistribute index and info among clients
[829]53    void computeDistributedIndex(const Index2InfoTypeMap& indexInfoInitMap,
[721]54                                 const MPI_Comm& intraCommLevel,
55                                 int level);
56
57    void computeHashIndex(std::vector<size_t>& indexClientHash, int nbClient);
58
59    void computeIndexInfoMappingLevel(const CArray<size_t,1>& indices,
60                                      const MPI_Comm& intraCommLevel,
61                                      int level);
62
[833]63    void computeSendRecvRank(int level, int rank);
64
65    void sendRecvRank(int level,
66                      const std::vector<int>& sendNbRank, const std::vector<int>& sendNbElements,
67                      int& recvNbRank, int& recvNbElements);
68
[721]69  protected:
70    void probeIndexMessageFromClients(unsigned long* recvIndexGlobalBuff,
71                                      const int recvNbIndexCount,
72                                      int& countIndexGlobal,
73                                      std::map<int, unsigned long*>& indexGlobalBuffBegin,
74                                      std::map<int, MPI_Request>& requestRecvIndexGlobal,
75                                      const MPI_Comm& intraComm);
76
[829]77    void probeInfoMessageFromClients(unsigned char* recvIndexServerBuff,
[721]78                                     const int recvNbIndexCount,
79                                     int& countIndexServer,
[830]80                                     std::map<int, unsigned char*>& infoBuffBegin,
[721]81                                     std::map<int, MPI_Request>& requestRecvIndexServer,
82                                     const MPI_Comm& intraComm);
83
[830]84    // Send information to clients
85    void sendInfoToClients(int clientDestRank, unsigned char* info, int infoSize,
[721]86                           const MPI_Comm& clientIntraComm, std::list<MPI_Request>& requestSendIndexServer);
87
88    // Send global index to clients
[833]89    void sendIndexToClients(int clientDestRank, size_t* indices, size_t indiceSize,
[721]90                            const MPI_Comm& clientIntraComm, std::list<MPI_Request>& requestSendIndexGlobal);
91
92    // Verify sending request
93    void testSendRequest(std::list<MPI_Request>& sendRequest);
94
95    // Compute size of receiving buffer for global index
96    int computeBuffCountIndex(MPI_Request& requestRecv);
97
98    // Compute size of receiving buffer for server index
99    int computeBuffCountInfo(MPI_Request& requestRecv);
100
101  protected:
102    //! Mapping of global index to the corresponding client
[829]103    Index2InfoTypeMap index2InfoMapping_;
[721]104
[830]105    //! A mapping of index to the corresponding information in each level of hierarchy
[829]106    Index2InfoTypeMap indexToInfoMappingLevel_;
[721]107
[833]108    std::vector<std::vector<int> > sendRank_;
[721]109
[833]110    std::vector<std::vector<int> > recvRank_;
111
[721]112    //! Flag to specify whether data is distributed or not
113    bool isDataDistributed_;
114};
115
116typedef CClientClientDHTTemplate<int> CClientClientDHTInt;
[829]117typedef CClientClientDHTTemplate<PairIntInt> CClientClientDHTPairIntInt;
[721]118
119} // namespace xios
120#endif // __XIOS_CLIENT_CLIENT_DHT_TEMPLATE_HPP__
Note: See TracBrowser for help on using the repository browser.