Changeset 1638 for XIOS/trunk/src/node
- Timestamp: 01/22/19 16:15:03
- Location: XIOS/trunk/src/node
- Files: 12 edited
Legend:
- Unmodified (context lines, no prefix)
- Added (lines prefixed with "+")
- Removed (lines prefixed with "-")
XIOS/trunk/src/node/axis.cpp
r1637 → r1638

@@ lines 130-138 @@
     \return the number of indexes written by each server
   */
-  int CAxis::getNumberWrittenIndexes(MPI_Comm writtenCom)
+  int CAxis::getNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom)
   TRY
   {
     int writtenSize;
-    MPI_Comm_size(writtenCom, &writtenSize);
+    ep_lib::MPI_Comm_size(writtenCom, &writtenSize);
     return numberWrittenIndexes_[writtenSize];
   }

@@ lines 143-151 @@
     \return the total number of indexes written by the servers
   */
-  int CAxis::getTotalNumberWrittenIndexes(MPI_Comm writtenCom)
+  int CAxis::getTotalNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom)
   TRY
   {
     int writtenSize;
-    MPI_Comm_size(writtenCom, &writtenSize);
+    ep_lib::MPI_Comm_size(writtenCom, &writtenSize);
     return totalNumberWrittenIndexes_[writtenSize];
   }

@@ lines 156-173 @@
     \return the offset of indexes written by each server
   */
-  int CAxis::getOffsetWrittenIndexes(MPI_Comm writtenCom)
+  int CAxis::getOffsetWrittenIndexes(ep_lib::MPI_Comm writtenCom)
   TRY
   {
     int writtenSize;
-    MPI_Comm_size(writtenCom, &writtenSize);
+    ep_lib::MPI_Comm_size(writtenCom, &writtenSize);
     return offsetWrittenIndexes_[writtenSize];
   }
   CATCH_DUMP_ATTR

-  CArray<int, 1>& CAxis::getCompressedIndexToWriteOnServer(MPI_Comm writtenCom)
+  CArray<int, 1>& CAxis::getCompressedIndexToWriteOnServer(ep_lib::MPI_Comm writtenCom)
   TRY
   {
     int writtenSize;
-    MPI_Comm_size(writtenCom, &writtenSize);
+    ep_lib::MPI_Comm_size(writtenCom, &writtenSize);
     return compressedIndexToWriteOnServer[writtenSize];
   }

@@ lines 768-776 @@
   CATCH_DUMP_ATTR

-  void CAxis::computeWrittenCompressedIndex(MPI_Comm writtenComm)
+  void CAxis::computeWrittenCompressedIndex(ep_lib::MPI_Comm writtenComm)
   TRY
   {
     int writtenCommSize;
-    MPI_Comm_size(writtenComm, &writtenCommSize);
+    ep_lib::MPI_Comm_size(writtenComm, &writtenCommSize);
     if (compressedIndexToWriteOnServer.find(writtenCommSize) != compressedIndexToWriteOnServer.end())
       return;

@@ lines 850-855 @@
   {

-    MPI_Allreduce(&numberWrittenIndexes_[writtenCommSize], &totalNumberWrittenIndexes_[writtenCommSize], 1, MPI_INT, MPI_SUM, writtenComm);
-    MPI_Scan(&numberWrittenIndexes_[writtenCommSize], &offsetWrittenIndexes_[writtenCommSize], 1, MPI_INT, MPI_SUM, writtenComm);
+    ep_lib::MPI_Allreduce(&numberWrittenIndexes_[writtenCommSize], &totalNumberWrittenIndexes_[writtenCommSize], 1, EP_INT, EP_SUM, writtenComm);
+    ep_lib::MPI_Scan(&numberWrittenIndexes_[writtenCommSize], &offsetWrittenIndexes_[writtenCommSize], 1, EP_INT, EP_SUM, writtenComm);
     offsetWrittenIndexes_[writtenCommSize] -= numberWrittenIndexes_[writtenCommSize];
   }
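Every hunk in axis.cpp applies the same substitution: communicator parameters become ep_lib::MPI_Comm, the MPI calls move into the ep_lib namespace, and the datatype and operation constants become their EP_ counterparts (EP_INT, EP_SUM). A minimal consolidated sketch of that pattern follows; it uses only the calls and constants visible in the hunks above, and the helper name is hypothetical:

    // Assumes the ep_lib declarations (ep_lib::MPI_*, EP_* constants) are in scope.
    // Mirrors the reduction step of CAxis::computeWrittenCompressedIndex():
    // every rank contributes a local count, and the wrapped collectives take
    // EP_INT / EP_SUM instead of MPI_INT / MPI_SUM.
    void accumulateWrittenIndexes(ep_lib::MPI_Comm writtenComm,
                                  int localCount, int& totalCount, int& offset)
    {
      int commSize;
      ep_lib::MPI_Comm_size(writtenComm, &commSize);   // size of the "written" communicator

      ep_lib::MPI_Allreduce(&localCount, &totalCount, 1, EP_INT, EP_SUM, writtenComm);
      ep_lib::MPI_Scan(&localCount, &offset, 1, EP_INT, EP_SUM, writtenComm);
      offset -= localCount;   // exclusive prefix sum, as in the hunk at lines 850-855
    }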
XIOS/trunk/src/node/axis.hpp
r1637 → r1638

@@ lines 68-75 @@
   const std::set<StdString> & getRelFiles(void) const;

-  int getNumberWrittenIndexes(MPI_Comm writtenCom);
-  int getTotalNumberWrittenIndexes(MPI_Comm writtenCom);
-  int getOffsetWrittenIndexes(MPI_Comm writtenCom);
-  CArray<int, 1>& getCompressedIndexToWriteOnServer(MPI_Comm writtenCom);
+  int getNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom);
+  int getTotalNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom);
+  int getOffsetWrittenIndexes(ep_lib::MPI_Comm writtenCom);
+  CArray<int, 1>& getCompressedIndexToWriteOnServer(ep_lib::MPI_Comm writtenCom);

   std::map<int, StdSize> getAttributesBufferSize(CContextClient* client, const std::vector<int>& globalDim, int orderPositionInGrid,

@@ lines 113-117 @@

   void computeWrittenIndex();
-  void computeWrittenCompressedIndex(MPI_Comm);
+  void computeWrittenCompressedIndex(ep_lib::MPI_Comm);
   bool hasTransformation();
   void solveInheritanceTransformation();
XIOS/trunk/src/node/context.cpp
r1622 → r1638

@@ lines 265-273 @@

   //! Initialize client side
-  void CContext::initClient(MPI_Comm intraComm, MPI_Comm interComm, CContext* cxtServer /*= 0*/)
+  void CContext::initClient(ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtServer /*= 0*/)
   TRY
   {

     hasClient = true;
-    MPI_Comm intraCommServer, interCommServer;
+    ep_lib::MPI_Comm intraCommServer, interCommServer;

@@ lines 284-290 @@
     else
     {
-      MPI_Comm_dup(intraComm, &intraCommServer);
+      ep_lib::MPI_Comm_dup(intraComm, &intraCommServer);
       comms.push_back(intraCommServer);
-      MPI_Comm_dup(interComm, &interCommServer);
+      ep_lib::MPI_Comm_dup(interComm, &interCommServer);
       comms.push_back(interCommServer);
     }

@@ lines 309-315 @@
     {
       clientPrimServer.push_back(new CContextClient(this, intraComm, interComm));
-      MPI_Comm_dup(intraComm, &intraCommServer);
+      ep_lib::MPI_Comm_dup(intraComm, &intraCommServer);
       comms.push_back(intraCommServer);
-      MPI_Comm_dup(interComm, &interCommServer);
+      ep_lib::MPI_Comm_dup(interComm, &interCommServer);
       comms.push_back(interCommServer);
       serverPrimServer.push_back(new CContextServer(this, intraCommServer, interCommServer));

@@ lines 383-387 @@
   CATCH_DUMP_ATTR

-  void CContext::initServer(MPI_Comm intraComm, MPI_Comm interComm, CContext* cxtClient /*= 0*/)
+  void CContext::initServer(ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtClient /*= 0*/)
   TRY
   {

@@ lines 402-406 @@
     registryOut->setPath(contextRegistryId) ;

-    MPI_Comm intraCommClient, interCommClient;
+    ep_lib::MPI_Comm intraCommClient, interCommClient;
     if (cxtClient) // Attached mode
     {

@@ lines 410-416 @@
     else
     {
-      MPI_Comm_dup(intraComm, &intraCommClient);
+      ep_lib::MPI_Comm_dup(intraComm, &intraCommClient);
       comms.push_back(intraCommClient);
-      MPI_Comm_dup(interComm, &interCommClient);
+      ep_lib::MPI_Comm_dup(interComm, &interCommClient);
       comms.push_back(interCommClient);
     }

@@ lines 502-507 @@

     //! Free internally allocated communicators
-    for (std::list<MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it)
-      MPI_Comm_free(&(*it));
+    for (std::list<ep_lib::MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it)
+      ep_lib::MPI_Comm_free(&(*it));
     comms.clear();

@@ lines 544-549 @@

     //! Free internally allocated communicators
-    for (std::list<MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it)
-      MPI_Comm_free(&(*it));
+    for (std::list<ep_lib::MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it)
+      ep_lib::MPI_Comm_free(&(*it));
     comms.clear();

@@ lines 560-565 @@
   TRY
   {
-    for (std::list<MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it)
-      MPI_Comm_free(&(*it));
+    for (std::list<ep_lib::MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it)
+      ep_lib::MPI_Comm_free(&(*it));
     comms.clear();
   }
XIOS/trunk/src/node/context.hpp
r1622 → r1638

@@ lines 88-93 @@
   public :
     // Initialize server or client
-    void initClient(MPI_Comm intraComm, MPI_Comm interComm, CContext* cxtServer = 0);
-    void initServer(MPI_Comm intraComm, MPI_Comm interComm, CContext* cxtClient = 0);
+    void initClient(ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtServer = 0);
+    void initServer(ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtClient = 0);
     bool isInitialized(void);

@@ lines 263-267 @@
     StdString idServer_;
     CGarbageCollector garbageCollector;
-    std::list<MPI_Comm> comms; //!< Communicators allocated internally
+    std::list<ep_lib::MPI_Comm> comms; //!< Communicators allocated internally

   public: // Some function maybe removed in the near future
XIOS/trunk/src/node/domain.cpp
r1637 → r1638

@@ lines 99-107 @@
     \return the number of indexes written by each server
   */
-  int CDomain::getNumberWrittenIndexes(MPI_Comm writtenCom)
+  int CDomain::getNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom)
   TRY
   {
     int writtenSize;
-    MPI_Comm_size(writtenCom, &writtenSize);
+    ep_lib::MPI_Comm_size(writtenCom, &writtenSize);
     return numberWrittenIndexes_[writtenSize];
   }

@@ lines 112-120 @@
     \return the total number of indexes written by the servers
   */
-  int CDomain::getTotalNumberWrittenIndexes(MPI_Comm writtenCom)
+  int CDomain::getTotalNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom)
   TRY
   {
     int writtenSize;
-    MPI_Comm_size(writtenCom, &writtenSize);
+    ep_lib::MPI_Comm_size(writtenCom, &writtenSize);
     return totalNumberWrittenIndexes_[writtenSize];
   }

@@ lines 125-142 @@
     \return the offset of indexes written by each server
   */
-  int CDomain::getOffsetWrittenIndexes(MPI_Comm writtenCom)
+  int CDomain::getOffsetWrittenIndexes(ep_lib::MPI_Comm writtenCom)
   TRY
   {
     int writtenSize;
-    MPI_Comm_size(writtenCom, &writtenSize);
+    ep_lib::MPI_Comm_size(writtenCom, &writtenSize);
     return offsetWrittenIndexes_[writtenSize];
   }
   CATCH_DUMP_ATTR

-  CArray<int, 1>& CDomain::getCompressedIndexToWriteOnServer(MPI_Comm writtenCom)
+  CArray<int, 1>& CDomain::getCompressedIndexToWriteOnServer(ep_lib::MPI_Comm writtenCom)
   TRY
   {
     int writtenSize;
-    MPI_Comm_size(writtenCom, &writtenSize);
+    ep_lib::MPI_Comm_size(writtenCom, &writtenSize);
     return compressedIndexToWriteOnServer[writtenSize];
   }

@@ lines 690-703 @@
     int v ;
     v=ibegin ;
-    MPI_Allgather(&v,1,MPI_INT,ibegin_g,1,MPI_INT,client->intraComm) ;
+    ep_lib::MPI_Allgather(&v,1,EP_INT,ibegin_g,1,EP_INT,client->intraComm) ;
     v=jbegin ;
-    MPI_Allgather(&v,1,MPI_INT,jbegin_g,1,MPI_INT,client->intraComm) ;
+    ep_lib::MPI_Allgather(&v,1,EP_INT,jbegin_g,1,EP_INT,client->intraComm) ;
     v=ni ;
-    MPI_Allgather(&v,1,MPI_INT,ni_g,1,MPI_INT,client->intraComm) ;
+    ep_lib::MPI_Allgather(&v,1,EP_INT,ni_g,1,EP_INT,client->intraComm) ;
     v=nj ;
-    MPI_Allgather(&v,1,MPI_INT,nj_g,1,MPI_INT,client->intraComm) ;
-
-    MPI_Allgatherv(lon.dataFirst(),ni,MPI_DOUBLE,lon_g.dataFirst(),ni_g, ibegin_g,MPI_DOUBLE,client->intraComm) ;
-    MPI_Allgatherv(lat.dataFirst(),nj,MPI_DOUBLE,lat_g.dataFirst(),nj_g, jbegin_g,MPI_DOUBLE,client->intraComm) ;
+    ep_lib::MPI_Allgather(&v,1,EP_INT,nj_g,1,EP_INT,client->intraComm) ;
+
+    ep_lib::MPI_Allgatherv(lon.dataFirst(),ni,EP_DOUBLE,lon_g.dataFirst(),ni_g, ibegin_g,EP_DOUBLE,client->intraComm) ;
+    ep_lib::MPI_Allgatherv(lat.dataFirst(),nj,EP_DOUBLE,lat_g.dataFirst(),nj_g, jbegin_g,EP_DOUBLE,client->intraComm) ;

     delete[] ibegin_g ;

@@ lines 1932-1942 @@
     displs[0] = 0;
     int localCount = connectedServerRank_[nbServer].size() ;
-    MPI_Gather(&localCount, 1, MPI_INT, &counts[0], 1, MPI_INT, 0, client->intraComm) ;
+    ep_lib::MPI_Gather(&localCount, 1, EP_INT, &counts[0], 1, EP_INT, 0, client->intraComm) ;
     for (int i = 0; i < clientSize-1; ++i)
     {
     ...
     }
     std::vector<int> allConnectedServers(displs[clientSize-1]+counts[clientSize-1]);
-    MPI_Gatherv(&(connectedServerRank_[nbServer])[0], localCount, MPI_INT, &allConnectedServers[0], &counts[0], &displs[0], MPI_INT, 0, client->intraComm);
+    ep_lib::MPI_Gatherv(&(connectedServerRank_[nbServer])[0], localCount, EP_INT, &allConnectedServers[0], &counts[0], &displs[0], EP_INT, 0, client->intraComm);

     if ((allConnectedServers.size() != nbServer) && (rank == 0))

@@ lines 2003-2011 @@
   CATCH_DUMP_ATTR

-  void CDomain::computeWrittenCompressedIndex(MPI_Comm writtenComm)
+  void CDomain::computeWrittenCompressedIndex(ep_lib::MPI_Comm writtenComm)
   TRY
   {
     int writtenCommSize;
-    MPI_Comm_size(writtenComm, &writtenCommSize);
+    ep_lib::MPI_Comm_size(writtenComm, &writtenCommSize);
     if (compressedIndexToWriteOnServer.find(writtenCommSize) != compressedIndexToWriteOnServer.end())
       return;

@@ lines 2064-2069 @@
   {

-    MPI_Allreduce(&numberWrittenIndexes_[writtenCommSize], &totalNumberWrittenIndexes_[writtenCommSize], 1, MPI_INT, MPI_SUM, writtenComm);
-    MPI_Scan(&numberWrittenIndexes_[writtenCommSize], &offsetWrittenIndexes_[writtenCommSize], 1, MPI_INT, MPI_SUM, writtenComm);
+    ep_lib::MPI_Allreduce(&numberWrittenIndexes_[writtenCommSize], &totalNumberWrittenIndexes_[writtenCommSize], 1, EP_INT, EP_SUM, writtenComm);
+    ep_lib::MPI_Scan(&numberWrittenIndexes_[writtenCommSize], &offsetWrittenIndexes_[writtenCommSize], 1, EP_INT, EP_SUM, writtenComm);
     offsetWrittenIndexes_[writtenCommSize] -= numberWrittenIndexes_[writtenCommSize];
   }
XIOS/trunk/src/node/domain.hpp
r1637 → r1638

@@ lines 94-101 @@
   bool isWrittenCompressed(const StdString& filename) const;

-  int getNumberWrittenIndexes(MPI_Comm writtenCom);
-  int getTotalNumberWrittenIndexes(MPI_Comm writtenCom);
-  int getOffsetWrittenIndexes(MPI_Comm writtenCom);
-  CArray<int,1>& getCompressedIndexToWriteOnServer(MPI_Comm writtenCom);
+  int getNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom);
+  int getTotalNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom);
+  int getOffsetWrittenIndexes(ep_lib::MPI_Comm writtenCom);
+  CArray<int,1>& getCompressedIndexToWriteOnServer(ep_lib::MPI_Comm writtenCom);

   std::map<int, StdSize> getAttributesBufferSize(CContextClient* client, bool bufferForWriting = false);

@@ lines 116-120 @@

   void computeWrittenIndex();
-  void computeWrittenCompressedIndex(MPI_Comm);
+  void computeWrittenCompressedIndex(ep_lib::MPI_Comm);

   void AllgatherRectilinearLonLat(CArray<double,1>& lon, CArray<double,1>& lat,
XIOS/trunk/src/node/field.cpp
r1637 → r1638

@@ lines 531-539 @@
     if (!nstepMaxRead)
     {
+      #ifdef _usingMPI
       MPI_Allreduce(MPI_IN_PLACE, &nstepMax, 1, MPI_INT, MPI_MAX, context->server->intraComm);
+      #elif _usingEP
+      ep_lib::MPI_Allreduce(&nstepMax, &nstepMax, 1, EP_INT, EP_MAX, context->server->intraComm);
+      #endif
       nstepMaxRead = true;
     }
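The field.cpp hunk does not simply swap namespaces: it keeps the plain-MPI call and adds an endpoint variant behind preprocessor guards, and the _usingEP branch passes &nstepMax as both send and receive buffer instead of using MPI_IN_PLACE. A stripped-down sketch of the same guard, with a hypothetical helper name; it assumes, as the call site suggests, that the _usingMPI/_usingEP macros are set by the XIOS build and that the communicator type aliases plain MPI_Comm when _usingMPI is defined:

    // Reduce a step counter to its maximum over the intra communicator,
    // choosing the MPI or endpoint (ep_lib) path at compile time.
    void reduceNstepMax(int& nstepMax, ep_lib::MPI_Comm intraComm)
    {
    #ifdef _usingMPI
      // Classic MPI: in-place reduction on the caller's buffer.
      MPI_Allreduce(MPI_IN_PLACE, &nstepMax, 1, MPI_INT, MPI_MAX, intraComm);
    #elif _usingEP
      // Endpoint library: same buffer passed as send and receive, as in the hunk.
      ep_lib::MPI_Allreduce(&nstepMax, &nstepMax, 1, EP_INT, EP_MAX, intraComm);
    #endif
    }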
XIOS/trunk/src/node/file.cpp
r1622 → r1638

@@ lines 25-29 @@
   CFile::CFile(void)
     : CObjectTemplate<CFile>(), CFileAttributes()
-    , vFieldGroup(), data_out(), enabledFields(), fileComm(MPI_COMM_NULL)
+    , vFieldGroup(), data_out(), enabledFields(), fileComm(EP_COMM_NULL)
     , isOpen(false), read_client(0), checkRead(false), allZoneEmpty(false)
   {

@@ lines 34-38 @@
   CFile::CFile(const StdString & id)
     : CObjectTemplate<CFile>(id), CFileAttributes()
-    , vFieldGroup(), data_out(), enabledFields(), fileComm(MPI_COMM_NULL)
+    , vFieldGroup(), data_out(), enabledFields(), fileComm(EP_COMM_NULL)
     , isOpen(false), read_client(0), checkRead(false), allZoneEmpty(false)
   {

@@ lines 307-312 @@

     int color = allZoneEmpty ? 0 : 1;
-    MPI_Comm_split(server->intraComm, color, server->intraCommRank, &fileComm);
-    if (allZoneEmpty) MPI_Comm_free(&fileComm);
+    ep_lib::MPI_Comm_split(server->intraComm, color, server->intraCommRank, &fileComm);
+    if (allZoneEmpty) ep_lib::MPI_Comm_free(&fileComm);
   }
   CATCH_DUMP_ATTR

@@ lines 554-559 @@
   {
     int commSize, commRank;
-    MPI_Comm_size(fileComm, &commSize);
-    MPI_Comm_rank(fileComm, &commRank);
+    ep_lib::MPI_Comm_size(fileComm, &commSize);
+    ep_lib::MPI_Comm_rank(fileComm, &commRank);

     if (server->intraCommSize > 1)

@@ lines 634-638 @@
     CContext* context = CContext::getCurrent();
     CContextServer* server = context->server;
-    MPI_Comm readComm = this->fileComm;
+    ep_lib::MPI_Comm readComm = this->fileComm;

     if (!allZoneEmpty)

@@ lines 677-682 @@
     {
       int commSize, commRank;
-      MPI_Comm_size(readComm, &commSize);
-      MPI_Comm_rank(readComm, &commRank);
+      ep_lib::MPI_Comm_size(readComm, &commSize);
+      ep_lib::MPI_Comm_rank(readComm, &commRank);

       if (server->intraCommSize > 1)

@@ lines 722-726 @@
       isOpen = false;
     }
-    if (fileComm != MPI_COMM_NULL) MPI_Comm_free(&fileComm);
+    if (fileComm != EP_COMM_NULL) ep_lib::MPI_Comm_free(&fileComm);
   }
   CATCH_DUMP_ATTR
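Beyond the namespace change, the file.cpp hunks show the whole fileComm lifecycle under the wrappers: the member is initialised to EP_COMM_NULL, created with ep_lib::MPI_Comm_split using a colour that separates servers holding data from empty ones, freed immediately on the empty ranks, and freed at close only when the EP_COMM_NULL check shows it is still allocated. A compact sketch of that lifecycle with hypothetical names, using only the calls that appear in the hunks above:

    // Hedged sketch of the fileComm lifecycle seen in the file.cpp hunks.
    ep_lib::MPI_Comm fileComm = EP_COMM_NULL;

    void createFileComm(ep_lib::MPI_Comm intraComm, int intraCommRank, bool allZoneEmpty)
    {
      int color = allZoneEmpty ? 0 : 1;                      // ranks with data vs. empty ranks
      ep_lib::MPI_Comm_split(intraComm, color, intraCommRank, &fileComm);
      if (allZoneEmpty) ep_lib::MPI_Comm_free(&fileComm);    // nothing to write on this rank
    }

    void closeFileComm()
    {
      // Guard with the EP_ null handle rather than MPI_COMM_NULL.
      if (fileComm != EP_COMM_NULL) ep_lib::MPI_Comm_free(&fileComm);
    }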
XIOS/trunk/src/node/file.hpp
r1622 → r1638

@@ lines 175-179 @@
     int nbAxis, nbDomains;
     bool isOpen;
-    MPI_Comm fileComm;
+    ep_lib::MPI_Comm fileComm;

   private:
XIOS/trunk/src/node/grid.cpp
r1637 → r1638

@@ lines 661-666 @@
   {
     CContextServer* server = CContext::getCurrent()->server;
-    MPI_Allreduce(&numberWrittenIndexes_, &totalNumberWrittenIndexes_, 1, MPI_INT, MPI_SUM, server->intraComm);
-    MPI_Scan(&numberWrittenIndexes_, &offsetWrittenIndexes_, 1, MPI_INT, MPI_SUM, server->intraComm);
+    ep_lib::MPI_Allreduce(&numberWrittenIndexes_, &totalNumberWrittenIndexes_, 1, EP_INT, EP_SUM, server->intraComm);
+    ep_lib::MPI_Scan(&numberWrittenIndexes_, &offsetWrittenIndexes_, 1, EP_INT, EP_SUM, server->intraComm);
     offsetWrittenIndexes_ -= numberWrittenIndexes_;
   }

@@ lines 856-866 @@
     displs[0] = 0;
     int localCount = connectedServerRank_[receiverSize].size() ;
-    MPI_Gather(&localCount, 1, MPI_INT, &counts[0], 1, MPI_INT, 0, client->intraComm) ;
+    ep_lib::MPI_Gather(&localCount, 1, EP_INT, &counts[0], 1, EP_INT, 0, client->intraComm) ;
     for (int i = 0; i < client->clientSize-1; ++i)
     {
     ...
     }
     std::vector<int> allConnectedServers(displs[client->clientSize-1]+counts[client->clientSize-1]);
-    MPI_Gatherv(&(connectedServerRank_[receiverSize])[0], localCount, MPI_INT, &allConnectedServers[0], &counts[0], &displs[0], MPI_INT, 0, client->intraComm);
+    ep_lib::MPI_Gatherv(&(connectedServerRank_[receiverSize])[0], localCount, EP_INT, &allConnectedServers[0], &counts[0], &displs[0], EP_INT, 0, client->intraComm);

     if ((allConnectedServers.size() != receiverSize) && (client->clientRank == 0))
XIOS/trunk/src/node/mesh.cpp
r1542 → r1638

@@ lines 414-426 @@
   * \param [in] bounds_lat Array of boundary latitudes. Its size depends on the element type.
   */
-  void CMesh::createMeshEpsilon(const MPI_Comm& comm,
+  void CMesh::createMeshEpsilon(const ep_lib::MPI_Comm& comm,
                                 const CArray<double, 1>& lonvalue, const CArray<double, 1>& latvalue,
                                 const CArray<double, 2>& bounds_lon, const CArray<double, 2>& bounds_lat)
   ...
     int nvertex = (bounds_lon.numElements() == 0) ? 1 : bounds_lon.rows();
     int mpiRank, mpiSize;
-    MPI_Comm_rank(comm, &mpiRank);
-    MPI_Comm_size(comm, &mpiSize);
+    ep_lib::MPI_Comm_rank(comm, &mpiRank);
+    ep_lib::MPI_Comm_size(comm, &mpiSize);
     double prec = 1e-11; // used in calculations of edge_lon/lat

@@ lines 460-464 @@
     unsigned long nbEdgesOnProc = nbEdges_;
     unsigned long nbEdgesAccum;
-    MPI_Scan(&nbEdgesOnProc, &nbEdgesAccum, 1, MPI_UNSIGNED_LONG, MPI_SUM, comm);
+    ep_lib::MPI_Scan(&nbEdgesOnProc, &nbEdgesAccum, 1, EP_UNSIGNED_LONG, EP_SUM, comm);
     nbEdgesAccum -= nbEdges_;

@@ lines 590-596 @@
     unsigned long nodeCount = nodeIdx2Idx.size();
     unsigned long nodeStart, nbNodes;
-    MPI_Scan(&nodeCount, &nodeStart, 1, MPI_UNSIGNED_LONG, MPI_SUM, comm);
+    ep_lib::MPI_Scan(&nodeCount, &nodeStart, 1, EP_UNSIGNED_LONG, EP_SUM, comm);
     int nNodes = nodeStart;
-    MPI_Bcast(&nNodes, 1, MPI_UNSIGNED_LONG, mpiSize-1, comm);
+    ep_lib::MPI_Bcast(&nNodes, 1, EP_UNSIGNED_LONG, mpiSize-1, comm);
     nbNodesGlo = nNodes;

@@ lines 683-687 @@
     unsigned long nbFacesOnProc = nbFaces_;
     unsigned long nbFacesAccum;
-    MPI_Scan(&nbFacesOnProc, &nbFacesAccum, 1, MPI_UNSIGNED_LONG, MPI_SUM, comm);
+    ep_lib::MPI_Scan(&nbFacesOnProc, &nbFacesAccum, 1, EP_UNSIGNED_LONG, EP_SUM, comm);
     nbFacesAccum -= nbFaces_;

@@ lines 807-813 @@

     unsigned long edgeStart, nbEdges;
-    MPI_Scan(&edgeCount, &edgeStart, 1, MPI_UNSIGNED_LONG, MPI_SUM, comm);
+    ep_lib::MPI_Scan(&edgeCount, &edgeStart, 1, EP_UNSIGNED_LONG, EP_SUM, comm);
     int nEdges = edgeStart;
-    MPI_Bcast(&nEdges, 1, MPI_UNSIGNED_LONG, mpiSize-1, comm);
+    ep_lib::MPI_Bcast(&nEdges, 1, EP_UNSIGNED_LONG, mpiSize-1, comm);
     nbEdgesGlo = nEdges;

@@ lines 1028-1034 @@
     unsigned long edgeCount = edgeIdx2Idx.size();
     unsigned long edgeStart, nbEdges;
-    MPI_Scan(&edgeCount, &edgeStart, 1, MPI_UNSIGNED_LONG, MPI_SUM, comm);
+    ep_lib::MPI_Scan(&edgeCount, &edgeStart, 1, EP_UNSIGNED_LONG, EP_SUM, comm);
     int nEdges = edgeStart;
-    MPI_Bcast(&nEdges, 1, MPI_UNSIGNED_LONG, mpiSize-1, comm);
+    ep_lib::MPI_Bcast(&nEdges, 1, EP_UNSIGNED_LONG, mpiSize-1, comm);
     nbEdgesGlo = nEdges;

@@ lines 1298-1304 @@
     unsigned long nodeCount = nodeIdx2Idx.size();
     unsigned long nodeStart, nbNodes;
-    MPI_Scan(&nodeCount, &nodeStart, 1, MPI_UNSIGNED_LONG, MPI_SUM, comm);
+    ep_lib::MPI_Scan(&nodeCount, &nodeStart, 1, EP_UNSIGNED_LONG, EP_SUM, comm);
     int nNodes = nodeStart;
-    MPI_Bcast(&nNodes, 1, MPI_UNSIGNED_LONG, mpiSize-1, comm);
+    ep_lib::MPI_Bcast(&nNodes, 1, EP_UNSIGNED_LONG, mpiSize-1, comm);
     nbNodesGlo = nNodes;

@@ lines 1418-1424 @@
     unsigned long edgeCount = edgeIdx2Idx.size();
     unsigned long edgeStart, nbEdges;
-    MPI_Scan(&edgeCount, &edgeStart, 1, MPI_UNSIGNED_LONG, MPI_SUM, comm);
+    ep_lib::MPI_Scan(&edgeCount, &edgeStart, 1, EP_UNSIGNED_LONG, EP_SUM, comm);
     int nEdges = edgeStart;
-    MPI_Bcast(&nEdges, 1, MPI_UNSIGNED_LONG, mpiSize-1, comm);
+    ep_lib::MPI_Bcast(&nEdges, 1, EP_UNSIGNED_LONG, mpiSize-1, comm);
     nbEdgesGlo = nEdges;

@@ lines 1614-1627 @@
   */

-  void CMesh::getGloNghbFacesNodeType(const MPI_Comm& comm, const CArray<int, 1>& face_idx,
+  void CMesh::getGloNghbFacesNodeType(const ep_lib::MPI_Comm& comm, const CArray<int, 1>& face_idx,
                                       const CArray<double, 2>& bounds_lon, const CArray<double, 2>& bounds_lat,
                                       CArray<int, 2>& nghbFaces)
   ...

     int mpiRank, mpiSize;
-    MPI_Comm_rank(comm, &mpiRank);
-    MPI_Comm_size(comm, &mpiSize);
+    ep_lib::MPI_Comm_rank(comm, &mpiRank);
+    ep_lib::MPI_Comm_size(comm, &mpiSize);

     // (1) Generating unique node indexes

@@ lines 1770-1784 @@
   */

-  void CMesh::getGloNghbFacesEdgeType(const MPI_Comm& comm, const CArray<int, 1>& face_idx,
+  void CMesh::getGloNghbFacesEdgeType(const ep_lib::MPI_Comm& comm, const CArray<int, 1>& face_idx,
                                       const CArray<double, 2>& bounds_lon, const CArray<double, 2>& bounds_lat,
                                       CArray<int, 2>& nghbFaces)
   ...

     int mpiRank, mpiSize;
-    MPI_Comm_rank(comm, &mpiRank);
-    MPI_Comm_size(comm, &mpiSize);
+    ep_lib::MPI_Comm_rank(comm, &mpiRank);
+    ep_lib::MPI_Comm_size(comm, &mpiSize);

     // (1) Generating unique node indexes

@@ lines 1951-1955 @@
   */

-  void CMesh::getGlobalNghbFaces(const int nghbType, const MPI_Comm& comm,
+  void CMesh::getGlobalNghbFaces(const int nghbType, const ep_lib::MPI_Comm& comm,
                                  const CArray<int, 1>& face_idx,
                                  const CArray<double, 2>& bounds_lon, const CArray<double, 2>& bounds_lat,
XIOS/trunk/src/node/mesh.hpp
r1542 → r1638

@@ lines 60-68 @@
                    const CArray<double, 2>&, const CArray<double, 2>& );

-    void createMeshEpsilon(const MPI_Comm&,
+    void createMeshEpsilon(const ep_lib::MPI_Comm&,
                            const CArray<double, 1>&, const CArray<double, 1>&,
                            const CArray<double, 2>&, const CArray<double, 2>& );

-    void getGlobalNghbFaces(const int, const MPI_Comm&, const CArray<int, 1>&,
+    void getGlobalNghbFaces(const int, const ep_lib::MPI_Comm&, const CArray<int, 1>&,
                             const CArray<double, 2>&, const CArray<double, 2>&,
                             CArray<int, 2>&);

@@ lines 87-92 @@
     CClientClientDHTSizet* pNodeGlobalIndex; // pointer to a map <nodeHash, nodeIdxGlo>
     CClientClientDHTSizet* pEdgeGlobalIndex; // pointer to a map <edgeHash, edgeIdxGlo>
-    void getGloNghbFacesNodeType(const MPI_Comm&, const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&);
-    void getGloNghbFacesEdgeType(const MPI_Comm&, const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&);
+    void getGloNghbFacesNodeType(const ep_lib::MPI_Comm&, const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&);
+    void getGloNghbFacesEdgeType(const ep_lib::MPI_Comm&, const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&);
     void getLocNghbFacesNodeType(const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&, CArray<int, 1>&);
     void getLocNghbFacesEdgeType(const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&, CArray<int, 1>&);