Timestamp: 05/15/19 17:19:08 (5 years ago)
Author: yushan
Message:

MARK: branch merged with trunk @1660. Tested (test_complete, test_remap) on ADA with IntelMPI, using _usingEP/_usingMPI as the switch.

Location: XIOS/dev/dev_trunk_omp/src/transformation
Files: 3 edited

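The commit message describes _usingEP/_usingMPI as a compile-time switch, and the diffs below consistently drop the explicit ep_lib:: qualification from the MPI calls so that the same call sites can serve both builds. The snippet below is a minimal sketch of that idea, not the actual XIOS mechanism: the two namespaces and the MPI_Barrier_demo function are invented for illustration, and only the _usingEP/_usingMPI macro names come from the changeset.

    // Hypothetical sketch (not XIOS code): a compile-time switch that makes the
    // same unqualified call bind either to an endpoint ("ep_lib") layer or to a
    // plain MPI layer, so call sites do not need an explicit ep_lib:: prefix.
    #include <cstdio>

    namespace plain_mpi            // stand-in for the ordinary MPI implementation
    {
      inline void MPI_Barrier_demo() { std::puts("plain MPI path"); }
    }

    namespace ep_lib               // stand-in for the endpoint (thread-level) layer
    {
      inline void MPI_Barrier_demo() { std::puts("ep_lib endpoint path"); }
    }

    #ifdef _usingEP
    using namespace ep_lib;        // unqualified names resolve to the endpoint layer
    #else                          // _usingMPI
    using namespace plain_mpi;     // unqualified names resolve to plain MPI
    #endif

    int main()
    {
      MPI_Barrier_demo();          // one call site, two possible bindings
      return 0;
    }

Compiling with -D_usingEP or -D_usingMPI selects the path; removing the hard-coded ep_lib:: qualifications, as the hunks below do, is what allows a single call site to work under either build.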
  • XIOS/dev/dev_trunk_omp/src/transformation/domain_algorithm_interpolate.cpp

    r1646 r1661
    438 438
    439 439    ep_lib::MPI_Comm poleComme = MPI_COMM_NULL;
    440     -  ep_lib::MPI_Comm_split(client->intraComm, interMapValuePole.empty() ? 0 : 1, 0, &poleComme);
        440 +  MPI_Comm_split(client->intraComm, interMapValuePole.empty() ? 0 : 1, 0, &poleComme);
    441 441    if (poleComme!=MPI_COMM_NULL)
    442 442    {
    443 443      int nbClientPole;
    444     -    ep_lib::MPI_Comm_size(poleComme, &nbClientPole);
        444 +    MPI_Comm_size(poleComme, &nbClientPole);
    445 445
    446 446      std::map<int,std::vector<std::pair<int,double> > >::iterator itePole = interMapValuePole.end(), itPole,

    453 453      std::vector<int> recvCount(nbClientPole,0);
    454 454      std::vector<int> displ(nbClientPole,0);
    455     -    ep_lib::MPI_Allgather(&nbWeight,1,MPI_INT,&recvCount[0],1,MPI_INT,poleComme) ;
        455 +    MPI_Allgather(&nbWeight,1,MPI_INT,&recvCount[0],1,MPI_INT,poleComme) ;
        456 +
    456 457      displ[0]=0;
    457 458      for(int n=1;n<nbClientPole;++n) displ[n]=displ[n-1]+recvCount[n-1] ;

    475 476
    476 477      // Gather all index and weight for pole
    477     -    ep_lib::MPI_Allgatherv(&sendSourceIndexBuff[0],nbWeight,MPI_INT,&recvSourceIndexBuff[0],&recvCount[0],&displ[0],MPI_INT,poleComme);
    478     -    ep_lib::MPI_Allgatherv(&sendSourceWeightBuff[0],nbWeight,MPI_DOUBLE,&recvSourceWeightBuff[0],&recvCount[0],&displ[0],MPI_DOUBLE,poleComme);
        478 +    MPI_Allgatherv(&sendSourceIndexBuff[0],nbWeight,MPI_INT,&recvSourceIndexBuff[0],&recvCount[0],&displ[0],MPI_INT,poleComme);
        479 +    MPI_Allgatherv(&sendSourceWeightBuff[0],nbWeight,MPI_DOUBLE,&recvSourceWeightBuff[0],&recvCount[0],&displ[0],MPI_DOUBLE,poleComme);
    479 480
    480 481      std::map<int,double> recvTemp;

    633 634
    634 635
    635     -  ep_lib::MPI_Allreduce(sendBuff, recvBuff, nbClient, MPI_INT, MPI_SUM, client->intraComm);
        636 +  MPI_Allreduce(sendBuff, recvBuff, nbClient, MPI_INT, MPI_SUM, client->intraComm);
    636 637
    637 638    int* sendIndexDestBuff = new int [sendBuffSize];

    661 662      }
    662 663
    663     -    ep_lib::MPI_Isend(sendIndexDestBuff + sendOffSet,
        664 +    MPI_Isend(sendIndexDestBuff + sendOffSet,
    664 665               k,
    665 666               MPI_INT,

    668 669               client->intraComm,
    669 670               &sendRequest[position++]);
    670     -    ep_lib::MPI_Isend(sendIndexSrcBuff + sendOffSet,
        671 +    MPI_Isend(sendIndexSrcBuff + sendOffSet,
    671 672               k,
    672 673               MPI_INT,

    675 676               client->intraComm,
    676 677               &sendRequest[position++]);
    677     -    ep_lib::MPI_Isend(sendWeightBuff + sendOffSet,
        678 +    MPI_Isend(sendWeightBuff + sendOffSet,
    678 679               k,
    679 680               MPI_DOUBLE,

    694 695    {
    695 696      ep_lib::MPI_Status recvStatus;
    696     -    ep_lib::MPI_Recv((recvIndexDestBuff + receivedSize),
        697 +    MPI_Recv((recvIndexDestBuff + receivedSize),
    697 698               recvBuffSize,
    698 699               MPI_INT,

    710 711      #endif
    711 712
    712     -    ep_lib::MPI_Recv((recvIndexSrcBuff + receivedSize),
        713 +    MPI_Recv((recvIndexSrcBuff + receivedSize),
    713 714               recvBuffSize,
    714 715               MPI_INT,

    718 719               &recvStatus);
    719 720
    720     -    ep_lib::MPI_Recv((recvWeightBuff + receivedSize),
        721 +    MPI_Recv((recvWeightBuff + receivedSize),
    721 722               recvBuffSize,
    722 723               MPI_DOUBLE,

    735 736
    736 737    std::vector<ep_lib::MPI_Status> requestStatus(sendRequest.size());
    737     -  ep_lib::MPI_Waitall(sendRequest.size(), &sendRequest[0], &requestStatus[0]);
        738 +  MPI_Waitall(sendRequest.size(), &sendRequest[0], &requestStatus[0]);
    738 739
    739 740    delete [] sendIndexDestBuff;

    843 844    }
    844 845
    845     -  ep_lib::MPI_Allreduce(&localNbWeight, &globalNbWeight, 1, MPI_LONG, MPI_SUM, client->intraComm);
    846     -  ep_lib::MPI_Scan(&localNbWeight, &startIndex, 1, MPI_LONG, MPI_SUM, client->intraComm);
        846 +  MPI_Allreduce(&localNbWeight, &globalNbWeight, 1, MPI_LONG, MPI_SUM, client->intraComm);
        847 +  MPI_Scan(&localNbWeight, &startIndex, 1, MPI_LONG, MPI_SUM, client->intraComm);
    847 848
    848 849    if (0 == globalNbWeight)
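The changed lines in domain_algorithm_interpolate.cpp gather variable-length index/weight lists over the pole communicator by first exchanging the per-rank counts with MPI_Allgather, building a displacement array, and then calling MPI_Allgatherv. The following is a minimal standalone sketch of that count/displacement pattern with invented buffer contents; it is not XIOS code.

    // Minimal sketch (invented data): Allgather the per-rank count, build displ[],
    // then Allgatherv the variable-length payload into one contiguous buffer.
    #include <mpi.h>
    #include <vector>

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      int rank = 0, nbClient = 0;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      MPI_Comm_size(MPI_COMM_WORLD, &nbClient);

      // Each rank contributes a different number of weights (here rank+1 of them).
      int nbWeight = rank + 1;
      std::vector<double> sendWeight(nbWeight, static_cast<double>(rank));

      // Step 1: every rank learns every other rank's count.
      std::vector<int> recvCount(nbClient, 0);
      MPI_Allgather(&nbWeight, 1, MPI_INT, recvCount.data(), 1, MPI_INT, MPI_COMM_WORLD);

      // Step 2: displacements are the exclusive prefix sum of the counts.
      std::vector<int> displ(nbClient, 0);
      for (int n = 1; n < nbClient; ++n) displ[n] = displ[n - 1] + recvCount[n - 1];

      // Step 3: gather the variable-length payloads.
      std::vector<double> recvWeight(displ[nbClient - 1] + recvCount[nbClient - 1]);
      MPI_Allgatherv(sendWeight.data(), nbWeight, MPI_DOUBLE,
                     recvWeight.data(), recvCount.data(), displ.data(),
                     MPI_DOUBLE, MPI_COMM_WORLD);

      MPI_Finalize();
      return 0;
    }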
  • XIOS/dev/dev_trunk_omp/src/transformation/generic_algorithm_transformation.cpp

    r1646 r1661
    136 136        {
    137 137          distributed=domainListSrcP[elementPositionInGridSrc2DomainPosition_[elementPositionInGrid]]->isDistributed() ;
    138     -        ep_lib::MPI_Allreduce(&distributed,&distributed_glo, 1, MPI_INT, MPI_LOR, client->intraComm) ;
        138 +        MPI_Allreduce(&distributed,&distributed_glo, 1, MPI_INT, MPI_LOR, client->intraComm) ;
    139 139
    140 140        }

    142 142        {
    143 143          distributed=axisListSrcP[elementPositionInGridSrc2AxisPosition_[elementPositionInGrid]]->isDistributed() ;
    144     -        ep_lib::MPI_Allreduce(&distributed,&distributed_glo, 1, MPI_INT, MPI_LOR, client->intraComm) ;
        144 +        MPI_Allreduce(&distributed,&distributed_glo, 1, MPI_INT, MPI_LOR, client->intraComm) ;
    145 145        }
    146 146        else //it's a scalar

    238 238    int sendValue = (computeGlobalIndexOnProc) ? 1 : 0;
    239 239    int recvValue = 0;
    240     -  ep_lib::MPI_Allreduce(&sendValue, &recvValue, 1, MPI_INT, MPI_SUM, client->intraComm);
        240 +  MPI_Allreduce(&sendValue, &recvValue, 1, MPI_INT, MPI_SUM, client->intraComm);
    241 241    computeGlobalIndexOnProc = (0 < recvValue);
    242 242
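The two calls changed in generic_algorithm_transformation.cpp are collective "votes": a logical-OR reduction to decide whether any rank sees a distributed element, and a sum reduction to decide whether any rank needs the global index computation. A small sketch of that idiom with invented variable names (not XIOS code) follows.

    // Sketch (invented flags): each rank contributes a local flag and an
    // Allreduce turns it into a global decision that all ranks agree on.
    #include <mpi.h>

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      int rank = 0;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);

      // Local observation: pretend only rank 0 sees a "distributed" element.
      int localFlag = (rank == 0) ? 1 : 0;

      // Logical OR across ranks: nonzero as soon as any rank's flag is set.
      int anyDistributed = 0;
      MPI_Allreduce(&localFlag, &anyDistributed, 1, MPI_INT, MPI_LOR, MPI_COMM_WORLD);

      // Sum across ranks: the same decision expressed as "more than zero votes".
      int voteCount = 0;
      MPI_Allreduce(&localFlag, &voteCount, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
      bool computeGlobalIndex = (0 < voteCount);
      (void)anyDistributed; (void)computeGlobalIndex;

      MPI_Finalize();
      return 0;
    }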
  • XIOS/dev/dev_trunk_omp/src/transformation/grid_transformation.cpp

    r1646 r1661
    514 514    sendRankSizeMap[itIndex->first] = sendSize;
    515 515  }
    516     -  ep_lib::MPI_Allgather(&connectedClient,1,MPI_INT,recvCount,1,MPI_INT,client->intraComm);
        516 +  MPI_Allgather(&connectedClient,1,MPI_INT,recvCount,1,MPI_INT,client->intraComm);
    517 517
    518 518  displ[0]=0 ;

    521 521  int* recvRankBuff=new int[recvSize];
    522 522  int* recvSizeBuff=new int[recvSize];
    523     -  ep_lib::MPI_Allgatherv(sendRankBuff,connectedClient,MPI_INT,recvRankBuff,recvCount,displ,MPI_INT,client->intraComm);
    524     -  ep_lib::MPI_Allgatherv(sendSizeBuff,connectedClient,MPI_INT,recvSizeBuff,recvCount,displ,MPI_INT,client->intraComm);
        523 +  MPI_Allgatherv(sendRankBuff,connectedClient,MPI_INT,recvRankBuff,recvCount,displ,MPI_INT,client->intraComm);
        524 +  MPI_Allgatherv(sendSizeBuff,connectedClient,MPI_INT,recvSizeBuff,recvCount,displ,MPI_INT,client->intraComm);
    525 525  for (int i = 0; i < nbClient; ++i)
    526 526  {

    546 546    recvGlobalIndexSrc[recvRank] = new unsigned long [recvSize];
    547 547
    548     -    ep_lib::MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests[requests_position++]);
    549     -    ep_lib::MPI_Irecv(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 47, client->intraComm, &requests[requests_position++]);
        548 +    MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests[requests_position++]);
        549 +    MPI_Irecv(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 47, client->intraComm, &requests[requests_position++]);
    550 550  }
    551 551

    582 582
    583 583    // Send global index source and mask
    584     -    ep_lib::MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests[requests_position++]);
    585     -    ep_lib::MPI_Isend(sendMaskDst[sendRank], sendSize, MPI_UNSIGNED_CHAR, sendRank, 47, client->intraComm, &requests[requests_position++]);
        584 +    MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests[requests_position++]);
        585 +    MPI_Isend(sendMaskDst[sendRank], sendSize, MPI_UNSIGNED_CHAR, sendRank, 47, client->intraComm, &requests[requests_position++]);
    586 586  }
    587 587

    599 599    int recvSize = itSend->second;
    600 600
    601     -    ep_lib::MPI_Irecv(sendMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests[requests_position++]);
        601 +    MPI_Irecv(sendMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests[requests_position++]);
    602 602  }
    603 603

    635 635
    636 636    // Okie, now inform the destination which source index are masked
    637     -    ep_lib::MPI_Isend(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests[requests_position++]);
        637 +    MPI_Isend(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests[requests_position++]);
    638 638  }
    639 639  status.resize(requests.size());
    640     -  ep_lib::MPI_Waitall(requests.size(), &requests[0], &status[0]);
        640 +  MPI_Waitall(requests.size(), &requests[0], &status[0]);
    641 641
    642 642  // Cool, now we can fill in local index of grid destination (counted for masked index)
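The grid_transformation.cpp changes follow the usual nonblocking exchange pattern: post the MPI_Irecv calls, post the matching MPI_Isend calls with the same tags (46/47/48 here), then complete everything with a single MPI_Waitall. Below is a minimal self-contained sketch of that pattern with an invented ring exchange and one of the tags from the diff; it is not XIOS code.

    // Minimal sketch (invented data): post receives, post sends, wait on all requests.
    #include <mpi.h>
    #include <vector>

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      int rank = 0, size = 0;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      MPI_Comm_size(MPI_COMM_WORLD, &size);

      const int tag = 46;                      // one of the tags used in the diff
      int partner = (rank + 1) % size;         // send to the next rank in a ring
      int source  = (rank - 1 + size) % size;  // receive from the previous rank

      std::vector<unsigned long> sendBuf(4, static_cast<unsigned long>(rank));
      std::vector<unsigned long> recvBuf(4, 0);

      std::vector<MPI_Request> requests(2);

      // Post the receive first, then the matching send; both return immediately.
      MPI_Irecv(recvBuf.data(), static_cast<int>(recvBuf.size()), MPI_UNSIGNED_LONG,
                source, tag, MPI_COMM_WORLD, &requests[0]);
      MPI_Isend(sendBuf.data(), static_cast<int>(sendBuf.size()), MPI_UNSIGNED_LONG,
                partner, tag, MPI_COMM_WORLD, &requests[1]);

      // Complete both operations; statuses are collected as in the changed code.
      std::vector<MPI_Status> status(requests.size());
      MPI_Waitall(static_cast<int>(requests.size()), requests.data(), status.data());

      MPI_Finalize();
      return 0;
    }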