Timestamp: 01/22/19 16:43:32 (5 years ago)
Author: yushan
Message: revert erroneous commit on trunk
File: 1 edited

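The reverted commit had moved mapper.cpp onto ep_lib, XIOS's endpoint wrapper layer that shadows the MPI API behind the ep_lib:: namespace and EP_* datatype/operation constants. r1639 restores the standard MPI spellings throughout the file: ep_lib::MPI_Comm_rank becomes MPI_Comm_rank, EP_INT/EP_LONG/EP_DOUBLE/EP_CHAR become MPI_INT/MPI_LONG/MPI_DOUBLE/MPI_CHAR, EP_SUM becomes MPI_SUM, and the ep_lib::MPI_Request and ep_lib::MPI_Status handle types lose their prefix. As a minimal sketch of the restored shape, here is the rank/offset computation from the first hunks of the diff below, wrapped in a hypothetical free function (the calls and constants are exactly those in the diff; the surrounding function is illustrative only):

    #include <mpi.h>

    // Illustrative wrapper around the calls in the first two hunks (r1639 state).
    // Before the revert the same lines read ep_lib::MPI_Comm_rank(...),
    // ep_lib::MPI_Scan(..., EP_LONG, EP_SUM, ...), and so on.
    long firstGlobalId(MPI_Comm communicator, long nbCells)
    {
      int mpiRank, mpiSize;
      MPI_Comm_rank(communicator, &mpiRank);
      MPI_Comm_size(communicator, &mpiSize);

      long offset;
      long nb = nbCells;
      // MPI_Scan yields an inclusive prefix sum of nb over the ranks;
      // subtracting the local count gives this rank's first global id.
      MPI_Scan(&nb, &offset, 1, MPI_LONG, MPI_SUM, communicator);
      return offset - nb;
    }
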
  • XIOS/trunk/extern/remap/src/mapper.cpp

--- XIOS/trunk/extern/remap/src/mapper.cpp (r1638)
+++ XIOS/trunk/extern/remap/src/mapper.cpp (r1639)
@@ -32,6 +32,6 @@
 
   int mpiRank, mpiSize;
-  ep_lib::MPI_Comm_rank(communicator, &mpiRank);
-  ep_lib::MPI_Comm_size(communicator, &mpiSize);
+  MPI_Comm_rank(communicator, &mpiRank);
+  MPI_Comm_size(communicator, &mpiSize);
 
   sourceElements.reserve(nbCells);
@@ -43,5 +43,5 @@
     long int offset ;
     long int nb=nbCells ;
-    ep_lib::MPI_Scan(&nb,&offset,1,EP_LONG,EP_SUM,communicator) ;
+    MPI_Scan(&nb,&offset,1,MPI_LONG,MPI_SUM,communicator) ;
     offset=offset-nb ;
     for(int i=0;i<nbCells;i++) sourceGlobalId[i]=offset+i ;
@@ -70,6 +70,6 @@
 
   int mpiRank, mpiSize;
-  ep_lib::MPI_Comm_rank(communicator, &mpiRank);
-  ep_lib::MPI_Comm_size(communicator, &mpiSize);
+  MPI_Comm_rank(communicator, &mpiRank);
+  MPI_Comm_size(communicator, &mpiSize);
 
   targetElements.reserve(nbCells);
@@ -81,5 +81,5 @@
     long int offset ;
     long int nb=nbCells ;
-    ep_lib::MPI_Scan(&nb,&offset,1,EP_LONG,EP_SUM,communicator) ;
+    MPI_Scan(&nb,&offset,1,MPI_LONG,MPI_SUM,communicator) ;
     offset=offset-nb ;
     for(int i=0;i<nbCells;i++) targetGlobalId[i]=offset+i ;
@@ -117,6 +117,6 @@
   vector<double> timings;
   int mpiSize, mpiRank;
-  ep_lib::MPI_Comm_size(communicator, &mpiSize);
-  ep_lib::MPI_Comm_rank(communicator, &mpiRank);
+  MPI_Comm_size(communicator, &mpiSize);
+  MPI_Comm_rank(communicator, &mpiRank);
 
   this->buildSSTree(sourceMesh, targetMesh);
@@ -173,6 +173,6 @@
 {
   int mpiSize, mpiRank;
-  ep_lib::MPI_Comm_size(communicator, &mpiSize);
-  ep_lib::MPI_Comm_rank(communicator, &mpiRank);
+  MPI_Comm_size(communicator, &mpiSize);
+  MPI_Comm_rank(communicator, &mpiRank);
 
   /* create list of intersections (super mesh elements) for each rank */
@@ -235,5 +235,5 @@
   /* communicate sizes of source elements to be sent (index lists and later values and gradients) */
   int *nbRecvElement = new int[mpiSize];
-  ep_lib::MPI_Alltoall(nbSendElement, 1, EP_INT, nbRecvElement, 1, EP_INT, communicator);
+  MPI_Alltoall(nbSendElement, 1, MPI_INT, nbRecvElement, 1, MPI_INT, communicator);
 
   /* communicate indices of source elements on other ranks whose value and gradient we need (since intersection) */
@@ -246,11 +246,11 @@
   Coord **sendGrad = new Coord*[mpiSize];
   GloId **sendNeighIds = new GloId*[mpiSize];
-  ep_lib::MPI_Request *sendRequest = new ep_lib::MPI_Request[5*mpiSize];
-  ep_lib::MPI_Request *recvRequest = new ep_lib::MPI_Request[5*mpiSize];
+  MPI_Request *sendRequest = new MPI_Request[5*mpiSize];
+  MPI_Request *recvRequest = new MPI_Request[5*mpiSize];
   for (int rank = 0; rank < mpiSize; rank++)
   {
     if (nbSendElement[rank] > 0)
     {
-      ep_lib::MPI_Issend(sendElement[rank], nbSendElement[rank], EP_INT, rank, 0, communicator, &sendRequest[nbSendRequest]);
+      MPI_Issend(sendElement[rank], nbSendElement[rank], MPI_INT, rank, 0, communicator, &sendRequest[nbSendRequest]);
       nbSendRequest++;
     }
@@ -271,12 +271,12 @@
         sendNeighIds[rank] = new GloId[nbRecvElement[rank]];
       }
-      ep_lib::MPI_Irecv(recvElement[rank], nbRecvElement[rank], EP_INT, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+      MPI_Irecv(recvElement[rank], nbRecvElement[rank], MPI_INT, rank, 0, communicator, &recvRequest[nbRecvRequest]);
       nbRecvRequest++;
     }
   }
-  ep_lib::MPI_Status *status = new ep_lib::MPI_Status[5*mpiSize];
+  MPI_Status *status = new MPI_Status[5*mpiSize];
 
-  ep_lib::MPI_Waitall(nbSendRequest, sendRequest, status);
-  ep_lib::MPI_Waitall(nbRecvRequest, recvRequest, status);
+  MPI_Waitall(nbSendRequest, sendRequest, status);
+  MPI_Waitall(nbRecvRequest, recvRequest, status);
 
   /* for all indices that have been received from requesting ranks: pack values and gradients, then send */
@@ -310,15 +310,15 @@
           sendNeighIds[rank][j] = sstree.localElements[recvElement[rank][j]].src_id;
       }
-      ep_lib::MPI_Issend(sendValue[rank],  nbRecvElement[rank], EP_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
+      MPI_Issend(sendValue[rank],  nbRecvElement[rank], MPI_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
       nbSendRequest++;
-      ep_lib::MPI_Issend(sendArea[rank],  nbRecvElement[rank], EP_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
+      MPI_Issend(sendArea[rank],  nbRecvElement[rank], MPI_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
       nbSendRequest++;
-      ep_lib::MPI_Issend(sendGivenArea[rank],  nbRecvElement[rank], EP_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
+      MPI_Issend(sendGivenArea[rank],  nbRecvElement[rank], MPI_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
       nbSendRequest++;
       if (order == 2)
       {
-        ep_lib::MPI_Issend(sendGrad[rank], 3*nbRecvElement[rank]*(NMAX+1), EP_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
+        MPI_Issend(sendGrad[rank], 3*nbRecvElement[rank]*(NMAX+1), MPI_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
         nbSendRequest++;
-        ep_lib::MPI_Issend(sendNeighIds[rank], 4*nbRecvElement[rank]*(NMAX+1), EP_INT, rank, 0, communicator, &sendRequest[nbSendRequest]);
+        MPI_Issend(sendNeighIds[rank], 4*nbRecvElement[rank]*(NMAX+1), MPI_INT, rank, 0, communicator, &sendRequest[nbSendRequest]);
 //ym  --> beware: GloId size
         nbSendRequest++;
@@ -326,5 +326,5 @@
       else
       {
-        ep_lib::MPI_Issend(sendNeighIds[rank], 4*nbRecvElement[rank], EP_INT, rank, 0, communicator, &sendRequest[nbSendRequest]);
+        MPI_Issend(sendNeighIds[rank], 4*nbRecvElement[rank], MPI_INT, rank, 0, communicator, &sendRequest[nbSendRequest]);
 //ym  --> beware: GloId size
         nbSendRequest++;
@@ -333,16 +333,16 @@
     if (nbSendElement[rank] > 0)
     {
-      ep_lib::MPI_Irecv(recvValue[rank],  nbSendElement[rank], EP_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+      MPI_Irecv(recvValue[rank],  nbSendElement[rank], MPI_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]);
       nbRecvRequest++;
-      ep_lib::MPI_Irecv(recvArea[rank],  nbSendElement[rank], EP_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+      MPI_Irecv(recvArea[rank],  nbSendElement[rank], MPI_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]);
       nbRecvRequest++;
-      ep_lib::MPI_Irecv(recvGivenArea[rank],  nbSendElement[rank], EP_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+      MPI_Irecv(recvGivenArea[rank],  nbSendElement[rank], MPI_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]);
       nbRecvRequest++;
       if (order == 2)
       {
-        ep_lib::MPI_Irecv(recvGrad[rank], 3*nbSendElement[rank]*(NMAX+1),
-            EP_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+        MPI_Irecv(recvGrad[rank], 3*nbSendElement[rank]*(NMAX+1),
+            MPI_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]);
         nbRecvRequest++;
-        ep_lib::MPI_Irecv(recvNeighIds[rank], 4*nbSendElement[rank]*(NMAX+1), EP_INT, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+        MPI_Irecv(recvNeighIds[rank], 4*nbSendElement[rank]*(NMAX+1), MPI_INT, rank, 0, communicator, &recvRequest[nbRecvRequest]);
 //ym  --> beware: GloId size
         nbRecvRequest++;
@@ -350,5 +350,5 @@
       else
       {
-        ep_lib::MPI_Irecv(recvNeighIds[rank], 4*nbSendElement[rank], EP_INT, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+        MPI_Irecv(recvNeighIds[rank], 4*nbSendElement[rank], MPI_INT, rank, 0, communicator, &recvRequest[nbRecvRequest]);
 //ym  --> beware: GloId size
         nbRecvRequest++;
@@ -357,6 +357,6 @@
   }
 
-  ep_lib::MPI_Waitall(nbSendRequest, sendRequest, status);
-  ep_lib::MPI_Waitall(nbRecvRequest, recvRequest, status);
+  MPI_Waitall(nbSendRequest, sendRequest, status);
+  MPI_Waitall(nbRecvRequest, recvRequest, status);
 
 
@@ -487,6 +487,6 @@
 {
   int mpiSize, mpiRank;
-  ep_lib::MPI_Comm_size(communicator, &mpiSize);
-  ep_lib::MPI_Comm_rank(communicator, &mpiRank);
+  MPI_Comm_size(communicator, &mpiSize);
+  MPI_Comm_rank(communicator, &mpiRank);
 
   vector<Node> *routingList = new vector<Node>[mpiSize];
@@ -522,6 +522,6 @@
   }
 
-  ep_lib::MPI_Alltoall(nbSendNode, 1, EP_INT, nbRecvNode, 1, EP_INT, communicator);
-  ep_lib::MPI_Alltoall(sendMessageSize, 1, EP_INT, recvMessageSize, 1, EP_INT, communicator);
+  MPI_Alltoall(nbSendNode, 1, MPI_INT, nbRecvNode, 1, MPI_INT, communicator);
+  MPI_Alltoall(sendMessageSize, 1, MPI_INT, recvMessageSize, 1, MPI_INT, communicator);
 
   char **sendBuffer = new char*[mpiSize];
@@ -549,7 +549,7 @@
   int nbSendRequest = 0;
   int nbRecvRequest = 0;
-  ep_lib::MPI_Request *sendRequest = new ep_lib::MPI_Request[mpiSize];
-  ep_lib::MPI_Request *recvRequest = new ep_lib::MPI_Request[mpiSize];
-  ep_lib::MPI_Status  *status      = new ep_lib::MPI_Status[mpiSize];
+  MPI_Request *sendRequest = new MPI_Request[mpiSize];
+  MPI_Request *recvRequest = new MPI_Request[mpiSize];
+  MPI_Status  *status      = new MPI_Status[mpiSize];
 
   for (int rank = 0; rank < mpiSize; rank++)
@@ -557,16 +557,16 @@
     if (nbSendNode[rank] > 0)
     {
-      ep_lib::MPI_Issend(sendBuffer[rank], sendMessageSize[rank], EP_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]);
+      MPI_Issend(sendBuffer[rank], sendMessageSize[rank], MPI_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]);
       nbSendRequest++;
     }
     if (nbRecvNode[rank] > 0)
    {
-      ep_lib::MPI_Irecv(recvBuffer[rank], recvMessageSize[rank], EP_CHAR, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+      MPI_Irecv(recvBuffer[rank], recvMessageSize[rank], MPI_CHAR, rank, 0, communicator, &recvRequest[nbRecvRequest]);
       nbRecvRequest++;
     }
   }
 
-  ep_lib::MPI_Waitall(nbRecvRequest, recvRequest, status);
-  ep_lib::MPI_Waitall(nbSendRequest, sendRequest, status);
+  MPI_Waitall(nbRecvRequest, recvRequest, status);
+  MPI_Waitall(nbSendRequest, sendRequest, status);
 
   for (int rank = 0; rank < mpiSize; rank++)
@@ -615,7 +615,7 @@
 
 
-  ep_lib::MPI_Barrier(communicator);
-  ep_lib::MPI_Alltoall(nbSendNode, 1, EP_INT, nbRecvNode, 1, EP_INT, communicator);
-  ep_lib::MPI_Alltoall(sendMessageSize, 1, EP_INT, recvMessageSize, 1, EP_INT, communicator);
+  MPI_Barrier(communicator);
+  MPI_Alltoall(nbSendNode, 1, MPI_INT, nbRecvNode, 1, MPI_INT, communicator);
+  MPI_Alltoall(sendMessageSize, 1, MPI_INT, recvMessageSize, 1, MPI_INT, communicator);
 
   for (int rank = 0; rank < mpiSize; rank++)
@@ -629,16 +629,16 @@
     if (nbSendNode[rank] > 0)
     {
-      ep_lib::MPI_Issend(sendBuffer2[rank], sendMessageSize[rank], EP_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]);
+      MPI_Issend(sendBuffer2[rank], sendMessageSize[rank], MPI_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]);
       nbSendRequest++;
     }
     if (nbRecvNode[rank] > 0)
     {
-      ep_lib::MPI_Irecv(recvBuffer2[rank], recvMessageSize[rank], EP_CHAR, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+      MPI_Irecv(recvBuffer2[rank], recvMessageSize[rank], MPI_CHAR, rank, 0, communicator, &recvRequest[nbRecvRequest]);
       nbRecvRequest++;
     }
   }
 
-  ep_lib::MPI_Waitall(nbRecvRequest, recvRequest, status);
-  ep_lib::MPI_Waitall(nbSendRequest, sendRequest, status);
+  MPI_Waitall(nbRecvRequest, recvRequest, status);
+  MPI_Waitall(nbSendRequest, sendRequest, status);
 
   int nbNeighbourNodes = 0;
@@ -725,8 +725,8 @@
 {
   int mpiSize, mpiRank;
-  ep_lib::MPI_Comm_size(communicator, &mpiSize);
-  ep_lib::MPI_Comm_rank(communicator, &mpiRank);
+  MPI_Comm_size(communicator, &mpiSize);
+  MPI_Comm_rank(communicator, &mpiRank);
 
-  ep_lib::MPI_Barrier(communicator);
+  MPI_Barrier(communicator);
 
   vector<Node> *routingList = new vector<Node>[mpiSize];
@@ -753,5 +753,5 @@
     cout << endl;
   }
-  ep_lib::MPI_Barrier(communicator);
+  MPI_Barrier(communicator);
 
   int *nbSendNode = new int[mpiSize];
@@ -771,6 +771,6 @@
   }
 
-  ep_lib::MPI_Alltoall(nbSendNode, 1, EP_INT, nbRecvNode, 1, EP_INT, communicator);
-  ep_lib::MPI_Alltoall(sentMessageSize, 1, EP_INT, recvMessageSize, 1, EP_INT, communicator);
+  MPI_Alltoall(nbSendNode, 1, MPI_INT, nbRecvNode, 1, MPI_INT, communicator);
+  MPI_Alltoall(sentMessageSize, 1, MPI_INT, recvMessageSize, 1, MPI_INT, communicator);
 
   int total = 0;
@@ -805,7 +805,7 @@
   int nbSendRequest = 0;
   int nbRecvRequest = 0;
-  ep_lib::MPI_Request *sendRequest = new ep_lib::MPI_Request[mpiSize];
-  ep_lib::MPI_Request *recvRequest = new ep_lib::MPI_Request[mpiSize];
-  ep_lib::MPI_Status   *status = new ep_lib::MPI_Status[mpiSize];
+  MPI_Request *sendRequest = new MPI_Request[mpiSize];
+  MPI_Request *recvRequest = new MPI_Request[mpiSize];
+  MPI_Status   *status = new MPI_Status[mpiSize];
 
   for (int rank = 0; rank < mpiSize; rank++)
@@ -813,16 +813,16 @@
     if (nbSendNode[rank] > 0)
     {
-      ep_lib::MPI_Issend(sendBuffer[rank], sentMessageSize[rank], EP_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]);
+      MPI_Issend(sendBuffer[rank], sentMessageSize[rank], MPI_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]);
       nbSendRequest++;
     }
     if (nbRecvNode[rank] > 0)
     {
-      ep_lib::MPI_Irecv(recvBuffer[rank], recvMessageSize[rank], EP_CHAR, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+      MPI_Irecv(recvBuffer[rank], recvMessageSize[rank], MPI_CHAR, rank, 0, communicator, &recvRequest[nbRecvRequest]);
       nbRecvRequest++;
     }
   }
 
-  ep_lib::MPI_Waitall(nbRecvRequest, recvRequest, status);
-  ep_lib::MPI_Waitall(nbSendRequest, sendRequest, status);
+  MPI_Waitall(nbRecvRequest, recvRequest, status);
+  MPI_Waitall(nbSendRequest, sendRequest, status);
   char **sendBuffer2 = new char*[mpiSize];
   char **recvBuffer2 = new char*[mpiSize];
@@ -883,5 +883,5 @@
 
   if (verbose >= 2) cout << "Rank " << mpiRank << "  Compute (internal) intersection " << cputime() - tic << " s" << endl;
-  ep_lib::MPI_Alltoall(sentMessageSize, 1, EP_INT, recvMessageSize, 1, EP_INT, communicator);
+  MPI_Alltoall(sentMessageSize, 1, MPI_INT, recvMessageSize, 1, MPI_INT, communicator);
 
   for (int rank = 0; rank < mpiSize; rank++)
@@ -896,16 +896,16 @@
     if (sentMessageSize[rank] > 0)
     {
-      ep_lib::MPI_Issend(sendBuffer2[rank], sentMessageSize[rank], EP_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]);
+      MPI_Issend(sendBuffer2[rank], sentMessageSize[rank], MPI_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]);
       nbSendRequest++;
     }
     if (recvMessageSize[rank] > 0)
     {
-      ep_lib::MPI_Irecv(recvBuffer2[rank], recvMessageSize[rank], EP_CHAR, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+      MPI_Irecv(recvBuffer2[rank], recvMessageSize[rank], MPI_CHAR, rank, 0, communicator, &recvRequest[nbRecvRequest]);
       nbRecvRequest++;
    }
   }
 
-  ep_lib::MPI_Waitall(nbRecvRequest, recvRequest, status);
-  ep_lib::MPI_Waitall(nbSendRequest, sendRequest, status);
+  MPI_Waitall(nbRecvRequest, recvRequest, status);
+  MPI_Waitall(nbSendRequest, sendRequest, status);
 
   delete [] sendRequest;
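
Beyond the renaming itself, the larger hunks above are all instances of one exchange idiom: message counts are agreed with MPI_Alltoall, each rank then posts one nonblocking synchronous-mode send (MPI_Issend) per peer it owes data and one MPI_Irecv per message it expects, and both request sets are completed with MPI_Waitall. A self-contained sketch of that pattern, with placeholder names (sendCount, sendBuf, and the exchange function itself are assumptions for illustration, not identifiers from mapper.cpp):

    #include <mpi.h>

    // Sketch of the Issend/Irecv/Waitall exchange used throughout mapper.cpp.
    // sendCount/recvCount play the role of nbSendElement/nbRecvElement (or the
    // message-size arrays), which the real code fills beforehand via MPI_Alltoall.
    void exchange(MPI_Comm comm, int mpiSize,
                  int* sendCount, int** sendBuf,
                  int* recvCount, int** recvBuf)
    {
      MPI_Request* sendRequest = new MPI_Request[mpiSize];
      MPI_Request* recvRequest = new MPI_Request[mpiSize];
      int nbSendRequest = 0, nbRecvRequest = 0;

      for (int rank = 0; rank < mpiSize; rank++)
      {
        if (sendCount[rank] > 0)   // synchronous mode: completes only once matched
          MPI_Issend(sendBuf[rank], sendCount[rank], MPI_INT, rank, 0,
                     comm, &sendRequest[nbSendRequest++]);
        if (recvCount[rank] > 0)
          MPI_Irecv(recvBuf[rank], recvCount[rank], MPI_INT, rank, 0,
                    comm, &recvRequest[nbRecvRequest++]);
      }

      // mapper.cpp keeps an explicit MPI_Status array; the statuses are never
      // inspected afterwards, so ignoring them is equivalent here.
      MPI_Waitall(nbSendRequest, sendRequest, MPI_STATUSES_IGNORE);
      MPI_Waitall(nbRecvRequest, recvRequest, MPI_STATUSES_IGNORE);

      delete [] sendRequest;
      delete [] recvRequest;
    }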