#include "mpi.hpp"
#include <map>
#include "cputime.hpp" /* time */
#include "meshutil.hpp"
#include "polyg.hpp"
#include "circle.hpp"
#include "intersect.hpp"
#include "intersection_ym.hpp"
#include "errhandle.hpp"
#include "mpi_routing.hpp"
#include "gridRemap.hpp"

#include <fstream>
#include "client.hpp"

#include "mapper.hpp"

namespace MemTrack
{
  void TrackDumpBlocks(std::ofstream& memReport, size_t memtrack_blocks, size_t memtrack_size);
}

namespace sphereRemap {

/* A subdivision of an array into N sub-arrays
   can be represented either by the lengths of the N sub-arrays
   or by the offsets at which each of the N sub-arrays starts.
   This function converts from the former to the latter. */
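/* Example: lengths = {3, 1, 4} with sz = 3 yields offsets = {0, 3, 4}. */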
void cptOffsetsFromLengths(const int *lengths, int *offsets, int sz)
{
  offsets[0] = 0;
  for (int i = 1; i < sz; i++)
    offsets[i] = offsets[i-1] + lengths[i-1];
}


void Mapper::setSourceMesh(const double* boundsLon, const double* boundsLat, const double* area, int nVertex, int nbCells, const double* pole, const long int* globalId)
{
  srcGrid.pole = Coord(pole[0], pole[1], pole[2]);

  int mpiRank, mpiSize;
  MPI_Comm_rank(communicator, &mpiRank);
  MPI_Comm_size(communicator, &mpiSize);

  sourceElements.reserve(nbCells);
  sourceMesh.reserve(nbCells);
  sourceGlobalId.resize(nbCells) ;

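  /* When no global ids are supplied, derive them: MPI_Scan computes an inclusive prefix sum
     of the per-rank cell counts, and subtracting the local count turns it into an exclusive
     offset, so each rank numbers its cells in a contiguous, globally unique range. */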
  if (globalId==NULL)
  {
    long int offset ;
    long int nb=nbCells ;
    MPI_Scan(&nb,&offset,1,MPI_LONG,MPI_SUM,communicator) ;
    offset=offset-nb ;
    for(int i=0;i<nbCells;i++) sourceGlobalId[i]=offset+i ;
  }
  else sourceGlobalId.assign(globalId,globalId+nbCells);

  for (int i = 0; i < nbCells; i++)
  {
    int offs = i*nVertex;
    Elt elt(boundsLon + offs, boundsLat + offs, nVertex);
    elt.src_id.rank = mpiRank;
    elt.src_id.ind = i;
    elt.src_id.globalId = sourceGlobalId[i];
    sourceElements.push_back(elt);
    sourceMesh.push_back(make_shared<Node>(elt.x, cptRadius(elt), &sourceElements.back()));
    cptEltGeom(sourceElements[i], Coord(pole[0], pole[1], pole[2]));
    if (area!=NULL) sourceElements[i].given_area=area[i] ;
    else sourceElements[i].given_area=sourceElements[i].area ;
  }

}

void Mapper::setTargetMesh(const double* boundsLon, const double* boundsLat, const double* area, int nVertex, int nbCells, const double* pole, const long int* globalId)
{
  tgtGrid.pole = Coord(pole[0], pole[1], pole[2]);

  int mpiRank, mpiSize;
  MPI_Comm_rank(communicator, &mpiRank);
  MPI_Comm_size(communicator, &mpiSize);

  targetElements.reserve(nbCells);
  targetMesh.reserve(nbCells);

  targetGlobalId.resize(nbCells) ;
  if (globalId==NULL)
  {
    long int offset ;
    long int nb=nbCells ;
    MPI_Scan(&nb,&offset,1,MPI_LONG,MPI_SUM,communicator) ;
    offset=offset-nb ;
    for(int i=0;i<nbCells;i++) targetGlobalId[i]=offset+i ;
  }
  else targetGlobalId.assign(globalId,globalId+nbCells);

  for (int i = 0; i < nbCells; i++)
  {
    int offs = i*nVertex;
    Elt elt(boundsLon + offs, boundsLat + offs, nVertex);
    targetElements.push_back(elt);
    targetMesh.push_back(make_shared<Node>(elt.x, cptRadius(elt), &targetElements.back()));
    cptEltGeom(targetElements[i], Coord(pole[0], pole[1], pole[2]));
    if (area!=NULL) targetElements[i].given_area=area[i] ;
    else targetElements[i].given_area=targetElements[i].area ;
  }


}

void Mapper::setSourceValue(const double* val)
{
  int size=sourceElements.size() ;
  for(int i=0;i<size;++i) sourceElements[i].val=val[i] ;
}

void Mapper::getTargetValue(double* val)
{
  int size=targetElements.size() ;
  for(int i=0;i<size;++i) val[i]=targetElements[i].val ;
}

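/* Typical calling sequence (a sketch only; the Mapper construction and the exact driver live
   outside this file, so those details are assumptions):
     Mapper mapper(...);   // built with the MPI communicator used below
     mapper.setSourceMesh(srcLon, srcLat, srcArea, nVtxSrc, nSrcCells, srcPole, srcIds);
     mapper.setTargetMesh(tgtLon, tgtLat, tgtArea, nVtxTgt, nTgtCells, tgtPole, tgtIds);
     vector<double> timings = mapper.computeWeights(2, true, false);
   The resulting weights are then read from the remapMatrix, srcAddress, srcRank, dstAddress,
   sourceWeightId and targetWeightId members filled in remap(). */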
vector<double> Mapper::computeWeights(int interpOrder, bool renormalize, bool quantity)
{
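  /* Returned timings (CPU seconds): [0] intersection computation, [1] mesh topology and
     gradient computation (non-trivial only for interpOrder == 2), [2] weight remapping. */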
  vector<double> timings;
  int mpiSize, mpiRank;
  MPI_Comm_size(communicator, &mpiSize);
  MPI_Comm_rank(communicator, &mpiRank);

  this->buildSSTree(sourceMesh, targetMesh);


  if (mpiRank == 0 && verbose) cout << "Computing intersections ..." << endl;
  double tic = cputime();
  computeIntersection(&targetElements[0], targetElements.size());
  timings.push_back(cputime() - tic);

  tic = cputime();
  if (interpOrder == 2) {
    if (mpiRank == 0 && verbose) cout << "Computing grads ..." << endl;
    buildMeshTopology();
    computeGrads();
  }
  timings.push_back(cputime() - tic);

  /* Prepare computation of weights */
  /* compute the number of intersections, which for the first order case
     corresponds to the number of edges in the remap matrix */
  int nIntersections = 0;
  for (int j = 0; j < targetElements.size(); j++)
  {
    Elt &elt = targetElements[j];
    for (list<Polyg*>::iterator it = elt.is.begin(); it != elt.is.end(); it++)
      nIntersections++;
  }

  /* overallocate for NMAX neighbours for each element */
  /*
  remapMatrix = new double[nIntersections*NMAX];
  srcAddress = new int[nIntersections*NMAX];
  srcRank = new int[nIntersections*NMAX];
  dstAddress = new int[nIntersections*NMAX];
  sourceWeightId = new long[nIntersections*NMAX];
  targetWeightId = new long[nIntersections*NMAX];
  */

  if (mpiRank == 0 && verbose) cout << "Remapping..." << endl;
  tic = cputime();
  nWeights = remap(&targetElements[0], targetElements.size(), interpOrder, renormalize, quantity);
  timings.push_back(cputime() - tic);

  for (int i = 0; i < targetElements.size(); i++) targetElements[i].delete_intersections();

  return timings;
}

/**
  @param elements are cells of the target grid that are distributed over CPUs
         independently of the distribution of the SS-tree.
  @param nbElements is the size of the elements array.
  @param order is the order of interpolation (must be 1 or 2).
*/
int Mapper::remap(Elt *elements, int nbElements, int order, bool renormalize, bool quantity)
{
  int mpiSize, mpiRank;
  MPI_Comm_size(communicator, &mpiSize);
  MPI_Comm_rank(communicator, &mpiRank);

  /* create list of intersections (super mesh elements) for each rank */
  multimap<int, Polyg *> *elementList = new multimap<int, Polyg *>[mpiSize];
  for (int j = 0; j < nbElements; j++)
  {
    Elt& e = elements[j];
    for (list<Polyg *>::iterator it = e.is.begin(); it != e.is.end(); it++)
      elementList[(*it)->id.rank].insert(pair<int, Polyg *>((*it)->id.ind, *it));
  }

  int *nbSendElement = new int[mpiSize];
  int *nbSendVertex = new int[mpiSize];
  int **sendElement = new int*[mpiSize]; /* indices of elements required from other ranks */
  double **recvValue = new double*[mpiSize];
  double **recvArea = new double*[mpiSize];
  double **recvGivenArea = new double*[mpiSize];
  int **recvVertexOffset = new int*[mpiSize];
  Coord **recvGrad = new Coord*[mpiSize];
  GloId **recvNeighIds = new GloId*[mpiSize]; /* ids of the source neighbours which also contribute through gradient */
  for (int rank = 0; rank < mpiSize; rank++)
  {
    /* get size for allocation */
    int last = -1; /* compares unequal to any index */
    int index = -1; /* increased to starting index 0 in first iteration */
    for (multimap<int, Polyg *>::iterator it = elementList[rank].begin(); it != elementList[rank].end(); ++it)
    {
      if (last != it->first)
        index++;
      (it->second)->id.ind = index;
      last = it->first;
    }
    nbSendElement[rank] = index + 1;

    /* if size is non-zero allocate and collect indices of elements on other ranks that we intersect */
    if (nbSendElement[rank] > 0)
    {
      sendElement[rank] = new int[nbSendElement[rank]];
      last = -1;
      index = -1;
      for (multimap<int, Polyg *>::iterator it = elementList[rank].begin(); it != elementList[rank].end(); ++it)
      {
        if (last != it->first)
          index++;
        sendElement[rank][index] = it->first;
        last = it->first;
      }
    }
  }

  /* communicate sizes of source elements to be sent (index lists and later values and gradients) */
  int *nbRecvElement = new int[mpiSize];
  int *nbRecvVertex = new int[mpiSize];
  MPI_Alltoall(nbSendElement, 1, MPI_INT, nbRecvElement, 1, MPI_INT, communicator);

  /* communicate indices of source elements on other ranks whose value and gradient we need (since they intersect) */
  int nbSendRequest = 0;
  int nbRecvRequest = 0;
  int **recvElement = new int*[mpiSize];
  double **sendValue = new double*[mpiSize];
  double **sendArea = new double*[mpiSize];
  double **sendGivenArea = new double*[mpiSize];
  int **sendVertexOffset = new int*[mpiSize];
  Coord **sendGrad = new Coord*[mpiSize];
  GloId **sendNeighIds = new GloId*[mpiSize];
  MPI_Request *sendRequest = new MPI_Request[6*mpiSize];
  MPI_Request *recvRequest = new MPI_Request[6*mpiSize];
  for (int rank = 0; rank < mpiSize; rank++)
  {
    if (nbSendElement[rank] > 0)
    {
      MPI_Issend(sendElement[rank], nbSendElement[rank], MPI_INT, rank, 0, communicator, &sendRequest[nbSendRequest]);
      nbSendRequest++;
    }

    if (nbRecvElement[rank] > 0)
    {
      recvElement[rank] = new int[nbRecvElement[rank]];
      MPI_Irecv(recvElement[rank], nbRecvElement[rank], MPI_INT, rank, 0, communicator, &recvRequest[nbRecvRequest]);
      nbRecvRequest++;
    }
  }
  MPI_Status *status = new MPI_Status[6*mpiSize];

  MPI_Waitall(nbSendRequest, sendRequest, status);
  MPI_Waitall(nbRecvRequest, recvRequest, status);

  nbSendRequest = 0;
  nbRecvRequest = 0;

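  /* second handshake: for order 2 each element's reply will also carry its own gradient plus one
     gradient per neighbour (n+1 entries per element), so exchange the per-rank vertex counts
     needed to size the gradient and neighbour-id buffers on both sides */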
  for (int rank = 0; rank < mpiSize; rank++)
  {
    if (nbRecvElement[rank] > 0)
    {
      nbRecvVertex[rank]=0 ;
      for (int j = 0; j < nbRecvElement[rank]; j++) nbRecvVertex[rank]+=sstree.localElements[recvElement[rank][j]].n+1;
      MPI_Issend(&nbRecvVertex[rank], 1, MPI_INT, rank, 0, communicator, &sendRequest[nbSendRequest]);
      nbSendRequest++;
    }

    if (nbSendElement[rank] > 0)
    {
      MPI_Irecv(&nbSendVertex[rank], 1, MPI_INT, rank, 0, communicator, &recvRequest[nbRecvRequest]);
      nbRecvRequest++;
    }
  }

  MPI_Waitall(nbSendRequest, sendRequest, status);
  MPI_Waitall(nbRecvRequest, recvRequest, status);


  /* for all indices that have been received from requesting ranks: pack values and gradients, then send */
  nbSendRequest = 0;
  nbRecvRequest = 0;
  for (int rank = 0; rank < mpiSize; rank++)
  {
    if (nbRecvElement[rank] > 0)
    {
      sendValue[rank] = new double[nbRecvElement[rank]];
      sendArea[rank] = new double[nbRecvElement[rank]];
      sendGivenArea[rank] = new double[nbRecvElement[rank]];
      sendVertexOffset[rank] = new int[nbRecvElement[rank]];

      if (order == 2)
      {
        sendNeighIds[rank] = new GloId[nbRecvVertex[rank]];
        sendGrad[rank] = new Coord[nbRecvVertex[rank]];
      }
      else sendNeighIds[rank] = new GloId[nbRecvElement[rank]];

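      /* packing layout per requested element (order 2): first the element's own gradient and
         src_id, then one (gradNeigh, neighId) pair per vertex neighbour, i.e. n+1 entries in
         total; sendVertexOffset records the running count so the receiver can decode it */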
      int jj = 0; // jj == j if no weight writing
      sendVertexOffset[rank][0]=0 ;
      for (int j = 0; j < nbRecvElement[rank]; j++)
      {
        sendValue[rank][j] = sstree.localElements[recvElement[rank][j]].val;
        sendArea[rank][j] = sstree.localElements[recvElement[rank][j]].area;
        sendGivenArea[rank][j] = sstree.localElements[recvElement[rank][j]].given_area;

        if (j==0)
        {
          if (order==2) sendVertexOffset[rank][j]=sstree.localElements[recvElement[rank][j]].n+1 ;
          else sendVertexOffset[rank][j]=1 ;
        }
        else
        {
          if (order == 2) sendVertexOffset[rank][j] = sendVertexOffset[rank][j-1]+sstree.localElements[recvElement[rank][j]].n+1 ;
          else sendVertexOffset[rank][j] = sendVertexOffset[rank][j-1]+1 ;
        }

        if (order == 2)
        {
          sendGrad[rank][jj] = sstree.localElements[recvElement[rank][j]].grad;
          // cout<<"grad "<<jj<<" "<<recvElement[rank][j]<<" "<<sendGrad[rank][jj]<<" "<<sstree.localElements[recvElement[rank][j]].grad<<endl ;
          sendNeighIds[rank][jj] = sstree.localElements[recvElement[rank][j]].src_id;
          jj++;
          for (int i = 0; i < sstree.localElements[recvElement[rank][j]].n ; i++)
          {
            sendGrad[rank][jj] = sstree.localElements[recvElement[rank][j]].gradNeigh[i];
            // cout<<"grad "<<jj<<" "<<sendGrad[rank][jj]<<" "<<sstree.localElements[recvElement[rank][j]].grad<<endl ;
            sendNeighIds[rank][jj] = sstree.localElements[recvElement[rank][j]].neighId[i];
            jj++;
          }
        }
        else
          sendNeighIds[rank][j] = sstree.localElements[recvElement[rank][j]].src_id;
      }
      MPI_Issend(sendValue[rank], nbRecvElement[rank], MPI_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
      nbSendRequest++;
      MPI_Issend(sendArea[rank], nbRecvElement[rank], MPI_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
      nbSendRequest++;
      MPI_Issend(sendGivenArea[rank], nbRecvElement[rank], MPI_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
      nbSendRequest++;
      MPI_Issend(sendVertexOffset[rank], nbRecvElement[rank], MPI_INT, rank, 0, communicator, &sendRequest[nbSendRequest]);
      nbSendRequest++;

      if (order == 2)
      {
        MPI_Issend(sendGrad[rank], 3*nbRecvVertex[rank], MPI_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
        nbSendRequest++;
        MPI_Issend(sendNeighIds[rank], 4*nbRecvVertex[rank], MPI_INT, rank, 0, communicator, &sendRequest[nbSendRequest]);
        //ym --> attention taille GloId
        nbSendRequest++;
      }
      else
      {
        MPI_Issend(sendNeighIds[rank], 4*nbRecvElement[rank], MPI_INT, rank, 0, communicator, &sendRequest[nbSendRequest]);
        //ym --> attention taille GloId
        nbSendRequest++;
      }
    }
    if (nbSendElement[rank] > 0)
    {
      recvValue[rank] = new double[nbSendElement[rank]];
      recvArea[rank] = new double[nbSendElement[rank]];
      recvGivenArea[rank] = new double[nbSendElement[rank]];
      recvVertexOffset[rank] = new int[nbSendElement[rank]];

      if (order == 2)
      {
        recvNeighIds[rank] = new GloId[nbSendVertex[rank]];
        recvGrad[rank] = new Coord[nbSendVertex[rank]];
      }
      else recvNeighIds[rank] = new GloId[nbSendElement[rank]];


      MPI_Irecv(recvValue[rank], nbSendElement[rank], MPI_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]);
      nbRecvRequest++;
      MPI_Irecv(recvArea[rank], nbSendElement[rank], MPI_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]);
      nbRecvRequest++;
      MPI_Irecv(recvGivenArea[rank], nbSendElement[rank], MPI_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]);
      nbRecvRequest++;
      MPI_Irecv(recvVertexOffset[rank], nbSendElement[rank], MPI_INT, rank, 0, communicator, &recvRequest[nbRecvRequest]);
      nbRecvRequest++;

      if (order == 2)
      {
        MPI_Irecv(recvGrad[rank], 3*nbSendVertex[rank], MPI_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]);
        nbRecvRequest++;
        MPI_Irecv(recvNeighIds[rank], 4*nbSendVertex[rank], MPI_INT, rank, 0, communicator, &recvRequest[nbRecvRequest]);
        //ym --> attention taille GloId
        nbRecvRequest++;
      }
      else
      {
        MPI_Irecv(recvNeighIds[rank], 4*nbSendElement[rank], MPI_INT, rank, 0, communicator, &recvRequest[nbRecvRequest]);
        //ym --> attention taille GloId
        nbRecvRequest++;
      }
    }
  }

  MPI_Waitall(nbSendRequest, sendRequest, status);
  MPI_Waitall(nbRecvRequest, recvRequest, status);


  /* now that all values and gradients are available, use them to compute interpolated values on the target
     and also to compute the weights */
  int i = 0;
  for (int j = 0; j < nbElements; j++)
  {
    Elt& e = elements[j];

    /* since for the 2nd order case source grid elements can contribute to a destination grid element over several "paths"
       (step 1: gradient is computed using neighbours on the same grid, step 2: intersection uses several elements on the other grid)
       accumulate them so that there is only one final weight between two elements */
    map<GloId,double> wgt_map;

    /* for destination element `e`, loop over all intersections / the corresponding source elements */
    for (list<Polyg *>::iterator it = e.is.begin(); it != e.is.end(); it++)
    {
      /* `it` is the intersection element, so it->x and it->area are barycentre and area of the intersection element (super mesh)
         but it->id is the id of the source element that it intersects */
      int n1 = (*it)->id.ind;
      int rank = (*it)->id.rank;
      double fk = recvValue[rank][n1];
      double srcArea = recvArea[rank][n1];
      double srcGivenArea = recvGivenArea[rank][n1];
      double w = (*it)->area;
      if (quantity) w/=srcArea ;
      else w=w*srcGivenArea/srcArea*e.area/e.given_area ;

      /* first order: src value times weight (weight = supermesh area), later divide by target area */
      // int kk = (order == 2) ? n1 * (NMAX + 1) : n1;
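      /* recvVertexOffset holds cumulative entry counts, so the data for source element n1
         occupies the slice [offset, offset + nvertex) of recvGrad / recvNeighIds */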
      int offset ;
      int nvertex ;
      if (n1==0)
      {
        offset=0 ;
        nvertex=recvVertexOffset[rank][0] ;
      }
      else
      {
        offset = recvVertexOffset[rank][n1-1];
        nvertex = recvVertexOffset[rank][n1]-recvVertexOffset[rank][n1-1];
      }

      GloId neighID = recvNeighIds[rank][offset];
      wgt_map[neighID] += w;

      if (order == 2)
      {
        for (int k = 0; k < nvertex; k++)
        {
          int kk = offset + k;
          GloId neighID = recvNeighIds[rank][kk];
          if (neighID.ind != -1) wgt_map[neighID] += w * scalarprod(recvGrad[rank][kk], (*it)->x);
        }

      }
    }

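    /* optional renormalization: divide every accumulated weight by the total weight of the target
       cell (its covered fraction), so that the weights for each target cell sum to one even when
       the source grid does not completely cover it */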
    double renorm=0;
    if (renormalize)
    {
      if (quantity) for (map<GloId,double>::iterator it = wgt_map.begin(); it != wgt_map.end(); it++) renorm+=it->second ;
      else for (map<GloId,double>::iterator it = wgt_map.begin(); it != wgt_map.end(); it++) renorm+=it->second / e.area;
    }
    else renorm=1. ;

    for (map<GloId,double>::iterator it = wgt_map.begin(); it != wgt_map.end(); it++)
    {
      if (quantity) this->remapMatrix.push_back((it->second) / renorm);
      else this->remapMatrix.push_back((it->second / e.area) / renorm);
      this->srcAddress.push_back(it->first.ind);
      this->srcRank.push_back(it->first.rank);
      this->dstAddress.push_back(j);
      this->sourceWeightId.push_back(it->first.globalId) ;
      this->targetWeightId.push_back(targetGlobalId[j]) ;
      i++;
    }
  }

  /* free all memory allocated in this function */
  for (int rank = 0; rank < mpiSize; rank++)
  {
    if (nbSendElement[rank] > 0)
    {
      delete[] sendElement[rank];
      delete[] recvValue[rank];
      delete[] recvArea[rank];
      delete[] recvGivenArea[rank];
      delete[] recvVertexOffset[rank];
      if (order == 2)
      {
        delete[] recvGrad[rank];
      }
      delete[] recvNeighIds[rank];
    }
    if (nbRecvElement[rank] > 0)
    {
      delete[] recvElement[rank];
      delete[] sendValue[rank];
      delete[] sendArea[rank];
      delete[] sendGivenArea[rank];
      delete[] sendVertexOffset[rank];
      if (order == 2)
        delete[] sendGrad[rank];
      delete[] sendNeighIds[rank];
    }
  }
  delete[] status;
  delete[] sendRequest;
  delete[] recvRequest;
  delete[] elementList;
  delete[] nbSendElement;
  delete[] nbRecvElement;
  delete[] nbSendVertex;
  delete[] nbRecvVertex;
  delete[] sendElement;
  delete[] recvElement;
  delete[] sendValue;
  delete[] sendArea ;
  delete[] sendGivenArea ;
  delete[] recvValue;
  delete[] recvArea ;
  delete[] recvGivenArea ;
  delete[] sendGrad;
  delete[] recvGrad;
  delete[] sendNeighIds;
  delete[] recvNeighIds;
  return i;
}

void Mapper::computeGrads()
{
  /* array of pointers to collect local elements and elements received from other CPUs */
  vector<Elt*> globalElements(sstree.nbLocalElements + nbNeighbourElements);
  int index = 0;
  for (int i = 0; i < sstree.nbLocalElements; i++, index++)
    globalElements[index] = &(sstree.localElements[i]);
  for (int i = 0; i < nbNeighbourElements; i++, index++)
    globalElements[index] = &neighbourElements[i];

  update_baryc(sstree.localElements, sstree.nbLocalElements);
  computeGradients(&globalElements[0], sstree.nbLocalElements);
}

/** for each element of the source grid, finds all the neighbouring elements that share an edge
    (filling array neighbourElements). This is used later to compute gradients */
void Mapper::buildMeshTopology()
{
  int mpiSize, mpiRank;
  MPI_Comm_size(communicator, &mpiSize);
  MPI_Comm_rank(communicator, &mpiRank);

  vector<NodePtr> *routingList = new vector<NodePtr>[mpiSize];
  vector<vector<int> > routes(sstree.localTree.leafs.size());

  sstree.routeIntersections(routes, sstree.localTree.leafs);
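  /* routes[i] lists the ranks whose SS-tree region may intersect local leaf i; each leaf polygon
     is sent to those ranks so they can look for edge-sharing neighbours on our behalf */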

  for (int i = 0; i < routes.size(); ++i)
    for (int k = 0; k < routes[i].size(); ++k)
      routingList[routes[i][k]].push_back(sstree.localTree.leafs[i]);
  routingList[mpiRank].clear();


  CMPIRouting mpiRoute(communicator);
  mpiRoute.init(routes);
  int nRecv = mpiRoute.getTotalSourceElement();
  // cout << mpiRank << " NRECV " << nRecv << "(" << routes.size() << ")"<< endl;

  int *nbSendNode = new int[mpiSize];
  int *nbRecvNode = new int[mpiSize];
  int *sendMessageSize = new int[mpiSize];
  int *recvMessageSize = new int[mpiSize];

  for (int rank = 0; rank < mpiSize; rank++)
  {
    nbSendNode[rank] = routingList[rank].size();
    sendMessageSize[rank] = 0;
    for (size_t j = 0; j < routingList[rank].size(); j++)
    {
      Elt *elt = (Elt *) (routingList[rank][j]->data);
      sendMessageSize[rank] += packedPolygonSize(*elt);
    }
  }

  MPI_Alltoall(nbSendNode, 1, MPI_INT, nbRecvNode, 1, MPI_INT, communicator);
  MPI_Alltoall(sendMessageSize, 1, MPI_INT, recvMessageSize, 1, MPI_INT, communicator);

  char **sendBuffer = new char*[mpiSize];
  char **recvBuffer = new char*[mpiSize];
  int *pos = new int[mpiSize];

  for (int rank = 0; rank < mpiSize; rank++)
  {
    if (nbSendNode[rank] > 0) sendBuffer[rank] = new char[sendMessageSize[rank]];
    if (nbRecvNode[rank] > 0) recvBuffer[rank] = new char[recvMessageSize[rank]];
  }

  for (int rank = 0; rank < mpiSize; rank++)
  {
    pos[rank] = 0;
    for (size_t j = 0; j < routingList[rank].size(); j++)
    {
      Elt *elt = (Elt *) (routingList[rank][j]->data);
      packPolygon(*elt, sendBuffer[rank], pos[rank]);
    }
  }
  delete [] routingList;


  int nbSendRequest = 0;
  int nbRecvRequest = 0;
  MPI_Request *sendRequest = new MPI_Request[mpiSize];
  MPI_Request *recvRequest = new MPI_Request[mpiSize];
  MPI_Status *status = new MPI_Status[mpiSize];

  for (int rank = 0; rank < mpiSize; rank++)
  {
    if (nbSendNode[rank] > 0)
    {
      MPI_Issend(sendBuffer[rank], sendMessageSize[rank], MPI_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]);
      nbSendRequest++;
    }
    if (nbRecvNode[rank] > 0)
    {
      MPI_Irecv(recvBuffer[rank], recvMessageSize[rank], MPI_CHAR, rank, 0, communicator, &recvRequest[nbRecvRequest]);
      nbRecvRequest++;
    }
  }

  MPI_Waitall(nbRecvRequest, recvRequest, status);
  MPI_Waitall(nbSendRequest, sendRequest, status);

  for (int rank = 0; rank < mpiSize; rank++)
    if (nbSendNode[rank] > 0) delete [] sendBuffer[rank];
  delete [] sendBuffer;

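  /* second exchange: for every polygon received above, search the local SS-tree for leaves whose
     bounding circles intersect it and send those candidate neighbour polygons back to the
     requesting rank */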
  char **sendBuffer2 = new char*[mpiSize];
  char **recvBuffer2 = new char*[mpiSize];

  for (int rank = 0; rank < mpiSize; rank++)
  {
    nbSendNode[rank] = 0;
    sendMessageSize[rank] = 0;

    if (nbRecvNode[rank] > 0)
    {
      set<NodePtr> neighbourList;
      pos[rank] = 0;
      for (int j = 0; j < nbRecvNode[rank]; j++)
      {
        Elt elt;
        unpackPolygon(elt, recvBuffer[rank], pos[rank]);
        NodePtr node=make_shared<Node>(elt.x, cptRadius(elt), &elt);
        findNeighbour(sstree.localTree.root, node, neighbourList);
      }
      nbSendNode[rank] = neighbourList.size();
      for (set<NodePtr>::iterator it = neighbourList.begin(); it != neighbourList.end(); it++)
      {
        Elt *elt = (Elt *) ((*it)->data);
        sendMessageSize[rank] += packedPolygonSize(*elt);
      }

      sendBuffer2[rank] = new char[sendMessageSize[rank]];
      pos[rank] = 0;

      for (set<NodePtr>::iterator it = neighbourList.begin(); it != neighbourList.end(); it++)
      {
        Elt *elt = (Elt *) ((*it)->data);
        packPolygon(*elt, sendBuffer2[rank], pos[rank]);
      }
    }
  }
  for (int rank = 0; rank < mpiSize; rank++)
    if (nbRecvNode[rank] > 0) delete [] recvBuffer[rank];
  delete [] recvBuffer;


  MPI_Barrier(communicator);
  MPI_Alltoall(nbSendNode, 1, MPI_INT, nbRecvNode, 1, MPI_INT, communicator);
  MPI_Alltoall(sendMessageSize, 1, MPI_INT, recvMessageSize, 1, MPI_INT, communicator);

  for (int rank = 0; rank < mpiSize; rank++)
    if (nbRecvNode[rank] > 0) recvBuffer2[rank] = new char[recvMessageSize[rank]];

  nbSendRequest = 0;
  nbRecvRequest = 0;

  for (int rank = 0; rank < mpiSize; rank++)
  {
    if (nbSendNode[rank] > 0)
    {
      MPI_Issend(sendBuffer2[rank], sendMessageSize[rank], MPI_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]);
      nbSendRequest++;
    }
    if (nbRecvNode[rank] > 0)
    {
      MPI_Irecv(recvBuffer2[rank], recvMessageSize[rank], MPI_CHAR, rank, 0, communicator, &recvRequest[nbRecvRequest]);
      nbRecvRequest++;
    }
  }

  MPI_Waitall(nbRecvRequest, recvRequest, status);
  MPI_Waitall(nbSendRequest, sendRequest, status);

  int nbNeighbourNodes = 0;
  for (int rank = 0; rank < mpiSize; rank++)
    nbNeighbourNodes += nbRecvNode[rank];

  neighbourElements = new Elt[nbNeighbourNodes];
  nbNeighbourElements = nbNeighbourNodes;

  int index = 0;
  for (int rank = 0; rank < mpiSize; rank++)
  {
    pos[rank] = 0;
    for (int j = 0; j < nbRecvNode[rank]; j++)
    {
      unpackPolygon(neighbourElements[index], recvBuffer2[rank], pos[rank]);
      neighbourElements[index].id.ind = sstree.localTree.leafs.size() + index;
      index++;
    }
  }
  for (int rank = 0; rank < mpiSize; rank++)
  {
    if (nbRecvNode[rank] > 0) delete [] recvBuffer2[rank];
    if (nbSendNode[rank] > 0) delete [] sendBuffer2[rank];
  }
  delete [] recvBuffer2;
  delete [] sendBuffer2;
  delete [] sendMessageSize;
  delete [] recvMessageSize;
  delete [] nbSendNode;
  delete [] nbRecvNode;
  delete [] sendRequest;
  delete [] recvRequest;
  delete [] status;
  delete [] pos;

  /* re-compute on received elements to avoid having to send this information */
  neighbourNodes.resize(nbNeighbourNodes);
  for(int i=0;i<neighbourNodes.size();i++) neighbourNodes[i]=make_shared<Node>() ;
  setCirclesAndLinks(neighbourElements, neighbourNodes);
  cptAllEltsGeom(neighbourElements, nbNeighbourNodes, srcGrid.pole);

  /* the local SS tree must include nodes from other CPUs if they are potential
     intersectors of a local node */
  sstree.localTree.insertNodes(neighbourNodes);

  /* for every local element,
     use the SS-tree to find all elements (including neighbourElements)
     that are potential neighbours because their circles intersect,
     then check all candidates for common edges to build up connectivity information
  */
  for (int j = 0; j < sstree.localTree.leafs.size(); j++)
  {
    NodePtr node = sstree.localTree.leafs[j];

    /* find all leaves whose circles intersect node's circle and save them into node->intersectors */
    node->search(sstree.localTree.root);

    Elt *elt = (Elt *)(node->data);

    for (int i = 0; i < elt->n; i++) elt->neighbour[i] = NOT_FOUND;

    /* for element `elt` loop through all nodes in the SS-tree
       whose circles intersect with the circle around `elt` (the SS intersectors)
       and check if they are neighbours in the sense that the two elements share an edge.
       If they do, save this information for elt */
    for (list<NodePtr>::iterator it = (node->intersectors).begin(); it != (node->intersectors).end(); ++it)
    {
      Elt *elt2 = (Elt *)((*it)->data);
      set_neighbour(*elt, *elt2);
    }

    /*
    for (int i = 0; i < elt->n; i++)
    {
      if (elt->neighbour[i] == NOT_FOUND)
        error_exit("neighbour not found");
    }
    */
  }
}

/** @param elements are the target grid elements */
void Mapper::computeIntersection(Elt *elements, int nbElements)
{
  int mpiSize, mpiRank;
  MPI_Comm_size(communicator, &mpiSize);
  MPI_Comm_rank(communicator, &mpiRank);

  MPI_Barrier(communicator);

  vector<NodePtr> *routingList = new vector<NodePtr>[mpiSize];

  vector<NodePtr> routeNodes; routeNodes.reserve(nbElements);
  for (int j = 0; j < nbElements; j++)
  {
    elements[j].id.ind = j;
    elements[j].id.rank = mpiRank;
    routeNodes.push_back(make_shared<Node>(elements[j].x, cptRadius(elements[j]), &elements[j]));
  }

  vector<vector<int> > routes(routeNodes.size());
  sstree.routeIntersections(routes, routeNodes);
  for (int i = 0; i < routes.size(); ++i)
    for (int k = 0; k < routes[i].size(); ++k)
      routingList[routes[i][k]].push_back(routeNodes[i]);

  if (verbose >= 2)
  {
    cout << " --> rank " << mpiRank << " nbElements " << nbElements << " : ";
    for (int rank = 0; rank < mpiSize; rank++)
      cout << routingList[rank].size() << " ";
    cout << endl;
  }
  MPI_Barrier(communicator);

  int *nbSendNode = new int[mpiSize];
  int *nbRecvNode = new int[mpiSize];
  int *sentMessageSize = new int[mpiSize];
  int *recvMessageSize = new int[mpiSize];

  for (int rank = 0; rank < mpiSize; rank++)
  {
    nbSendNode[rank] = routingList[rank].size();
    sentMessageSize[rank] = 0;
    for (size_t j = 0; j < routingList[rank].size(); j++)
    {
      Elt *elt = (Elt *) (routingList[rank][j]->data);
      sentMessageSize[rank] += packedPolygonSize(*elt);
    }
  }

  MPI_Alltoall(nbSendNode, 1, MPI_INT, nbRecvNode, 1, MPI_INT, communicator);
  MPI_Alltoall(sentMessageSize, 1, MPI_INT, recvMessageSize, 1, MPI_INT, communicator);

  int total = 0;

  for (int rank = 0; rank < mpiSize; rank++)
  {
    total = total + nbRecvNode[rank];
  }

  if (verbose >= 2) cout << "---> rank " << mpiRank << " : compute intersection : total received nodes " << total << endl;
  char **sendBuffer = new char*[mpiSize];
  char **recvBuffer = new char*[mpiSize];
  int *pos = new int[mpiSize];

  for (int rank = 0; rank < mpiSize; rank++)
  {
    if (nbSendNode[rank] > 0) sendBuffer[rank] = new char[sentMessageSize[rank]];
    if (nbRecvNode[rank] > 0) recvBuffer[rank] = new char[recvMessageSize[rank]];
  }

  for (int rank = 0; rank < mpiSize; rank++)
  {
    pos[rank] = 0;
    for (size_t j = 0; j < routingList[rank].size(); j++)
    {
      Elt* elt = (Elt *) (routingList[rank][j]->data);
      packPolygon(*elt, sendBuffer[rank], pos[rank]);
    }
  }
  delete [] routingList;

  int nbSendRequest = 0;
  int nbRecvRequest = 0;
  MPI_Request *sendRequest = new MPI_Request[mpiSize];
  MPI_Request *recvRequest = new MPI_Request[mpiSize];
  MPI_Status *status = new MPI_Status[mpiSize];

  for (int rank = 0; rank < mpiSize; rank++)
  {
    if (nbSendNode[rank] > 0)
    {
      MPI_Issend(sendBuffer[rank], sentMessageSize[rank], MPI_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]);
      nbSendRequest++;
    }
    if (nbRecvNode[rank] > 0)
    {
      MPI_Irecv(recvBuffer[rank], recvMessageSize[rank], MPI_CHAR, rank, 0, communicator, &recvRequest[nbRecvRequest]);
      nbRecvRequest++;
    }
  }

  MPI_Waitall(nbRecvRequest, recvRequest, status);
  MPI_Waitall(nbSendRequest, sendRequest, status);
  char **sendBuffer2 = new char*[mpiSize];
  char **recvBuffer2 = new char*[mpiSize];

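  /* each target polygon received here is intersected against the local source elements found via
     the SS-tree; the resulting intersection polygons are packed up and sent back to the rank that
     owns the target element */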
912 | for (int rank = 0; rank < mpiSize; rank++) |
---|
913 | { |
---|
914 | sentMessageSize[rank] = 0; |
---|
915 | |
---|
916 | if (nbRecvNode[rank] > 0) |
---|
917 | { |
---|
918 | Elt *recvElt = new Elt[nbRecvNode[rank]]; |
---|
919 | pos[rank] = 0; |
---|
920 | for (int j = 0; j < nbRecvNode[rank]; j++) |
---|
921 | { |
---|
922 | unpackPolygon(recvElt[j], recvBuffer[rank], pos[rank]); |
---|
923 | cptEltGeom(recvElt[j], tgtGrid.pole); |
---|
924 | NodePtr recvNode = make_shared<Node>(recvElt[j].x, cptRadius(recvElt[j]), &recvElt[j]); |
---|
925 | recvNode->search(sstree.localTree.root); |
---|
926 | /* for a node holding an element of the target, loop throught candidates for intersecting source */ |
---|
927 | for (list<NodePtr>::iterator it = (recvNode->intersectors).begin(); it != (recvNode->intersectors).end(); ++it) |
---|
928 | { |
---|
929 | Elt *elt2 = (Elt *) ((*it)->data); |
---|
930 | /* recvElt is target, elt2 is source */ |
---|
931 | // intersect(&recvElt[j], elt2); |
---|
932 | intersect_ym(&recvElt[j], elt2); |
---|
933 | } |
---|
934 | |
---|
935 | if (recvElt[j].is.size() > 0) sentMessageSize[rank] += packIntersectionSize(recvElt[j]); |
---|
936 | |
---|
937 | // here recvNode goes out of scope |
---|
938 | } |
---|
939 | |
---|
940 | if (sentMessageSize[rank] > 0) |
---|
941 | { |
---|
942 | sentMessageSize[rank] += sizeof(int); |
---|
943 | sendBuffer2[rank] = new char[sentMessageSize[rank]]; |
---|
944 | *((int *) sendBuffer2[rank]) = 0; |
---|
945 | pos[rank] = sizeof(int); |
---|
946 | for (int j = 0; j < nbRecvNode[rank]; j++) |
---|
947 | { |
---|
948 | packIntersection(recvElt[j], sendBuffer2[rank], pos[rank]); |
---|
949 | //FIXME should be deleted: recvElt[j].delete_intersections(); // intersection areas have been packed to buffer and won't be used any more |
---|
950 | } |
---|
951 | } |
---|
952 | |
---|
953 | // don't delete intersection because it is shared also with elt2 wich is a source element, and need for order 2 |
---|
954 | //for (int j = 0; j < nbRecvNode[rank]; j++) recvElt[j].delete_intersections(); |
---|
955 | // mybe should be deleted later after usage in order 2 |
---|
956 | |
---|
957 | delete [] recvElt; |
---|
958 | |
---|
959 | } |
---|
960 | } |
---|
  delete [] pos;

  for (int rank = 0; rank < mpiSize; rank++)
  {
    if (nbSendNode[rank] > 0) delete [] sendBuffer[rank];
    if (nbRecvNode[rank] > 0) delete [] recvBuffer[rank];
    nbSendNode[rank] = 0;
  }

  if (verbose >= 2) cout << "Rank " << mpiRank << " Compute (internal) intersection " << cputime() - tic << " s" << endl;
  MPI_Alltoall(sentMessageSize, 1, MPI_INT, recvMessageSize, 1, MPI_INT, communicator);

  for (int rank = 0; rank < mpiSize; rank++)
    if (recvMessageSize[rank] > 0)
      recvBuffer2[rank] = new char[recvMessageSize[rank]];

  nbSendRequest = 0;
  nbRecvRequest = 0;

  for (int rank = 0; rank < mpiSize; rank++)
  {
    if (sentMessageSize[rank] > 0)
    {
      MPI_Issend(sendBuffer2[rank], sentMessageSize[rank], MPI_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]);
      nbSendRequest++;
    }
    if (recvMessageSize[rank] > 0)
    {
      MPI_Irecv(recvBuffer2[rank], recvMessageSize[rank], MPI_CHAR, rank, 0, communicator, &recvRequest[nbRecvRequest]);
      nbRecvRequest++;
    }
  }

  MPI_Waitall(nbRecvRequest, recvRequest, status);
  MPI_Waitall(nbSendRequest, sendRequest, status);

  delete [] sendRequest;
  delete [] recvRequest;
  delete [] status;
  for (int rank = 0; rank < mpiSize; rank++)
  {
    if (nbRecvNode[rank] > 0)
    {
      if (sentMessageSize[rank] > 0)
        delete [] sendBuffer2[rank];
    }

    if (recvMessageSize[rank] > 0)
    {
      unpackIntersection(elements, recvBuffer2[rank]);
      delete [] recvBuffer2[rank];
    }
  }
  delete [] sendBuffer2;
  delete [] recvBuffer2;
  delete [] sendBuffer;
  delete [] recvBuffer;

  delete [] nbSendNode;
  delete [] nbRecvNode;
  delete [] sentMessageSize;
  delete [] recvMessageSize;
}

Mapper::~Mapper()
{
  //delete [] remapMatrix;
  //delete [] srcAddress;
  //delete [] srcRank;
  //delete [] dstAddress;
  //delete [] sourceWeightId ;
  //delete [] targetWeightId ;
  //if (neighbourElements) delete [] neighbourElements;
  CTimer::release() ;
}

}