source: branches/ORCHIDEE_2_2/ORCHIDEE/src_sechiba/routing_native_para.f90 @ 8273

Last change on this file since 8273 was 8227, checked in by yann.meurdesoif, 9 months ago

New version of routing_simple => becomes routing_native; after validation, routing_simple will be removed

  • more robust routing scheme on the native grid
  • routing re-architected by splitting its sub-processes into separate files/modules => smaller files
  • works on global as well as regional (ORCHIDEE) grids
  • works on the standard 50 km routing grid and on the high-resolution MERIT routing grid (1-2 km) for regional studies
  • integrates both the old and the new irrigation scheme (Pedro Arboleda)
  • more water conservative (1e-16 vs 1e-7 relative error for the simple routing)
  • to be tested: the ICOLMDZOR and ICOLMDZ-LAM grids, which are also expected to work

This is a first version, not the definitive native routing package; it will continue to evolve.
No side effects are expected on other configurations, since only new files are added.
Native routing is activated with the run.def key:
ROUTING_METHOD=native

YM

File size: 8.9 KB
MODULE routing_native_para

  PRIVATE

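  ! Local (ni x nj) and global (ni_glo x nj_glo) dimensions of the routing
  ! domain, obtained from XIOS. Two index spaces are used:
  !   - "r"   : the ni*nj cells of the local domain           (nbpt_r points)
  !   - "rp1" : the same domain extended by a one-cell halo,
  !             i.e. (ni+2)*(nj+2) cells                      (nbpt_rp1 points)
  ! r_to_rp1 / rp1_to_r translate between the two numberings
  ! (rp1_to_r is -1 for halo cells, which have no local "r" counterpart).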
  INTEGER,SAVE :: ni
  INTEGER,SAVE :: nj

  INTEGER,SAVE :: ni_glo
  INTEGER,SAVE :: nj_glo
  INTEGER,SAVE :: nbpt_r
  INTEGER,SAVE :: nbpt_rp1
  INTEGER,ALLOCATABLE :: r_to_rp1(:)
  INTEGER,ALLOCATABLE :: rp1_to_r(:)

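  ! Description of one MPI neighbour: its rank, the number of halo points
  ! exchanged with it, the rp1 indices of those points (halo cells for
  ! recv_neighbours, interior cells for send_neighbours), and pre-allocated
  ! pack/unpack buffers for integer and real fields.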
  TYPE mpi_neighbours
    INTEGER             :: rank
    INTEGER             :: n
    INTEGER,ALLOCATABLE :: index(:)
    INTEGER,ALLOCATABLE :: ibuffer(:)
    REAL,ALLOCATABLE    :: rbuffer(:)
  END TYPE

  TYPE(mpi_neighbours),SAVE, ALLOCATABLE :: send_neighbours(:)
  TYPE(mpi_neighbours),SAVE, ALLOCATABLE :: recv_neighbours(:)

  INTERFACE update_halo
    PROCEDURE update_halo_integer, update_halo_real
  END INTERFACE

  PUBLIC routing_para_initialize, update_halo, r_to_rp1, rp1_to_r

CONTAINS
33
  SUBROUTINE routing_para_initialize
  USE mod_orchidee_para, ONLY : mpi_rank, mpi_size, MPI_COMM_ORCH, MPI_INT_ORCH
  USE xios
  IMPLICIT NONE
    INCLUDE 'mpif.h'
    REAL, ALLOCATABLE :: my_rank(:)
    REAL, ALLOCATABLE :: global_ranks(:)
    REAL, ALLOCATABLE :: my_index(:)
    REAL, ALLOCATABLE :: global_index(:)
    INTEGER :: nb_send_neighbours
    INTEGER :: nb_recv_neighbours
    INTEGER :: nb_points_to_recv
    INTEGER, ALLOCATABLE :: list_rank_to_recv(:)
    INTEGER :: i,j,k,ij,ind,r,rp1
    INTEGER :: rank, pos_rank
    INTEGER,ALLOCATABLE :: send_request(:)
    INTEGER,ALLOCATABLE :: recv_request(:)
    INTEGER,ALLOCATABLE :: send_status(:,:)
    INTEGER,ALLOCATABLE :: recv_status(:,:)
    INTEGER :: nb_points_ranks(0:mpi_size-1)
    INTEGER :: ierr
    INTEGER :: test

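    ! Initialization of the halo-exchange pattern, in outline:
    !  (1) get the local routing domain size from XIOS;
    !  (2) build the r <-> rp1 index mappings;
    !  (3) through the XIOS fields my_rank/global_ranks and my_index/global_index,
    !      learn, for every cell of the haloed domain, which rank owns it and
    !      under which rp1 index it is stored on that rank;
    !  (4) group the halo cells by owner rank into recv_neighbours;
    !  (5) a hand-shake (point counts sent to every recv neighbour, matched by
    !      MPI_ANY_SOURCE receives) builds the symmetric send_neighbours list;
    !  (6) exchange the point indices so each sender knows what to pack.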
    CALL xios_get_domain_attr("routing_domain", ni=ni, nj=nj, ni_glo=ni_glo, nj_glo=nj_glo)    ! get routing domain dimensions

    nbpt_r  = ni*nj
    nbpt_rp1= (ni+2)*(nj+2)

    ALLOCATE(my_rank(nbpt_r))
    ALLOCATE(global_ranks(nbpt_rp1))
    ALLOCATE(my_index(nbpt_r))
    ALLOCATE(global_index(nbpt_rp1))
    ALLOCATE(r_to_rp1(nbpt_r))
    ALLOCATE(rp1_to_r(nbpt_rp1))

    my_rank(:)=mpi_rank
    rp1_to_r(:)=-1

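    ! Interior cell (i,j) of the ni x nj domain sits at position (i+1,j+1) of
    ! the (ni+2) x (nj+2) haloed domain, hence rp1 = ((j+1)-1)*(ni+2) + (i+1).
    ! my_index records this rp1 position so that neighbouring ranks can later
    ! refer to our cells by our own local numbering.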
    DO j=1,nj
      DO i=1,ni
        r=((j-1)*ni)+i
        rp1=(j+1-1)*(ni+2)+i+1
        r_to_rp1(r)=rp1
        rp1_to_r(rp1)=r
        my_index(r) = rp1
      ENDDO
    ENDDO

    CALL xios_send_field("my_rank",my_rank)
    CALL xios_recv_field("global_ranks",global_ranks)
    CALL xios_send_field("my_index",my_index)
    CALL xios_recv_field("global_index",global_index)
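    ! After this exchange, global_ranks(rp1) holds the MPI rank owning each
    ! cell of the haloed domain, and global_index(rp1) the rp1 index of that
    ! cell on its owner; halo values come from the neighbouring domains.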

    ! number of points received from each rank
    nb_points_ranks(:)=0
    DO rp1=1,nbpt_rp1
      rank = NINT(global_ranks(rp1))   ! owner ranks were exchanged as REAL
      nb_points_ranks(rank) = nb_points_ranks(rank)+1
    ENDDO

    nb_points_ranks(mpi_rank) = 0 ! own points do not need to be received

    nb_recv_neighbours=0
    nb_points_to_recv=0
    DO rank=0, mpi_size-1
      IF (nb_points_ranks(rank)/=0) THEN
        nb_recv_neighbours=nb_recv_neighbours+1
        nb_points_to_recv = nb_points_to_recv + nb_points_ranks(rank)
      ENDIF
    ENDDO

    ALLOCATE(recv_neighbours(nb_recv_neighbours))
    ALLOCATE(list_rank_to_recv(0:mpi_size-1))
    k=1
    DO rank=0, mpi_size-1
      IF (nb_points_ranks(rank)/=0) THEN
        recv_neighbours(k)%rank = rank
        recv_neighbours(k)%n = 0
        ALLOCATE(recv_neighbours(k)%index(nb_points_ranks(rank)))
        ALLOCATE(recv_neighbours(k)%ibuffer(nb_points_ranks(rank)))
        ALLOCATE(recv_neighbours(k)%rbuffer(nb_points_ranks(rank)))
        list_rank_to_recv(rank) = k
        k=k+1
      ENDIF
    ENDDO

    DO rp1=1,nbpt_rp1
      rank = NINT(global_ranks(rp1))
      IF (rank /= mpi_rank) THEN
        k = list_rank_to_recv(rank)
        recv_neighbours(k)%n=recv_neighbours(k)%n+1
        recv_neighbours(k)%index(recv_neighbours(k)%n) = rp1
      ENDIF
    ENDDO

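    ! Each rank flags (with 1) the ranks it will receive from; after the
    ! MPI_ALLREDUCE sum, entry mpi_rank counts how many ranks expect data
    ! from us, i.e. our number of send neighbours.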
    nb_points_ranks(:) = 0
    DO k=1,size(recv_neighbours)
      nb_points_ranks(recv_neighbours(k)%rank) = 1
    ENDDO

    CALL MPI_ALLREDUCE(MPI_IN_PLACE, nb_points_ranks, mpi_size, MPI_INT_ORCH, MPI_SUM, MPI_COMM_ORCH, ierr)

    nb_send_neighbours = nb_points_ranks(mpi_rank)
    ALLOCATE(send_neighbours(nb_send_neighbours))

    ALLOCATE(send_request(size(recv_neighbours)))
    ALLOCATE(send_status(MPI_STATUS_SIZE,size(recv_neighbours)))
    ALLOCATE(recv_request(size(send_neighbours)))
    ALLOCATE(recv_status(MPI_STATUS_SIZE, size(send_neighbours)))

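    ! Hand-shake: tell every receive neighbour how many points we expect from
    ! it; the matching MPI_ANY_SOURCE receives discover, one by one, the ranks
    ! that need data from us. The sender's identity is recovered below from
    ! recv_status(MPI_SOURCE,k).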
    DO k=1,size(recv_neighbours)
      CALL MPI_ISEND(recv_neighbours(k)%n, 1, MPI_INT_ORCH, recv_neighbours(k)%rank, 0, MPI_COMM_ORCH, send_request(k), ierr)
    ENDDO

    DO k=1,size(send_neighbours)
      CALL MPI_IRECV(send_neighbours(k)%n, 1, MPI_INT_ORCH, MPI_ANY_SOURCE, 0, MPI_COMM_ORCH, recv_request(k), ierr)
    ENDDO

    CALL MPI_WAITALL(size(recv_neighbours),send_request, send_status, ierr)
    CALL MPI_WAITALL(size(send_neighbours),recv_request, recv_status, ierr)

    DO k=1,size(send_neighbours)
      send_neighbours(k)%rank = recv_status(MPI_SOURCE, k)
      ALLOCATE(send_neighbours(k)%index(send_neighbours(k)%n))
      ALLOCATE(send_neighbours(k)%ibuffer(send_neighbours(k)%n))
      ALLOCATE(send_neighbours(k)%rbuffer(send_neighbours(k)%n))
    ENDDO

    DO k=1,size(recv_neighbours)
      DO ij=1, recv_neighbours(k)%n
        ind = recv_neighbours(k)%index(ij)
        recv_neighbours(k)%ibuffer(ij) = NINT(global_index(ind))   ! owner-side rp1 index of the halo cell
      ENDDO
    ENDDO

    DO k=1,size(recv_neighbours)
      CALL MPI_ISEND(recv_neighbours(k)%ibuffer, recv_neighbours(k)%n, MPI_INT_ORCH, recv_neighbours(k)%rank, 1, MPI_COMM_ORCH, send_request(k), ierr)
    ENDDO

    DO k=1,size(send_neighbours)
      CALL MPI_IRECV(send_neighbours(k)%index, send_neighbours(k)%n, MPI_INT_ORCH, send_neighbours(k)%rank, 1, MPI_COMM_ORCH, recv_request(k), ierr)
    ENDDO

    CALL MPI_WAITALL(size(recv_neighbours),send_request, send_status, ierr)
    CALL MPI_WAITALL(size(send_neighbours),recv_request, recv_status, ierr)

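    ! send_neighbours(k)%index now lists, in our own rp1 numbering, the cells
    ! whose values rank send_neighbours(k)%rank needs for its halo.

    ! A first halo exchange exercises the freshly built pattern on the rank
    ! field: interior cells carry our own rank, halo cells start from a
    ! sentinel value and are filled with the ranks of their owners.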
    global_ranks(:)=1000000000
    DO r=1,nbpt_r
      global_ranks(r_to_rp1(r)) = mpi_rank
    ENDDO

    CALL update_halo_real(global_ranks)

  END SUBROUTINE routing_para_initialize

  SUBROUTINE update_halo_integer(field)
  USE mod_orchidee_para, ONLY : MPI_COMM_ORCH, MPI_INT_ORCH
  IMPLICIT NONE
    INCLUDE 'mpif.h'
    INTEGER, INTENT(INOUT) :: field(nbpt_rp1)
    INTEGER :: send_request(size(send_neighbours))
    INTEGER :: send_status(MPI_STATUS_SIZE,size(send_neighbours))
    INTEGER :: recv_request(size(recv_neighbours))
    INTEGER :: recv_status(MPI_STATUS_SIZE, size(recv_neighbours))
    INTEGER :: ij, k, ind
    INTEGER :: ierr

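    ! Halo update in three phases: pack the interior values requested by each
    ! send neighbour, exchange the buffers with non-blocking point-to-point
    ! messages, then scatter the received values into our halo cells.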
    DO k=1,size(send_neighbours)
      DO ij=1, send_neighbours(k)%n
        ind = send_neighbours(k)%index(ij)
        send_neighbours(k)%ibuffer(ij) = field(ind)
      ENDDO
    ENDDO

    DO k=1,size(send_neighbours)
      CALL MPI_ISEND(send_neighbours(k)%ibuffer, send_neighbours(k)%n, MPI_INT_ORCH, send_neighbours(k)%rank, 0, MPI_COMM_ORCH, send_request(k), ierr)
    ENDDO

    DO k=1,size(recv_neighbours)
      CALL MPI_IRECV(recv_neighbours(k)%ibuffer, recv_neighbours(k)%n, MPI_INT_ORCH, recv_neighbours(k)%rank, 0, MPI_COMM_ORCH, recv_request(k), ierr)
    ENDDO

    CALL MPI_WAITALL(size(send_neighbours),send_request, send_status, ierr)
    CALL MPI_WAITALL(size(recv_neighbours),recv_request, recv_status, ierr)

    DO k=1,size(recv_neighbours)
      DO ij=1, recv_neighbours(k)%n
        ind = recv_neighbours(k)%index(ij)
        field(ind) = recv_neighbours(k)%ibuffer(ij)
      ENDDO
    ENDDO

  END SUBROUTINE update_halo_integer

  SUBROUTINE update_halo_real(field)
  USE mod_orchidee_para, ONLY : MPI_COMM_ORCH, MPI_REAL_ORCH
  IMPLICIT NONE
    INCLUDE 'mpif.h'
    REAL, INTENT(INOUT) :: field(nbpt_rp1)
    INTEGER :: send_request(size(send_neighbours))
    INTEGER :: send_status(MPI_STATUS_SIZE,size(send_neighbours))
    INTEGER :: recv_request(size(recv_neighbours))
    INTEGER :: recv_status(MPI_STATUS_SIZE, size(recv_neighbours))
    INTEGER :: ij, k, ind
    INTEGER :: ierr

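    ! Same pack / exchange / unpack sequence as update_halo_integer, using
    ! the real buffers and MPI_REAL_ORCH.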
    DO k=1,size(send_neighbours)
      DO ij=1, send_neighbours(k)%n
        ind = send_neighbours(k)%index(ij)
        send_neighbours(k)%rbuffer(ij) = field(ind)
      ENDDO
    ENDDO

    DO k=1,size(send_neighbours)
      CALL MPI_ISEND(send_neighbours(k)%rbuffer, send_neighbours(k)%n, MPI_REAL_ORCH, send_neighbours(k)%rank, 0, MPI_COMM_ORCH, send_request(k), ierr)
    ENDDO

    DO k=1,size(recv_neighbours)
      CALL MPI_IRECV(recv_neighbours(k)%rbuffer, recv_neighbours(k)%n, MPI_REAL_ORCH, recv_neighbours(k)%rank, 0, MPI_COMM_ORCH, recv_request(k), ierr)
    ENDDO

    CALL MPI_WAITALL(size(send_neighbours),send_request, send_status, ierr)
    CALL MPI_WAITALL(size(recv_neighbours),recv_request, recv_status, ierr)

    DO k=1,size(recv_neighbours)
      DO ij=1, recv_neighbours(k)%n
        ind = recv_neighbours(k)%index(ij)
        field(ind) = recv_neighbours(k)%rbuffer(ij)
      ENDDO
    ENDDO

  END SUBROUTINE update_halo_real

END MODULE routing_native_para