XBeach
module xmpi_module

!#define SHIFT_TIMER

#ifdef USEMPI
   use mpi
   implicit none
   save
#ifndef HAVE_MPI_WTIME
   real*8, external :: MPI_Wtime
#endif
   logical :: bcast_backtrace = .false.
   integer, parameter :: MPIBOUNDARY_Y    = 1
   integer, parameter :: MPIBOUNDARY_X    = 2
   integer, parameter :: MPIBOUNDARY_AUTO = 3
   integer, parameter :: MPIBOUNDARY_MAN  = 4
   ! We use two communicators:
   !  xmpi_ocomm and xmpi_comm
   ! xmpi_comm will contain all computing processes
   ! xmpi_ocomm will contain all computing processes + one process
   !  that will do most of the output, hoping that the output can be
   !  done in parallel with computing
   integer :: xmpi_orank   ! mpi rank of this process in xmpi_ocomm
   integer :: xmpi_osize   ! number of mpi processes in xmpi_ocomm
   integer :: xmpi_ocomm   ! mpi communicator to use for the I/O
   integer :: xmpi_omaster ! rank of master process = rank of io-process in xmpi_ocomm
   integer :: xmpi_imaster ! rank of master process of computational processes in xmpi_ocomm
   !
   integer :: xmpi_rank    ! mpi rank of this process in xmpi_comm
   integer :: xmpi_size    ! number of mpi processes in xmpi_comm
   integer :: xmpi_comm    ! mpi communicator for the computing processes
   integer :: xmpi_master  ! rank of master process in xmpi_comm
   integer :: xmpi_m       ! 1st dimension of processor grid
   integer :: xmpi_n       ! 2nd dimension of processor grid
   integer :: xmpi_pcol    ! my column in processor grid (starting at 1)
   integer :: xmpi_prow    ! my row in processor grid (starting at 1)
   integer :: xmpi_left    ! left neighbour
   integer :: xmpi_right   ! right neighbour
   integer :: xmpi_bot     ! bottom neighbour
   integer :: xmpi_top     ! top neighbour
   logical :: xmpi_isleft  ! submatrix is at the left side,
                           !  ie: shares first column with global matrix
   logical :: xmpi_isright ! submatrix is at the right side,
                           !  ie: shares last column with global matrix
   logical :: xmpi_istop   ! submatrix is at the top side,
                           !  ie: shares first row with global matrix
   logical :: xmpi_isbot   ! submatrix is at the bottom side,
                           !  ie: shares last row with global matrix
   logical :: xmaster      ! .true. if process is the master process
                           !  of the computing processes
   logical :: xomaster     ! .true. if this process is the output process
                           !  in xmpi_ocomm
   logical :: xcompute     ! .true. if this is a compute process
   !
   !        1 2 3 4 5 6 7  y-axis
   !   X  1 x x x x x x x
   !   a  2 x x x x x x x
   !   x  3 x x x x x x x
   !   i  4 x x x x x x x
   !   s  5 x x x x x x x
   !
   integer, parameter :: SHIFT_X_U = 1 ! shift in x direction from high to low: from bottom to top
   integer, parameter :: SHIFT_X_D = 2 ! shift in x direction from low to high: from top to bottom
   integer, parameter :: SHIFT_Y_R = 3 ! shift in y direction from low to high: from left to right
   integer, parameter :: SHIFT_Y_L = 4 ! shift in y direction from high to low: from right to left
   integer, parameter :: SHIFT_R   = 5 ! shift in 1d array from left to right
   integer, parameter :: SHIFT_L   = 6 ! shift in 1d array from right to left
#ifdef USEMPE
   integer :: event_output_start
   integer :: event_output_end
   integer :: event_write_start
   integer :: event_write_end
   integer :: event_coll_start
   integer :: event_coll_end
#endif

#else
   implicit none
   save
   integer, parameter :: xmpi_rank    = 0
   integer, parameter :: xmpi_orank   = 0
   integer, parameter :: xmpi_size    = 1
   logical, parameter :: xmaster      = .true.
   logical, parameter :: xomaster     = .true.
   logical, parameter :: xcompute     = .true.
   logical, parameter :: xmpi_isleft  = .true.
   logical, parameter :: xmpi_isright = .true.
   logical, parameter :: xmpi_istop   = .true.
   logical, parameter :: xmpi_isbot   = .true.
   integer, parameter :: xmpi_pcol    = 1
   integer, parameter :: xmpi_prow    = 1
#endif

#ifdef USEMPI
   interface xmpi_bcast
      module procedure xmpi_bcast_array_integer
      module procedure xmpi_bcast_array_integer_3
      module procedure xmpi_bcast_array_logical
      module procedure xmpi_bcast_array_logical_3
      module procedure xmpi_bcast_array_real8
      module procedure xmpi_bcast_array_real8_3
      module procedure xmpi_bcast_char
      module procedure xmpi_bcast_char_3
      module procedure xmpi_bcast_complex16
      module procedure xmpi_bcast_complex16_3
      module procedure xmpi_bcast_integer
      module procedure xmpi_bcast_integer_3
      module procedure xmpi_bcast_integer8
      module procedure xmpi_bcast_integer8_3
      module procedure xmpi_bcast_logical
      module procedure xmpi_bcast_logical_3
      module procedure xmpi_bcast_matrix_integer
      module procedure xmpi_bcast_matrix_integer_3
      module procedure xmpi_bcast_matrix_real8
      module procedure xmpi_bcast_matrix_real8_3
      module procedure xmpi_bcast_real4
      module procedure xmpi_bcast_real4_3
      module procedure xmpi_bcast_real8
      module procedure xmpi_bcast_real8_3
   end interface xmpi_bcast

   interface xmpi_allreduce
      module procedure xmpi_allreduce_r0
      module procedure xmpi_allreduce_r1
      module procedure xmpi_allreduce_i0
   end interface xmpi_allreduce

   interface xmpi_reduce
      module procedure xmpi_reduce_r0
      module procedure xmpi_reduce_i0
      module procedure xmpi_reduce_r1
      module procedure xmpi_reduce_r2
      module procedure xmpi_reduce_i1
   end interface xmpi_reduce

   interface xmpi_sendrecv
      module procedure xmpi_sendrecv_r1
      module procedure xmpi_sendrecv_r2
      module procedure xmpi_sendrecv_r3
      module procedure xmpi_sendrecv_i1
      module procedure xmpi_sendrecv_i2
   end interface xmpi_sendrecv

   interface xmpi_shift
      module procedure xmpi_shift_r2
      module procedure xmpi_shift_i2
      module procedure xmpi_shift_r3
      module procedure xmpi_shift_i3

      module procedure xmpi_shift_r2_l
      module procedure xmpi_shift_i2_l
      module procedure xmpi_shift_r3_l
   end interface xmpi_shift

   interface xmpi_shift_ee
      module procedure xmpi_shift_ee_r2
      module procedure xmpi_shift_ee_i2
      module procedure xmpi_shift_ee_r3
   end interface xmpi_shift_ee

   interface xmpi_shift_uu
      module procedure xmpi_shift_uu_r2
      module procedure xmpi_shift_uu_r3
   end interface xmpi_shift_uu

   interface xmpi_shift_vv
      module procedure xmpi_shift_vv_r2
      module procedure xmpi_shift_vv_r3
   end interface xmpi_shift_vv
   interface xmpi_shift_zs
      module procedure xmpi_shift_zs_r2
      module procedure xmpi_shift_zs_r3
   end interface xmpi_shift_zs

   interface xmpi_send
      module procedure xmpi_send_r0
      module procedure xmpi_send_i0
      module procedure xmpi_send_l0
      module procedure xmpi_send_r1
      module procedure xmpi_send_r2
      module procedure xmpi_send_r3
      module procedure xmpi_send_r4
      module procedure xmpi_send_i1
      module procedure xmpi_send_l1
   end interface xmpi_send

#endif
contains
#ifdef USEMPI

   subroutine xmpi_initialize
      ! initialize mpi environment
      implicit none
      integer ierr,color,errhandler,r,comm_world
      external comm_errhandler
      ierr = 0
      ! Message buffers in openmpi are not initialized, so this call can give a valgrind error:
      ! http://www.open-mpi.org/community/lists/users/2009/06/9566.php
      ! http://valgrind.org/docs/manual/manual-core.html#manual-core.suppress
      call MPI_Init(ierr)
      ! Use comm_world as a variable for compatibility with SGI MPI
      comm_world = MPI_COMM_WORLD
      call MPI_Comm_create_errhandler(comm_errhandler,errhandler,ierr)
      call MPI_Comm_set_errhandler(comm_world,errhandler,ierr)
#ifdef USEMPE
      call MPE_Log_get_solo_eventid(event_output_start)
      call MPE_Log_get_solo_eventid(event_output_end)
      call MPE_Log_get_solo_eventid(event_write_start)
      call MPE_Log_get_solo_eventid(event_write_end)
      call MPE_Log_get_solo_eventid(event_coll_start)
      call MPE_Log_get_solo_eventid(event_coll_end)
      call MPE_Describe_event(event_output_start,'out_start','red')
      call MPE_Describe_event(event_output_end,'out_end','blue')
      call MPE_Describe_event(event_write_start,'write_start','orange')
      call MPE_Describe_event(event_write_end,'write_end','green')
      call MPE_Describe_event(event_coll_start,'coll_start','white')
      call MPE_Describe_event(event_coll_end,'coll_end','white')
#endif
      xmpi_ocomm = MPI_COMM_WORLD
      call MPI_Comm_rank(xmpi_ocomm,xmpi_orank,ierr)
      call MPI_Comm_size(xmpi_ocomm,xmpi_osize,ierr)
      if (xmpi_osize < 2) then
         print *,'Number of MPI processes must be 2 or greater, but is:',xmpi_osize
         call halt_program(.false.)
      endif
      xmpi_omaster = 0
      xomaster = (xmpi_orank == xmpi_omaster)
      xcompute = .not. xomaster
      !
      ! Create the compute communicator. This will contain all
      ! processes in xmpi_ocomm except process xmpi_omaster
      !
      color = 1
      if(xmpi_orank == xmpi_omaster) color = 0
      call MPI_Comm_split(xmpi_ocomm,color,1,xmpi_comm,ierr)
      call MPI_Comm_rank(xmpi_comm,xmpi_rank,ierr)
      call MPI_Comm_size(xmpi_comm,xmpi_size,ierr)
      call MPI_Comm_set_name(xmpi_comm,'xmpi_comm',ierr)
      call MPI_Comm_set_name(xmpi_ocomm,'xmpi_ocomm',ierr)
      xmpi_master = 0
      xmaster = (xmpi_rank == xmpi_master)
      !
      ! On the output process, xmpi_comm and xmpi_rank
      ! are of no use, so give them values that will trigger
      ! errors when used:
      !
      if (xomaster) then
         xmpi_comm = MPI_COMM_NULL
         xmpi_rank = -123
         xmaster = .false.
      endif
      !
      ! Let the I/O process know how many computational processes there are:
      !
      if(xomaster) then
         xmpi_size = xmpi_osize - 1
      endif

      xmpi_imaster = 1
      !
      ! sanity check for xmpi_orank_to_rank and xmpi_rank_to_orank

      r = xmpi_orank_to_rank(xmpi_orank)
      if (r .ne. xmpi_rank) then
         print *,'Wrong conversion from xmpi_orank',xmpi_orank,'to xmpi_rank',r
         call halt_program
      endif

      r = xmpi_rank_to_orank(xmpi_rank)
      if (r .ne. xmpi_orank) then
         print *,'Wrong conversion from xmpi_rank',xmpi_rank,'to xmpi_orank',r
         call halt_program
      endif

   end subroutine xmpi_initialize

   integer function xmpi_orank_to_rank(r)
      ! given rank r in xmpi_ocomm, return rank in xmpi_comm
      integer, intent(in) :: r
      integer rr
      rr = r-1
      if (rr .lt. 0) rr = -123
      xmpi_orank_to_rank = rr
   end function xmpi_orank_to_rank

   integer function xmpi_rank_to_orank(r)
      ! given rank r in xmpi_comm, return rank in xmpi_ocomm
      integer, intent(in) :: r
      if (r .eq. -123) then
         xmpi_rank_to_orank = 0
      else
         xmpi_rank_to_orank = r+1
      endif
   end function xmpi_rank_to_orank

   subroutine xmpi_finalize
      ! ends mpi environment, collective subroutine
      implicit none
      integer ierr
      call MPI_Finalize(ierr)
   end subroutine xmpi_finalize

   subroutine xmpi_abort
      ! to be used if the program has to end and there is no way
      ! to tell the other processes about this fact
      implicit none
      integer ierr
      call MPI_Abort(xmpi_comm,1,ierr)
      stop 1
   end subroutine xmpi_abort
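
   ! The sketch below is illustrative only; the subroutine name
   ! example_rank_mapping is hypothetical and not part of the original
   ! module. It shows how the rank conversions above are meant to be
   ! used: rank 0 in xmpi_ocomm is the output process, so the compute
   ! master (rank 0 in xmpi_comm) is addressed as rank 1
   ! (= xmpi_imaster) in xmpi_ocomm.
   subroutine example_rank_mapping
      implicit none
      integer :: o
      o = xmpi_rank_to_orank(0)   ! yields 1, i.e. xmpi_imaster
      if (xmaster) then
         write(*,*) 'compute master has rank',o,'in xmpi_ocomm'
      endif
   end subroutine example_rank_mapping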
   !____________________________________________________________________________

   subroutine xmpi_determine_processor_grid(m,n,divtype,mmanual,nmanual,cyclic,error)
      implicit none
      integer, intent(in)  :: m,n,mmanual,nmanual ! the dimensions of the global domain
      integer, intent(in)  :: cyclic
      integer, intent(in)  :: divtype
      integer, intent(out) :: error

      integer mm,nn, borderlength, min_borderlength

      ! determine the processor grid (xmpi_m x xmpi_n), such that
      ! - xmpi_m * xmpi_n = xmpi_size
      ! - the total length of the internal borders is minimal

      min_borderlength = 1000000000
      error = 0
      select case(divtype)
      case(MPIBOUNDARY_Y) ! Force all subdivisions to run along y-lines
         xmpi_m = xmpi_size
         xmpi_n = 1
      case(MPIBOUNDARY_X) ! Force all subdivisions to run along x-lines
         xmpi_m = 1
         xmpi_n = xmpi_size
      case(MPIBOUNDARY_AUTO)
         do mm = 1,xmpi_size
            nn = xmpi_size/mm
            if (mm * nn .eq. xmpi_size) then
               borderlength = (mm - 1)*n + (nn - 1)*m
               if (borderlength .lt. min_borderlength) then
                  xmpi_m = mm
                  xmpi_n = nn
                  min_borderlength = borderlength
               endif
            endif
         enddo
      case(MPIBOUNDARY_MAN)
         if (mmanual * nmanual .ne. xmpi_size) then
            error = 2
            return
         endif
         xmpi_m = mmanual
         xmpi_n = nmanual
      case default
         error = 1
         return
      end select

      ! Check whether each MPI subdomain is large enough
      ! (at least 4 cells in x and y, preferably more)
      ! Errors
      if (m+1 < 4*xmpi_m) then
         error = 3
         return
      endif
      if (n+1 < 4*xmpi_n .and. n > 2) then
         error = 4
         return
      endif
      ! "Warning"
      if (m+1 < 8*xmpi_m .or. (n+1 < 8*xmpi_n .and. n > 2)) then
         if (m+1 < 8*xmpi_m) error = 5
         if (n+1 < 8*xmpi_n .and. n > 2) error = 6
      endif

      ! Pieter: TODO: Check whether the grid distribution did not lead to
      ! domains with too few grid cells (the mpi implementation needs at
      ! least 2 at each boundary). Specify an error and give a message if
      ! this is the case.

      ! The layout of the processors is as follows:
      ! example 12 processors, xmpi_m=3, xmpi_n=4:
      !
      !   0 3 6  9
      !   1 4 7 10
      !   2 5 8 11
      !
      !        top
      !  left   X   right
      !        bot

      ! Left neighbour:

      xmpi_left = xmpi_rank - xmpi_m
      xmpi_isleft = .false.
      if (xmpi_left .lt. 0) then
         select case (cyclic)
         case(0)
            xmpi_left = MPI_PROC_NULL
            xmpi_isleft = .true.
         case(1)
            xmpi_left = xmpi_left + xmpi_m*xmpi_n
         end select
      endif

      ! Right neighbour:

      xmpi_right = xmpi_rank + xmpi_m
      xmpi_isright = .false.
      if (xmpi_right .ge. xmpi_size) then
         select case (cyclic)
         case(0)
            xmpi_right = MPI_PROC_NULL
            xmpi_isright = .true.
         case(1)
            xmpi_right = xmpi_right - xmpi_m*xmpi_n
         end select
      endif

      ! Upper neighbour:

      if (mod (xmpi_rank,xmpi_m) .eq. 0) then
         xmpi_top = MPI_PROC_NULL
         xmpi_istop = .true.
      else
         xmpi_top = xmpi_rank - 1
         xmpi_istop = .false.
      endif

      ! Lower neighbour:

      if (mod (xmpi_rank+1,xmpi_m) .eq. 0) then
         xmpi_bot = MPI_PROC_NULL
         xmpi_isbot = .true.
      else
         xmpi_bot = xmpi_rank + 1
         xmpi_isbot = .false.
      endif

      ! my row and column (starting with 1,1)

      xmpi_pcol = xmpi_rank/xmpi_m
      xmpi_prow = xmpi_rank - xmpi_pcol*xmpi_m
      xmpi_pcol = xmpi_pcol+1
      xmpi_prow = xmpi_prow+1

   end subroutine xmpi_determine_processor_grid
   !____________________________________________________________________________
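
   ! Illustrative sketch; the subroutine name example_processor_grid is
   ! hypothetical and not part of the original module. With, say, a
   ! 100 x 200 global domain on 4 compute processes, MPIBOUNDARY_AUTO
   ! tries every factorisation mm*nn = xmpi_size (1x4, 2x2, 4x1) and
   ! keeps the first one with the smallest internal border length
   ! (mm-1)*n + (nn-1)*m. To be called by the compute processes.
   subroutine example_processor_grid
      implicit none
      integer :: error
      ! mmanual and nmanual are ignored for MPIBOUNDARY_AUTO; cyclic = 0
      call xmpi_determine_processor_grid(100,200,MPIBOUNDARY_AUTO,0,0,0,error)
      if (xmaster .and. error == 0) then
         write(*,*) 'processor grid:',xmpi_m,'x',xmpi_n
      endif
   end subroutine example_processor_grid
   !____________________________________________________________________________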
   subroutine xmpi_bcast_array_logical(x,toall)
      implicit none
      logical, dimension(:) :: x
      include 'xmpi_bcast.inc'
   end subroutine xmpi_bcast_array_logical

   subroutine xmpi_bcast_array_logical_3(x,src,comm)
      implicit none
      logical, dimension(:) :: x
      integer, intent(in) :: src
      integer, intent(in) :: comm
      integer ierror,l
      l = size(x)
      call MPI_Bcast(x, l, MPI_LOGICAL, src, comm, ierror)
   end subroutine xmpi_bcast_array_logical_3

   subroutine xmpi_bcast_logical(x,toall)
      implicit none
      logical x
      include 'xmpi_bcast.inc'
   end subroutine xmpi_bcast_logical

   subroutine xmpi_bcast_logical_3(x,src,comm)
      implicit none
      logical x
      integer, intent(in) :: src
      integer, intent(in) :: comm
      integer ierror
      call MPI_Bcast(x, 1, MPI_LOGICAL, src, comm, ierror)
   end subroutine xmpi_bcast_logical_3

   subroutine xmpi_bcast_array_real8(x,toall)
      implicit none
      real*8, dimension(:) :: x
      include 'xmpi_bcast.inc'
   end subroutine xmpi_bcast_array_real8

   subroutine xmpi_bcast_array_real8_3(x,src,comm)
      implicit none
      real*8, dimension(:) :: x
      integer, intent(in) :: src,comm
      integer ierror,l
      l = size(x)
      call MPI_Bcast(x, l, MPI_DOUBLE_PRECISION, src, comm, ierror)
   end subroutine xmpi_bcast_array_real8_3

   subroutine xmpi_bcast_matrix_real8(x,toall)
      implicit none
      real*8, dimension(:,:) :: x
      include 'xmpi_bcast.inc'
   end subroutine xmpi_bcast_matrix_real8

   subroutine xmpi_bcast_matrix_real8_3(x,src,comm)
      implicit none
      real*8, dimension(:,:) :: x
      integer, intent(in) :: src,comm
      integer ierror,l
      l = size(x)
      call MPI_Bcast(x, l, MPI_DOUBLE_PRECISION, src, comm, ierror)
   end subroutine xmpi_bcast_matrix_real8_3

   subroutine xmpi_bcast_matrix_integer(x,toall)
      implicit none
      integer, dimension(:,:) :: x
      include 'xmpi_bcast.inc'
   end subroutine xmpi_bcast_matrix_integer

   subroutine xmpi_bcast_matrix_integer_3(x,src,comm)
      implicit none
      integer, dimension(:,:) :: x
      integer, intent(in) :: src,comm
      integer ierror,l
      l = size(x)
      call MPI_Bcast(x, l, MPI_INTEGER, src, comm, ierror)
   end subroutine xmpi_bcast_matrix_integer_3

   subroutine xmpi_bcast_array_integer(x,toall)
      implicit none
      integer, dimension(:) :: x
      include 'xmpi_bcast.inc'
   end subroutine xmpi_bcast_array_integer

   subroutine xmpi_bcast_array_integer_3(x,src,comm)
      implicit none
      integer, dimension(:) :: x
      integer, intent(in) :: src,comm
      integer ierror,l
      l = size(x)
      call MPI_Bcast(x, l, MPI_INTEGER, src, comm, ierror)
   end subroutine xmpi_bcast_array_integer_3

   subroutine xmpi_bcast_real4(x,toall)
      implicit none
      real*4 :: x
      include 'xmpi_bcast.inc'
   end subroutine xmpi_bcast_real4

   subroutine xmpi_bcast_real4_3(x,src,comm)
      implicit none
      real*4 :: x
      integer, intent(in) :: src,comm
      integer ierror
      call MPI_Bcast(x, 1, MPI_REAL, src, comm, ierror)
   end subroutine xmpi_bcast_real4_3

   subroutine xmpi_bcast_real8(x,toall)
      implicit none
      real*8 :: x
      include 'xmpi_bcast.inc'
   end subroutine xmpi_bcast_real8

   subroutine xmpi_bcast_real8_3(x,src,comm)
      implicit none
      real*8 :: x
      integer, intent(in) :: src,comm
      integer ierror
      call MPI_Bcast(x, 1, MPI_DOUBLE_PRECISION, src, comm, ierror)
   end subroutine xmpi_bcast_real8_3

   subroutine xmpi_bcast_integer(x,toall)
      implicit none
      integer x
      include 'xmpi_bcast.inc'
   end subroutine xmpi_bcast_integer

   subroutine xmpi_bcast_integer_3(x,src,comm)
      implicit none
      integer :: x
      integer, intent(in) :: src,comm
      integer ierror
      call MPI_Bcast(x, 1, MPI_INTEGER, src, comm, ierror)
   end subroutine xmpi_bcast_integer_3

   subroutine xmpi_bcast_integer8(x,toall)
      implicit none
      integer*8 :: x
      include 'xmpi_bcast.inc'
   end subroutine xmpi_bcast_integer8

   subroutine xmpi_bcast_integer8_3(x,src,comm)
      implicit none
      integer*8 :: x
      integer, intent(in) :: src,comm
      integer ierror
      call MPI_Bcast(x, 1, MPI_INTEGER8, src, comm, ierror)
   end subroutine xmpi_bcast_integer8_3

   subroutine xmpi_bcast_complex16(x,toall)
      implicit none
      complex*16 :: x
      include 'xmpi_bcast.inc'
   end subroutine xmpi_bcast_complex16

   subroutine xmpi_bcast_complex16_3(x,src,comm)
      implicit none
      complex*16 :: x
      integer, intent(in) :: src,comm
      integer ierror
      call MPI_Bcast(x, 1, MPI_DOUBLE_COMPLEX, src, comm, ierror)
   end subroutine xmpi_bcast_complex16_3

   subroutine xmpi_bcast_char(x,toall)
      implicit none
      character(len=*) :: x
      include 'xmpi_bcast.inc'
   end subroutine xmpi_bcast_char
   subroutine xmpi_bcast_char_3(x,src,comm)
      !
      ! wwvv convert the string to an integer array,
      ! broadcast the integer array and convert back.
      ! Not very efficient, but this routine is seldom called,
      ! and this way it works for every flavour of Fortran 90.
      !
      implicit none
      character(len=*) :: x
      integer, intent(in) :: src,comm

      integer :: l,i,rank,ierr
      integer, allocatable, dimension(:) :: sx

      call MPI_Comm_rank(comm,rank,ierr)
      if (rank == src) then
         l = len_trim(x)
      endif

      call xmpi_bcast(l,src,comm)
      allocate(sx(l))

      if (rank == src) then
         do i = 1,l
            sx(i) = ichar(x(i:i))
         enddo
      endif

      call xmpi_bcast(sx,src,comm)

      if (rank /= src) then
         x = ' '
         do i = 1,l
            x(i:i) = char(sx(i))
         enddo
      endif

      deallocate(sx)

   end subroutine xmpi_bcast_char_3
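
   ! Illustrative usage sketch; the subroutine name and the file name
   ! are hypothetical, not part of the original module. The generic
   ! xmpi_bcast interface dispatches on the argument type, so a string
   ! is broadcast just like a scalar or an array. Here the compute
   ! master distributes a file name to all compute processes (to be
   ! called by the compute processes only):
   subroutine example_bcast_usage
      implicit none
      character(len=80) :: fname
      if (xmaster) fname = 'example.txt'
      call xmpi_bcast(fname, xmpi_master, xmpi_comm)
   end subroutine example_bcast_usage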
   subroutine xmpi_sendrecv_r1(sendbuf,dest,recvbuf,source)
      implicit none
      real*8, dimension(:), intent(in)  :: sendbuf
      real*8, dimension(:), intent(out) :: recvbuf
      integer, intent(in) :: dest,source

      integer :: ierror
      integer :: n

      n = size(sendbuf)

      call MPI_Sendrecv(sendbuf,n,MPI_DOUBLE_PRECISION,dest,100,   &
                        recvbuf,n,MPI_DOUBLE_PRECISION,source,100, &
                        xmpi_comm,MPI_STATUS_IGNORE,ierror)

   end subroutine xmpi_sendrecv_r1

   subroutine xmpi_sendrecv_r2(sendbuf,dest,recvbuf,source)
      implicit none
      real*8, dimension(:,:), intent(in)  :: sendbuf
      real*8, dimension(:,:), intent(out) :: recvbuf
      integer, intent(in) :: dest,source

      integer :: ierror
      integer :: n

      n = size(sendbuf)

      call MPI_Sendrecv(sendbuf,n,MPI_DOUBLE_PRECISION,dest,101,   &
                        recvbuf,n,MPI_DOUBLE_PRECISION,source,101, &
                        xmpi_comm,MPI_STATUS_IGNORE,ierror)

   end subroutine xmpi_sendrecv_r2

   subroutine xmpi_sendrecv_r3(sendbuf,dest,recvbuf,source)
      implicit none
      real*8, dimension(:,:,:), intent(in)  :: sendbuf
      real*8, dimension(:,:,:), intent(out) :: recvbuf
      integer, intent(in) :: dest,source

      integer :: ierror
      integer :: n

      n = size(sendbuf)

      call MPI_Sendrecv(sendbuf,n,MPI_DOUBLE_PRECISION,dest,101,   &
                        recvbuf,n,MPI_DOUBLE_PRECISION,source,101, &
                        xmpi_comm,MPI_STATUS_IGNORE,ierror)

   end subroutine xmpi_sendrecv_r3

   subroutine xmpi_sendrecv_i1(sendbuf,dest,recvbuf,source)
      implicit none
      integer, dimension(:), intent(in)  :: sendbuf
      integer, dimension(:), intent(out) :: recvbuf
      integer, intent(in) :: dest,source

      integer :: ierror
      integer :: n

      n = size(sendbuf)

      call MPI_Sendrecv(sendbuf,n,MPI_INTEGER,dest,102,   &
                        recvbuf,n,MPI_INTEGER,source,102, &
                        xmpi_comm,MPI_STATUS_IGNORE,ierror)

   end subroutine xmpi_sendrecv_i1

   subroutine xmpi_sendrecv_i2(sendbuf,dest,recvbuf,source)
      implicit none
      integer, dimension(:,:), intent(in)  :: sendbuf
      integer, dimension(:,:), intent(out) :: recvbuf
      integer, intent(in) :: dest,source

      integer :: ierror
      integer :: n

      n = size(sendbuf)

      call MPI_Sendrecv(sendbuf,n,MPI_INTEGER,dest,103,   &
                        recvbuf,n,MPI_INTEGER,source,103, &
                        xmpi_comm,MPI_STATUS_IGNORE,ierror)

   end subroutine xmpi_sendrecv_i2

   subroutine xmpi_allreduce_r0(x,op)
      implicit none
      real*8,intent(inout) :: x
      integer,intent(in)   :: op

      real*8  :: y
      integer :: ierror
      y = x
      call MPI_Allreduce(y,x,1,MPI_DOUBLE_PRECISION,op,xmpi_comm,ierror)
   end subroutine xmpi_allreduce_r0

   subroutine xmpi_allreduce_r1(x,op)
      implicit none
      real*8,dimension(:), intent(inout) :: x
      real*8,dimension(:), allocatable   :: y
      integer,intent(in)                 :: op

      integer :: ierror
      allocate(y(size(x)))
      y = x
      call MPI_Allreduce(y,x,size(x),MPI_DOUBLE_PRECISION,op,xmpi_comm,ierror)
      deallocate(y)
   end subroutine xmpi_allreduce_r1

   subroutine xmpi_allreduce_i0(x,op)
      implicit none
      integer,intent(inout) :: x
      integer,intent(in)    :: op

      integer :: y
      integer :: ierror
      y = x
      call MPI_Allreduce(y,x,1,MPI_INTEGER,op,xmpi_comm,ierror)
   end subroutine xmpi_allreduce_i0

   subroutine xmpi_reduce_r0(x,y,op)
      implicit none
      real*8, intent(in)  :: x
      real*8, intent(out) :: y
      integer, intent(in) :: op

      integer :: ierror
      call MPI_Reduce(x,y,1,MPI_DOUBLE_PRECISION,op,xmpi_master,xmpi_comm,ierror)
   end subroutine xmpi_reduce_r0

   subroutine xmpi_reduce_r1(x,y,op)
      implicit none
      real*8,dimension(:), intent(in)  :: x
      real*8,dimension(:), intent(out) :: y
      integer, intent(in)              :: op

      integer :: ierror
      call MPI_Reduce(x,y,size(x),MPI_DOUBLE_PRECISION,op,xmpi_master,xmpi_comm,ierror)
   end subroutine xmpi_reduce_r1

   subroutine xmpi_reduce_r2(x,y,op)
      implicit none
      real*8,dimension(:,:), intent(in)  :: x
      real*8,dimension(:,:), intent(out) :: y
      integer, intent(in)                :: op

      integer :: ierror
      call MPI_Reduce(x,y,size(x),MPI_DOUBLE_PRECISION,op,xmpi_master,xmpi_comm,ierror)
   end subroutine xmpi_reduce_r2

   subroutine xmpi_reduce_i0(x,y,op)
      implicit none
      integer, intent(in)  :: x
      integer, intent(out) :: y
      integer, intent(in)  :: op

      integer :: ierror
      call MPI_Reduce(x,y,1,MPI_INTEGER,op,xmpi_master,xmpi_comm,ierror)
   end subroutine xmpi_reduce_i0

   subroutine xmpi_reduce_i1(x,y,op)
      implicit none
      integer,dimension(:),intent(in)  :: x
      integer,dimension(:),intent(out) :: y
      integer,intent(in)               :: op

      integer :: ierror
      call MPI_Reduce(x,y,size(x),MPI_INTEGER,op,xmpi_master,xmpi_comm,ierror)
   end subroutine xmpi_reduce_i1
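
   ! Illustrative usage sketch; the subroutine name and the values are
   ! hypothetical, not part of the original module. The generic
   ! xmpi_allreduce combines a value over all compute processes in
   ! place, whereas xmpi_reduce leaves the result on xmpi_master only
   ! (to be called by the compute processes):
   subroutine example_reduce_usage
      implicit none
      real*8 :: dtmin, dtsum
      dtmin = 0.01d0 * (xmpi_rank + 1)        ! some local value
      call xmpi_allreduce(dtmin, MPI_MIN)     ! global minimum, everywhere
      call xmpi_reduce(dtmin, dtsum, MPI_SUM) ! sum, on the compute master
   end subroutine example_reduce_usage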
   !
   ! shift routines: x(m,n) is the matrix in this process
   ! direction = 'u': shift up,    send to top   x(2,:),   receive from bot   x(m,:)
   ! direction = 'd': shift down,  send to bot   x(m-1,:), receive from top   x(1,:)
   ! direction = 'l': shift left,  send to left  x(:,2),   receive from right x(:,n)
   ! direction = 'r': shift right, send to right x(:,n-1), receive from left  x(:,1)
   !
   ! 'm:', '1:', ':n' and ':1' can be used as well; these may be easier to remember:
   ! 'm:' : x(m,:) will be replaced, except for a far bottom process
   ! '1:' : x(1,:) will be replaced, except for a far top process
   ! ':n' : x(:,n) will be replaced, except for a far right process
   ! ':1' : x(:,1) will be replaced, except for a far left process

   subroutine xmpi_shift_r2(x,direction)
      implicit none
      real*8,dimension(:,:),intent(inout) :: x
      character(len=*),intent(in)         :: direction

      integer :: m,n

      m = size(x,1)
      n = size(x,2)

      select case(direction)
      case('u','m:')
         call xmpi_sendrecv(x(2,:),  xmpi_top,  x(m,:),xmpi_bot)
      case('d','1:')
         call xmpi_sendrecv(x(m-1,:),xmpi_bot,  x(1,:),xmpi_top)
      case('l',':n')
         call xmpi_sendrecv(x(:,2),  xmpi_left, x(:,n),xmpi_right)
      case('r',':1')
         call xmpi_sendrecv(x(:,n-1),xmpi_right,x(:,1),xmpi_left)
      case default
         if(xmaster) then
            write (*,*) 'Invalid direction parameter for xmpi_shift_r2: "'// &
            direction//'"'
            call halt_program
         endif
      end select

   end subroutine xmpi_shift_r2

   subroutine xmpi_shift_i2(x,direction)
      implicit none
      integer, dimension (:,:), intent(inout) :: x
      character(len=*), intent(in)            :: direction

      integer :: m,n

      m = size(x,1)
      n = size(x,2)

      select case(direction)
      case('u','m:')
         call xmpi_sendrecv(x(2,:),  xmpi_top,  x(m,:),xmpi_bot)
      case('d','1:')
         call xmpi_sendrecv(x(m-1,:),xmpi_bot,  x(1,:),xmpi_top)
      case('l',':n')
         call xmpi_sendrecv(x(:,2),  xmpi_left, x(:,n),xmpi_right)
      case('r',':1')
         call xmpi_sendrecv(x(:,n-1),xmpi_right,x(:,1),xmpi_left)
      case default
         if(xmaster) then
            write (*,*) 'Invalid direction parameter for xmpi_shift_i2: "'// &
            direction//'"'
            call halt_program
         endif
      end select

   end subroutine xmpi_shift_i2

   subroutine xmpi_shift_r3(x,direction)
      implicit none
      real*8, dimension (:,:,:), intent(inout) :: x
      character(len=*), intent(in)             :: direction

      integer :: m,n,l

      m = size(x,1)
      n = size(x,2)
      l = size(x,3)

      select case(direction)
      case('u','m:')
         call xmpi_sendrecv(x(2,:,:),  xmpi_top,  x(m,:,:),xmpi_bot)
      case('d','1:')
         call xmpi_sendrecv(x(m-1,:,:),xmpi_bot,  x(1,:,:),xmpi_top)
      case('l',':n')
         call xmpi_sendrecv(x(:,2,:),  xmpi_left, x(:,n,:),xmpi_right)
      case('r',':1')
         call xmpi_sendrecv(x(:,n-1,:),xmpi_right,x(:,1,:),xmpi_left)
      case default
         if(xmaster) then
            write (*,*) 'Invalid direction parameter for xmpi_shift_r3: "'// &
            direction//'"'
            call halt_program
         endif
      end select

   end subroutine xmpi_shift_r3

   subroutine xmpi_shift_i3(x,direction)
      implicit none
      integer, dimension (:,:,:), intent(inout) :: x
      character(len=*), intent(in)              :: direction

      integer :: m,n,l

      m = size(x,1)
      n = size(x,2)
      l = size(x,3)

      select case(direction)
      case('u','m:')
         call xmpi_sendrecv(x(2,:,:),  xmpi_top,  x(m,:,:),xmpi_bot)
      case('d','1:')
         call xmpi_sendrecv(x(m-1,:,:),xmpi_bot,  x(1,:,:),xmpi_top)
      case('l',':n')
         call xmpi_sendrecv(x(:,2,:),  xmpi_left, x(:,n,:),xmpi_right)
      case('r',':1')
         call xmpi_sendrecv(x(:,n-1,:),xmpi_right,x(:,1,:),xmpi_left)
      case default
         if(xmaster) then
            write (*,*) 'Invalid direction parameter for xmpi_shift_i3: "'// &
            direction//'"'
            call halt_program
         endif
      end select

   end subroutine xmpi_shift_i3
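
   ! Illustrative sketch of the character-direction interface above;
   ! the subroutine name example_shift_usage is hypothetical, not part
   ! of the original module. After these two calls, x(:,n) holds the
   ! right neighbour's column 2 and x(:,1) holds the left neighbour's
   ! column n-1, so both overlap columns are refreshed. At the far
   ! left and right of the global domain the neighbour is
   ! MPI_PROC_NULL, and the sendrecv is a no-op there.
   subroutine example_shift_usage
      implicit none
      real*8, dimension(10,10) :: x
      x = xmpi_rank
      call xmpi_shift(x,':n')   ! fill x(:,n) from the right neighbour
      call xmpi_shift(x,':1')   ! fill x(:,1) from the left neighbour
   end subroutine example_shift_usage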
   ! translation from nx+1, ny+1 to m and n:
   !
   !   m=nx+1    nx=m-1
   !   n=ny+1    ny=n-1
   !
   ! Overview of what each staggered variable sends and receives
   ! (first line: rows/columns sent; second line: rows/columns received):
   !
   ! variable    SHIFT_X_D    SHIFT_X_U    SHIFT_Y_R    SHIFT_Y_L
   !             (t->b, 'd')  (b->t, 'u')  (l->r, 'r')  (r->l, 'l')
   !
   ! ee,rr       m-3:m-2,:    3:4,:        :,n-3:n-2    :,3:4
   !             1:2,:        m-1:m,:      :,1:2        :,n-1:n
   !
   ! uu          m-3,:        2:3,:        :,n-3:n-2    :,3:4
   !             1,:          m-2:m-1,:    :,1:2        :,n-1:n
   !
   ! vv          m-3:m-2,:    3:4,:        :,n-3        :,2:3
   !             1:2,:        m-1:m,:      :,1          :,n-2:n-1
   !
   ! zs,ccg,zb   m-2,:        3,:          :,n-2        :,3
   !             2,:          m-1,:        :,2          :,n-1
   !
   ! So:
   !   i     p          j     q
   !   1 <-> m-3        1 <-> n-3
   !   2 <-> m-2        2 <-> n-2
   !   3 <-> m-1        3 <-> n-1
   !   4 <-> m          4 <-> n
   !
   !   p = m-4+i        q = n-4+j

   ! About the _l subroutines:
   !
   ! xmpi_shift(x,SHIFT_Y_R,1,2): columns 1:2   are filled in (aka updated)
   ! xmpi_shift(x,SHIFT_Y_L,3,4): columns n-1,n are filled in (aka updated)
   ! xmpi_shift(x,SHIFT_X_D,1,2): rows    1:2   are filled in (aka updated)
   ! xmpi_shift(x,SHIFT_X_U,3,4): rows    m-1,m are filled in (aka updated)
   !
   subroutine xmpi_shift_r2_l(x,direction,i1,i2)
      implicit none
      real*8,dimension(:,:),intent(inout) :: x
      integer, intent(in) :: direction
      integer, intent(in) :: i1,i2

      integer :: m,n,s1,s2,r1,r2
      integer, parameter :: nbord = 2, nover = 2*nbord

      ! nover is the number of overlapping rows/columns

      ! s1,s2 will contain the indices of the first and last row/column to send
      ! r1,r2 will contain the indices of the first and last row/column to receive
      ! m and n contain the 1st and 2nd dimension of x:
      !  shift in x direction: m (= nx+1) is used
      !  shift in y direction: n (= ny+1) is used

      m = size(x,1)
      n = size(x,2)

      ! sanity check
      select case(direction)
      case(SHIFT_Y_R,SHIFT_Y_L,SHIFT_X_U,SHIFT_X_D)
         continue
      case default
         if (xmaster) then
            write(*,*) 'Invalid value for direction in xmpi_shift_r2_l ',direction
            call halt_program
         endif
      endselect

      select case(direction)
      case(SHIFT_Y_R)
         s1 = n - nover + i1
         s2 = n - nover + i2
         r1 = i1
         r2 = i2
         call xmpi_sendrecv(x(:,s1:s2),xmpi_right,x(:,r1:r2),xmpi_left)
      case(SHIFT_X_D)
         s1 = m - nover + i1
         s2 = m - nover + i2
         r1 = i1
         r2 = i2
         call xmpi_sendrecv(x(s1:s2,:),xmpi_bot, x(r1:r2,:),xmpi_top)
      case(SHIFT_Y_L)
         s1 = i1
         s2 = i2
         r1 = n - nover + i1
         r2 = n - nover + i2
         call xmpi_sendrecv(x(:,s1:s2),xmpi_left, x(:,r1:r2),xmpi_right)
      case(SHIFT_X_U)
         s1 = i1
         s2 = i2
         r1 = m - nover + i1
         r2 = m - nover + i2
         call xmpi_sendrecv(x(s1:s2,:),xmpi_top, x(r1:r2,:),xmpi_bot)
      endselect

   end subroutine xmpi_shift_r2_l
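
   ! Illustrative sketch of the index-based interface above; the
   ! subroutine name example_shift_l_usage is hypothetical, not part
   ! of the original module. With nover = 4 overlapping columns,
   ! xmpi_shift(x,SHIFT_Y_R,1,2) sends columns n-3:n-2 to the right
   ! neighbour and fills columns 1:2 from the left neighbour:
   subroutine example_shift_l_usage
      implicit none
      real*8, dimension(10,10) :: x
      x = xmpi_rank
      call xmpi_shift(x,SHIFT_Y_R,1,2)   ! update columns 1:2
      call xmpi_shift(x,SHIFT_Y_L,3,4)   ! update columns n-1:n
   end subroutine example_shift_l_usage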
   subroutine xmpi_shift_i2_l(x,direction,i1,i2)
      implicit none
      integer,dimension(:,:),intent(inout) :: x
      integer, intent(in) :: direction
      integer, intent(in) :: i1,i2

      integer :: m,n,s1,s2,r1,r2
      integer, parameter :: nbord = 2, nover = 2*nbord

      ! nover is the number of overlapping rows/columns

      ! s1,s2 will contain the indices of the first and last row/column to send
      ! r1,r2 will contain the indices of the first and last row/column to receive
      ! m and n contain the 1st and 2nd dimension of x:
      !  shift in x direction: m (= nx+1) is used
      !  shift in y direction: n (= ny+1) is used

      m = size(x,1)
      n = size(x,2)

      ! sanity check
      select case(direction)
      case(SHIFT_Y_R,SHIFT_Y_L,SHIFT_X_U,SHIFT_X_D)
         continue
      case default
         if (xmaster) then
            write(*,*) 'Invalid value for direction in xmpi_shift_i2_l ',direction
            call halt_program
         endif
      endselect

      select case(direction)
      case(SHIFT_Y_R)
         s1 = n - nover + i1
         s2 = n - nover + i2
         r1 = i1
         r2 = i2
         call xmpi_sendrecv(x(:,s1:s2),xmpi_right,x(:,r1:r2),xmpi_left)
      case(SHIFT_X_D)
         s1 = m - nover + i1
         s2 = m - nover + i2
         r1 = i1
         r2 = i2
         call xmpi_sendrecv(x(s1:s2,:),xmpi_bot, x(r1:r2,:),xmpi_top)
      case(SHIFT_Y_L)
         s1 = i1
         s2 = i2
         r1 = n - nover + i1
         r2 = n - nover + i2
         call xmpi_sendrecv(x(:,s1:s2),xmpi_left, x(:,r1:r2),xmpi_right)
      case(SHIFT_X_U)
         s1 = i1
         s2 = i2
         r1 = m - nover + i1
         r2 = m - nover + i2
         call xmpi_sendrecv(x(s1:s2,:),xmpi_top, x(r1:r2,:),xmpi_bot)
      endselect

   end subroutine xmpi_shift_i2_l
   subroutine xmpi_shift_r3_l(x,direction,i1,i2)
      !
      implicit none
      real*8,dimension(:,:,:),intent(inout) :: x
      integer, intent(in) :: direction
      integer, intent(in) :: i1,i2

      integer :: m,n,s1,s2,r1,r2
      integer, parameter :: nbord = 2, nover = 2*nbord

      ! nover is the number of overlapping rows/columns

      ! s1,s2 will contain the indices of the first and last row/column to send
      ! r1,r2 will contain the indices of the first and last row/column to receive
      ! m and n contain the 1st and 2nd dimension of x:
      !  shift in x direction: m (= nx+1) is used
      !  shift in y direction: n (= ny+1) is used

      m = size(x,1)
      n = size(x,2)

      ! sanity check
      select case(direction)
      case(SHIFT_Y_R,SHIFT_Y_L,SHIFT_X_U,SHIFT_X_D)
         continue
      case default
         if (xmaster) then
            write(*,*) 'Invalid value for direction in xmpi_shift_r3_l ',direction
            call halt_program
         endif
      endselect

      select case(direction)
      case(SHIFT_Y_R)
         s1 = n - nover + i1
         s2 = n - nover + i2
         r1 = i1
         r2 = i2
         call xmpi_sendrecv(x(:,s1:s2,:),xmpi_right,x(:,r1:r2,:),xmpi_left)
      case(SHIFT_X_D)
         s1 = m - nover + i1
         s2 = m - nover + i2
         r1 = i1
         r2 = i2
         call xmpi_sendrecv(x(s1:s2,:,:),xmpi_bot, x(r1:r2,:,:),xmpi_top)
      case(SHIFT_Y_L)
         s1 = i1
         s2 = i2
         r1 = n - nover + i1
         r2 = n - nover + i2
         call xmpi_sendrecv(x(:,s1:s2,:),xmpi_left, x(:,r1:r2,:),xmpi_right)
      case(SHIFT_X_U)
         s1 = i1
         s2 = i2
         r1 = m - nover + i1
         r2 = m - nover + i2
         call xmpi_sendrecv(x(s1:s2,:,:),xmpi_top, x(r1:r2,:,:),xmpi_bot)
      endselect
   end subroutine xmpi_shift_r3_l

   subroutine xmpi_shift_ee_r2(x)
      implicit none
      real*8,dimension(:,:),intent(inout) :: x
#ifdef SHIFT_TIMER
      real*8 ttt
      call xmpi_barrier
      ttt=MPI_Wtime()
#endif
      call xmpi_shift(x,SHIFT_Y_R,1,2)
      call xmpi_shift(x,SHIFT_Y_L,3,4)
      call xmpi_shift(x,SHIFT_X_U,3,4)
      call xmpi_shift(x,SHIFT_X_D,1,2)
#ifdef SHIFT_TIMER
      call xmpi_barrier
      if(xmaster) print *,'shift_ee_r2:',MPI_Wtime()-ttt,size(x,1),size(x,2)
#endif
   end subroutine xmpi_shift_ee_r2

   subroutine xmpi_shift_ee_i2(xi)
      implicit none
      integer,dimension(:,:),intent(inout) :: xi
#ifdef SHIFT_TIMER
      real*8 ttt
      call xmpi_barrier
      ttt=MPI_Wtime()
#endif
      call xmpi_shift(xi,SHIFT_Y_R,1,2)
      call xmpi_shift(xi,SHIFT_Y_L,3,4)
      call xmpi_shift(xi,SHIFT_X_U,3,4)
      call xmpi_shift(xi,SHIFT_X_D,1,2)
#ifdef SHIFT_TIMER
      call xmpi_barrier
      if(xmaster) print *,'shift_ee_i2:',MPI_Wtime()-ttt,size(xi,1),size(xi,2)
#endif
   end subroutine xmpi_shift_ee_i2

   subroutine xmpi_shift_ee_r3(x)
      implicit none
      real*8,dimension(:,:,:),intent(inout) :: x
#ifdef SHIFT_TIMER
      real*8 ttt
      call xmpi_barrier
      ttt=MPI_Wtime()
#endif
      call xmpi_shift(x,SHIFT_Y_R,1,2)
      call xmpi_shift(x,SHIFT_Y_L,3,4)
      call xmpi_shift(x,SHIFT_X_U,3,4)
      call xmpi_shift(x,SHIFT_X_D,1,2)
#ifdef SHIFT_TIMER
      call xmpi_barrier
      if(xmaster) print *,'shift_ee_r3:',MPI_Wtime()-ttt
#endif
   end subroutine xmpi_shift_ee_r3

   subroutine xmpi_shift_uu_r2(x)
      implicit none
      real*8,dimension(:,:),intent(inout) :: x
#ifdef SHIFT_TIMER
      real*8 ttt
      call xmpi_barrier
      ttt=MPI_Wtime()
#endif
      call xmpi_shift(x,SHIFT_Y_R,1,2)
      call xmpi_shift(x,SHIFT_Y_L,3,4)
      call xmpi_shift(x,SHIFT_X_U,2,3)
      call xmpi_shift(x,SHIFT_X_D,1,1)
#ifdef SHIFT_TIMER
      call xmpi_barrier
      if(xmaster) print *,'shift_uu_r2:',MPI_Wtime()-ttt
#endif
   end subroutine xmpi_shift_uu_r2
   subroutine xmpi_shift_uu_r3(x)
      implicit none
      real*8,dimension(:,:,:),intent(inout) :: x
#ifdef SHIFT_TIMER
      real*8 ttt
      call xmpi_barrier
      ttt=MPI_Wtime()
#endif
      call xmpi_shift(x,SHIFT_Y_R,1,2)
      call xmpi_shift(x,SHIFT_Y_L,3,4)
      call xmpi_shift(x,SHIFT_X_U,2,3)
      call xmpi_shift(x,SHIFT_X_D,1,1)
#ifdef SHIFT_TIMER
      call xmpi_barrier
      if(xmaster) print *,'shift_uu_r3:',MPI_Wtime()-ttt
#endif
   end subroutine xmpi_shift_uu_r3

   subroutine xmpi_shift_vv_r2(x)
      implicit none
      real*8,dimension(:,:),intent(inout) :: x
#ifdef SHIFT_TIMER
      real*8 ttt
      call xmpi_barrier
      ttt=MPI_Wtime()
#endif
      call xmpi_shift(x,SHIFT_Y_R,1,1)
      call xmpi_shift(x,SHIFT_Y_L,2,3)
      call xmpi_shift(x,SHIFT_X_U,3,4)
      call xmpi_shift(x,SHIFT_X_D,1,2)
#ifdef SHIFT_TIMER
      call xmpi_barrier
      if(xmaster) print *,'shift_vv_r2:',MPI_Wtime()-ttt
#endif
   end subroutine xmpi_shift_vv_r2

   subroutine xmpi_shift_vv_r3(x)
      implicit none
      real*8,dimension(:,:,:),intent(inout) :: x
#ifdef SHIFT_TIMER
      real*8 ttt
      call xmpi_barrier
      ttt=MPI_Wtime()
#endif
      call xmpi_shift(x,SHIFT_Y_R,1,1)
      call xmpi_shift(x,SHIFT_Y_L,2,3)
      call xmpi_shift(x,SHIFT_X_U,3,4)
      call xmpi_shift(x,SHIFT_X_D,1,2)
#ifdef SHIFT_TIMER
      call xmpi_barrier
      if(xmaster) print *,'shift_vv_r3:',MPI_Wtime()-ttt
#endif
   end subroutine xmpi_shift_vv_r3

   subroutine xmpi_shift_zs_r2(x)
      implicit none
      real*8,dimension(:,:),intent(inout) :: x
#ifdef SHIFT_TIMER
      real*8 ttt
      call xmpi_barrier
      ttt=MPI_Wtime()
#endif
      call xmpi_shift(x,SHIFT_Y_R,2,2)
      call xmpi_shift(x,SHIFT_Y_L,3,3)
      call xmpi_shift(x,SHIFT_X_U,3,3)
      call xmpi_shift(x,SHIFT_X_D,2,2)
#ifdef SHIFT_TIMER
      call xmpi_barrier
      if(xmaster) print *,'shift_zs_r2:',MPI_Wtime()-ttt
#endif
   end subroutine xmpi_shift_zs_r2

   subroutine xmpi_shift_zs_r3(x)
      implicit none
      real*8,dimension(:,:,:),intent(inout) :: x
#ifdef SHIFT_TIMER
      real*8 ttt
      call xmpi_barrier
      ttt=MPI_Wtime()
#endif
      call xmpi_shift(x,SHIFT_Y_R,2,2)
      call xmpi_shift(x,SHIFT_Y_L,3,3)
      call xmpi_shift(x,SHIFT_X_U,3,3)
      call xmpi_shift(x,SHIFT_X_D,2,2)
#ifdef SHIFT_TIMER
      call xmpi_barrier
      if(xmaster) print *,'shift_zs_r3:',MPI_Wtime()-ttt
#endif
   end subroutine xmpi_shift_zs_r3
   !________________________________________________________________________________

   subroutine xmpi_send_r0(from,to,x)
      ! use this to send to one process, in communicator xmpi_ocomm;
      ! receiver and sender call this same subroutine with the same
      ! from and to
      integer, intent(in)   :: from,to
      real*8, intent(inout) :: x

      integer ier
      ! MPI_SEND(BUF, COUNT, DATATYPE, DEST, TAG, COMM, IERROR)
      ! MPI_RECV(BUF, COUNT, DATATYPE, SOURCE, TAG, COMM, STATUS, IERROR)

      if (from .eq. to) return

      if (xmpi_orank .eq. from) then
         call MPI_Send(x, 1, MPI_DOUBLE_PRECISION, to, 1011, xmpi_ocomm, ier)
      elseif (xmpi_orank .eq. to) then
         call MPI_Recv(x, 1, MPI_DOUBLE_PRECISION, from, 1011, xmpi_ocomm, MPI_STATUS_IGNORE, ier)
      endif
   end subroutine xmpi_send_r0
   !________________________________________________________________________________
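
   ! Illustrative usage sketch; the subroutine name example_send_usage
   ! and the value are hypothetical, not part of the original module.
   ! Sender and receiver call the same routine with identical from/to
   ! arguments; only the process whose xmpi_orank matches from or to
   ! actually sends or receives. Here the compute master (rank
   ! xmpi_imaster in xmpi_ocomm) hands a scalar to the output process:
   subroutine example_send_usage
      implicit none
      real*8 :: t
      t = 0.d0
      if (xmpi_orank .eq. xmpi_imaster) t = 42.d0
      call xmpi_send(xmpi_imaster, xmpi_omaster, t)
   end subroutine example_send_usage
   !________________________________________________________________________________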
   subroutine xmpi_send_i0(from,to,x)
      ! use this to send to one process, in communicator xmpi_ocomm;
      ! receiver and sender call this same subroutine with the same
      ! from and to
      integer, intent(in)    :: from,to
      integer, intent(inout) :: x

      integer ier
      ! MPI_SEND(BUF, COUNT, DATATYPE, DEST, TAG, COMM, IERROR)
      ! MPI_RECV(BUF, COUNT, DATATYPE, SOURCE, TAG, COMM, STATUS, IERROR)

      if (from .eq. to) return

      if (xmpi_orank .eq. from) then
         call MPI_Send(x, 1, MPI_INTEGER, to, 1012, xmpi_ocomm, ier)
      elseif (xmpi_orank .eq. to) then
         call MPI_Recv(x, 1, MPI_INTEGER, from, 1012, xmpi_ocomm, MPI_STATUS_IGNORE, ier)
      endif
   end subroutine xmpi_send_i0
   !________________________________________________________________________________

   subroutine xmpi_send_l0(from,to,x)
      ! use this to send to one process, in communicator xmpi_ocomm;
      ! receiver and sender call this same subroutine with the same
      ! from and to
      integer, intent(in)    :: from,to
      logical, intent(inout) :: x

      integer ier
      ! MPI_SEND(BUF, COUNT, DATATYPE, DEST, TAG, COMM, IERROR)
      ! MPI_RECV(BUF, COUNT, DATATYPE, SOURCE, TAG, COMM, STATUS, IERROR)

      if (from .eq. to) return

      if (xmpi_orank .eq. from) then
         call MPI_Send(x, 1, MPI_LOGICAL, to, 1013, xmpi_ocomm, ier)
      elseif (xmpi_orank .eq. to) then
         call MPI_Recv(x, 1, MPI_LOGICAL, from, 1013, xmpi_ocomm, MPI_STATUS_IGNORE, ier)
      endif
   end subroutine xmpi_send_l0
   !________________________________________________________________________________

   subroutine xmpi_send_r1(from,to,x)
      ! use this to send to one process, in communicator xmpi_ocomm;
      ! receiver and sender call this same subroutine with the same
      ! from and to
      integer, intent(in)   :: from,to
      real*8, intent(inout) :: x(:)

      integer ier
      ! MPI_SEND(BUF, COUNT, DATATYPE, DEST, TAG, COMM, IERROR)
      ! MPI_RECV(BUF, COUNT, DATATYPE, SOURCE, TAG, COMM, STATUS, IERROR)

      if (from .eq. to) return

      if (xmpi_orank .eq. from) then
         call MPI_Send(x, size(x), MPI_DOUBLE_PRECISION, to, 1014, xmpi_ocomm, ier)
      elseif (xmpi_orank .eq. to) then
         call MPI_Recv(x, size(x), MPI_DOUBLE_PRECISION, from, 1014, xmpi_ocomm, MPI_STATUS_IGNORE, ier)
      endif
   end subroutine xmpi_send_r1
   !________________________________________________________________________________

   subroutine xmpi_send_r2(from,to,x)
      ! use this to send to one process, in communicator xmpi_ocomm;
      ! receiver and sender call this same subroutine with the same
      ! from and to
      integer, intent(in)   :: from,to
      real*8, intent(inout) :: x(:,:)

      integer ier
      ! MPI_SEND(BUF, COUNT, DATATYPE, DEST, TAG, COMM, IERROR)
      ! MPI_RECV(BUF, COUNT, DATATYPE, SOURCE, TAG, COMM, STATUS, IERROR)

      if (from .eq. to) return

      if (xmpi_orank .eq. from) then
         call MPI_Send(x, size(x), MPI_DOUBLE_PRECISION, to, 1018, xmpi_ocomm, ier)
      elseif (xmpi_orank .eq. to) then
         call MPI_Recv(x, size(x), MPI_DOUBLE_PRECISION, from, 1018, xmpi_ocomm, MPI_STATUS_IGNORE, ier)
      endif
   end subroutine xmpi_send_r2
   !________________________________________________________________________________
   subroutine xmpi_send_r3(from,to,x)
      ! use this to send to one process, in communicator xmpi_ocomm;
      ! receiver and sender call this same subroutine with the same
      ! from and to
      integer, intent(in)   :: from,to
      real*8, intent(inout) :: x(:,:,:)

      integer ier
      ! MPI_SEND(BUF, COUNT, DATATYPE, DEST, TAG, COMM, IERROR)
      ! MPI_RECV(BUF, COUNT, DATATYPE, SOURCE, TAG, COMM, STATUS, IERROR)

      if (from .eq. to) return

      if (xmpi_orank .eq. from) then
         call MPI_Send(x, size(x), MPI_DOUBLE_PRECISION, to, 1019, xmpi_ocomm, ier)
      elseif (xmpi_orank .eq. to) then
         call MPI_Recv(x, size(x), MPI_DOUBLE_PRECISION, from, 1019, xmpi_ocomm, MPI_STATUS_IGNORE, ier)
      endif
   end subroutine xmpi_send_r3
   !________________________________________________________________________________

   subroutine xmpi_send_r4(from,to,x)
      ! use this to send to one process, in communicator xmpi_ocomm;
      ! receiver and sender call this same subroutine with the same
      ! from and to
      integer, intent(in)   :: from,to
      real*8, intent(inout) :: x(:,:,:,:)

      integer ier
      ! MPI_SEND(BUF, COUNT, DATATYPE, DEST, TAG, COMM, IERROR)
      ! MPI_RECV(BUF, COUNT, DATATYPE, SOURCE, TAG, COMM, STATUS, IERROR)

      if (from .eq. to) return

      if (xmpi_orank .eq. from) then
         call MPI_Send(x, size(x), MPI_DOUBLE_PRECISION, to, 1020, xmpi_ocomm, ier)
      elseif (xmpi_orank .eq. to) then
         call MPI_Recv(x, size(x), MPI_DOUBLE_PRECISION, from, 1020, xmpi_ocomm, MPI_STATUS_IGNORE, ier)
      endif
   end subroutine xmpi_send_r4
   !________________________________________________________________________________

   subroutine xmpi_send_i1(from,to,x)
      ! use this to send to one process, in communicator xmpi_ocomm;
      ! receiver and sender call this same subroutine with the same
      ! from and to
      integer, intent(in)    :: from,to
      integer, intent(inout) :: x(:)

      integer ier
      ! MPI_SEND(BUF, COUNT, DATATYPE, DEST, TAG, COMM, IERROR)
      ! MPI_RECV(BUF, COUNT, DATATYPE, SOURCE, TAG, COMM, STATUS, IERROR)

      if (from .eq. to) return

      if (xmpi_orank .eq. from) then
         call MPI_Send(x, size(x), MPI_INTEGER, to, 1015, xmpi_ocomm, ier)
      elseif (xmpi_orank .eq. to) then
         call MPI_Recv(x, size(x), MPI_INTEGER, from, 1015, xmpi_ocomm, MPI_STATUS_IGNORE, ier)
      endif
   end subroutine xmpi_send_i1
   !________________________________________________________________________________

   subroutine xmpi_send_l1(from,to,x)
      ! use this to send to one process, in communicator xmpi_ocomm;
      ! receiver and sender call this same subroutine with the same
      ! from and to
      integer, intent(in)    :: from,to
      logical, intent(inout) :: x(:)

      integer ier
      ! MPI_SEND(BUF, COUNT, DATATYPE, DEST, TAG, COMM, IERROR)
      ! MPI_RECV(BUF, COUNT, DATATYPE, SOURCE, TAG, COMM, STATUS, IERROR)

      if (from .eq. to) return

      if (xmpi_orank .eq. from) then
         call MPI_Send(x, size(x), MPI_LOGICAL, to, 1016, xmpi_ocomm, ier)
      elseif (xmpi_orank .eq. to) then
         call MPI_Recv(x, size(x), MPI_LOGICAL, from, 1016, xmpi_ocomm, MPI_STATUS_IGNORE, ier)
      endif
   end subroutine xmpi_send_l1
   !________________________________________________________________________________
   ! The following sends one integer from 'from' to 'to'.
   ! The receive by 'to' is done using a sleep/MPI_Test cycle
   ! to prevent cpu usage while waiting for the message.
   ! 'from' and 'to' are ranks in xmpi_ocomm.

   subroutine xmpi_send_sleep(from,to)
      use sleeper
      integer, intent(in) :: from,to

      integer :: ier, request
      integer :: buf = 0
      logical :: flag

      if (xmpi_orank .eq. from) then
         call MPI_Send(buf, 1, MPI_INTEGER, to, 1017, xmpi_ocomm, ier)
      elseif (xmpi_orank .eq. to) then
         call MPI_Irecv(buf, 1, MPI_INTEGER, from, 1017, xmpi_ocomm, request, ier)
         do
            call MPI_Test(request, flag, MPI_STATUS_IGNORE, ier)
            if (flag) exit
            call myusleep(2000) ! sleep 2 milliseconds
         enddo
      endif
   end subroutine xmpi_send_sleep
   !________________________________________________________________________________

   subroutine testje
      implicit none
      real*8, dimension(10,10)    :: x
      real*8, dimension(10,10,10) :: y
      call xmpi_shift_ee(x)
      call xmpi_shift_uu(x)
      call xmpi_shift_vv(x)
      call xmpi_shift_zs(x)
      call xmpi_shift_ee(y)
      call xmpi_shift_uu(y)
      call xmpi_shift_vv(y)
      call xmpi_shift_zs(y)
   end subroutine testje

   subroutine xmpi_barrier(toall)
      implicit none
      logical, intent(in), optional :: toall
      integer ierror,comm
      comm = xmpi_comm
      if (present(toall)) then
         if(toall) then
            comm = xmpi_ocomm
         endif
      endif
      call MPI_Barrier(comm,ierror)
   end subroutine xmpi_barrier

   !
   ! get a row from a matrix in the same processor column
   !
   subroutine xmpi_getrow(a,n,l,prow,b)
      implicit none
      real*8, dimension(:,:), intent(in) :: a    ! the matrix
      integer, intent(in)                :: n    ! number of elements in the row
      character, intent(in)              :: l    ! '1': get first row
                                                 ! 'm': get last row
      integer, intent(in)                :: prow ! row number of the process
                                                 !  to get the row from
      real*8, dimension(:), intent(out)  :: b    ! the row from process prow

      ! Note: a and l are only needed at the sending process

      integer :: row,ierror
      integer :: ll,dest,source,tag,r
      real*8, dimension(n) :: rowdata
      integer, allocatable, dimension(:) :: requests

      source = (xmpi_pcol-1)*xmpi_m + prow - 1 ! Sending process
      ll = 1

      if (source .eq. xmpi_rank) then ! the sending process
         select case(l)
         case('1')
            ll = 1
         case('m')
            ll = size(a,1)
         case default
            write(*,*) 'Error in xmpi_getrow, l="'//l//'"'
            call halt_program
         end select

         rowdata = a(ll,:)
         allocate(requests(xmpi_m-1))
         dest = (xmpi_pcol-1)*xmpi_m ! First receiving process
         r = 0
         do row = 1,xmpi_m
            if (dest .eq. xmpi_rank) then ! do not send to itself,
               b = rowdata                !  but copy
            else
               r = r + 1
               tag = dest
               call MPI_Isend(rowdata(1), n, MPI_DOUBLE_PRECISION, &
               dest, tag, xmpi_comm, requests(r), ierror)
            endif
            dest = dest + 1 ! next receiving process
         enddo
         call MPI_Waitall(r,requests,MPI_STATUSES_IGNORE,ierror)
         deallocate(requests)
      else ! receiving process
         tag = xmpi_rank
         call MPI_Recv(b, n, MPI_DOUBLE_PRECISION, &
         source, tag, xmpi_comm, MPI_STATUS_IGNORE, ierror)
      endif

      ! wwvv if this is needed often, then a neat subroutine, using
      ! column communicators and MPI_Bcast, would be appropriate;
      ! see the sketch below

   end subroutine xmpi_getrow
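
   ! As the remark in xmpi_getrow suggests, a frequently needed getrow
   ! can be implemented with MPI_Bcast over per-column communicators.
   ! The sketch below is illustrative only (subroutine and variable
   ! names are hypothetical, not part of the original module); in real
   ! use the column communicator would be created once and reused
   ! instead of being split and freed on every call.
   subroutine example_getrow_bcast(a,n,l,prow,b)
      implicit none
      real*8, dimension(:,:), intent(in) :: a
      integer, intent(in)                :: n, prow
      character, intent(in)              :: l
      real*8, dimension(:), intent(out)  :: b
      integer :: colcomm, ierror
      ! one communicator per processor column, ranks ordered by xmpi_prow:
      call MPI_Comm_split(xmpi_comm, xmpi_pcol, xmpi_prow, colcomm, ierror)
      if (xmpi_prow == prow) then ! the sending process fills the buffer
         if (l == '1') then
            b = a(1,:)
         else
            b = a(size(a,1),:)
         endif
      endif
      ! rank prow-1 in colcomm is the process with xmpi_prow == prow:
      call MPI_Bcast(b, n, MPI_DOUBLE_PRECISION, prow-1, colcomm, ierror)
      call MPI_Comm_free(colcomm, ierror)
   end subroutine example_getrow_bcast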

#endif
   subroutine halt_program(normal)
      logical,intent(in),optional :: normal
      logical :: lnormal
      integer :: ierr

      if(present(normal)) then
         lnormal = normal
      else
         lnormal = .true.
      endif

      write(0,*) 'halt_program called by process', xmpi_orank
      if (lnormal) then
         call backtrace
      endif

#ifdef USEMPI
      if (lnormal) then
         call xmpi_abort
      else
         call MPI_Abort(xmpi_ocomm,1,ierr)
      endif
#else
      stop 1
#endif
   end subroutine halt_program

end module xmpi_module


#ifdef USEMPI
subroutine comm_errhandler(comm,error_code)
   use mpi
   use xmpi_module
   implicit none
   integer comm,error_code

   character(MPI_MAX_ERROR_STRING) e
   character(MPI_MAX_OBJECT_NAME)  o
   integer r,ier,ra

   call MPI_Comm_get_name(comm,o,r,ier)
   call MPI_Comm_rank(MPI_COMM_WORLD,ra,ier)
   write(0,*) 'MPI process #',ra,'in ',o(1:r),' generated an error:',error_code
   call MPI_Error_string(error_code,e,r,ier)
   write(0,*) e(1:r)
   call halt_program
end subroutine comm_errhandler
#endif

#define MBACKTRACE
#ifdef __GFORTRAN__
#if __GNUC__ >= 4
#if __GNUC_MINOR__ >= 8
#undef MBACKTRACE
#endif
#endif
#endif

#ifdef __INTEL_COMPILER
#undef MBACKTRACE
subroutine backtrace
   use ifcore
   call tracebackqq('traceback:',-1)
end subroutine backtrace
#endif

#ifdef MBACKTRACE
subroutine backtrace
   ! no compiler-provided backtrace available: force a segmentation
   ! fault by assigning through a null pointer, so that a core dump
   ! or signal handler can still show where the program was
   implicit none
   integer, pointer :: x => null()
   write(0,*) 'no backtrace for this compiler'
   write(0,*) 'forcing a segmentation fault ...'
   x = 0
   print *,x
end subroutine backtrace
#endif