XBeach
C:/repositories/XBeach/trunk/src/xbeachlibrary/general_mpi.F90
00001 module general_mpi_module
00002    implicit none
00003    save
00004 
00005    !#define COLLECT_TIMER
00006    !#define DISTRIBUTE_TIMER
00007 
00008 #ifdef USEMPI
00009 
00010    !
00011    !  NOTE: the matrices are considered to be bordered by a column
00012    !   left and right and a row at the top and bottom. So, in general,
00013    !   only b(2:mb-1,2:nb-1) is sent to the master process.
00014    !   HOWEVER: if matrix b is
00015    !   on the left: b(:,1) is also sent
00016    !   on the right: b(:,nb) is also sent
00017    !   on the top:   b(1,:)  is also sent
00018    !   on the bottom: b(mb,:) is also sent
00019    ! The master process receives a(1:ma,1:na).
00020    !   dimension of b (1:mb,1:nb).
00021    !   dimension of a (1:ma,1:na)
00022    !
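   !   Illustrative example (inferred from the rules above): a submatrix
   !   b(1:mb,1:nb) that lies on the left and at the top of the global
   !   matrix, but not on the right or at the bottom, sends
   !   b(1:mb-1,1:nb-1) to the master process.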
00023    !
00024 
00025    interface matrix_distr
00026       module procedure matrix_distr_real8
00027       module procedure matrix_distr_integer
00028    end interface matrix_distr
00029 
00030    interface matrix_coll
00031       module procedure matrix_coll_real8
00032       module procedure matrix_coll_integer
00033    end interface matrix_coll
00034 
00035    interface shift_borders
00036       module procedure shift_borders_matrix_real8
00037    end interface shift_borders
00038 
00039    interface vector_distr
00040       module procedure vector_distr_real8
00041       module procedure block_vector_distr_real8
00042    end interface vector_distr
00043 
00044 contains
00045    !_____________________________________________________________________________
00046    subroutine vector_distr_real8(a,b,is,lm,root,comm)
00047       !
00048       ! Distribute vector a on non-root processes
00049       ! Process P receives a(is(P):is(P)+lm(P)-1)
00050       !
00051       ! Parameters:
00052       ! real*8 a (in): vector to distribute, only used at root
00053       ! real*8 b (out): local vector
00054       ! integer is(number-of-processes) (in): see above: needed on all processes
00055       ! integer lm(number-of-processes) (in): see above: needed on all processes
00056       ! integer root (in): process that holds a, is, lm
00057       ! integer comm (in): MPI communicator
00058       !
00059       ! complication: a and b are not necessarily contiguous,
00060       !  but when calling mpi_alltoallw with a and b, the compiler
00061       !  will generate contiguous temporary copies.
00062       !
00063       ! root process must have rank .eq. 0
00064       !
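      ! A minimal usage sketch (illustrative only; nprocs, rank, n and comm
      ! are hypothetical names, and is/lm must be consistent on all
      ! processes):
      !
      !   integer :: is(nprocs), lm(nprocs)
      !   real*8  :: a(n)              ! only significant on root (rank 0)
      !   real*8, allocatable :: b(:)
      !   allocate(b(lm(rank+1)))
      !   call vector_distr(a, b, is, lm, 0, comm)
      !   ! afterwards b(1:lm(rank+1)) holds a(is(rank+1):is(rank+1)+lm(rank+1)-1)
      !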
00065       use mpi
00066       implicit none
00067       real*8, dimension(:), intent(in)  :: a
00068       real*8, dimension(:), intent(out) :: b
00069       integer, dimension(:), intent(in) :: is
00070       integer, dimension(:), intent(in) :: lm
00071       integer, intent(in)               :: root
00072       integer, intent(in)               :: comm
00073 
00074       integer                          :: ier,ra,sz,basic_type,i
00075       integer,allocatable,dimension(:) :: recvtypes,sendtypes,recvcounts,sendcounts,sdispls,rdispls
00076       integer,dimension(1)             :: sizes,subsizes,starts
00077 
00078       basic_type = MPI_DOUBLE_PRECISION
00079       call MPI_Comm_size(comm, sz, ier)
00080       call MPI_Comm_rank(comm, ra, ier)
00081 
00082       if (root /= 0) then
00083          print *,'Error in vector_distr_real8: root must be 0, but is:',root
00084          call MPI_Abort(MPI_COMM_WORLD,1,ier)
00085       endif
00086 
00087       allocate(recvtypes(sz))
00088       allocate(sendtypes(sz))
00089       allocate(recvcounts(sz))
00090       allocate(sendcounts(sz))
00091       allocate(sdispls(sz))
00092       allocate(rdispls(sz))
00093 
00094       sdispls    = 0
00095       rdispls    = 0
00096       recvtypes  = MPI_CHARACTER
00097       sendtypes  = MPI_CHARACTER
00098       recvcounts = 0
00099       sendcounts = 0
00100 
00101       !
00102       ! Create MPI types
00103       !
00104 
00105       ! MPI_TYPE_CREATE_SUBARRAY(NDIMS, ARRAY_OF_SIZES, ARRAY_OF_SUBSIZES,
00106       !   ARRAY_OF_STARTS, ORDER, OLDTYPE, NEWTYPE, IERROR)
00107       !   INTEGER    NDIMS, ARRAY_OF_SIZES(*), ARRAY_OF_SUBSIZES(*),
00108       !   ARRAY_OF_STARTS(*), ORDER, OLDTYPE, NEWTYPE, IERROR
00109 
00110       ! determine mpi_types for the receive buffers
00111       ! all processes will receive only from root
00112       sizes     = shape(b)
00113       subsizes  = lm(ra+1)
00114       starts    = 0
00115       call MPI_Type_create_subarray(1,sizes,subsizes,starts, &
00116       MPI_ORDER_FORTRAN, basic_type,recvtypes(1), ier)
00117       call MPI_Type_commit(recvtypes(1),ier)
00118       recvcounts(1) = 1
00119 
00120 
00121       ! determine mpi types for the senders
00122       if(ra == root) then
00123          ! root sends to everyone
00124 
00125          do i=1,sz
00126             sizes      = shape(a)
00127             subsizes   = lm(i)
00128             starts     = is(i) - 1
00129             sendcounts = 1
00130             call MPI_Type_create_subarray(1,sizes,subsizes,starts,  &
00131             MPI_ORDER_FORTRAN, basic_type,sendtypes(i),ier)
00132             call MPI_Type_commit(sendtypes(i),ier)
00133          enddo
00134       endif
00135 
00136       call MPI_Alltoallw(a,sendcounts,sdispls,sendtypes, &
00137       b,recvcounts,rdispls,recvtypes,comm,ier)
00138 
00139       do i=1,sz
00140          if (sendtypes(i) /= MPI_CHARACTER) then
00141             call MPI_Type_free(sendtypes(i),ier)
00142          endif
00143          if (recvtypes(i) /= MPI_CHARACTER) then
00144             call MPI_Type_free(recvtypes(i),ier)
00145          endif
00146       enddo
00147 
00148    end subroutine vector_distr_real8
00149 
00150    !_____________________________________________________________________________
00151 
00152    subroutine block_vector_distr_real8(a,b,is,lm,root,comm)
00153       !
00154       ! Distribute matrix a on non-root processes
00155       ! Process P receives a(is(P):is(P)+lm(P)-1,:)
00156       !
00157       ! Parameters:
00158       ! real*8 a(:,:) (in): matrix to distribute, only used at root
00159       ! real*8 b(:,:) (out): local matrix
00160       ! integer is(number-of-processes) (in): see above: needed on all processes
00161       ! integer lm(number-of-processes) (in): see above: needed on all processes
00162       ! integer root (in): process that holds a, is, lm
00163       ! integer comm (in): MPI communicator
00164       !
00165       ! complication: a and b are not necessarily contiguous,
00166       !  but when calling mpi_alltoallw with a and b, the compiler
00167       !  will generate contiguous temporary copies.
00168       !
00169       ! root process must have rank .eq. 0
00170       !
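      ! A minimal usage sketch (illustrative only; rank and comm are
      ! hypothetical names). Each process receives a block of rows of a:
      !
      !   real*8, allocatable :: b(:,:)
      !   allocate(b(lm(rank+1), size(a,2)))
      !   call vector_distr(a, b, is, lm, 0, comm)
      !   ! afterwards b = a(is(rank+1):is(rank+1)+lm(rank+1)-1, :)
      !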
00171       use mpi
00172       implicit none
00173       real*8,  dimension(:,:), intent(in)  :: a
00174       real*8,  dimension(:,:), intent(out) :: b
00175       integer, dimension(:), intent(in) :: is
00176       integer, dimension(:), intent(in) :: lm
00177       integer, intent(in)               :: root
00178       integer, intent(in)               :: comm
00179 
00180       integer                          :: ier,ra,sz,basic_type,i
00181       integer,allocatable,dimension(:) :: recvtypes,sendtypes,recvcounts,sendcounts,sdispls,rdispls
00182       integer,dimension(2)             :: sizes,subsizes,starts
00183 
00184       basic_type = MPI_DOUBLE_PRECISION
00185       call MPI_Comm_size(comm, sz, ier)
00186       call MPI_Comm_rank(comm, ra, ier)
00187 
00188       if (root /= 0) then
00189          print *,'Error in block_vector_distr_real8: root must be 0, but is:',root
00190          call MPI_Abort(MPI_COMM_WORLD,1,ier)
00191       endif
00192 
00193       allocate(recvtypes(sz))
00194       allocate(sendtypes(sz))
00195       allocate(recvcounts(sz))
00196       allocate(sendcounts(sz))
00197       allocate(sdispls(sz))
00198       allocate(rdispls(sz))
00199 
00200       sdispls    = 0
00201       rdispls    = 0
00202       recvtypes  = MPI_CHARACTER
00203       sendtypes  = MPI_CHARACTER
00204       recvcounts = 0
00205       sendcounts = 0
00206 
00207       !
00208       ! Create MPI types
00209       !
00210 
00211       ! MPI_TYPE_CREATE_SUBARRAY(NDIMS, ARRAY_OF_SIZES, ARRAY_OF_SUBSIZES,
00212       !   ARRAY_OF_STARTS, ORDER, OLDTYPE, NEWTYPE, IERROR)
00213       !   INTEGER    NDIMS, ARRAY_OF_SIZES(*), ARRAY_OF_SUBSIZES(*),
00214       !   ARRAY_OF_STARTS(*), ORDER, OLDTYPE, NEWTYPE, IERROR
00215 
00216       ! determine mpi_types for the receive matrices
00217 
00218       ! all processes will receive only from root
00219       sizes     = shape(b)
00220       subsizes  = (/ lm(ra+1), size(b,2) /)
00221       starts    = 0
00222       call MPI_Type_create_subarray(2,sizes,subsizes,starts, &
00223       MPI_ORDER_FORTRAN, basic_type,recvtypes(1), ier)
00224       call MPI_Type_commit(recvtypes(1),ier)
00225       recvcounts(1) = 1
00226 
00227       ! determine mpi types for the senders
00228       if (ra == root) then
00229          ! root sends to everyone
00230 
00231          do i=1,sz
00232             sizes      = shape(a)
00233             subsizes   = (/ lm(i),     size(a,2) /)
00234             starts     = (/ is(i) - 1, 0         /)
00235             sendcounts = 1
00236             call MPI_Type_create_subarray(2,sizes,subsizes,starts,  &
00237             MPI_ORDER_FORTRAN, basic_type,sendtypes(i),ier)
00238             call MPI_Type_commit(sendtypes(i),ier)
00239          enddo
00240       endif
00241 
00242       call MPI_Alltoallw(a,sendcounts,sdispls,sendtypes, &
00243       b,recvcounts,rdispls,recvtypes,comm,ier)
00244 
00245       do i=1,sz
00246          if (sendtypes(i) /= MPI_CHARACTER) then
00247             call MPI_Type_free(sendtypes(i),ier)
00248          endif
00249          if (recvtypes(i) /= MPI_CHARACTER) then
00250             call MPI_Type_free(recvtypes(i),ier)
00251          endif
00252       enddo
00253 
00254    end subroutine block_vector_distr_real8
00255 
00256    !___________________________________________________________________________
00257 
00258    subroutine matrix_distr_real8(a,b,is,lm,js,ln,  &
00259    root,comm)
00260       !
00261       ! This subroutine distributes the matrix a, available on root, to the
00262       ! non-root processes.
00263       ! Root will not send to itself.
00264       ! Root MUST be process #0 in comm
00265       !
00266       ! a: matrix to be distributed
00267       ! b: receive matrix, process p
00268       !    (1:lm(p),1:ln(p)) will be filled (p=1..size of comm)
00269       !    Normally: the whole of b
00270       ! is,js: on process p, b(1,1) coincides with a(is(p),js(p)),
00271       !        p=1..size of comm
00272       ! root: root process, must be zero
00273       ! comm: communicator
00274       !
00275       ! Note: root process does not send or copy anything to itself,
00276       !       'there is no b on root'
00277       !
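      ! A minimal usage sketch (illustrative only; the decomposition arrays
      ! are assumed to come from det_submatrices below, rank is a
      ! hypothetical name, and b is not filled on root):
      !
      !   call det_submatrices(ma,na,mp,np,is,lm,js,ln,isleft,isright,istop,isbot)
      !   allocate(b(lm(rank+1), ln(rank+1)))
      !   call matrix_distr(a, b, is, lm, js, ln, 0, comm)
      !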
00278 
00279       use mpi
00280       implicit none
00281       real*8, intent(in),  dimension(:,:)  :: a
00282       real*8, intent(out), dimension(:,:)  :: b
00283       integer, intent(in), dimension(:)    :: is, lm
00284       integer, intent(in), dimension(:)    :: js, ln
00285       integer, intent(in)                  :: root, comm
00286 
00287       integer              :: sz,ier,ra,i
00288       integer, allocatable :: recvtypes(:), sendtypes(:), recvcounts(:),sendcounts(:)
00289       integer, allocatable :: sdispls(:), rdispls(:)
00290       integer sizes(2),subsizes(2),starts(2)
00291       integer basic_type
00292 
00293       basic_type = MPI_DOUBLE_PRECISION
00294 
00295       include 'genmpi_distr.inc'
00296 
00297    end subroutine matrix_distr_real8
00298 
00299    !___________________________________________________________________________
00300 
00301    subroutine matrix_distr_integer(a,b,is,lm,js,ln,  &
00302    root,comm)
00303       !
00304       ! This subroutine distributes the matrix a, available on root, to the
00305       ! non-root processes.
00306       ! Root will not send to itself.
00307       ! Root MUST be process #0 in comm
00308       !
00309       ! a: matrix to be distributed
00310       ! b: receive matrix, process p
00311       !    (1:lm(p),1:ln(p)) will be filled (p=1..size of comm)
00312       !    Normally: the whole of b
00313       ! is,js: on process p, b(1,1) coincides with a(is(p),js(p)),
00314       !        p=1..size of comm
00315       ! root: root process, must be zero
00316       ! comm: communicator
00317       !
00318       ! Note: root process does not send or copy anything to itself,
00319       !       'there is no b on root'
00320       !
00321 
00322       use mpi
00323       implicit none
00324       integer, intent(in),  dimension(:,:)  :: a
00325       integer, intent(out), dimension(:,:)  :: b
00326       integer, intent(in), dimension(:)    :: is, lm
00327       integer, intent(in), dimension(:)    :: js, ln
00328       integer, intent(in)                  :: root, comm
00329 
00330       integer              :: sz,ier,ra,i
00331       integer, allocatable :: recvtypes(:), sendtypes(:), recvcounts(:),sendcounts(:)
00332       integer, allocatable :: sdispls(:), rdispls(:)
00333       integer sizes(2),subsizes(2),starts(2)
00334       integer basic_type
00335 
00336       basic_type = MPI_INTEGER
00337 
00338       include 'genmpi_distr.inc'
00339 
00340    end subroutine matrix_distr_integer
00341 
00342    !___________________________________________________________________________
00343 
00344    subroutine matrix_coll_real8(a,b,is,lm,js,ln,&
00345    isleft,isright,istop,isbot,root,comm)
00346       ! collect matrix on root process
00347       !  NOTE: the collect is done from the non-root processes only
00348       !
00349       ! parameters:
00350       ! real*8 a(:,:) (out): matrix to be collected.
00351       !                      This matrix has to be available; it is not
00352       !                      allocated by this subroutine.
00353       ! real*8 b(:,:) (in): local scattered matrix
00354       ! integer is,lm,js,ln(number of processes) (in): description of
00355       !   the location of the submatrices b in matrix a:
00356       !   Element b(1,1) coincides with a(is(rank+1),js(rank+1)), where rank is
00357       !   the rank of the process (0..procs-1)
00358       !   The length of the 1st dimension of b is given by lm(rank+1)
00359       !   The length of the 2nd dimension of b is given by ln(rank+1)
00360       ! logical isleft, isright, istop, isbot (in): is this submatrix at the
00361       !  left, right, top or bottom of the global matrix.
00362       ! integer root (in): the rank of the process where a is available
00363       ! integer comm (in): the MPI communicator to be used.
00364       !
00365       ! Note: a,is,js are used at the root process only
00366       !
00367       ! Note: the root process must have rank .eq. 0
00368       !
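      ! A minimal usage sketch (illustrative only; the decomposition arrays
      ! are assumed to come from det_submatrices, and a is only significant
      ! on root):
      !
      !   call matrix_coll(a, b, is, lm, js, ln, &
      !                    isleft, isright, istop, isbot, 0, comm)
      !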
00369       use mpi
00370       implicit none
00371       real*8, intent(out),  dimension(:,:)  :: a
00372       real*8, intent(in), dimension(:,:)  :: b
00373       integer, intent(in), dimension(:)    :: is,lm
00374       integer, intent(in), dimension(:)    :: js,ln
00375       logical, intent(in), dimension(:)    :: isleft, isright, istop, isbot
00376       integer, intent(in)                  :: root,comm
00377 
00378       integer sz,ier,ra,i
00379       integer, allocatable :: recvtypes(:), sendtypes(:), recvcounts(:),sendcounts(:)
00380       integer, allocatable :: sdispls(:), rdispls(:)
00381       integer sizes(2),subsizes(2),starts(2)
00382       integer basic_type
00383       ! nover is number of overlapping rows/columns of submatrices.
00384       ! nbord is number of borders.
00385 
00386       integer, parameter                   :: nbord = 2
00387       integer, parameter                   :: nover = 2*nbord
00388 
00389       basic_type = MPI_DOUBLE_PRECISION
00390 
00391       include 'genmpi_coll.inc'
00392 
00393    end subroutine matrix_coll_real8
00394    !___________________________________________________________________________
00395 
00396    subroutine matrix_coll_integer(a,b,is,lm,js,ln,&
00397    isleft,isright,istop,isbot,root,comm)
00398       ! for description, see matrix_coll_real8
00399       use mpi
00400       implicit none
00401       integer, intent(out),  dimension(:,:) :: a
00402       integer, intent(in), dimension(:,:)   :: b
00403       integer, intent(in), dimension(:)     :: is, lm
00404       integer, intent(in), dimension(:)     :: js, ln
00405       logical, intent(in), dimension(:)     :: isleft, isright, istop, isbot
00406       integer, intent(in)                   :: root, comm
00407 
00408       integer sz,ier,ra,i
00409       integer, allocatable :: recvtypes(:), sendtypes(:), recvcounts(:),sendcounts(:)
00410       integer, allocatable :: sdispls(:), rdispls(:)
00411       integer sizes(2),subsizes(2),starts(2)
00412       integer basic_type
00413       ! nover is number of overlapping rows/columns of submatrices.
00414       ! nbord is number of borders.
00415 
00416       integer, parameter                   :: nbord = 2
00417       integer, parameter                   :: nover = 2*nbord
00418 
00419       basic_type = MPI_INTEGER
00420 
00421       include 'genmpi_coll.inc'
00422 
00423    end subroutine matrix_coll_integer
00424    !___________________________________________________________________________
00425 
00426 
00427 
00428    subroutine decomp(n, numprocs, myid, s, e)
00429 
00430       !  From the mpich-distribution: MPE_Decomp1d
00431       !
00432       !  n        : integer, input (for example: number of tasks or
00433       !                               dimension of an array)
00434       !  numprocs : integer, input (number of processes)
00435       !  myid     : integer, input (MPI id of this process)
00436       !  s        : integer, output
00437       !  e        : integer, output
00438       !
00439       !  The purpose of this subroutine is to divide the integers 1..n
00440       !  as equally as possible among numprocs intervals.
00441       !  A common use is to divide work among MPI processes.
00442       !
00443       !  example:
00444       !
00445       !  integer numprocs, ier, myid, n, s, e, i
00446       !  call MPI_Comm_size(MPI_COMM_WORLD, numprocs, ier)
00447       !  call MPI_Comm_rank(MPI_COMM_WORLD, myid, ier)
00448       !  n = 1000
00449       !  call decomp(n, numprocs, myid, s, e)
00450       !  do i = s,e
00451       !    ...
00452       !  enddo
00453       !
00454       integer, intent(in) :: n, numprocs, myid
00455       integer, intent(out):: s, e
00456       integer nlocal
00457       integer deficit
00458 
00459       nlocal  = n / numprocs
00460       s       = myid * nlocal + 1
00461       deficit = mod(n,numprocs)
00462       s       = s + min(myid,deficit)
00463       if (myid .lt. deficit) then
00464          nlocal = nlocal + 1
00465       endif
00466       e = s + nlocal - 1
00467       if (e .gt. n .or. myid .eq. numprocs-1) then
00468          e = n
00469       endif
00470 
00471    end subroutine decomp
00472 
00473    subroutine det_submatrices(ma,na,mp,np,is,lm,js,ln,isleft,isright,istop,isbot)
00474       !
00475       ! determine a division of a ma*na matrix on mp*np processes
00476       ! ma: integer(in): 1st dimension of matrix to divide
00477       ! na: integer(in): 2nd dimension of this matrix
00478       ! mp: integer(in): 1st dimension of the processor grid
00479       ! np: integer(in): 2nd dimension of the processor grid
00480       ! is(mp*np): integer(out): is(i) will be equal to the
00481       !                          start row of the i-th submatrix
00482       ! lm(mp*np): integer(out): lm(i) will be equal to the
00483       !                          first dimension of the i-th submatrix
00484       ! js(mp*np): integer(out): js(i) will be equal to the
00485       !                          start column of the i-th submatrix
00486       ! ln(mp*np): integer(out): ln(i) will be equal to the
00487       !                          2nd dimension of the i-th submatrix
00488       ! isleft(mp*np): logical(out): isleft(i) is true if the i-th submatrix
00489       !                              shares the first column with the global matrix
00490       !                              a(:,1)
00491       ! isright(mp*np): logical(out): isright(i) is true if the i-th submatrix
00492       !                              shares the last column with the global matrix
00493       !                              a(:,na)
00494       ! istop(mp*np): logical(out): istop(i) is true if the i-th submatrix
00495       !                              shares the first row with the global matrix
00496       !                              a(1,:)
00497       ! isbot(mp*np): logical(out): isbot(i) is true if the i-th submatrix
00498       !                              shares the last row with the global matrix
00499       !                              a(ma,:)
00500       !
00501       ! The submatrices overlap:
00502       ! 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
00503       ! 1 2 3 4 5 6 7 8 9           1 2 3 4 5 6 7 8 9 0 1
00504       !           1 2 3 4 5 6 7 8 9 0 1 2 3
00505       !
00506       ! + + + + + x x x x + + + + + x x x x + + + + + + +
00507       ! + + + + + x x x x + + + + + x x x x + + + + + + +
00508       ! + + + + + x x x x + + + + + x x x x + + + + + + +
00509       ! + + + + + x x x x + + + + + x x x x + + + + + + +
00510       ! + + + + + x x x x + + + + + x x x x + + + + + + +
00511       ! + + + + + x x x x + + + + + x x x x + + + + + + +
00512       ! + + + + + x x x x + + + + + x x x x + + + + + + +
00513       !
00514       ! This picture is for the overlap in horizontal direction,
00515       ! The overlap is 4.
00516       ! + -> matrix element
00517       ! x -> overlapping matrix element
00518       ! the order of the submatrices is 9,13,11.
00519       ! The order of the global matrix is 25
00520       !
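      ! A minimal usage sketch (illustrative only): divide a 100x200 matrix
      ! over a 2x3 process grid; all output arrays have length mp*np = 6:
      !
      !   integer :: is(6), lm(6), js(6), ln(6)
      !   logical :: isleft(6), isright(6), istop(6), isbot(6)
      !   call det_submatrices(100, 200, 2, 3, is, lm, js, ln, &
      !                        isleft, isright, istop, isbot)
      !   ! submatrix i spans rows is(i):is(i)+lm(i)-1 and
      !   ! columns js(i):js(i)+ln(i)-1 of the global matrix
      !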
00521       implicit none
00522       integer, intent(in)                :: ma,na,mp,np
00523       integer, dimension(:), intent(out) :: is,lm,js,ln
00524       logical, dimension(:), intent(out) :: isleft,isright,istop,isbot
00525 
00526       integer i,j,s,e,k
00527       integer, parameter :: nover = 4
00528 
00529       isleft = .false.
00530       isright = .false.
00531       istop = .false.
00532       isbot = .false.
00533 
00534       do i = 1, mp
00535          call decomp(ma-nover, mp, i - 1, s, e)
00536          k = 0
00537          do j = 1, np
00538             if (j .eq. 1) then
00539                isleft(i+k) = .true.
00540             endif
00541             if (j .eq. np) then
00542                isright(i+k) = .true.
00543             endif
00544             is(i + k) = s
00545             lm(i + k) = e - s + nover + 1
00546             k = k + mp
00547          enddo
00548       enddo
00549       k = 0
00550       do j=1, np
00551          call decomp(na-nover, np, j - 1, s, e)
00552          do i = 1, mp
00553             if (i .eq. 1) then
00554                istop(i+k) = .true.
00555             endif
00556             if (i .eq. mp) then
00557                isbot(i+k) = .true.
00558             endif
00559             js(i + k) = s
00560             ln(i + k) = e - s + nover + 1
00561          enddo
00562          k = k + mp
00563       enddo
00564 
00565    end subroutine det_submatrices
00566 
00567    subroutine shift_borders_matrix_real8(a,left,right,top,bot,comm)
00568       !
00569       ! exchange the border rows/columns of a with the left, right, top and bot neighbour processes
00570       !
00571       use mpi
00572       implicit none
00573       real*8, dimension(:,:), intent(inout) :: a
00574       integer, intent(in)                   :: left,right,top,bot,comm
00575 
00576       integer, parameter                    :: datatype = MPI_DOUBLE_PRECISION
00577       integer ierror,ma,na
00578 
00579       ma = ubound(a,1)
00580       na = ubound(a,2)
00581 
00582       ! send to left, receive from right
00583       call MPI_Sendrecv(a(2:ma-1,2), ma-2, datatype,       &
00584       left, 1,                           &
00585       a(2:ma-1,na), ma-2, datatype,      &
00586       right, 1,                          &
00587       comm, MPI_STATUS_IGNORE, ierror)
00588       ! send to right, receive from left
00589       call MPI_Sendrecv(a(2:ma-1,na-1), ma-2, datatype,    &
00590       right, 2,                          &
00591       a(2:ma-1,1), ma-2, datatype,       &
00592       left, 2,                           &
00593       comm, MPI_STATUS_IGNORE, ierror)
00594 
00595       ! send to bot, receive from top
00596       call MPI_Sendrecv(a(ma-1,:), na, datatype,           &
00597       bot, 3,                            &
00598       a(1,:), na, datatype,              &
00599       top, 3,                            &
00600       comm, MPI_STATUS_IGNORE, ierror)
00601       ! send to top, receive from bot
00602       call MPI_Sendrecv(a(2,:), na, datatype,              &
00603       top, 4,                            &
00604       a(ma,:), na, datatype,             &
00605       bot, 4,                            &
00606       comm, MPI_STATUS_IGNORE, ierror)
00607 
00608    end subroutine shift_borders_matrix_real8
00609 
00610 #endif
00611 end module general_mpi_module