program advection_2d
!
! ----------------------------------------------------------------------
! | Parallel Version by:  Rosalinda de Fainchtein.                      |
! | Original advection code by:  Steve Zalesak                          |
! | Written for the 2001 HPCC Summer School of Computational Methods    |
! | for Space and Earth Sciences.                                       |
! ----------------------------------------------------------------------
!
!     Solve the equation  d/dt rho + d/dx (rho vx) + d/dy (rho vy) = 0
!     with vx = +d/dy (phi), vy = -d/dx (phi); phi a streamfunction,
!     on a 2-D uniform mesh with periodic boundary conditions.
!
      implicit none

!--Include the mpi header file
      include 'mpif.h'

      integer, parameter :: mx = 14, my = 14, nx = 9, ny = 9
      real,    parameter :: dx = 1.0, dy = 1.0

!---For MPI...
      integer :: ierr, rank, my_rank, numprocs, itag
      integer :: status(MPI_STATUS_SIZE)
      integer, parameter :: ndims = 2               ! dimension of the cartesian grid
      integer, parameter :: dims_x = 2, dims_y = 2  ! same as dims(1:2)
      integer :: dims(ndims)
      integer :: COMM_CART
      logical :: periods(ndims), reorder
      integer :: coords(ndims)

! ---After adding the "implicit none" statement.....
      integer :: nsteps, k, j, i
      real    :: crn, dt, vxh, vyh, fact
      real :: x(nx), y(ny)
      real :: phi(nx,ny), vx(nx,ny), vy(nx,ny)
      real :: rho(nx,ny), flux_x(nx,ny), flux_y(nx,ny)

!--Initialize MPI and set Cartesian block data..
      call init_parallel
      write(*,*) 'returned from init_parallel'

!     read in Courant number, nsteps
      if (my_rank == 0) then
         write(*,*) ' enter Courant number ( <= 0.25), nsteps'
         read(*,*) crn, nsteps
      end if

! ---Broadcast crn and nsteps to the rest of the processes.
      call MPI_BCAST(crn,    1, MPI_REAL,    0, MPI_COMM_WORLD, ierr)
      call MPI_BCAST(nsteps, 1, MPI_INTEGER, 0, MPI_COMM_WORLD, ierr)

!-----set initial conditions
      write(*,*) 'at proc ', my_rank, ':', nx, ny, nsteps, crn
      write(*,*) 'at proc ', my_rank, ' my coords are ', coords

      call set_ic (x, y, phi, rho, vx, vy, dx, dy, mx, my, nx, ny, dims, &
     &             coords, comm_cart, my_rank)

      call output (my_rank, coords, x, y, 0, 'rho', rho, nx, ny)

!-----update solution nsteps times
      do k = 1, nsteps

!-----compute time step
         call dtset (vx, vy, dx, dy, crn, nx, ny, dt, my_rank)

!-----compute numerical fluxes
!     (first-order upwind/donor-cell fluxes: the abs(vxh) and abs(vyh)
!      terms select the upstream value of rho)
         do j = 3, ny-2
            do i = 2, nx-2
               vxh = 0.5*(vx(i,j)+vx(i+1,j))
               flux_x(i,j) = vxh*0.5*(rho(i,j)+rho(i+1,j)) &
     &                       -0.5*abs(vxh)*(rho(i+1,j)-rho(i,j))
               flux_x(i,j) = flux_x(i,j)*dy
            enddo
         enddo
         do j = 2, ny-2
            do i = 3, nx-2
               vyh = 0.5*(vy(i,j)+vy(i,j+1))
               flux_y(i,j) = vyh*0.5*(rho(i,j)+rho(i,j+1)) &
     &                       -0.5*abs(vyh)*(rho(i,j+1)-rho(i,j))
               flux_y(i,j) = flux_y(i,j)*dx
            enddo
         enddo

!-----use fluxes to update solution
         fact = dt/(dx*dy)
         do j = 3, ny-2
            do i = 3, nx-2
               rho(i,j) = rho(i,j) - fact*(flux_x(i,j) - flux_x(i-1,j) &
     &                                    +flux_y(i,j) - flux_y(i,j-1))
            enddo
         enddo

! --Write out solution BEFORE updating guard cells
         write(my_rank*100+9,*) 'Before updating guard cells....'
         call output (my_rank, coords, x, y, k, 'rho', rho, nx, ny)

!---Update Guard Cells (periodic boundary conditions are applied by
!   specifying periods=.true. when the Cartesian communicator is created,
!   so the MPI_CART_SHIFT calls wrap around at the domain edges)....
         call guard_cells(rho, nx, ny, comm_cart)

         write(6,*) ' step number, dt = ', k, dt
         call output (my_rank, coords, x, y, k, 'rho', rho, nx, ny)

      enddo                     ! k=1,nsteps

!--Shut down MPI before exiting
      call MPI_FINALIZE(ierr)
      stop

contains

!............................................................................
subroutine init_parallel

!--Initialize MPI
      call MPI_INIT( ierr )

!--Who am I? --- get my rank=my_rank
      call MPI_COMM_RANK( MPI_COMM_WORLD, my_rank, ierr )

!--How many processes in the global group?
      call MPI_COMM_SIZE( MPI_COMM_WORLD, numprocs, ierr )

!--Create a cartesian topology
      dims(1) = dims_x
      dims(2) = dims_y
      periods(1) = .true.       ! periodic bdry. cond. along x
      periods(2) = .true.       ! periodic bdry. cond. along y
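!       (Both directions are declared periodic, so the MPI_CART_SHIFT calls
!        in guard_cells return wrapped-around neighbor ranks at the domain
!        edges; this is how the periodic boundary conditions of the
!        advection problem are realized in the parallel code.)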
      reorder = .false.

      call MPI_CART_CREATE(MPI_COMM_WORLD, ndims, dims, periods, &
     &                     reorder, COMM_CART, ierr)
!           -----------------------------------------------------------------
!           | MPI_CART_CREATE will provide a 2D cartesian array of processes |
!   ======> | of dimensions 2x2.                                             |
!           | Each process rank is associated with a given sub-domain.       |
!           -----------------------------------------------------------------

!--Find my coordinate parameters in the Cartesian topology (needed
!  to compute the arrays x,y).
      call MPI_CART_COORDS(COMM_CART, my_rank, 2, coords, ierr)

      return
end subroutine init_parallel
!...........................................................................

end program advection_2d

!---------------------------------------------------------------
subroutine set_ic (x, y, phi, rho, vx, vy, dx, dy, mx, my, nx, ny, dims, &
     &             coords, comm_cart, my_rank)

      implicit none

!--Include the mpi header file
      include 'mpif.h'

      real :: x(nx), y(ny)
      real :: phi(nx,ny), vx(nx,ny), vy(nx,ny)
      real :: rho(nx,ny)
      real :: dx, dy
      integer :: mx, my, nx, ny
      integer :: comm_cart, my_rank, k
      real :: x_corner, y_corner
      integer :: dims(2), coords(2)
      real :: rsq
      integer :: i, j

!--The physical coordinates of my left-bottom corner:
      x_corner = ( float( mx/dims(1)-2 )*coords(1) - 0.5*float(mx+1) ) * dx
      y_corner = ( float( my/dims(2)-2 )*coords(2) - 0.5*float(my+1) ) * dy

!-----set initial conditions (2D square wave)
      do i = 1, nx
         x(i) = x_corner + float(i)*dx
      enddo
      do j = 1, ny
         y(j) = y_corner + float(j)*dy
      enddo

      do j = 1, ny
         do i = 1, nx
            phi(i,j) = y(j)    !----This creates uniform velocity along the
                               !    +x direction
            rho(i,j) = 0.0

!----- Other initial conditions to try....
!           rsq = x(i)**2+y(j)**2     ! 1.--This would create
!           phi(i,j) = rsq            !     solid body rotation.
!
!           phi(i,j) = -x(i)          ! 2.--This would create uniform
!                                     !     velocity along the +y direction.
         enddo
      enddo

!---Initial Density
      do i = 1, nx
         do j = 1, ny
            if (x(i) < 3.6 .and. x(i) > 2.4 .and. y(j) < 0.6 .and. y(j) > -.6) &
     &         rho(i,j) = 1.0
         end do
      end do

!     do i=3*mx/4,3*mx/4+1
!        do j=my/2,my/2+1
!           rho(i,j) = 1.0
!        enddo
!     enddo

!---Velocities from the streamfunction (centered differences)
      do j = 3, ny-2
         do i = 3, nx-2
            vx(i,j) =  (phi(i,j+1)-phi(i,j-1))/(2.0*dy)
            vy(i,j) = -(phi(i+1,j)-phi(i-1,j))/(2.0*dx)
         enddo
      enddo

!     call bcset (vx, nx,my)
!     call bcset (vy, nx,my)
      call guard_cells(vx, nx, ny, comm_cart)
      call guard_cells(vy, nx, ny, comm_cart)

      k = 0                     ! label the initial-condition output as step 0
      call output (my_rank, coords, x, y, k, 'vx ', vx, nx, ny)
      call output (my_rank, coords, x, y, k, 'vy ', vy, nx, ny)

      return
end

!---------------------------------------------------------------
subroutine dtset (vx, vy, dx, dy, crn, nx, ny, dt, my_rank)

      implicit none

!--Include the mpi header file
!  (MPI_COMM_WORLD, MPI_REAL, and MPI_MAX are supplied by mpif.h)
      include 'mpif.h'

      real :: vx(nx,ny), vy(nx,ny)
      real :: crn, dt, rdt, dx, dy, rrdt
      integer :: my_rank, nx, ny
      integer :: i, j, ierr

!---Compute the local value of rdt
      rdt = abs(vx(3,3))/dx
      do j = 3, ny-2
         do i = 3, nx-2
            rdt = max( abs(vx(i,j))/dx, abs(vy(i,j))/dy, rdt )
         enddo
      enddo

!---Compute the global value of rdt
      call MPI_ALLREDUCE(rdt, rrdt, 1, MPI_REAL, MPI_MAX, &
     &                   MPI_COMM_WORLD, ierr)
      write(*,*) 'at proc ', my_rank, ' global rdt=', rrdt

!---CFL-limited time step
      dt = crn/rrdt

      return
end

!---------------------------------------------------------------
!     subroutine bcset (a, nx, my)
!
!     real a(nx,my)
!
!     do j=3,my-2
!        a(1,j)    = a(mx-3,j)
!        a(2,j)    = a(mx-2,j)
!        a(mx-1,j) = a(3,j)
!        a(mx,j)   = a(4,j)
!     enddo
!     do i=1,mx
!        a(i,1)    = a(i,my-3)
!        a(i,2)    = a(i,my-2)
!        a(i,my-1) = a(i,3)
!        a(i,my)   = a(i,4)
!     enddo
!
!     return
!     end
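
!---(The commented-out serial routine bcset above imposed the periodic
!    boundary conditions by copying interior cells into the guard cells of
!    a single global array; in this parallel version that job is done by
!    guard_cells, which obtains the same periodicity from the periodic
!    Cartesian communicator.)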
!---------------------------------------------------------------
subroutine output (my_rank, coords, x, y, istep, name, array, nx, ny)

      integer :: istep, nx, ny, my_rank, coords(2)
      real :: array(nx,ny), x(nx), y(ny), xx
      character(len=3) :: name

      xx = 0.

      write(100*my_rank+9,9002) istep, name
      write(100*my_rank+9,*) '  y'
      write(100*my_rank+9,*) '  ^'
      do j = ny, 1, -1
         write(100*my_rank+9,9001) y(j), (array(i,j), i=1,nx)
      enddo
      write(100*my_rank+9,*) '-----------------------------------', &
     &                       '--------------------------------> x'
      write(100*my_rank+9,9001) xx, (x(i), i=1,nx)
      write(100*my_rank+9,*) ' '
      write(100*my_rank+9,*) '                -o- '
      write(100*my_rank+9,*) ' '

      return

 9001 format( 1x, f5.2, '|', 14(f5.2,1x) )
 9002 format( i4, 1x, a3/ )
end

! --------------------------------------------------------------------------
subroutine guard_cells(fun, nx, ny, comm_cart)

!---Update guard cells along the 4 "bands" of guard cells around the block,
!   one band at a time.
!   NOTE: The corner guard cells are not appropriately updated because they
!         are not required for this application.

      implicit none

!--Include the mpi header file
      include 'mpif.h'

      integer :: ix, iy
      integer :: nx, ny, comm_cart
      integer :: source, dest, ierr
      real, dimension(nx,ny) :: fun
      real :: buf_r_send(nx-3:nx-2,3:ny-2)  !-interim buffer for non-contiguous data
      real :: buf_r_recv(1:2,3:ny-2)        !      "      "    "       "        "
      real :: buf_l_send(3:4,3:ny-2)        !-interim buffer for non-contiguous data
      real :: buf_l_recv(nx-1:nx,3:ny-2)    !      "      "    "       "        "
      integer :: count, status(MPI_STATUS_SIZE)
      integer :: tag_x_r, tag_x_l, tag_y_r, tag_y_l

!--First: send guard cell data to the right x (positive x direction).
      call MPI_CART_SHIFT(comm_cart, 0, 1, source, dest, ierr)
      buf_r_send = fun(nx-3:nx-2,3:ny-2)   !-> pack into contiguous data buffer
      count   = 2*(ny-4)
      tag_x_r = 1              !--> tag for message sent to the "right" along x
      call MPI_SENDRECV(buf_r_send, count, MPI_REAL, dest,   tag_x_r, &
     &                  buf_r_recv, count, MPI_REAL, source, tag_x_r, &
     &                  comm_cart, status, ierr)
!-------------------------->transfer guard cell data to the "fun" array
      fun(1:2,3:ny-2) = buf_r_recv

!--Second: send guard cell data to the left x (negative x direction).
      call MPI_CART_SHIFT(comm_cart, 0, -1, source, dest, ierr)
      buf_l_send = fun(3:4,3:ny-2)
      buf_l_recv = fun(nx-1:nx,3:ny-2)
      count   = 2*(ny-4)
      tag_x_l = 2              !--> tag for message sent to the "left " along x
      call MPI_SENDRECV(buf_l_send, count, MPI_REAL, dest,   tag_x_l, &
     &                  buf_l_recv, count, MPI_REAL, source, tag_x_l, &
     &                  comm_cart, status, ierr)
!-------------------------->transfer guard cell data to the fun array
      fun(nx-1:nx,3:ny-2) = buf_l_recv

!--Third: send guard cell data to the right y (positive y direction).
!   (data at fixed j, i.e. fun(1:nx,j), is contiguous in memory, so it is
!    sent directly from fun with no packing buffer)
      call MPI_CART_SHIFT(comm_cart, 1, 1, source, dest, ierr)
      count   = 2*nx
      tag_y_r = 3              !--> tag for message sent to the "right" along y
      call MPI_SENDRECV(fun(1,ny-3), count, MPI_REAL, dest,   tag_y_r, &
     &                  fun(1,1),    count, MPI_REAL, source, tag_y_r, &
     &                  comm_cart, status, ierr)

!--Fourth: send guard cell data to the left y (negative y direction).
      call MPI_CART_SHIFT(comm_cart, 1, -1, source, dest, ierr)
      count   = 2*nx
      tag_y_l = 4              !--> tag for message sent to the "left " along y
      call MPI_SENDRECV(fun(1,3),    count, MPI_REAL, dest,   tag_y_l, &
     &                  fun(1,ny-1), count, MPI_REAL, source, tag_y_l, &
     &                  comm_cart, status, ierr)

      return
end
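
! --------------------------------------------------------------------------
!  Build/run sketch (illustrative only: the source file name and the exact
!  MPI wrapper/launcher commands depend on the local installation):
!
!      mpif90 -o advection_2d advection_2d.f90
!      mpirun -np 4 ./advection_2d
!
!  The 2x2 Cartesian decomposition (dims_x*dims_y) is laid out for 4 MPI
!  processes.  Each rank writes its plots to unit 100*my_rank+9, which most
!  compilers map to the files fort.9, fort.109, fort.209, and fort.309.
! --------------------------------------------------------------------------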