#!/bin/bash
#$ -V
#$ -j yes
#
# This script starts a single-domain Delft3D-FLOW computation on Linux in parallel mode.
#
# Usage:
# qsub -pe distrib 2 runlin_parallel
# qsub -pe distrib 4 runlin_parallel
# qsub -pe distrib 8 runlin_parallel
#
#
# Specify the config file to be used here
#
argfile=config.ini
#
# Set the directory containing deltares_hydro.exe and libflow2d3d.so here
#
exedir=/p/1200327-bwn-sandmud-model/Sand_Mud_tests/Freek/executable/intel/flow/bin/

# Set some (environment) parameters
export ARCH=intel
export D3D_HOME=~
export LD_LIBRARY_PATH=$exedir:$LD_LIBRARY_PATH
. /opt/intel/Compiler/11.0/081/bin/ifortvars.sh ia32

### Specific setting for the H3/H4 Linux clusters, needed for the MPICH2
### commands (mpdboot, mpirun, mpiexec, mpdallexit, etc.).
export PATH="/opt/mpich2/bin:${PATH}"

### Some general information available via SGE. Note that NHOSTS can be
### smaller than NSLOTS (for instance on multicore nodes).
echo ----------------------------------------------------------------------
echo Parallel run of Delft3D-FLOW with MPICH2 on the H4 Linux cluster.
echo SGE_O_WORKDIR: $SGE_O_WORKDIR
echo HOSTNAME     : $HOSTNAME
echo NHOSTS       : $NHOSTS
echo NQUEUES      : $NQUEUES
echo NSLOTS       : $NSLOTS
echo PE_HOSTFILE  : $PE_HOSTFILE
echo MPI_VER      : $MPI_VER

### General for MPICH2: create the machinefile needed by mpdboot and
### mpiexec from $PE_HOSTFILE. The first column contains the node
### identifier, the second column the number of processes to be started
### on that node.
awk '{print $1":"$2}' $PE_HOSTFILE > $(pwd)/machinefile

echo Contents of machinefile:
cat $(pwd)/machinefile
echo ----------------------------------------------------------------------

### Replace the per-process log files (log0.irlog, log1.irlog, ...) with
### symlinks to /dev/null so their contents are discarded.
nhosts2=`expr $NHOSTS \* 2`
for ((i=0; i<=nhosts2; i++)) ; do
    logfile="log$i.irlog"
    if [ -L "$logfile" ] ; then
        echo "$logfile is already a symlink"
    elif [ -f "$logfile" ] ; then
        rm -f "$logfile"
        ln -s /dev/null "$logfile"
    else
        ln -s /dev/null "$logfile"
    fi
done

# Run
### command="d3d.run -nproc "$NHOSTS" -input "$inputfile" -back no"
### eval $command

### General for MPICH2: start up your MPICH2 communication network (you
### can check whether it is already running with mpdtrace).
mpdboot -n $NHOSTS -f $(pwd)/machinefile -r rsh

### General: start delftflow in parallel by means of mpirun.
mpirun -np `expr $NHOSTS \* 2` $exedir/deltares_hydro.exe $argfile

### Alternatives:
### mpiexec -n $DELTAQ_NumNodes delftflow_91.exe -r $inputfile.mdf
### mpiexec -n `expr $DELTAQ_NumNodes \* 2` $exedir/deltares_hydro.exe $argfile

rm -f log*.irlog

### General for MPICH2: shut down your MPICH2 communication network.
mpdallexit
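### For reference: a minimal sketch of the PE_HOSTFILE-to-machinefile
### conversion performed by the awk command above. The host names, slot
### counts, and queue names below are made up for illustration; the actual
### contents depend on the SGE allocation. Each PE_HOSTFILE line has the form
###   <hostname> <nslots> <queue> <processor_range>
### and the awk command keeps only the first two columns, joined by a colon,
### giving the host:nprocs entries read by mpdboot:
###
###   $PE_HOSTFILE (example)                    machinefile (example)
###   node001 2 all.q@node001 UNDEFINED         node001:2
###   node002 2 all.q@node002 UNDEFINED         node002:2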