Alpine3D Svn Source Tree

Root/trunk/run.sh

  • Property svn:executable set to *
#!/bin/bash
#This is the launching script for Alpine3D. Please make sure the user section matches your needs!
########################### Slurm directives: edit for the Slurm job manager
#SBATCH --job-name=urumqi
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=10
########################### End of Slurm directives
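##With the Slurm directives above, this script is typically submitted as a batch job, e.g. "sbatch run.sh";
##for a quick local test it can also be run directly ("./run.sh"), in which case the #SBATCH lines are ignored.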
##Parallelization mode: either N (no parallelization), MPI, MPI_SGE or OPENMP
PARALLEL=N

##When using MPI (NOT MPI_SGE!), specify the machinefile you want to use. The machinefile format allows one
##hostname per line, optionally followed by a colon and the number of processes to run on that
##host, e.g. 192.168.0.11:2 means: use 2 cores of the machine at address 192.168.0.11.
##Use as many hosts as you deem reasonable; not defining a machinefile results in a local run.
##In any case, at most NPROC processes are used.
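##For example, a machinefile could contain (hypothetical hosts):
##  node01:4
##  node02:4
##  192.168.0.11:2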
MACHINEFILE=
NPROC=8

##When using OPENMP but NOT relying on a job manager, how many cores to use (otherwise this value is ignored)
NCORES=2

BEGIN="2001-10-02T01:00"
END="2013-10-01T01:00"
PROG_ROOTDIR=../bin #if you do not want to use Alpine3D from PATH, then point to where it is
#Should screen messages be redirected to stdouterr.log or printed directly on the screen?
REDIRECT_LOGS=Y
########################## END OF USER CONFIGURATION

export DYLD_FALLBACK_LIBRARY_PATH=${PROG_ROOTDIR}:${DYLD_FALLBACK_LIBRARY_PATH} #for osX
export LD_LIBRARY_PATH=${PROG_ROOTDIR}:${LD_LIBRARY_PATH} #for Linux
EXE="${PROG_ROOTDIR}/alpine3d"
if [ ! -f "${EXE}" ]; then
    EXE=$(which alpine3d)
fi
N_EB=1
N_SN=1

#to combine OPENMP and MPI, run as MPI but with N_EB & N_SN > 1
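##For example, for a hybrid MPI+OpenMP run one could set PARALLEL=MPI above and raise the N_EB=1 / N_SN=1
##assignments in the MPI branch below to values > 1; these end up as --np-ebalance and --np-snowpack further down.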
if [ "${PARALLEL}" == "MPI" ]; then
    echo "Running with MPI"
    MPIEXEC=${MPIEXEC:="mpiexec"}
    MFILE=${MACHINEFILE:+"-machinefile ${MACHINEFILE}"}
    EXE="${MPIEXEC} -n ${NPROC} ${MFILE} ${EXE}"
    N_EB=1
    N_SN=1
elif [ "${PARALLEL}" == "MPI_SGE" ]; then
    echo "Running with MPI under SGE"
    MPIEXEC=${MPIEXEC:="mpiexec"}
    if [ -n "${SLURM_TASKS_PER_NODE}" ]; then
        export NSLOTS=${SLURM_TASKS_PER_NODE}
    fi
    EXE="${MPIEXEC} -np ${NSLOTS} ${EXE}"
    N_EB=1
    N_SN=1
elif [ "${PARALLEL}" == "OPENMP" ]; then
    echo "Running with OPENMP"
    if [ -n "${SLURM_CPUS_PER_TASK}" ]; then
        export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK}
    else
        export OMP_NUM_THREADS=${NSLOTS:=$NCORES}
    fi
    N_EB=$OMP_NUM_THREADS
    N_SN=$OMP_NUM_THREADS
else
    unset OMP_NUM_THREADS
    echo "Running sequentially"
fi

##Below, always use a double pound sign for comments, otherwise SGE interprets the line as a job manager directive and refuses to run
##TOOL="valgrind --leak-check=full --show-reachable=yes --leak-resolution=high --undef-value-errors=yes --track-origins=yes --log-file=valgrind.log"

A3D_CMD="${TOOL} ${EXE} \
--iofile=./io.ini \
--enable-eb \
--np-ebalance=${N_EB} \
--np-snowpack=${N_SN} \
--startdate=${BEGIN} --enddate=${END}"
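##With the default sequential settings above, the assembled command looks roughly like:
##  ../bin/alpine3d --iofile=./io.ini --enable-eb --np-ebalance=1 --np-snowpack=1 --startdate=2001-10-02T01:00 --enddate=2013-10-01T01:00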

date
if [[ ("${REDIRECT_LOGS}" == "Y") || ("${REDIRECT_LOGS}" == "y") ]]; then
    ${A3D_CMD} "$@" > stdouterr.log 2>&1
else
    ${A3D_CMD} "$@" 2>&1
fi
ret=$?

echo "Done Alpine3D Simulation. Return code=$ret"
date
echo
exit $ret


Revision: HEAD