Commit f23c5f1b authored by Paul Gierz

feat(minimal-example): include Enrico's newer example, which has a heterogeneous run setup with dummy binaries
parent 5d3a1521
#! /bin/ksh
#SBATCH --job-name=test_run
#SBATCH --output=test_run_%j.log
#SBATCH --error=test_run_%j.log
### MPIOM/PISM ###
#SBATCH --partition=compute
#SBATCH --account=bk0993
#SBATCH --propagate=STACK,CORE
#SBATCH --ntasks-per-core=1
#SBATCH --ntasks=48
#SBATCH --time=00:03:00
#SBATCH packjob
### ECHAM6 ###
#SBATCH --partition=compute
#SBATCH --account=bk0993
#SBATCH --propagate=STACK,CORE
#SBATCH --ntasks-per-core=1
#SBATCH --ntasks=48
#SBATCH --time=00:03:00
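# The "#SBATCH packjob" line above splits this submission into a heterogeneous
# (pack) job with two components: pack group 0 (used below for the MPIOM and
# PISM dummy binaries) and pack group 1 (the ECHAM6 dummy). Each component
# requests 48 tasks at one task per core on the compute partition.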
ulimit -s 102400
ulimit -c 0
# OpenMPI optimised
export OMPI_MCA_pml=cm # sets the point-to-point management layer
export OMPI_MCA_mtl=mxm # sets the matching transport layer (MPI-2 one-sided comm.)
export MXM_RDMA_PORTS=mlx5_0:1
export MXM_LOG_LEVEL=ERROR
export MXM_HANDLE_ERRORS=bt
export UCX_HANDLE_ERRORS=bt
# enable HCOLL based collectives
export OMPI_MCA_coll=^fca # disable FCA for collective MPI routines
export OMPI_MCA_coll_hcoll_enable=1 # enable HCOLL for collective MPI routines
export OMPI_MCA_coll_hcoll_priority=95
export OMPI_MCA_coll_hcoll_np=8 # use HCOLL for all communications with more than 8 tasks
export OMPI_MCA_async_mpi_init=1 # enable async mpi_init
export HCOLL_MAIN_IB=mlx5_0:1
export HCOLL_ENABLE_MCAST=1
export HCOLL_ENABLE_MCAST_ALL=1
# disable specific HCOLL functions (strongly depends on the application)
export HCOLL_ML_DISABLE_BARRIER=1
export HCOLL_ML_DISABLE_IBARRIER=1
export HCOLL_ML_DISABLE_BCAST=1
export HCOLL_ML_DISABLE_REDUCE=1
export MALLOC_TRIM_THRESHOLD_=-1
export SLURM_CPU_FREQ_REQ=High
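# The OpenMPI, MXM and HCOLL variables above are interconnect tuning for the
# Mellanox fabric (mlx5_0:1) and are site specific. As an optional,
# hypothetical sanity check, the effective settings could be dumped into the
# job log at this point:
#   env | grep -E 'OMPI_MCA|MXM|HCOLL'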
MPIOM_ND=1     # number of nodes assigned to MPIOM (the first entries of the node list)
PISM_ND=1      # number of nodes intended for PISM (PISM in fact receives all remaining nodes)
hostname="m"   # node-name prefix, matching the "m[...]" form of SLURM_JOB_NODELIST
# Job nodes extraction
echo "nodelist = $SLURM_JOB_NODELIST"
nodeslurm=$SLURM_JOB_NODELIST
tmp=${nodeslurm#"m["}                      # strip the leading "m["
nodes=${tmp%]*}                            # strip the trailing "]"
myarray=(`echo $nodes | sed 's/,/\n/g'`)   # split the comma-separated entries into an array
idx=0
for element in "${myarray[@]}"
do
if [[ "$element" == *"-"* ]]; then
array=(`echo $element | sed 's/-/\n/g'`)
for node in $(seq ${array[0]} ${array[1]})
do
nodelist[$idx]=$node
idx=$idx+1
done
else
nodelist[$idx]=$element
idx=$idx+1
fi
done
for element in "${nodelist[@]}"
do
echo "$element"
done
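# Hypothetical example of the expansion above: with
#   SLURM_JOB_NODELIST=m[11423,11425-11426]
# the stripped string is "11423,11425-11426" and the loop fills
#   nodelist=(11423 11425 11426)
# Note that this simple parser assumes the bracketed "m[...]" form; a
# single-node allocation such as "m11423" would not be stripped correctly.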
# MPIOM nodes list
mpiom=""
for idx in $(seq 0 $((MPIOM_ND-1)))
do
    if [[ $idx == $((MPIOM_ND-1)) ]]; then
        mpiom="$mpiom${nodelist[$idx]}"
    else
        mpiom="$mpiom${nodelist[$idx]},"
    fi
done
echo "MPIOM nodes: $mpiom"
# PISM nodes list
length=${#nodelist[@]}
pism=""
for idx in $(seq $MPIOM_ND $((length-1)))
do
    if [[ $idx == $((length-1)) ]]; then
        pism="$pism${nodelist[$idx]}"
    else
        pism="$pism${nodelist[$idx]},"
    fi
done
echo "PISM nodes: $pism"
STARTTIME=`date +%s`
# Run PISM
echo "srun --pack-group=0 --mpi=openmpi --cpu_bind=cores --nodelist="$hostname[$pism]" -n 24 ./pism &"
srun --pack-group=0 --mpi=openmpi --cpu_bind=cores --nodelist="$hostname[$pism]" -n 24 ./pism &
# Run MPIOM and ECHAM6
echo "srun --pack-group=0 --mpi=openmpi --cpu_bind=cores --distribution=block:block --nodelist=$hostname[$mpiom] -n 24 ./mpiom : --pack-group=1 --mpi=openmpi --cpu_bind=cores --distribution=block:block -n 48 ./echam &"
srun --pack-group=0 --mpi=openmpi --cpu_bind=cores --distribution=block:block --nodelist="$hostname[$mpiom]" -n 24 ./mpiom : --pack-group=1 --mpi=openmpi --cpu_bind=cores --distribution=block:block -n 48 ./echam &
wait
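# Both srun calls run concurrently and are collected by "wait": the first
# places 24 PISM tasks on the nodes in $pism within pack group 0, while the
# second is an MPMD launch whose ":" separates 24 MPIOM tasks on the nodes in
# $mpiom (pack group 0) from 48 ECHAM6 tasks in pack group 1.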
ENDTIME=`date +%s`
echo "Total time: $((ENDTIME - STARTTIME)) seconds"
echo "Done"