Commit 341d6eb0 authored by Matthieu Dorier's avatar Matthieu Dorier

updated files to suit MCS workstations

parent cef48446
......@@ -5,51 +5,42 @@ SANDBOX=$1
. $SANDBOX/spack/share/spack/setup-env.sh
spack load -r ssg
spack load -r bake
spack load -r mpich
module list
# NOTE: rpath doesn't seem to be set correctly, and the paths we need are
# in LIBRARY_PATH instead of LD_LIBRARY_PATH
export LD_LIBRARY_PATH=$LIBRARY_PATH
export PATH=$PATH:$HOME/sds-tests-install/bin
# find nodes in job. We have to do this so that we can manually specify
# in each aprun so that server ranks consistently run on node where we
# in each mpirun so that server ranks consistently run on node where we
# set up storage space
declare -a nodes=($(python /home/carns/bin/run_on_all_nids.py));
echo "### NOTE: all benchmarks are using numactl to keep processes on socket 0"
#declare -a nodes=($(python /home/carns/bin/run_on_all_nids.py));
echo "## testing launcher placement:"
aprun -n 1 -N 1 -L ${nodes[0]} hostname
aprun -n 1 -N 1 -L ${nodes[0]} hostname
aprun -n 1 -N 1 -L ${nodes[0]} hostname
aprun -n 1 -N 1 -L ${nodes[0]} hostname
#echo "### NOTE: all benchmarks are using numactl to keep processes on socket 0"
echo "## Bake OFI/GNI:"
aprun -n 1 -N 1 -L ${nodes[0]} hostname
aprun -n 1 -N 1 -L ${nodes[0]} rm -f /dev/shm/foo.dat
aprun -n 1 -N 1 -L ${nodes[0]} bake-mkpool -s 60G /dev/shm/foo.dat
aprun -n 1 -N 1 -L ${nodes[0]} ls -alh /dev/shm/foo.dat
aprun -n 2 -N 1 -L ${nodes[0]},${nodes[1]} numactl -N 0 -m 0 ./bake-p2p-bw -x 16777216 -m 34359738368 -n "ofi+gni://ipogif0:5000" -p /dev/shm/foo.dat -c 1
rm -f /dev/shm/foo.dat
bake-mkpool -s 60G /dev/shm/foo.dat
ls -alh /dev/shm/foo.dat
mpirun -np 2 bake-p2p-bw -x 16777216 -m 34359738368 -n "tcp" -p /dev/shm/foo.dat -c 1
echo "## Bake OFI/GNI (8x concurrency):"
aprun -n 1 -N 1 -L ${nodes[0]} hostname
aprun -n 1 -N 1 -L ${nodes[0]} rm -f /dev/shm/foo.dat
aprun -n 1 -N 1 -L ${nodes[0]} bake-mkpool -s 60G /dev/shm/foo.dat
aprun -n 1 -N 1 -L ${nodes[0]} ls -alh /dev/shm/foo.dat
aprun -n 2 -N 1 -L ${nodes[0]},${nodes[1]} numactl -N 0 -m 0 ./bake-p2p-bw -x 16777216 -m 34359738368 -n "ofi+gni://ipogif0:5000" -p /dev/shm/foo.dat -c 8
rm -f /dev/shm/foo.dat
bake-mkpool -s 60G /dev/shm/foo.dat
ls -alh /dev/shm/foo.dat
mpirun -np 2 bake-p2p-bw -x 16777216 -m 34359738368 -n "tcp" -p /dev/shm/foo.dat -c 8
echo "## Bake OFI/GNI (Hg busy spin):"
aprun -n 1 -N 1 -L ${nodes[0]} hostname
aprun -n 1 -N 1 -L ${nodes[0]} rm -f /dev/shm/foo.dat
aprun -n 1 -N 1 -L ${nodes[0]} bake-mkpool -s 60G /dev/shm/foo.dat
aprun -n 1 -N 1 -L ${nodes[0]} ls -alh /dev/shm/foo.dat
aprun -n 2 -N 1 -L ${nodes[0]},${nodes[1]} numactl -N 0 -m 0 ./bake-p2p-bw -x 16777216 -m 34359738368 -n "ofi+gni://ipogif0:5000" -p /dev/shm/foo.dat -c 1 -t 0,0
rm -f /dev/shm/foo.dat
bake-mkpool -s 60G /dev/shm/foo.dat
ls -alh /dev/shm/foo.dat
mpirun -np 2 bake-p2p-bw -x 16777216 -m 34359738368 -n "tcp" -p /dev/shm/foo.dat -c 1 -t 0,0
echo "## Bake OFI/GNI (8x concurrency, Hg busy spin):"
aprun -n 1 -N 1 -L ${nodes[0]} hostname
aprun -n 1 -N 1 -L ${nodes[0]} rm -f /dev/shm/foo.dat
aprun -n 1 -N 1 -L ${nodes[0]} bake-mkpool -s 60G /dev/shm/foo.dat
aprun -n 1 -N 1 -L ${nodes[0]} ls -alh /dev/shm/foo.dat
aprun -n 2 -N 1 -L ${nodes[0]},${nodes[1]} numactl -N 0 -m 0 ./bake-p2p-bw -x 16777216 -m 34359738368 -n "ofi+gni://ipogif0:5000" -p /dev/shm/foo.dat -c 8 -t 0,0
rm -f /dev/shm/foo.dat
bake-mkpool -s 60G /dev/shm/foo.dat
ls -alh /dev/shm/foo.dat
mpirun -np 2 bake-p2p-bw -x 16777216 -m 34359738368 -n "tcp" -p /dev/shm/foo.dat -c 8 -t 0,0
rm -f /dev/shm/foo.dat
compilers:
- compiler:
environment: {}
extra_rpaths: []
flags: {}
modules: []
operating_system: ubuntu14.04
paths:
cc: /usr/bin/clang
cxx: /usr/bin/clang++
f77: null
fc: null
spec: clang@3.4-1ubuntu3
target: x86_64
- compiler:
environment: {}
extra_rpaths: []
flags: {}
modules: []
operating_system: ubuntu14.04
paths:
cc: /usr/bin/gcc-4.8
cxx: /usr/bin/g++-4.8
f77: /usr/bin/gfortran-4.8
fc: /usr/bin/gfortran-4.8
spec: gcc@4.8
target: x86_64
- compiler:
environment: {}
extra_rpaths: []
flags: {}
modules: []
operating_system: ubuntu14.04
paths:
cc: /usr/bin/gcc-4.4
cxx: /usr/bin/g++-4.4
f77: /usr/bin/gfortran-4.4
fc: /usr/bin/gfortran-4.4
spec: gcc@4.4.7
target: x86_64
- compiler:
environment: {}
extra_rpaths:
- /soft/apps/packages/gcc/gcc-8.2.0/lib64
flags: {}
modules: []
operating_system: ubuntu14.04
paths:
cc: /soft/apps/packages/gcc/gcc-8.2.0/bin/gcc
cxx: /soft/apps/packages/gcc/gcc-8.2.0/bin/g++
f77: /soft/apps/packages/gcc/gcc-8.2.0/bin/gfortran
fc: /soft/apps/packages/gcc/gcc-8.2.0/bin/gfortran
spec: gcc@8.2.1
target: x86_64
- compiler:
environment: {}
extra_rpaths: []
flags: {}
modules: []
operating_system: ubuntu14.04
paths:
cc: /soft/apps/packages/clang+llvm-8.0.0/bin/clang
cxx: /soft/apps/packages/clang+llvm-8.0.0/bin/clang++
f77: null
fc: null
spec: clang@8.0.0
target: x86_64
......@@ -5,13 +5,14 @@ SANDBOX=$1
. $SANDBOX/spack/share/spack/setup-env.sh
spack load -r ssg
spack load -r bake
spack load -r mpich
module list
export LD_LIBRARY_PATH=$LIBRARY_PATH
#ldd ./margo-p2p-latency
export PATH=$PATH:$HOME/sds-tests-install/bin
# NOTE: needed as of January 2018 to avoid conflicts between MPI and
# libfabric GNI provider
# NOTE: doing this with -e option to aprun
......@@ -23,33 +24,33 @@ export LD_LIBRARY_PATH=$LIBRARY_PATH
# aprun -n 2 -N 1 ./osu_latency
echo "## Margo OFI/GNI (round trip):"
mpirun -np 2 ./margo-p2p-latency -i 100000 -n tcp
mpirun -np 2 margo-p2p-latency -i 100000 -n tcp
echo "## Margo OFI/GNI (bw, 1MiB):"
mpirun -np 2 ./margo-p2p-bw -x 1048576 -n tcp -c 1 -D 20
mpirun -np 2 margo-p2p-bw -x 1048576 -n tcp -c 1 -D 20
echo "## Margo OFI/GNI (bw, 1MiB, 8x concurrency):"
mpirun -np 2 ./margo-p2p-bw -x 1048576 -n tcp -c 8 -D 20
mpirun -np 2 margo-p2p-bw -x 1048576 -n tcp -c 8 -D 20
echo "## Margo OFI/GNI (bw, 8MiB):"
mpirun -np 2 ./margo-p2p-bw -x 8388608 -n tcp -c 1 -D 20
mpirun -np 2 margo-p2p-bw -x 8388608 -n tcp -c 1 -D 20
echo "## Margo OFI/GNI (bw, 8MiB, 8x concurrency):"
mpirun -np 2 ./margo-p2p-bw -x 8388608 -n tcp -c 8 -D 20
mpirun -np 2 margo-p2p-bw -x 8388608 -n tcp -c 8 -D 20
echo "## Margo OFI/GNI (bw, 1MB unaligned):"
mpirun -np 2 ./margo-p2p-bw -x 1000000 -n tcp -c 1 -D 20
mpirun -np 2 margo-p2p-bw -x 1000000 -n tcp -c 1 -D 20
echo "## Margo OFI/GNI (bw, 1MB unaligned, 8x concurrency):"
mpirun -np 2 ./margo-p2p-bw -x 1000000 -n tcp -c 8 -D 20
mpirun -np 2 margo-p2p-bw -x 1000000 -n tcp -c 8 -D 20
echo "## Margo OFI/GNI (round trip, Hg busy spin):"
mpirun -np 2 ./margo-p2p-latency -i 100000 -n tcp -t 0,0
mpirun -np 2 margo-p2p-latency -i 100000 -n tcp -t 0,0
echo "## Margo OFI/GNI (bw, 1MiB, Hg busy spin):"
mpirun -np 2 ./margo-p2p-bw -x 1048576 -n tcp -c 1 -D 20 -t 0,0
mpirun -np 2 margo-p2p-bw -x 1048576 -n tcp -c 1 -D 20 -t 0,0
echo "## Margo OFI/GNI (bw, 1MiB, 8x concurrency, Hg busy spin):"
mpirun -np 2 ./margo-p2p-bw -x 1048576 -n tcp -c 8 -D 20 -t 0,0
mpirun -np 2 margo-p2p-bw -x 1048576 -n tcp -c 8 -D 20 -t 0,0
echo "## Margo OFI/GNI (bw, 8MiB, Hg busy spin):"
mpirun -np 2 ./margo-p2p-bw -x 8388608 -n tcp -c 1 -D 20 -t 0,0
mpirun -np 2 margo-p2p-bw -x 8388608 -n tcp -c 1 -D 20 -t 0,0
echo "## Margo OFI/GNI (bw, 8MiB, 8x concurrency, Hg busy spin):"
mpirun -np 2 ./margo-p2p-bw -x 8388608 -n tcp -c 8 -D 20 -t 0,0
mpirun -np 2 margo-p2p-bw -x 8388608 -n tcp -c 8 -D 20 -t 0,0
echo "## Margo OFI/GNI (bw, 1MB unaligned, Hg busy spin):"
mpirun -np 2 ./margo-p2p-bw -x 1000000 -n tcp -c 1 -D 20 -t 0,0
mpirun -np 2 margo-p2p-bw -x 1000000 -n tcp -c 1 -D 20 -t 0,0
echo "## Margo OFI/GNI (bw, 1MB unaligned, 8x concurrency, Hg busy spin):"
mpirun -np 2 ./margo-p2p-bw -x 1000000 -n tcp -c 8 -D 20 -t 0,0
mpirun -np 2 margo-p2p-bw -x 1000000 -n tcp -c 8 -D 20 -t 0,0
......@@ -5,6 +5,7 @@ SANDBOX=$1
. $SANDBOX/spack/share/spack/setup-env.sh
spack load -r ssg
spack load -r bake
spack load -r mpich
module list
......@@ -12,40 +13,31 @@ module list
# in LIBRARY_PATH instead of LD_LIBRARY_PATH
export LD_LIBRARY_PATH=$LIBRARY_PATH
# find nodes in job. We have to do this so that we can manually specify
# in each aprun so that server ranks consistently run on node where we
# set up storage space
declare -a nodes=($(python /home/carns/bin/run_on_all_nids.py));
echo "## PMDK (8x concurrency):"
aprun -n 1 -N 1 -L ${nodes[0]} hostname
aprun -n 1 -N 1 -L ${nodes[0]} rm -f /dev/shm/foo.dat
aprun -n 1 -N 1 -L ${nodes[0]} truncate -s 60G /dev/shm/foo.dat
aprun -n 1 -N 1 -L ${nodes[0]} pmempool create obj /dev/shm/foo.dat
aprun -n 1 -N 1 -L ${nodes[0]} ls -alh /dev/shm/foo.dat
aprun -n 1 -N 1 -L ${nodes[0]} ./pmdk-bw -x 16777216 -m 34359738368 -p /dev/shm/foo.dat -c 8
rm -f /dev/shm/foo.dat
truncate -s 60G /dev/shm/foo.dat
pmempool create obj /dev/shm/foo.dat
ls -alh /dev/shm/foo.dat
mpirun -np 1 ./pmdk-bw -x 16777216 -m 34359738368 -p /dev/shm/foo.dat -c 8
echo "## PMDK (8x concurrency, 8 es):"
aprun -n 1 -N 1 -L ${nodes[0]} hostname
aprun -n 1 -N 1 -L ${nodes[0]} rm -f /dev/shm/foo.dat
aprun -n 1 -N 1 -L ${nodes[0]} truncate -s 60G /dev/shm/foo.dat
aprun -n 1 -N 1 -L ${nodes[0]} pmempool create obj /dev/shm/foo.dat
aprun -n 1 -N 1 -L ${nodes[0]} ls -alh /dev/shm/foo.dat
aprun -n 1 -N 1 -L ${nodes[0]} ./pmdk-bw -x 16777216 -m 34359738368 -p /dev/shm/foo.dat -c 8 -T 8
rm -f /dev/shm/foo.dat
truncate -s 60G /dev/shm/foo.dat
pmempool create obj /dev/shm/foo.dat
ls -alh /dev/shm/foo.dat
mpirun -np 1 ./pmdk-bw -x 16777216 -m 34359738368 -p /dev/shm/foo.dat -c 8 -T 8
echo "## PMDK (8x concurrency, preallocated pool):"
aprun -n 1 -N 1 -L ${nodes[0]} hostname
aprun -n 1 -N 1 -L ${nodes[0]} rm -f /dev/shm/foo.dat
aprun -n 1 -N 1 -L ${nodes[0]} dd if=/dev/zero of=/dev/shm/foo.dat bs=1M count=61440
aprun -n 1 -N 1 -L ${nodes[0]} pmempool create obj /dev/shm/foo.dat
aprun -n 1 -N 1 -L ${nodes[0]} ls -alh /dev/shm/foo.dat
aprun -n 1 -N 1 -L ${nodes[0]} ./pmdk-bw -x 16777216 -m 34359738368 -p /dev/shm/foo.dat -c 8
rm -f /dev/shm/foo.dat
dd if=/dev/zero of=/dev/shm/foo.dat bs=1M count=61440
pmempool create obj /dev/shm/foo.dat
ls -alh /dev/shm/foo.dat
mpirun -np 1 ./pmdk-bw -x 16777216 -m 34359738368 -p /dev/shm/foo.dat -c 8
echo "## PMDK (8x concurrency, 8 es, preallocated pool):"
aprun -n 1 -N 1 -L ${nodes[0]} hostname
aprun -n 1 -N 1 -L ${nodes[0]} rm -f /dev/shm/foo.dat
aprun -n 1 -N 1 -L ${nodes[0]} dd if=/dev/zero of=/dev/shm/foo.dat bs=1M count=61440
aprun -n 1 -N 1 -L ${nodes[0]} pmempool create obj /dev/shm/foo.dat
aprun -n 1 -N 1 -L ${nodes[0]} ls -alh /dev/shm/foo.dat
aprun -n 1 -N 1 -L ${nodes[0]} ./pmdk-bw -x 16777216 -m 34359738368 -p /dev/shm/foo.dat -c 8 -T 8
rm -f /dev/shm/foo.dat
dd if=/dev/zero of=/dev/shm/foo.dat bs=1M count=61440
pmempool create obj /dev/shm/foo.dat
ls -alh /dev/shm/foo.dat
mpirun -np 1 ./pmdk-bw -x 16777216 -m 34359738368 -p /dev/shm/foo.dat -c 8 -T 8
......@@ -25,9 +25,11 @@ export HOME=$SANDBOX
mkdir $SANDBOX
mkdir $PREFIX
mkdir $JOBDIR
cp $ORIGIN/margo-regression.qsub $JOBDIR
cp $ORIGIN/bake-regression.qsub $JOBDIR
cp $ORIGIN/pmdk-regression.qsub $JOBDIR
mkdir -p $HOME/.spack/linux
cp $ORIGIN/margo-regression.sh $JOBDIR
cp $ORIGIN/bake-regression.sh $JOBDIR
cp $ORIGIN/pmdk-regression.sh $JOBDIR
# set up build environment
cd $SANDBOX
......@@ -42,11 +44,15 @@ spack compilers
# use our own packages.yaml for the workstation-specific preferences
cp $ORIGIN/packages.yaml $SPACK_ROOT/etc/spack
cp $ORIGIN/compilers.yaml $HOME/.spack/linux
# add external repo for mochi. Note that this will not modify the
# user's ~/.spack/ files because we modified $HOME above
spack repo add ${SANDBOX}/sds-repo
# sanity check
spack repo list
# bootstrap spack to get environment modules
spack bootstrap
. $SANDBOX/spack/share/spack/setup-env.sh
# clean out any stray packages from previous runs, just in case
spack uninstall -R -y argobots mercury libfabric || true
# ior acts as our "apex" package here, causing several other packages to build
......@@ -58,6 +64,7 @@ spack install ior@develop +mobject ^bake@develop
# spack later in this script
spack load -r ssg
spack load -r bake
spack load -r mpich
# sds-tests
echo "=== BUILDING SDS TEST PROGRAMS ==="
......@@ -66,7 +73,7 @@ libtoolize
./prepare.sh
mkdir build
cd build
../configure --prefix=$PREFIX CC=cc
../configure --prefix=$PREFIX CC=mpicc
make -j 3
make install
......@@ -79,7 +86,7 @@ cp $PREFIX/bin/pmdk-bw $JOBDIR
cd $JOBDIR
./margo-regression.sh $SANDBOX
#./bake-regression.sh $SANDBOX
./bake-regression.sh $SANDBOX
#./pmdk-regression.sh $SANDBOX
echo "=== JOB DONE, COLLECTING AND SENDING RESULTS ==="
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment