Commit 36f54291 authored by Misbah Mubarak

Adding shell script for the runs, making broadcast size a runtime argument

parent 78e8d940
#!/bin/bash
network_size=1056

for alloc in 128 256 512 1024 2048 4096 6144 8192 10240 12288 14336 16512 # job allocation size
do
    # generate the config file for this job allocation (skip if it already exists)
    if [ ! -f config_alloc_$alloc.conf ];
    then
        echo "Creating config file"
        printf "rand\n$network_size\n$alloc" > config_alloc_$alloc.conf
        # now generate the allocation file from the config
        python listgen.py config_alloc_$alloc.conf
    fi
    # generate the workload file (skip if it already exists)
    if [ ! -f workload_$alloc.conf ];
    then
        echo "Creating workload file"
        printf "$alloc collective" > workload_$alloc.conf
    fi
    for routing in "minimal" "nonminimal" "adaptive"
    do
        for data_size in 1024 1048576 # 1KB, 1MB
        do
            for algo in 0 1 2 3 # TREE, LLF, GLF, FOREST
            do
                # now do 5 iterations of the simulation; the command is only
                # echoed here (pipe this script's output to a shell to run it)
                for iter in {1..5}
                do
                    echo "./src/network-workloads/model-net-mpi-replay --sync=1 \
                        --bcast_size=$data_size --algo_type=$algo \
                        --alloc_file=allocation.conf --workload_type=cortex-workload \
                        --workload_conf_file=workload_$alloc.conf \
                        ../src/network-workloads/cortex-conf/modelnet-mpi-test-dragonfly-$network_size-$routing.conf \
                        >> dragonfly-$network_size-$data_size-$routing-$algo.out"
                done
            done
        done
    done
done
# Larger sweep (commented-out pseudocode, not yet wired up):
# network_size = 5616
# for routing in ['minimal','nonminimal','adaptive']
#     for data_size in [1024,1048576] # 1KB, 1MB
#         for algo in [0,1,2,3] # TREE, LLF, GLF, FOREST
#             for alloc in [128,256,512,1024,2*1024,3*1024,4*1024]
#                 for bg_traffic in [false,true]
#                     run_exp(network_size,routing,data_size,alloc,algo,bg_traffic)
#                 end
#             end
#         end
#     end
# end
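The commented-out block above describes a larger sweep on a 5616-node network that also toggles background traffic. Below is a minimal bash sketch of that sweep, with the caveat that run_exp is a placeholder here: it only prints its arguments, since the committed script does not show how background traffic or the 5616-node configuration would be passed to model-net-mpi-replay.

#!/bin/bash
# Hypothetical driver for the larger sweep sketched in the pseudocode.
# run_exp is a stub that only echoes its arguments; hooking it up to
# model-net-mpi-replay is left to the real run script.
run_exp() {
    echo "run_exp: network_size=$1 routing=$2 data_size=$3 alloc=$4 algo=$5 bg_traffic=$6"
}

network_size=5616
for routing in "minimal" "nonminimal" "adaptive"
do
    for data_size in 1024 1048576 # 1KB, 1MB
    do
        for algo in 0 1 2 3 # TREE, LLF, GLF, FOREST
        do
            for alloc in 128 256 512 1024 2048 3072 4096
            do
                for bg_traffic in false true
                do
                    run_exp $network_size $routing $data_size $alloc $algo $bg_traffic
                done
            done
        done
    done
done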
@@ -22,7 +22,6 @@
 #define TRACE -1
 #define MAX_WAIT_REQS 512
 #define CHK_LP_NM "nw-lp"
-#define COL_MSG_SIZE 1024
 #define CS_LP_DBG 0
 #define lprintf(_fmt, ...) \
 do {if (CS_LP_DBG) printf(_fmt, __VA_ARGS__);} while (0)
@@ -43,6 +42,7 @@ static lp_io_handle io_handle;
 static unsigned int lp_io_use_suffix = 0;
 static int do_lp_io = 0;
 static tw_stime mean_interval = 100000;
+static unsigned int bcast_size = 0;
 /* variables for loading multiple applications */
 /* Xu's additions start */
@@ -1432,7 +1432,7 @@ void nw_test_init(nw_state* s, tw_lp* lp)
 tw_error(TW_LOC, "\n Unknown collective algo type %d must be 0-2 ", algo_type);
 params_c.root = 0;
-params_c.size = COL_MSG_SIZE;
+params_c.size = bcast_size;
 params = (char*)&params_c;
 wrkld_id = codes_workload_load("cortex-workload", params, s->app_id, s->local_rank);
@@ -1831,6 +1831,7 @@ const tw_optdef app_opt [] =
 TWOPT_CHAR("alloc_file", alloc_file, "allocation file name"),
 TWOPT_CHAR("workload_conf_file", workloads_conf_file, "workload config file name"),
 TWOPT_UINT("num_net_traces", num_net_traces, "number of network traces"),
+TWOPT_UINT("bcast_size", bcast_size, "size of the broadcast"),
 TWOPT_UINT("algo_type", algo_type, "collective algorithm type 0 : TREE, 1: LLF, 2: GLF"),
 TWOPT_UINT("disable_compute", disable_delay, "disable compute simulation"),
 TWOPT_UINT("enable_mpi_debug", enable_debug, "enable debugging of MPI sim layer (works with sync=1 only)"),