@@ -61,9 +61,23 @@ endif
if USE_ONLINE
AM_CPPFLAGS += ${ARGOBOTS_CFLAGS} ${SWM_CFLAGS} -DUSE_ONLINE=1
LDADD += ${ARGOBOTS_LIBS}
if USE_SWM
AM_CPPFLAGS += -DUSE_SWM=1
LDADD += ${SWM_LIBS}
src_libcodes_la_SOURCES += src/workload/methods/codes-online-comm-wrkld.C
endif
if USE_CONC
src_libcodes_la_SOURCES += src/workload/methods/codes-conc-online-comm-wrkld.C
AM_CPPFLAGS += ${CONCEPTUAL_CFLAGS} -DUSE_CONC=1
LDADD += ${CONCEPTUAL_LIBS}
src_libcodes_la_SOURCES += src/workload/conceputal-skeleton-apps/conc-bisect.c
src_libcodes_la_SOURCES += src/workload/conceputal-skeleton-apps/conc-cosmoflow.c
src_libcodes_la_SOURCES += src/workload/conceputal-skeleton-apps/conc-hotpotato.c
src_libcodes_la_SOURCES += src/workload/conceputal-skeleton-apps/conc-latencyall.c
src_libcodes_la_SOURCES += src/workload/conceputal-skeleton-apps/conc-latency.c
endif
endif
if USE_DUMPI
AM_CPPFLAGS += ${DUMPI_CFLAGS} -DUSE_DUMPI=1
......
/*
* Copyright (C) 2017 University of Chicago.
* See COPYRIGHT notice in top-level directory.
*
*/
#ifndef CODES_CONC_ADDON_H
#define CODES_CONC_ADDON_H
#ifdef __cplusplus
extern "C" {
#endif
#ifdef USE_CONC
#include <ncptl/ncptl.h>
#endif
#include <mpi.h>
#define MAX_CONC_ARGV 128
typedef struct conc_bench_param conc_bench_param;
struct conc_bench_param {
char conc_program[MAX_CONC_ARGV];
int conc_argc;
char config_in[MAX_CONC_ARGV][MAX_CONC_ARGV];
char *conc_argv[MAX_CONC_ARGV];
};
int codes_conc_bench_load(
const char* program,
int argc,
char *argv[]);
void CODES_MPI_Comm_size (MPI_Comm comm, int *size);
void CODES_MPI_Comm_rank( MPI_Comm comm, int *rank );
void CODES_MPI_Finalize();
void CODES_MPI_Send(const void *buf,
int count,
MPI_Datatype datatype,
int dest,
int tag,
MPI_Comm comm);
void CODES_MPI_Recv(void *buf,
int count,
MPI_Datatype datatype,
int source,
int tag,
MPI_Comm comm,
MPI_Status *status);
void CODES_MPI_Sendrecv(const void *sendbuf,
int sendcount,
MPI_Datatype sendtype,
int dest,
int sendtag,
void *recvbuf,
int recvcount,
MPI_Datatype recvtype,
int source,
int recvtag,
MPI_Comm comm,
MPI_Status *status);
void CODES_MPI_Barrier(MPI_Comm comm);
void CODES_MPI_Isend(const void *buf,
int count,
MPI_Datatype datatype,
int dest,
int tag,
MPI_Comm comm,
MPI_Request *request);
void CODES_MPI_Irecv(void *buf,
int count,
MPI_Datatype datatype,
int source,
int tag,
MPI_Comm comm,
MPI_Request *request);
void CODES_MPI_Waitall(int count,
MPI_Request array_of_requests[],
MPI_Status array_of_statuses[]);
void CODES_MPI_Reduce(const void *sendbuf,
void *recvbuf,
int count,
MPI_Datatype datatype,
MPI_Op op,
int root,
MPI_Comm comm);
void CODES_MPI_Allreduce(const void *sendbuf,
void *recvbuf,
int count,
MPI_Datatype datatype,
MPI_Op op,
MPI_Comm comm);
void CODES_MPI_Bcast(void *buffer,
int count,
MPI_Datatype datatype,
int root,
MPI_Comm comm);
void CODES_MPI_Alltoall(const void *sendbuf,
int sendcount,
MPI_Datatype sendtype,
void *recvbuf,
int recvcount,
MPI_Datatype recvtype,
MPI_Comm comm);
void CODES_MPI_Alltoallv(const void *sendbuf,
const int *sendcounts,
const int *sdispls,
MPI_Datatype sendtype,
void *recvbuf,
const int *recvcounts,
const int *rdispls,
MPI_Datatype recvtype,
MPI_Comm comm);
/* implementation structure */
struct codes_conceptual_bench {
char *program_name; /* name of the conceptual program */
int (*conceptual_main)(int argc, char *argv[]);
};
void codes_conceptual_add_bench(struct codes_conceptual_bench const * method);
#ifdef __cplusplus
}
#endif
#endif /* CODES_CONC_ADDON_H */
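The header above is the whole public surface of the conceptual-workload add-on: a generated skeleton exposes a codes_conceptual_bench struct, extra generators are registered with codes_conceptual_add_bench(), and codes_conc_bench_load() dispatches a benchmark by name. A minimal usage sketch follows; the benchmark name "my_bench", the "latency" program name, and the argv strings are placeholders, not part of this change.

/* Hypothetical usage sketch -- names and arguments are illustrative only. */
#include <stdio.h>
#include "codes/codes-conc-addon.h"

static int my_bench_main(int argc, char *argv[])
{
    (void)argc; (void)argv;
    return 0; /* the translated coNCePTuaL body would go here */
}

static struct codes_conceptual_bench my_bench = {
    .program_name    = (char*)"my_bench",
    .conceptual_main = my_bench_main,
};

static void example_usage(void)
{
    char *argv[] = { (char*)"latency", (char*)"--reps=10", NULL };

    /* user generators must be added before the first load initializes the table */
    codes_conceptual_add_bench(&my_bench);

    if (codes_conc_bench_load("latency", 2, argv) < 0)
        fprintf(stderr, "conceptual benchmark failed or was not found\n");
}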
@@ -121,22 +121,48 @@ AM_CONDITIONAL(USE_DARSHAN, [test "x${use_darshan}" = xyes])
# check for Argobots
AC_ARG_WITH([online],[AS_HELP_STRING([--with-online@<:@=DIR@:>@],
[Build with the online workloads and argobots support])])
if test "x${with_online}" != "x" ; then
AM_CONDITIONAL(USE_ONLINE, true)
PKG_CHECK_MODULES_STATIC([ARGOBOTS], [argobots], [],
[AC_MSG_ERROR([Could not find working argobots installation via pkg-config])])
AC_DEFINE_UNQUOTED([ONLINE_CONFIGDIR], ["$with_online"], [if using json data files,
specify config directory])
else
AM_CONDITIONAL(USE_ONLINE, false)
fi
#check for SWM
AC_ARG_WITH([swm],[AS_HELP_STRING([--with-swm@<:@=DIR@:>@],
[location of SWM installation])])
if test "x${with_swm}" != "x" ; then
AM_CONDITIONAL(USE_SWM, true)
PKG_CHECK_MODULES_STATIC([SWM], [swm], [],
[AC_MSG_ERROR([Could not find working swm installation via pkg-config])])
PKG_CHECK_VAR([SWM_DATAROOTDIR], [swm], [datarootdir], [],
[AC_MSG_ERROR([Could not find shared directory in SWM])])
AC_DEFINE_UNQUOTED([SWM_DATAROOTDIR], ["$SWM_DATAROOTDIR"], [if using json
data files])
else
AM_CONDITIONAL(USE_SWM, false)
fi
#check for Conceptual
AC_ARG_WITH([conceptual],[AS_HELP_STRING([--with-conceptual@<:@=DIR@:>@],
[location of Conceptual installation])])
if test "x${with_conceptual}" != "x" ; then
AC_CHECK_FILES([${with_conceptual}/lib/libncptl.a],
AM_CONDITIONAL(USE_CONC, true),
AC_MSG_ERROR(Could not find Conceptual libraries libncptl.a))
CONCEPTUAL_CFLAGS="-I${with_conceptual}/include"
CONCEPTUAL_LIBS="-L${with_conceptual}/lib/ -lncptl"
AC_SUBST(CONCEPTUAL_LIBS)
AC_SUBST(CONCEPTUAL_CFLAGS)
else
AM_CONDITIONAL(USE_CONC, false)
fi
# check for Recorder
AM_CONDITIONAL(USE_RECORDER, true)
RECORDER_CPPFLAGS="-DUSE_RECORDER=1"
......
@@ -16,6 +16,8 @@ boost_cflags=@BOOST_CFLAGS@
boost_libs=@BOOST_LIBS@
argobots_libs=@ARGOBOTS_LIBS@
argobots_cflags=@ARGOBOTS_CFLAGS@
conceptual_libs=@CONCEPTUAL_LIBS@
conceptual_cflags=@CONCEPTUAL_CFLAGS@
swm_libs=@SWM_LIBS@
swm_cflags=@SWM_CFLAGS@
swm_datarootdir=@SWM_DATAROOTDIR@
@@ -25,5 +27,5 @@ Description: Base functionality for CODES storage simulation
Version: @PACKAGE_VERSION@
URL: http://trac.mcs.anl.gov/projects/CODES
Requires:
Libs: -L${libdir} -lcodes ${ross_libs} ${argobots_libs} ${conceptual_libs} ${swm_libs} ${darshan_libs} ${dumpi_libs} ${cortex_libs}
Cflags: -I${includedir} ${swm_datarootdir} ${ross_cflags} ${darshan_cflags} ${swm_cflags} ${argobots_cflags} ${conceptual_cflags} ${dumpi_cflags} ${cortex_cflags}
###################################################
# Measure random bisection-bandwidth patterns #
# By Scott Pakin <pakin@lanl.gov> #
# #
# Inspired by Hoefler, Schneider, and Lumsdaine's #
# "Multistage Switches are not Crossbars" paper #
###################################################
Require language version "1.5".
nummsgs is "Number of messages per trial" and comes from "--nummsgs" or "-n" with default 100.
wups is "Number of warmup messages" and comes from "--wups" or "-w" with default 3.
msgsize is "Message size in bytes" and comes from "--bytes" or "-b" with default 1M.
numtrials is "Number of bisection patterns" and comes from "--trials" or "-t" with default 5000.
rounding is "Round measurements to the nearest N" and comes from "--round" or "-r" with default 5.
For each trial in {1, ..., numtrials} {
task 0 is assigned to processor 0 then
task 0 outputs "Testing random bisection pattern " and trial and "/" and numtrials then
all tasks are assigned to a random processor then
tasks src such that src is even send wups msgsize-byte messages to task src+1 then
all tasks synchronize then
all tasks reset their counters then
tasks src such that src is even send nummsgs msgsize-byte messages to task src+1 then
all tasks synchronize then
all tasks log a histogram of rounding*round(total_bytes*1E6/(elapsed_usecs*1M)/rounding) as "Bandwidth (MiB/s)"
}
# Measure the performance of MPI_Allreduce()
# By Scott Pakin <pakin@lanl.gov>
#
# N.B. Requires the c_mpi backend.
Require language version "1.5".
# Parse the command line.
numwords is "Message size (words)" and comes from "--msgsize" or "-s" with default 28825K.
reps is "Number of repetitions" and comes from "--reps" or "-r" with default 100.
computetime is "Computation time (ms)" and comes from "--compute" or "-c" with default 129.
# Allocate a send buffer and a receive buffer.
Task 0 multicasts a numwords*num_tasks word message from buffer 0 to all other tasks.
Task 0 multicasts a numwords*num_tasks word message from buffer 1 to all other tasks.
# Measure the performance of MPI_Allreduce().
Task 0 resets its counters then
for reps repetitions {
all tasks COMPUTES FOR computetime MILLISECONDS then
all tasks backend execute "
MPI_Allreduce([MESSAGE BUFFER 0], [MESSAGE BUFFER 1], (int)" and numwords and ",
MPI_INT, MPI_SUM, MPI_COMM_WORLD);
" then
all tasks backend execute "
MPI_Allreduce([MESSAGE BUFFER 0], [MESSAGE BUFFER 1], (int)" and numwords and ",
MPI_INT, MPI_SUM, MPI_COMM_WORLD);
"
} then
task 0 logs elapsed_usecs/1000 as "Elapsed time (ms)".
# Virtual ring "hot potato" test
Require language version "1.5".
trials is "Number of trials to perform" and comes from "--trials" or
"-t" with default 100000.
Assert that "the hot-potato test requires at least two tasks" with num_tasks>=2.
Let len be 0 while {
for each task_count in {2, ..., num_tasks} {
task 0 outputs "Performing " and trials and " " and
task_count and "-task runs...." then
for trials repetitions plus 5 warmup repetitions and a synchronization {
task 0 resets its counters then
task 0 sends a len byte message to unsuspecting task 1 then
task (n+1) mod task_count receives a len byte message from task n such that n<task_count then
task n such that n>0 /\ n<task_count sends a len byte message to unsuspecting task (n+1) mod task_count then
task 0 logs the task_count as "# of tasks" and
the minimum of elapsed_usecs/task_count as "Latency (usecs)" and
the mean of elapsed_usecs/task_count as "Latency (usecs)" and
the variance of elapsed_usecs/task_count as "Latency (usecs)"
} then
task 0 computes aggregates
}
}
# An all-pairs ping-pong latency test written in coNCePTuaL
# By Scott Pakin <pakin@lanl.gov>
Require language version "1.5".
# Parse the command line.
reps is "Number of repetitions of each message size" and comes from "--reps" or "-r" with default 1000.
maxbytes is "Maximum number of bytes to transmit" and comes from "--maxbytes" or "-m" with default 1M.
# Ensure that we have a peer with whom to communicate.
Assert that "the latency test requires at least two tasks" with num_tasks>=2.
# Perform the benchmark.
For each msgsize in {0}, {1, 2, 4, ..., maxbytes} {
for reps repetitions {
tasks ev such that ev is even reset their counters then
tasks ev such that ev is even send a msgsize byte message to task ev+1 then
tasks od such that od is odd send a msgsize byte message to task od-1 then
tasks ev such that ev is even log the msgsize as "Bytes" and
the median of elapsed_usecs/2 as "1/2 RTT (usecs)"
} then
tasks ev such that ev is even compute aggregates
}
# A ping-pong latency test written in coNCePTuaL
Require language version "1.5".
# Parse the command line.
reps is "Number of repetitions of each message size" and comes from
"--reps" or "-r" with default 1000.
maxbytes is "Maximum number of bytes to transmit" and comes from
"--maxbytes" or "-m" with default 1M.
# Ensure that we have a peer with whom to communicate.
Assert that "the latency test requires at least two tasks" with num_tasks>=2.
# Perform the benchmark.
For each msgsize in {0}, {1, 2, 4, ..., maxbytes} {
for reps repetitions {
task 0 resets its counters then
task 0 sends a msgsize byte message to task 1 then
task 1 sends a msgsize byte message to task 0 then
task 0 logs the msgsize as "Bytes" and
the median of elapsed_usecs/2 as "1/2 RTT (usecs)"
} then
task 0 computes aggregates
}
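Once translated by the script below, exchanges like the ping-pong above end up exercising the CODES_MPI_* shims declared in codes-conc-addon.h. A heavily simplified, hypothetical C rendering of a single round is shown here only to connect the skeleton to the shim API; the real ncptl c_mpi backend emits much more elaborate code.

/* Hypothetical, simplified rendering of one ping-pong round (illustrative only). */
#include "codes/codes-conc-addon.h"

static void pingpong_round_sketch(int rank, void *buf, int msgsize)
{
    MPI_Status status;
    if (rank == 0) {
        CODES_MPI_Send(buf, msgsize, MPI_BYTE, 1, 0, MPI_COMM_WORLD);
        CODES_MPI_Recv(buf, msgsize, MPI_BYTE, 1, 0, MPI_COMM_WORLD, &status);
    } else if (rank == 1) {
        CODES_MPI_Recv(buf, msgsize, MPI_BYTE, 0, 0, MPI_COMM_WORLD, &status);
        CODES_MPI_Send(buf, msgsize, MPI_BYTE, 0, 0, MPI_COMM_WORLD);
    }
}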
import string
import sys
import os
MPI_OPS = [ 'MPI_Send', 'MPI_Recv', 'MPI_Barrier', 'MPI_Isend', 'MPI_Irecv', 'MPI_Waitall',
'MPI_Reduce', 'MPI_Allreduce', 'MPI_Bcast', 'MPI_Alltoall', 'MPI_Alltoallv',
'MPI_Comm_size', 'MPI_Comm_rank']
LOG = [ 'logfiletmpl_default', 'ncptl_log_write', 'ncptl_log_compute_aggregates', 'ncptl_log_commit_data']
def eliminate_logging(inLines):
    for idx, line in enumerate(inLines):
        if 'Generate and broadcast a UUID' in line:
            for i in range(1, 3):
                inLines[idx+i] = "//"+inLines[idx+i]
        elif 'ncptl_free (logfile_uuid)' in line:
            for i in range(0, 12):
                inLines[idx-i] = "//"+inLines[idx-i]
        elif 'int mpiresult' in line:
            for i in range(0, 30):
                inLines[idx+i] = "//"+inLines[idx+i]
        else:
            for elem in LOG:
                if elem in line:
                    inLines[idx] = "//"+line

def eliminate_conc_init(inLines):
    for idx, line in enumerate(inLines):
        if 'NCPTL_RUN_TIME_VERSION' in line:
            inLines[idx] = "//"+line
        if 'atexit (conc_exit_handler)' in line:
            inLines[idx] = "//"+line
        if 'Inform the run-time library' in line:
            for i in range(1, 4):
                inLines[idx+i] = "//"+inLines[idx+i]

def make_static_var(inLines):
    for idx, line in enumerate(inLines):
        if 'Dummy variable to help mark other variables as used' in line:
            inLines[idx+1] = "static " + inLines[idx+1]
        if 'void conc_mark_variables_used' in line:
            inLines[idx] = "static " + line
        if '/* Program-specific variables */' in line:
            start = idx+1
        if '* Function declarations *' in line:
            end = idx-2
            for i in range(start, end):
                inLines[i] = "static "+inLines[i]

def manipulate_mpi_ops(inLines, program_name):
    for idx, line in enumerate(inLines):
        # subcomm
        if 'MPI_' not in line: # not MPI
            if "int main" in line:
                # inLines[idx] = "static int "+program_name+"_main(int* argc, char *argv[])"
                inLines[idx] = line.replace("int main", "static int "+program_name+"_main")
            else:
                continue
        else: # MPI
            if 'MPI_Init' in line:
                inLines[idx] = "//"+line
            elif 'MPI_Errhandler_' in line: # error handling ignored
                inLines[idx] = "//"+line
            elif 'mpiresult = MPI_Finalize();' in line:
                inLines[idx] = "CODES_MPI_Finalize();"
                inLines[idx+2] = "exitcode = 0;"
            elif 'MPI_Comm_get_attr' in line:
                inLines[idx] = "//"+line
            else:
                for ops in MPI_OPS:
                    if ops in line:
                        inLines[idx] = line.replace(ops, "CODES_"+ops)

def adding_struct(inLines, program_name):
    new_struct = [ '/* fill in function pointers for this method */',
                   'struct codes_conceptual_bench '+program_name+'_bench = ',
                   '{',
                   '.program_name = "'+program_name+'",',
                   '.conceptual_main = '+program_name+'_main,',
                   '};' ]
    codes_include = '#include "codes/codes-conc-addon.h"'
    for idx, line in enumerate(inLines):
        if "* Include files *" in line:
            inLines.insert(idx-1, codes_include)
            break
    # adding struct at the end
    for i in range(0, len(new_struct)):
        inLines.append(new_struct[i])

def insert_if_not_exist(content, idx, hls):
    exist = False
    for i in range(idx[0], idx[1]):
        if hls[i] in content:
            exist = True
            break
    if not exist:
        hls.insert(idx[0], content)

def translate_conc_to_codes(filepath, codespath):
    # get program name
    program_name = filepath.split("/")[-1].replace(".c","")

    with open(filepath, 'r') as infile:
        content = infile.read()
    inLines = content.split('\n')

    eliminate_logging(inLines)
    eliminate_conc_init(inLines)
    make_static_var(inLines)
    manipulate_mpi_ops(inLines, program_name)
    adding_struct(inLines, program_name)

    # output program file
    with open(codespath+"src/workload/conceputal-skeleton-apps/conc-"+program_name+".c","w+") as outFile:
        outFile.writelines(["%s\n" % item for item in inLines])

    # modify interface file
    program_struct = "extern struct codes_conceptual_bench "+program_name+"_bench;\n"
    program_struct_idx = []
    program_definition = " &"+program_name+"_bench,\n"
    program_definition_idx = []
    with open(codespath+"src/workload/codes-conc-addon.c","r+") as header:
        hls = header.readlines()
        for idx, line in enumerate(hls):
            if '/* list of available benchmarks begin */' in line:
                program_struct_idx.append(idx+1)
            elif '/* list of available benchmarks end */' in line:
                program_struct_idx.append(idx)
        insert_if_not_exist(program_struct, program_struct_idx, hls)
        for idx, line in enumerate(hls):
            if '/* default benchmarks begin */' in line:
                program_definition_idx.append(idx+1)
            elif '/* default benchmarks end */' in line:
                program_definition_idx.append(idx)
        insert_if_not_exist(program_definition, program_definition_idx, hls)
        header.seek(0)
        header.writelines(hls)

    # modify makefile
    program_compile = "src_libcodes_la_SOURCES += src/workload/conceputal-skeleton-apps/conc-"+program_name+".c\n"
    program_compile_idx = []
    with open(codespath+"Makefile.am","r+") as makefile:
        mfls = makefile.readlines()
        for idx, line in enumerate(mfls):
            if "CONCEPTUAL_LIBS" in line:
                program_compile_idx.append(idx+1)
                break
        for i in range(program_compile_idx[0], len(mfls)):
            if 'endif' in mfls[i]:
                program_compile_idx.append(i)
                break
        insert_if_not_exist(program_compile, program_compile_idx, mfls)
        makefile.seek(0)
        makefile.writelines(mfls)

if __name__ == "__main__":
    if len(sys.argv) != 4:
        print('Need 3 arguments: 1. path to files to be converted \t2. path to CODES directory\t3. path to ncptl executable')
        sys.exit(1)

    os.chdir(sys.argv[1])
    for benchfile in next(os.walk(sys.argv[1]))[2]: # for all files
        if benchfile.lower().endswith('.ncptl'):
            cfile = benchfile.replace('.ncptl','.c')
            cfile = cfile.replace("-","")
            os.system(sys.argv[3]+' --backend=c_mpi --no-compile '+benchfile+' --output '+cfile)
            print("adding bench file: %s" % cfile)
            translate_conc_to_codes(sys.argv[1]+cfile, sys.argv[2])
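To make the transformation concrete, this is roughly the tail of a translated skeleton after manipulate_mpi_ops() and adding_struct() have run; the program name "example" and the single CODES_MPI_Bcast call are placeholders, not actual generated output.

/* Illustrative shape of a translated skeleton's tail ("example" is a placeholder). */
#include "codes/codes-conc-addon.h"

/* ...code emitted by ncptl --backend=c_mpi, with logging and MPI_Init commented
 * out and MPI_* calls rewritten to their CODES_MPI_* counterparts... */

static int example_main(int argc, char *argv[])
{
    int token = 0;
    (void)argc; (void)argv;
    CODES_MPI_Bcast(&token, 1, MPI_INT, 0, MPI_COMM_WORLD);
    return 0;
}

/* fill in function pointers for this method */
struct codes_conceptual_bench example_bench =
{
    .program_name = "example",
    .conceptual_main = example_main,
};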
@@ -105,7 +105,8 @@ nobase_include_HEADERS = \
codes/net/express-mesh.h \
codes/net/torus.h \
codes/codes-mpi-replay.h \
codes/configfile.h \
codes/codes-conc-addon.h
#codes/codes-nw-workload.h
@@ -151,6 +152,7 @@ src_libcodes_la_SOURCES = \
src/util/codes-mapping-context.c \
src/util/codes-comm.c \
src/workload/codes-workload.c \
src/workload/codes-conc-addon.c \
src/workload/methods/codes-iolang-wrkld.c \
src/workload/methods/codes-checkpoint-wrkld.c \
src/workload/methods/test-workload-method.c \
......
2 conceptual-latency
2 conceptual-latencyall
@@ -2402,8 +2402,8 @@ void dragonfly_custom_router_final(router_state * s,
written = 0;
if(!s->router_id)
{
written = sprintf(s->output_buf2, "# Format <LP ID> <Group ID> <Router ID> <Link Traffic per router port(s)>");
written += sprintf(s->output_buf2 + written, "# Router ports in the order: %d green links, %d black links %d global channels \n",
p->num_router_cols * p->num_row_chans, p->num_router_rows * p->num_col_chans, p->num_global_channels);
}
written += sprintf(s->output_buf2 + written, "\n %llu %d %d",
......
/*
* Copyright (C) 2013 University of Chicago.
* See COPYRIGHT notice in top-level directory.
*
*/
#include <assert.h>
#include <ross.h>
#include <codes/codes.h>
#include <codes/codes-conc-addon.h>
/* list of available benchmarks begin */
extern struct codes_conceptual_bench bisect_bench;
extern struct codes_conceptual_bench cosmoflow_bench;
extern struct codes_conceptual_bench hotpotato_bench;
extern struct codes_conceptual_bench latencyall_bench;
extern struct codes_conceptual_bench latency_bench;
/* list of available benchmarks end */
static struct codes_conceptual_bench const * bench_array_default[] =
{
/* default benchmarks begin */
&bisect_bench,
&cosmoflow_bench,
&hotpotato_bench,
&latencyall_bench,
&latency_bench,
/* default benchmarks end */
NULL
};
// once initialized, adding a bench generator is an error
static int is_bench_init = 0;
static int num_user_benchs = 0;
static struct codes_conceptual_bench const ** bench_array = NULL;
// only call this once
static void init_bench_methods(void)
{
if (is_bench_init)
return;
if (bench_array == NULL)
bench_array = bench_array_default;
else {
// note - count includes the NULL terminator entry
int num_default_benchs =
(sizeof(bench_array_default) / sizeof(bench_array_default[0]));
printf("\n Num default methods %d ", num_default_benchs);
bench_array = realloc(bench_array,
(num_default_benchs + num_user_benchs + 1) *
sizeof(*bench_array));
memcpy(bench_array+num_user_benchs, bench_array_default,
num_default_benchs * sizeof(*bench_array_default));
}
is_bench_init = 1;
}
int codes_conc_bench_load(
const char *program,
int argc,
char *argv[])
{
init_bench_methods();
int i;
int ret;
for(i=0; bench_array[i] != NULL; i++)
{
if(strcmp(bench_array[i]->program_name, program) == 0)
{
/* load appropriate workload generator */
ret = bench_array[i]->conceptual_main(argc, argv);
if(ret < 0)
{
return(-1);
}
return(i);
}
}
fprintf(stderr, "Error: failed to find benchmark program %s\n", program);
return(-1);
}
void codes_conceptual_add_bench(struct codes_conceptual_bench const * bench)
{
static int bench_array_cap = 10;
if (is_bench_init)
tw_error(TW_LOC,
"adding a conceptual benchmark method after initialization is forbidden");
else if (bench_array == NULL){
bench_array = malloc(bench_array_cap * sizeof(*bench_array));
assert(bench_array);
}
if (num_user_benchs == bench_array_cap) {
bench_array_cap *= 2;
bench_array = realloc(bench_array,
bench_array_cap * sizeof(*bench_array));
assert(bench_array);
}
bench_array[num_user_benchs++] = bench;
}
@@ -215,7 +215,7 @@ int main(int argc, char *argv[])
wparams = (char*)&d_params;
}
}
else if(strcmp(type, "online_comm_workload") == 0){
else if(strcmp(type, "swm_online_comm_workload") == 0 || strcmp(type, "conc_online_comm_workload") == 0){
if (n == -1){
fprintf(stderr,
"Expected \"--num-ranks\" argument for online workload\n");
@@ -448,7 +448,7 @@ int main(int argc, char *argv[])
}
} while (op.op_type != CODES_WK_END);
if(strcmp(type, "online_comm_workload") == 0)
if(strcmp(type, "swm_online_comm_workload") == 0 || strcmp(type, "conc_online_comm_workload") == 0)
{
codes_workload_finalize(type, wparams, 0, i);
}
......
@@ -34,9 +34,14 @@ extern struct codes_workload_method darshan_mpi_io_workload_method;
#ifdef USE_RECORDER
extern struct codes_workload_method recorder_io_workload_method;
#endif
#ifdef USE_SWM
extern struct codes_workload_method swm_online_comm_workload_method;
#endif
#ifdef USE_CONC
extern struct codes_workload_method conc_online_comm_workload_method;
#endif
extern struct codes_workload_method checkpoint_workload_method;
extern struct codes_workload_method iomock_workload_method;
@@ -58,9 +63,13 @@ static struct codes_workload_method const * method_array_default[] =
#endif
#endif
#ifdef USE_SWM
&swm_online_comm_workload_method,
#endif
#ifdef USE_CONC
&conc_online_comm_workload_method,
#endif
#ifdef USE_RECORDER
&recorder_io_workload_method,
#endif
......
@@ -950,10 +950,10 @@ static int comm_online_workload_finalize(const char* params, int app_id, int ran
}
extern "C" {
/* workload method name and function pointers for the CODES workload API */
struct codes_workload_method swm_online_comm_workload_method =
{
//.method_name =
(char*)"online_comm_workload",
(char*)"swm_online_comm_workload",
//.codes_workload_read_config =
NULL,
//.codes_workload_load =
......