Commit 1f9837d9 authored Oct 05, 2018 by Xin Wang
adding Conceptual online workload to a separate workload method
parent 75a2ddb9
Showing 14 changed files with 1120 additions and 211 deletions (+1120 / -211)
Makefile.am  +9  -3
codes/codes-conc-addon.h  +94  -11
configure.ac  +19  -5
scripts/conceptual_benchmarks/latency-all.ncptl  +23  -0
scripts/conceptual_benchmarks/latency.ncptl  +24  -0
scripts/conceptual_benchmarks/translate_conc_src.py  +76  -18
src/network-workloads/model-net-mpi-replay.c  +27  -6
src/workload/codes-conc-addon.c  +6  -5
src/workload/codes-workload-dump.c  +2  -2
src/workload/codes-workload.c  +13  -4
src/workload/conceputal-skeleton-apps/conc-latency.c  +23  -23
src/workload/conceputal-skeleton-apps/conc-latencyall.c  +163  -132
src/workload/methods/codes-conc-online-comm-wrkld.C  +639  -0
src/workload/methods/codes-online-comm-wrkld.C  +2  -2
Makefile.am
@@ -61,12 +61,18 @@ endif
if USE_ONLINE
AM_CPPFLAGS += ${ARGOBOTS_CFLAGS} ${SWM_CFLAGS} -DUSE_ONLINE=1
LDADD += ${SWM_LIBS} ${ARGOBOTS_LIBS}
LDADD += ${ARGOBOTS_LIBS}
if USE_SWM
AM_CPPFLAGS += -DUSE_SWM=1
LDADD += ${SWM_LIBS}
src_libcodes_la_SOURCES += src/workload/methods/codes-online-comm-wrkld.C
endif
if USE_CONC
AM_CPPFLAGS += ${CONCEPTUAL_CFLAGS}
src_libcodes_la_SOURCES += src/workload/methods/codes-conc-online-comm-wrkld.C
AM_CPPFLAGS += ${CONCEPTUAL_CFLAGS} -DUSE_CONC=1
LDADD += ${CONCEPTUAL_LIBS}
src_libcodes_la_SOURCES += src/workload/methods/conc-latency.c
src_libcodes_la_SOURCES += src/workload/conceputal-skeleton-apps/conc-latencyall.c
src_libcodes_la_SOURCES += src/workload/conceputal-skeleton-apps/conc-latency.c
endif
endif
...
codes/codes-conc-addon.h
@@ -14,33 +14,116 @@ extern "C" {
#ifdef USE_CONC
#include <ncptl/ncptl.h>
#endif
#include <mpi.h>

#define MAX_CONC_ARGC 20
#define MAX_CONC_ARGV 128

typedef struct conc_bench_param conc_bench_param;

/* implementation structure */
struct codes_conceptual_bench {
    char *program_name;    /* name of the conceptual program */
    int (*conceptual_main)(int *argc, char *argv[]);
};

struct conc_bench_param {
    char *conc_program;
    char conc_program[MAX_CONC_ARGV];
    int conc_argc;
    char *conc_argv[MAX_CONC_ARGC];
    char config_in[MAX_CONC_ARGV][MAX_CONC_ARGV];
    char *conc_argv[MAX_CONC_ARGV];
};

int codes_conc_bench_load(
        const char *program,
        int *argc, const char *argv[]);
        int argc, char *argv[]);

void CODES_MPI_Comm_size(MPI_Comm comm, int *size);
void CODES_MPI_Comm_rank(MPI_Comm comm, int *rank);
void CODES_MPI_Finalize();
void CODES_MPI_Send(const void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm);
void CODES_MPI_Recv(void *buf, int count, MPI_Datatype datatype, int source, int tag, MPI_Comm comm, MPI_Status *status);
void CODES_MPI_Sendrecv(const void *sendbuf, int sendcount, MPI_Datatype sendtype, int dest, int sendtag, void *recvbuf, int recvcount, MPI_Datatype recvtype, int source, int recvtag, MPI_Comm comm, MPI_Status *status);
void CODES_MPI_Barrier(MPI_Comm comm);
void CODES_MPI_Isend(const void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request *request);
void CODES_MPI_Irecv(void *buf, int count, MPI_Datatype datatype, int source, int tag, MPI_Comm comm, MPI_Request *request);
void CODES_MPI_Waitall(int count, MPI_Request array_of_requests[], MPI_Status array_of_statuses[]);
void CODES_MPI_Reduce(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm);
void CODES_MPI_Allreduce(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm);
void CODES_MPI_Bcast(void *buffer, int count, MPI_Datatype datatype, int root, MPI_Comm comm);
void CODES_MPI_Alltoall(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm);
void CODES_MPI_Alltoallv(const void *sendbuf, const int *sendcounts, const int *sdispls, MPI_Datatype sendtype, void *recvbuf, const int *recvcounts, const int *rdispls, MPI_Datatype recvtype, MPI_Comm comm);

void codes_conceptual_add_bench(struct codes_conceptual_bench const *method);

/* implementation structure */
struct codes_conceptual_bench {
    char *program_name;    /* name of the conceptual program */
    int (*conceptual_main)(int argc, char *argv[]);
};

void codes_conceptual_add_bench(struct codes_conceptual_bench const *method);

#ifdef __cplusplus
}
#endif
...
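The header above fixes the contract a translated coNCePTuaL program has to satisfy: an entry point with the new (int argc, char *argv[]) signature that goes through the CODES_MPI_* wrappers, plus a codes_conceptual_bench descriptor. The following is a minimal hand-written sketch, not part of this commit; the benchmark name "mybench" and its functions are hypothetical, only the declarations shown in the header are assumed:

/* Hypothetical example (not from this commit): a benchmark skeleton
 * wired up against codes/codes-conc-addon.h as shown above. */
#include "codes/codes-conc-addon.h"

/* entry point matching the new conceptual_main signature */
static int mybench_main(int argc, char *argv[])
{
    int rank, size;
    (void)argc; (void)argv;
    CODES_MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    CODES_MPI_Comm_size(MPI_COMM_WORLD, &size);
    /* benchmark body would issue CODES_MPI_Send/CODES_MPI_Recv/etc. here */
    (void)rank; (void)size;
    CODES_MPI_Finalize();
    return 0;
}

/* descriptor picked up via codes_conceptual_add_bench() or the generated
 * bench_array in src/workload/codes-conc-addon.c */
struct codes_conceptual_bench mybench_bench = {
    .program_name = "mybench",
    .conceptual_main = mybench_main,
};

For generated skeletons, translate_conc_src.py (below) automates exactly this wiring: it rewrites the ncptl-generated C file and inserts the matching extern declaration and Makefile.am entry.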
configure.ac
@@ -121,18 +121,32 @@ AM_CONDITIONAL(USE_DARSHAN, [test "x${use_darshan}" = xyes])
# check for Argobots
AC_ARG_WITH([online],[AS_HELP_STRING([--with-online@<:@=DIR@:>@],
                    [Build with the online workloads and argobots support])],
                    [use_online=yes],[use_online=no])
if test "x${use_online}" != "xno" ; then
                    [Build with the online workloads and argobots support])])
if test "x${with_online}" != "x" ; then
    AM_CONDITIONAL(USE_ONLINE, true)
    PKG_CHECK_MODULES_STATIC([ARGOBOTS], [argobots], [],
        [AC_MSG_ERROR([Could not find working argobots installation via pkg-config])])
    AC_DEFINE_UNQUOTED([SWM_DATAROOTDIR], ["${use_online}"], [if using json data files])
    AC_DEFINE_UNQUOTED([ONLINE_CONFIGDIR], ["$with_online"], [if using json data files, specify config directory])
else
    AM_CONDITIONAL(USE_ONLINE, false)
fi

#check for SWM
AC_ARG_WITH([swm],[AS_HELP_STRING([--with-swm@<:@=DIR@:>@],
                    [location of SWM installation])])
if test "x${with_swm}" != "x" ; then
    AM_CONDITIONAL(USE_SWM, true)
    PKG_CHECK_MODULES_STATIC([SWM], [swm], [],
        [AC_MSG_ERROR([Could not find working swm installation via pkg-config])])
    PKG_CHECK_VAR([SWM_DATAROOTDIR], [swm], [datarootdir], [],
        [AC_MSG_ERROR[Could not find shared directory in SWM]])
    AC_DEFINE_UNQUOTED([SWM_DATAROOTDIR], ["$SWM_DATAROOTDIR"], [if using json data files])
else
    AM_CONDITIONAL(USE_SWM, false)
fi

#check for Conceptual
AC_ARG_WITH([conceptual],[AS_HELP_STRING([--with-conceptual@<:@=DIR@:>@],
                    [location of Conceptual installation])])
...
scripts/conceptual_benchmarks/latency-all.ncptl (new file, mode 100644)
# An all-pairs ping-pong latency test written in coNCePTuaL
# By Scott Pakin <pakin@lanl.gov>
Require language version "1.5".
# Parse the command line.
reps is "Number of repetitions of each message size" and comes from "--reps" or "-r" with default 1000.
maxbytes is "Maximum number of bytes to transmit" and comes from "--maxbytes" or "-m" with default 1M.
# Ensure that we have a peer with whom to communicate.
Assert that "the latency test requires at least two tasks" with num_tasks>=2.
# Perform the benchmark.
For each msgsize in {0}, {1, 2, 4, ..., maxbytes} {
for reps repetitions {
tasks ev such that ev is even reset their counters then
tasks ev such that ev is even send a msgsize byte message to task ev+1 then
tasks od such that od is odd send a msgsize byte message to task od-1 then
tasks ev such that ev is even log the msgsize as "Bytes" and
the median of elapsed_usecs/2 as "1/2 RTT (usecs)"
} then
tasks ev such that ev is even compute aggregates
}
scripts/conceptual_benchmarks/latency.ncptl (new file, mode 100644)
# A ping-pong latency test written in coNCePTuaL
Require language version "1.5".
# Parse the command line.
reps is "Number of repetitions of each message size" and comes from
"--reps" or "-r" with default 1000.
maxbytes is "Maximum number of bytes to transmit" and comes from
"--maxbytes" or "-m" with default 1M.
# Ensure that we have a peer with whom to communicate.
Assert that "the latency test requires at least two tasks" with num_tasks>=2.
# Perform the benchmark.
For each msgsize in {0}, {1, 2, 4, ..., maxbytes} {
for reps repetitions {
task 0 resets its counters then
task 0 sends a msgsize byte message to task 1 then
task 1 sends a msgsize byte message to task 0 then
task 0 logs the msgsize as "Bytes" and
the median of elapsed_usecs/2 as "1/2 RTT (usecs)"
} then
task 0 computes aggregates
}
scripts/translate_conc_src.py → scripts/conceptual_benchmarks/translate_conc_src.py (renamed)
@@ -6,7 +6,7 @@ MPI_OPS = [ 'MPI_Send', 'MPI_Recv', 'MPI_Barrier', 'MPI_Isend', 'MPI_Irecv', 'MP
            'MPI_Reduce', 'MPI_Allreduce', 'MPI_Bcast', 'MPI_Alltoall', 'MPI_Alltoallv', 'MPI_Comm_size', 'MPI_Comm_rank']
LOG = ['logfiletmpl_default', 'ncptl_log_compute_aggregates', 'ncptl_log_commit_data']
LOG = ['logfiletmpl_default', 'ncptl_log_write', 'ncptl_log_compute_aggregates', 'ncptl_log_commit_data']

def eliminate_logging(inLines):
    for idx, line in enumerate(inLines):
...
@@ -27,6 +27,30 @@ def eliminate_logging(inLines):
            if elem in line:
                inLines[idx] = "//" + line

def eliminate_conc_init(inLines):
    for idx, line in enumerate(inLines):
        if 'NCPTL_RUN_TIME_VERSION' in line:
            inLines[idx] = "//" + line
        if 'atexit (conc_exit_handler)' in line:
            inLines[idx] = "//" + line
        if 'Inform the run-time library' in line:
            for i in range(1, 4):
                inLines[idx+i] = "//" + inLines[idx+i]

def make_static_var(inLines):
    for idx, line in enumerate(inLines):
        if 'Dummy variable to help mark other variables as used' in line:
            inLines[idx+1] = "static " + inLines[idx+1]
        if 'void conc_mark_variables_used' in line:
            inLines[idx] = "static " + line
        if '/* Program-specific variables */' in line:
            start = idx + 1
        if '* Function declarations *' in line:
            end = idx - 2
    for i in range(start, end):
        inLines[i] = "static " + inLines[i]

def manipulate_mpi_ops(inLines, program_name):
    for idx, line in enumerate(inLines):
...
@@ -45,6 +69,8 @@ def manipulate_mpi_ops(inLines, program_name):
            elif 'mpiresult = MPI_Finalize();' in line:
                inLines[idx] = "CODES_MPI_Finalize();"
                inLines[idx+2] = "exitcode = 0;"
            elif 'MPI_Comm_get_attr' in line:
                inLines[idx] = "//" + line
            else:
                for ops in MPI_OPS:
                    if ops in line:
...
@@ -64,12 +90,20 @@ def adding_struct(inLines, program_name):
            inLines.insert(idx-1, codes_include)
            break
    for idx, line in enumerate(inLines):
        if "* Global variables *" in line:
            for i in range(len(new_struct)-1, -1, -1):
                inLines.insert(idx-1, new_struct[i])
    # adding struct at the end
    for i in range(0, len(new_struct)):
        inLines.append(new_struct[i])

def insert_if_not_exist(content, idx, hls):
    exist = False
    for i in range(idx[0], idx[1]):
        if hls[i] in content:
            exist = True
            break
    if not exist:
        hls.insert(idx[0], content)

def translate_conc_to_codes(filepath, codespath):
    # get program name
...
@@ -77,49 +111,73 @@ def translate_conc_to_codes(filepath, codespath):
    with open(filepath, 'r') as infile:
        content = infile.read()
    # print content
    inLines = content.split('\n')
    eliminate_logging(inLines)
    eliminate_conc_init(inLines)
    make_static_var(inLines)
    manipulate_mpi_ops(inLines, program_name)
    adding_struct(inLines, program_name)

    # output program file
    with open(codespath+"src/workload/methods/conc-"+program_name+".c", "w+") as outFile:
    with open(codespath+"src/workload/conceputal-skeleton-apps/conc-"+program_name+".c", "w+") as outFile:
        outFile.writelines(["%s\n" % item for item in inLines])

    # modify interface file
    program_struct = "extern struct codes_conceptual_bench "+program_name+"_bench;\n"
    program_struct_idx = []
    program_definition = " &"+program_name+"_bench,\n"
    program_definition_idx = []
    with open(codespath+"src/workload/codes-conc-addon.c", "r+") as header:
        hls = header.readlines()
        for idx, line in enumerate(hls):
            if '/* list of available benchmarks begin */' in line and program_struct not in hls[idx+1]:
                hls.insert(idx+1, program_struct)
            elif '/* default benchmarks begin */' in line and program_definition not in hls[idx+1]:
                hls.insert(idx+1, program_definition)
            if '/* list of available benchmarks begin */' in line:
                program_struct_idx.append(idx+1)
            elif '/* list of available benchmarks end */' in line:
                program_struct_idx.append(idx)
        insert_if_not_exist(program_struct, program_struct_idx, hls)
        for idx, line in enumerate(hls):
            if '/* default benchmarks begin */' in line:
                program_definition_idx.append(idx+1)
            elif '/* default benchmarks end */' in line:
                program_definition_idx.append(idx)
        insert_if_not_exist(program_definition, program_definition_idx, hls)
        header.seek(0)
        header.writelines(hls)

    # modify makefile
    program_compile = "src_libcodes_la_SOURCES += src/workload/methods/conc-"+program_name+".c\n"
    program_compile = "src_libcodes_la_SOURCES += src/workload/conceputal-skeleton-apps/conc-"+program_name+".c\n"
    program_compile_idx = []
    with open(codespath+"Makefile.am", "r+") as makefile:
        mfls = makefile.readlines()
        for idx, line in enumerate(mfls):
            if "CONCEPTUAL_LIBS" in line and program_compile not in mfls[idx+1]:
                mfls.insert(idx+1, program_compile)
            if "CONCEPTUAL_LIBS" in line:
                program_compile_idx.append(idx+1)
                break
        for i in range(program_compile_idx[0], len(mfls)):
            if 'endif' in mfls[i]:
                program_compile_idx.append(i)
                break
        insert_if_not_exist(program_compile, program_compile_idx, mfls)
        makefile.seek(0)
        makefile.writelines(mfls)

if __name__ == "__main__":
    if len(sys.argv) != 3:
        print 'Need 2 arguments: 1. path to files to be converted\t2. path to CODES directory'
    if len(sys.argv) != 4:
        print 'Need 2 arguments: 1. path to files to be converted\t2. path to CODES directory\t3. path to ncptl executable'
        sys.exit(1)
    os.chdir(sys.argv[1])
    for benchfile in next(os.walk(sys.argv[1]))[2]:    # for all files
        translate_conc_to_codes(sys.argv[1]+benchfile, sys.argv[2])
        if benchfile.lower().endswith('.ncptl'):
            cfile = benchfile.replace('.ncptl', '.c')
            cfile = cfile.replace("-", "")
            os.system(sys.argv[3]+' --backend=c_mpi --no-compile '+benchfile+' --output '+cfile)
            print "adding bench file: %s" % cfile
            translate_conc_to_codes(sys.argv[1]+cfile, sys.argv[2])
...
src/network-workloads/model-net-mpi-replay.c
@@ -1960,7 +1960,7 @@ void nw_test_init(nw_state* s, tw_lp* lp)
        strcpy(params_d.cortex_gen, cortex_gen);
#endif
    }
    else if(strcmp(workload_type, "online") == 0){
    else if(strcmp(workload_type, "swm-online") == 0){
        online_comm_params oc_params;
...
@@ -1973,15 +1973,34 @@ void nw_test_init(nw_state* s, tw_lp* lp)
            strcpy(oc_params.workload_name, file_name_of_job[lid.job]);
        }
        //assert(strcmp(oc_params.workload_name, "lammps") == 0 || strcmp(oc_params.workload_name, "nekbone") == 0);
        /*TODO: nprocs is different for dumpi and online workload. for
         * online, it is the number of ranks to be simulated. */
        oc_params.nprocs = num_traces_of_job[lid.job];
        params = (char*)&oc_params;
        strcpy(type_name, "online_comm_workload");
        strcpy(type_name, "swm_online_comm_workload");
    }
    else if(strcmp(workload_type, "conc-online") == 0){
        online_comm_params oc_params;
        if(strlen(workload_name) > 0)
        {
            strcpy(oc_params.workload_name, workload_name);
        }
        else if(strlen(workloads_conf_file) > 0)
        {
            strcpy(oc_params.workload_name, file_name_of_job[lid.job]);
        }
        //assert(strcmp(oc_params.workload_name, "lammps") == 0 || strcmp(oc_params.workload_name, "nekbone") == 0);
        /*TODO: nprocs is different for dumpi and online workload. for
         * online, it is the number of ranks to be simulated. */
        oc_params.nprocs = num_traces_of_job[lid.job];
        params = (char*)&oc_params;
        strcpy(type_name, "conc_online_comm_workload");
    }
    s->app_id = lid.job;
    s->local_rank = lid.rank;
...
@@ -2446,8 +2465,10 @@ void nw_test_finalize(nw_state* s, tw_lp* lp)
        if(s->nw_id >= (tw_lpid)num_net_traces)
            return;
    }
    if(strcmp(workload_type, "online") == 0)
        codes_workload_finalize("online_comm_workload", params, s->app_id, s->local_rank);
    if(strcmp(workload_type, "swm-online") == 0)
        codes_workload_finalize("swm-online_comm_workload", params, s->app_id, s->local_rank);
    if(strcmp(workload_type, "conc-online") == 0)
        codes_workload_finalize("conc-online_comm_workload", params, s->app_id, s->local_rank);
    struct msg_size_info * tmp_msg = NULL;
    struct qlist_head * ent = NULL;
...
@@ -2731,7 +2752,7 @@ int modelnet_mpi_replay(MPI_Comm comm, int* argc, char*** argv )
#endif
    codes_comm_update();

    if(strcmp(workload_type, "dumpi") != 0 && strcmp(workload_type, "online") != 0)
    if(strcmp(workload_type, "dumpi") != 0 && strcmp(workload_type, "swm-online") != 0 && strcmp(workload_type, "conc-online") != 0)
    {
        if(tw_ismaster())
            printf("Usage: mpirun -np n ./modelnet-mpi-replay --sync=1/3"
...
src/workload/codes-conc-addon.c
@@ -10,14 +10,16 @@
/* list of available benchmarks begin */
extern struct codes_conceptual_bench latencyall_bench;
extern struct codes_conceptual_bench latency_bench;
/* list of available benchmarks end */

static struct codes_conceptual_bench const * bench_array_default[] =
{
    /* default benchmarks begin */
    &latencyall_bench,
    &latency_bench,
    /* default benchmark end */
    /* default benchmarks end */
    NULL
};
...
@@ -52,8 +54,8 @@ static void init_bench_methods(void)
int codes_conc_bench_load(
        const char *program,
        int *argc, const char *argv[])
        int argc, char *argv[])
{
    init_bench_methods();
...
@@ -73,7 +75,7 @@ int codes_conc_bench_load(
            return(i);
        }
    }
    fprintf(stderr, "Error: failed to find workload generator %s\n", program);
    fprintf(stderr, "Error: failed to find benchmark program %s\n", program);
    return(-1);
}
...
@@ -94,7 +96,6 @@ void codes_conceptual_add_bench(struct codes_conceptual_bench const * bench)
            bench_array_cap * sizeof(*bench_array));
        assert(bench_array);
    }
    bench_array[num_user_benchs++] = bench;
}
...
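For reference, a caller-side sketch of the updated codes_conc_bench_load() signature. This is hypothetical (the wrapper name is invented); the hunks above only show that the function now takes (int argc, char *argv[]) and returns the index of the matching benchmark, or -1 when the name is unknown:

/* Hypothetical usage sketch, assuming only what the hunks above show. */
#include <stdio.h>
#include "codes/codes-conc-addon.h"

static int load_bench_or_warn(const char *program, int argc, char *argv[])
{
    int idx = codes_conc_bench_load(program, argc, argv);
    if (idx < 0)
        fprintf(stderr, "benchmark program %s not registered\n", program);
    return idx;
}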
src/workload/codes-workload-dump.c
@@ -215,7 +215,7 @@ int main(int argc, char *argv[])
            wparams = (char*)&d_params;
        }
    }
    else if(strcmp(type, "online_comm_workload") == 0){
    else if(strcmp(type, "swm_online_comm_workload") == 0 || strcmp(type, "conc_online_comm_workload") == 0){
        if(n == -1){
            fprintf(stderr, "Expected \"--num-ranks\" argument for online workload\n");
...
@@ -448,7 +448,7 @@ int main(int argc, char *argv[])
        }
    } while(op.op_type != CODES_WK_END);

    if(strcmp(type, "online_comm_workload") == 0)
    if(strcmp(type, "swm_online_comm_workload") == 0 || strcmp(type, "conc_online_comm_workload") == 0)
    {
        codes_workload_finalize(type, wparams, 0, i);
    }
...
src/workload/codes-workload.c
@@ -34,9 +34,14 @@ extern struct codes_workload_method darshan_mpi_io_workload_method;
#ifdef USE_RECORDER
extern struct codes_workload_method recorder_io_workload_method;
#endif
#ifdef USE_ONLINE
extern struct codes_workload_method online_comm_workload_method;
#ifdef USE_SWM
extern struct codes_workload_method swm_online_comm_workload_method;
#endif
#ifdef USE_CONC
extern struct codes_workload_method conc_online_comm_workload_method;
#endif
extern struct codes_workload_method checkpoint_workload_method;
extern struct codes_workload_method iomock_workload_method;
...
@@ -58,9 +63,13 @@ static struct codes_workload_method const * method_array_default[] =
#endif
#endif
#ifdef USE_ONLINE
    &online_comm_workload_method,
#ifdef USE_SWM
    &swm_online_comm_workload_method,
#endif
#ifdef USE_CONC
    &conc_online_comm_workload_method,
#endif
#ifdef USE_RECORDER
    &recorder_io_workload_method,
#endif
...
src/workload/methods/conc-latency.c → src/workload/conceputal-skeleton-apps/conc-latency.c (renamed)
/**********************************************************************
 * This file was generated by coNCePTuaL on Fri Aug 10 04:47:59 2018
 * This file was generated by coNCePTuaL on Thu Oct 4 23:46:17 2018
 * using the c_mpi backend (C + MPI).
 * Do not modify this file; modify /Users/xin/macworkspace/conceptual-1.5.1/examples/latency.ncptl instead.
 * Do not modify this file; modify /Users/xin/macworkspace/codes-dev/codes/scripts/conceptual_benchmarks/latency.ncptl instead.
 *
 * Entire source program
 * ---------------------
...
@@ -319,12 +319,6 @@ double incval; /* Loop-variable increment */
  } u;
} LOOPBOUNDS;

/* fill in function pointers for this method */
struct codes_conceptual_bench latency_bench =
{
    .program_name = "latency",
    .conceptual_main = latency_main,
};

/********************
 * Global variables *
 ********************/
...
@@ -341,7 +335,7 @@ static ncptl_int var_elapsed_usecs = 0; /* Elapsed time in microseconds */
static ncptl_int var_total_bytes = 0;   /* Sum of bytes sent and bytes received */

/* Dummy variable to help mark other variables as used */
union {
static union {
  ncptl_int ni;
  int i;
  void *vp;
...
@@ -382,8 +376,8 @@ static ncptl_int mpi_tag_ub; /* Upper bound on an MPI tag value */
static ncptl_int conc_mcast_tallies[CONC_MCAST_MPI_NUM_FUNCS] = {0};   /* Tallies of (static) multicast implementation functions */

/* Program-specific variables */
ncptl_int var_reps;   /* Number of repetitions of each message size (command-line argument) */
ncptl_int var_maxbytes;   /* Maximum number of bytes to transmit (command-line argument) */
static ncptl_int var_reps;   /* Number of repetitions of each message size (command-line argument) */
static ncptl_int var_maxbytes;   /* Maximum number of bytes to transmit (command-line argument) */

/*************************
 * Function declarations *
...
@@ -559,7 +553,7 @@ ncptl_fatal ("Internal error -- unknown incrementer");
/* Inhibit the compiler from complaining that
 * certain variables are defined but not used.
 * This function should never be called. */
void conc_mark_variables_used (void)
static void conc_mark_variables_used (void)
{
  conc_dummy_var.ni = var_bytes_received;
  conc_dummy_var.ni = var_msgs_received;
...
@@ -614,7 +608,7 @@ NCPTL_CMDLINE arguments[] = {
};

/* Incorporate the complete coNCePTuaL source code as an array
 * for use by ncptl_log_write_prologue(). */
// * for use by ncptl_log_write_prologue(). */
char *sourcecode[] = {
  "# A ping-pong latency test written in coNCePTuaL",
  "",
...
@@ -670,8 +664,8 @@ mpi_is_running = 1;
/* Initialize the coNCePTuaL run-time library. */
if (!help_only)
ncptl_init (NCPTL_RUN_TIME_VERSION, argv[0]);
(void) atexit (conc_exit_handler);
//ncptl_init (NCPTL_RUN_TIME_VERSION, argv[0]);
//(void) atexit (conc_exit_handler);

/* Initialize the communication routines needed by the c_mpi backend. */
//(void) MPI_Errhandler_create ((MPI_Handler_function *)handle_MPI_error, &mpi_error_handler);
...
@@ -679,7 +673,7 @@ ncptl_init (NCPTL_RUN_TIME_VERSION, argv[0]);
(void) CODES_MPI_Comm_rank(MPI_COMM_WORLD, &physrank);
(void) CODES_MPI_Comm_size(MPI_COMM_WORLD, &num_tasks);
var_num_tasks = (ncptl_int) num_tasks;
(void) MPI_Comm_get_attr(MPI_COMM_WORLD, MPI_TAG_UB, &attr_val, &attr_flag);
//(void) MPI_Comm_get_attr(MPI_COMM_WORLD, MPI_TAG_UB, &attr_val, &attr_flag);
mpi_tag_ub = (ncptl_int) (attr_flag ? *(int *)attr_val : 32767);

/* Generate and broadcast a UUID. */
...
@@ -715,7 +709,7 @@ virtrank = ncptl_physical_to_virtual (procmap, physrank);
//
// /* Open the log file and write some standard prologue information to it. */
//logstate = ncptl_log_open (logfiletmpl, physrank);
//ncptl_log_write_prologue (logstate, argv[0], logfile_uuid, "c_mpi", "C + MPI",
// //ncptl_log_write_prologue (logstate, argv[0], logfile_uuid, "c_mpi", "C + MPI",
//var_num_tasks,
//arguments, sizeof(arguments)/sizeof(NCPTL_CMDLINE),
//sourcecode);
...
@@ -1167,8 +1161,8 @@ case 0:
if (!suppress_output) {
  uint64_t stop_elapsed_usecs = ncptl_time();
  var_elapsed_usecs = stop_elapsed_usecs - starttime;
  ncptl_log_write (logstate, 0, "Bytes", NCPTL_FUNC_ONLY, 0.0, (double)thisev->s.code.var_msgsize);
  ncptl_log_write (logstate, 1, "1/2 RTT (usecs)", NCPTL_FUNC_MEDIAN, 0.0, ((double)var_elapsed_usecs)/(2.0));
  //ncptl_log_write (logstate, 0, "Bytes", NCPTL_FUNC_ONLY, 0.0, (double)thisev->s.code.var_msgsize);
  //ncptl_log_write (logstate, 1, "1/2 RTT (usecs)", NCPTL_FUNC_MEDIAN, 0.0, ((double)var_elapsed_usecs)/(2.0));
  starttime += ncptl_time() - stop_elapsed_usecs;
}
break;
...
@@ -1222,13 +1216,13 @@ int exitcode = 0; /* Program exit code (to pass to exit()) */
//
// /* Write a standard epilogue to the log file. */
////ncptl_log_commit_data (logstate);
//ncptl_log_write_epilogue (logstate);
// //ncptl_log_write_epilogue (logstate);
//ncptl_log_close (logstate);

/* Inform the run-time library that it's no longer needed. */
ncptl_queue_empty (eventqueue);
ncptl_free (eventqueue);
ncptl_finalize ();
//ncptl_queue_empty (eventqueue);
//ncptl_free (eventqueue);
//ncptl_finalize();

/* Finalization code specific to the c_mpi backend */
CODES_MPI_Finalize();
...
@@ -1267,3 +1261,9 @@ conc_process_events (eventlist, 0, numevents-1, 1);
return conc_finalize();
}

/* fill in function pointers for this method */
struct codes_conceptual_bench latency_bench =
{
    .program_name = "latency",
    .conceptual_main = latency_main,
};
scripts/conceptual_benchmarks/latency.c → src/workload/conceputal-skeleton-apps/conc-latencyall.c (renamed)
This diff is collapsed.
src/workload/methods/codes-online-comm-wrkld-dev.C → src/workload/methods/codes-conc-online-comm-wrkld.C (renamed)
This diff is collapsed.
src/workload/methods/codes-online-comm-wrkld.C
@@ -923,10 +923,10 @@ static int comm_online_workload_finalize(const char* params, int app_id, int ran
}
extern "C" {
/* workload method name and function pointers for the CODES workload API */
struct codes_workload_method online_comm_workload_method =
struct codes_workload_method swm_online_comm_workload_method =
{
    //.method_name =
    (char*)"online_comm_workload",
    (char*)"swm_online_comm_workload",
    //.codes_workload_read_config =
    NULL,
    //.codes_workload_load =
...
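The rename above pairs with a new Conceptual-side method in src/workload/methods/codes-conc-online-comm-wrkld.C, whose diff is collapsed in this view. A speculative sketch of how its descriptor presumably looks, mirroring the SWM one and the conc_online_comm_workload name referenced from codes-workload.c and model-net-mpi-replay.c; everything beyond the name string is assumed:

/* Speculative sketch only -- the real definition lives in the collapsed
 * codes-conc-online-comm-wrkld.C diff. Assumes codes/codes-workload.h
 * declares struct codes_workload_method with method_name as its first
 * field, as the initializer comments above suggest. */
#include "codes/codes-workload.h"

struct codes_workload_method conc_online_comm_workload_method =
{
    //.method_name =
    (char*)"conc_online_comm_workload",
    //.codes_workload_read_config =
    NULL,
    /* remaining callbacks omitted in this sketch */
};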