ExaHDF5 / node_local_storage / Commits

Commit b2968518
Authored Apr 23, 2020 by Huihuo Zheng

changed to MPI_THREAD_MULTIPLE

Parent: d5a75058
Changes: 6 files
hdf5_write/H5SSD.c
@@ -8,6 +8,9 @@
   We create a pthread for doing I/O work using a first-in-first-out
   framework.
   Notice that in order for this to work, one has to set
   * MPI_Init_thread(..., ..., MPI_THREAD_MULTIPLE, ...)
   Huihuo Zheng <huihuo.zheng@anl.gov>
   1/24/2020
 */
...
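The comment above describes the core design: writes are handed off to a dedicated pthread that drains a first-in-first-out queue, which is why MPI must be initialized with MPI_THREAD_MULTIPLE (the main thread and the I/O thread may both be inside MPI/HDF5 at the same time). Below is a minimal, self-contained sketch of such a FIFO worker; the task struct, queue, and function names are illustrative placeholders, not the repository's H5SSD implementation.

/* Illustrative sketch only: a FIFO queue drained by a background I/O
 * pthread. Names (task_t, io_worker, enqueue) are placeholders and do
 * not come from H5SSD.c.                                              */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct task_t {
  size_t size;                 /* bytes staged for this write          */
  struct task_t *next;         /* FIFO link                            */
} task_t;

static task_t *head = NULL, *tail = NULL;
static int done = 0;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;

/* Background thread: pop tasks in arrival order and perform the write.
 * If the write issues MPI or collective HDF5 calls while the main
 * thread is also inside MPI, MPI_THREAD_MULTIPLE is required.         */
static void *io_worker(void *arg) {
  (void)arg;
  for (;;) {
    pthread_mutex_lock(&lock);
    while (head == NULL && !done) pthread_cond_wait(&cond, &lock);
    if (head == NULL) { pthread_mutex_unlock(&lock); break; }
    task_t *t = head;          /* first in, first out                  */
    head = t->next;
    if (head == NULL) tail = NULL;
    pthread_mutex_unlock(&lock);
    printf("io_worker: wrote %zu bytes\n", t->size);  /* stand-in I/O  */
    free(t);
  }
  return NULL;
}

/* Producer side (called from the main thread): append a task. */
static void enqueue(size_t size) {
  task_t *t = malloc(sizeof *t);
  t->size = size;
  t->next = NULL;
  pthread_mutex_lock(&lock);
  if (tail) tail->next = t; else head = t;
  tail = t;
  pthread_cond_signal(&cond);
  pthread_mutex_unlock(&lock);
}

int main(void) {
  pthread_t tid;
  pthread_create(&tid, NULL, io_worker, NULL);
  for (size_t i = 1; i <= 3; i++) enqueue(i * 1024);
  pthread_mutex_lock(&lock);
  done = 1;                    /* no more tasks will arrive            */
  pthread_cond_signal(&cond);
  pthread_mutex_unlock(&lock);
  pthread_join(tid, NULL);
  return 0;
}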
hdf5_write/H5SSD.h
@@ -25,11 +25,10 @@ typedef struct _thread_data_t {
  hid_t file_space_id;
  hid_t xfer_plist_id;
  int id;
  hid_t offset;
  hid_t offset;    // offset in the files on SSD
  hsize_t size;
  void *buf;
  struct _thread_data_t *next;
} thread_data_t;
void check_pthread_data(thread_data_t *pt);
...
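The next pointer is what chains pending writes into the I/O thread's FIFO queue. The header only declares check_pthread_data; as an illustration, a hypothetical body that walks such a queue and reports each pending task could look like the sketch below (not the repository's definition).

/* Hypothetical body for check_pthread_data -- H5SSD.h only declares the
 * prototype, so this is an illustrative sketch, not the actual code.   */
#include <stdio.h>
#include "H5SSD.h"

void check_pthread_data(thread_data_t *pt) {
  /* Walk the FIFO starting from the given node and report each task. */
  for (thread_data_t *t = pt; t != NULL; t = t->next)
    printf("task %d: size = %llu bytes, buf = %p\n",
           t->id, (unsigned long long)t->size, t->buf);
}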
hdf5_write/Makefile
@@ -2,13 +2,13 @@
#!/bin/sh
include make.inc

%.o : %.c
	$(CXX) $(CFLAGS) -o $@ -c $<

%.o : %.cpp
	$(CXX) $(CFLAGS) -o $@ -c $<

all: parallel_hdf5

parallel_hdf5: H5SSD.o parallel_hdf5.o ../utils/debug.o
	$(CXX) $(CFLAGS) -I./ -o parallel_hdf5.x parallel_hdf5.o H5SSD.o ../utils/debug.o $(LIBS)

parallel_hdf5.o: parallel_hdf5.cpp
	$(CXX) $(CFLAGS) -o parallel_hdf5.o -c parallel_hdf5.cpp

H5SSD.o: H5SSD.c
	$(CXX) $(CFLAGS) -o H5SSD.o -c H5SSD.c

clean:
	rm -rf *.x *.h5 *.hdf5 *.o
hdf5_write/README.md
@@ -5,6 +5,8 @@ Huihuo Zheng @ Argonne Leadership Computing Facility
This folder contains the source code for system-aware optimization of the HDF5 custom collective VFD.
Important: in order to use this feature, one has to use MPI_THREAD_MULTIPLE.
* H5SSD.c, H5SSD.h - files for HDF5 functions incorporating node-local storage.
...
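Since the whole feature depends on MPI_THREAD_MULTIPLE being granted, it is worth verifying the threading level that MPI_Init_thread actually provides rather than only requesting it. A minimal sketch using only standard MPI calls (illustrative, not code from this repository):

/* Request MPI_THREAD_MULTIPLE and verify it was granted before relying on
 * a background I/O thread that makes MPI/HDF5 calls. Illustrative only.  */
#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv) {
  int provided = 0;
  MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
  if (provided < MPI_THREAD_MULTIPLE) {
    fprintf(stderr, "MPI_THREAD_MULTIPLE not granted (provided = %d)\n",
            provided);
    MPI_Abort(MPI_COMM_WORLD, 1);
  }
  /* ... set up the HDF5 file, dataset, and node-local staging here ... */
  MPI_Finalize();
  return 0;
}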
hdf5_write/parallel_hdf5.cpp
@@ -76,8 +76,10 @@ int main(int argc, char **argv) {
  MPI_Info info = MPI_INFO_NULL;
  int rank, nproc, provided;
  MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
  MPI_Comm_size(comm, &nproc);
  MPI_Comm_rank(comm, &rank);
  if (rank == 0) cout << "provided: " << provided << endl;
  Timing tt(rank == io_node());
  //printf(" MPI: I am rank %d of %d \n", rank, nproc);
  // find local array dimension and offset;
...
@@ -137,10 +139,11 @@ int main(int argc, char **argv) {
  hsize_t count[2] = {1, 1};
  tt.start_clock("Init_array");
  for (int j = 0; j < ldims[0]*ldims[1]; j++)
    data[j] = 0;
  tt.stop_clock("Init_array");
  for (int i = 0; i < niter; i++) {
    for (int j = 0; j < ldims[0]*ldims[1]; j++)
      data[j] = i;
    offset[0] = i*gdims[0] + rank*ldims[0];
    // select hyperslab
    H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, NULL, ldims, count);
...
hdf5_write/run.py
@@ -23,8 +23,8 @@ if hostname.find("theta")!=-1:
    parser.add_argument("--num_nodes", default=int(os.environ['COBALT_JOBSIZE']), type=int)
    parser.add_argument("--ppn", default=32, type=int)
else:
    # root="/Users/zhenghh/Documents/Research/ExaHDF5/io_benchmarks/ssd_cache/hdf5/"
    root = "/gpfs/mira-home/hzheng/io_benchmarks/node_local_storage/hdf5/"
    root = "/Users/zhenghh/Documents/Research/ExaHDF5/io_benchmarks/ssd_cache/hdf5_write/"
    # root="/gpfs/mira-home/hzheng/io_benchmarks/node_local_storage/hdf5/"
    parser.add_argument("--num_nodes", default=1, type=int)
    parser.add_argument("--SSD", default="SSD")
    parser.add_argument("--ppn", default=2, type=int)
...
@@ -71,7 +71,7 @@ if hostname.find("theta")!=-1:
    print("cd %s; aprun -n %s -N %s %s --dim %s %s --scratch %s --niter %s %s |& tee %s; cd - " % (args.directory, args.num_nodes*args.ppn, args.ppn, exe, d1, d2, args.lustre, args.niter, extra_opts, root+args.directory+"/"+args.output))
    os.system("cd %s; aprun -n %s -N %s %s --dim %s %s --scratch %s --niter %s %s |& tee %s; cd - " % (args.directory, args.num_nodes*args.ppn, args.ppn, exe, d1, d2, args.lustre, args.niter, extra_opts, root+args.directory+"/"+args.output))
else:
    cmd = "cd %s; %s mpirun -np %s %s --lustre %s --niter %s %s | tee %s; cd -" % (args.directory, env, args.ppn, exe, args.lustre, args.niter, extra_opts, args.output)
    cmd = "cd %s; %s mpirun -np %s %s --lustre %s --niter %s %s --dim %s %s | tee %s; cd -" % (args.directory, env, args.ppn, exe, args.lustre, args.niter, extra_opts, d1, d2, args.output)
    print(cmd)
    os.system(cmd)
...