Commit b2968518 authored by Huihuo Zheng

changed to MPI_THREAD_MULTIPLE

parent d5a75058
@@ -8,6 +8,9 @@
We create a pthread for doing I/O work using a first-in-first-out
framework.
Note that for this to work, MPI must be initialized with
* MPI_Init_thread(..., ..., MPI_THREAD_MULTIPLE, ...)
Huihuo Zheng <huihuo.zheng@anl.gov>
1/24/2020
*/
......
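For reference, a minimal sketch of the initialization the header comment above calls for; the abort-on-mismatch handling is an assumption for illustration, not part of this commit:

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv) {
  int provided;
  /* Request full thread support so the background I/O pthread
     can make MPI calls concurrently with the main thread. */
  MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
  if (provided < MPI_THREAD_MULTIPLE) {
    /* Assumed policy: fail fast if the MPI library cannot comply. */
    fprintf(stderr, "MPI_THREAD_MULTIPLE not available (provided=%d)\n", provided);
    MPI_Abort(MPI_COMM_WORLD, 1);
  }
  /* ... application work ... */
  MPI_Finalize();
  return 0;
}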
@@ -25,11 +25,10 @@ typedef struct _thread_data_t {
hid_t file_space_id;
hid_t xfer_plist_id;
int id;
hid_t offset;
hid_t offset; // offset within the file on the SSD
hsize_t size;
void *buf;
struct _thread_data_t *next;
} thread_data_t;
void check_pthread_data(thread_data_t *pt);
......
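To make the first-in-first-out framework concrete, a hedged sketch of how a write request could be appended through the next pointer; the head/tail pointers, mutex, and condition variable are illustrative assumptions that do not appear in this hunk:

#include <pthread.h>

/* Hypothetical queue state: compute ranks append at 'tail',
   the background I/O pthread pops from 'head'. */
static thread_data_t *head = NULL, *tail = NULL;
static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t q_nonempty = PTHREAD_COND_INITIALIZER;

void enqueue_write(thread_data_t *task) {
  task->next = NULL;
  pthread_mutex_lock(&q_lock);
  if (tail) tail->next = task;  /* appending at the tail preserves FIFO order */
  else      head = task;
  tail = task;
  pthread_cond_signal(&q_nonempty);  /* wake the I/O thread */
  pthread_mutex_unlock(&q_lock);
}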
@@ -2,13 +2,13 @@
#!/bin/sh
include make.inc
%.o: %.c
$(CXX) $(CFLAGS) -o $@ -c $<
%.o: %.cpp
$(CXX) $(CFLAGS) -o $@ -c $<
all: parallel_hdf5
parallel_hdf5: H5SSD.o parallel_hdf5.o ../utils/debug.o
$(CXX) $(CFLAGS) -I./ -o parallel_hdf5.x parallel_hdf5.o H5SSD.o ../utils/debug.o $(LIBS)
parallel_hdf5.o : parallel_hdf5.cpp
$(CXX) $(CFLAGS) -o parallel_hdf5.o -c parallel_hdf5.cpp
H5SSD.o : H5SSD.c
$(CXX) $(CFLAGS) -o H5SSD.o -c H5SSD.c
clean:
rm -rf *.x *.h5 *.hdf5 *.o
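The Makefile includes a make.inc that is not part of this diff; a hypothetical example of the variables it is expected to define (compiler name and paths are placeholders, not from the repository):

# make.inc (hypothetical): definitions consumed by the Makefile above
CXX    = mpicxx
CFLAGS = -O2 -g -I$(HDF5_ROOT)/include
LIBS   = -L$(HDF5_ROOT)/lib -lhdf5 -lpthread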
@@ -5,6 +5,8 @@ Huihuo Zheng @ Argonne Leadership Computing Facility
This folder contains the source code for system-aware
optimization of HDF5 custom collective VFD.
Important: in order to use this feature, one has to initialize MPI with MPI_THREAD_MULTIPLE.
* H5SSD.c, H5SSD.h - files for HDF5 functions incorporating local storage.
......
@@ -76,8 +76,10 @@ int main(int argc, char **argv) {
MPI_Info info = MPI_INFO_NULL;
int rank, nproc, provided;
MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
MPI_Comm_size(comm, &nproc);
MPI_Comm_rank(comm, &rank);
if (rank==0) cout << "provided: " << provided << endl;
Timing tt(rank==io_node());
//printf(" MPI: I am rank %d of %d \n", rank, nproc);
// find local array dimension and offset;
@@ -137,10 +139,11 @@ int main(int argc, char **argv) {
hsize_t count[2] = {1, 1};
tt.start_clock("Init_array");
for(int j=0; j<ldims[0]*ldims[1]; j++)
data[j] = 0;
tt.stop_clock("Init_array");
for (int i=0; i<niter; i++) {
for(int j=0; j<ldims[0]*ldims[1]; j++)
data[j] = i;
offset[0]= i*gdims[0] + rank*ldims[0];
// select hyperslab
H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, NULL, ldims, count);
......
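For context, the hyperslab selection above is typically followed by a collective write; a minimal sketch assuming the handles dset and memspace already exist (the transfer property list shown here is illustrative, not taken from this diff):

/* Assumed handles: dset (dataset), memspace (memory dataspace). */
hid_t dxf_id = H5Pcreate(H5P_DATASET_XFER);
H5Pset_dxpl_mpio(dxf_id, H5FD_MPIO_COLLECTIVE);  /* collective MPI-IO transfer */
/* Each rank writes its ldims-sized block at its own offset. */
H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, NULL, ldims, count);
H5Dwrite(dset, H5T_NATIVE_INT, memspace, filespace, dxf_id, data);
H5Pclose(dxf_id);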
@@ -23,8 +23,8 @@ if hostname.find("theta")!=-1:
parser.add_argument("--num_nodes", default=int(os.environ['COBALT_JOBSIZE']), type=int)
parser.add_argument("--ppn", default=32, type=int)
else:
# root="/Users/zhenghh/Documents/Research/ExaHDF5/io_benchmarks/ssd_cache/hdf5/"
root="/gpfs/mira-home/hzheng/io_benchmarks/node_local_storage/hdf5/"
root="/Users/zhenghh/Documents/Research/ExaHDF5/io_benchmarks/ssd_cache/hdf5_write/"
# root="/gpfs/mira-home/hzheng/io_benchmarks/node_local_storage/hdf5/"
parser.add_argument("--num_nodes", default=1, type=int)
parser.add_argument("--SSD", default="SSD")
parser.add_argument("--ppn", default=2, type=int)
@@ -71,7 +71,7 @@ if hostname.find("theta")!=-1:
print("cd %s; aprun -n %s -N %s %s --dim %s %s --scratch %s --niter %s %s |& tee %s; cd - " %(args.directory, args.num_nodes*args.ppn, args.ppn, exe, d1, d2, args.lustre, args.niter, extra_opts, root + args.directory + "/"+args.output))
os.system("cd %s; aprun -n %s -N %s %s --dim %s %s --scratch %s --niter %s %s |& tee %s; cd - " %(args.directory, args.num_nodes*args.ppn, args.ppn, exe, d1, d2, args.lustre, args.niter, extra_opts, root + args.directory + "/"+args.output))
else:
cmd ="cd %s; %s mpirun -np %s %s --lustre %s --niter %s %s | tee %s; cd -" %(args.directory, env, args.ppn, exe, args.lustre, args.niter, extra_opts, args.output)
cmd ="cd %s; %s mpirun -np %s %s --lustre %s --niter %s %s --dim %s %s | tee %s; cd -" %(args.directory, env, args.ppn, exe, args.lustre, args.niter, extra_opts, d1, d2, args.output)
print(cmd)
os.system(cmd)
......
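For illustration, with the default ppn=2 the mpirun branch above prints a command of this shape (all values hypothetical):

cd results; mpirun -np 2 ./parallel_hdf5.x --lustre /tmp/scratch --niter 10 --dim 1024 1024 | tee out.log; cd -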