Commit 98c94b3a authored by Huihuo Zheng

revert

parents aab17378 a8929dd4
@@ -72,20 +72,7 @@ SSD = {
.mspace_left = 137438953472,
.offset = 0
};
H5Dwrite_cache_metadata
H5DWMM = {
.mpi.ppn = 1, // number of processes per node
.mpi.rank = 0, // rank id in the H5F comm
.mpi.local_rank = 0, // local rank id within a node
.io.num_request = 0, // number of I/O requests
.io.master_cond = PTHREAD_COND_INITIALIZER, // condition variable
.io.io_cond = PTHREAD_COND_INITIALIZER,
.io.request_lock = PTHREAD_MUTEX_INITIALIZER,
.io.request_list = NULL,
.io.current_request = NULL,
.io.first_request = NULL,
.ssd = &SSD
};
H5Dwrite_cache_metadata H5DWMM;
/*
Function for setting up the local storage path and capacity.
@@ -178,6 +165,11 @@ hid_t H5Fcreate_cache( const char *name, unsigned flags, hid_t fcpl_id, hid_t fa
int rc = pthread_create(&H5DWMM.io.pthread, NULL, H5Dwrite_pthread_func, NULL);
srand(time(NULL)); // Initialization, should only be called once.
setH5SSD();
H5DWMM.io.num_request = 0;
H5DWMM.io.master_cond = PTHREAD_COND_INITIALIZER;
H5DWMM.io.io_cond = PTHREAD_COND_INITIALIZER;
H5DWMM.io.request_lock = PTHREAD_MUTEX_INITIALIZER;
H5DWMM.ssd = &SSD;
MPI_Comm comm, comm_dup;
MPI_Info info;
H5Pget_fapl_mpio(fapl_id, &comm, &info);
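A note on the initialization added here: PTHREAD_COND_INITIALIZER and PTHREAD_MUTEX_INITIALIZER are intended for static initialization at the point of declaration, and assigning them to struct members at runtime is not guaranteed to compile everywhere. A minimal sketch (not part of this commit) of the equivalent runtime setup using the pthreads init calls, written against the H5DWMM fields shown above:

#include <pthread.h>
/* Sketch only: initialize the write-cache synchronization members at
   H5Fcreate_cache() time; H5Dwrite_cache_metadata and SSD are defined
   elsewhere in this source file. */
static void init_write_cache_sync(void) {
    pthread_cond_init(&H5DWMM.io.master_cond, NULL);
    pthread_cond_init(&H5DWMM.io.io_cond, NULL);
    pthread_mutex_init(&H5DWMM.io.request_lock, NULL);
    H5DWMM.io.num_request = 0;
    H5DWMM.ssd = &SSD;
}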
@@ -316,6 +308,7 @@ herr_t H5Sclose_cache(hid_t filespace) {
The following functions are for parallel read.
*/
<<<<<<< HEAD
H5Dread_cache_metadata
@@ -327,6 +320,9 @@ H5DRMM = {
.io.dset_cached = false,
.ssd = &SSD
};
=======
H5Dread_cache_metadata H5DRMM;
>>>>>>> a8929dd4386a5dd20edfe4b59b807130188063d7
/*
Helper function to compute the local number of samples and the offset.
*/
@@ -469,6 +465,32 @@ void create_mmap(const char *prefix) {
MPI_Type_commit(&H5DRMM.dset.mpi_datatype);
MPI_Win_create(H5DRMM.mmap.buf, ss, H5DRMM.dset.esize, MPI_INFO_NULL, H5DRMM.mpi.comm, &H5DRMM.mpi.win);
}
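The MPI window created here exposes each rank's memory-mapped cache buffer for one-sided access, with dset.esize as the displacement unit. An illustrative sketch (not part of this commit; target_rank, sample_offset, and local_buf are hypothetical names, and dset.mpi_datatype is assumed to describe one sample) of reading one cached sample from a remote rank through this window:

/* Illustrative only: fetch one sample, with the offset counted in
   elements of size H5DRMM.dset.esize, from target_rank's cache. */
MPI_Win_lock(MPI_LOCK_SHARED, target_rank, 0, H5DRMM.mpi.win);
MPI_Get(local_buf, 1, H5DRMM.dset.mpi_datatype,
        target_rank, sample_offset, 1, H5DRMM.dset.mpi_datatype,
        H5DRMM.mpi.win);
MPI_Win_unlock(target_rank, H5DRMM.mpi.win);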
<<<<<<< HEAD
=======
hid_t H5Fopen_cache( const char *name, hid_t fcpl_id, hid_t fapl_id ) {
srand(time(NULL)); // Initialization, should only be called once.
setH5SSD();
H5DRMM.io.master_cond = PTHREAD_COND_INITIALIZER;
H5DRMM.io.io_cond = PTHREAD_COND_INITIALIZER;
H5DRMM.io.request_lock = PTHREAD_MUTEX_INITIALIZER;
H5DRMM.io.batch_cached = true;
H5DRMM.io.dset_cached = false;
H5DRMM.ssd = &SSD;
MPI_Comm comm;
MPI_Info info;
H5Pget_fapl_mpio(fapl_id, &comm, &info);
MPI_Comm_dup(comm, &H5DRMM.mpi.comm);
MPI_Comm_rank(comm, &H5DRMM.mpi.rank);
MPI_Comm_size(comm, &H5DRMM.mpi.nproc);
MPI_Comm_split_type(H5DRMM.mpi.comm, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL, &H5DRMM.mpi.node_comm);
MPI_Comm_rank(H5DRMM.mpi.node_comm, &H5DRMM.mpi.local_rank);
MPI_Comm_size(H5DRMM.mpi.node_comm, &H5DRMM.mpi.ppn);
H5DRMM.ssd->mspace_per_rank_total = H5DRMM.ssd->mspace_total / H5DRMM.mpi.ppn;
H5DRMM.ssd->mspace_per_rank_left = H5DRMM.ssd->mspace_per_rank_total;
return H5Fopen(name, fcpl_id, fapl_id);
}
>>>>>>> a8929dd4386a5dd20edfe4b59b807130188063d7
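A hedged usage sketch for H5Fopen_cache (not part of this commit). Note that the second parameter is named fcpl_id but is forwarded directly to H5Fopen, whose second argument is the access flags, so callers pass flags such as H5F_ACC_RDONLY there; the fapl must carry MPI-IO settings for the H5Pget_fapl_mpio call above to succeed. "data.h5" is a placeholder file name.

/* Illustrative usage only. */
hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
hid_t fd = H5Fopen_cache("data.h5", H5F_ACC_RDONLY, fapl); /* placeholder name */
/* ... open datasets and read through the node-local cache ... */
H5Fclose(fd);
H5Pclose(fapl);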
/*
Open a dataset; optionally create a memory map.
......