Commit dfb153ba authored by Huihuo Zheng

roll backup

parent 089925c2
@@ -72,7 +72,20 @@ SSD = {
.mspace_left = 137438953472,
.offset = 0
};
H5Dwrite_cache_metadata H5DWMM;
H5Dwrite_cache_metadata
H5DWMM = {
.mpi.ppn = 1, // number of proc per node
.mpi.rank = 0, // rank id in H5F comm
.mpi.local_rank = 0, // local rank id in a node
.io.num_request = 0, // number of I/O requests
.io.master_cond = PTHREAD_COND_INITIALIZER, // condition variable
.io.io_cond = PTHREAD_COND_INITIALIZER,
.io.request_lock = PTHREAD_MUTEX_INITIALIZER,
.io.request_list = NULL,
.io.current_request = NULL,
.io.first_request = NULL,
.ssd = &SSD
};
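For orientation, the designated initializers above imply roughly the following nested layout for H5Dwrite_cache_metadata. This is a sketch reconstructed from the field accesses in this diff (and from the MPI_INFO hunk in the header further down); the request-list member types, the mmap member sizes, and the SSD_INFO type name are assumptions, not taken from the source.

// Sketch only: layout implied by the initializer above; members marked "assumed" are not in the diff.
typedef struct _H5Dwrite_cache_metadata_sketch {
  struct {
    int ppn, rank, local_rank;    // procs per node, global rank, node-local rank
    MPI_Comm comm, node_comm;     // global and node-local communicators
  } mpi;
  struct {
    int num_request;              // outstanding I/O requests
    pthread_cond_t master_cond;   // I/O thread -> caller signalling
    pthread_cond_t io_cond;       // caller -> I/O thread signalling
    pthread_mutex_t request_lock; // guards the request list
    void *request_list, *current_request, *first_request; // assumed: nodes of a request queue
    pthread_t pthread;            // background write thread
  } io;
  struct { char fname[255]; int fd; } mmap; // assumed sizes; used via H5DWMM.mmap below
  SSD_INFO *ssd;                  // assumed type name for the SSD descriptor above
} H5Dwrite_cache_metadata_sketch;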
/*
Function for setting up the local storage path and capacity.
@@ -161,23 +174,19 @@ void *H5Dwrite_pthread_func(void *arg) {
enough for storing the buffer of the current task, it will wait for the
I/O thread to finish all the previous tasks.
*/
hid_t H5Fcreate_cache( const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id ) {
int rc = pthread_create(&H5DWMM.io.pthread, NULL, H5Dwrite_pthread_func, NULL);
srand(time(NULL)); // Initialization, should only be called once.
setH5SSD();
H5DWMM.io.num_request = 0;
H5DWMM.io.master_cond = PTHREAD_COND_INITIALIZER;
H5DWMM.io.io_cond = PTHREAD_COND_INITIALIZER;
H5DWMM.io.request_lock = PTHREAD_MUTEX_INITIALIZER;
H5DWMM.ssd = &SSD;
MPI_Comm comm, comm_dup;
MPI_Info info;
H5Pget_fapl_mpio(fapl_id, &comm, &info);
MPI_Comm_dup(comm, &comm_dup);
MPI_Comm_dup(comm, &H5DWMM.mpi.comm);
MPI_Comm_rank(comm, &H5DWMM.mpi.rank);
MPI_Comm_split_type(comm_dup, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL, &H5DWMM.mpi.comm);
MPI_Comm_rank(H5DWMM.mpi.comm, &H5DWMM.mpi.local_rank);
MPI_Comm_size(H5DWMM.mpi.comm, &H5DWMM.mpi.ppn);
MPI_Comm_split_type(H5DWMM.mpi.comm, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL, &H5DWMM.mpi.node_comm);
MPI_Comm_rank(H5DWMM.mpi.node_comm, &H5DWMM.mpi.local_rank);
MPI_Comm_size(H5DWMM.mpi.node_comm, &H5DWMM.mpi.ppn);
strcpy(H5DWMM.mmap.fname, H5DWMM.ssd->path);
char rnd[255];
sprintf(rnd, "%d", rand());
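The comment above describes back-pressure on the write cache: if the SSD does not have enough room left for the incoming buffer, the caller blocks until the background I/O thread has drained earlier requests and freed space. A minimal sketch of that condition-variable handshake, reusing the H5DWMM fields initialized above (the actual bookkeeping in this repository is not shown in the hunk):

/* Sketch only: block until the I/O thread has freed 'size' bytes of SSD space.
   The mspace accounting mirrors the ssd fields above; the helper itself is assumed. */
static void wait_for_ssd_space(hsize_t size) {
  pthread_mutex_lock(&H5DWMM.io.request_lock);
  while (H5DWMM.ssd->mspace_per_rank_left < size) {
    pthread_cond_signal(&H5DWMM.io.io_cond);                            // nudge the I/O thread
    pthread_cond_wait(&H5DWMM.io.master_cond, &H5DWMM.io.request_lock); // sleep until it makes progress
  }
  H5DWMM.ssd->mspace_per_rank_left -= size;                             // claim the space
  pthread_mutex_unlock(&H5DWMM.io.request_lock);
}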
@@ -272,6 +281,7 @@ void H5WPthreadTerminate() {
and terminate the pthread, remove the files on the SSD.
*/
herr_t H5Fclose_cache( hid_t file_id ) {
// we should check whether the cache is turned on for file_id; therefore we should have a property.
H5WPthreadTerminate();
close(H5DWMM.mmap.fd);
remove(H5DWMM.mmap.fname);
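H5WPthreadTerminate itself sits outside the visible hunk; one plausible shape, given the condition variables and request counter above, is to post a stop signal under the lock, wake the I/O thread, and join it before the cache file is closed and unlinked:

/* Sketch only: how the terminate step could drain and join the write thread.
   Using num_request = -1 as a stop sentinel is an assumption, not the documented protocol. */
void H5WPthreadTerminate_sketch(void) {
  pthread_mutex_lock(&H5DWMM.io.request_lock);
  H5DWMM.io.num_request = -1;               // assumed sentinel meaning "no more work"
  pthread_cond_signal(&H5DWMM.io.io_cond);  // wake H5Dwrite_pthread_func if it is waiting
  pthread_mutex_unlock(&H5DWMM.io.request_lock);
  pthread_join(H5DWMM.io.pthread, NULL);    // wait until every queued write has reached the SSD
}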
@@ -308,12 +318,6 @@ herr_t H5Sclose_cache(hid_t filespace) {
The following functions are for parallel read.
*/
<<<<<<< HEAD
<<<<<<< HEAD
=======
>>>>>>> parent of 1a31aa6... read
H5Dread_cache_metadata
H5DRMM = {
.io.master_cond = PTHREAD_COND_INITIALIZER,
@@ -323,9 +327,6 @@ H5DRMM = {
.io.dset_cached = false,
.ssd = &SSD
};
=======
H5Dread_cache_metadata H5DRMM;
>>>>>>> a8929dd4386a5dd20edfe4b59b807130188063d7
/*
Helper function to compute the local number of samples and the offset.
*/
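The body of parallel_dist is outside this hunk; a minimal even block distribution consistent with its prototype in the header below (void parallel_dist(size_t dim, int nproc, int rank, size_t &ldim, size_t &start)) would look like this:

// Sketch only: even block distribution of 'dim' samples over 'nproc' ranks.
// The actual remainder handling in the source may differ.
void parallel_dist_sketch(size_t dim, int nproc, int rank, size_t &ldim, size_t &start) {
  size_t base = dim / nproc;              // samples every rank receives
  size_t rem  = dim % nproc;              // leftovers go to the first 'rem' ranks
  size_t r    = (size_t) rank;
  ldim  = base + (r < rem ? 1 : 0);       // local number of samples
  start = r * base + (r < rem ? r : rem); // global offset of this rank's first sample
}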
@@ -468,18 +469,9 @@ void create_mmap(const char *prefix) {
MPI_Type_commit(&H5DRMM.dset.mpi_datatype);
MPI_Win_create(H5DRMM.mmap.buf, ss, H5DRMM.dset.esize, MPI_INFO_NULL, H5DRMM.mpi.comm, &H5DRMM.mpi.win);
}
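MPI_Win_create here exposes each rank's memory-mapped cache to the whole communicator with a displacement unit of one dataset element, so another rank's cached samples can be fetched one-sidedly. Purely as an illustration (this is not the code path of H5Dread_from_cache; local_buf, target, disp, and count are placeholders):

/* Illustration only: read 'count' elements that rank 'target' holds at displacement
   'disp' (in units of H5DRMM.dset.esize, the window's disp_unit) in its mmap'ed cache. */
MPI_Win_lock(MPI_LOCK_SHARED, target, 0, H5DRMM.mpi.win);
MPI_Get(local_buf, count, H5DRMM.dset.mpi_datatype,
        target, disp, count, H5DRMM.dset.mpi_datatype, H5DRMM.mpi.win);
MPI_Win_unlock(target, H5DRMM.mpi.win);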
<<<<<<< HEAD
<<<<<<< HEAD
=======
hid_t H5Fopen_cache( const char *name, hid_t fcpl_id, hid_t fapl_id ) {
srand(time(NULL)); // Initialization, should only be called once.
setH5SSD();
H5DRMM.io.master_cond = PTHREAD_COND_INITIALIZER;
H5DRMM.io.io_cond = PTHREAD_COND_INITIALIZER;
H5DRMM.io.request_lock = PTHREAD_MUTEX_INITIALIZER;
H5DRMM.io.batch_cached = true;
H5DRMM.io.dset_cached = false;
H5DRMM.ssd = &SSD;
MPI_Comm comm;
MPI_Info info;
H5Pget_fapl_mpio(fapl_id, &comm, &info);
@@ -494,18 +486,12 @@ hid_t H5Fopen_cache( const char *name, hid_t fcpl_id, hid_t fapl_id ) {
H5DRMM.ssd->mspace_per_rank_left = H5DRMM.ssd->mspace_per_rank_total;
return H5Fopen(name, fcpl_id, fapl_id);
}
>>>>>>> a8929dd4386a5dd20edfe4b59b807130188063d7
=======
>>>>>>> parent of 1a31aa6... read
/*
Open a dataset, can create memory map.
*/
hid_t H5Dopen_cache(hid_t loc_id, const char *name, hid_t dapl_id) {
int rc = pthread_create(&H5DRMM.io.pthread, NULL, H5Dread_pthread_func, NULL);
MPI_Info info;
MPI_Comm comm;
MPI_Comm_dup(MPI_COMM_WORLD, &H5DRMM.mpi.comm);
srand(time(NULL)); // Initialization, should only be called once.
hid_t dset = H5Dopen(loc_id, name, dapl_id);
H5DRMM.dset.h5_datatype = H5Dget_type(dset);
@@ -521,8 +507,6 @@ hid_t H5Dopen_cache(hid_t loc_id, const char *name, hid_t dapl_id) {
H5DRMM.dset.sample.nel = dim;
H5DRMM.dset.sample.dim = ndims-1;
H5DRMM.dset.ns_glob = gdims[0];
MPI_Comm_size(H5DRMM.mpi.comm, &H5DRMM.mpi.nproc);
MPI_Comm_rank(H5DRMM.mpi.comm, &H5DRMM.mpi.rank);
parallel_dist(gdims[0], H5DRMM.mpi.nproc, H5DRMM.mpi.rank, H5DRMM.dset.ns_loc, H5DRMM.dset.s_offset);
H5DRMM.dset.sample.size = H5DRMM.dset.esize*H5DRMM.dset.sample.nel;
H5DRMM.dset.size = H5DRMM.dset.sample.size*H5DRMM.dset.ns_loc;
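The per-rank cache size computed here, H5DRMM.dset.size, is what the memory map created by create_mmap has to hold. A minimal sketch of such a file-backed mapping on the SSD path follows (needs <fcntl.h>, <unistd.h>, <sys/mman.h>; the repository's actual create_mmap is not shown in this hunk, and H5DRMM.mmap.fname/fd are assumed to mirror the H5DWMM.mmap fields used above):

/* Sketch only: size a file on the SSD to the local share of the dataset and map it. */
int fd = open(H5DRMM.mmap.fname, O_RDWR | O_CREAT, 0600);
if (ftruncate(fd, (off_t) H5DRMM.dset.size) != 0) perror("ftruncate");
H5DRMM.mmap.buf = mmap(NULL, H5DRMM.dset.size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
H5DRMM.mmap.fd  = fd;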
@@ -41,7 +41,8 @@ typedef struct _MPI_INFO {
int rank;
int local_rank;
int nproc;
MPI_Comm comm;
MPI_Comm comm; // global communicator
MPI_Comm node_comm; // node local communicator
MPI_Win win;
} MPI_INFO;
@@ -106,6 +107,7 @@ void set_hyperslab_from_samples(int *samples, int nsample, hid_t &fspace);
void get_samples_from_filespace(hid_t fspace, vector<int> &samples, bool &contiguous);
//void create_mmap(char *path, H5Dio_mmap &f);
void parallel_dist(size_t dim, int nproc, int rank, size_t &ldim, size_t &start);
hid_t H5Fopen_cache(const char *name, hid_t fcpl_id, hid_t fapl_id);
hid_t H5Dopen_cache(hid_t loc_id, const char *name, hid_t dapl_id);
herr_t H5Dread_to_cache(hid_t dataset_id, hid_t mem_type_id, hid_t mem_space_id, hid_t file_space_id, hid_t xfer_plist_id, void * buf);
herr_t H5Dread_from_cache(hid_t dataset_id, hid_t mem_type_id, hid_t mem_space_id, hid_t file_space_id, hid_t xfer_plist_id, void * buf);
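Taken together, the prototypes above suggest a read-path call sequence roughly like the following. This is illustrative only: the file name, dataset name, dataspace handles, and buffer are placeholders, and the order of the cache calls is inferred from the function names rather than documented in this commit.

hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);               // parallel HDF5 access
hid_t fd   = H5Fopen_cache("samples.h5", H5F_ACC_RDONLY, fapl);      // open file, set up the SSD read cache
hid_t dset = H5Dopen_cache(fd, "images", H5P_DEFAULT);               // open dataset, build the mmap cache
H5Dread_to_cache(dset, memtype, mspace, fspace, H5P_DEFAULT, buf);   // first pass: read from file and cache
H5Dread_from_cache(dset, memtype, mspace, fspace, H5P_DEFAULT, buf); // later passes: serve from the SSD cache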