Commit e31ec441 authored by Huihuo Zheng

theta

parent 79a63dc0
@@ -62,22 +62,22 @@ H5Dwrite_cache_metadata H5DWMM;
/*
Function for setting up the local storage path and capacity.
*/
void setH5SSD(SSD_INFO *SSD) {
// set SSD_PATH
void setH5SSD(SSD_INFO *ssd) {
// set the SSD path from the SSD_PATH environment variable
if (getenv("SSD_PATH")) {
strcpy(SSD->path, getenv("SSD_PATH"));
strcpy(ssd->path, getenv("SSD_PATH"));
} else {
strcpy(SSD->path, "/local/scratch/");
strcpy(ssd->path, "/local/scratch/");
}
// set SSD_SIZE (capacity, given in GiB)
if (getenv("SSD_SIZE")) {
SSD->mspace_total = atof(getenv("SSD_SIZE"))*1024*1024*1024;
SSD->mspace_left = SSD->mspace_total;
ssd->mspace_total = atof(getenv("SSD_SIZE"))*1024*1024*1024;
ssd->mspace_left = ssd->mspace_total;
} else {
SSD->mspace_total = 137438953472;
SSD->mspace_left = 137438953472;
ssd->mspace_total = 137438953472; // default capacity: 128 GiB
ssd->mspace_left = 137438953472;
}
SSD->offset = 0;
ssd->offset = 0;
}
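
For reference, a minimal usage sketch (not part of this commit) showing how setH5SSD() is driven by the SSD_PATH and SSD_SIZE environment variables; the instance name ssd_example is hypothetical.

#define _POSIX_C_SOURCE 200112L
#include <stdio.h>
#include <stdlib.h>
#include "H5Dio_cache.h"   /* SSD_INFO, setH5SSD() */

static SSD_INFO ssd_example;   /* hypothetical instance, analogous to the global SSD */

void example_configure_ssd(void) {
  setenv("SSD_PATH", "/local/scratch/", 1); /* node-local mount point */
  setenv("SSD_SIZE", "128", 1);             /* capacity in GiB        */
  setH5SSD(&ssd_example);
  printf("cache path: %s, capacity: %.0f bytes\n",
         ssd_example.path, ssd_example.mspace_total);
}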
@@ -109,33 +109,34 @@ hsize_t get_buf_size(hid_t mspace, hid_t tid) {
we instead allocate a temporary buffer directly in memory.
*/
void *H5Dwrite_pthread_func(void *arg) {
pthread_mutex_lock(&H5DWMM.io.request_lock);
while (H5DWMM.io.num_request>=0) {
if (H5DWMM.io.num_request >0) {
thread_data_t *data = H5DWMM.io.current_request;
data->buf = mmap(NULL, data->size, PROT_READ, MAP_SHARED, H5DWMM.mmap.fd, data->offset);
H5Dwrite_cache_metadata *wmm = (H5Dwrite_cache_metadata*) arg;
pthread_mutex_lock(&wmm->io.request_lock);
while (wmm->io.num_request >= 0) {
if (wmm->io.num_request > 0) {
thread_data_t *data = wmm->io.current_request;
data->buf = mmap(NULL, data->size, PROT_READ, MAP_SHARED, wmm->mmap.fd, data->offset);
msync(data->buf, data->size, MS_SYNC);
#ifdef THETA
H5DWMM.mmap.tmp_buf = malloc(data->size);
memcpy(H5DWMM.mmap.tmp_buf, data->buf, data->size);
wmm->mmap.tmp_buf = malloc(data->size);
memcpy(wmm->mmap.tmp_buf, data->buf, data->size);
H5Dwrite(data->dataset_id, data->mem_type_id,
data->mem_space_id, data->file_space_id,
data->xfer_plist_id, H5DWMM.mmap.tmp_buf);
free(H5DWMM.mmap.tmp_buf);
data->xfer_plist_id, wmm->mmap.tmp_buf);
free(wmm->mmap.tmp_buf);
#else
H5Dwrite(data->dataset_id, data->mem_type_id,
data->mem_space_id, data->file_space_id,
data->xfer_plist_id, data->buf);
#endif
munmap(data->buf, data->size);
H5DWMM.io.current_request=H5DWMM.io.current_request->next;
H5DWMM.io.num_request--;
} if (H5DWMM.io.num_request == 0) {
pthread_cond_signal(&H5DWMM.io.master_cond);
pthread_cond_wait(&H5DWMM.io.io_cond, &H5DWMM.io.request_lock);
wmm->io.current_request=wmm->io.current_request->next;
wmm->io.num_request--;
} if (wmm->io.num_request == 0) {
pthread_cond_signal(&wmm->io.master_cond);
pthread_cond_wait(&wmm->io.io_cond, &wmm->io.request_lock);
}
}
pthread_mutex_unlock(&H5DWMM.io.request_lock);
pthread_mutex_unlock(&wmm->io.request_lock);
return NULL;
}
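
The consumer above drains a linked list of requests; the producer side (H5Dwrite_cache) is not shown in this diff. Below is a hypothetical sketch of how a request could be appended to that queue, using only the IO_THREAD and thread_data_t fields declared in H5Dio_cache.h; it assumes the includes of H5Dio_cache.c (pthread.h, stdlib.h) and that the user data has already been copied into the memory-mapped file at the given offset.

/* Hypothetical producer-side sketch, not the library's H5Dwrite_cache. */
static void enqueue_write_sketch(H5Dwrite_cache_metadata *wmm,
                                 hid_t dset, hid_t mtype, hid_t mspace,
                                 hid_t fspace, hid_t dxpl,
                                 hid_t offset, hsize_t size) {
  pthread_mutex_lock(&wmm->io.request_lock);
  thread_data_t *t = wmm->io.request_list;     /* empty tail node to fill  */
  t->dataset_id    = dset;
  t->mem_type_id   = mtype;
  t->mem_space_id  = mspace;
  t->file_space_id = fspace;
  t->xfer_plist_id = dxpl;
  t->offset        = offset;                   /* byte offset in mmap file */
  t->size          = size;
  t->next          = (thread_data_t *) malloc(sizeof(thread_data_t));
  t->next->id      = t->id + 1;
  wmm->io.request_list = t->next;              /* new empty tail           */
  wmm->io.num_request++;                       /* publish the request and  */
  pthread_cond_signal(&wmm->io.io_cond);       /* wake the I/O thread      */
  pthread_mutex_unlock(&wmm->io.request_lock);
}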
@@ -155,17 +156,17 @@ void int2char(int a, char str[255]) {
sprintf(str, "%d", a);
}
hid_t H5Fcreate_cache( const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id ) {
H5DWMM.io.num_request = 0;
int rc = pthread_create(&H5DWMM.io.pthread, NULL, H5Dwrite_pthread_func, NULL);
H5DWMM.io.num_request = 0;
pthread_cond_init(&H5DWMM.io.io_cond, NULL);
pthread_cond_init(&H5DWMM.io.master_cond, NULL);
pthread_mutex_init(&H5DWMM.io.request_lock, NULL);
int rc = pthread_create(&H5DWMM.io.pthread, NULL, H5Dwrite_pthread_func, &H5DWMM);
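// Note: the condition variables and the mutex are now initialized before the
// I/O thread is spawned, and &H5DWMM is passed as the thread argument so that
// H5Dwrite_pthread_func works on the metadata it is given rather than on an
// implicit global.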
srand(time(NULL)); // Initialization, should only be called once.
setH5SSD(&SSD);
H5DWMM.ssd = &SSD;
MPI_Comm comm, comm_dup;
MPI_Info info;
H5Pget_fapl_mpio(fapl_id, &comm, &info);
pthread_cond_init(&H5DWMM.io.io_cond, NULL);
pthread_cond_init(&H5DWMM.io.master_cond, NULL);
pthread_mutex_init(&H5DWMM.io.request_lock, NULL);
MPI_Comm_dup(comm, &H5DWMM.mpi.comm);
MPI_Comm_rank(comm, &H5DWMM.mpi.rank);
MPI_Comm_size(comm, &H5DWMM.mpi.nproc);
@@ -379,44 +380,45 @@ void get_samples_from_filespace(hid_t fspace, BATCH *samples, bool *contig) {
mapped files on the local storage using MPI_Put
*/
void *H5Dread_pthread_func(void *args) {
pthread_mutex_lock(&H5DRMM.io.request_lock);
while (!H5DRMM.io.dset_cached) {
if (!H5DRMM.io.batch_cached) {
char *p_mem = (char *) H5DRMM.mmap.tmp_buf;
MPI_Win_fence(MPI_MODE_NOPRECEDE, H5DRMM.mpi.win);
int batch_size = H5DRMM.dset.batch.size;
if (H5DRMM.dset.contig_read) {
int dest = H5DRMM.dset.batch.list[0];
int src = dest/H5DRMM.dset.ns_loc;
MPI_Aint offset = (dest%H5DRMM.dset.ns_loc)*H5DRMM.dset.sample.nel;
MPI_Put(p_mem, H5DRMM.dset.sample.nel*batch_size,
H5DRMM.dset.mpi_datatype, src,
offset, H5DRMM.dset.sample.nel*batch_size,
H5DRMM.dset.mpi_datatype, H5DRMM.mpi.win);
H5Dread_cache_metadata *dmm = (H5Dread_cache_metadata*) args;
pthread_mutex_lock(&dmm->io.request_lock);
while (!dmm->io.dset_cached) {
if (!dmm->io.batch_cached) {
char *p_mem = (char *) dmm->mmap.tmp_buf;
MPI_Win_fence(MPI_MODE_NOPRECEDE, dmm->mpi.win);
int batch_size = dmm->dset.batch.size;
if (dmm->dset.contig_read) {
int dest = dmm->dset.batch.list[0];
int src = dest/dmm->dset.ns_loc;
MPI_Aint offset = (dest%dmm->dset.ns_loc)*dmm->dset.sample.nel;
MPI_Put(p_mem, dmm->dset.sample.nel*batch_size,
dmm->dset.mpi_datatype, src,
offset, dmm->dset.sample.nel*batch_size,
dmm->dset.mpi_datatype, dmm->mpi.win);
} else {
for(int i=0; i<batch_size; i++) {
int dest = H5DRMM.dset.batch.list[i];
int src = dest/H5DRMM.dset.ns_loc;
MPI_Aint offset = (dest%H5DRMM.dset.ns_loc)*H5DRMM.dset.sample.nel;
MPI_Put(&p_mem[i*H5DRMM.dset.sample.size],
H5DRMM.dset.sample.nel,
H5DRMM.dset.mpi_datatype, src,
offset, H5DRMM.dset.sample.nel,
H5DRMM.dset.mpi_datatype, H5DRMM.mpi.win);
int dest = dmm->dset.batch.list[i];
int src = dest/dmm->dset.ns_loc;
MPI_Aint offset = (dest%dmm->dset.ns_loc)*dmm->dset.sample.nel;
MPI_Put(&p_mem[i*dmm->dset.sample.size],
dmm->dset.sample.nel,
dmm->dset.mpi_datatype, src,
offset, dmm->dset.sample.nel,
dmm->dset.mpi_datatype, dmm->mpi.win);
}
}
MPI_Win_fence(MPI_MODE_NOSUCCEED, H5DRMM.mpi.win);
if (io_node()==H5DRMM.mpi.rank && debug_level()>2) printf("PTHREAD DONE\n");
H5DRMM.io.batch_cached = true;
H5DRMM.dset.ns_cached += H5DRMM.dset.batch.size;
if (H5DRMM.dset.ns_cached>=H5DRMM.dset.ns_loc)
H5DRMM.io.dset_cached=true;
MPI_Win_fence(MPI_MODE_NOSUCCEED, dmm->mpi.win);
if (io_node()==dmm->mpi.rank && debug_level()>2) printf("PTHREAD DONE\n");
dmm->io.batch_cached = true;
dmm->dset.ns_cached += dmm->dset.batch.size;
if (dmm->dset.ns_cached>=dmm->dset.ns_loc)
dmm->io.dset_cached=true;
} else {
pthread_cond_signal(&H5DRMM.io.master_cond);
pthread_cond_wait(&H5DRMM.io.io_cond, &H5DRMM.io.request_lock);
pthread_cond_signal(&dmm->io.master_cond);
pthread_cond_wait(&dmm->io.io_cond, &dmm->io.request_lock);
}
}
pthread_mutex_unlock(&H5DRMM.io.request_lock);
pthread_mutex_unlock(&dmm->io.request_lock);
return NULL;
}
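
The window targeted by the MPI_Put calls above is attached to the node-local memory-mapped buffer in create_mmap_win, which is only partially visible in the next hunks. Below is a minimal sketch of that idea, under the assumption that one dataset element is exposed as a contiguous block of esize bytes and that put offsets are counted in elements, as in the MPI_Put calls above; it is not necessarily the library's exact implementation.

static void create_window_sketch(H5Dread_cache_metadata *dmm, hsize_t nbytes) {
  /* one dataset element = esize bytes, sent as a contiguous MPI type */
  MPI_Type_contiguous((int) dmm->dset.esize, MPI_BYTE, &dmm->dset.mpi_datatype);
  MPI_Type_commit(&dmm->dset.mpi_datatype);
  /* collective over dmm->mpi.comm: every rank exposes its mmap'ed cache
     region; disp_unit = esize so MPI_Put offsets are counted in elements */
  MPI_Win_create(dmm->mmap.buf, (MPI_Aint) nbytes, (int) dmm->dset.esize,
                 MPI_INFO_NULL, dmm->mpi.comm, &dmm->mpi.win);
}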
@@ -424,7 +426,6 @@ void *H5Dread_pthread_func(void *args) {
Create memory-mapped files on the local storage and attach them to MPI_Win
*/
void create_mmap_win(const char *prefix) {
setH5SSD(&SSD);
hsize_t ss = (H5DRMM.dset.size/PAGESIZE+1)*PAGESIZE;
if (strcmp("MEMORY", getenv("SSD_PATH"))!=0) {
strcpy(H5DRMM.mmap.fname, H5DRMM.ssd->path);
@@ -442,7 +443,6 @@ void create_mmap_win(const char *prefix) {
fsync(fh);
close(fh);
H5DRMM.mmap.fd = open(H5DRMM.mmap.fname, O_RDWR);
H5DRMM.mmap.buf = mmap(NULL, ss, PROT_READ | PROT_WRITE, MAP_SHARED, H5DRMM.mmap.fd, 0);
msync(H5DRMM.mmap.buf, ss, MS_SYNC);
} else {
@@ -461,6 +461,7 @@ void create_mmap_win(const char *prefix) {
hid_t H5Fopen_cache( const char *name, hid_t fcpl_id, hid_t fapl_id ) {
srand(time(NULL)); // Initialization, should only be called once.
setH5SSD(&SSD);
H5DRMM.ssd = &SSD;
MPI_Comm comm;
MPI_Info info;
H5Pget_fapl_mpio(fapl_id, &comm, &info);
@@ -480,13 +481,15 @@ hid_t H5Fopen_cache( const char *name, hid_t fcpl_id, hid_t fapl_id ) {
*/
hid_t H5Dopen_cache(hid_t loc_id, const char *name, hid_t dapl_id) {
hid_t dset = H5Dopen(loc_id, name, dapl_id);
int rc = pthread_create(&H5DRMM.io.pthread, NULL, H5Dread_pthread_func, NULL);
srand(time(NULL)); // Initialization, should only be called once.
pthread_cond_init(&H5DRMM.io.io_cond, NULL);
pthread_cond_init(&H5DRMM.io.master_cond, NULL);
pthread_mutex_init(&H5DRMM.io.request_lock, NULL);
H5DRMM.io.batch_cached = false;
H5DRMM.io.dset_cached =false;
pthread_mutex_lock(&H5DRMM.io.request_lock);
H5DRMM.io.batch_cached = true;
H5DRMM.io.dset_cached = false;
pthread_cond_signal(&H5DRMM.io.io_cond);
pthread_mutex_unlock(&H5DRMM.io.request_lock);
srand(time(NULL)); // Initialization, should only be called once.
H5DRMM.dset.h5_datatype = H5Dget_type(dset);
H5DRMM.dset.esize = H5Tget_size(H5DRMM.dset.h5_datatype);
hid_t fspace = H5Dget_space(dset);
@@ -507,6 +510,7 @@ hid_t H5Dopen_cache(hid_t loc_id, const char *name, hid_t dapl_id) {
double t0 = MPI_Wtime();
create_mmap_win(name);
double t1 = MPI_Wtime() - t0;
int rc = pthread_create(&H5DRMM.io.pthread, NULL, H5Dread_pthread_func, (void *) &H5DRMM);
if (io_node() == H5DRMM.mpi.rank && debug_level() > 1)
printf("Time for creating memory map files: %f seconds\n", t1);
free(gdims);
@@ -551,21 +555,24 @@ herr_t H5Dread_to_cache(hid_t dataset_id, hid_t mem_type_id,
herr_t H5Dread_cache(hid_t dataset_id, hid_t mem_type_id,
hid_t mem_space_id, hid_t file_space_id,
hid_t xfer_plist_id, void * dat) {
if (H5DRMM.dset.ns_cached>=H5DRMM.dset.ns_loc)
if (H5DRMM.dset.ns_cached>=H5DRMM.dset.ns_loc) {
return H5Dread_from_cache(dataset_id, mem_type_id, mem_space_id, file_space_id, xfer_plist_id, dat);
else
} else {
return H5Dread_to_cache(dataset_id, mem_type_id, mem_space_id, file_space_id, xfer_plist_id, dat);
}
}
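
H5Dread_cache above simply dispatches: read from the parallel file system (and stage to the SSD) until every local sample is cached, then serve reads from the cache. A hypothetical application-side sketch of that read path follows; it assumes float samples and that mspace/fspace select one batch per call.

#include "H5Dio_cache.h"   /* H5Fopen_cache, H5Dopen_cache, H5Dread_cache, ... */

void read_loop_sketch(const char *fname, const char *dname, hid_t fapl,
                      hid_t mspace, hid_t fspace, void *buf, int nepochs) {
  hid_t fd   = H5Fopen_cache(fname, H5P_DEFAULT, fapl);
  hid_t dset = H5Dopen_cache(fd, dname, H5P_DEFAULT);
  for (int e = 0; e < nepochs; e++) {
    /* first pass: H5Dread_to_cache path (stages data to the SSD);
       later passes: H5Dread_from_cache path (served from the MPI window) */
    H5Dread_cache(dset, H5T_NATIVE_FLOAT, mspace, fspace, H5P_DEFAULT, buf);
  }
  H5RPthreadWait();           /* make sure background caching has finished */
  H5Dclose_cache_read(dset);  /* close the dataset and clean up the cache  */
  H5Fclose(fd);
}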
herr_t H5Dread_from_cache(hid_t dataset_id, hid_t mem_type_id,
hid_t mem_space_id, hid_t file_space_id,
hid_t xfer_plist_id, void * dat) {
if (io_node()==H5DRMM.mpi.rank && debug_level()>1) {
printf("Reading data from cache (H5Dread_from_cache)\n");
}
}
bool contig = false;
BATCH b;
H5RPthreadWait();
get_samples_from_filespace(file_space_id, &b, &contig);
MPI_Win_fence(MPI_MODE_NOPUT | MPI_MODE_NOPRECEDE, H5DRMM.mpi.win);
char *p_mem = (char *) dat;
int batch_size = b.size;
......
@@ -11,7 +11,7 @@ include make.inc
all: test_read_cache test_write_cache prepare_dataset
test_read_cache: test_read_cache.o H5Dio_cache.o ../utils/debug.o ../utils/profiling.o
$(CXX) $(CFLAGS) -o $@ test_read_cache.o ../utils/debug.o ../utils/profiling.o $(HDF5_LIB) -L$(HDF5_ROOT)/../vol/ -lh5passthrough_vol
$(CXX) $(CFLAGS) -o $@ test_read_cache.o ../utils/debug.o ../utils/profiling.o $(HDF5_LIB) H5Dio_cache.o #-L$(HDF5_ROOT)/../vol/ -lh5passthrough_vol
test_vol: test_vol.o ../utils/debug.o
......
@@ -128,12 +128,11 @@ int main(int argc, char **argv) {
hid_t dset;
tt.start_clock("H5Dopen");
if (cache) {
dset = H5Dopen(fd, dataset, H5P_DEFAULT);
dset = H5Dopen_cache(fd, dataset, H5P_DEFAULT);
} else {
dset = H5Dopen(fd, dataset, H5P_DEFAULT);
}
tt.stop_clock("H5Dopen");
hid_t fspace = H5Dget_space(dset);
int ndims = H5Sget_simple_extent_ndims(fspace);
......
../hdf5/H5Dio_cache.c
\ No newline at end of file
/*
This is the header file for node-local storage incorporated into HDF5
*/
#ifndef H5DIO_CACHE_H_
#define H5DIO_CACHE_H_
#include "hdf5.h"
#include "mpi.h"
#ifndef MAXDIM
#define MAXDIM 32
#endif
// The metadata for the I/O thread to perform parallel writes
typedef struct _thread_data_t {
// we use a linked list in C to build the queue of I/O tasks
char fname[255];
hid_t dataset_id;
hid_t mem_type_id;
hid_t mem_space_id;
hid_t file_space_id;
hid_t xfer_plist_id;
int id;
hid_t offset; // offset in memory mapped file on SSD
hsize_t size;
void *buf;
struct _thread_data_t *next;
} thread_data_t;
// SSD related meta data
typedef struct _SSD_INFO {
double mspace_total;
double mspace_left;
double mspace_per_rank_total;
double mspace_per_rank_left;
char path[255];
hsize_t offset;
} SSD_INFO;
// MPI infos
typedef struct _MPI_INFO {
int rank;
int nproc;
int local_rank; // rank id in the local communicator
int ppn; // number of processes per node
MPI_Comm comm; // global communicator
MPI_Comm node_comm; // node local communicator
MPI_Win win;
} MPI_INFO;
// I/O threads
typedef struct _IO_THREAD {
pthread_cond_t master_cond;
pthread_cond_t io_cond;
pthread_mutex_t request_lock;
int num_request; // for parallel write
thread_data_t *request_list, *current_request, *first_request; // task queue
bool batch_cached; // for parallel read: whether the current batch has been cached to the SSD
bool dset_cached; // whether the entire dataset is cached to SSD or not.
pthread_t pthread;
} IO_THREAD;
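// Handshake (inferred from H5Dwrite_pthread_func / H5Dread_pthread_func in
// this commit): the background thread signals master_cond and sleeps on
// io_cond once the queue is drained or the current batch is cached; the main
// thread posts work, signals io_cond, and presumably blocks on master_cond
// inside H5WPthreadWait() / H5RPthreadWait().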
// Memory mapped files
typedef struct _MMAP {
// for write
int fd; // file descriptor of the memory-mapped file (write path)
char fname[255]; // full path of the memory-mapped file
void *buf; // pointer to the memory-mapped region of the file
void *tmp_buf; // temporary buffer for parallel read: the read buffer is copied here so H5Dread_to_cache can return while the background thread writes the data to the SSD
} MMAP;
// Dataset
typedef struct _SAMPLE {
size_t dim; // the number of dimensions of a sample
size_t size; // the size of the sample in bytes
size_t nel; // the number of elements per sample
} SAMPLE;
typedef struct _BATCH {
int *list;
int size;
} BATCH;
typedef struct _DSET {
SAMPLE sample;
size_t ns_loc; // number of samples per rank
size_t ns_glob; // total number of samples
size_t s_offset; // starting sample offset for this rank
hsize_t size; // the size of the entire dataset in bytes.
BATCH batch; // batch data to read
int ns_cached; // number of this rank's samples already cached to the SSD
bool contig_read; // whether the batch of data to read is contiguous or not
MPI_Datatype mpi_datatype; // the constructed MPI datatype (one element)
hid_t h5_datatype; // the HDF5 datatype of the dataset
size_t esize; // the size of an element in bytes.
} DSET;
typedef struct _H5Dwrite_cache_metadata {
MMAP mmap;
MPI_INFO mpi;
IO_THREAD io;
SSD_INFO *ssd;
} H5Dwrite_cache_metadata;
typedef struct _H5Dread_cache_metadata {
MMAP mmap;
MPI_INFO mpi;
IO_THREAD io;
DSET dset;
SSD_INFO *ssd;
} H5Dread_cache_metadata;
/**************************************
* Function APIs for parallel write *
**************************************/
// Create HDF5 file: create memory mapped file on the SSD
#if __cplusplus
extern "C" {
#endif
hid_t H5Fcreate_cache( const char *name, unsigned flags,
hid_t fcpl_id, hid_t fapl_id );
// Close HDF5 file: clean up the memory mapped file
herr_t H5Fclose_cache( hid_t file_id );
// The main program writes the dataset, and the I/O thread performs the data migration
herr_t H5Dwrite_cache(hid_t dset_id, hid_t mem_type_id,
hid_t mem_space_id, hid_t file_space_id,
hid_t dxpl_id, const void *buf);
// Close the dataset, property list, etc.: check whether all the tasks have finished
herr_t H5Dclose_cache( hid_t id);
herr_t H5Sclose_cache( hid_t id);
herr_t H5Pclose_cache( hid_t id);
/****************************************
* Function APIs for Parallel read *
****************************************/
// Open an existing HDF5 file and set up the node-local read cache
hid_t H5Fopen_cache(const char *name, hid_t fcpl_id, hid_t fapl_id);
// Open the dataset, create memory mapped file
hid_t H5Dopen_cache(hid_t loc_id, const char *name, hid_t dapl_id);
// Read one batch of the dataset from the parallel file system; the I/O thread then writes it to the SSD
herr_t H5Dread_to_cache(hid_t dataset_id, hid_t mem_type_id, hid_t mem_space_id, hid_t file_space_id, hid_t xfer_plist_id, void * buf);
herr_t H5Dread_cache(hid_t dataset_id, hid_t mem_type_id, hid_t mem_space_id, hid_t file_space_id, hid_t xfer_plist_id, void * buf);
// Read one batch of the dataset from the SSD cache
herr_t H5Dread_from_cache(hid_t dataset_id, hid_t mem_type_id, hid_t mem_space_id, hid_t file_space_id, hid_t xfer_plist_id, void * buf);
// close the dataset
herr_t H5Dclose_cache_read(hid_t dset);
/****************************************
* I/O thread sync functions *
****************************************/
// waiting for the write (or read) I/O thread to finish the work
void H5WPthreadWait();
void H5RPthreadWait();
// terminate (join) the write (or read) I/O thread after all the tasks have finished
void H5WPthreadTerminate();
void H5RPthreadTerminate();
/***************************************
* Other utils functions *
***************************************/
herr_t H5DRMMF_remap(); // remap the memory mapped file to remove the cache effect
// set hyperslab selection given the sample list
void set_hyperslab_from_samples(int *samples, int nsample, hid_t *fspace);
// get the list of the samples from the filespace
void get_samples_from_filespace(hid_t fspace, BATCH *samples, bool *contiguous);
// get the buffer size from the mspace and type ids.
hsize_t get_buf_size(hid_t mspace, hid_t tid);
void parallel_dist(size_t dim, int nproc, int rank, size_t *ldim, size_t *start);
void setH5SSD(SSD_INFO *ssd);
#if __cplusplus
}
#endif
#endif //H5Dio_cache.h
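
A hypothetical application-side sketch of the write path, strictly in terms of the API declared above: H5Fcreate_cache starts the background I/O thread, H5Dwrite_cache returns once the data is staged on the node-local storage, and H5WPthreadWait blocks until every pending request has been migrated. Dataset name, dataspace handling, and the use of H5S_ALL are illustrative only.

#include "H5Dio_cache.h"   /* H5Fcreate_cache, H5Dwrite_cache, ... */

void write_loop_sketch(const char *fname, hid_t fapl, hid_t fspace,
                       const float *buf, int nsteps) {
  hid_t fd   = H5Fcreate_cache(fname, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
  hid_t dset = H5Dcreate(fd, "data", H5T_NATIVE_FLOAT, fspace,
                         H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
  for (int i = 0; i < nsteps; i++)
    /* returns after staging buf on the node-local storage; the I/O thread
       performs the actual H5Dwrite in the background (selections elided) */
    H5Dwrite_cache(dset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf);
  H5WPthreadWait();      /* block until all staged writes have been flushed;
                            H5WPthreadTerminate() can join the thread explicitly */
  H5Dclose_cache(dset);
  H5Fclose_cache(fd);    /* clean up the memory-mapped file */
}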
../hdf5/H5Dio_cache.h
\ No newline at end of file
@@ -86,7 +86,7 @@ typedef struct H5VL_pass_through_ext_t {
hid_t under_vol_id; /* ID for underlying VOL connector */
void *under_object; /* Info object for underlying VOL connector */
H5Dread_cache_metadata *H5DRMM;
H5Dwrite_cache_metadata *H5DRMM;
H5Dwrite_cache_metadata *H5DWMM;
bool read_cache;
bool write_cache;
} H5VL_pass_through_ext_t;
@@ -1180,7 +1180,8 @@ H5VL_pass_through_ext_dataset_open(void *obj, const H5VL_loc_params_t *loc_param
} /* end if */
else
dset = NULL;
int rc = pthread_create(&H5DRMM.io.pthread, NULL, H5Dread_pthread_func, NULL);
int rc = pthread_create(&dset->H5DRMM->io.pthread, NULL, H5Dread_pthread_func, dset->H5DRMM); // pass the per-dataset metadata, as in H5Dio_cache.c
printf("dataset open: %d %d %d\n", H5DRMM.mpi.rank, H5DRMM.mpi.nproc, H5DRMM.mpi.nproc);
srand(time(NULL)); // Initialization, should only be called once.
/*
@@ -1709,6 +1710,9 @@ H5VL_pass_through_ext_file_create(const char *name, unsigned flags, hid_t fcpl_i
MPI_Comm comm, comm_dup;
MPI_Info mpi_info;
H5Pget_fapl_mpio(fapl_id, &comm, &mpi_info);
pthread_cond_init(&file->H5DWMM.io.io_cond, NULL);
pthread_cond_init(&file->H5DWMM.io.master_cond, NULL);
pthread_mutex_init(&file->H5DWMM.io.request_lock, NULL);
MPI_Comm_dup(comm, &file->H5DWMM.mpi.comm);
MPI_Comm_rank(comm, &file->H5DWMM.mpi.rank);
MPI_Comm_size(comm, &file->H5DWMM.mpi.nproc);
@@ -1722,7 +1726,7 @@ H5VL_pass_through_ext_file_create(const char *name, unsigned flags, hid_t fcpl_i
strcat(file->H5DWMM.mmap.fname, "-");
sprintf(rnd, "%d", file->H5DWMM.mpi.rank);
strcat(file->H5DWMM.mmap.fname, rnd);
H5DWMM.io.request_list = (thread_data_t*) malloc(sizeof(thread_data_t));
file->H5DWMM.io.request_list = (thread_data_t*) malloc(sizeof(thread_data_t));
pthread_mutex_lock(&file->H5DWMM.io.request_lock);
file->H5DWMM.io.request_list->id = 0;
file->H5DWMM.io.current_request = file->H5DWMM.io.request_list;
@@ -1730,7 +1734,7 @@ H5VL_pass_through_ext_file_create(const char *name, unsigned flags, hid_t fcpl_i
pthread_mutex_unlock(&file->H5DWMM.io.request_lock);
file->H5DWMM.ssd->mspace_per_rank_total = file->H5DWMM.ssd->mspace_total / file->H5DWMM.mpi.ppn;
file->H5DWMM.ssd->mspace_per_rank_left = file->H5DWMM.ssd->mspace_per_rank_total;
file->H5DWMM.mmap.fd = open(H5DWMM.mmap.fname, O_RDWR | O_CREAT | O_TRUNC, 0644);
file->H5DWMM.mmap.fd = open(file->H5DWMM.mmap.fname, O_RDWR | O_CREAT | O_TRUNC, 0644);
file->write_cache = true;
return (void *)file;
} /* end H5VL_pass_through_ext_file_create() */
......