Commit 2e912409 authored by Shane Snyder

add benchmarking hooks for generating logs

parent d7c2e81c
This source diff could not be displayed because it is too large.
@@ -63,6 +63,9 @@
/* Define to the one symbol short name of this package. */
#undef PACKAGE_TARNAME
/* Define to the home page for this package. */
#undef PACKAGE_URL
/* Define to the version of this package. */
#undef PACKAGE_VERSION
@@ -93,7 +93,7 @@ extern char* darshan_path_exclusions[]; /* defined in lib/darshan-core.c */
* environment, allowing the module to store I/O characterization data.
* 'funcs' is a pointer to a structure containing each of the function
* pointers required by darshan-core to shut down the module. The function
- * returns the following integers passed in as pointers: 'my_rank' is the
+ * returns the following integers passed in as pointers: 'rank' is the
* MPI rank of the calling process, 'mod_mem_limit' is the maximum amount
* of memory the module may use, and 'sys_mem_alignment' is the memory
* alignment value Darshan was configured with.
@@ -101,7 +101,7 @@ extern char* darshan_path_exclusions[]; /* defined in lib/darshan-core.c */
void darshan_core_register_module(
darshan_module_id mod_id,
struct darshan_module_funcs *funcs,
- int *my_rank,
+ int *rank,
int *mod_mem_limit,
int *sys_mem_alignment);
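For orientation, a minimal sketch of a module registering itself against the revised prototype. The module ID, variable names, and the funcs argument here are illustrative placeholders, not code from this diff:

static int my_rank;
static int mem_limit;
static int mem_alignment;

static void example_module_initialize(struct darshan_module_funcs *mod_fns)
{
    /* the 'rank' out-parameter may be NULL; the new implementation
     * guards the store with if(rank) */
    darshan_core_register_module(DARSHAN_POSIX_MOD, mod_fns,
        &my_rank, &mem_limit, &mem_alignment);
}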
@@ -1507,12 +1507,91 @@ static void darshan_core_cleanup(struct darshan_core_runtime* core)
return;
}
/* crude benchmarking hook into darshan-core for measuring Darshan's
 * shutdown overhead under a variety of application I/O workloads
 */
extern void darshan_posix_shutdown_bench_setup(int test_case);
extern void darshan_mpiio_shutdown_bench_setup(int test_case);
void darshan_shutdown_bench(int argc, char **argv)
{
/* clear out existing core runtime structure */
if(darshan_core)
{
darshan_core_cleanup(darshan_core);
darshan_core = NULL;
}
/***********************************************************/
/* restart darshan */
darshan_core_initialize(argc, argv);
darshan_posix_shutdown_bench_setup(1);
darshan_mpiio_shutdown_bench_setup(1);
if(my_rank == 0)
printf("# 1 unique file per proc\n");
DARSHAN_MPI_CALL(PMPI_Barrier)(MPI_COMM_WORLD);
darshan_core_shutdown();
darshan_core = NULL;
sleep(1);
/***********************************************************/
/* restart darshan */
darshan_core_initialize(argc, argv);
darshan_posix_shutdown_bench_setup(2);
darshan_mpiio_shutdown_bench_setup(2);
if(my_rank == 0)
printf("# 1 shared file per proc\n");
DARSHAN_MPI_CALL(PMPI_Barrier)(MPI_COMM_WORLD);
darshan_core_shutdown();
darshan_core = NULL;
sleep(1);
/***********************************************************/
/* restart darshan */
darshan_core_initialize(argc, argv);
darshan_posix_shutdown_bench_setup(3);
darshan_mpiio_shutdown_bench_setup(3);
if(my_rank == 0)
printf("# 1024 unique files per proc\n");
DARSHAN_MPI_CALL(PMPI_Barrier)(MPI_COMM_WORLD);
darshan_core_shutdown();
darshan_core = NULL;
sleep(1);
/***********************************************************/
/* restart darshan */
darshan_core_initialize(argc, argv);
darshan_posix_shutdown_bench_setup(4);
darshan_mpiio_shutdown_bench_setup(4);
if(my_rank == 0)
printf("# 1024 shared files per proc\n");
DARSHAN_MPI_CALL(PMPI_Barrier)(MPI_COMM_WORLD);
darshan_core_shutdown();
darshan_core = NULL;
sleep(1);
/***********************************************************/
return;
}
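Each case above repeats the same restart/setup/report/shutdown sequence; a hypothetical helper capturing that pattern (purely illustrative, not part of this commit, using the same my_rank and darshan_core globals as the code above) might look like:

static void bench_run_case(int argc, char **argv, int test_case,
    const char *label)
{
    darshan_core_initialize(argc, argv);
    darshan_posix_shutdown_bench_setup(test_case);
    darshan_mpiio_shutdown_bench_setup(test_case);
    if(my_rank == 0)
        printf("# %s\n", label);
    DARSHAN_MPI_CALL(PMPI_Barrier)(MPI_COMM_WORLD);
    darshan_core_shutdown();
    darshan_core = NULL;
    sleep(1); /* let the previous shutdown's log I/O settle */
}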
/* ********************************************************* */
void darshan_core_register_module(
darshan_module_id mod_id,
struct darshan_module_funcs *funcs,
- int *my_rank,
+ int *rank,
int *mod_mem_limit,
int *sys_mem_alignment)
{
@@ -1554,7 +1633,8 @@ void darshan_core_register_module(
darshan_core->log_header.mod_ver[mod_id] = darshan_module_versions[mod_id];
/* get the calling process's rank */
- DARSHAN_MPI_CALL(PMPI_Comm_rank)(MPI_COMM_WORLD, my_rank);
+ if(rank)
+ *rank = my_rank;
/* set the maximum amount of memory this module can use */
mod_mem_str = getenv(DARSHAN_MOD_MEM_OVERRIDE);
@@ -129,6 +129,33 @@ static void mpiio_shutdown(void);
#define MPIIO_LOCK() pthread_mutex_lock(&mpiio_runtime_mutex)
#define MPIIO_UNLOCK() pthread_mutex_unlock(&mpiio_runtime_mutex)
#define MPIIO_RECORD_OPEN(__ret, __path, __fh, __comm, __mode, __info, __tm1, __tm2) do { \
struct mpiio_file_runtime* file; \
char *exclude; \
int tmp_index = 0; \
int comm_size; \
if(__ret != MPI_SUCCESS) break; \
/* skip paths matching a configured exclusion prefix */ \
while((exclude = darshan_path_exclusions[tmp_index])) { \
if(!(strncmp(exclude, __path, strlen(exclude)))) \
break; \
tmp_index++; \
} \
if(exclude) break; \
file = mpiio_file_by_name_setfh(__path, __fh); \
if(!file) break; \
file->file_record->counters[MPIIO_MODE] = __mode; \
/* a single-process communicator counts as an independent open */ \
DARSHAN_MPI_CALL(PMPI_Comm_size)(__comm, &comm_size); \
if(comm_size == 1) \
file->file_record->counters[MPIIO_INDEP_OPENS] += 1; \
else \
file->file_record->counters[MPIIO_COLL_OPENS] += 1; \
if(__info != MPI_INFO_NULL) \
file->file_record->counters[MPIIO_HINTS] += 1; \
/* only the first open of a file sets its open timestamp */ \
if(file->file_record->fcounters[MPIIO_F_OPEN_TIMESTAMP] == 0) \
file->file_record->fcounters[MPIIO_F_OPEN_TIMESTAMP] = __tm1; \
DARSHAN_TIMER_INC_NO_OVERLAP(file->file_record->fcounters[MPIIO_F_META_TIME], __tm1, __tm2, file->last_meta_end); \
} while(0)
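The do { ... } while(0) wrapper makes the macro a single statement and lets it bail out early with break (on a failed call, an excluded path, or a failed record lookup). Because the record logic now lives in a macro rather than inside the wrapper, callers can synthesize records without performing real I/O; a sketch with fabricated values, mirroring the benchmark code later in this commit (the handle, path, and timestamps are illustrative):

MPI_File fake_fh = (MPI_File)(intptr_t)0; /* fabricated handle, no real file */

MPIIO_LOCK();
mpiio_runtime_initialize();
MPIIO_RECORD_OPEN(MPI_SUCCESS, "bench-file", fake_fh, MPI_COMM_SELF,
    2, MPI_INFO_NULL, 0, 1);
MPIIO_UNLOCK();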
#define MPIIO_RECORD_READ(__ret, __fh, __count, __datatype, __counter, __tm1, __tm2) do { \
struct mpiio_file_runtime* file; \
int size = 0; \
@@ -190,20 +217,13 @@ int MPI_File_open(MPI_Comm comm, char *filename, int amode, MPI_Info info, MPI_F
#endif
{
int ret;
- struct mpiio_file_runtime* file;
char* tmp;
- int comm_size;
double tm1, tm2;
tm1 = darshan_core_wtime();
ret = DARSHAN_MPI_CALL(PMPI_File_open)(comm, filename, amode, info, fh);
tm2 = darshan_core_wtime();
- if(ret == MPI_SUCCESS)
- {
- MPIIO_LOCK();
- mpiio_runtime_initialize();
/* use ROMIO approach to strip prefix if present */
/* strip off prefix if there is one, but only skip prefixes
* if they are greater than length one to allow for windows
@@ -214,33 +234,10 @@ int MPI_File_open(MPI_Comm comm, char *filename, int amode, MPI_Info info, MPI_F
filename = tmp + 1;
}
- file = mpiio_file_by_name_setfh(filename, (*fh));
- if(file)
- {
- file->file_record->counters[MPIIO_MODE] = amode;
- DARSHAN_MPI_CALL(PMPI_Comm_size)(comm, &comm_size);
- if(comm_size == 1)
- {
- file->file_record->counters[MPIIO_INDEP_OPENS] += 1;
- }
- else
- {
- file->file_record->counters[MPIIO_COLL_OPENS] += 1;
- }
- if(info != MPI_INFO_NULL)
- {
- file->file_record->counters[MPIIO_HINTS] += 1;
- }
- if(file->file_record->fcounters[MPIIO_F_OPEN_TIMESTAMP] == 0)
- file->file_record->fcounters[MPIIO_F_OPEN_TIMESTAMP] = tm1;
- DARSHAN_TIMER_INC_NO_OVERLAP(
- file->file_record->fcounters[MPIIO_F_META_TIME],
- tm1, tm2, file->last_meta_end);
- }
- }
+ MPIIO_LOCK();
+ mpiio_runtime_initialize();
+ MPIIO_RECORD_OPEN(ret, filename, (*fh), comm, amode, info, tm1, tm2);
+ MPIIO_UNLOCK();
return(ret);
}
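The simplified wrapper above follows the shape the wrappers in this file generally share: timestamp, delegate to the PMPI routine, timestamp, then record under the module lock. Sketched generically (the operation and its PMPI name are hypothetical):

int MPI_File_example_op(MPI_File fh)
{
    int ret;
    double tm1, tm2;

    tm1 = darshan_core_wtime();
    ret = DARSHAN_MPI_CALL(PMPI_File_example_op)(fh); /* hypothetical PMPI call */
    tm2 = darshan_core_wtime();

    MPIIO_LOCK();
    mpiio_runtime_initialize();
    /* the MPIIO_RECORD_* macro matching the operation goes here */
    MPIIO_UNLOCK();
    return(ret);
}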
@@ -1286,6 +1283,83 @@ static void mpiio_shared_record_variance(MPI_Comm mod_comm,
return;
}
/* mpiio module shutdown benchmark routine */
void darshan_mpiio_shutdown_bench_setup(int test_case)
{
char filepath[256];
MPI_File *fh_array;
int64_t *size_array;
int i;
intptr_t j;
if(mpiio_runtime)
mpiio_shutdown();
mpiio_runtime_initialize();
srand(my_rank);
fh_array = malloc(1024 * sizeof(MPI_File));
size_array = malloc(DARSHAN_COMMON_VAL_MAX_RUNTIME_COUNT * sizeof(int64_t));
assert(fh_array && size_array);
/* MPI_File is an opaque handle type (a pointer in some MPI
 * implementations), so fabricate unique fake handles by casting
 * small integers through intptr_t */
for(j = 0; j < 1024; j++)
fh_array[j] = (MPI_File)j;
/* random sizes so the recorded accesses are not all identical */
for(i = 0; i < DARSHAN_COMMON_VAL_MAX_RUNTIME_COUNT; i++)
size_array[i] = rand();
switch(test_case)
{
case 1: /* single file-per-process */
snprintf(filepath, 256, "fpp-0_rank-%d", my_rank);
MPIIO_RECORD_OPEN(MPI_SUCCESS, filepath, fh_array[0], MPI_COMM_SELF,
2, MPI_INFO_NULL, 0, 1);
MPIIO_RECORD_WRITE(MPI_SUCCESS, fh_array[0], size_array[0], MPI_BYTE,
MPIIO_INDEP_WRITES, 1, 2);
break;
case 2: /* single shared file */
snprintf(filepath, 256, "shared-0");
MPIIO_RECORD_OPEN(MPI_SUCCESS, filepath, fh_array[0], MPI_COMM_WORLD,
2, MPI_INFO_NULL, 0, 1);
MPIIO_RECORD_WRITE(MPI_SUCCESS, fh_array[0], size_array[0], MPI_BYTE,
MPIIO_COLL_WRITES, 1, 2);
break;
case 3: /* 1024 unique files per proc */
for(i = 0; i < 1024; i++)
{
snprintf(filepath, 256, "fpp-%d_rank-%d", i , my_rank);
MPIIO_RECORD_OPEN(MPI_SUCCESS, filepath, fh_array[i], MPI_COMM_SELF,
2, MPI_INFO_NULL, 0, 1);
MPIIO_RECORD_WRITE(MPI_SUCCESS, fh_array[i],
size_array[i % DARSHAN_COMMON_VAL_MAX_RUNTIME_COUNT],
MPI_BYTE, MPIIO_INDEP_WRITES, 1, 2);
}
break;
case 4: /* 1024 shared files */
for(i = 0; i < 1024; i++)
{
snprintf(filepath, 256, "shared-%d", i);
MPIIO_RECORD_OPEN(MPI_SUCCESS, filepath, fh_array[i], MPI_COMM_WORLD,
2, MPI_INFO_NULL, 0, 1);
MPIIO_RECORD_WRITE(MPI_SUCCESS, fh_array[i],
size_array[i % DARSHAN_COMMON_VAL_MAX_RUNTIME_COUNT],
MPI_BYTE, MPIIO_COLL_WRITES, 1, 2);
}
break;
default:
fprintf(stderr, "Error: invalid Darshan benchmark test case.\n");
break;
}
free(fh_array);
free(size_array);
return;
}
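A sketch of timing one synthesized workload end to end, in the style of darshan_shutdown_bench() above; the PMPI_Wtime-based measurement and the label are illustrative, not part of this diff, and argc/argv plus an initialized MPI environment are assumed:

double t_start, t_end;

darshan_core_initialize(argc, argv);
darshan_mpiio_shutdown_bench_setup(2); /* one file shared by all ranks */
DARSHAN_MPI_CALL(PMPI_Barrier)(MPI_COMM_WORLD); /* line ranks up first */
t_start = PMPI_Wtime();
darshan_core_shutdown(); /* the phase being benchmarked */
t_end = PMPI_Wtime();
if(my_rank == 0)
    printf("# shared file shutdown: %f seconds\n", t_end - t_start);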
/**************************************************************************
* Functions exported by MPI-IO module for coordinating with darshan-core *
**************************************************************************/
@@ -1452,6 +1526,7 @@ static void mpiio_shutdown()
free(mpiio_runtime->file_record_array);
free(mpiio_runtime);
mpiio_runtime = NULL;
instrumentation_disabled = 0; /* re-arm instrumentation so the benchmark hooks can re-initialize the module */
return;
}
@@ -2017,6 +2017,78 @@ static void posix_shared_record_variance(MPI_Comm mod_comm,
return;
}
/* posix module shutdown benchmark routine */
void darshan_posix_shutdown_bench_setup(int test_case)
{
char filepath[256];
int *fd_array;
int64_t *size_array;
int i;
if(posix_runtime)
posix_shutdown();
posix_runtime_initialize();
srand(my_rank);
fd_array = malloc(1024 * sizeof(int));
size_array = malloc(DARSHAN_COMMON_VAL_MAX_RUNTIME_COUNT * sizeof(int64_t));
assert(fd_array && size_array);
for(i = 0; i < 1024; i++)
fd_array[i] = i;
for(i = 0; i < DARSHAN_COMMON_VAL_MAX_RUNTIME_COUNT; i++)
size_array[i] = rand();
switch(test_case)
{
case 1: /* single file-per-process */
snprintf(filepath, 256, "fpp-0_rank-%d", my_rank);
POSIX_RECORD_OPEN(fd_array[0], filepath, 777, 0, 0, 1);
POSIX_RECORD_WRITE(size_array[0], fd_array[0], 0, 0, 1, 0, 1, 2);
break;
case 2: /* single shared file */
snprintf(filepath, 256, "shared-0");
POSIX_RECORD_OPEN(fd_array[0], filepath, 777, 0, 0, 1);
POSIX_RECORD_WRITE(size_array[0], fd_array[0], 0, 0, 1, 0, 1, 2);
break;
case 3: /* 1024 unique files per proc */
for(i = 0; i < 1024; i++)
{
snprintf(filepath, 256, "fpp-%d_rank-%d", i , my_rank);
POSIX_RECORD_OPEN(fd_array[i], filepath, 777, 0, 0, 1);
POSIX_RECORD_WRITE(size_array[i % DARSHAN_COMMON_VAL_MAX_RUNTIME_COUNT],
fd_array[i], 0, 0, 1, 0, 1, 2);
}
break;
case 4: /* 1024 shared files */
for(i = 0; i < 1024; i++)
{
snprintf(filepath, 256, "shared-%d", i);
POSIX_RECORD_OPEN(fd_array[i], filepath, 777, 0, 0, 1);
POSIX_RECORD_WRITE(size_array[i % DARSHAN_COMMON_VAL_MAX_RUNTIME_COUNT],
fd_array[i], 0, 0, 1, 0, 1, 2);
}
break;
default:
fprintf(stderr, "Error: invalid Darshan benchmark test case.\n");
break;
}
free(fd_array);
free(size_array);
return;
}
/************************************************************************
* Functions exported by this module for coordinating with darshan-core *
************************************************************************/
@@ -2189,6 +2261,7 @@ static void posix_shutdown()
free(posix_runtime->file_record_array);
free(posix_runtime);
posix_runtime = NULL;
instrumentation_disabled = 0; /* re-arm instrumentation so the benchmark hooks can re-initialize the module */
return;
}
/*
- * (C) 2009 by Argonne National Laboratory.
+ * (C) 2015 by Argonne National Laboratory.
* See COPYRIGHT in top-level directory.
*/
@@ -20,16 +20,11 @@
* benchmarking hooks for us. This should only be used by special-purpose
* benchmarking tools.
*/
- void darshan_shutdown_bench(int argc, char** argv, int rank, int nprocs);
+ void darshan_shutdown_bench(int argc, char** argv);
int main(int argc, char **argv)
{
- int nprocs;
- int mynod;
MPI_Init(&argc, &argv);
- MPI_Comm_rank(MPI_COMM_WORLD, &mynod);
- MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
if(argc != 1)
{
@@ -38,7 +33,7 @@ int main(int argc, char **argv)
return(-1);
}
- darshan_shutdown_bench(argc, argv, mynod, nprocs);
+ darshan_shutdown_bench(argc, argv);
MPI_Finalize();
return(0);