Commit 2c6c98e0 authored by fisaila's avatar fisaila

Added counters for MPI activity. Following steps worked:

1. Added the counters in darshan-log-format.h
2. Added the names of the counters in darshan-util/darshan-logutils.c
3. Added the new profiled function declarations (e.g., PMPI_Send) in darshan-runtime/darshan-dynamic.h
 - DARSHAN_EXTERN_DECL(...)
4. Added the new profiled function declarations (e.g., PMPI_Send) in darshan-runtime/lib/darshan-mpi-init-finalize.c 
 - DARSHAN_FORWARD_DECL(...)
 - MAP_OR_FAIL(...)
5. Updated the reducer in darshan-runtime/lib/darshan-mpi-io.c in the function: 
 static void darshan_file_reduce(void* infile_v, 
    void* inoutfile_v, int *len, 
    MPI_Datatype *datatype);




git-svn-id: https://svn.mcs.anl.gov/repos/darshan/branches/darshan-florin-extensions-addcounters@827 3b7491f3-a168-0410-bf4b-c445ed680a29
parent 20fd9625
...@@ -217,6 +217,63 @@ enum darshan_indices ...@@ -217,6 +217,63 @@ enum darshan_indices
CP_SLOWEST_RANK, CP_SLOWEST_RANK,
CP_SLOWEST_RANK_BYTES, CP_SLOWEST_RANK_BYTES,
CP_MPI_SENDS,
CP_MPI_RECVS,
CP_MPI_ISENDS,
CP_MPI_IRECVS,
CP_MPI_WAITS,
CP_MPI_TESTS,
CP_MPI_WAITANYS,
CP_MPI_TESTANYS,
CP_MPI_WAITALLS,
CP_MPI_TESTALLS,
CP_MPI_BARRIERS,
CP_MPI_BCASTS,
CP_MPI_GATHERS,
CP_MPI_GATHERVS,
CP_MPI_SCATHERS,
CP_MPI_SCATHERVS,
CP_MPI_ALLGATHERS,
CP_MPI_ALLGATHERVS,
CP_MPI_ALLTOALLS,
CP_MPI_ALLTOALLVS,
CP_MPI_REDUCES,
CP_MPI_ALLREDUCES,
CP_BYTES_MPI_SEND,
CP_BYTES_MPI_RECV,
CP_BYTES_MPI_ISEND,
CP_BYTES_MPI_IRECV,
CP_BYTES_MPI_BCAST,
CP_BYTES_MPI_GATHER,
CP_BYTES_MPI_GATHERV,
CP_BYTES_MPI_SCATHER,
CP_BYTES_MPI_SCATHERV,
CP_BYTES_MPI_ALLGATHER,
CP_BYTES_MPI_ALLGATHERV,
CP_BYTES_MPI_ALLTOALL,
CP_BYTES_MPI_ALLTOALLV,
CP_BYTES_MPI_REDUCE,
CP_BYTES_MPI_ALLREDUCE,
CP_AVG_MEM_DTYPE_SIZE,
CP_AVG_MEM_DTYPE_EXTENT,
CP_AVG_MEM_DTYPE_BLOCKS,
CP_AVG_FILE_DTYPE_EXTENT,
CP_AVG_FILE_DTYPE_BLOCKS,
CP_MAX_MEM_DTYPE_SIZE,
CP_MAX_MEM_DTYPE_EXTENT,
CP_MAX_MEM_DTYPE_BLOCKS,
CP_MAX_FILE_OFFSET,
CP_MAX_FILE_DTYPE_EXTENT,
CP_MAX_FILE_DTYPE_BLOCKS,
CP_MIN_MEM_DTYPE_SIZE,
CP_MIN_MEM_DTYPE_EXTENT,
CP_MIN_MEM_DTYPE_BLOCKS,
CP_MIN_FILE_OFFSET,
CP_MIN_FILE_DTYPE_EXTENT,
CP_MIN_FILE_DTYPE_BLOCKS,
CP_NUM_INDICES, CP_NUM_INDICES,
}; };
......
This diff is collapsed.
This diff is collapsed.
...@@ -8,7 +8,7 @@ CFLAGS = -O0 -g -Wall -Wl,-rpath,../../darshan-runtime/lib ...@@ -8,7 +8,7 @@ CFLAGS = -O0 -g -Wall -Wl,-rpath,../../darshan-runtime/lib
CFLAGS += -I../../darshan-runtime/lib -I. -I../../darshan-runtime -I../../darshan-florin-extensions CFLAGS += -I../../darshan-runtime/lib -I. -I../../darshan-runtime -I../../darshan-florin-extensions
#LDFLAGS = -Wl,-wrap,MPI_Init #LDFLAGS = -Wl,-wrap,MPI_Init
LDFLAGS = -L../../darshan-runtime/lib -ldarshan LDFLAGS = -Wl,--no-as-needed -ldl -L../../darshan-runtime/lib -ldarshan
#-L/home/fisaila/software/darshan-florin-extensions/darshan-runtime/lib -ldarshan-posix #-rdynamic -L/home/fisaila/software/darshan-florin-extensions/darshan-runtime/lib -ldarshan #-L/home/fisaila/software/darshan-florin-extensions/darshan-runtime/lib -ldarshan-posix #-rdynamic -L/home/fisaila/software/darshan-florin-extensions/darshan-runtime/lib -ldarshan
...@@ -18,7 +18,7 @@ LIBS = ...@@ -18,7 +18,7 @@ LIBS =
SRCS = $(patsubst %.o,%.c,$(OBJS)) SRCS = $(patsubst %.o,%.c,$(OBJS))
PRGS = writef PRGS = writef_noepoch writef counters coll_perf
all: $(PRGS) all: $(PRGS)
......
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
/*
* (C) 2001 by Argonne National Laboratory.
* See COPYRIGHT in top-level directory.
*/
#include "mpi.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <darshan-ext.h>
/* The file name is taken as a command-line argument. */
/* Measures the I/O bandwidth for writing/reading a 3D
block-distributed array to a file corresponding to the global array
in row-major (C) order.
Note that the file access pattern is noncontiguous.
Array size 128^3. For other array sizes, change array_of_gsizes below.*/
/*
 * Benchmark: measures collective MPI-IO bandwidth for writing and then
 * reading a 3D block-distributed array to/from a file stored in
 * row-major (C) order.  The per-process file access pattern is
 * noncontiguous.  Global array size is 128^3 ints; change
 * array_of_gsizes below for other sizes.
 *
 * Usage: coll_perf -fname <filename>
 *
 * The timed write/read phases are bracketed by darshan_start_epoch()
 * and darshan_end_epoch() (declared in darshan-ext.h) so that Darshan
 * counters can be attributed to each phase.
 */
int main(int argc, char **argv)
{
    MPI_Datatype newtype;
    int i, ndims, array_of_gsizes[3], array_of_distribs[3];
    int order, nprocs, len, *buf, bufcount, mynod;
    int array_of_dargs[3], array_of_psizes[3];
    MPI_File fh;
    MPI_Status status;
    double stim, write_tim, new_write_tim, write_bw;
    double read_tim, new_read_tim, read_bw;
    char *filename;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &mynod);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    /* process 0 takes the file name as a command-line argument and
       broadcasts it to other processes */
    if (!mynod) {
        i = 1;
        while ((i < argc) && strcmp("-fname", *argv)) {
            i++;
            argv++;
        }
        if (i >= argc) {
            fprintf(stderr, "\n*# Usage: coll_perf -fname filename\n\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
        argv++;
        len = strlen(*argv);
        filename = (char *) malloc(len+1);
        if (!filename) {   /* fixed: malloc result was previously unchecked */
            fprintf(stderr, "malloc failed\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
        strcpy(filename, *argv);
        MPI_Bcast(&len, 1, MPI_INT, 0, MPI_COMM_WORLD);
        MPI_Bcast(filename, len+1, MPI_CHAR, 0, MPI_COMM_WORLD);
    }
    else {
        MPI_Bcast(&len, 1, MPI_INT, 0, MPI_COMM_WORLD);
        filename = (char *) malloc(len+1);
        if (!filename) {   /* fixed: malloc result was previously unchecked */
            fprintf(stderr, "malloc failed\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
        MPI_Bcast(filename, len+1, MPI_CHAR, 0, MPI_COMM_WORLD);
    }

    /* describe each rank's local piece of the global array as a darray */
    ndims = 3;
    order = MPI_ORDER_C;
    array_of_gsizes[0] = 128;
    array_of_gsizes[1] = 128;
    array_of_gsizes[2] = 128;
    array_of_distribs[0] = MPI_DISTRIBUTE_BLOCK;
    array_of_distribs[1] = MPI_DISTRIBUTE_BLOCK;
    array_of_distribs[2] = MPI_DISTRIBUTE_BLOCK;
    array_of_dargs[0] = MPI_DISTRIBUTE_DFLT_DARG;
    array_of_dargs[1] = MPI_DISTRIBUTE_DFLT_DARG;
    array_of_dargs[2] = MPI_DISTRIBUTE_DFLT_DARG;
    for (i=0; i<ndims; i++) array_of_psizes[i] = 0;
    MPI_Dims_create(nprocs, ndims, array_of_psizes);

    MPI_Type_create_darray(nprocs, mynod, ndims, array_of_gsizes,
                           array_of_distribs, array_of_dargs,
                           array_of_psizes, order, MPI_INT, &newtype);
    MPI_Type_commit(&newtype);

    /* buffer holds exactly this rank's share of the global array */
    MPI_Type_size(newtype, &bufcount);
    bufcount = bufcount/sizeof(int);
    buf = (int *) malloc(bufcount * sizeof(int));
    if (!buf) {   /* fixed: malloc result was previously unchecked */
        fprintf(stderr, "malloc failed\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    /* to eliminate paging effects, do the operations once but don't time
       them */
    MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_CREATE | MPI_MODE_RDWR,
                  MPI_INFO_NULL, &fh);
    MPI_File_set_view(fh, 0, MPI_INT, newtype, "native", MPI_INFO_NULL);
    MPI_File_write_all(fh, buf, bufcount, MPI_INT, &status);
    MPI_File_seek(fh, 0, MPI_SEEK_SET);
    MPI_File_read_all(fh, buf, bufcount, MPI_INT, &status);
    MPI_File_close(&fh);

    MPI_Barrier(MPI_COMM_WORLD);

    /* now time write_all */
    MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_CREATE | MPI_MODE_RDWR,
                  MPI_INFO_NULL, &fh);
    MPI_File_set_view(fh, 0, MPI_INT, newtype, "native", MPI_INFO_NULL);
    MPI_Barrier(MPI_COMM_WORLD);
    stim = MPI_Wtime();
    darshan_start_epoch();
    MPI_File_write_all(fh, buf, bufcount, MPI_INT, &status);
    darshan_end_epoch();
    write_tim = MPI_Wtime() - stim;
    MPI_File_close(&fh);

    /* report the slowest rank's time as the collective write time */
    MPI_Allreduce(&write_tim, &new_write_tim, 1, MPI_DOUBLE, MPI_MAX,
                  MPI_COMM_WORLD);
    if (mynod == 0) {
        write_bw = (array_of_gsizes[0]*array_of_gsizes[1]*array_of_gsizes[2]*sizeof(int))/(new_write_tim*1024.0*1024.0);
        fprintf(stderr, "Global array size %d x %d x %d integers\n", array_of_gsizes[0], array_of_gsizes[1], array_of_gsizes[2]);
        fprintf(stderr, "Collective write time = %f sec, Collective write bandwidth = %f Mbytes/sec\n", new_write_tim, write_bw);
    }

    MPI_Barrier(MPI_COMM_WORLD);

    /* now time read_all */
    MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_CREATE | MPI_MODE_RDWR,
                  MPI_INFO_NULL, &fh);
    MPI_File_set_view(fh, 0, MPI_INT, newtype, "native", MPI_INFO_NULL);
    MPI_Barrier(MPI_COMM_WORLD);
    stim = MPI_Wtime();
    darshan_start_epoch();
    MPI_File_read_all(fh, buf, bufcount, MPI_INT, &status);
    darshan_end_epoch();
    read_tim = MPI_Wtime() - stim;
    MPI_File_close(&fh);

    /* report the slowest rank's time as the collective read time */
    MPI_Allreduce(&read_tim, &new_read_tim, 1, MPI_DOUBLE, MPI_MAX,
                  MPI_COMM_WORLD);
    if (mynod == 0) {
        read_bw = (array_of_gsizes[0]*array_of_gsizes[1]*array_of_gsizes[2]*sizeof(int))/(new_read_tim*1024.0*1024.0);
        fprintf(stderr, "Collective read time = %f sec, Collective read bandwidth = %f Mbytes/sec\n", new_read_tim, read_bw);
    }

    MPI_Type_free(&newtype);
    free(buf);
    free(filename);
    MPI_Finalize();
    return 0;
}
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
/*
* (C) 2001 by Argonne National Laboratory.
* See COPYRIGHT in top-level directory.
*/
#include "mpi.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <darshan-ext.h>
/* Report the MPI error text for errcode (prefixed by str) on stderr,
 * then abort the whole job. Does not return. */
static void handle_error(int errcode, char *str)
{
    int errlen;
    char errtext[MPI_MAX_ERROR_STRING];

    MPI_Error_string(errcode, errtext, &errlen);
    fprintf(stderr, "%s: %s\n", str, errtext);
    MPI_Abort(MPI_COMM_WORLD, 1);
}
/* The file name is taken as a command-line argument. */
/*
 * Test program: each process writes a 128^3-int buffer to its own file
 * (opened on MPI_COMM_SELF) and reports the aggregate write bandwidth.
 * Three darshan epochs exercise the epoch-based counter attribution:
 * one around the first open, one around two writes to two files, and
 * one around a final write to the first file.
 *
 * Usage: counters -fname <filename>
 */
int main(int argc, char **argv)
{
    int i, errcode;
    int nprocs, len, *buf, bufcount, rank;
    MPI_File fh,fh2;
    MPI_Status status;
    double stim, write_tim, new_write_tim, write_bw;
    char *filename;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    /* process 0 takes the file name as a command-line argument and
       broadcasts it to other processes */
    if (!rank) {
        i = 1;
        while ((i < argc) && strcmp("-fname", *argv)) {
            i++;
            argv++;
        }
        if (i >= argc) {
            fprintf(stderr, "\n*# Usage: coll_perf -fname filename\n\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
        argv++;
        len = strlen(*argv);
        filename = (char *) malloc(len+1);
        if (!filename) {   /* fixed: malloc result was previously unchecked */
            fprintf(stderr, "malloc failed\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
        strcpy(filename, *argv);
        MPI_Bcast(&len, 1, MPI_INT, 0, MPI_COMM_WORLD);
        MPI_Bcast(filename, len+1, MPI_CHAR, 0, MPI_COMM_WORLD);
    }
    else {
        MPI_Bcast(&len, 1, MPI_INT, 0, MPI_COMM_WORLD);
        filename = (char *) malloc(len+1);
        if (!filename) {   /* fixed: malloc result was previously unchecked */
            fprintf(stderr, "malloc failed\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
        MPI_Bcast(filename, len+1, MPI_CHAR, 0, MPI_COMM_WORLD);
    }

    /* NOTE: buf is deliberately left uninitialized; only the I/O volume
       matters for this benchmark, not the data values. */
    bufcount = 128*128*128;
    buf = (int *) malloc(bufcount * sizeof(int));
    if (!buf) {   /* fixed: malloc result was previously unchecked */
        fprintf(stderr, "malloc failed\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    /* epoch 1: file open only */
    darshan_start_epoch();
    errcode = MPI_File_open(MPI_COMM_SELF, filename,
                            MPI_MODE_CREATE | MPI_MODE_RDWR, MPI_INFO_NULL, &fh);
    darshan_end_epoch();
    if (errcode != MPI_SUCCESS) handle_error(errcode, "MPI_File_open(1)");

    MPI_Barrier(MPI_COMM_WORLD);
    stim = MPI_Wtime();

    /* epoch 2: a write to fh plus an open and write to a second file */
    darshan_start_epoch();
    MPI_File_write_all(fh, buf, bufcount, MPI_INT, &status);
    errcode = MPI_File_open(MPI_COMM_SELF, "abc",
                            MPI_MODE_CREATE | MPI_MODE_RDWR, MPI_INFO_NULL, &fh2);
    /* fixed: errcode for the second open was previously never checked,
       so a failed open would make the following write use an invalid
       file handle */
    if (errcode != MPI_SUCCESS) handle_error(errcode, "MPI_File_open(2)");
    MPI_File_write_all(fh2, buf, bufcount, MPI_INT, &status);
    darshan_end_epoch();

    write_tim = MPI_Wtime() - stim;

    /* epoch 3: a final write to fh */
    darshan_start_epoch();
    MPI_File_write_all(fh, buf, bufcount, MPI_INT, &status);
    darshan_end_epoch();

    MPI_File_close(&fh);
    MPI_File_close(&fh2);

    /* report the slowest rank's time as the collective write time */
    MPI_Allreduce(&write_tim, &new_write_tim, 1, MPI_DOUBLE, MPI_MAX,
                  MPI_COMM_WORLD);
    if (rank == 0) {
        write_bw = (bufcount*sizeof(int))/(new_write_tim*1024.0*1024.0);
        /* fixed: bufcount*sizeof(int) is a size_t, so %zu (not %ld) */
        fprintf(stderr, "Each of %d processes writes buf size=%zu\n",nprocs, bufcount*sizeof(int));
        fprintf(stderr, "Collective write time = %f sec, Collective write bandwidth = %f Mbytes/sec\n", new_write_tim, write_bw);
    }

    free(filename);
    free(buf);
    MPI_Finalize();
    return 0;
}
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
/*
* (C) 2001 by Argonne National Laboratory.
* See COPYRIGHT in top-level directory.
*/
#include "mpi.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <darshan-ext.h>
/* Translate errcode into MPI's error message, print it on stderr with
 * the caller-supplied prefix str, and abort all ranks. */
static void handle_error(int errcode, char *str)
{
    char errmsg[MPI_MAX_ERROR_STRING];
    int msglen = 0;

    MPI_Error_string(errcode, errmsg, &msglen);
    fprintf(stderr, "%s: %s\n", str, errmsg);
    MPI_Abort(MPI_COMM_WORLD, 1);
}
/* The file name is taken as a command-line argument. */
/*
 * Test program (no-epoch variant): each process writes a 128^3-int
 * buffer to its own file (opened on MPI_COMM_SELF), plus a second file
 * "abc", and reports the aggregate write bandwidth.  Identical to the
 * epoch-instrumented variant except that no darshan epoch calls are
 * made, so all activity falls into the default accounting.
 *
 * Usage: writef_noepoch -fname <filename>
 */
int main(int argc, char **argv)
{
    int i, errcode;
    int nprocs, len, *buf, bufcount, rank;
    MPI_File fh,fh2;
    MPI_Status status;
    double stim, write_tim, new_write_tim, write_bw;
    char *filename;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    /* process 0 takes the file name as a command-line argument and
       broadcasts it to other processes */
    if (!rank) {
        i = 1;
        while ((i < argc) && strcmp("-fname", *argv)) {
            i++;
            argv++;
        }
        if (i >= argc) {
            fprintf(stderr, "\n*# Usage: coll_perf -fname filename\n\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
        argv++;
        len = strlen(*argv);
        filename = (char *) malloc(len+1);
        if (!filename) {   /* fixed: malloc result was previously unchecked */
            fprintf(stderr, "malloc failed\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
        strcpy(filename, *argv);
        MPI_Bcast(&len, 1, MPI_INT, 0, MPI_COMM_WORLD);
        MPI_Bcast(filename, len+1, MPI_CHAR, 0, MPI_COMM_WORLD);
    }
    else {
        MPI_Bcast(&len, 1, MPI_INT, 0, MPI_COMM_WORLD);
        filename = (char *) malloc(len+1);
        if (!filename) {   /* fixed: malloc result was previously unchecked */
            fprintf(stderr, "malloc failed\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
        MPI_Bcast(filename, len+1, MPI_CHAR, 0, MPI_COMM_WORLD);
    }

    /* NOTE: buf is deliberately left uninitialized; only the I/O volume
       matters for this benchmark, not the data values. */
    bufcount = 128*128*128;
    buf = (int *) malloc(bufcount * sizeof(int));
    if (!buf) {   /* fixed: malloc result was previously unchecked */
        fprintf(stderr, "malloc failed\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    errcode = MPI_File_open(MPI_COMM_SELF, filename,
                            MPI_MODE_CREATE | MPI_MODE_RDWR, MPI_INFO_NULL, &fh);
    if (errcode != MPI_SUCCESS) handle_error(errcode, "MPI_File_open(1)");

    MPI_Barrier(MPI_COMM_WORLD);
    stim = MPI_Wtime();
    MPI_File_write_all(fh, buf, bufcount, MPI_INT, &status);
    errcode = MPI_File_open(MPI_COMM_SELF, "abc",
                            MPI_MODE_CREATE | MPI_MODE_RDWR, MPI_INFO_NULL, &fh2);
    /* fixed: errcode for the second open was previously never checked,
       so a failed open would make the following write use an invalid
       file handle */
    if (errcode != MPI_SUCCESS) handle_error(errcode, "MPI_File_open(2)");
    MPI_File_write_all(fh2, buf, bufcount, MPI_INT, &status);
    write_tim = MPI_Wtime() - stim;

    MPI_File_write_all(fh, buf, bufcount, MPI_INT, &status);
    MPI_File_close(&fh);
    MPI_File_close(&fh2);

    /* report the slowest rank's time as the collective write time */
    MPI_Allreduce(&write_tim, &new_write_tim, 1, MPI_DOUBLE, MPI_MAX,
                  MPI_COMM_WORLD);
    if (rank == 0) {
        write_bw = (bufcount*sizeof(int))/(new_write_tim*1024.0*1024.0);
        /* fixed: bufcount*sizeof(int) is a size_t, so %zu (not %ld) */
        fprintf(stderr, "Each of %d processes writes buf size=%zu\n",nprocs, bufcount*sizeof(int));
        fprintf(stderr, "Collective write time = %f sec, Collective write bandwidth = %f Mbytes/sec\n", new_write_tim, write_bw);
    }

    free(filename);
    free(buf);
    MPI_Finalize();
    return 0;
}
...@@ -163,6 +163,61 @@ char *darshan_names[] = { ...@@ -163,6 +163,61 @@ char *darshan_names[] = {
"CP_FASTEST_RANK_BYTES", "CP_FASTEST_RANK_BYTES",
"CP_SLOWEST_RANK", "CP_SLOWEST_RANK",
"CP_SLOWEST_RANK_BYTES", "CP_SLOWEST_RANK_BYTES",
"CP_MPI_SENDS",
"CP_MPI_RECVS",
"CP_MPI_ISENDS",
"CP_MPI_IRECVS",
"CP_MPI_WAITS",
"CP_MPI_TESTS",
"CP_MPI_WAITANYS",
"CP_MPI_TESTANYS",
"CP_MPI_WAITALLS",
"CP_MPI_TESTALLS",
"CP_MPI_BARRIERS",
"CP_MPI_BCASTS",
"CP_MPI_GATHERS",
"CP_MPI_GATHERVS",
"CP_MPI_SCATHERS",
"CP_MPI_SCATHERVS",
"CP_MPI_ALLGATHERS",
"CP_MPI_ALLGATHERVS",
"CP_MPI_ALLTOALLS",
"CP_MPI_ALLTOALLVS",
"CP_MPI_REDUCES",
"CP_MPI_ALLREDUCES",
"CP_BYTES_MPI_SEND",
"CP_BYTES_MPI_RECV",
"CP_BYTES_MPI_ISEND",
"CP_BYTES_MPI_IRECV",
"CP_BYTES_MPI_BCAST",
"CP_BYTES_MPI_GATHER",
"CP_BYTES_MPI_GATHERV",
"CP_BYTES_MPI_SCATHER",
"CP_BYTES_MPI_SCATHERV",
"CP_BYTES_MPI_ALLGATHER",
"CP_BYTES_MPI_ALLGATHERV",
"CP_BYTES_MPI_ALLTOALL",
"CP_BYTES_MPI_ALLTOALLV",
"CP_BYTES_MPI_REDUCE",
"CP_BYTES_MPI_ALLREDUCE",
"CP_AVG_MEM_DTYPE_SIZE",
"CP_AVG_MEM_DTYPE_EXTENT",
"CP_AVG_MEM_DTYPE_BLOCKS",
"CP_AVG_FILE_DTYPE_EXTENT",
"CP_AVG_FILE_DTYPE_BLOCKS",
"CP_MAX_MEM_DTYPE_SIZE",
"CP_MAX_MEM_DTYPE_EXTENT",
"CP_MAX_MEM_DTYPE_BLOCKS",
"CP_MAX_FILE_OFFSET",
"CP_MAX_FILE_DTYPE_EXTENT",
"CP_MAX_FILE_DTYPE_BLOCKS",
"CP_MIN_MEM_DTYPE_SIZE",
"CP_MIN_MEM_DTYPE_EXTENT",
"CP_MIN_MEM_DTYPE_BLOCKS",
"CP_MIN_FILE_OFFSET",
"CP_MIN_FILE_DTYPE_EXTENT",
"CP_MIN_FILE_DTYPE_BLOCKS",
"CP_NUM_INDICES" "CP_NUM_INDICES"
}; };
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment