Commit 94573e9b, authored Jul 30, 2020 by Jakob Luettgau

Add graph compilation example with holoviews.

parent b451c8c8
Changes: 5 files
darshan-util/pydarshan/examples/99-graph-visualization.ipynb (new file, mode 100644)

[notebook diff collapsed in this view; contents not shown]
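Since the notebook diff is collapsed, the following is only a minimal sketch, under stated assumptions, of the kind of graph compilation the commit message describes: derive a rank-to-file graph from a darshan log with pydarshan and render it with holoviews. The log name example.darshan, the bipartite rank/file layout, and the per-record "rank"/"id" keys are illustrative assumptions, not the notebook's actual code.

# Sketch only (not the notebook's contents): compile a rank<->file graph
# from a darshan log and render it with holoviews.
import darshan            # pydarshan
import networkx as nx
import holoviews as hv

hv.extension("bokeh")

# "example.darshan" is a placeholder log name.
report = darshan.DarshanReport("example.darshan", read_all=True)

# Assumed record layout: each POSIX record carries a 'rank' and a file
# record 'id' that report.name_records maps back to a path.
G = nx.Graph()
for rec in report.records.get("POSIX", []):
    rank_node = "rank:%d" % rec["rank"]
    file_node = report.name_records.get(rec["id"], str(rec["id"]))
    G.add_edge(rank_node, file_node)

# Lay the graph out with networkx and hand it to holoviews for rendering.
graph = hv.Graph.from_networkx(G, nx.spring_layout)
hv.save(graph, "io-graph.html")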
darshan-util/pydarshan/examples/darshan-graph/Makefile (new file, mode 100644)
CC = mpicc
CFLAGS = -g

all:
	${CC} -o app_write -DWRITE app.c
	${CC} -o app_read -DREAD app.c
	${CC} -o app_readAB_writeC app_rw_mpiio.c

run: all
	./run.sh

clean:
	rm -f ${USER}_*.darshan
	rm -rf *.o
	rm -f app_read
	rm -f app_write
	rm -f app_readAB_writeC
	rm -f A B C Z
darshan-util/pydarshan/examples/darshan-graph/app.c (new file, mode 100644)
#define _GNU_SOURCE // asprintf

#include <mpi.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <time.h>

int main(int argc, char** argv)
{
    int i;
    int ret = 0;

    // Get the number of processes
    int world_size;
    int world_rank;
    char processor_name[MPI_MAX_PROCESSOR_NAME];
    int name_len;

    // Initialize the MPI environment
    MPI_Init(NULL, NULL);
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
    MPI_Get_processor_name(processor_name, &name_len);

    // Print off a hello-world message
    printf("Hello world from processor %s, rank %d"
           " out of %d processors\n",
           processor_name, world_rank, world_size);

    char* filename = "A";
    if (argc > 1)
        filename = argv[1];

    int size = 10000;
    int opcount = 10;

    char* buf = (char*) malloc(sizeof(char) * size);

#ifdef WRITE
    // Fill the buffer with the first character of the file name.
    for (i = 0; i < size; i++) {
        buf[i] = filename[0];
    }
#endif

    // Open the file, creating it if it does not exist.
    int fd = open(filename, O_RDWR | O_CREAT,
                  S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP | S_IROTH);
    if (fd != -1) {
        // Issue opcount sequential reads or writes of size/opcount bytes each.
        for (i = 0; i < opcount; i++) {
            int pos = (size / opcount) * i;
            int len = size / opcount;
#ifdef WRITE
            printf("write(from_pos=%d, len=%d)\n", pos, len);
            ret = write(fd, buf + pos, len);
#endif
#ifdef READ
            printf("read(to_pos=%d, len=%d)\n", pos, len);
            ret = read(fd, buf + pos, len);
#endif
        }
        ret = close(fd);
    }

    for (i = 0; i < size; i++) {
        printf("%c", buf[i]);
    }
    printf("\n");

    MPI_Finalize();
    return 0;
}
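For example (reading the constants above): ./app_write A creates file A and fills it with 10000 copies of 'A' using ten sequential 1000-byte write() calls, while ./app_read A reads the same file back in the matching ten read() calls. run.sh below uses exactly these invocations to generate darshan logs with a known I/O pattern.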
darshan-util/pydarshan/examples/darshan-graph/app_rw_mpiio.c (new file, mode 100644)
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
 *
 * Copyright (C) 2019, Northwestern University
 * See COPYRIGHT notice in top-level directory.
 *
 * This program shows an example of calling MPI_File_set_view(), which sets
 * a visible file region for the calling MPI process. With such a fileview
 * set, all successive MPI read and write calls access only the visible
 * region. In this program the visible region for process rank i starts at
 * file offset (rank * 100 * sizeof(char)).
 *
 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>
#include <mpi.h>

#define CHECK_ERR(func) { \
    if (err != MPI_SUCCESS) { \
        int errorStringLen; \
        char errorString[MPI_MAX_ERROR_STRING]; \
        MPI_Error_string(err, errorString, &errorStringLen); \
        printf("Error at line %d: calling %s (%s)\n", __LINE__, #func, errorString); \
    } \
}

int main(int argc, char** argv)
{
    char* filename;
    int i, err, cmode, rank;
    char buf[10000];
    MPI_Offset offset;
    MPI_File fh;
    MPI_Status status;

    MPI_Init(&argc, &argv);
    err = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    CHECK_ERR(MPI_Comm_rank);

    // readers: odd ranks read file A, even ranks read file B
    int ret = 0;
    int size = 5000;
    int opcount = 10;

    if (rank % 2 == 1) {
        filename = "A";
    } else {
        filename = "B";
    }

    int fd = open(filename, O_RDWR | O_CREAT,
                  S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP | S_IROTH);
    if (fd != -1) {
        for (i = 0; i < opcount; i++) {
            int pos = (size / opcount) * i;
            int len = size / opcount;
            printf("read(to_pos=%d, len=%d)\n", pos, len);
            ret = read(fd, buf + pos, len);
        }
        ret = close(fd);
    }

    // writers: all ranks write to file C via MPI-IO
    filename = "C";
    if (argc > 1)
        filename = argv[1];

    /* open a file (create it if the file does not exist) */
    cmode = MPI_MODE_CREATE | MPI_MODE_RDWR;
    err = MPI_File_open(MPI_COMM_WORLD, filename, cmode, MPI_INFO_NULL, &fh);
    CHECK_ERR(MPI_File_open);

    int data_per_proc = 100;

    /* set the file offset for this calling process */
    offset = (MPI_Offset) rank * data_per_proc * sizeof(char);

    /* initialize write buffer contents */
    char* abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
    for (i = 0; i < data_per_proc; i++) {
        buf[i] = abc[rank];
    }

    for (i = 0; i < size; i++) {
        printf("%c", buf[i]);
    }
    printf("\n");

    /* MPI_File_set_view() sets the file region visible to this process,
     * starting at "offset". With MPI_CHAR as both the etype and filetype,
     * the file is accessed in units of single bytes and the visible region
     * is the remainder of the file starting from the process's offset.
     */
    err = MPI_File_set_view(fh, offset, MPI_CHAR, MPI_CHAR, "native", MPI_INFO_NULL);
    CHECK_ERR(MPI_File_set_view);

    /* Each process writes 300 chars to the file region visible to it.
     * Note the file pointer advances 300 bytes after this call.
     */
    err = MPI_File_write_all(fh, &buf[0], 300, MPI_CHAR, &status);
    CHECK_ERR(MPI_File_write_all);

    /* Each process writes another 700 chars to its visible region,
     * starting from the file pointer left by the previous write.
     */
    err = MPI_File_write_all(fh, &buf[3], 700, MPI_CHAR, &status);
    CHECK_ERR(MPI_File_write_all);

    /* close the file collectively */
    err = MPI_File_close(&fh);
    CHECK_ERR(MPI_File_close);

    MPI_Finalize();
    return 0;
}
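Working through the offsets: with np = 4 and data_per_proc = 100, rank 2's fileview begins at byte 200 of C; its two collective writes cover view offsets 0-299 and 300-999, i.e. file bytes 200-1199. Since each rank writes 1000 bytes but the views are spaced only 100 bytes apart, neighbouring ranks' write regions overlap, which is what makes this an interesting access pattern for darshan to record.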
darshan-util/pydarshan/examples/darshan-graph/run.sh (new file, mode 100755)
#!/bin/bash

# ensure the sample library libabcxyz.so is found
export LD_LIBRARY_PATH=.

# locate the darshan installation via darshan-config
export darshan_install=$(cd $(dirname $(which darshan-config)); cd ..; pwd -L)
export darshan_libpath=$darshan_install/lib/libdarshan.so

# enable and configure darshan to write logs into the current directory
#export LD_PRELOAD=$PWD/../../darshan-runtime/lib/libdarshan.so
export LD_PRELOAD=$darshan_libpath
export DARSHAN_LOG_DIR_PATH=$PWD
#export DXT_ENABLE_IO_TRACE=4

mpirun -oversubscribe -np 1 ./app_write A
mpirun -oversubscribe -np 1 ./app_write B
mpirun -oversubscribe -np 1 ./app_write Z
mpirun -oversubscribe -np 1 ./app_read A
mpirun -oversubscribe -np 4 ./app_readAB_writeC
mpirun -oversubscribe -np 1 ./app_read C
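Each mpirun invocation above should leave one .darshan log in the current directory (per DARSHAN_LOG_DIR_PATH). As a quick sanity check, a few lines of pydarshan can list what each log recorded; DarshanReport and name_records are the assumed API here, and this snippet is a sketch rather than part of the commit:

# Sketch: list the logs produced by run.sh and the file names each recorded.
# Assumes pydarshan is installed and its DarshanReport/name_records API.
import glob
import darshan

for log in sorted(glob.glob("*.darshan")):
    report = darshan.DarshanReport(log, read_all=True)
    print(log, "->", sorted(report.name_records.values()))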