Commit c9a39af0 (project: codes)
authored Sep 18, 2013 by Philip Carns
split up test program by LP types for clarity
parent 9e55e435

Showing 6 changed files with 412 additions and 198 deletions

tests/Makefile.subdir                          +9   -4
tests/workload/codes-workload-test-cn-lp.c     +39  -194
tests/workload/codes-workload-test-cn-lp.h     +28  -0
tests/workload/codes-workload-test-svr-lp.c    +212 -0
tests/workload/codes-workload-test-svr-lp.h    +27  -0
tests/workload/codes-workload-test.c           +97  -0

tests/Makefile.subdir

 check_PROGRAMS += tests/lp-io-test \
-	tests/codes-workload-test
+	tests/workload/codes-workload-test

 TESTS += tests/lp-io-test.sh
 EXTRA_DIST += tests/lp-io-test.sh

@@ -11,6 +11,11 @@ tests_lp_io_test_LDADD = $(testlib) ${ROSS_LIBS}
 tests_lp_io_test_LDFLAGS = ${ROSS_LDFLAGS}
 tests_lp_io_test_SOURCES = tests/lp-io-test.c

-tests_codes_workload_test_LDADD = $(testlib) ${ROSS_LIBS}
-tests_codes_workload_test_LDFLAGS = ${ROSS_LDFLAGS}
-tests_codes_workload_test_SOURCES = tests/codes-workload-test.c
+tests_workload_codes_workload_test_LDADD = $(testlib) ${ROSS_LIBS}
+tests_workload_codes_workload_test_LDFLAGS = ${ROSS_LDFLAGS}
+tests_workload_codes_workload_test_SOURCES = \
+	tests/workload/codes-workload-test.c \
+	tests/workload/codes-workload-test-svr-lp.c \
+	tests/workload/codes-workload-test-svr-lp.h \
+	tests/workload/codes-workload-test-cn-lp.c \
+	tests/workload/codes-workload-test-cn-lp.h

tests/codes-workload-test.c → tests/workload/codes-workload-test-cn-lp.c
@@ -4,12 +4,8 @@
  *
  */

-/* SUMMARY:
- *
- * This is a test harness for the codes workload API.  It sets up two LP
- * types: clients (which consume operations from the workload generator) and
- * servers (which service operations submitted by clients).
- *
- */
+/* SUMMARY: This is a compute node LP to be used in a codes workload
+ * test/demo
+ */

 #include <string.h>
@@ -19,23 +15,16 @@
 #include "codes/lp-io.h"
 #include "codes/codes.h"
 #include "codes/codes-workload.h"
+#include "codes-workload-test-cn-lp.h"
+#include "codes-workload-test-svr-lp.h"

-#define NUM_SERVERS 16 /* number of servers */
-#define NUM_CLIENTS 48 /* number of clients */
-
-typedef struct svr_msg svr_msg;
-typedef struct svr_state svr_state;
 typedef struct client_msg client_msg;
 typedef struct client_state client_state;

 enum client_event_type
 {
     CLIENT_KICKOFF,    /* initial event */
-};
-
-enum svr_event_type
-{
-    SVR_OP,
     CLIENT_OP_COMPLETE,    /* finished previous I/O operation */
 };

 struct client_state
@@ -44,55 +33,11 @@ struct client_state
     int wkld_id;
 };

-struct svr_state
-{
-};
-
 struct client_msg
 {
     enum client_event_type event_type;
 };

-struct svr_msg
-{
-    enum svr_event_type event_type;
-    struct codes_workload_op op;
-    tw_lpid src;          /* source of this request or ack */
-};
-
-const tw_optdef app_opt[] =
-{
-    TWOPT_GROUP("CODES Workload Test Model"),
-    TWOPT_END()
-};
-
-static void svr_init(
-    svr_state * ns,
-    tw_lp * lp);
-static void svr_event(
-    svr_state * ns,
-    tw_bf * b,
-    svr_msg * m,
-    tw_lp * lp);
-static void svr_rev_event(
-    svr_state * ns,
-    tw_bf * b,
-    svr_msg * m,
-    tw_lp * lp);
-static void svr_finalize(
-    svr_state * ns,
-    tw_lp * lp);
-static tw_peid node_mapping(tw_lpid gid);
-
-tw_lptype svr_lp = {
-    (init_f) svr_init,
-    (event_f) svr_event,
-    (revent_f) svr_rev_event,
-    (final_f) svr_finalize,
-    (map_f) node_mapping,
-    sizeof(svr_state),
-};
-
 static void handle_client_op_loop_rev_event(
     client_state * ns,
     tw_bf * b,
@@ -132,60 +77,8 @@ tw_lptype client_lp = {
     sizeof(client_state),
 };

-int main(int argc, char **argv)
-{
-    int nprocs;
-    int rank;
-    int lps_per_proc;
-    int i;
-    int ret;
-    lp_io_handle handle;
-
-    g_tw_ts_end = 60*60*24*365;
-    tw_opt_add(app_opt);
-    tw_init(&argc, &argv);
-
-    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
-    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
-
-    if((NUM_SERVERS+NUM_CLIENTS) % nprocs)
-    {
-        fprintf(stderr, "Error: number of server LPs (%d total) is not evenly divisible by the number of MPI processes (%d)\n", NUM_SERVERS+NUM_CLIENTS, nprocs);
-        exit(-1);
-    }
-
-    lps_per_proc = (NUM_SERVERS+NUM_CLIENTS) / nprocs;
-
-    tw_define_lps(lps_per_proc, 512, 0);
-
-    for(i=0; i<lps_per_proc; i++)
-    {
-        if((rank*lps_per_proc + i) < NUM_CLIENTS)
-            tw_lp_settype(i, &client_lp);
-        else
-            tw_lp_settype(i, &svr_lp);
-    }
-
-    g_tw_lookahead = 100;
-
-    ret = lp_io_prepare("codes-workload-test-results", LP_IO_UNIQ_SUFFIX, &handle, MPI_COMM_WORLD);
-    if(ret < 0)
-    {
-        return(-1);
-    }
-
-    tw_run();
-
-    ret = lp_io_flush(handle, MPI_COMM_WORLD);
-    assert(ret == 0);
-
-    tw_end();
-
-    return 0;
-}
-
+static int g_num_clients = -1;
+static int g_num_servers = -1;

 static void client_init(
     client_state * ns,
@@ -211,15 +104,6 @@ static void client_init(
     return;
 }

-static void svr_init(
-    svr_state * ns,
-    tw_lp * lp)
-{
-    memset(ns, 0, sizeof(*ns));
-    return;
-}
-
 static void client_event(
     client_state * ns,
     tw_bf * b,
@@ -238,21 +122,6 @@ static void client_event(
     }
 }

-static void svr_event(
-    svr_state * ns,
-    tw_bf * b,
-    svr_msg * m,
-    tw_lp * lp)
-{
-    switch (m->event_type)
-    {
-        default:
-            assert(0);
-            break;
-    }
-}
-
 static void client_rev_event(
     client_state * ns,
     tw_bf * b,
@@ -272,22 +141,6 @@ static void client_rev_event(
     return;
 }

-static void svr_rev_event(
-    svr_state * ns,
-    tw_bf * b,
-    svr_msg * m,
-    tw_lp * lp)
-{
-    switch (m->event_type)
-    {
-        default:
-            assert(0);
-            break;
-    }
-
-    return;
-}
-
 static void client_finalize(
     client_state * ns,
     tw_lp * lp)
@@ -295,39 +148,6 @@ static void client_finalize(
     return;
 }

-static void svr_finalize(
-    svr_state * ns,
-    tw_lp * lp)
-{
-#if 0
-    char buffer[256];
-    int ret;
-
-    sprintf(buffer, "LP %ld finalize data\n", (long)lp->gid);
-
-    /* test having everyone write to same identifier */
-    ret = lp_io_write(lp->gid, "node_state_pointers", strlen(buffer)+1, buffer);
-    assert(ret == 0);
-
-    /* test having only one lp write to a particular identifier */
-    if(lp->gid == 3)
-    {
-        ret = lp_io_write(lp->gid, "subset_example", strlen(buffer)+1, buffer);
-        assert(ret == 0);
-    }
-
-    /* test having one lp write two buffers to the same id */
-    if(lp->gid == 5)
-    {
-        sprintf(buffer, "LP %ld finalize data (intentional duplicate)\n", (long)lp->gid);
-        ret = lp_io_write(lp->gid, "node_state_pointers", strlen(buffer)+1, buffer);
-        assert(ret == 0);
-    }
-#endif
-
-    return;
-}
-
 static tw_peid node_mapping(tw_lpid gid)
 {
@@ -354,8 +174,6 @@ static void handle_client_op_loop_event(
     tw_lp * lp)
 {
     struct codes_workload_op op;
-    tw_event *e;
-    svr_msg *m_out;
     tw_lpid dest_svr_id;

     printf("handle_client_op_loop_event(), lp %llu.\n", (unsigned long long)lp->gid);
@@ -369,6 +187,11 @@ static void handle_client_op_loop_event(

     codes_workload_get_next(ns->wkld_id, ns->my_rank, &op);

+    /* NOTE: in this test model the LP is doing its own math to find the LP
+     * ID of servers just to do something simple.  It knows that compute
+     * nodes are the first N LPs and servers are the next M LPs.
+     */
+
     switch(op.op_type)
     {
         case CODES_WK_END:
@@ -376,22 +199,44 @@ static void handle_client_op_loop_event(
             return;
             break;
         case CODES_WK_OPEN:
-            dest_svr_id = NUM_CLIENTS + op.u.open.file_id % NUM_SERVERS;
+            dest_svr_id = g_num_clients + op.u.open.file_id % g_num_servers;
             break;
         default:
             assert(0);
             break;
     }

-    e = codes_event_new(dest_svr_id, 1, lp);
-    m_out = tw_event_data(e);
-    m_out->event_type = SVR_OP;
-    m_out->op = op;
+    svr_op_start(lp, dest_svr_id, &op);

     return;
 }

+void cn_op_complete(tw_lp *lp, tw_lpid gid)
+{
+    tw_event *e;
+    client_msg *m;
+
+    e = codes_event_new(gid, codes_local_latency(lp), lp);
+    m = tw_event_data(e);
+    m->event_type = CLIENT_OP_COMPLETE;
+    tw_event_send(e);
+
+    return;
+}
+
+void cn_op_complete_rc(tw_lp *lp)
+{
+    codes_local_latency_reverse(lp);
+    return;
+}
+
+void cn_set_params(int num_clients, int num_servers)
+{
+    g_num_clients = num_clients;
+    g_num_servers = num_servers;
+}
+
 /*
  * Local variables:
  *    c-indent-level: 4

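The NOTE above and the updated dest_svr_id line lean on the fixed LP layout: compute node LPs occupy the first N global ids and server LPs the next M. A small stand-alone sketch of that arithmetic (not part of the commit), using the 48-client/16-server defaults that codes-workload-test.c passes to cn_set_params() and a hypothetical file_id of 100:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* defaults wired up by codes-workload-test.c via cn_set_params() */
    const uint64_t num_clients = 48;   /* compute node LPs occupy global ids 0..47 */
    const uint64_t num_servers = 16;   /* server LPs occupy global ids 48..63 */

    /* hypothetical file id produced by the workload generator */
    const uint64_t file_id = 100;

    /* same expression as handle_client_op_loop_event(); '%' binds before '+',
     * so this is num_clients + (file_id % num_servers) */
    const uint64_t dest_svr_id = num_clients + file_id % num_servers;

    assert(dest_svr_id == 52);   /* 48 + (100 % 16) = 48 + 4 */
    printf("file_id %llu -> server LP %llu\n",
           (unsigned long long)file_id, (unsigned long long)dest_svr_id);
    return 0;
}
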
tests/workload/codes-workload-test-cn-lp.h (new file, mode 100644)

/*
 * Copyright (C) 2013 University of Chicago.
 * See COPYRIGHT notice in top-level directory.
 *
 */

#ifndef CODES_WORKLOAD_TEST_CN_LP_H
#define CODES_WORKLOAD_TEST_CN_LP_H

#include <ross.h>

#include "codes/codes-workload.h"

extern tw_lptype client_lp;

void cn_op_complete(tw_lp *lp, tw_lpid gid);
void cn_op_complete_rc(tw_lp *lp);
void cn_set_params(int num_clients, int num_servers);

#endif /* CODES_WORKLOAD_TEST_CN_LP_H */

/*
 * Local variables:
 *    c-indent-level: 4
 *    c-basic-offset: 4
 * End:
 *
 * vim: ft=c ts=8 sts=4 sw=4 expandtab
 */

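A minimal sketch (not part of the commit) of how a server-side LP is expected to drive the entry points declared above: the forward call mirrors handle_svr_op_event() in codes-workload-test-svr-lp.c, while the reverse call to cn_op_complete_rc() is an assumed illustration of the rollback pairing (the committed reverse handler is still an assert(0) stub). The handler names are hypothetical.

#include "codes-workload-test-cn-lp.h"

/* hypothetical server-side handlers, for illustration only */
void example_svr_forward(tw_lp *lp, tw_lpid client_gid)
{
    /* notify the compute node LP that its operation finished */
    cn_op_complete(lp, client_gid);
}

void example_svr_reverse(tw_lp *lp)
{
    /* roll back the codes_local_latency() consumed by cn_op_complete() */
    cn_op_complete_rc(lp);
}
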
tests/workload/codes-workload-test-svr-lp.c (new file, mode 100644)

/*
 * Copyright (C) 2013 University of Chicago.
 * See COPYRIGHT notice in top-level directory.
 *
 */

/* Summary: example storage server LP type for workload demo/test */

#include <string.h>
#include <assert.h>
#include <ross.h>

#include "codes/lp-io.h"
#include "codes/codes.h"
#include "codes/codes-workload.h"
#include "codes-workload-test-svr-lp.h"
#include "codes-workload-test-cn-lp.h"

typedef struct svr_msg svr_msg;
typedef struct svr_state svr_state;

enum svr_event_type
{
    SVR_OP,
};

struct svr_state
{
};

struct svr_msg
{
    enum svr_event_type event_type;
    struct codes_workload_op op;
    tw_lpid src;          /* source of this request or ack */
};

static void svr_init(
    svr_state * ns,
    tw_lp * lp);
static void svr_event(
    svr_state * ns,
    tw_bf * b,
    svr_msg * m,
    tw_lp * lp);
static void svr_rev_event(
    svr_state * ns,
    tw_bf * b,
    svr_msg * m,
    tw_lp * lp);
static void svr_finalize(
    svr_state * ns,
    tw_lp * lp);
static tw_peid node_mapping(tw_lpid gid);

tw_lptype svr_lp = {
    (init_f) svr_init,
    (event_f) svr_event,
    (revent_f) svr_rev_event,
    (final_f) svr_finalize,
    (map_f) node_mapping,
    sizeof(svr_state),
};

static void handle_svr_op_event(
    svr_state * ns,
    tw_bf * b,
    svr_msg * m,
    tw_lp * lp);
static void handle_svr_op_event_rc(
    svr_state * ns,
    tw_bf * b,
    svr_msg * m,
    tw_lp * lp);

static void svr_init(
    svr_state * ns,
    tw_lp * lp)
{
    memset(ns, 0, sizeof(*ns));
    return;
}

static void svr_event(
    svr_state * ns,
    tw_bf * b,
    svr_msg * m,
    tw_lp * lp)
{
    switch (m->event_type)
    {
        case SVR_OP:
            handle_svr_op_event(ns, b, m, lp);
            break;
        default:
            assert(0);
            break;
    }
}

static void svr_rev_event(
    svr_state * ns,
    tw_bf * b,
    svr_msg * m,
    tw_lp * lp)
{
    switch (m->event_type)
    {
        case SVR_OP:
            handle_svr_op_event_rc(ns, b, m, lp);
            break;
        default:
            assert(0);
            break;
    }

    return;
}

static void svr_finalize(
    svr_state * ns,
    tw_lp * lp)
{
#if 0
    char buffer[256];
    int ret;

    sprintf(buffer, "LP %ld finalize data\n", (long)lp->gid);

    /* test having everyone write to same identifier */
    ret = lp_io_write(lp->gid, "node_state_pointers", strlen(buffer)+1, buffer);
    assert(ret == 0);

    /* test having only one lp write to a particular identifier */
    if(lp->gid == 3)
    {
        ret = lp_io_write(lp->gid, "subset_example", strlen(buffer)+1, buffer);
        assert(ret == 0);
    }

    /* test having one lp write two buffers to the same id */
    if(lp->gid == 5)
    {
        sprintf(buffer, "LP %ld finalize data (intentional duplicate)\n", (long)lp->gid);
        ret = lp_io_write(lp->gid, "node_state_pointers", strlen(buffer)+1, buffer);
        assert(ret == 0);
    }
#endif

    return;
}

static tw_peid node_mapping(tw_lpid gid)
{
    return (tw_peid) gid / g_tw_nlp;
}

void svr_op_start(tw_lp *lp, tw_lpid gid, const struct codes_workload_op *op)
{
    tw_event *e;
    svr_msg *m;

    e = codes_event_new(gid, codes_local_latency(lp), lp);
    m = tw_event_data(e);
    m->event_type = SVR_OP;
    m->op = *op;
    m->src = lp->gid;

    tw_event_send(e);
}

void svr_op_start_rc(tw_lp *lp)
{
    codes_local_latency_reverse(lp);
}

static void handle_svr_op_event(
    svr_state * ns,
    tw_bf * b,
    svr_msg * m,
    tw_lp * lp)
{
    /* TODO: fill in some stub service time */

    /* send event back to cn to let it know the operation is done */
    cn_op_complete(lp, m->src);

    return;
}

static void handle_svr_op_event_rc(
    svr_state * ns,
    tw_bf * b,
    svr_msg * m,
    tw_lp * lp)
{
    assert(0);
    return;
}

/*
 * Local variables:
 *    c-indent-level: 4
 *    c-basic-offset: 4
 * End:
 *
 * vim: ft=c ts=8 sts=4 sw=4 expandtab
 */

tests/workload/codes-workload-test-svr-lp.h (new file, mode 100644)

/*
 * Copyright (C) 2013 University of Chicago.
 * See COPYRIGHT notice in top-level directory.
 *
 */

#ifndef CODES_WORKLOAD_TEST_SERVER_H
#define CODES_WORKLOAD_TEST_SERVER_H

#include <ross.h>

#include "codes/codes-workload.h"

extern tw_lptype svr_lp;

void svr_op_start(tw_lp *lp, tw_lpid gid, const struct codes_workload_op *op);
void svr_op_start_rc(tw_lp *lp);

#endif /* CODES_WORKLOAD_TEST_SERVER_H */

/*
 * Local variables:
 *    c-indent-level: 4
 *    c-basic-offset: 4
 * End:
 *
 * vim: ft=c ts=8 sts=4 sw=4 expandtab
 */

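Correspondingly, a minimal sketch (not part of the commit) of the compute-node side of the handshake using the server API above: the forward call mirrors handle_client_op_loop_event() in codes-workload-test-cn-lp.c, and the reverse call to svr_op_start_rc() is an assumed illustration of how the rollback entry point is meant to be paired. The handler names are hypothetical.

#include "codes-workload-test-svr-lp.h"

/* hypothetical compute-node-side handlers, for illustration only */
void example_cn_forward(tw_lp *lp, tw_lpid dest_svr_id,
                        const struct codes_workload_op *op)
{
    /* hand the workload operation to the chosen server LP */
    svr_op_start(lp, dest_svr_id, op);
}

void example_cn_reverse(tw_lp *lp)
{
    /* roll back the codes_local_latency() consumed by svr_op_start() */
    svr_op_start_rc(lp);
}
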
tests/workload/codes-workload-test.c (new file, mode 100644)

/*
 * Copyright (C) 2013 University of Chicago.
 * See COPYRIGHT notice in top-level directory.
 *
 */

/* SUMMARY:
 *
 * This is a test harness for the codes workload API.  It sets up two LP
 * types: clients (which consume operations from the workload generator) and
 * servers (which service operations submitted by clients).
 *
 */

#include <string.h>
#include <assert.h>
#include <ross.h>

#include "codes/lp-io.h"
#include "codes/codes.h"
#include "codes/codes-workload.h"
#include "codes-workload-test-svr-lp.h"
#include "codes-workload-test-cn-lp.h"

#define NUM_SERVERS 16  /* number of servers */
#define NUM_CLIENTS 48  /* number of clients */

const tw_optdef app_opt[] =
{
    TWOPT_GROUP("CODES Workload Test Model"),
    TWOPT_END()
};

int main(int argc, char **argv)
{
    int nprocs;
    int rank;
    int lps_per_proc;
    int i;
    int ret;
    lp_io_handle handle;

    g_tw_ts_end = 60*60*24*365;
    tw_opt_add(app_opt);
    tw_init(&argc, &argv);

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    if((NUM_SERVERS+NUM_CLIENTS) % nprocs)
    {
        fprintf(stderr, "Error: number of server LPs (%d total) is not evenly divisible by the number of MPI processes (%d)\n", NUM_SERVERS+NUM_CLIENTS, nprocs);
        exit(-1);
    }

    lps_per_proc = (NUM_SERVERS+NUM_CLIENTS) / nprocs;

    tw_define_lps(lps_per_proc, 512, 0);

    for(i=0; i<lps_per_proc; i++)
    {
        if((rank*lps_per_proc + i) < NUM_CLIENTS)
            tw_lp_settype(i, &client_lp);
        else
            tw_lp_settype(i, &svr_lp);
    }

    cn_set_params(NUM_CLIENTS, NUM_SERVERS);

    g_tw_lookahead = 100;

    ret = lp_io_prepare("codes-workload-test-results", LP_IO_UNIQ_SUFFIX, &handle, MPI_COMM_WORLD);
    if(ret < 0)
    {
        return(-1);
    }

    tw_run();

    ret = lp_io_flush(handle, MPI_COMM_WORLD);
    assert(ret == 0);

    tw_end();

    return 0;
}

/*
 * Local variables:
 *    c-indent-level: 4
 *    c-basic-offset: 4
 * End:
 *
 * vim: ft=c ts=8 sts=4 sw=4 expandtab
 */

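To make the LP partitioning in main() concrete, here is a small stand-alone sketch (not part of the commit) that reports which LP type each rank hosts, using the committed NUM_SERVERS/NUM_CLIENTS defaults and an assumed run on 4 MPI processes:

#include <stdio.h>

#define NUM_SERVERS 16   /* same defaults as codes-workload-test.c */
#define NUM_CLIENTS 48

int main(void)
{
    int nprocs = 4;   /* assumed MPI world size; must divide 64 evenly */
    int lps_per_proc = (NUM_SERVERS + NUM_CLIENTS) / nprocs;   /* 64 / 4 = 16 */

    for (int rank = 0; rank < nprocs; rank++)
    {
        int first = rank * lps_per_proc;        /* first global LP id on this rank */
        int last  = first + lps_per_proc - 1;   /* last global LP id on this rank */
        printf("rank %d hosts LPs %2d..%2d: %s\n", rank, first, last,
               (last < NUM_CLIENTS) ? "all client_lp" :
               (first >= NUM_CLIENTS) ? "all svr_lp" : "mix of client_lp and svr_lp");
    }
    /* with 4 ranks: ranks 0-2 host client LPs 0..47, rank 3 hosts server LPs 48..63 */
    return 0;
}
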