Project: Xin Wang / codes-dev

Commit ae0f71dd
authored May 29, 2014 by Jonathan Jenkins
parent 27fc0282
Changes: 6 files

    sched method refactor, added non-packetizing fcfs sched
    - NOTE: shim just forces packet size to a huge number
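The NOTE in the message deserves a gloss: rather than implementing a separate non-packetizing code path, fcfs-full reuses the packetizing FCFS scheduler and forces the packet size so large that every message fits in a single packet. A minimal editor's sketch of the idea, assuming a hook at request-creation time; the hook point and the BIG_PACKET_SIZE constant are hypothetical and do not appear in this diff:

    /* Editor's sketch, not part of the commit: the fcfs-full "shim".
     * With a huge packet size, the schedulers' last-packet test
     * (packet_size >= rem) holds on the first call, so the whole
     * message is issued as one packet. */
    #define BIG_PACKET_SIZE (1ULL << 62) /* hypothetical constant */

    if (mn_sched_type == MN_SCHED_FCFS_FULL)
        req->packet_size = BIG_PACKET_SIZE;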
codes/model-net-sched.h

@@ -13,6 +13,27 @@
 #include "model-net.h"
 #include "model-net-method.h"
 
+/// types of schedulers
+/// format: enum type, config string, function pointer names
+/// fcfs-full eschews packetization
+#define SCHEDULER_TYPES \
+    X(MN_SCHED_FCFS,      "fcfs",        &fcfs_tab) \
+    X(MN_SCHED_FCFS_FULL, "fcfs-full",   &fcfs_tab) \
+    X(MN_SCHED_RR,        "round-robin", &rr_tab)   \
+    X(MAX_SCHEDS,         NULL,          NULL)
+
+#define X(a,b,c) a,
+enum sched_type {
+    SCHEDULER_TYPES
+};
+#undef X
+
+extern char * sched_names[];
+
+/// global for scheduler
+/// TODO: move away from using the global for when we have multiple networks
+extern enum sched_type mn_sched_type;
+
 /// scheduler decls
 typedef struct model_net_sched_s model_net_sched;
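To make the X-macro idiom concrete: each expansion site defines X to select one column of the SCHEDULER_TYPES table. A sketch of what the preprocessor produces at the three sites in this commit (the enum above, plus the name and interface tables defined in model-net-sched.c and model-net-sched-impl.c):

    /* editor's sketch of the X-macro expansions */
    enum sched_type {
        MN_SCHED_FCFS,
        MN_SCHED_FCFS_FULL,
        MN_SCHED_RR,
        MAX_SCHEDS,
    };

    char * sched_names[] = { "fcfs", "fcfs-full", "round-robin", NULL };

    model_net_sched_interface * sched_interfaces[] =
        { &fcfs_tab, &fcfs_tab, &rr_tab, NULL };

Because all three lists come from one table, an enum value can index sched_names and sched_interfaces directly; note that MN_SCHED_FCFS_FULL deliberately maps to the same fcfs_tab, with the packet-size shim accounting for the difference.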
@@ -41,15 +62,6 @@ typedef struct model_net_sched_interface {
     void (*next_rc)(
             void *sched,
             model_net_sched_rc *rc,
             tw_lp *lp);
 } model_net_sched_interface;
 
-enum sched_type {
-    MN_SCHED_FCFS, // issue all packets at once (orig. model-net behavior)
-    MN_SCHED_RR    // round-robin packet scheduling
-};
-
-/// global for scheduler
-/// TODO: move away from using the global for when we have multiple networks
-extern enum sched_type mn_sched_type;
-
 /// overall scheduler struct - type puns the actual data structure
 struct model_net_sched_s {
src/models/Makefile.subdir

@@ -20,4 +20,6 @@ src_libcodes_net_a_SOURCES = \
 	src/models/networks/model-net/loggp.c \
 	src/models/networks/model-net/simplewan.c \
 	src/models/networks/model-net/model-net-lp.c \
-	src/models/networks/model-net/model-net-sched.c
+	src/models/networks/model-net/model-net-sched.c \
+	src/models/networks/model-net/model-net-sched-impl.h \
+	src/models/networks/model-net/model-net-sched-impl.c
src/models/networks/model-net/model-net-sched-impl.c (new file, 0 → 100644)
/*
 * Copyright (C) 2014 University of Chicago.
 * See COPYRIGHT notice in top-level directory.
 *
 */

#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <assert.h>

#include "model-net-sched-impl.h"
#include "codes/model-net-sched.h"
#include "codes/model-net-method.h"
#include "codes/quicklist.h"

/// scheduler-specific data structures
/// NOTE: for now, scheduler data structures are the same - this may change in
/// later versions

typedef struct mn_sched {
    // method containing packet event to call
    struct model_net_method *method;
    struct qlist_head reqs; // of type mn_sched_qitem
    // this is an unfortunate result - we have to basically not free anything
    // in order to keep around the remote and local events
    // we desperately need GVT hooks to run our own garbage collection
    struct qlist_head free_reqs;
} mn_sched;

typedef struct mn_sched_qitem {
    model_net_request req;
    // remaining bytes to send
    uint64_t rem;
    // pointers to event structures
    // sizes are given in the request struct
    void *remote_event;
    void *local_event;
    struct qlist_head ql;
} mn_sched_qitem;

/// scheduler-specific function decls and function table

/// FCFS
// void used to avoid ptr-to-ptr conv warnings
static void fcfs_init(struct model_net_method *method, void **sched);
static void fcfs_destroy(void *sched);
static void fcfs_add(
        model_net_request *req,
        int remote_event_size,
        void *remote_event,
        int local_event_size,
        void *local_event,
        void *sched,
        model_net_sched_rc *rc,
        tw_lp *lp);
static void fcfs_add_rc(void *sched, model_net_sched_rc *rc, tw_lp *lp);
static int fcfs_next(
        tw_stime *poffset,
        void *sched,
        model_net_sched_rc *rc,
        tw_lp *lp);
static void fcfs_next_rc(void *sched, model_net_sched_rc *rc, tw_lp *lp);

static void rr_init(struct model_net_method *method, void **sched);
static void rr_destroy(void *sched);
static void rr_add(
        model_net_request *req,
        int remote_event_size,
        void *remote_event,
        int local_event_size,
        void *local_event,
        void *sched,
        model_net_sched_rc *rc,
        tw_lp *lp);
static void rr_add_rc(void *sched, model_net_sched_rc *rc, tw_lp *lp);
static int rr_next(
        tw_stime *poffset,
        void *sched,
        model_net_sched_rc *rc,
        tw_lp *lp);
static void rr_next_rc(void *sched, model_net_sched_rc *rc, tw_lp *lp);

/// function tables (names defined by X macro in model-net-sched.h)
static model_net_sched_interface fcfs_tab = {
    &fcfs_init,
    &fcfs_destroy,
    &fcfs_add,
    &fcfs_add_rc,
    &fcfs_next,
    &fcfs_next_rc
};
static model_net_sched_interface rr_tab = {
    &rr_init,
    &rr_destroy,
    &rr_add,
    &rr_add_rc,
    &rr_next,
    &rr_next_rc
};

#define X(a,b,c) c,
model_net_sched_interface * sched_interfaces[] = {
    SCHEDULER_TYPES
};
#undef X
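The design payoff of this table is that registering a scheduler touches one line rather than a switch statement. As a hedged editor's sketch (the "priority" scheduler and prio_tab below are hypothetical, not part of this commit), a new entry would simply be added before the MAX_SCHEDS terminator in model-net-sched.h:

    /* hypothetical: one new row keeps the enum value, config string,
     * and dispatch entry in sync across all three X-macro expansions */
    #define SCHEDULER_TYPES \
        X(MN_SCHED_FCFS,      "fcfs",        &fcfs_tab) \
        X(MN_SCHED_FCFS_FULL, "fcfs-full",   &fcfs_tab) \
        X(MN_SCHED_RR,        "round-robin", &rr_tab)   \
        X(MN_SCHED_PRIO,      "priority",    &prio_tab) \
        X(MAX_SCHEDS,         NULL,          NULL)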
/// FCFS implementation

void fcfs_init(struct model_net_method *method, void **sched){
    *sched = malloc(sizeof(mn_sched));
    mn_sched *ss = *sched;
    ss->method = method;
    INIT_QLIST_HEAD(&ss->reqs);
    INIT_QLIST_HEAD(&ss->free_reqs);
}

void fcfs_destroy(void *sched){
    free(sched);
}

void fcfs_add(
        model_net_request *req,
        int remote_event_size,
        void *remote_event,
        int local_event_size,
        void *local_event,
        void *sched,
        model_net_sched_rc *rc,
        tw_lp *lp){
    // NOTE: in optimistic mode, we currently do not have a good way to
    // reliably free and re-initialize the q item and the local/remote events
    // when processing next/next_rc events. Hence, the memory leaks. Later on
    // we'll figure out a better way to handle this.
    mn_sched_qitem *q = malloc(sizeof(mn_sched_qitem));
    assert(q);
    memset(q, 0, sizeof(*q));
    q->req = *req;
    q->rem = req->is_pull ? PULL_MSG_SIZE : req->msg_size;
    if (remote_event_size > 0){
        q->remote_event = malloc(remote_event_size);
        memcpy(q->remote_event, remote_event, remote_event_size);
    }
    if (local_event_size > 0){
        q->local_event = malloc(local_event_size);
        memcpy(q->local_event, local_event, local_event_size);
    }
    mn_sched *s = sched;
    qlist_add_tail(&q->ql, &s->reqs);
}

void fcfs_add_rc(void *sched, model_net_sched_rc *rc, tw_lp *lp){
    mn_sched *s = sched;
    struct qlist_head *ent = qlist_pop_back(&s->reqs);
    assert(ent != NULL);
    mn_sched_qitem *q = qlist_entry(ent, mn_sched_qitem, ql);
    if (q->remote_event) free(q->remote_event);
    if (q->local_event)  free(q->local_event);
    free(q);
}

int fcfs_next(
        tw_stime *poffset,
        void *sched,
        model_net_sched_rc *rc,
        tw_lp *lp){
    mn_sched *s = sched;
    struct qlist_head *ent = s->reqs.next;
    if (ent == &s->reqs){
        rc->rtn = -1;
        return -1;
    }
    mn_sched_qitem *q = qlist_entry(ent, mn_sched_qitem, ql);

    // issue the next packet
    int is_last_packet;
    uint64_t psize;
    if (q->req.packet_size >= q->rem) {
        psize = q->rem;
        is_last_packet = 1;
    }
    else{
        psize = q->req.packet_size;
        is_last_packet = 0;
    }

    *poffset = s->method->model_net_method_packet_event(q->req.category,
            q->req.final_dest_lp, psize, q->req.is_pull, q->req.msg_size, 0.0,
            q->req.remote_event_size, q->remote_event, q->req.self_event_size,
            q->local_event, q->req.src_lp, lp, is_last_packet);

    // if last packet - remove from list, put into free list
    if (is_last_packet){
        qlist_pop(&s->reqs);
        qlist_add_tail(&q->ql, &s->free_reqs);
        rc->rtn = 1;
    }
    else{
        q->rem -= psize;
        rc->rtn = 0;
    }
    return rc->rtn;
}
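As an editor's illustration (not part of the commit), the size computation above walks a message down in fixed packet_size chunks; for a hypothetical 2500-byte message with 1024-byte packets it issues 1024, 1024, then 452 bytes, marking the final call as the last packet:

    #include <stdint.h>
    #include <stdio.h>

    /* standalone sketch of the packetization arithmetic in
     * fcfs_next/rr_next, with made-up sizes */
    int main(void){
        uint64_t rem = 2500;               /* bytes left in the message */
        const uint64_t packet_size = 1024; /* configured packet size */
        while (rem > 0){
            int is_last_packet = packet_size >= rem;
            uint64_t psize = is_last_packet ? rem : packet_size;
            printf("issue %llu bytes%s\n", (unsigned long long) psize,
                    is_last_packet ? " (last)" : "");
            rem -= psize;
        }
        return 0;
    }

With the fcfs-full shim forcing packet_size to a huge value, the first iteration is also the last, which is exactly the non-packetizing behavior the commit message describes.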
void fcfs_next_rc(void *sched, model_net_sched_rc *rc, tw_lp *lp){
    mn_sched *s = sched;
    if (rc->rtn == -1){
        // no op
    }
    else{
        s->method->model_net_method_packet_event_rc(lp);
        if (rc->rtn == 0){
            // just get the front and increment rem
            mn_sched_qitem *q = qlist_entry(s->reqs.next, mn_sched_qitem, ql);
            q->rem += q->req.packet_size;
        }
        else if (rc->rtn == 1){
            qlist_add(qlist_pop_back(&s->free_reqs), &s->reqs);
        }
        else {
            assert(0);
        }
    }
}
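An editor's note on the reverse-computation contract, summarized from the forward/reverse pairs above:

    /* rc->rtn records what fcfs_next/rr_next did, so the _rc handlers
     * can undo exactly that:
     *   -1 : queue empty, nothing issued; reverse is a no-op
     *    0 : a mid-message packet was issued; reverse re-adds
     *        packet_size to rem
     *    1 : the last packet was issued and the qitem moved to
     *        free_reqs; reverse moves it back to the head of reqs
     */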
void rr_init(struct model_net_method *method, void **sched){
    *sched = malloc(sizeof(mn_sched));
    mn_sched *ss = *sched;
    ss->method = method;
    INIT_QLIST_HEAD(&ss->reqs);
    INIT_QLIST_HEAD(&ss->free_reqs);
}

void rr_destroy(void *sched){
    free(sched);
}

void rr_add(
        model_net_request *req,
        int remote_event_size,
        void *remote_event,
        int local_event_size,
        void *local_event,
        void *sched,
        model_net_sched_rc *rc,
        tw_lp *lp){
    // NOTE: in optimistic mode, we currently do not have a good way to
    // reliably free and re-initialize the q item and the local/remote events
    // when processing next/next_rc events. Hence, the memory leaks. Later on
    // we'll figure out a better way to handle this.
    mn_sched_qitem *q = malloc(sizeof(mn_sched_qitem));
    assert(q);
    // zero the item so the event pointers are NULL when the sizes are zero
    // (rr_add_rc frees them when set, so they must not be left uninitialized)
    memset(q, 0, sizeof(*q));
    q->req = *req;
    q->rem = req->is_pull ? PULL_MSG_SIZE : req->msg_size;
    if (remote_event_size > 0){
        q->remote_event = malloc(remote_event_size);
        memcpy(q->remote_event, remote_event, remote_event_size);
    }
    if (local_event_size > 0){
        q->local_event = malloc(local_event_size);
        memcpy(q->local_event, local_event, local_event_size);
    }
    mn_sched *s = sched;
    qlist_add_tail(&q->ql, &s->reqs);
}

void rr_add_rc(void *sched, model_net_sched_rc *rc, tw_lp *lp){
    mn_sched *s = sched;
    struct qlist_head *ent = qlist_pop_back(&s->reqs);
    assert(ent != NULL);
    mn_sched_qitem *q = qlist_entry(ent, mn_sched_qitem, ql);
    if (q->remote_event) free(q->remote_event);
    if (q->local_event)  free(q->local_event);
    free(q);
}

int rr_next(
        tw_stime *poffset,
        void *sched,
        model_net_sched_rc *rc,
        tw_lp *lp){
    mn_sched *s = sched;
    struct qlist_head *ent = qlist_pop(&s->reqs);
    if (ent == NULL){
        rc->rtn = -1;
        return -1;
    }
    mn_sched_qitem *q = qlist_entry(ent, mn_sched_qitem, ql);

    // issue the next packet
    int is_last_packet;
    uint64_t psize;
    if (q->req.packet_size >= q->rem) {
        psize = q->rem;
        is_last_packet = 1;
    }
    else{
        psize = q->req.packet_size;
        is_last_packet = 0;
    }

    *poffset = s->method->model_net_method_packet_event(q->req.category,
            q->req.final_dest_lp, psize, q->req.is_pull, q->req.msg_size, 0.0,
            q->req.remote_event_size, q->remote_event, q->req.self_event_size,
            q->local_event, q->req.src_lp, lp, is_last_packet);

    // if last packet - remove from list, put into free list
    if (is_last_packet){
        qlist_add_tail(&q->ql, &s->free_reqs);
        rc->rtn = 1;
    }
    else{
        q->rem -= psize;
        // round-robin: requeue at the tail so other requests get a turn
        qlist_add_tail(&q->ql, &s->reqs);
        rc->rtn = 0;
    }
    return rc->rtn;
}

void rr_next_rc(void *sched, model_net_sched_rc *rc, tw_lp *lp){
    mn_sched *s = sched;
    if (rc->rtn == -1){
        // no op
    }
    else {
        s->method->model_net_method_packet_event_rc(lp);
        if (rc->rtn == 0){
            // increment rem and put item back to front of list
            struct qlist_head *ent = qlist_pop_back(&s->reqs);
            qlist_add(ent, &s->reqs);
            mn_sched_qitem *q = qlist_entry(ent, mn_sched_qitem, ql);
            q->rem += q->req.packet_size;
        }
        else if (rc->rtn == 1){
            // put back to *front* of list. We know it's the front because it
            // was in the front when it was deleted
            qlist_add(qlist_pop_back(&s->free_reqs), &s->reqs);
        }
        else {
            assert(0);
        }
    }
}

/*
 * Local variables:
 * c-indent-level: 4
 * c-basic-offset: 4
 * End:
 *
 * vim: ft=c ts=8 sts=4 sw=4 expandtab
 */
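How a configured name maps back to a scheduler is not shown in this diff. A minimal sketch, assuming the caller scans sched_names (which the X macro in model-net-sched.c defines parallel to the enum); the helper name is hypothetical:

    #include <string.h>

    /* hypothetical helper: map a config string such as "fcfs-full" to
     * its enum value by scanning the name table; returns MAX_SCHEDS
     * if the name is unknown */
    static enum sched_type sched_type_from_name(const char *name){
        for (int i = 0; i < MAX_SCHEDS; i++){
            if (strcmp(sched_names[i], name) == 0)
                return (enum sched_type) i;
        }
        return MAX_SCHEDS;
    }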
src/models/networks/model-net/model-net-sched-impl.h (new file, 0 → 100644)
/*
 * Copyright (C) 2014 University of Chicago.
 * See COPYRIGHT notice in top-level directory.
 *
 */

#ifndef MODEL_NET_SCHED_IMPL_H
#define MODEL_NET_SCHED_IMPL_H

#include "codes/model-net-sched.h"
#include "codes/model-net-method.h"

// table of scheduler implementations, indexed by enum sched_type
extern model_net_sched_interface * sched_interfaces[];

#endif /* end of include guard: MODEL_NET_SCHED_IMPL_H */

/*
 * Local variables:
 * c-indent-level: 4
 * c-basic-offset: 4
 * End:
 *
 * vim: ft=c ts=8 sts=4 sw=4 expandtab
 */
src/models/networks/model-net/model-net-sched.c
@@ -9,79 +9,18 @@
 #include <assert.h>
 #include <ross.h>
 
+#include "model-net-sched-impl.h"
 #include "codes/model-net-sched.h"
 #include "codes/model-net-lp.h"
 #include "codes/quicklist.h"
 
 enum sched_type mn_sched_type = -1;
 
-/// scheduler-specific data structures (TODO: split specific schedulers into
-/// their own files if we move beyond just these two)
-/// NOTE: for now, scheduler data structures are the same - this may change in
-/// later versions
-
-typedef struct mn_sched {
-    // method containing packet event to call
-    struct model_net_method *method;
-    struct qlist_head reqs; // of type mn_sched_qitem
-    // this is an unfortunate result - we have to basically not free anything
-    // in order to keep around the remote and local events
-    // we desperately need GVT hooks to run our own garbage collection
-    struct qlist_head free_reqs;
-} mn_sched;
-
-// at the moment, rr and fcfs only differ in how req queue is modified, queue
-// items themselves are equivalent
-typedef struct mn_sched_qitem {
-    model_net_request req;
-    // remaining bytes to send
-    uint64_t rem;
-    // pointers to event structures
-    // sizes are given in the request struct
-    void *remote_event;
-    void *local_event;
-    struct qlist_head ql;
-} mn_sched_qitem;
-
-/// scheduler-specific function decls and function table
-// void used to avoid ptr-to-ptr conv warnings
-static void fcfs_init(struct model_net_method *method, void **sched);
-static void fcfs_destroy(void *sched);
-static void fcfs_add(
-        model_net_request *req,
-        int remote_event_size,
-        void *remote_event,
-        int local_event_size,
-        void *local_event,
-        void *sched,
-        model_net_sched_rc *rc,
-        tw_lp *lp);
-static void fcfs_add_rc(void *sched, model_net_sched_rc *rc, tw_lp *lp);
-static int fcfs_next(
-        tw_stime *poffset,
-        void *sched,
-        model_net_sched_rc *rc,
-        tw_lp *lp);
-static void fcfs_next_rc(void *sched, model_net_sched_rc *rc, tw_lp *lp);
-static model_net_sched_interface fcfs_tab =
-    {&fcfs_init, &fcfs_destroy, &fcfs_add, &fcfs_add_rc, &fcfs_next,
-     &fcfs_next_rc};
-
-static void rr_init(struct model_net_method *method, void **sched);
-static void rr_destroy(void *sched);
-static void rr_add(
-        model_net_request *req,
-        int remote_event_size,
-        void *remote_event,
-        int local_event_size,
-        void *local_event,
-        void *sched,
-        model_net_sched_rc *rc,
-        tw_lp *lp);
-static void rr_add_rc(void *sched, model_net_sched_rc *rc, tw_lp *lp);
-static int rr_next(
-        tw_stime *poffset,
-        void *sched,
-        model_net_sched_rc *rc,
-        tw_lp *lp);
-static void rr_next_rc(void *sched, model_net_sched_rc *rc, tw_lp *lp);
-static model_net_sched_interface rr_tab =
-    {&rr_init, &rr_destroy, &rr_add, &rr_add_rc, &rr_next, &rr_next_rc};
+#define X(a,b,c) b,
+char * sched_names[] = {
+    SCHEDULER_TYPES
+};
+#undef X
 
 /// general scheduler functions
@@ -89,18 +28,14 @@ void model_net_sched_init(
         enum sched_type type,
         struct model_net_method *method,
         model_net_sched *sched){
-    sched->type = type;
-    switch(type){
-        case MN_SCHED_FCFS:
-            sched->impl = &fcfs_tab;
-            break;
-        case MN_SCHED_RR:
-            sched->impl = &rr_tab;
-            break;
-        default:
-            fprintf(stderr, "unknown scheduler type");
-            abort();
-    }
+    if (type >= MAX_SCHEDS){
+        fprintf(stderr, "unknown scheduler type");
+        abort();
+    }
+    else{
+        sched->impl = sched_interfaces[type];
+    }
+    sched->type = type;
 
     sched->impl->init(method, &sched->dat);
 }
@@ -139,234 +74,6 @@ void model_net_sched_add_rc(
     sched->impl->add_rc(sched->dat, sched_rc, lp);
 }
-
-/// specific scheduler implementations
-
-void fcfs_init(struct model_net_method *method, void **sched){
-    *sched = malloc(sizeof(mn_sched));
-    mn_sched *ss = *sched;
-    ss->method = method;
-    INIT_QLIST_HEAD(&ss->reqs);
-    INIT_QLIST_HEAD(&ss->free_reqs);
-}
-
-void fcfs_destroy(void *sched){
-    free(sched);
-}
-
-void fcfs_add(
-        model_net_request *req,
-        int remote_event_size,
-        void *remote_event,
-        int local_event_size,
-        void *local_event,
-        void *sched,
-        model_net_sched_rc *rc,
-        tw_lp *lp){
-    // NOTE: in optimistic mode, we currently do not have a good way to
-    // reliably free and re-initialize the q item and the local/remote events
-    // when processing next/next_rc events. Hence, the memory leaks. Later on
-    // we'll figure out a better way to handle this.
-    mn_sched_qitem *q = malloc(sizeof(mn_sched_qitem));
-    assert(q);
-    memset(q, 0, sizeof(*q));
-    q->req = *req;
-    q->rem = req->is_pull ? PULL_MSG_SIZE : req->msg_size;
-    if (remote_event_size > 0){
-        q->remote_event = malloc(remote_event_size);
-        memcpy(q->remote_event, remote_event, remote_event_size);
-    }
-    if (local_event_size > 0){
-        q->local_event = malloc(local_event_size);
-        memcpy(q->local_event, local_event, local_event_size);
-    }
-    mn_sched *s = sched;
-    qlist_add_tail(&q->ql, &s->reqs);
-}
-
-void fcfs_add_rc(void *sched, model_net_sched_rc *rc, tw_lp *lp){
-    mn_sched *s = sched;
-    struct qlist_head *ent = qlist_pop_back(&s->reqs);
-    assert(ent != NULL);
-    mn_sched_qitem *q = qlist_entry(ent, mn_sched_qitem, ql);
-    if (q->remote_event) free(q->remote_event);
-    if (q->local_event) free(q->local_event);
-    free(q);
-}
-
-int fcfs_next(
-        tw_stime *poffset,
-        void *sched,
-        model_net_sched_rc *rc,
-        tw_lp *lp){
-    mn_sched *s = sched;
-    struct qlist_head *ent = s->reqs.next;
-    if (ent == &s->reqs){
-        rc->rtn = -1;
-        return -1;
-    }
-    mn_sched_qitem *q = qlist_entry(ent, mn_sched_qitem, ql);
-
-    // issue the next packet
-    int is_last_packet;
-    uint64_t psize;
-    if (q->req.packet_size >= q->rem) {
-        psize = q->rem;
-        is_last_packet = 1;
-    }
-    else{
-        psize = q->req.packet_size;
-        is_last_packet = 0;
-    }
-
-    *poffset = s->method->model_net_method_packet_event(q->req.category,
-            q->req.final_dest_lp, psize, q->req.is_pull, q->req.msg_size, 0.0,
-            q->req.remote_event_size, q->remote_event, q->req.self_event_size,
-            q->local_event, q->req.src_lp, lp, is_last_packet);
-
-    // if last packet - remove from list, put into free list
-    if (is_last_packet){
-        qlist_pop(&s->reqs);
-        qlist_add_tail(&q->ql, &s->free_reqs);