Commit ae0f71dd authored by Jonathan Jenkins's avatar Jonathan Jenkins
Browse files

sched method refactor, added non-packetizing fcfs sched

- NOTE: shim just forces packet size to a huge number
parent 27fc0282
...@@ -13,6 +13,27 @@ ...@@ -13,6 +13,27 @@
#include "model-net.h" #include "model-net.h"
#include "model-net-method.h" #include "model-net-method.h"
/// types of schedulers
/// format: enum type, config string, function pointer names
/// fcfs-full eschews packetization
#define SCHEDULER_TYPES \
X(MN_SCHED_FCFS, "fcfs", &fcfs_tab) \
X(MN_SCHED_FCFS_FULL, "fcfs-full", &fcfs_tab) \
X(MN_SCHED_RR, "round-robin", &rr_tab) \
X(MAX_SCHEDS, NULL, NULL)
#define X(a,b,c) a,
enum sched_type {
SCHEDULER_TYPES
};
#undef X
extern char * sched_names[];
/// global for scheduler
/// TODO: move away from using the global for when we have multiple networks
extern enum sched_type mn_sched_type;
/// scheduler decls /// scheduler decls
typedef struct model_net_sched_s model_net_sched; typedef struct model_net_sched_s model_net_sched;
...@@ -41,15 +62,6 @@ typedef struct model_net_sched_interface { ...@@ -41,15 +62,6 @@ typedef struct model_net_sched_interface {
void (*next_rc)(void * sched, model_net_sched_rc *rc, tw_lp *lp); void (*next_rc)(void * sched, model_net_sched_rc *rc, tw_lp *lp);
} model_net_sched_interface; } model_net_sched_interface;
enum sched_type {
MN_SCHED_FCFS, // issue all packets at once (orig. model-net behavior)
MN_SCHED_RR // round-robin packet scheduling
};
/// global for scheduler
/// TODO: move away from using the global for when we have multiple networks
extern enum sched_type mn_sched_type;
/// overall scheduler struct - type puns the actual data structure /// overall scheduler struct - type puns the actual data structure
struct model_net_sched_s { struct model_net_sched_s {
......
...@@ -20,4 +20,6 @@ src_libcodes_net_a_SOURCES = \ ...@@ -20,4 +20,6 @@ src_libcodes_net_a_SOURCES = \
src/models/networks/model-net/loggp.c \ src/models/networks/model-net/loggp.c \
src/models/networks/model-net/simplewan.c \ src/models/networks/model-net/simplewan.c \
src/models/networks/model-net/model-net-lp.c \ src/models/networks/model-net/model-net-lp.c \
src/models/networks/model-net/model-net-sched.c src/models/networks/model-net/model-net-sched.c \
src/models/networks/model-net/model-net-sched-impl.h \
src/models/networks/model-net/model-net-sched-impl.c
/*
* Copyright (C) 2014 University of Chicago.
* See COPYRIGHT notice in top-level directory.
*
*/
#include <stdlib.h>
#include <string.h>
#include <assert.h>

#include "model-net-sched-impl.h"
#include "codes/model-net-sched.h"
#include "codes/model-net-method.h"
#include "codes/quicklist.h"
/// scheduler-specific data structures
/// NOTE: for now, scheduler data structures are the same - this may change in
/// later versions
// per-instance scheduler state; shared by both the FCFS and RR
// implementations (only the queue manipulation differs between them)
typedef struct mn_sched {
    // method containing packet event to call
    struct model_net_method *method;
    struct qlist_head reqs; // of type mn_sched_qitem
    // this is an unfortunate result - we have to basically not free anything
    // in order to keep around the remote and local events
    // we desperately need GVT hooks to run our own garbage collection
    struct qlist_head free_reqs; // completed mn_sched_qitems, retained so *_next_rc can restore them
} mn_sched;
// one queued send request plus owned copies of its optional events
typedef struct mn_sched_qitem {
    model_net_request req;
    // remaining bytes to send
    uint64_t rem;
    // pointers to event structures
    // sizes are given in the request struct
    // NOTE(review): *_add_rc frees these conditionally, so they must be
    // NULL when no event was supplied; fcfs_add zeroes the item but
    // rr_add currently does not - confirm rr_add memsets the item
    void * remote_event;
    void * local_event;
    struct qlist_head ql; // linkage into mn_sched reqs / free_reqs
} mn_sched_qitem;
/// scheduler-specific function decls and function table
/// FCFS
// void used to avoid ptr-to-ptr conv warnings
static void fcfs_init (struct model_net_method *method, void ** sched);
static void fcfs_destroy (void *sched);
static void fcfs_add (
        model_net_request *req,
        int remote_event_size,
        void * remote_event,
        int local_event_size,
        void * local_event,
        void *sched,
        model_net_sched_rc *rc,
        tw_lp *lp);
static void fcfs_add_rc(void *sched, model_net_sched_rc *rc, tw_lp *lp);
static int fcfs_next(tw_stime *poffset, void *sched, model_net_sched_rc *rc, tw_lp *lp);
static void fcfs_next_rc(void *sched, model_net_sched_rc *rc, tw_lp *lp);

/// round-robin
static void rr_init (struct model_net_method *method, void ** sched);
static void rr_destroy (void *sched);
static void rr_add (
        model_net_request *req,
        int remote_event_size,
        void * remote_event,
        int local_event_size,
        void * local_event,
        void *sched,
        model_net_sched_rc *rc,
        tw_lp *lp);
static void rr_add_rc(void *sched, model_net_sched_rc *rc, tw_lp *lp);
static int rr_next(tw_stime *poffset, void *sched, model_net_sched_rc *rc, tw_lp *lp);
static void rr_next_rc (void *sched, model_net_sched_rc *rc, tw_lp *lp);

/// function tables (names defined by X macro in model-net-sched.h)
static model_net_sched_interface fcfs_tab =
{ &fcfs_init, &fcfs_destroy, &fcfs_add, &fcfs_add_rc, &fcfs_next, &fcfs_next_rc};
static model_net_sched_interface rr_tab =
{ &rr_init, &rr_destroy, &rr_add, &rr_add_rc, &rr_next, &rr_next_rc};

// expand SCHEDULER_TYPES into a lookup table indexed by enum sched_type;
// the trailing MAX_SCHEDS entry contributes a NULL sentinel
#define X(a,b,c) c,
model_net_sched_interface * sched_interfaces[] = {
    SCHEDULER_TYPES
};
#undef X
/// FCFS implementation
// Allocate and initialize FCFS scheduler state.
// *sched receives ownership of the allocation; freed by fcfs_destroy.
void fcfs_init(struct model_net_method *method, void ** sched){
    *sched = malloc(sizeof(mn_sched));
    // abort on OOM - there is no recovery path at this level
    assert(*sched);
    mn_sched *ss = *sched;
    ss->method = method;
    INIT_QLIST_HEAD(&ss->reqs);
    INIT_QLIST_HEAD(&ss->free_reqs);
}
// Release scheduler state allocated by fcfs_init.
// NOTE: queued items and their event copies are deliberately not walked
// and freed here (see the garbage-collection note on struct mn_sched).
void fcfs_destroy(void *sched){
    free(sched);
}
// Enqueue a new send request at the tail of the FCFS queue, taking
// private copies of the optional remote/local events.
// rc and lp are unused here; the reverse handler is fcfs_add_rc.
void fcfs_add (
        model_net_request *req,
        int remote_event_size,
        void * remote_event,
        int local_event_size,
        void * local_event,
        void *sched,
        model_net_sched_rc *rc,
        tw_lp *lp){
    // NOTE: in optimistic mode, we currently do not have a good way to
    // reliably free and re-initialize the q item and the local/remote events
    // when processing next/next_rc events. Hence, the memory leaks. Later on
    // we'll figure out a better way to handle this.
    mn_sched_qitem *q = malloc(sizeof(mn_sched_qitem));
    assert(q);
    // zeroing ensures unset event pointers are NULL, which the
    // conditional frees in fcfs_add_rc depend on
    memset(q, 0, sizeof(*q));
    q->req = *req;
    q->rem = req->is_pull ? PULL_MSG_SIZE : req->msg_size;
    if (remote_event_size > 0){
        q->remote_event = malloc(remote_event_size);
        assert(q->remote_event); // was unchecked - NULL deref in memcpy on OOM
        memcpy(q->remote_event, remote_event, remote_event_size);
    }
    if (local_event_size > 0){
        q->local_event = malloc(local_event_size);
        assert(q->local_event); // was unchecked - NULL deref in memcpy on OOM
        memcpy(q->local_event, local_event, local_event_size);
    }
    mn_sched *s = sched;
    qlist_add_tail(&q->ql, &s->reqs);
}
// Reverse of fcfs_add: the item was appended at the tail, so undo by
// removing the tail entry and releasing it along with its event copies.
void fcfs_add_rc(void *sched, model_net_sched_rc *rc, tw_lp *lp){
    mn_sched *s = sched;
    struct qlist_head *tail = qlist_pop_back(&s->reqs);
    assert(tail != NULL);
    mn_sched_qitem *item = qlist_entry(tail, mn_sched_qitem, ql);
    // free(NULL) is a no-op, so no guards are needed
    free(item->remote_event);
    free(item->local_event);
    free(item);
}
// Issue one packet event for the request at the head of the queue.
// Records the taken branch in rc->rtn for reverse computation and
// returns the same value:
//   -1 : queue empty, nothing issued
//    1 : last packet of the head request issued (request completed)
//    0 : full-size packet issued, request stays at the head
int fcfs_next(tw_stime *poffset, void *sched, model_net_sched_rc *rc, tw_lp *lp){
    mn_sched *s = sched;
    struct qlist_head *ent = s->reqs.next;
    // an empty qlist points back at its own head
    if (ent == &s->reqs){
        rc->rtn = -1;
        return -1;
    }
    mn_sched_qitem *q = qlist_entry(ent, mn_sched_qitem, ql);
    // issue the next packet
    int is_last_packet;
    uint64_t psize;
    // clamp the packet size to the bytes remaining in this request
    if (q->req.packet_size >= q->rem) {
        psize = q->rem;
        is_last_packet = 1;
    }
    else{
        psize = q->req.packet_size;
        is_last_packet = 0;
    }
    *poffset = s->method->model_net_method_packet_event(q->req.category,
            q->req.final_dest_lp, psize, q->req.is_pull, q->req.msg_size, 0.0,
            q->req.remote_event_size, q->remote_event, q->req.self_event_size,
            q->local_event, q->req.src_lp, lp, is_last_packet);
    // if last packet - remove from list, put into free list
    // (kept on free_reqs rather than freed so fcfs_next_rc can restore it)
    if (is_last_packet){
        qlist_pop(&s->reqs);
        qlist_add_tail(&q->ql, &s->free_reqs);
        rc->rtn = 1;
    }
    else{
        q->rem -= psize;
        rc->rtn = 0;
    }
    return rc->rtn;
}
// Reverse handler for fcfs_next; rc->rtn identifies which branch to undo.
void fcfs_next_rc(void *sched, model_net_sched_rc *rc, tw_lp *lp){
    mn_sched *s = sched;
    if (rc->rtn == -1){
        // no op
    }
    else{
        // undo the packet event issued in the forward path
        s->method->model_net_method_packet_event_rc(lp);
        if (rc->rtn == 0){
            // just get the front and increment rem
            mn_sched_qitem *q = qlist_entry(s->reqs.next, mn_sched_qitem, ql);
            // just increment rem
            q->rem += q->req.packet_size;
        }
        else if (rc->rtn == 1){
            // the completed item was appended to the tail of free_reqs;
            // move it back to the front of the request queue
            qlist_add(qlist_pop_back(&s->free_reqs), &s->reqs);
        }
        else {
            // rc->rtn was corrupted or never set - fail loudly
            assert(0);
        }
    }
}
// Allocate and initialize round-robin scheduler state.
// *sched receives ownership of the allocation; freed by rr_destroy.
void rr_init (struct model_net_method *method, void ** sched){
    *sched = malloc(sizeof(mn_sched));
    // abort on OOM - there is no recovery path at this level
    assert(*sched);
    mn_sched *ss = *sched;
    ss->method = method;
    INIT_QLIST_HEAD(&ss->reqs);
    INIT_QLIST_HEAD(&ss->free_reqs);
}
// Release scheduler state allocated by rr_init.
// NOTE: queued items and their event copies are deliberately not walked
// and freed here (see the garbage-collection note on struct mn_sched).
void rr_destroy (void *sched){
    free(sched);
}
// Enqueue a new send request at the tail of the round-robin queue,
// taking private copies of the optional remote/local events.
// rc and lp are unused here; the reverse handler is rr_add_rc.
void rr_add (
        model_net_request *req,
        int remote_event_size,
        void * remote_event,
        int local_event_size,
        void * local_event,
        void *sched,
        model_net_sched_rc *rc,
        tw_lp *lp){
    // NOTE: in optimistic mode, we currently do not have a good way to
    // reliably free and re-initialize the q item and the local/remote events
    // when processing next/next_rc events. Hence, the memory leaks. Later on
    // we'll figure out a better way to handle this.
    mn_sched_qitem *q = malloc(sizeof(mn_sched_qitem));
    assert(q);
    // BUG FIX: zero the item (fcfs_add already did this). Without it,
    // remote_event/local_event hold garbage when the corresponding size
    // is 0, and the conditional frees in rr_add_rc would then free wild
    // pointers - undefined behavior.
    memset(q, 0, sizeof(*q));
    q->req = *req;
    q->rem = req->is_pull ? PULL_MSG_SIZE : req->msg_size;
    if (remote_event_size > 0){
        q->remote_event = malloc(remote_event_size);
        assert(q->remote_event);
        memcpy(q->remote_event, remote_event, remote_event_size);
    }
    if (local_event_size > 0){
        q->local_event = malloc(local_event_size);
        assert(q->local_event);
        memcpy(q->local_event, local_event, local_event_size);
    }
    mn_sched *s = sched;
    qlist_add_tail(&q->ql, &s->reqs);
}
// Reverse of rr_add: the item was appended at the tail, so undo by
// removing the tail entry and releasing it along with its event copies.
void rr_add_rc(void *sched, model_net_sched_rc *rc, tw_lp *lp){
    mn_sched *s = sched;
    struct qlist_head *tail = qlist_pop_back(&s->reqs);
    assert(tail != NULL);
    mn_sched_qitem *item = qlist_entry(tail, mn_sched_qitem, ql);
    // free(NULL) is a no-op, so no guards are needed
    free(item->remote_event);
    free(item->local_event);
    free(item);
}
// Issue one packet event for the request at the head of the queue, then
// rotate: an unfinished request is re-appended at the tail so requests
// share bandwidth packet-by-packet. Records the taken branch in rc->rtn
// and returns the same value:
//   -1 : queue empty, nothing issued
//    1 : last packet issued (request completed, moved to free_reqs)
//    0 : full-size packet issued, request rotated to the tail
int rr_next(tw_stime *poffset, void *sched, model_net_sched_rc *rc, tw_lp *lp){
    mn_sched *s = sched;
    struct qlist_head *ent = qlist_pop(&s->reqs);
    if (ent == NULL){
        rc->rtn = -1;
        return -1;
    }
    mn_sched_qitem *q = qlist_entry(ent, mn_sched_qitem, ql);
    // issue the next packet
    int is_last_packet;
    uint64_t psize;
    // clamp the packet size to the bytes remaining in this request
    if (q->req.packet_size >= q->rem) {
        psize = q->rem;
        is_last_packet = 1;
    }
    else{
        psize = q->req.packet_size;
        is_last_packet = 0;
    }
    *poffset = s->method->model_net_method_packet_event(q->req.category,
            q->req.final_dest_lp, psize, q->req.is_pull, q->req.msg_size, 0.0,
            q->req.remote_event_size, q->remote_event, q->req.self_event_size,
            q->local_event, q->req.src_lp, lp, is_last_packet);
    // if last packet - remove from list, put into free list
    // (kept on free_reqs rather than freed so rr_next_rc can restore it)
    if (is_last_packet){
        qlist_add_tail(&q->ql, &s->free_reqs);
        rc->rtn = 1;
    }
    else{
        q->rem -= psize;
        qlist_add_tail(&q->ql, &s->reqs);
        rc->rtn = 0;
    }
    return rc->rtn;
}
// Reverse handler for rr_next; rc->rtn identifies which branch to undo.
void rr_next_rc (void *sched, model_net_sched_rc *rc, tw_lp *lp){
    mn_sched *s = sched;
    if (rc->rtn == -1){
        // no op
    }
    else {
        // undo the packet event issued in the forward path
        s->method->model_net_method_packet_event_rc(lp);
        if (rc->rtn == 0){
            // increment rem and put item back to front of list
            // (forward path popped the front and re-appended at the tail)
            struct qlist_head *ent = qlist_pop_back(&s->reqs);
            qlist_add(ent, &s->reqs);
            mn_sched_qitem *q = qlist_entry(ent, mn_sched_qitem, ql);
            q->rem += q->req.packet_size;
        }
        else if (rc->rtn == 1){
            // put back to *front* of list. We know it's the front because it was
            // in the front when it was deleted
            qlist_add(qlist_pop_back(&s->free_reqs), &s->reqs);
        }
        else {
            // rc->rtn was corrupted or never set - fail loudly
            assert(0);
        }
    }
}
/*
* Local variables:
* c-indent-level: 4
* c-basic-offset: 4
* End:
*
* vim: ft=c ts=8 sts=4 sw=4 expandtab
*/
/*
* Copyright (C) 2014 University of Chicago.
* See COPYRIGHT notice in top-level directory.
*
*/
#ifndef MODEL_NET_SCHED_IMPL_H
#define MODEL_NET_SCHED_IMPL_H

#include "codes/model-net-sched.h"
#include "codes/model-net-method.h"

// lookup table of scheduler implementations, indexed by enum sched_type;
// defined in model-net-sched-impl.c via the SCHEDULER_TYPES X macro
extern model_net_sched_interface * sched_interfaces[];

#endif /* end of include guard: MODEL_NET_SCHED_IMPL_H */
/*
* Local variables:
* c-indent-level: 4
* c-basic-offset: 4
* End:
*
* vim: ft=c ts=8 sts=4 sw=4 expandtab
*/
...@@ -9,79 +9,18 @@ ...@@ -9,79 +9,18 @@
#include <assert.h> #include <assert.h>
#include <ross.h> #include <ross.h>
#include "model-net-sched-impl.h"
#include "codes/model-net-sched.h" #include "codes/model-net-sched.h"
#include "codes/model-net-lp.h" #include "codes/model-net-lp.h"
#include "codes/quicklist.h" #include "codes/quicklist.h"
enum sched_type mn_sched_type = -1; enum sched_type mn_sched_type = -1;
/// scheduler-specific data structures (TODO: split specific schedulers into #define X(a,b,c) b,
/// their own files if we move beyond just these two) char * sched_names [] = {
/// NOTE: for now, scheduler data structures are the same - this may change in SCHEDULER_TYPES
/// later versions };
#undef X
typedef struct mn_sched {
// method containing packet event to call
struct model_net_method *method;
struct qlist_head reqs; // of type mn_sched_qitem
// this is an unfortunate result - we have to basically not free anything
// in order to keep around the remote and local events
// we desperately need GVT hooks to run our own garbage collection
struct qlist_head free_reqs;
} mn_sched;
// at the moment, rr and fcfs only differ in how req queue is modified, queue
// items themselves are equivalent
typedef struct mn_sched_qitem {
model_net_request req;
// remaining bytes to send
uint64_t rem;
// pointers to event structures
// sizes are given in the request struct
void * remote_event;
void * local_event;
struct qlist_head ql;
} mn_sched_qitem;
/// scheduler-specific function decls and function table
// void used to avoid ptr-to-ptr conv warnings
static void fcfs_init (struct model_net_method *method, void ** sched);
static void fcfs_destroy (void *sched);
static void fcfs_add (
model_net_request *req,
int remote_event_size,
void * remote_event,
int local_event_size,
void * local_event,
void *sched,
model_net_sched_rc *rc,
tw_lp *lp);
static void fcfs_add_rc(void *sched, model_net_sched_rc *rc, tw_lp *lp);
static int fcfs_next(tw_stime *poffset, void *sched, model_net_sched_rc *rc, tw_lp *lp);
static void fcfs_next_rc(void *sched, model_net_sched_rc *rc, tw_lp *lp);
static model_net_sched_interface fcfs_tab =
{ &fcfs_init, &fcfs_destroy, &fcfs_add, &fcfs_add_rc, &fcfs_next, &fcfs_next_rc};
static void rr_init (struct model_net_method *method, void ** sched);
static void rr_destroy (void *sched);
static void rr_add (
model_net_request *req,
int remote_event_size,
void * remote_event,
int local_event_size,
void * local_event,
void *sched,
model_net_sched_rc *rc,
tw_lp *lp);
static void rr_add_rc(void *sched, model_net_sched_rc *rc, tw_lp *lp);
static int rr_next(tw_stime *poffset, void *sched, model_net_sched_rc *rc, tw_lp *lp);
static void rr_next_rc (void *sched, model_net_sched_rc *rc, tw_lp *lp);
static model_net_sched_interface rr_tab =
{ &rr_init, &rr_destroy, &rr_add, &rr_add_rc, &rr_next, &rr_next_rc};
/// general scheduler functions /// general scheduler functions
...@@ -89,18 +28,14 @@ void model_net_sched_init( ...@@ -89,18 +28,14 @@ void model_net_sched_init(
enum sched_type type, enum sched_type type,
struct model_net_method *method, struct model_net_method *method,
model_net_sched *sched){ model_net_sched *sched){
sched->type = type; if (type >= MAX_SCHEDS){
switch (type){ fprintf(stderr, "unknown scheduler type");
case MN_SCHED_FCFS: abort();
sched->impl = &fcfs_tab;
break;
case MN_SCHED_RR:
sched->impl = &rr_tab;
break;
default:
fprintf(stderr, "unknown scheduler type");
abort();
} }
else{
sched->impl = sched_interfaces[type];
}
sched->type = type;
sched->impl->init(method, &sched->dat); sched->impl->init(method, &sched->dat);
} }
...@@ -139,234 +74,6 @@ void model_net_sched_add_rc( ...@@ -139,234 +74,6 @@ void model_net_sched_add_rc(
sched->impl->add_rc(sched->dat, sched_rc, lp); sched->impl->add_rc(sched->dat, sched_rc, lp);
} }
/// specific scheduler implementations
void fcfs_init(struct model_net_method *method, void ** sched){
*sched = malloc(sizeof(mn_sched));
mn_sched *ss = *sched;
ss->method = method;
INIT_QLIST_HEAD(&ss->reqs);
INIT_QLIST_HEAD(&ss->free_reqs);
}
void fcfs_destroy(void *sched){
free(sched);
}
void fcfs_add (
model_net_request *req,
int remote_event_size,
void * remote_event,
int local_event_size,
void * local_event,
void *sched,
model_net_sched_rc *rc,
tw_lp *lp){
// NOTE: in optimistic mode, we currently do not have a good way to
// reliably free and re-initialize the q item and the local/remote events
// when processing next/next_rc events. Hence, the memory leaks. Later on
// we'll figure out a better way to handle this.
mn_sched_qitem *q = malloc(sizeof(mn_sched_qitem));
assert(q);
memset(q, 0, sizeof(*q));
q->req = *req;
q->rem = req->is_pull ? PULL_MSG_SIZE : req->msg_size;
if (remote_event_size > 0){
q->remote_event = malloc(remote_event_size);
memcpy(q->remote_event, remote_event, remote_event_size);
}
if (local_event_size > 0){
q->local_event = malloc(local_event_size);
memcpy(q->local_event, local_event, local_event_size);
}
mn_sched *s = sched;
qlist_add_tail(&q->ql, &s->reqs);
}
void fcfs_add_rc(void *sched, model_net_sched_rc *rc, tw_lp *lp){
mn_sched *s = sched;
struct qlist_head *ent = qlist_pop_back(&s->reqs);
assert(ent != NULL);
mn_sched_qitem *q = qlist_entry(ent, mn_sched_qitem, ql);
if (q->remote_event) free(q->remote_event);
if (q->local_event) free(q->local_event);
free(q);
}
int fcfs_next(tw_stime *poffset, void *sched, model_net_sched_rc *rc, tw_lp *lp){
mn_sched *s = sched;
struct qlist_head *ent = s->reqs.next;
if (ent == &s->reqs){
rc->rtn = -1;
return -1;
}
mn_sched_qitem *q = qlist_entry(ent, mn_sched_qitem, ql);
// issue the next packet