/*
 * Copyright (C) 2013 University of Chicago.
 * See COPYRIGHT notice in top-level directory.
 *
 */

/* SUMMARY:
 *
 * This is a test harness for the modelnet module, using the simplep2p
 * network model.  It sets up a number of servers, each of which is paired
 * with a simplep2p LP that serves as the NIC.  Each server exchanges a
 * sequence of requests and acks with one peer
 * and measures the throughput in terms of payload bytes (ack size) moved
 * per second.
 */
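
/* NOTE: the mapping/config file passed on the command line is expected to
 * define a "MODELNET_GRP" group containing the "server" LPs registered below
 * alongside the simplep2p model-net LPs.  The sketch below is illustrative
 * only -- the group and LP names match the lookups in this file, but the
 * PARAMS entries (packet/message sizes and the simplep2p latency/bandwidth
 * file names) are assumptions; consult the .conf files shipped with the
 * test suite for the authoritative version.
 *
 *   LPGROUPS
 *   {
 *       MODELNET_GRP
 *       {
 *           repetitions="3";
 *           server="1";
 *           modelnet_simplep2p="1";
 *       }
 *   }
 *   PARAMS
 *   {
 *       packet_size="512";
 *       message_size="512";
 *       modelnet_order=( "simplep2p" );
 *       net_startup_ns_file="simplep2p-latency.conf";
 *       net_bw_mbps_file="simplep2p-bandwidth.conf";
 *   }
 */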

#include <string.h>
#include <assert.h>
#include <ross.h>

#include "codes/model-net.h"
#include "codes/lp-io.h"
#include "codes/codes.h"
#include "codes/codes_mapping.h"
#include "codes/configuration.h"
#include "codes/lp-type-lookup.h"

#define NUM_REQS 3  /* number of requests sent by each server */
#define PAYLOAD_SZ 2048 /* size of simulated data payload, bytes  */

static int net_id = 0;
static int num_servers = 0;

typedef struct svr_msg svr_msg;
typedef struct svr_state svr_state;

/* types of events that will constitute triton requests */
enum svr_event
{
    KICKOFF,    /* initial event */
    REQ,        /* request event */
    ACK,        /* ack event */
    LOCAL      /* local event */
};

struct svr_state
{
    int msg_sent_count;   /* requests sent */
    int msg_recvd_count;  /* requests recvd */
    int local_recvd_count; /* number of local messages received */
    tw_stime start_ts;    /* time that we started sending requests */
};

struct svr_msg
{
    enum svr_event svr_event_type;
//    enum net_event net_event_type;
    tw_lpid src;          /* source of this request or ack */
    model_net_event_return ret;
    int incremented_flag; /* helper for reverse computation */
};

static void svr_init(
    svr_state * ns,
    tw_lp * lp);
static void svr_event(
    svr_state * ns,
    tw_bf * b,
    svr_msg * m,
    tw_lp * lp);
static void svr_rev_event(
    svr_state * ns,
    tw_bf * b,
    svr_msg * m,
    tw_lp * lp);
static void svr_finalize(
    svr_state * ns,
    tw_lp * lp);

tw_lptype svr_lp = {
    (init_f) svr_init,
    (pre_run_f) NULL,
    (event_f) svr_event,
    (revent_f) svr_rev_event,
    (commit_f) NULL,
    (final_f)  svr_finalize,
    (map_f) codes_mapping,
    sizeof(svr_state),
};

extern const tw_lptype* svr_get_lp_type();
static void svr_add_lp_type();
static tw_stime ns_to_s(tw_stime ns);
static tw_stime s_to_ns(tw_stime ns);
static void handle_kickoff_event(
    svr_state * ns,
    svr_msg * m,
    tw_lp * lp);
static void handle_ack_event(
    svr_state * ns,
    svr_msg * m,
    tw_lp * lp);
static void handle_req_event(
    svr_state * ns,
    svr_msg * m,
    tw_lp * lp);
static void handle_local_event(svr_state * ns);
static void handle_local_rev_event(svr_state * ns);
static void handle_kickoff_rev_event(
    svr_state * ns,
    svr_msg * m,
    tw_lp * lp);
static void handle_ack_rev_event(
    svr_state * ns,
    svr_msg * m,
    tw_lp * lp);
static void handle_req_rev_event(
    svr_state * ns,
    svr_msg * m,
    tw_lp * lp);

const tw_optdef app_opt [] =
{
	TWOPT_GROUP("Model net test case" ),
	TWOPT_END()
};

int main(
    int argc,
    char **argv)
{
    int nprocs;
    int rank;
    int num_nets, *net_ids;
    //printf("\n Config count %d ",(int) config.lpgroups_count);
    g_tw_ts_end = s_to_ns(60*60*24*365); /* one year, in nsecs */
    lp_io_handle handle;

    tw_opt_add(app_opt);
    tw_init(&argc, &argv);

    if(argc < 2)
    {
	    printf("\n Usage: mpirun <args> --sync=2/3 mapping_file_name.conf (optional --nkp) ");
	    MPI_Finalize();
	    return 0;
    }
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    configuration_load(argv[2], MPI_COMM_WORLD, &config);
    svr_add_lp_type();

    model_net_register();

    codes_mapping_setup();

    net_ids = model_net_configure(&num_nets);
    assert(num_nets==1);
    net_id = *net_ids;
    free(net_ids);

    num_servers = codes_mapping_get_lp_count("MODELNET_GRP", 0, "server",
            NULL, 1);
    assert(num_servers == 3);

    if(lp_io_prepare("modelnet-test", LP_IO_UNIQ_SUFFIX, &handle, MPI_COMM_WORLD) < 0)
    {
        return(-1);
    }

    tw_run();
    model_net_report_stats(net_id);

    if(lp_io_flush(handle, MPI_COMM_WORLD) < 0)
    {
        return(-1);
    }

    tw_end();
    return 0;
}

const tw_lptype* svr_get_lp_type()
{
	    return(&svr_lp);
}

static void svr_add_lp_type()
{
  lp_type_register("server", svr_get_lp_type());
}

static void svr_init(
    svr_state * ns,
    tw_lp * lp)
{
    tw_event *e;
    svr_msg *m;
    tw_stime kickoff_time;
    memset(ns, 0, sizeof(*ns));

    /* each server sends a dummy event to itself that will kick off the real
     * simulation
     */

    //printf("\n Initializing servers %d ", (int)lp->gid);
    /* skew each kickoff event slightly to help avoid event ties later on */
    kickoff_time = g_tw_lookahead + tw_rand_unif(lp->rng);

    e = tw_event_new(lp->gid, kickoff_time, lp);
    m = tw_event_data(e);
    m->svr_event_type = KICKOFF;
    tw_event_send(e);

    return;
}

static void svr_event(
    svr_state * ns,
    tw_bf * b,
    svr_msg * m,
    tw_lp * lp)
{
    (void)b;

    switch (m->svr_event_type)
    {
        case REQ:
            handle_req_event(ns, m, lp);
            break;
        case ACK:
            handle_ack_event(ns, m, lp);
            break;
        case KICKOFF:
            handle_kickoff_event(ns, m, lp);
            break;
        case LOCAL:
            handle_local_event(ns);
            break;
        default:
            printf("\n Invalid message type %d ", m->svr_event_type);
            assert(0);
            break;
    }
}

static void svr_rev_event(
    svr_state * ns,
    tw_bf * b,
    svr_msg * m,
    tw_lp * lp)
{
    (void)b;

    switch (m->svr_event_type)
    {
        case REQ:
            handle_req_rev_event(ns, m, lp);
            break;
        case ACK:
            handle_ack_rev_event(ns, m, lp);
            break;
        case KICKOFF:
            handle_kickoff_rev_event(ns, m, lp);
            break;
        case LOCAL:
            handle_local_rev_event(ns);
            break;
        default:
            assert(0);
            break;
    }

    return;
}

static void svr_finalize(
    svr_state * ns,
    tw_lp * lp)
{
    double t = ns_to_s(tw_now(lp) - ns->start_ts);
    printf("server %llu recvd %d bytes in %f seconds, %f MiB/s sent_count %d recvd_count %d local_count %d \n",
            (unsigned long long)lp->gid, PAYLOAD_SZ*ns->msg_recvd_count, t,
            ((double)(PAYLOAD_SZ*NUM_REQS)/(double)(1024*1024)/t),
            ns->msg_sent_count, ns->msg_recvd_count, ns->local_recvd_count);
    return;
}

/* convert ns to seconds */
static tw_stime ns_to_s(tw_stime ns)
{
    return(ns / (1000.0 * 1000.0 * 1000.0));
}

/* convert seconds to ns */
static tw_stime s_to_ns(tw_stime ns)
{
    return(ns * (1000.0 * 1000.0 * 1000.0));
}

/* handle initial event */
static void handle_kickoff_event(
    svr_state * ns,
    svr_msg * m,
    tw_lp * lp)
{
    svr_msg m_local, m_remote;
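    /* model_net_event() takes two payloads below: m_remote is delivered to
     * the destination server, while m_local comes back to this LP (as a
     * LOCAL event) once the send completes on the sender side */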

//    m_local.svr_event_type = REQ;
    m_local.svr_event_type = LOCAL;
    m_local.src = lp->gid;

    memcpy(&m_remote, &m_local, sizeof(svr_msg));
    m_remote.svr_event_type = REQ;
    //printf("handle_kickoff_event(), lp %llu.\n", (unsigned long long)lp->gid);

    /* record when transfers started on this server */
    ns->start_ts = tw_now(lp);

    /* the first two servers each send their requests to the server on LP
     * gid 4; that server only responds with acks and never initiates */
    int dest_id;
    switch (lp->gid / 2){
        case 0: dest_id = 4; break;
        case 1: dest_id = 4; break;
        case 2: return; /* LP 4 doesn't send messages */
        default: assert(0); return; /* only three servers are mapped (see main) */
    }

    m->ret = model_net_event(net_id, "test", dest_id, PAYLOAD_SZ, 0.0, sizeof(svr_msg), &m_remote, sizeof(svr_msg), &m_local, lp);
    ns->msg_sent_count++;
}
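
/* LOCAL events are the sender-side completion callbacks that
 * model_net_event() schedules back to the issuing server once the simulated
 * NIC has finished processing the send locally */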

static void handle_local_event(svr_state * ns)
{
    ns->local_recvd_count++;
}

static void handle_local_rev_event(svr_state * ns)
{
    ns->local_recvd_count--;
}
/* reverse handler for req event */
static void handle_req_rev_event(
    svr_state * ns,
    svr_msg * m,
    tw_lp * lp)
{
    ns->msg_recvd_count--;
    model_net_event_rc2(lp, &m->ret);

    return;
}


/* reverse handler for kickoff */
static void handle_kickoff_rev_event(
    svr_state * ns,
    svr_msg * m,
    tw_lp * lp)
{
    ns->msg_sent_count--;
    model_net_event_rc2(lp, &m->ret);

    return;
}

/* reverse handler for ack*/
static void handle_ack_rev_event(
    svr_state * ns,
    svr_msg * m,
    tw_lp * lp)
{
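    /* only roll back the extra request if the forward ack handler actually
     * issued one (recorded in m->incremented_flag) */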
    if(m->incremented_flag)
    {
        model_net_event_rc2(lp, &m->ret);
        ns->msg_sent_count--;
    }
    return;
}

/* handle recving ack */
static void handle_ack_event(
    svr_state * ns,
    svr_msg * m,
    tw_lp * lp)
{
    svr_msg m_local;
    svr_msg m_remote;

    m_local.svr_event_type = LOCAL;
    m_local.src = lp->gid;

    memcpy(&m_remote, &m_local, sizeof(svr_msg));
    m_remote.svr_event_type = REQ;

    if(ns->msg_sent_count < NUM_REQS)
    {
        m->ret = model_net_event(net_id, "test", m->src, PAYLOAD_SZ, 0.0, sizeof(svr_msg), &m_remote, sizeof(svr_msg), &m_local, lp);
        ns->msg_sent_count++;
        m->incremented_flag = 1;
    }
    else
    {
        m->incremented_flag = 0;
    }

    return;
}

/* handle receiving request */
static void handle_req_event(
    svr_state * ns,
    svr_msg * m,
    tw_lp * lp)
{
    svr_msg m_local;
    svr_msg m_remote;

    m_local.svr_event_type = LOCAL;
    m_local.src = lp->gid;

    memcpy(&m_remote, &m_local, sizeof(svr_msg));
    m_remote.svr_event_type = ACK;

    ns->msg_recvd_count++;

    // mm Q: What should be the size of an ack message? may be a few bytes? or larger..?
    m->ret = model_net_event(net_id, "test", m->src, PAYLOAD_SZ, 0.0, sizeof(svr_msg), &m_remote, sizeof(svr_msg), &m_local, lp);
}

/*
 * Local variables:
 *  c-indent-level: 4
 *  c-basic-offset: 4
 * End:
 *
 * vim: ft=c ts=8 sts=4 sw=4 expandtab
 */