/*
 * (C) 2015 The University of Chicago
 * 
 * See COPYRIGHT in top-level directory.
 */

#include <assert.h>
#include <errno.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

#include <abt.h>
#include <abt-snoozer.h>

#include "margo.h"
#include "margo-timer.h"
#include "utlist.h"

/* TODO: including core.h for cancel definition, presumably this will be 
 * available in top level later?
 */
#include <mercury_core.h>

25 26
struct margo_instance
{
27
    /* provided by caller */
28 29
    hg_context_t *hg_context;
    hg_class_t *hg_class;
30 31 32
    ABT_pool handler_pool;
    ABT_pool progress_pool;

33
    /* internal to margo for this particular instance */
34 35
    ABT_thread hg_progress_tid;
    int hg_progress_shutdown_flag;
36 37 38 39 40 41 42

    /* control logic for callers waiting on margo to be finalized */
    int finalize_flag;
    int finalize_waiters_in_progress_pool;
    ABT_mutex finalize_mutex;
    ABT_cond finalize_cond;

43 44 45 46 47 48 49 50 51 52 53 54
    int table_index;
};

struct margo_handler_mapping
{
    hg_class_t *class;
    margo_instance_id mid;
};

#define MAX_HANDLER_MAPPING 8
static int handler_mapping_table_size = 0;
static struct margo_handler_mapping handler_mapping_table[MAX_HANDLER_MAPPING] = {0};
55

56
static void hg_progress_fn(void* foo);
57 58 59 60 61 62 63 64 65


struct handler_entry
{
    void* fn;
    hg_handle_t handle;
    struct handler_entry *next; 
};

66 67
margo_instance_id margo_init(ABT_pool progress_pool, ABT_pool handler_pool,
    hg_context_t *hg_context, hg_class_t *hg_class)
68 69
{
    int ret;
70 71 72
    struct margo_instance *mid;

    if(handler_mapping_table_size >= MAX_HANDLER_MAPPING)
73
        return(MARGO_INSTANCE_NULL);
74 75 76

    mid = malloc(sizeof(*mid));
    if(!mid)
77
        return(MARGO_INSTANCE_NULL);
78
    memset(mid, 0, sizeof(*mid));
79

80 81 82
    ABT_mutex_create(&mid->finalize_mutex);
    ABT_cond_create(&mid->finalize_cond);

83 84
    mid->progress_pool = progress_pool;
    mid->handler_pool = handler_pool;
85 86
    mid->hg_class = hg_class;
    mid->hg_context = hg_context;
87

88 89 90 91 92 93 94 95
    ret = margo_timer_instance_init(mid);
    if(ret != 0)
    {
        fprintf(stderr, "Error: margo_timer_instance_init()\n");
        free(mid);
        return(MARGO_INSTANCE_NULL);
    }

96
    ret = ABT_thread_create(mid->progress_pool, hg_progress_fn, mid, 
97
        ABT_THREAD_ATTR_NULL, &mid->hg_progress_tid);
98 99 100
    if(ret != 0)
    {
        fprintf(stderr, "Error: ABT_thread_create()\n");
101
        free(mid);
102
        return(MARGO_INSTANCE_NULL);
103 104
    }

105 106 107 108 109 110
    handler_mapping_table[handler_mapping_table_size].mid = mid;
    handler_mapping_table[handler_mapping_table_size].class = mid->hg_class;
    mid->table_index = handler_mapping_table_size;
    handler_mapping_table_size++;

    return mid;
111 112
}

113
void margo_finalize(margo_instance_id mid)
114
{
115 116
    int i;

117
    /* tell progress thread to wrap things up */
118
    mid->hg_progress_shutdown_flag = 1;
119 120

    /* wait for it to shutdown cleanly */
121 122
    ABT_thread_join(mid->hg_progress_tid);
    ABT_thread_free(&mid->hg_progress_tid);
123

124 125 126 127 128
    for(i=mid->table_index; i<(handler_mapping_table_size-1); i++)
    {
        handler_mapping_table[i] = handler_mapping_table[i+1];
    }
    handler_mapping_table_size--;
129

130 131 132 133 134 135 136 137 138 139 140 141 142
    ABT_mutex_lock(mid->finalize_mutex);
    mid->finalize_flag = 1;
    ABT_cond_broadcast(mid->finalize_cond);
    ABT_mutex_unlock(mid->finalize_mutex);

    /* TODO: yuck, there is a race here if someone was really waiting for
     * finalize; we can't destroy the data structures out from under them.
     * We could fix this by reference counting so that the last caller
     * (whether a finalize() caller or wait_for_finalize() caller) knows it
     * is safe to turn off the lights on their way out.  For now we just leak 
     * a small amount of memory.
     */
#if 0
143
    margo_timer_instance_finalize(mid);
144

145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181
    ABT_mutex_free(&mid->finalize_mutex);
    ABT_cond_free(&mid->finalize_cond);
    free(mid);
#endif

    return;
}

void margo_wait_for_finalize(margo_instance_id mid)
{
    ABT_xstream xstream;
    ABT_pool pool;
    int ret;
    int in_pool = 0;

    ret = ABT_xstream_self(&xstream);
    if(ret != 0)
        return;
    ret = ABT_xstream_get_main_pools(xstream, 1, &pool);
    if(ret != 0)
        return;

    /* Is this waiter in the same pool as the pool running the progress
     * thread?
     */
    if(pool == mid->progress_pool)
        in_pool = 1;

    ABT_mutex_lock(mid->finalize_mutex);

        mid->finalize_waiters_in_progress_pool += in_pool;
            
        while(!mid->finalize_flag)
            ABT_cond_wait(mid->finalize_cond, mid->finalize_mutex);

    ABT_mutex_unlock(mid->finalize_mutex);
    
182 183 184 185
    return;
}

/* dedicated thread function to drive Mercury progress */
186
static void hg_progress_fn(void* foo)
187 188 189
{
    int ret;
    unsigned int actual_count;
190
    struct margo_instance *mid = (struct margo_instance *)foo;
191
    size_t size;
192

193
    while(!mid->hg_progress_shutdown_flag)
194 195
    {
        do {
196
            ret = HG_Trigger(mid->hg_context, 0, 1, &actual_count);
197
        } while((ret == HG_SUCCESS) && actual_count && !mid->hg_progress_shutdown_flag);
198

199
        if(!mid->hg_progress_shutdown_flag)
200
        {
201 202
            ABT_mutex_lock(mid->finalize_mutex);

203
            ABT_pool_get_total_size(mid->progress_pool, &size);
204 205 206 207 208 209
            /* Are there any other threads executing in this pool that are *not*
             * blocked on margo_wait_for_finalize()?  If so then, we can't
             * sleep here or else those threads will not get a chance to
             * execute.
             */
            if(size > mid->finalize_waiters_in_progress_pool)
210
            {
211
                ABT_mutex_unlock(mid->finalize_mutex);
212
                HG_Progress(mid->hg_context, 0);
213 214 215 216
                ABT_thread_yield();
            }
            else
            {
217
                ABT_mutex_unlock(mid->finalize_mutex);
218
                HG_Progress(mid->hg_context, 100);
219 220
            }
        }
221

222
        /* check for any expired timers */
223
        margo_check_timers(mid);
224 225
    }

226
    return;
227 228
}

229
ABT_pool* margo_get_handler_pool(margo_instance_id mid)
230
{
231
    return(&mid->handler_pool);
232 233
}

234 235 236 237 238 239 240 241 242 243 244
hg_context_t* margo_get_context(margo_instance_id mid)
{
    return(mid->hg_context);
}

hg_class_t* margo_get_class(margo_instance_id mid)
{
    return(mid->hg_class);
}


Jonathan Jenkins's avatar
Jonathan Jenkins committed
245
static hg_return_t margo_cb(const struct hg_cb_info *info)
246 247 248 249 250 251 252 253 254 255
{
    hg_return_t hret = info->ret;

    ABT_eventual *eventual = info->arg;
    /* propagate return code out through eventual */
    ABT_eventual_set(*eventual, &hret, sizeof(hret));
    
    return(HG_SUCCESS);
}

256 257 258
typedef struct
{
    hg_handle_t handle;
Shane Snyder's avatar
Shane Snyder committed
259
} margo_forward_timeout_cb_dat;
260 261 262

static void margo_forward_timeout_cb(void *arg)
{
Shane Snyder's avatar
Shane Snyder committed
263 264
    margo_forward_timeout_cb_dat *timeout_cb_dat =
        (margo_forward_timeout_cb_dat *)arg;
265 266

    /* cancel the Mercury op if the forward timed out */
Shane Snyder's avatar
Shane Snyder committed
267
    HG_Core_cancel(timeout_cb_dat->handle);
268 269 270
    return;
}

271 272 273 274 275 276
hg_return_t margo_forward_timed(
    margo_instance_id mid,
    hg_handle_t handle,
    void *in_struct,
    double timeout_ms)
{
Shane Snyder's avatar
Shane Snyder committed
277
    int ret;
278 279 280
    hg_return_t hret = HG_TIMEOUT;
    ABT_eventual eventual;
    hg_return_t* waited_hret;
Shane Snyder's avatar
Shane Snyder committed
281 282
    margo_timer_t forward_timer;
    margo_forward_timeout_cb_dat timeout_cb_dat;
283 284 285 286 287 288 289

    ret = ABT_eventual_create(sizeof(hret), &eventual);
    if(ret != 0)
    {
        return(HG_NOMEM_ERROR);        
    }

Shane Snyder's avatar
Shane Snyder committed
290 291
    /* set a timer object to expire when this forward times out */
    timeout_cb_dat.handle = handle;
292
    margo_timer_init(mid, &forward_timer, margo_forward_timeout_cb,
Shane Snyder's avatar
Shane Snyder committed
293
        &timeout_cb_dat, timeout_ms);
294 295 296 297 298 299 300 301

    hret = HG_Forward(handle, margo_cb, &eventual, in_struct);
    if(hret == 0)
    {
        ABT_eventual_wait(eventual, (void**)&waited_hret);
        hret = *waited_hret;
    }

302 303
    /* remove timer if it is still in place (i.e., not timed out) */
    if(hret != HG_TIMEOUT)
304
        margo_timer_destroy(mid, &forward_timer);
305 306 307 308

    ABT_eventual_free(&eventual);

    return(hret);
309 310 311 312

}


313
hg_return_t margo_forward(
314
    margo_instance_id mid,
315 316 317 318 319 320 321 322 323 324 325 326 327 328
    hg_handle_t handle,
    void *in_struct)
{
    hg_return_t hret = HG_TIMEOUT;
    ABT_eventual eventual;
    int ret;
    hg_return_t* waited_hret;

    ret = ABT_eventual_create(sizeof(hret), &eventual);
    if(ret != 0)
    {
        return(HG_NOMEM_ERROR);        
    }

Jonathan Jenkins's avatar
Jonathan Jenkins committed
329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357
    hret = HG_Forward(handle, margo_cb, &eventual, in_struct);
    if(hret == 0)
    {
        ABT_eventual_wait(eventual, (void**)&waited_hret);
        hret = *waited_hret;
    }

    ABT_eventual_free(&eventual);

    return(hret);
}

hg_return_t margo_respond(
    margo_instance_id mid,
    hg_handle_t handle,
    void *out_struct)
{
    hg_return_t hret = HG_TIMEOUT;
    ABT_eventual eventual;
    int ret;
    hg_return_t* waited_hret;

    ret = ABT_eventual_create(sizeof(hret), &eventual);
    if(ret != 0)
    {
        return(HG_NOMEM_ERROR);
    }

    hret = HG_Respond(handle, margo_cb, &eventual, out_struct);
358 359 360 361 362 363 364 365 366 367 368
    if(hret == 0)
    {
        ABT_eventual_wait(eventual, (void**)&waited_hret);
        hret = *waited_hret;
    }

    ABT_eventual_free(&eventual);

    return(hret);
}

369

370
static hg_return_t margo_bulk_transfer_cb(const struct hg_cb_info *info)
371
{
372 373
    hg_return_t hret = info->ret;
    ABT_eventual *eventual = info->arg;
374 375 376 377 378 379 380

    /* propagate return code out through eventual */
    ABT_eventual_set(*eventual, &hret, sizeof(hret));
    
    return(HG_SUCCESS);
}

Philip Carns's avatar
Philip Carns committed
381 382
struct lookup_cb_evt
{
383 384
    hg_return_t nret;
    hg_addr_t addr;
Philip Carns's avatar
Philip Carns committed
385 386
};

387
static hg_return_t margo_addr_lookup_cb(const struct hg_cb_info *info)
Philip Carns's avatar
Philip Carns committed
388 389
{
    struct lookup_cb_evt evt;
390 391
    evt.nret = info->ret;
    evt.addr = info->info.lookup.addr;
Philip Carns's avatar
Philip Carns committed
392

393
    ABT_eventual *eventual = info->arg;
Philip Carns's avatar
Philip Carns committed
394 395 396 397

    /* propagate return code out through eventual */
    ABT_eventual_set(*eventual, &evt, sizeof(evt));
    
398
    return(HG_SUCCESS);
Philip Carns's avatar
Philip Carns committed
399 400 401
}


402
hg_return_t margo_addr_lookup(
403
    margo_instance_id mid,
404
    hg_context_t *context,
Philip Carns's avatar
Philip Carns committed
405
    const char   *name,
406
    hg_addr_t    *addr)
407
{
408
    hg_return_t nret;
Philip Carns's avatar
Philip Carns committed
409
    struct lookup_cb_evt *evt;
410 411 412
    ABT_eventual eventual;
    int ret;

Philip Carns's avatar
Philip Carns committed
413
    ret = ABT_eventual_create(sizeof(*evt), &eventual);
414 415 416 417 418
    if(ret != 0)
    {
        return(HG_NOMEM_ERROR);        
    }

419 420
    nret = HG_Addr_lookup(context, margo_addr_lookup_cb,
        &eventual, name, HG_OP_ID_IGNORE);
421 422
    if(nret == 0)
    {
Philip Carns's avatar
Philip Carns committed
423 424 425
        ABT_eventual_wait(eventual, (void**)&evt);
        *addr = evt->addr;
        nret = evt->nret;
426 427 428 429 430 431 432
    }

    ABT_eventual_free(&eventual);

    return(nret);
}

433
hg_return_t margo_bulk_transfer(
434
    margo_instance_id mid,
435
    hg_context_t *context,
436
    hg_bulk_op_t op,
437
    hg_addr_t origin_addr,
438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468
    hg_bulk_t origin_handle,
    size_t origin_offset,
    hg_bulk_t local_handle,
    size_t local_offset,
    size_t size)
{
    hg_return_t hret = HG_TIMEOUT;
    hg_return_t *waited_hret;
    ABT_eventual eventual;
    int ret;

    ret = ABT_eventual_create(sizeof(hret), &eventual);
    if(ret != 0)
    {
        return(HG_NOMEM_ERROR);        
    }

    hret = HG_Bulk_transfer(context, margo_bulk_transfer_cb, &eventual, op, 
        origin_addr, origin_handle, origin_offset, local_handle, local_offset,
        size, HG_OP_ID_IGNORE);
    if(hret == 0)
    {
        ABT_eventual_wait(eventual, (void**)&waited_hret);
        hret = *waited_hret;
    }

    ABT_eventual_free(&eventual);

    return(hret);
}

469 470
typedef struct
{
471
    margo_instance_id mid;
472 473
    ABT_mutex mutex;
    ABT_cond cond;
474
    int is_asleep;
475 476 477 478 479 480 481
} margo_thread_sleep_cb_dat;

static void margo_thread_sleep_cb(void *arg)
{
    margo_thread_sleep_cb_dat *sleep_cb_dat =
        (margo_thread_sleep_cb_dat *)arg;

482 483 484 485 486
    /* decrement number of waiting threads */
    ABT_mutex_lock(sleep_cb_dat->mid->finalize_mutex);
    sleep_cb_dat->mid->finalize_waiters_in_progress_pool--;
    ABT_mutex_unlock(sleep_cb_dat->mid->finalize_mutex);

487 488
    /* wake up the sleeping thread */
    ABT_mutex_lock(sleep_cb_dat->mutex);
489
    sleep_cb_dat->is_asleep = 0;
490 491 492 493 494 495 496
    ABT_cond_signal(sleep_cb_dat->cond);
    ABT_mutex_unlock(sleep_cb_dat->mutex);

    return;
}

void margo_thread_sleep(
497
    margo_instance_id mid,
498 499 500 501 502 503
    double timeout_ms)
{
    margo_timer_t sleep_timer;
    margo_thread_sleep_cb_dat sleep_cb_dat;

    /* set data needed for sleep callback */
504
    sleep_cb_dat.mid = mid;
505 506
    ABT_mutex_create(&(sleep_cb_dat.mutex));
    ABT_cond_create(&(sleep_cb_dat.cond));
507
    sleep_cb_dat.is_asleep = 1;
508 509

    /* initialize the sleep timer */
510
    margo_timer_init(mid, &sleep_timer, margo_thread_sleep_cb,
511 512
        &sleep_cb_dat, timeout_ms);

513 514 515 516 517
    /* increment number of waiting threads */
    ABT_mutex_lock(mid->finalize_mutex);
    mid->finalize_waiters_in_progress_pool++;
    ABT_mutex_unlock(mid->finalize_mutex);

518 519
    /* yield thread for specified timeout */
    ABT_mutex_lock(sleep_cb_dat.mutex);
520 521
    while(sleep_cb_dat.is_asleep)
        ABT_cond_wait(sleep_cb_dat.cond, sleep_cb_dat.mutex);
522 523 524 525 526 527
    ABT_mutex_unlock(sleep_cb_dat.mutex);

    return;
}


528
margo_instance_id margo_hg_class_to_instance(hg_class_t *cl)
529 530 531 532 533
{
    int i;

    for(i=0; i<handler_mapping_table_size; i++)
    {
534
        if(handler_mapping_table[i].class == cl)
535 536 537 538
            return(handler_mapping_table[i].mid);
    }
    return(NULL);
}