margo.c 12.2 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11

/*
 * (C) 2015 The University of Chicago
 * 
 * See COPYRIGHT in top-level directory.
 */

#include <assert.h>
#include <errno.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

#include <abt.h>
#include <abt-snoozer.h>

/* TODO: including core.h for cancel definition, presumably this will be 
 * available in top level later?
 */
#include <mercury_core.h>

#include "margo.h"
#include "margo-timer.h"
#include "utlist.h"

25 26
/* Per-instance state for a margo runtime; one of these exists for each
 * successful margo_init() call.
 */
struct margo_instance
{
    /* provided by caller */
    hg_context_t *hg_context;   /* Mercury context driven by the progress thread */
    hg_class_t *hg_class;       /* Mercury class associated with hg_context */
    ABT_pool handler_pool;      /* pool in which RPC handlers execute */
    ABT_pool progress_pool;     /* pool in which the progress thread executes */

    /* internal to margo for this particular instance */
    ABT_thread hg_progress_tid;      /* dedicated Mercury progress thread */
    int hg_progress_shutdown_flag;   /* set nonzero to ask the thread to exit */

    /* control logic for callers waiting on margo to be finalized */
    int finalize_flag;                      /* set when margo_finalize() runs */
    int finalize_waiters_in_progress_pool;  /* waiters sharing the progress pool */
    ABT_mutex finalize_mutex;               /* protects the two fields above */
    ABT_cond finalize_cond;                 /* broadcast on finalize */

    /* slot of this instance in handler_mapping_table */
    int table_index;
};

/* Maps a Mercury class back to the margo instance that owns it, so that RPC
 * handlers (which only see the hg_class) can locate their instance.
 */
struct margo_handler_mapping
{
    hg_class_t *class;      /* key: Mercury class */
    margo_instance_id mid;  /* value: owning margo instance */
};

/* Fixed-capacity process-wide table of live instances; margo_init() fails
 * once this many instances exist simultaneously.
 */
#define MAX_HANDLER_MAPPING 8
static int handler_mapping_table_size = 0;
static struct margo_handler_mapping handler_mapping_table[MAX_HANDLER_MAPPING] = {0};
55

56
/* forward declaration: body of the dedicated Mercury progress thread */
static void hg_progress_fn(void* foo);


/* NOTE(review): this struct is not referenced anywhere in this file —
 * presumably intended for use with the utlist macros; confirm whether it
 * is still needed.
 */
struct handler_entry
{
    void* fn;
    hg_handle_t handle;
    struct handler_entry *next; 
};

66 67
/* Creates a margo instance bound to the given pools and Mercury objects,
 * registers it in the class->instance mapping table, and launches the
 * dedicated Mercury progress thread in progress_pool.
 *
 * Returns the new instance, or MARGO_INSTANCE_NULL on failure (table full,
 * allocation failure, timer init failure, or thread creation failure).
 */
margo_instance_id margo_init(ABT_pool progress_pool, ABT_pool handler_pool,
    hg_context_t *hg_context, hg_class_t *hg_class)
{
    int ret;
    struct margo_instance *mid;

    /* refuse to exceed the fixed-size mapping table */
    if(handler_mapping_table_size >= MAX_HANDLER_MAPPING)
        return(MARGO_INSTANCE_NULL);

    mid = malloc(sizeof(*mid));
    if(!mid)
        return(MARGO_INSTANCE_NULL);
    memset(mid, 0, sizeof(*mid));

    ABT_mutex_create(&mid->finalize_mutex);
    ABT_cond_create(&mid->finalize_cond);

    mid->progress_pool = progress_pool;
    mid->handler_pool = handler_pool;
    mid->hg_class = hg_class;
    mid->hg_context = hg_context;

    ret = margo_timer_instance_init(mid);
    if(ret != 0)
    {
        fprintf(stderr, "Error: margo_timer_instance_init()\n");
        /* BUGFIX: release the mutex/cond created above; they were leaked
         * on this error path before.
         */
        ABT_cond_free(&mid->finalize_cond);
        ABT_mutex_free(&mid->finalize_mutex);
        free(mid);
        return(MARGO_INSTANCE_NULL);
    }

    ret = ABT_thread_create(mid->progress_pool, hg_progress_fn, mid, 
        ABT_THREAD_ATTR_NULL, &mid->hg_progress_tid);
    if(ret != 0)
    {
        fprintf(stderr, "Error: ABT_thread_create()\n");
        /* BUGFIX: tear down the timer instance and sync objects; only the
         * struct itself was freed on this error path before.
         */
        margo_timer_instance_finalize(mid);
        ABT_cond_free(&mid->finalize_cond);
        ABT_mutex_free(&mid->finalize_mutex);
        free(mid);
        return(MARGO_INSTANCE_NULL);
    }

    /* publish the class->instance mapping for this new instance */
    handler_mapping_table[handler_mapping_table_size].mid = mid;
    handler_mapping_table[handler_mapping_table_size].class = mid->hg_class;
    mid->table_index = handler_mapping_table_size;
    handler_mapping_table_size++;

    return mid;
}

113
/* Shuts down a margo instance: stops the progress thread, removes the
 * instance from the mapping table, and wakes any margo_wait_for_finalize()
 * callers.  The instance memory itself is intentionally leaked (see TODO
 * below).
 */
void margo_finalize(margo_instance_id mid)
{
    int i;

    /* tell progress thread to wrap things up */
    mid->hg_progress_shutdown_flag = 1;

    /* wait for it to shutdown cleanly */
    ABT_thread_join(mid->hg_progress_tid);
    ABT_thread_free(&mid->hg_progress_tid);

    /* compact the mapping table over this instance's slot */
    /* NOTE(review): entries shifted down here keep their old table_index
     * values; a subsequent margo_finalize() on one of those instances would
     * compact the wrong slot — verify and fix if multiple concurrent
     * instances are expected.
     */
    for(i=mid->table_index; i<(handler_mapping_table_size-1); i++)
    {
        handler_mapping_table[i] = handler_mapping_table[i+1];
    }
    handler_mapping_table_size--;

    /* signal anyone blocked in margo_wait_for_finalize() */
    ABT_mutex_lock(mid->finalize_mutex);
    mid->finalize_flag = 1;
    ABT_cond_broadcast(mid->finalize_cond);
    ABT_mutex_unlock(mid->finalize_mutex);

    /* TODO: yuck, there is a race here if someone was really waiting for
     * finalize; we can't destroy the data structures out from under them.
     * We could fix this by reference counting so that the last caller
     * (whether a finalize() caller or wait_for_finalize() caller) knows it
     * is safe to turn off the lights on their way out.  For now we just leak 
     * a small amount of memory.
     */
#if 0
    margo_timer_instance_finalize(mid);

    ABT_mutex_free(&mid->finalize_mutex);
    ABT_cond_free(&mid->finalize_cond);
    free(mid);
#endif

    return;
}

/* Blocks the calling thread until margo_finalize() is invoked on mid.
 * Callers executing in the progress pool are counted so that the progress
 * thread knows it may sleep while only finalize-waiters share its pool.
 */
void margo_wait_for_finalize(margo_instance_id mid)
{
    ABT_xstream self_es;
    ABT_pool my_pool;
    int in_progress_pool = 0;

    /* determine whether this waiter shares a pool with the progress thread */
    if(ABT_xstream_self(&self_es) != 0)
        return;
    if(ABT_xstream_get_main_pools(self_es, 1, &my_pool) != 0)
        return;
    if(my_pool == mid->progress_pool)
        in_progress_pool = 1;

    ABT_mutex_lock(mid->finalize_mutex);

    mid->finalize_waiters_in_progress_pool += in_progress_pool;

    while(!mid->finalize_flag)
        ABT_cond_wait(mid->finalize_cond, mid->finalize_mutex);

    ABT_mutex_unlock(mid->finalize_mutex);

    return;
}

/* dedicated thread function to drive Mercury progress */
186
static void hg_progress_fn(void* foo)
187 188 189
{
    int ret;
    unsigned int actual_count;
190
    struct margo_instance *mid = (struct margo_instance *)foo;
191
    size_t size;
192

193
    while(!mid->hg_progress_shutdown_flag)
194 195
    {
        do {
196
            ret = HG_Trigger(mid->hg_context, 0, 1, &actual_count);
197
        } while((ret == HG_SUCCESS) && actual_count && !mid->hg_progress_shutdown_flag);
198

199
        if(!mid->hg_progress_shutdown_flag)
200
        {
201
            ABT_pool_get_total_size(mid->progress_pool, &size);
202 203 204 205 206 207
            /* Are there any other threads executing in this pool that are *not*
             * blocked on margo_wait_for_finalize()?  If so then, we can't
             * sleep here or else those threads will not get a chance to
             * execute.
             */
            if(size > mid->finalize_waiters_in_progress_pool)
208
            {
209
                HG_Progress(mid->hg_context, 0);
210 211 212 213
                ABT_thread_yield();
            }
            else
            {
214
                printf("sleep\n");
215
                HG_Progress(mid->hg_context, 100);
216 217
            }
        }
218

219
        /* check for any expired timers */
220
        margo_check_timers(mid);
221 222
    }

223
    return;
224 225
}

226
/* Returns a pointer to the pool in which RPC handlers run for mid. */
ABT_pool* margo_get_handler_pool(margo_instance_id mid)
{
    ABT_pool *pool = &mid->handler_pool;
    return(pool);
}

231 232 233 234 235 236 237 238 239 240 241
/* Returns the Mercury context associated with mid. */
hg_context_t* margo_get_context(margo_instance_id mid)
{
    hg_context_t *ctx = mid->hg_context;
    return(ctx);
}

/* Returns the Mercury class associated with mid. */
hg_class_t* margo_get_class(margo_instance_id mid)
{
    hg_class_t *cl = mid->hg_class;
    return(cl);
}


Jonathan Jenkins's avatar
Jonathan Jenkins committed
242
static hg_return_t margo_cb(const struct hg_cb_info *info)
243 244 245 246 247 248 249 250 251 252
{
    hg_return_t hret = info->ret;

    ABT_eventual *eventual = info->arg;
    /* propagate return code out through eventual */
    ABT_eventual_set(*eventual, &hret, sizeof(hret));
    
    return(HG_SUCCESS);
}

253 254 255
/* State handed to the timeout timer for margo_forward_timed(): the Mercury
 * handle to cancel if the forward does not complete in time.
 */
typedef struct
{
    hg_handle_t handle;
} margo_forward_timeout_cb_dat;
257 258 259

static void margo_forward_timeout_cb(void *arg)
{
Shane Snyder's avatar
Shane Snyder committed
260 261
    margo_forward_timeout_cb_dat *timeout_cb_dat =
        (margo_forward_timeout_cb_dat *)arg;
262 263

    /* cancel the Mercury op if the forward timed out */
Shane Snyder's avatar
Shane Snyder committed
264
    HG_Core_cancel(timeout_cb_dat->handle);
265 266 267
    return;
}

268 269 270 271 272 273
/* Forwards an RPC and blocks the calling ULT until it completes or
 * timeout_ms elapses.  On timeout the Mercury operation is cancelled by a
 * margo timer and HG_TIMEOUT (or the cancellation's code) propagates back.
 *
 * Returns HG_SUCCESS on completion, HG_NOMEM_ERROR if the eventual cannot
 * be created, or the Mercury error/timeout code otherwise.
 */
hg_return_t margo_forward_timed(
    margo_instance_id mid,
    hg_handle_t handle,
    void *in_struct,
    double timeout_ms)
{
    int ret;
    hg_return_t hret = HG_TIMEOUT;
    ABT_eventual eventual;
    hg_return_t* waited_hret;
    margo_timer_t forward_timer;
    margo_forward_timeout_cb_dat timeout_cb_dat;

    ret = ABT_eventual_create(sizeof(hret), &eventual);
    if(ret != 0)
    {
        return(HG_NOMEM_ERROR);
    }

    /* set a timer object to expire when this forward times out */
    timeout_cb_dat.handle = handle;
    margo_timer_init(mid, &forward_timer, margo_forward_timeout_cb,
        &timeout_cb_dat, timeout_ms);

    /* FIX: compare against HG_SUCCESS instead of a raw 0 */
    hret = HG_Forward(handle, margo_cb, &eventual, in_struct);
    if(hret == HG_SUCCESS)
    {
        /* block until margo_cb fires and publishes the final return code */
        ABT_eventual_wait(eventual, (void**)&waited_hret);
        hret = *waited_hret;
    }

    /* remove timer if it is still in place (i.e., not timed out) */
    if(hret != HG_TIMEOUT)
        margo_timer_destroy(mid, &forward_timer);

    ABT_eventual_free(&eventual);

    return(hret);
}


310
hg_return_t margo_forward(
311
    margo_instance_id mid,
312 313 314 315 316 317 318 319 320 321 322 323 324 325
    hg_handle_t handle,
    void *in_struct)
{
    hg_return_t hret = HG_TIMEOUT;
    ABT_eventual eventual;
    int ret;
    hg_return_t* waited_hret;

    ret = ABT_eventual_create(sizeof(hret), &eventual);
    if(ret != 0)
    {
        return(HG_NOMEM_ERROR);        
    }

Jonathan Jenkins's avatar
Jonathan Jenkins committed
326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354
    hret = HG_Forward(handle, margo_cb, &eventual, in_struct);
    if(hret == 0)
    {
        ABT_eventual_wait(eventual, (void**)&waited_hret);
        hret = *waited_hret;
    }

    ABT_eventual_free(&eventual);

    return(hret);
}

/* Sends an RPC response and blocks the calling ULT until the send
 * completes.  Returns HG_NOMEM_ERROR if the eventual cannot be created,
 * otherwise the Mercury return code of the completed operation.
 */
hg_return_t margo_respond(
    margo_instance_id mid,
    hg_handle_t handle,
    void *out_struct)
{
    ABT_eventual eventual;
    hg_return_t hret = HG_TIMEOUT;
    hg_return_t *completed_hret;
    int ret;

    ret = ABT_eventual_create(sizeof(hret), &eventual);
    if(ret != 0)
        return(HG_NOMEM_ERROR);

    hret = HG_Respond(handle, margo_cb, &eventual, out_struct);
    if(hret == 0)
    {
        /* wait for margo_cb to publish the final return code */
        ABT_eventual_wait(eventual, (void**)&completed_hret);
        hret = *completed_hret;
    }

    ABT_eventual_free(&eventual);
    return(hret);
}

366

367
static hg_return_t margo_bulk_transfer_cb(const struct hg_cb_info *info)
368
{
369 370
    hg_return_t hret = info->ret;
    ABT_eventual *eventual = info->arg;
371 372 373 374 375 376 377

    /* propagate return code out through eventual */
    ABT_eventual_set(*eventual, &hret, sizeof(hret));
    
    return(HG_SUCCESS);
}

Philip Carns's avatar
Philip Carns committed
378 379
/* Result bundle passed from margo_addr_lookup_cb back to the blocked
 * margo_addr_lookup() caller via an eventual.
 */
struct lookup_cb_evt
{
    hg_return_t nret;  /* Mercury return code of the lookup */
    hg_addr_t addr;    /* resolved address (meaningful on success) */
};

384
static hg_return_t margo_addr_lookup_cb(const struct hg_cb_info *info)
Philip Carns's avatar
Philip Carns committed
385 386
{
    struct lookup_cb_evt evt;
387 388
    evt.nret = info->ret;
    evt.addr = info->info.lookup.addr;
Philip Carns's avatar
Philip Carns committed
389

390
    ABT_eventual *eventual = info->arg;
Philip Carns's avatar
Philip Carns committed
391 392 393 394

    /* propagate return code out through eventual */
    ABT_eventual_set(*eventual, &evt, sizeof(evt));
    
395
    return(HG_SUCCESS);
Philip Carns's avatar
Philip Carns committed
396 397 398
}


399
hg_return_t margo_addr_lookup(
400
    margo_instance_id mid,
401
    hg_context_t *context,
Philip Carns's avatar
Philip Carns committed
402
    const char   *name,
403
    hg_addr_t    *addr)
404
{
405
    hg_return_t nret;
Philip Carns's avatar
Philip Carns committed
406
    struct lookup_cb_evt *evt;
407 408 409
    ABT_eventual eventual;
    int ret;

Philip Carns's avatar
Philip Carns committed
410
    ret = ABT_eventual_create(sizeof(*evt), &eventual);
411 412 413 414 415
    if(ret != 0)
    {
        return(HG_NOMEM_ERROR);        
    }

416 417
    nret = HG_Addr_lookup(context, margo_addr_lookup_cb,
        &eventual, name, HG_OP_ID_IGNORE);
418 419
    if(nret == 0)
    {
Philip Carns's avatar
Philip Carns committed
420 421 422
        ABT_eventual_wait(eventual, (void**)&evt);
        *addr = evt->addr;
        nret = evt->nret;
423 424 425 426 427 428 429
    }

    ABT_eventual_free(&eventual);

    return(nret);
}

430
hg_return_t margo_bulk_transfer(
431
    margo_instance_id mid,
432
    hg_context_t *context,
433
    hg_bulk_op_t op,
434
    hg_addr_t origin_addr,
435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465
    hg_bulk_t origin_handle,
    size_t origin_offset,
    hg_bulk_t local_handle,
    size_t local_offset,
    size_t size)
{
    hg_return_t hret = HG_TIMEOUT;
    hg_return_t *waited_hret;
    ABT_eventual eventual;
    int ret;

    ret = ABT_eventual_create(sizeof(hret), &eventual);
    if(ret != 0)
    {
        return(HG_NOMEM_ERROR);        
    }

    hret = HG_Bulk_transfer(context, margo_bulk_transfer_cb, &eventual, op, 
        origin_addr, origin_handle, origin_offset, local_handle, local_offset,
        size, HG_OP_ID_IGNORE);
    if(hret == 0)
    {
        ABT_eventual_wait(eventual, (void**)&waited_hret);
        hret = *waited_hret;
    }

    ABT_eventual_free(&eventual);

    return(hret);
}

466 467 468 469
/* State shared between margo_thread_sleep() and its timer callback: the
 * sleeping thread waits on cond until the callback clears is_asleep.
 */
typedef struct
{
    ABT_mutex mutex;  /* protects is_asleep */
    ABT_cond cond;    /* signalled by the timer callback */
    int is_asleep;    /* 1 while the thread should remain blocked */
} margo_thread_sleep_cb_dat;

static void margo_thread_sleep_cb(void *arg)
{
    margo_thread_sleep_cb_dat *sleep_cb_dat =
        (margo_thread_sleep_cb_dat *)arg;

    /* wake up the sleeping thread */
    ABT_mutex_lock(sleep_cb_dat->mutex);
480
    sleep_cb_dat->is_asleep = 0;
481 482 483 484 485 486 487
    ABT_cond_signal(sleep_cb_dat->cond);
    ABT_mutex_unlock(sleep_cb_dat->mutex);

    return;
}

void margo_thread_sleep(
488
    margo_instance_id mid,
489 490 491 492 493 494 495 496
    double timeout_ms)
{
    margo_timer_t sleep_timer;
    margo_thread_sleep_cb_dat sleep_cb_dat;

    /* set data needed for sleep callback */
    ABT_mutex_create(&(sleep_cb_dat.mutex));
    ABT_cond_create(&(sleep_cb_dat.cond));
497
    sleep_cb_dat.is_asleep = 1;
498 499

    /* initialize the sleep timer */
500
    margo_timer_init(mid, &sleep_timer, margo_thread_sleep_cb,
501 502 503 504
        &sleep_cb_dat, timeout_ms);

    /* yield thread for specified timeout */
    ABT_mutex_lock(sleep_cb_dat.mutex);
505 506
    while(sleep_cb_dat.is_asleep)
        ABT_cond_wait(sleep_cb_dat.cond, sleep_cb_dat.mutex);
507 508 509 510 511 512
    ABT_mutex_unlock(sleep_cb_dat.mutex);

    return;
}


513
/* Looks up the margo instance that owns Mercury class cl by scanning the
 * static mapping table.  Returns NULL if cl is not registered.
 */
margo_instance_id margo_hg_class_to_instance(hg_class_t *cl)
{
    int idx = 0;

    while(idx < handler_mapping_table_size)
    {
        if(handler_mapping_table[idx].class == cl)
            return(handler_mapping_table[idx].mid);
        idx++;
    }

    return(NULL);
}