stream_add_omp_mt.c

/*******************************************************************************
 * Copyright 2019 UChicago Argonne, LLC.
 * (c.f. AUTHORS, LICENSE)
 *
 * This file is part of the AML project.
 * For more info, see https://xgitlab.cels.anl.gov/argo/aml
 *
 * SPDX-License-Identifier: BSD-3-Clause
 ******************************************************************************/

#include "aml.h"
#include "aml/dma/linux-par.h"
#include <assert.h>
#include <errno.h>
#include <omp.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#include "utils.h"

#define ITER 10
#define CHUNKING 4

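/* static state: a 1D tiling of the arrays, slow and fast NUMA areas,
 * and one sequential scratchpad per input array */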
size_t numthreads, tilesz, esz;
unsigned long *a, *b, *c;
AML_TILING_1D_DECL(tiling);
AML_AREA_LINUX_DECL(slow);
AML_AREA_LINUX_DECL(fast);
AML_SCRATCH_SEQ_DECL(sa);
AML_SCRATCH_SEQ_DECL(sb);

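/* element-wise vector add over n elements, parallelized with OpenMP */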
int kernel(unsigned long *a, unsigned long *b, unsigned long *c, size_t n)
{
	#pragma omp parallel for
	for(size_t i = 0; i < n; i++)
		c[i] = a[i] + b[i];
	return 0;
}

int main(int argc, char *argv[])
{
	AML_ARENA_JEMALLOC_DECL(arns);
	AML_ARENA_JEMALLOC_DECL(arnf);
	AML_DMA_LINUX_PAR_DECL(dma);
	struct bitmask *slowb, *fastb;
	aml_init(&argc, &argv);
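	/* arguments: <fast nodeset> <slow nodeset> <log2 of the buffer size in bytes> */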
	assert(argc == 4);

	log_init(argv[0]);
	fastb = numa_parse_nodestring_all(argv[1]);
	slowb = numa_parse_nodestring_all(argv[2]);
	unsigned long memsize = 1UL << atoi(argv[3]);

	/* use the openmp env to figure out how many threads we want
	 * (we actually use 3x as many); each array is split into
	 * numthreads*CHUNKING tiles
	 */
	#pragma omp parallel
	{
		numthreads = omp_get_num_threads();
		tilesz = memsize/(numthreads*CHUNKING);
		esz = tilesz/sizeof(unsigned long);
	}

	/* initialize all the supporting structures */
	assert(!aml_tiling_init(&tiling, AML_TILING_TYPE_1D, tilesz, memsize));

	assert(!aml_arena_jemalloc_init(&arns, AML_ARENA_JEMALLOC_TYPE_REGULAR));
	assert(!aml_area_linux_init(&slow,
				    AML_AREA_LINUX_MANAGER_TYPE_SINGLE,
				    AML_AREA_LINUX_MBIND_TYPE_REGULAR,
				    AML_AREA_LINUX_MMAP_TYPE_ANONYMOUS,
				    &arns, MPOL_BIND, slowb->maskp));
	assert(!aml_arena_jemalloc_init(&arnf, AML_ARENA_JEMALLOC_TYPE_REGULAR));
	assert(!aml_area_linux_init(&fast,
				    AML_AREA_LINUX_MANAGER_TYPE_SINGLE,
				    AML_AREA_LINUX_MBIND_TYPE_REGULAR,
				    AML_AREA_LINUX_MMAP_TYPE_ANONYMOUS,
				    &arnf, MPOL_BIND, fastb->maskp));
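	/* a parallel dma to move tiles between areas, and one scratchpad per
	 * input array, each caching 2*numthreads tiles in fast memory */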
	assert(!aml_dma_linux_par_init(&dma, numthreads*2, numthreads));
	assert(!aml_scratch_seq_init(&sa, &fast, &slow, &dma, &tiling,
				     (size_t)2*numthreads, (size_t)1));
	assert(!aml_scratch_seq_init(&sb, &fast, &slow, &dma, &tiling,
				     (size_t)2*numthreads, (size_t)1));

	/* allocation: inputs in slow memory, output in fast memory */
	a = aml_area_malloc(&slow, memsize);
	b = aml_area_malloc(&slow, memsize);
	c = aml_area_malloc(&fast, memsize);
	assert(a != NULL && b != NULL && c != NULL);

	unsigned long esize = memsize/sizeof(unsigned long);
	for(unsigned long i = 0; i < esize; i++) {
		a[i] = i;
		b[i] = esize - i;
		c[i] = 0;
	}

	/* run kernel */
	int i, ai, bi, oldai, oldbi;
	unsigned long *ap, *bp;
	void *abaseptr, *bbaseptr;
	ap = aml_tiling_tilestart(&tiling, a, 0);
	bp = aml_tiling_tilestart(&tiling, b, 0);
	abaseptr = aml_scratch_baseptr(&sa);
	bbaseptr = aml_scratch_baseptr(&sb);
	ai = -1; bi = -1;
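	/* software pipeline: prefetch the next tile of a and b into the
	 * scratchpads while the kernel runs on the current tiles, then swap
	 * pointers and release the tiles that were just consumed */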
	for(i = 0; i < (memsize/tilesz) -1; i++) {
		struct aml_scratch_request *ar, *br;
		oldai = ai; oldbi = bi;
		aml_scratch_async_pull(&sa, &ar, abaseptr, &ai, a, i+1);
		aml_scratch_async_pull(&sb, &br, bbaseptr, &bi, b, i+1);
		kernel(ap, bp, &c[i*esz], esz);
		aml_scratch_wait(&sa, ar);
		aml_scratch_wait(&sb, br);
		ap = aml_tiling_tilestart(&tiling, abaseptr, ai);
		bp = aml_tiling_tilestart(&tiling, bbaseptr, bi);
		aml_scratch_release(&sa, oldai);
		aml_scratch_release(&sb, oldbi);
	}
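	/* the loop prefetches one tile ahead; compute the last one here */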
	kernel(ap, bp, &c[i*esz], esz);

	/* validate: a[i] + b[i] == i + (esize - i) == esize */
	for(unsigned long i = 0; i < esize; i++) {
		assert(c[i] == esize);
	}

	aml_scratch_seq_destroy(&sa);
	aml_scratch_seq_destroy(&sb);
	aml_dma_linux_par_destroy(&dma);
	aml_area_free(&slow, a);
	aml_area_free(&slow, b);
	aml_area_free(&fast, c);
	aml_area_linux_destroy(&slow);
	aml_area_linux_destroy(&fast);
	aml_tiling_destroy(&tiling, AML_TILING_TYPE_1D);
	aml_finalize();
	return 0;
}