Commit 67114902 authored by Swann Perarnau's avatar Swann Perarnau

[refactor] use one arena per area.

Prevents an issue with the new arena/alloc logic. This way, no allocation
can end up overlapping the pages of another one on a different binding.
parent 759ec35a
Pipeline #6973 passed with stage
in 4 minutes and 18 seconds
......@@ -12,7 +12,8 @@
int main(int argc, char *argv[])
{
AML_ARENA_JEMALLOC_DECL(arena);
AML_ARENA_JEMALLOC_DECL(arns);
AML_ARENA_JEMALLOC_DECL(arnf);
AML_AREA_LINUX_DECL(slow);
AML_AREA_LINUX_DECL(fast);
struct bitmask *slowb, *fastb;
......@@ -25,17 +26,18 @@ int main(int argc, char *argv[])
long int N = atol(argv[3]);
unsigned long memsize = sizeof(double)*N*N;
assert(!aml_arena_jemalloc_init(&arena, AML_ARENA_JEMALLOC_TYPE_REGULAR));
assert(!aml_arena_jemalloc_init(&arns, AML_ARENA_JEMALLOC_TYPE_REGULAR));
assert(!aml_area_linux_init(&slow,
AML_AREA_LINUX_MANAGER_TYPE_SINGLE,
AML_AREA_LINUX_MBIND_TYPE_REGULAR,
AML_AREA_LINUX_MMAP_TYPE_ANONYMOUS,
&arena, MPOL_BIND, slowb->maskp));
&arns, MPOL_BIND, slowb->maskp));
assert(!aml_arena_jemalloc_init(&arnf, AML_ARENA_JEMALLOC_TYPE_REGULAR));
assert(!aml_area_linux_init(&fast,
AML_AREA_LINUX_MANAGER_TYPE_SINGLE,
AML_AREA_LINUX_MBIND_TYPE_REGULAR,
AML_AREA_LINUX_MMAP_TYPE_ANONYMOUS,
&arena, MPOL_BIND, fastb->maskp));
&arnf, MPOL_BIND, fastb->maskp));
a = aml_area_malloc(&slow, memsize);
b = aml_area_malloc(&slow, memsize);
c = aml_area_malloc(&fast, memsize);
......
......@@ -49,7 +49,8 @@ void do_work()
int main(int argc, char* argv[])
{
AML_ARENA_JEMALLOC_DECL(arena);
AML_ARENA_JEMALLOC_DECL(arns);
AML_ARENA_JEMALLOC_DECL(arnf);
AML_DMA_LINUX_SEQ_DECL(dma);
struct bitmask *slowb, *fastb;
aml_init(&argc, &argv);
......@@ -68,17 +69,19 @@ int main(int argc, char* argv[])
tilesize, memsize, N/T , N/T));
assert(!aml_tiling_init(&tiling_col, AML_TILING_TYPE_2D_CONTIG_COLMAJOR,
tilesize, memsize, N/T , N/T));
assert(!aml_arena_jemalloc_init(&arena, AML_ARENA_JEMALLOC_TYPE_REGULAR));
assert(!aml_arena_jemalloc_init(&arns, AML_ARENA_JEMALLOC_TYPE_REGULAR));
assert(!aml_area_linux_init(&slow,
AML_AREA_LINUX_MANAGER_TYPE_SINGLE,
AML_AREA_LINUX_MBIND_TYPE_REGULAR,
AML_AREA_LINUX_MMAP_TYPE_ANONYMOUS,
&arena, MPOL_BIND, slowb->maskp));
&arns, MPOL_BIND, slowb->maskp));
assert(!aml_arena_jemalloc_init(&arnf, AML_ARENA_JEMALLOC_TYPE_REGULAR));
assert(!aml_area_linux_init(&fast,
AML_AREA_LINUX_MANAGER_TYPE_SINGLE,
AML_AREA_LINUX_MBIND_TYPE_REGULAR,
AML_AREA_LINUX_MMAP_TYPE_ANONYMOUS,
&arena, MPOL_BIND, fastb->maskp));
&arnf, MPOL_BIND, fastb->maskp));
/* allocation */
a = aml_area_malloc(&slow, memsize);
b = aml_area_malloc(&slow, memsize);
......
......@@ -69,7 +69,8 @@ void do_work()
int main(int argc, char* argv[])
{
AML_ARENA_JEMALLOC_DECL(arena);
AML_ARENA_JEMALLOC_DECL(arns);
AML_ARENA_JEMALLOC_DECL(arnf);
AML_DMA_LINUX_SEQ_DECL(dma);
struct bitmask *slowb, *fastb;
aml_init(&argc, &argv);
......@@ -91,17 +92,19 @@ int main(int argc, char* argv[])
/* the prefetch tiling, 1D sequence of columns of tiles */
assert(!aml_tiling_init(&tiling_prefetch, AML_TILING_TYPE_1D,
tilesize*(N/T), memsize));
assert(!aml_arena_jemalloc_init(&arena, AML_ARENA_JEMALLOC_TYPE_REGULAR));
assert(!aml_arena_jemalloc_init(&arns, AML_ARENA_JEMALLOC_TYPE_REGULAR));
assert(!aml_area_linux_init(&slow,
AML_AREA_LINUX_MANAGER_TYPE_SINGLE,
AML_AREA_LINUX_MBIND_TYPE_REGULAR,
AML_AREA_LINUX_MMAP_TYPE_ANONYMOUS,
&arena, MPOL_BIND, slowb->maskp));
&arns, MPOL_BIND, slowb->maskp));
assert(!aml_arena_jemalloc_init(&arnf, AML_ARENA_JEMALLOC_TYPE_REGULAR));
assert(!aml_area_linux_init(&fast,
AML_AREA_LINUX_MANAGER_TYPE_SINGLE,
AML_AREA_LINUX_MBIND_TYPE_REGULAR,
AML_AREA_LINUX_MMAP_TYPE_ANONYMOUS,
&arena, MPOL_BIND, fastb->maskp));
&arnf, MPOL_BIND, fastb->maskp));
assert(!aml_dma_linux_seq_init(&dma, 2));
assert(!aml_scratch_par_init(&sa, &fast, &slow, &dma, &tiling_prefetch, (size_t)2, (size_t)2));
assert(!aml_scratch_par_init(&sb, &fast, &slow, &dma, &tiling_prefetch, (size_t)2, (size_t)2));
......
......@@ -29,8 +29,8 @@ int kernel(unsigned long *a, unsigned long *b, unsigned long *c, size_t n)
int main(int argc, char *argv[])
{
AML_BINDING_SINGLE_DECL(binding);
AML_ARENA_JEMALLOC_DECL(arena);
AML_ARENA_JEMALLOC_DECL(arns);
AML_ARENA_JEMALLOC_DECL(arnf);
AML_DMA_LINUX_PAR_DECL(dma);
unsigned long nodemask[AML_NODEMASK_SZ];
struct bitmask *slowb, *fastb;
......@@ -53,20 +53,20 @@ int main(int argc, char *argv[])
}
/* initialize all the supporting struct */
assert(!aml_binding_init(&binding, AML_BINDING_TYPE_SINGLE, 0));
assert(!aml_tiling_init(&tiling, AML_TILING_TYPE_1D, tilesz, memsize));
assert(!aml_arena_jemalloc_init(&arena, AML_ARENA_JEMALLOC_TYPE_REGULAR));
assert(!aml_arena_jemalloc_init(&arns, AML_ARENA_JEMALLOC_TYPE_REGULAR));
assert(!aml_area_linux_init(&slow,
AML_AREA_LINUX_MANAGER_TYPE_SINGLE,
AML_AREA_LINUX_MBIND_TYPE_REGULAR,
AML_AREA_LINUX_MMAP_TYPE_ANONYMOUS,
&arena, MPOL_BIND, slowb->maskp));
&arns, MPOL_BIND, slowb->maskp));
assert(!aml_arena_jemalloc_init(&arnf, AML_ARENA_JEMALLOC_TYPE_REGULAR));
assert(!aml_area_linux_init(&fast,
AML_AREA_LINUX_MANAGER_TYPE_SINGLE,
AML_AREA_LINUX_MBIND_TYPE_REGULAR,
AML_AREA_LINUX_MMAP_TYPE_ANONYMOUS,
&arena, MPOL_BIND, fastb->maskp));
&arnf, MPOL_BIND, fastb->maskp));
assert(!aml_dma_linux_par_init(&dma, numthreads*2, numthreads));
assert(!aml_scratch_seq_init(&sa, &fast, &slow, &dma, &tiling,
(size_t)2*numthreads, (size_t)1));
......@@ -124,7 +124,6 @@ int main(int argc, char *argv[])
aml_area_linux_destroy(&slow);
aml_area_linux_destroy(&fast);
aml_tiling_destroy(&tiling, AML_TILING_TYPE_1D);
aml_binding_destroy(&binding, AML_BINDING_TYPE_SINGLE);
aml_finalize();
return 0;
}
......@@ -59,8 +59,8 @@ void do_work(unsigned long tid)
int main(int argc, char *argv[])
{
AML_BINDING_SINGLE_DECL(binding);
AML_ARENA_JEMALLOC_DECL(arena);
AML_ARENA_JEMALLOC_DECL(arns);
AML_ARENA_JEMALLOC_DECL(arnf);
AML_DMA_LINUX_SEQ_DECL(dma);
unsigned long nodemask[AML_NODEMASK_SZ];
struct bitmask *slowb, *fastb;
......@@ -83,20 +83,20 @@ int main(int argc, char *argv[])
}
/* initialize all the supporting struct */
assert(!aml_binding_init(&binding, AML_BINDING_TYPE_SINGLE, 0));
assert(!aml_tiling_init(&tiling, AML_TILING_TYPE_1D, tilesz, memsize));
assert(!aml_arena_jemalloc_init(&arena, AML_ARENA_JEMALLOC_TYPE_REGULAR));
assert(!aml_arena_jemalloc_init(&arns, AML_ARENA_JEMALLOC_TYPE_REGULAR));
assert(!aml_area_linux_init(&slow,
AML_AREA_LINUX_MANAGER_TYPE_SINGLE,
AML_AREA_LINUX_MBIND_TYPE_REGULAR,
AML_AREA_LINUX_MMAP_TYPE_ANONYMOUS,
&arena, MPOL_BIND, slowb->maskp));
&arns, MPOL_BIND, slowb->maskp));
assert(!aml_arena_jemalloc_init(&arnf, AML_ARENA_JEMALLOC_TYPE_REGULAR));
assert(!aml_area_linux_init(&fast,
AML_AREA_LINUX_MANAGER_TYPE_SINGLE,
AML_AREA_LINUX_MBIND_TYPE_REGULAR,
AML_AREA_LINUX_MMAP_TYPE_ANONYMOUS,
&arena, MPOL_BIND, fastb->maskp));
&arnf, MPOL_BIND, fastb->maskp));
assert(!aml_dma_linux_seq_init(&dma, numthreads*2));
assert(!aml_scratch_par_init(&sa, &fast, &slow, &dma, &tiling,
2*numthreads, numthreads));
......@@ -136,7 +136,6 @@ int main(int argc, char *argv[])
aml_area_linux_destroy(&slow);
aml_area_linux_destroy(&fast);
aml_tiling_destroy(&tiling, AML_TILING_TYPE_1D);
aml_binding_destroy(&binding, AML_BINDING_TYPE_SINGLE);
aml_finalize();
return 0;
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment