#include <assert.h>
#include <pthread.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <aml.h>
#include <jemalloc/jemalloc-aml.h>
#include <sys/mman.h>

/*******************************************************************************
 * Arena registry:
 * jemalloc extent hooks only receive the arena id when called, so we have to
 * maintain a registry of all allocated arenas.
 ******************************************************************************/

/* MALLCTL_ARENAS_ALL is a reserved index; regular arena ids are all below it */
#define AML_ARENA_MAX (MALLCTL_ARENAS_ALL-1)

struct aml_arena_jemalloc_global_data {
	struct aml_area *registry[AML_ARENA_MAX];
	struct aml_area *current;
	pthread_mutex_t lock;
};

static struct aml_arena_jemalloc_global_data aml_arena_jemalloc_global = {
	{NULL},
	NULL,
	PTHREAD_MUTEX_INITIALIZER,
};
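
/*
 * Look up the area backing a given arena id. While "arenas.create" is still
 * running, the new id has not been registered yet, so the extent hooks fall
 * back on "current", which aml_arena_jemalloc_register_arena() sets while
 * holding the lock.
 */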

static struct aml_area *aml_arena_registry_get(
				const struct aml_arena_jemalloc_global_data *g,
				unsigned int arenaid)
{
	assert(g != NULL);
	assert(arenaid < AML_ARENA_MAX);
	struct aml_area *ret = g->registry[arenaid];
	if(ret == NULL)
		return g->current;
	else
		return ret;
}

/*******************************************************************************
 * Extent hooks:
 * jemalloc has "extent hooks" to create special arenas where the actual virtual
 * address space management is user-controlled. We use these to control memory
 * binding policies from our areas.
 ******************************************************************************/

/* When jemalloc asks for an alignment larger than the page size, a plain mmap
 * is not guaranteed to return a suitably aligned address, so we over-allocate
 * and trim. */
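/* Illustrative example (made-up numbers): for size = 4096 and a 16384-byte
 * alignment we map 4096 + 16384 bytes, round the returned address up to the
 * next 16384-byte boundary, and munmap the unused bytes in front of and
 * behind the aligned 4096-byte window. */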
static void* aml_arena_extra_align_alloc(struct aml_area *area, void *new_addr,
					 size_t size, size_t alignment)
{
	size_t big_size = size + alignment;
	void *addr;

	addr = aml_area_mmap(area, new_addr, big_size);
	if(addr == MAP_FAILED)
		return NULL;

	/* round up to the requested alignment within the larger mapping */
	uintptr_t iaddr = (uintptr_t)addr;
	uintptr_t aligned_addr = (iaddr + alignment) & ~(alignment-1);

	/* give back the unused head of the mapping */
	size_t front_len = aligned_addr - iaddr;
	if(front_len > 0)
		munmap(addr, front_len);

	/* give back the unused tail of the mapping */
	uintptr_t back = aligned_addr + size;
	size_t back_len = (iaddr + big_size) - back;
	if(back_len > 0)
		munmap((void *)back, back_len);

	return (void *)aligned_addr;
}

static void* aml_arena_extent_alloc(extent_hooks_t *extent_hooks,
					     void *new_addr, size_t size,
					     size_t alignment, bool *zero,
					     bool *commit, unsigned int arenaid)
{
	void *addr;
	struct aml_area *area =
		aml_arena_registry_get(&aml_arena_jemalloc_global, arenaid);

	if(!aml_area_available(area))
		return NULL;

	addr = aml_area_mmap(area, new_addr, size);
	if(addr == MAP_FAILED)
		return NULL;

	if(new_addr != NULL && addr != new_addr) {
		/* not mmaped in the right place */
		munmap(addr, size);
		return NULL;
	}

	if((uintptr_t)addr & (alignment-1)) {
		munmap(addr, size);
		addr = aml_arena_extra_align_alloc(area, new_addr, size,
						   alignment);
		if(addr == NULL)
			return addr;
	}

	*zero = true;
	*commit = true;
	return addr;
}
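
/*
 * The remaining hooks are deliberate no-ops. jemalloc treats a false return
 * value as success, so extents mapped by the alloc hook simply stay in place
 * and no extra commit/decommit/purge/split/merge handling happens here.
 */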

static bool aml_arena_extent_dalloc(extent_hooks_t *extent_hooks,
					     void *addr, size_t size,
					     bool committed,
					     unsigned arena_ind)
{
	return false;
}

static void aml_arena_extent_destroy(extent_hooks_t *extent_hooks,
					      void *addr, size_t size,
					      bool committed, unsigned arena_ind)
{
}

static bool aml_arena_extent_commit(extent_hooks_t *extent_hooks,
					     void *addr, size_t size,
					     size_t offset, size_t length,
					     unsigned arena_ind)
{
	return false;
}

static bool aml_arena_extent_decommit(extent_hooks_t *extent_hooks,
					       void *addr, size_t size,
					       size_t offset, size_t length,
					       unsigned arena_ind)
{
	return false;
}

static bool aml_arena_extent_purge(extent_hooks_t *extent_hooks,
					    void *addr, size_t size,
					    size_t offset, size_t length,
					    unsigned arena_ind)
{
	return false;
}

static bool aml_arena_extent_split(extent_hooks_t *extent_hooks,
					    void *addr, size_t size,
					    size_t size_a, size_t size_b,
					    bool committed, unsigned arena_ind)
{
	return false;
}

static bool aml_arena_extent_merge(extent_hooks_t *extent_hooks,
					    void *addr_a, size_t size_a,
					    void *addr_b, size_t size_b,
					    bool committed, unsigned arena_ind)
{
	return false;
}

static extent_hooks_t aml_arena_extent_hooks = {
	.alloc = aml_arena_extent_alloc,
	.dalloc = aml_arena_extent_dalloc,
	.destroy = aml_arena_extent_destroy,
	.commit = aml_arena_extent_commit,
	.decommit = aml_arena_extent_decommit,
	.purge_lazy = aml_arena_extent_purge,
	.split = aml_arena_extent_split,
	.merge = aml_arena_extent_merge
};
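
/*
 * This hook table is installed per arena: aml_arena_jemalloc_register_arena()
 * below hands a pointer to it to the "arenas.create" mallctl, so every extent
 * request for such an arena is routed through the functions above.
 */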

/*******************************************************************************
 * Core Arena Behavior:
 * Tunable by changing initialization flags
 ******************************************************************************/

/* TODO: make the function idempotent */
int aml_arena_jemalloc_register_arena(struct aml_arena_data *a,
				      struct aml_area *area)
{
	int err;
	unsigned int newidx;
	struct aml_arena_jemalloc_data *arena =
		(struct aml_arena_jemalloc_data*) a;
	extent_hooks_t *hooks = &aml_arena_extent_hooks;
	size_t unsigned_size = sizeof(unsigned int);

	/* only one create at a time */
	pthread_mutex_lock(&aml_arena_jemalloc_global.lock);
	aml_arena_jemalloc_global.current = area;

	/* the locking above is required because this creation will end up
	 * calling the extent hooks before we have a chance to register the
	 * area.
	 */
	err = jemk_aml_mallctl("arenas.create", &newidx, &unsigned_size, &hooks,
			   sizeof(hooks));
	if(err)
		goto exit;

	assert(newidx < AML_ARENA_MAX);
	arena->uid = newidx;
	arena->flags |= MALLOCX_ARENA(newidx);
	aml_arena_jemalloc_global.registry[newidx] = area;
exit:
	aml_arena_jemalloc_global.current = NULL;
	pthread_mutex_unlock(&aml_arena_jemalloc_global.lock);
	return err;
}

/* TODO: make the function idempotent */
int aml_arena_jemalloc_deregister_arena(struct aml_arena_data *a)
{
	struct aml_arena_jemalloc_data *arena =
		(struct aml_arena_jemalloc_data*) a;
	char cmd[64];
	pthread_mutex_lock(&aml_arena_jemalloc_global.lock);

	/* ask jemalloc to purge this arena's unused dirty pages before we
	 * drop the area from the registry */
	snprintf(cmd, sizeof(cmd), "arena.%u.purge", arena->uid);
	jemk_aml_mallctl(cmd, NULL, NULL, NULL, 0);
	aml_arena_jemalloc_global.registry[arena->uid] = NULL;

	pthread_mutex_unlock(&aml_arena_jemalloc_global.lock);
	return 0;
}

void *aml_arena_jemalloc_mallocx(struct aml_arena_data *a, size_t sz,
				 int extraflags)
{
	struct aml_arena_jemalloc_data *arena =
		(struct aml_arena_jemalloc_data*) a;
	int flags = arena->flags | extraflags;
	return jemk_aml_mallocx(sz, flags);
}

void *aml_arena_jemalloc_reallocx(struct aml_arena_data *a, void *ptr,
				  size_t sz, int extraflags)
{
	struct aml_arena_jemalloc_data *arena =
		(struct aml_arena_jemalloc_data*) a;
	int flags = arena->flags | extraflags;
	return jemk_aml_rallocx(ptr, sz, flags);
}

void aml_arena_jemalloc_dallocx(struct aml_arena_data *a, void *ptr,
				int extraflags)
{
	struct aml_arena_jemalloc_data *arena =
		(struct aml_arena_jemalloc_data*) a;
	int flags = arena->flags | extraflags;
	jemk_aml_dallocx(ptr, flags);
}
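
/*
 * Hypothetical usage sketch (assumes an arena that was already initialized
 * and registered against an area; MALLOCX_ZERO is a standard jemalloc flag):
 *
 *	void *p = aml_arena_jemalloc_mallocx(arena->data, 4096, MALLOCX_ZERO);
 *	p = aml_arena_jemalloc_reallocx(arena->data, p, 8192, 0);
 *	aml_arena_jemalloc_dallocx(arena->data, p, 0);
 *
 * extraflags is OR'ed with the flags computed at init time (the arena id and
 * any requested MALLOCX_ALIGN), so callers can add per-call flags.
 */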

struct aml_arena_ops aml_arena_jemalloc_ops = {
	aml_arena_jemalloc_register_arena,
	aml_arena_jemalloc_deregister_arena,
	aml_arena_jemalloc_mallocx,
	aml_arena_jemalloc_dallocx,
	aml_arena_jemalloc_reallocx,
};

/*******************************************************************************
 * Custom initializers:
 * To create the data template for arenas.
 ******************************************************************************/

/* allocate and init the arena according to type */
int aml_arena_jemalloc_create(struct aml_arena **a, int type, ...)
{
	va_list ap;
	struct aml_arena *ret = NULL;
	intptr_t baseptr, dataptr;
	int err;

	/* alloc */
	baseptr = (intptr_t) calloc(1, AML_ARENA_JEMALLOC_ALLOCSIZE);
	if(baseptr == 0)
		return -1;
	dataptr = baseptr + sizeof(struct aml_arena);

	ret = (struct aml_arena *)baseptr;
	ret->data = (struct aml_arena_data *)dataptr;

	va_start(ap, type);
	err = aml_arena_jemalloc_vinit(ret, type, ap);
	va_end(ap);

	*a = ret;
	return err;
}
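
/*
 * Example (hypothetical caller):
 *
 *	struct aml_arena *arena;
 *	aml_arena_jemalloc_create(&arena, AML_ARENA_JEMALLOC_TYPE_ALIGNED,
 *				  (size_t)4096);
 *
 * The variadic argument is only consumed for the ALIGNED type (a size_t
 * alignment) and the GENERIC type (a struct aml_arena_data * used as a
 * template).
 */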

int aml_arena_jemalloc_vinit(struct aml_arena *a, int type, va_list ap)
{
	a->ops = &aml_arena_jemalloc_ops;
	struct aml_arena_jemalloc_data *data =
		(struct aml_arena_jemalloc_data *)a->data;
	if(type == AML_ARENA_JEMALLOC_TYPE_REGULAR)
	{
		data->flags = 0;
	}
	else if(type == AML_ARENA_JEMALLOC_TYPE_ALIGNED)
	{
		size_t align = va_arg(ap, size_t);
		data->flags = MALLOCX_ALIGN(align);
	}
	else if(type == AML_ARENA_JEMALLOC_TYPE_GENERIC)
	{
		struct aml_arena_data *arg = va_arg(ap, struct aml_arena_data*);
		struct aml_arena_jemalloc_data *template =
			(struct aml_arena_jemalloc_data *)arg;
		data->flags = template->flags;
	}
	return 0;
}

int aml_arena_jemalloc_init(struct aml_arena *a, int type, ...)
{
	int err;
	va_list ap;
	va_start(ap, type);
	err = aml_arena_jemalloc_vinit(a, type, ap);
	va_end(ap);
	return err;
}

int aml_arena_jemalloc_destroy(struct aml_arena *a)
{
	return 0;
}