/* arena_jemalloc.c: jemalloc extent-hook based arena implementation for AML. */
#include <assert.h>
#include <aml.h>
#include <jemalloc/jemalloc-aml.h>
#include <pthread.h>
#include <stdio.h>
#include <sys/mman.h>

/*******************************************************************************
 * Arena registry:
 * jemalloc extent hooks only receive the arena id when called, so we have to
 * maintain a registry of all arena allocated.
 ******************************************************************************/

/* MALLCTL_ARENAS_ALL is a reserved index and the last valid one */
#define AML_ARENA_MAX (MALLCTL_ARENAS_ALL-1)

/* Global registry mapping jemalloc arena ids back to the aml_area backing
 * them.  The extent hooks only receive an arena id, so this table is the
 * only way to recover the right area inside a hook. */
struct aml_arena_jemalloc_global_data {
	struct aml_area *registry[AML_ARENA_MAX];
	/* area currently being created; hooks can fire during arena
	 * creation, before the registry slot is filled (see create()). */
	struct aml_area *current;
	/* serializes arena creation/purge and all registry updates */
	pthread_mutex_t lock;
};

static struct aml_arena_jemalloc_global_data aml_arena_jemalloc_global = {
	{NULL},
	NULL,
	PTHREAD_MUTEX_INITIALIZER,
};

/* Return the area registered for 'arenaid'.  During arena creation the
 * extent hooks may run before the registry slot is populated; in that
 * case fall back to the area currently being created (g->current). */
static struct aml_area *aml_arena_registry_get(
				struct aml_arena_jemalloc_global_data *g,
				unsigned int arenaid)
{
	struct aml_area *area;

	assert(g != NULL);
	assert(arenaid < AML_ARENA_MAX);
	area = g->registry[arenaid];
	return area != NULL ? area : g->current;
}

/*******************************************************************************
 * Extent hooks:
 * jemalloc has "extent hooks" to create special arenas where the actual virtual
 * address space management is user-controlled. We use these to control memory
 * binding policies from our areas.
 ******************************************************************************/

/* when jemalloc asks for alignments that are bigger than PAGE_SIZE, the regular
 * mmap will not work, and we need to do extra work. */
/* when jemalloc asks for alignments that are bigger than PAGE_SIZE, the regular
 * mmap will not work, and we need to do extra work: over-allocate by
 * 'alignment' bytes, then unmap the unaligned head and the unused tail so
 * that exactly 'size' bytes remain at an aligned address.
 * 'alignment' must be a power of two (jemalloc guarantees this).
 * Returns the aligned mapping, or NULL on failure. */
static void* aml_arena_extra_align_alloc(struct aml_area *area, void *new_addr,
					 size_t size, size_t alignment)
{
	size_t big_size = size + alignment;
	void *addr;

	addr = aml_area_mmap(area, new_addr, big_size);
	if(addr == MAP_FAILED)
		return NULL;

	uintptr_t iaddr = (uintptr_t)addr;
	/* round up to the next multiple of 'alignment': clear the low bits
	 * with the complemented mask.  Using '& (alignment-1)' here would
	 * yield the remainder, not an aligned address. */
	uintptr_t aligned_addr = (iaddr + alignment) & ~(uintptr_t)(alignment-1);

	/* release the unaligned head of the mapping, if any */
	size_t front_len = aligned_addr - iaddr;
	if(front_len > 0)
		munmap(addr, front_len);

	/* release whatever is left past the aligned region */
	uintptr_t back = aligned_addr + size;
	size_t back_len = (iaddr + big_size) - (aligned_addr + size);
	if(back_len > 0)
		munmap((void *)back, back_len);

	return (void *)aligned_addr;
}

/* jemalloc extent allocation hook: map 'size' bytes through the aml_area
 * registered for 'arenaid'.  Honors fixed-placement requests (new_addr)
 * and jemalloc's alignment requirement, and reports the extent as zeroed
 * and committed on success.  Returns NULL on any failure. */
static void* aml_arena_extent_alloc(extent_hooks_t *extent_hooks,
					     void *new_addr, size_t size,
					     size_t alignment, bool *zero,
					     bool *commit, unsigned int arenaid)
{
	struct aml_area *area;
	void *ret;

	area = aml_arena_registry_get(&aml_arena_jemalloc_global, arenaid);
	if(!aml_area_available(area))
		return NULL;

	ret = aml_area_mmap(area, new_addr, size);
	if(ret == MAP_FAILED)
		return NULL;

	/* jemalloc asked for a fixed placement we could not honor */
	if(new_addr != NULL && ret != new_addr) {
		munmap(ret, size);
		return NULL;
	}

	/* mapping not aligned enough: retry with over-allocation */
	if((uintptr_t)ret & (alignment-1)) {
		munmap(ret, size);
		ret = aml_arena_extra_align_alloc(area, new_addr, size,
						  alignment);
		if(ret == NULL)
			return NULL;
	}

	*zero = true;
	*commit = true;
	return ret;
}

/* jemalloc extent deallocation hook.  Returning false tells jemalloc the
 * extent was handled.  NOTE(review): the extent is not munmap'ed here —
 * presumably intentional (areas keep their mappings), but confirm. */
static bool aml_arena_extent_dalloc(extent_hooks_t *extent_hooks,
					     void *addr, size_t size,
					     bool committed,
					     unsigned arena_ind)
{
	return false;
}

/* jemalloc extent destroy hook: nothing to do, the underlying mapping is
 * managed by the aml_area, not by jemalloc. */
static void aml_arena_extent_destroy(extent_hooks_t *extent_hooks,
					      void *addr, size_t size,
					      bool committed, unsigned arena_ind)
{
}

/* jemalloc extent commit hook.  Returning false reports success: memory
 * obtained through aml_area_mmap is already usable, no extra work needed. */
static bool aml_arena_extent_commit(extent_hooks_t *extent_hooks,
					     void *addr, size_t size,
					     size_t offset, size_t length,
					     unsigned arena_ind)
{
	return false;
}

/* jemalloc extent decommit hook.  Returning false claims success without
 * releasing anything; the extent stays readable/writable. */
static bool aml_arena_extent_decommit(extent_hooks_t *extent_hooks,
					       void *addr, size_t size,
					       size_t offset, size_t length,
					       unsigned arena_ind)
{
	return false;
}

/* jemalloc lazy-purge hook (wired as .purge_lazy below).  Returning false
 * claims the pages were purged; no madvise is actually issued here. */
static bool aml_arena_extent_purge(extent_hooks_t *extent_hooks,
					    void *addr, size_t size,
					    size_t offset, size_t length,
					    unsigned arena_ind)
{
	return false;
}

/* jemalloc extent split hook.  Returning false allows jemalloc to treat
 * the extent as two extents of size_a and size_b; nothing to do on our
 * side since the mapping is contiguous anyway. */
static bool aml_arena_extent_split(extent_hooks_t *extent_hooks,
					    void *addr, size_t size,
					    size_t size_a, size_t size_b,
					    bool committed, unsigned arena_ind)
{
	return false;
}

/* jemalloc extent merge hook.  Returning false allows jemalloc to merge
 * the two adjacent extents into one. */
static bool aml_arena_extent_merge(extent_hooks_t *extent_hooks,
					    void *addr_a, size_t size_a,
					    void *addr_b, size_t size_b,
					    bool committed, unsigned arena_ind)
{
	return false;
}

static extent_hooks_t aml_arena_extent_hooks = {
	.alloc = aml_arena_extent_alloc,
	.dalloc = aml_arena_extent_dalloc,
	.commit = aml_arena_extent_commit,
	.decommit = aml_arena_extent_decommit,
	.purge_lazy = aml_arena_extent_purge,
	.split = aml_arena_extent_split,
	.merge = aml_arena_extent_merge
};

/*******************************************************************************
 * Core Arena Behavior:
 * Tunable by changing initialization flags
 ******************************************************************************/


/* Translate generic AML arena flags into jemalloc MALLOCX_* flags.
 * Only AML_ARENA_FLAG_ZERO is currently mapped (to MALLOCX_ZERO). */
int aml_arena_jemalloc_flags(int flags)
{
	return (flags & AML_ARENA_FLAG_ZERO) ? MALLOCX_ZERO : 0;
}

/* TODO: make the function idempotent */
int aml_arena_jemalloc_create(struct aml_arena_data *a, struct aml_area *area)
{
	int err;
	unsigned int newidx;
	struct aml_arena_jemalloc_data *arena =
		(struct aml_arena_jemalloc_data*) a;
	extent_hooks_t *hooks = &aml_arena_extent_hooks;
	size_t unsigned_size = sizeof(unsigned int);

	/* only one create at a time */
	pthread_mutex_lock(&aml_arena_jemalloc_global.lock);
	aml_arena_jemalloc_global.current = area;

	/* the locking above is required because this creation will end up
	 * calling the extent hooks before we have a change of registering the
	 * area.
	 */
	err = jemk_mallctl("arenas.create", &newidx, &unsigned_size, &hooks,
			   sizeof(hooks));
	if(err)
		goto exit;

	arena->uid = newidx;
	arena->flags |= MALLOCX_ARENA(newidx);
	aml_arena_jemalloc_global.registry[newidx] = area;
exit:
	aml_arena_jemalloc_global.current = NULL;
	pthread_mutex_unlock(&aml_arena_jemalloc_global.lock);
	return err;
}

/* TODO: make the function idempotent */
/* TODO: make the function idempotent */
/* Ask jemalloc to purge all retained pages of this arena, then drop the
 * arena from the global registry.  Always returns 0 (the mallctl result
 * is ignored, best-effort). */
int aml_arena_jemalloc_purge(struct aml_arena_data *a)
{
	struct aml_arena_jemalloc_data *arena =
		(struct aml_arena_jemalloc_data*) a;
	char command[64];

	pthread_mutex_lock(&aml_arena_jemalloc_global.lock);
	snprintf(command, sizeof(command), "arena.%u.purge", arena->uid);
	jemk_mallctl(command, NULL, NULL, NULL, 0);
	aml_arena_jemalloc_global.registry[arena->uid] = NULL;
	pthread_mutex_unlock(&aml_arena_jemalloc_global.lock);
	return 0;
}

/* Allocate 'sz' bytes from this arena via jemalloc's mallocx, merging the
 * arena's base flags with the translated per-call extra flags. */
void *aml_arena_jemalloc_mallocx(struct aml_arena_data *a, size_t sz,
			       int extraflags)
{
	struct aml_arena_jemalloc_data *data =
		(struct aml_arena_jemalloc_data*) a;

	return jemk_mallocx(sz,
			    data->flags | aml_arena_jemalloc_flags(extraflags));
}

/* Resize 'ptr' to 'sz' bytes within this arena via jemalloc's rallocx,
 * merging the arena's base flags with the per-call extra flags. */
void *aml_arena_jemalloc_reallocx(struct aml_arena_data *a, void *ptr,
				  size_t sz, int extraflags)
{
	struct aml_arena_jemalloc_data *data =
		(struct aml_arena_jemalloc_data*) a;

	return jemk_rallocx(ptr, sz,
			    data->flags | aml_arena_jemalloc_flags(extraflags));
}

/* Free 'ptr' back to this arena via jemalloc's dallocx. */
void aml_arena_jemalloc_dallocx(struct aml_arena_data *a, void *ptr,
				int extraflags)
{
	struct aml_arena_jemalloc_data *data =
		(struct aml_arena_jemalloc_data*) a;

	jemk_dallocx(ptr,
		     data->flags | aml_arena_jemalloc_flags(extraflags));
}

/* Operation table for jemalloc-backed arenas.  Positional initializer:
 * the order (create, purge, mallocx, dallocx, reallocx) must match the
 * member order of struct aml_arena_ops declared in the AML headers. */
struct aml_arena_ops aml_arena_jemalloc_ops = {
	aml_arena_jemalloc_create,
	aml_arena_jemalloc_purge,
	aml_arena_jemalloc_mallocx,
	aml_arena_jemalloc_dallocx,
	aml_arena_jemalloc_reallocx,
};

/*******************************************************************************
 * Custom initializers:
 * To create the data template for arenas.
 ******************************************************************************/

/* Initialize template data for a "regular" arena: no extra jemalloc
 * allocation flags.  Always returns 0. */
int aml_arena_jemalloc_regular_init(struct aml_arena_jemalloc_data *data)
{
	assert(data != NULL);

	data->flags = 0;
	return 0;
}

/* Tear down template data for a regular arena: nothing owned, nothing to
 * release.  Always returns 0. */
int aml_arena_jemalloc_regular_destroy(struct aml_arena_jemalloc_data *data)
{
	assert(data != NULL);
	return 0;
}

/* Initialize template data for an arena whose allocations are all aligned
 * to 'align' bytes.  Always returns 0. */
int aml_arena_jemalloc_aligned_init(struct aml_arena_jemalloc_data *data,
				    size_t align)
{
	assert(data != NULL);
	/* MALLOCX_ALIGN requires a nonzero power-of-two alignment */
	assert(align != 0 && (align & (align - 1)) == 0);
	data->flags = MALLOCX_ALIGN(align);
	return 0;
}

/* Tear down template data for an aligned arena: nothing owned, nothing to
 * release.  Always returns 0. */
int aml_arena_jemalloc_align_destroy(struct aml_arena_jemalloc_data *data)
{
	assert(data != NULL);
	return 0;
}

/* Initialize 'data' by copying the flags of an existing template.
 * ('tmpl' instead of 'template' keeps the source C++-safe.)
 * Always returns 0. */
int aml_arena_jemalloc_generic_init(struct aml_arena_jemalloc_data *data,
				    struct aml_arena_jemalloc_data *tmpl)
{
	assert(data != NULL);
	assert(tmpl != NULL);

	data->flags = tmpl->flags;
	return 0;
}

/* Tear down template data initialized by generic_init: nothing owned,
 * nothing to release.  Always returns 0. */
int aml_arena_jemalloc_generic_destroy(struct aml_arena_jemalloc_data *data)
{
	assert(data != NULL);
	return 0;
}