#ifndef AML_H
#define AML_H 1

#include <inttypes.h>
#include <numa.h>
#include <numaif.h>
#include <pthread.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

/* Used by bindings, specifically in aml_binding_nbpages() and
 * aml_binding_pages().  */
#ifndef PAGE_SIZE
#define PAGE_SIZE 4096
#endif

/*******************************************************************************
 * Forward Declarations:
 ******************************************************************************/

struct aml_area;
struct aml_binding;

/*******************************************************************************
 * Generic vector type:
 * Vector of nbelems, each of size sz, with a comparison key at offset off
 ******************************************************************************/

/* Pointer to the key within element "e" of a vector "v".  */
#define AML_VECTOR_ELTKEY_P(v,e) ((int *)(((intptr_t) e) + v->off))
/* Pointer to the key within element index "i" of a vector "v".  */
#define AML_VECTOR_KEY_P(v,i) ((int *)(((intptr_t) v->ptr) + i*v->sz + v->off))
/* Pointer to the element index "i" of a vector "v".  */
#define AML_VECTOR_ELT_P(v,i) ((void *)(((intptr_t) v->ptr) + i*v->sz))

struct aml_vector {
	int na;
	size_t nbelems;
	size_t sz;
	size_t off;
	void *ptr;
};

/* not needed, here for consistency */
#define AML_VECTOR_DECL(name) struct aml_vector name;
#define AML_VECTOR_ALLOCSIZE (sizeof(struct aml_vector))

/*
 * Provides the total number of elements in the vector, including currently
 * unused ones.
 * "vector": an initialized vector structure.
 * Returns the number of elements in the vector.
 */
size_t aml_vector_size(const struct aml_vector *vector);
/*
 * Provides a pointer to the element with index "index" within the vector.
 * "vector": an initialized vector structure.
 * "index": a valid index within "vector".  The index must not equal "na" and
 *          must be lower than the size of the vector.
 * Returns a pointer to the requested element.
 */
void *aml_vector_get(struct aml_vector *vector, int index);
/*
 * Find the first element with a particular key.
 * "vector": an initialized vector structure.
 * "key": the key to look for.
 * Returns the index of the found element or "na" if not found.
 */
int aml_vector_find(const struct aml_vector *vector, int key);
/*
 * Resizes the vector.  The keys of the newly allocated elements are set to the
 * "na" value.
 * "vector": an initialized vector structure.
 * "newsize": a new vector size.  Only sizes greater than the current one will
 *            be honored; smaller sizes will result in a no-op.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_vector_resize(struct aml_vector *vector, size_t newsize);
/*
 * Provides the pointer to the first unused element.  If the vector is full,
 * it automatically gets enlarged.
 * "vector": an initialized vector structure.
 * Returns the pointer to the first unused element.
 */
void *aml_vector_add(struct aml_vector *vector);
/*
 * Removes an element from the vector.  The key of the element is set to the
 * "na" value.
 * "vector": an initialized vector structure.
 * "elem": an element within the vector.
 */
void aml_vector_remove(struct aml_vector *vector, void *elem);

/*
 * Initializes a vector.  Allocates elements and sets their keys to the "na"
 * value.
 * "vector": an allocated vector structure.
 * "num": the number of elements to allocate.
 * "size": the size of each individual element.
 * "key": the offset within each element where the key (of type int) is stored.
 * "na": a "null" key value used to indicate an unused element.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_vector_init(struct aml_vector *vector, size_t num, size_t size,
		    size_t key, int na);
/*
 * Tears down an initialized vector.  Releases the memory buffer holding the
 * elements.
 * "vector": an initialized vector structure.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_vector_destroy(struct aml_vector *vector);
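
/*
 * Example: a minimal sketch of the vector API (error checking omitted), with
 * a caller-defined element type that stores its int key at a known offset:
 *
 *   struct elem { int key; double payload; };
 *   struct aml_vector v;
 *   // 16 elements, key at offset 0, -1 ("na") marks unused slots
 *   aml_vector_init(&v, 16, sizeof(struct elem),
 *                   offsetof(struct elem, key), -1);
 *   struct elem *e = aml_vector_add(&v);
 *   e->key = 42;
 *   int idx = aml_vector_find(&v, 42);  // aml_vector_get(&v, idx) == e
 *   aml_vector_remove(&v, e);
 *   aml_vector_destroy(&v);
 */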

/*******************************************************************************
 * Arenas:
 * In-memory allocator implementation. Dispatches actual memory mappings back to
 * areas.
 ******************************************************************************/

/* If passed as a flag to arena's mallocx()/reallocx() routines, the newly
 * allocated memory will be 0-initialized.  */
#define AML_ARENA_FLAG_ZERO 1

/* opaque handle to configuration data */
struct aml_arena_data;

struct aml_arena_ops {
	int (*register_arena)(struct aml_arena_data *arena,
			      struct aml_area *area);
	int (*deregister_arena)(struct aml_arena_data *arena);
	void *(*mallocx)(struct aml_arena_data *arena, size_t size, int flags);
	void (*dallocx)(struct aml_arena_data *arena, void *ptr, int flags);
	void *(*reallocx)(struct aml_arena_data *arena, void *ptr, size_t size,
			  int flags);
};

struct aml_arena {
	struct aml_arena_ops *ops;
	struct aml_arena_data *data;
};

/*
 * Registers a new memory arena with the system.  After this call the arena
 * is ready for use.
 * "arena": an initialized arena structure (see aml_arena_jemalloc_create()).
 * "area": a memory area that will be used as the backing store.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_arena_register(struct aml_arena *arena, struct aml_area *area);
/*
 * Unregisters a memory arena from the system.  Also purges the contents of
 * the memory, so any buffers allocated from the arena should be considered
 * invalid after this call.
 * "arena": a registered arena structure (see aml_arena_register()).
 * Returns 0 if successful; an error code otherwise.
 */
int aml_arena_deregister(struct aml_arena *arena);
/*
 * Allocates a new memory buffer from the arena.
 * "arena": a registered arena structure (see aml_arena_register()).
 * "size": the buffer size in bytes; if 0 is passed, NULL will be returned.
 * "flags": see AML_ARENA_FLAG_*.
 * Returns a pointer to the newly allocated memory buffer; NULL if unsuccessful.
 */
void *aml_arena_mallocx(struct aml_arena *arena, size_t size, int flags);
/*
 * Releases a memory buffer back to the arena.
 * "arena": a registered arena structure (see aml_arena_register()).
 * "ptr": a pointer to the memory buffer or NULL (resulting in a no-op).
 * "flags": see AML_ARENA_FLAG_* (currently unused).
 */
void aml_arena_dallocx(struct aml_arena *arena, void *ptr, int flags);
/*
 * Changes the size of a previously allocated memory buffer.
 * "arena": a registered arena structure (see aml_arena_register()).
 * "ptr": a pointer to the memory buffer; if NULL is passed, acts just like
 *        aml_arena_mallocx().
 * "size": the new buffer size in bytes; if 0 is passed, acts just like
 *         aml_arena_dallocx() and returns NULL.
 * "flags": see AML_ARENA_FLAG_*.
 * Returns a pointer to the resized memory buffer; NULL if unsuccessful.
 */
void *aml_arena_reallocx(struct aml_arena *arena, void *ptr, size_t size,
			 int flags);

/*******************************************************************************
 * Jemalloc Arena:
 ******************************************************************************/
extern struct aml_arena_ops aml_arena_jemalloc_ops;

struct aml_arena_jemalloc_data {
	unsigned int uid;
	int flags;
};

#define AML_ARENA_JEMALLOC_DECL(name) \
	struct aml_arena_jemalloc_data __ ##name## _inner_data; \
	struct aml_arena name = { \
		&aml_arena_jemalloc_ops, \
		(struct aml_arena_data *)&__ ## name ## _inner_data, \
	};

#define AML_ARENA_JEMALLOC_ALLOCSIZE \
	(sizeof(struct aml_arena_jemalloc_data) + \
	 sizeof(struct aml_arena))

/* Arena types passed to jemalloc arena's create()/init()/vinit() routines.  */
/* Standard arena type.  */
#define AML_ARENA_JEMALLOC_TYPE_REGULAR 0
/* Arena type allocating memory-aligned buffers.  */
#define AML_ARENA_JEMALLOC_TYPE_ALIGNED 1
/* Arena type identical to an existing arena.  */
#define AML_ARENA_JEMALLOC_TYPE_GENERIC 2

/*
 * Allocates and initializes a new jemalloc arena.
 * "arena": an address where the pointer to the newly allocated arena structure
 *          will be stored.
 * "type": see AML_ARENA_JEMALLOC_TYPE_*.
 * Variadic arguments:
 * - if AML_ARENA_JEMALLOC_TYPE_REGULAR is passed as "type", no additional
 *   arguments are needed.
 * - if AML_ARENA_JEMALLOC_TYPE_ALIGNED is passed as "type", an alignment
 *   argument of type size_t and value that is a power of 2 must be provided.
 * - if AML_ARENA_JEMALLOC_TYPE_GENERIC is passed as "type", a pointer argument
 *   to type "aml_arena_data" (obtained from the "data" field of an existing
 *   jemalloc arena structure) must be provided.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_arena_jemalloc_create(struct aml_arena **arena, int type, ...);
/*
 * Initializes a jemalloc arena.  This is a varargs-variant of the
 * aml_arena_jemalloc_vinit() routine.
 * "arena": an allocated jemalloc arena structure.
 * "type": see aml_arena_jemalloc_create().
 * Variadic arguments: see aml_arena_jemalloc_create().
 * Returns 0 if successful; an error code otherwise.
 */
int aml_arena_jemalloc_init(struct aml_arena *arena, int type, ...);
/*
 * Initializes a jemalloc arena.
 * "arena": an allocated jemalloc arena structure.
 * "type": see aml_arena_jemalloc_create().
 * "args": see the variadic arguments of aml_arena_jemalloc_create().
 * Returns 0 if successful; an error code otherwise.
 */
int aml_arena_jemalloc_vinit(struct aml_arena *arena, int type, va_list args);
/*
 * Tears down an initialized jemalloc arena.
 * "arena": an initialized jemalloc arena structure.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_arena_jemalloc_destroy(struct aml_arena *arena);
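
/*
 * Example: a minimal sketch of arena usage (error checking omitted), assuming
 * "area" is an initialized memory area, e.g. one obtained from
 * aml_area_posix_create() declared further below:
 *
 *   struct aml_arena *arena;
 *   aml_arena_jemalloc_create(&arena, AML_ARENA_JEMALLOC_TYPE_REGULAR);
 *   aml_arena_register(arena, area);
 *   void *buf = aml_arena_mallocx(arena, 4096, AML_ARENA_FLAG_ZERO);
 *   aml_arena_dallocx(arena, buf, 0);
 *   aml_arena_deregister(arena);
 *   aml_arena_jemalloc_destroy(arena);
 */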

/*******************************************************************************
 * Areas:
 * embeds information about a byte-addressable physical memory location as well
 * as binding policies over it.
 ******************************************************************************/

/* opaque handle to configuration data */
struct aml_area_data;

struct aml_area_ops {
	void *(*malloc)(struct aml_area_data *area, size_t size);
	void (*free)(struct aml_area_data *area, void *ptr);
	void *(*calloc)(struct aml_area_data *area, size_t num, size_t size);
	void *(*realloc)(struct aml_area_data *area, void *ptr, size_t size);
	void *(*acquire)(struct aml_area_data *area, size_t size);
	void (*release)(struct aml_area_data *area, void *ptr);
	void *(*mmap)(struct aml_area_data *area, void *ptr, size_t size);
	int (*available)(const struct aml_area_data *area);
	int (*binding)(const struct aml_area_data *area,
		       struct aml_binding **binding);
};

struct aml_area {
	struct aml_area_ops *ops;
	struct aml_area_data *data;
};

/*******************************************************************************
 * POSIX Area:
 ******************************************************************************/

extern struct aml_area_ops aml_area_posix_ops;

struct aml_area_posix_data {
};

#define AML_AREA_POSIX_DECL(name) \
	struct aml_area_posix_data __ ##name## _inner_data; \
	struct aml_area name = { \
		&aml_area_posix_ops, \
		(struct aml_area_data *)&__ ## name ## _inner_data, \
	};

#define AML_AREA_POSIX_ALLOCSIZE \
	(sizeof(struct aml_area_posix_data) + \
	 sizeof(struct aml_area))

/*
 * Allocates and initializes a new POSIX memory area.
 * "area": an address where the pointer to the newly allocated area structure
 *         will be stored.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_area_posix_create(struct aml_area **area);
/*
 * Initializes a POSIX memory area.
 * "area": an allocated POSIX memory area structure.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_area_posix_vinit(struct aml_area *area);
/*
 * Initializes a POSIX memory area.  This is identical to the
 * aml_area_posix_vinit() routine.
 * "area": an allocated POSIX memory area structure.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_area_posix_init(struct aml_area *area);
/*
 * Tears down an initialized POSIX memory area.
 * "area": an initialized POSIX memory area structure.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_area_posix_destroy(struct aml_area *area);

/*******************************************************************************
 * Linux Area:
 ******************************************************************************/

extern struct aml_area_ops aml_area_linux_ops;

struct aml_area_linux_manager_data {
	struct aml_arena *pool;
	size_t pool_size;
};

struct aml_area_linux_manager_ops {
	struct aml_arena *(*get_arena)(const struct aml_area_linux_manager_data *data);
};

extern struct aml_area_linux_manager_ops aml_area_linux_manager_single_ops;

/*
 * Initializes a Linux memory area manager.  A manager determines which arena
 * to use for allocations.
 * "data": an allocated Linux manager structure.
 * "arena": an arena to use for future allocations.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_area_linux_manager_single_init(struct aml_area_linux_manager_data *data,
				       struct aml_arena *arena);
/*
 * Tears down an initialized Linux memory area manager.
 * "data": an initialized Linux manager structure.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_area_linux_manager_single_destroy(struct aml_area_linux_manager_data *data);

/* Size of the bitmask (in bits) passed to aml_area_linux_mbind_init().  */
#define AML_MAX_NUMA_NODES 128
/* Size of the bitmask (in bytes) passed to aml_area_linux_mbind_init().  */
#define AML_NODEMASK_BYTES (AML_MAX_NUMA_NODES/8)
/* Size of the bitmask (in array elements) passed to
   aml_area_linux_mbind_init().  */
#define AML_NODEMASK_SZ (AML_NODEMASK_BYTES/sizeof(unsigned long))

#define AML_NODEMASK_NBITS (8*sizeof(unsigned long))
#define AML_NODEMASK_ELT(i) ((i) / AML_NODEMASK_NBITS)
#define AML_NODEMASK_BITMASK(i) ((unsigned long)1 << ((i) % AML_NODEMASK_NBITS))
#define AML_NODEMASK_ISSET(mask, i) \
	((mask[AML_NODEMASK_ELT(i)] & AML_NODEMASK_BITMASK(i)) != 0)
/*
 * Sets a bit in a nodemask.
 * "mask": an array of type "unsigned long", at least AML_NODEMASK_SZ long.
 * "i": bit to set, indicating a NUMA node.
 */
#define AML_NODEMASK_SET(mask, i) (mask[AML_NODEMASK_ELT(i)] |= AML_NODEMASK_BITMASK(i))
/*
 * Zero-initializes a nodemask.
 * "mask": an array of type "unsigned long", at least AML_NODEMASK_SZ long.
 */
#define AML_NODEMASK_ZERO(mask) \
	do {								\
		for(unsigned int __i = 0; __i < AML_NODEMASK_SZ; __i++)	\
			mask[__i] = 0;					\
	} while(0)
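
/*
 * Example: building a nodemask that selects NUMA nodes 0 and 1 (sketch):
 *
 *   unsigned long nodemask[AML_NODEMASK_SZ];
 *   AML_NODEMASK_ZERO(nodemask);
 *   AML_NODEMASK_SET(nodemask, 0);
 *   AML_NODEMASK_SET(nodemask, 1);
 *   // AML_NODEMASK_ISSET(nodemask, 0) is now nonzero
 */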


struct aml_area_linux_mbind_data {
	unsigned long nodemask[AML_NODEMASK_SZ];
	int policy;
};

struct aml_area_linux_mbind_ops {
	int (*pre_bind)(struct aml_area_linux_mbind_data *data);
	int (*post_bind)(struct aml_area_linux_mbind_data *data, void *ptr,
			 size_t size);
	int (*binding)(const struct aml_area_linux_mbind_data *data,
		       struct aml_binding **binding);
};

/*
 * Sets memory policy of a Linux memory area.
 * "data": an initialized Linux memory policy structure.
 * "policy", "mask": see aml_area_linux_mbind_init().
 * Returns 0 if successful; an error code otherwise.
 */
int aml_area_linux_mbind_setdata(struct aml_area_linux_mbind_data *data,
				 int policy, const unsigned long *nodemask);
/*
 * Creates a new binding structure based on an existing Linux memory policy
 * structure.
 * "data": an initialized Linux memory policy structure.
 * "binding": an address where the pointer to the newly allocated binding
 *            structure will be stored.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_area_linux_mbind_generic_binding(const struct aml_area_linux_mbind_data *data,
					 struct aml_binding **binding);
/*
 * Sets current memory policy before memory allocation using the Linux memory
 * area.
 * This variant is used with AML_AREA_LINUX_MBIND_TYPE_REGULAR mbind type (see
 * aml_area_linux_create()) and is basically a no-op.
 * "data": an initialized Linux memory policy structure.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_area_linux_mbind_regular_pre_bind(struct aml_area_linux_mbind_data *data);
/*
 * Sets current memory policy on a new memory region allocated using the Linux
 * memory area.
 * This variant is used with AML_AREA_LINUX_MBIND_TYPE_REGULAR mbind type (see
 * aml_area_linux_create()).
 * "data": an initialized Linux memory policy structure.
 * "ptr": an address of the newly allocated memory region.
 * "size": the size of the newly allocated memory region.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_area_linux_mbind_regular_post_bind(struct aml_area_linux_mbind_data *data,
					   void *ptr, size_t size);
/*
 * Sets current memory policy before memory allocation using the Linux memory
 * area.
 * This variant is used with AML_AREA_LINUX_MBIND_TYPE_MEMPOLICY mbind type (see
 * aml_area_linux_create()).
 * "data": an initialized Linux memory policy structure.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_area_linux_mbind_mempolicy_pre_bind(struct aml_area_linux_mbind_data *data);
/*
 * Sets current memory policy on a new memory region allocated using the Linux
 * memory area.
 * This variant is used with AML_AREA_LINUX_MBIND_TYPE_MEMPOLICY mbind type (see
 * aml_area_linux_create()).
 * "data": an initialized Linux memory policy structure.
 * "ptr": an address of the newly allocated memory region.
 * "size": the size of the newly allocated memory region.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_area_linux_mbind_mempolicy_post_bind(struct aml_area_linux_mbind_data *data,
					   void *ptr, size_t size);
/*
 * Initializes memory policy of a Linux memory area.
 * "data": an allocated Linux memory policy structure.
 * "policy": see MPOL_* in mbind(2).
 * "nodemask": an AML_MAX_NUMA_NODES-bit array (an AML_NODEMASK_SZ-element
 *             array) containing the NUMA node mask to use (see mbind(2) for
 *             more information).
 * Returns 0 if successful; an error code otherwise.
 */
int aml_area_linux_mbind_init(struct aml_area_linux_mbind_data *data,
			      int policy, const unsigned long *nodemask);
/*
 * Tears down an initialized Linux memory policy.
 * "data": an initialized Linux memory policy structure.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_area_linux_mbind_destroy(struct aml_area_linux_mbind_data *data);
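
/*
 * Example: a sketch binding future allocations to the nodes selected in
 * "nodemask" (built with the AML_NODEMASK_* macros above); MPOL_BIND comes
 * from numaif.h:
 *
 *   struct aml_area_linux_mbind_data mbind;
 *   aml_area_linux_mbind_init(&mbind, MPOL_BIND, nodemask);
 *   ...
 *   aml_area_linux_mbind_destroy(&mbind);
 */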

extern struct aml_area_linux_mbind_ops aml_area_linux_mbind_regular_ops;
extern struct aml_area_linux_mbind_ops aml_area_linux_mbind_mempolicy_ops;

struct aml_area_linux_mmap_data {
	int prot;
	int flags;
	int fildes;
	off_t off;
};

struct aml_area_linux_mmap_ops {
	void *(*mmap)(struct aml_area_linux_mmap_data *data, void *ptr,
		      size_t size);
};

/*
 * Allocates a memory region from a Linux memory area.
 * "data": an initialized Linux memory map structure.
 * "ptr": an address where the new memory region should be allocated (hint only;
 *        can be NULL to let the kernel decide).
 * "size": the requested size of the new memory region to allocate.
 * Returns the address of the newly allocated region or MAP_FAILED (see mmap(2))
 * if unsuccessful.
 */
void *aml_area_linux_mmap_generic(struct aml_area_linux_mmap_data *data,
				  void *ptr, size_t size);
/*
 * Initializes memory map of a Linux memory area to use an anonymous
 * (0-initialized) mapping.
 * This variant is used with AML_AREA_LINUX_MMAP_TYPE_ANONYMOUS map type (see
 * aml_area_linux_create()).
 * "data": an allocated Linux memory map structure.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_area_linux_mmap_anonymous_init(struct aml_area_linux_mmap_data *data);
/*
 * Initializes memory map of a Linux memory area to use an existing file
 * mapping.
 * This variant is used with AML_AREA_LINUX_MMAP_TYPE_FD map type (see
 * aml_area_linux_create()).
 * "data": an allocated Linux memory map structure.
 * "fd": an open file descriptor.
 * "offset": the offset within the file to allocate from.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_area_linux_mmap_fd_init(struct aml_area_linux_mmap_data *data, int fd,
				off_t offset);
/*
 * Initializes memory map of a Linux memory area to use a newly created,
 * temporary file mapping.
 * This variant is used with AML_AREA_LINUX_MMAP_TYPE_TMPFILE map type (see
 * aml_area_linux_create()).
 * "data": an allocated Linux memory map structure.
 * "template": a file name template, ending in "XXXXXX"; the last six characters
 *             will be replaced with the actual name on successful file creation
 *             (see mkstemp(3) for more information).
 * "max": the size of the temporary file to create.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_area_linux_mmap_tmpfile_init(struct aml_area_linux_mmap_data *data,
				     char *template, size_t max);
/*
 * Tears down an initialized Linux memory map.
 * This variant is used with AML_AREA_LINUX_MMAP_TYPE_ANONYMOUS map type (see
 * aml_area_linux_create()).
 * "data": an initialized Linux memory map structure.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_area_linux_mmap_anonymous_destroy(struct aml_area_linux_mmap_data *data);
/*
 * Tears down an initialized Linux memory map.
 * This variant is used with AML_AREA_LINUX_MMAP_TYPE_FD map type (see
 * aml_area_linux_create()).
 * "data": an initialized Linux memory map structure.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_area_linux_mmap_fd_destroy(struct aml_area_linux_mmap_data *data);
/*
 * Tears down an initialized Linux memory map.
 * This variant is used with AML_AREA_LINUX_MMAP_TYPE_TMPFILE map type (see
 * aml_area_linux_create()).
 * "data": an initialized Linux memory map structure.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_area_linux_mmap_tmpfile_destroy(struct aml_area_linux_mmap_data *data);

extern struct aml_area_linux_mmap_ops aml_area_linux_mmap_generic_ops;

struct aml_area_linux_data {
	struct aml_area_linux_manager_data manager;
	struct aml_area_linux_mbind_data mbind;
	struct aml_area_linux_mmap_data mmap;
};

struct aml_area_linux_ops {
	struct aml_area_linux_manager_ops manager;
	struct aml_area_linux_mbind_ops mbind;
	struct aml_area_linux_mmap_ops mmap;
};

struct aml_area_linux {
	struct aml_area_linux_data data;
	struct aml_area_linux_ops ops;
};

#define AML_AREA_LINUX_DECL(name) \
	struct aml_area_linux __ ##name## _inner_data; \
	struct aml_area name = { \
		&aml_area_linux_ops, \
		(struct aml_area_data *)&__ ## name ## _inner_data, \
	};

#define AML_AREA_LINUX_ALLOCSIZE \
	(sizeof(struct aml_area_linux) + \
	 sizeof(struct aml_area))

/* Linux memory area manager types, passed to Linux memory area's
   create()/init()/vinit() routines.  */
/* Single-arena manager.  */
#define AML_AREA_LINUX_MANAGER_TYPE_SINGLE 0

/* Linux memory area mbind types, passed to Linux memory area's
   create()/init()/vinit() routines.  */
/* Regular type using mbind() after mmap().  */
#define AML_AREA_LINUX_MBIND_TYPE_REGULAR 0
/* Calls set_mempolicy() before and after mmap() to change the memory policy
   globally.  */
#define AML_AREA_LINUX_MBIND_TYPE_MEMPOLICY 1

/* Linux memory area map types, passed to Linux memory area's
   create()/init()/vinit() routines.  */
/* Zero-initialized, anonymous mapping.  */
#define AML_AREA_LINUX_MMAP_TYPE_ANONYMOUS 0
/* Mapping using an existing file.  */
#define AML_AREA_LINUX_MMAP_TYPE_FD 1
/* Mapping using a newly created temporary file.  */
#define AML_AREA_LINUX_MMAP_TYPE_TMPFILE 2

/*
 * Allocates and initializes a new Linux memory area.
 * "area": an address where the pointer to the newly allocated Linux memory area
 *         will be stored.
 * "manager_type": see AML_AREA_LINUX_MANAGER_TYPE_*.
 * "mbind_type": see AML_AREA_LINUX_MBIND_TYPE_*.
 * "mmap_type": see AML_AREA_LINUX_MMAP_TYPE_*.
 * Variadic arguments:
 * - "policy": an argument of type int; see aml_area_linux_mbind_init().
 * - "nodemask": an argument of type const unsigned long*;
 *               see aml_area_linux_mbind_init().
 * - if AML_AREA_LINUX_MMAP_TYPE_ANONYMOUS is passed as "mmap_type", no
 *   additional arguments are needed.
 * - if AML_AREA_LINUX_MMAP_TYPE_FD is passed as "mmap_type", two additional
 *   arguments are needed:
 *   - "fd": an argument of type int; see aml_area_linux_mmap_fd_init().
 *   - "offset": an argument of type off_t; see aml_area_linux_mmap_fd_init().
 * - if AML_AREA_LINUX_MMAP_TYPE_TMPFILE is passed as "mmap_type", two
 *   additional arguments are needed:
 *   - "template": an argument of type char*; see
 *     aml_area_linux_mmap_tmpfile_init().
 *   - "max": an argument of type size_t; see
 *     aml_area_linux_mmap_tmpfile_init().
 * Returns 0 if successful; an error code otherwise.
 */
int aml_area_linux_create(struct aml_area **area, int manager_type,
			  int mbind_type, int mmap_type, ...);
/*
 * Initializes a Linux memory area.  This is a varargs-variant of the
 * aml_area_linux_vinit() routine.
 * "area": an allocated Linux memory area structure.
 * "manager_type", "mbind_type", "mmap_type": see aml_area_linux_create().
 * Variadic arguments: see aml_area_linux_create().
 * Returns 0 if successful; an error code otherwise.
 */
int aml_area_linux_init(struct aml_area *area, int manager_type, int mbind_type,
			int mmap_type, ...);
/*
 * Initializes a Linux memory area.
 * "area": an allocated Linux memory area structure.
 * "manager_type", "mbind_type", "mmap_type": see aml_area_linux_create().
 * "args": see the variadic arguments of aml_area_linux_create().
 * Returns 0 if successful; an error code otherwise.
 */
int aml_area_linux_vinit(struct aml_area *area, int manager_type,
			 int mbind_type, int mmap_type, va_list args);
/*
 * Tears down an initialized Linux memory area.
 * "area": an initialized Linux memory area structure.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_area_linux_destroy(struct aml_area *area);
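
/*
 * Example: a sketch creating an anonymous Linux memory area whose pages are
 * interleaved across the nodes selected in "nodemask" (error checking
 * omitted; MPOL_INTERLEAVE comes from numaif.h):
 *
 *   struct aml_area *area;
 *   aml_area_linux_create(&area,
 *                         AML_AREA_LINUX_MANAGER_TYPE_SINGLE,
 *                         AML_AREA_LINUX_MBIND_TYPE_REGULAR,
 *                         AML_AREA_LINUX_MMAP_TYPE_ANONYMOUS,
 *                         MPOL_INTERLEAVE, nodemask);
 *   ...
 *   aml_area_linux_destroy(area);
 */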

/*******************************************************************************
 * Generic Area API:
 * Low-level, direct access to area logic.
 * Memory allocation functions follow the POSIX spec.
 ******************************************************************************/

/*
 * Allocates a new memory buffer from a memory area.
 * "area": an initialized memory area structure.
 * "size": the buffer size in bytes; if 0 is passed, NULL will be returned.
 * Returns a pointer to the newly allocated memory buffer; NULL if unsuccessful.
 */
void *aml_area_malloc(struct aml_area *area, size_t size);
/*
 * Releases a memory buffer back to the memory area.
 * "area": an initialized memory area structure.
 * "ptr": a pointer to the memory buffer or NULL (resulting in a no-op).
 */
void aml_area_free(struct aml_area *area, void *ptr);
/*
 * Allocates a new, zero-initialized memory buffer from a memory area.
 * "area": an initialized memory area structure.
 * "num": the number of elements of size "size" to allocate; if 0 is passed,
 *        NULL will be returned.
 * "size": the size of each individual element to allocate, in bytes; if 0 is
 *         passed, NULL will be returned.
 * Returns a pointer to the newly allocated memory buffer; NULL if unsuccessful.
 */
void *aml_area_calloc(struct aml_area *area, size_t num, size_t size);
/*
 * Changes the size of a previously allocated memory buffer.
 * "area": an initialized memory area structure.
 * "ptr": a pointer to the memory buffer; if NULL is passed, acts just like
 *        aml_area_malloc().
 * "size": the new buffer size in bytes; if 0 is passed, acts just like
 *         aml_area_free() and returns NULL.
 * Returns a pointer to the resized memory buffer; NULL if unsuccessful.
 */
void *aml_area_realloc(struct aml_area *area, void *ptr, size_t size);
/* FIXME! */
void *aml_area_acquire(struct aml_area *area, size_t size);
/* FIXME! */
void aml_area_release(struct aml_area *area, void *ptr);
/*
 * Allocates a memory region from a Linux memory area, respecting memory policy
 * settings (see aml_area_linux_mbind_init()).
 * "area": an initialized memory area structure.
 * "ptr": an address where the new memory region should be allocated (hint only;
 *        can be NULL to let the kernel decide).
 * "size": the requested size of the new memory region to allocate.
 * Returns the address of the newly allocated region or MAP_FAILED (see mmap(2))
 * if unsuccessful.
 */
void *aml_area_mmap(struct aml_area *area, void *ptr, size_t size);
/* FIXME! */
int aml_area_available(const struct aml_area *area);
/*
 * Creates a new binding structure based on an existing Linux memory area.
 * "area": an initialized memory area structure.
 * "binding": an address where the pointer to the newly allocated binding
 *            structure will be stored.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_area_binding(const struct aml_area *area, struct aml_binding **binding);
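
/*
 * Example: a sketch of the generic allocation calls (error checking omitted),
 * assuming "area" is an initialized memory area:
 *
 *   double *a = aml_area_malloc(area, 1024 * sizeof(double));
 *   double *b = aml_area_calloc(area, 1024, sizeof(double));
 *   a = aml_area_realloc(area, a, 2048 * sizeof(double));
 *   aml_area_free(area, a);
 *   aml_area_free(area, b);
 */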

/*******************************************************************************
 * Tiling:
 * Representation of a data structure organization in memory.
 ******************************************************************************/

/* opaque handle to all tilings */
struct aml_tiling_data;
struct aml_tiling_iterator_data;

/* forward declarations */
struct aml_tiling_iterator_ops;
struct aml_tiling_iterator;


struct aml_tiling_ops {
	int (*create_iterator)(struct aml_tiling_data *tiling,
			       struct aml_tiling_iterator **iterator,
			       int flags);
	int (*init_iterator)(struct aml_tiling_data *tiling,
			     struct aml_tiling_iterator *iterator, int flags);
	int (*destroy_iterator)(struct aml_tiling_data *tiling,
				struct aml_tiling_iterator *iterator);
	size_t (*tilesize)(const struct aml_tiling_data *tiling, int tileid);
	void* (*tilestart)(const struct aml_tiling_data *tiling,
			   const void *ptr, int tileid);
};

struct aml_tiling {
	struct aml_tiling_ops *ops;
	struct aml_tiling_data *data;
};

/*
 * Provides the information on the size of a tile.
 * "tiling": an initialized tiling structure.
 * "tileid": an identifier of a tile (a value between 0 and the number of tiles
 *           minus 1).
 * Returns the size of a tile.
 */
size_t aml_tiling_tilesize(const struct aml_tiling *tiling, int tileid);
/*
 * Provides the information on the location of a tile in memory.
 * "tiling": an initialized tiling structure.
 * "ptr": an address of the start of the complete user data structure that this
 *        tiling describes.
 * "tileid": an identifier of a tile (a value between 0 and the number of tiles
 *           minus 1).
 * Returns the address of the start of the tile identified by "tileid", within
 * the provided user data structure.
 */
void* aml_tiling_tilestart(const struct aml_tiling *tiling, const void *ptr,
			   int tileid);

/*
 * Allocates and initializes a new tiling iterator.
 * "tiling": an initialized tiling structure.
 * "iterator": an address where the pointer to the newly allocated iterator
 *             structure will be stored.
 * "flags": reserved for future use; pass 0 for now.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_tiling_create_iterator(struct aml_tiling *tiling,
			       struct aml_tiling_iterator **iterator,
			       int flags);
/*
 * Initializes a tiling iterator.
 * "tiling": an initialized tiling structure.
 * "iterator": an allocated tiling iterator structure.
 * "flags": reserved for future use; pass 0 for now.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_tiling_init_iterator(struct aml_tiling *tiling,
			     struct aml_tiling_iterator *iterator, int flags);
/*
 * Tears down an initialized tiling iterator.
 * "tiling": an initialized tiling structure.
 * "iterator": an initialized tiling iterator structure.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_tiling_destroy_iterator(struct aml_tiling *tiling,
				struct aml_tiling_iterator *iterator);

struct aml_tiling_iterator_ops {
	int (*reset)(struct aml_tiling_iterator_data *iterator);
	int (*next)(struct aml_tiling_iterator_data *iterator);
	int (*end)(const struct aml_tiling_iterator_data *iterator);
	int (*get)(const struct aml_tiling_iterator_data *iterator,
		   va_list args);
};

struct aml_tiling_iterator {
	struct aml_tiling_iterator_ops *ops;
	struct aml_tiling_iterator_data *data;
};

/*
 * Resets a tiling iterator to the first tile.
 * "iterator": an initialized tiling iterator structure.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_tiling_iterator_reset(struct aml_tiling_iterator *iterator);
/*
 * Advances a tiling iterator to the next tile.
 * "iterator": an initialized tiling iterator structure.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_tiling_iterator_next(struct aml_tiling_iterator *iterator);
/*
 * Checks whether the iterator is past the last tile.
 * "iterator": an initialized tiling iterator structure.
 * Returns 0 if the iterator points at a valid tile; 1 if it's past the last
 * tile.
 */
int aml_tiling_iterator_end(const struct aml_tiling_iterator *iterator);
/*
 * Queries the iterator.
 * "iterator": an initialized tiling iterator structure.
 * Variadic arguments:
 * - "x": an argument of type unsigned long*; on return gets filled with the
 *        identifier of the tile currently pointed to.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_tiling_iterator_get(const struct aml_tiling_iterator *iterator, ...);

/* Tiling types passed to the tiling create()/init()/vinit() routines.  */
/* Regular, linear tiling with uniform tile sizes.  */
#define AML_TILING_TYPE_1D 0

/*
 * Allocates and initializes a new tiling.
 * "tiling": an address where the pointer to the newly allocated tiling
 *           structure will be stored.
 * "type": see AML_TILING_TYPE_*.
 * Variadic arguments:
 * - if "type" equals AML_TILING_TYPE_1D, two additional arguments are needed:
 *   - "tilesize": an argument of type size_t; provides the size of each tile.
 *   - "totalsize": an argument of type size_t; provides the size of the
 *                  complete user data structure to be tiled.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_tiling_create(struct aml_tiling **tiling, int type, ...);
/*
 * Initializes a tiling.  This is a varargs-variant of the aml_tiling_vinit()
 * routine.
 * "tiling": an allocated tiling structure.
 * "type": see aml_tiling_create().
 * Variadic arguments: see aml_tiling_create().
 * Returns 0 if successful; an error code otherwise.
 */
int aml_tiling_init(struct aml_tiling *tiling, int type, ...);
/*
 * Initializes a tiling.
 * "tiling": an allocated tiling structure.
 * "type": see aml_tiling_create().
 * "args": see the variadic arguments of aml_tiling_create().
 * Returns 0 if successful; an error code otherwise.
 */
int aml_tiling_vinit(struct aml_tiling *tiling, int type, va_list args);
/*
 * Tears down an initialized tiling.
 * "tiling": an initialized tiling structure.
 * "type": see aml_tiling_create().
 * Returns 0 if successful; an error code otherwise.
 */
int aml_tiling_destroy(struct aml_tiling *tiling, int type);

/*******************************************************************************
 * Tiling 1D:
 ******************************************************************************/

extern struct aml_tiling_ops aml_tiling_1d_ops;
extern struct aml_tiling_iterator_ops aml_tiling_iterator_1d_ops;

struct aml_tiling_1d_data {
	size_t blocksize;
	size_t totalsize;
};

struct aml_tiling_iterator_1d_data {
	size_t i;
	struct aml_tiling_1d_data *tiling;
};

#define AML_TILING_1D_DECL(name) \
	struct aml_tiling_1d_data __ ##name## _inner_data; \
	struct aml_tiling name = { \
		&aml_tiling_1d_ops, \
		(struct aml_tiling_data *)&__ ## name ## _inner_data, \
	};

#define AML_TILING_ITERATOR_1D_DECL(name) \
	struct aml_tiling_iterator_1d_data __ ##name## _inner_data; \
	struct aml_tiling_iterator name = { \
		&aml_tiling_iterator_1d_ops, \
		(struct aml_tiling_iterator_data *)&__ ## name ## _inner_data, \
	};

#define AML_TILING_1D_ALLOCSIZE (sizeof(struct aml_tiling_1d_data) + \
				 sizeof(struct aml_tiling))

#define AML_TILING_ITERATOR_1D_ALLOCSIZE \
	(sizeof(struct aml_tiling_iterator_1d_data) + \
	 sizeof(struct aml_tiling_iterator))
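
/*
 * Example: a sketch tiling a 1 MB buffer "buf" into 4 KB tiles and walking
 * the tile identifiers (error checking omitted):
 *
 *   struct aml_tiling *tiling;
 *   struct aml_tiling_iterator *it;
 *   unsigned long tid;
 *   aml_tiling_create(&tiling, AML_TILING_TYPE_1D,
 *                     (size_t)4096, (size_t)(1 << 20));
 *   aml_tiling_create_iterator(tiling, &it, 0);
 *   for (aml_tiling_iterator_reset(it); !aml_tiling_iterator_end(it);
 *        aml_tiling_iterator_next(it)) {
 *           aml_tiling_iterator_get(it, &tid);
 *           void *start = aml_tiling_tilestart(tiling, buf, (int)tid);
 *           size_t sz = aml_tiling_tilesize(tiling, (int)tid);
 *           // operate on [start, start + sz)
 *   }
 *   aml_tiling_destroy_iterator(tiling, it);
 *   aml_tiling_destroy(tiling, AML_TILING_TYPE_1D);
 */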

/*******************************************************************************
 * Binding:
 * Representation of page bindings in an area
 ******************************************************************************/

/* opaque handle to all bindings */
struct aml_binding_data;

struct aml_binding_ops {
	int (*nbpages)(const struct aml_binding_data *binding,
		       const struct aml_tiling *tiling, const void *ptr,
		       int tileid);
	int (*pages)(const struct aml_binding_data *binding, void **pages,
		     const struct aml_tiling *tiling, const void *ptr,
		     int tileid);
	int (*nodes)(const struct aml_binding_data *binding, int *nodes,
		     const struct aml_tiling *tiling, const void *ptr,
		     int tileid);
};

struct aml_binding {
	struct aml_binding_ops *ops;
	struct aml_binding_data *data;
};

/*
 * Provides the size of a tile in memory, in pages.
 * "binding": an initialized binding structure.
 * "tiling": an initialized tiling structure.
 * "ptr", "tileid": see aml_tiling_tilestart().
 * Returns the total number of pages that a tile occupies, including partial
 * pages.
 */
int aml_binding_nbpages(const struct aml_binding *binding,
			const struct aml_tiling *tiling,
			const void *ptr, int tileid);
/*
 * Provides the addresses of pages that a tile occupies.
 * "binding": an initialized binding structure.
 * "pages": an array that will be filled with start addresses of all pages
 *          that a tile occupies.  The array must be at least
 *          aml_binding_nbpages() elements long.
 * "tiling": an initialized tiling structure.
 * "ptr", "tileid": see aml_tiling_tilestart().
 * Returns 0 if successful; an error code otherwise.
 */
int aml_binding_pages(const struct aml_binding *binding, void **pages,
		      const struct aml_tiling *tiling, const void *ptr,
		      int tileid);
/*
 * Provides the NUMA node information of pages that a tile occupies.
 * "binding": an initialized binding structure.
 * "nodes": an array that will be filled with NUMA node id's of all pages
 *          that a tile occupies.  The array must be at least
 *          aml_binding_nbpages() elements long.
 * "tiling": an initialized tiling structure.
 * "ptr", "tileid": see aml_tiling_tilestart().
 * Returns 0 if successful; an error code otherwise.
 */
int aml_binding_nodes(const struct aml_binding *binding, int *nodes,
		      const struct aml_tiling *tiling, const void *ptr,
		      int tileid);

/* Binding types passed to the binding create()/init()/vinit() routines.  */
/* Binding where all pages are bound to the same NUMA node.  */
#define AML_BINDING_TYPE_SINGLE 0
/* Binding where pages are interleaved among multiple NUMA nodes.  */
#define AML_BINDING_TYPE_INTERLEAVE 1

/*
 * Allocates and initializes a new binding.
 * "binding": an address where the pointer to the newly allocated binding
 *            structure will be stored.
 * "type": see AML_BINDING_TYPE_*.
 * Variadic arguments:
 * - if "type" equals AML_BINDING_TYPE_SINGLE, one additional argument is
 *   needed:
 *   - "node": an argument of type int; provides a NUMA node id where pages
 *             will be allocated from.
 * - if "type" equals AML_BINDING_TYPE_INTERLEAVE, one additional argument is
 *   needed:
 *   - "mask": an argument of type const unsigned long*; provides an array
 *             at least AML_NODEMASK_SZ elements long, storing a bitmask of
 *             NUMA node ids where pages will be allocated from.  See
 *             AML_NODEMASK_* macros for more information.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_binding_create(struct aml_binding **binding, int type, ...);
/*
 * Initializes a new binding.  This is a varargs-variant of the
 * aml_binding_vinit() routine.
 * "binding": an allocated binding structure.
 * "type": see aml_binding_create().
 * Variadic arguments: see aml_binding_create().
 * Returns 0 if successful; an error code otherwise.
 */
int aml_binding_init(struct aml_binding *binding, int type, ...);
/*
 * Initializes a new binding.
 * "binding": an allocated binding structure.
 * "type": see aml_binding_create().
 * "args": see the variadic arguments of aml_binding_create().
 * Returns 0 if successful; an error code otherwise.
 */
int aml_binding_vinit(struct aml_binding *binding, int type, va_list args);
/*
 * Tears down an initialized binding.
 * "binding": an initialized binding structure.
 * "type": see aml_binding_create().
 * Returns 0 if successful; an error code otherwise.
 */
int aml_binding_destroy(struct aml_binding *binding, int type);
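
/*
 * Example: a sketch creating a binding that places every page on NUMA node 0,
 * then one that interleaves pages over the nodes in "nodemask" (error
 * checking omitted):
 *
 *   struct aml_binding *single, *inter;
 *   aml_binding_create(&single, AML_BINDING_TYPE_SINGLE, 0);
 *   aml_binding_create(&inter, AML_BINDING_TYPE_INTERLEAVE, nodemask);
 *   ...
 *   aml_binding_destroy(single, AML_BINDING_TYPE_SINGLE);
 *   aml_binding_destroy(inter, AML_BINDING_TYPE_INTERLEAVE);
 */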

/*******************************************************************************
 * Single Binding:
 * All pages on the same node
 ******************************************************************************/

extern struct aml_binding_ops aml_binding_single_ops;

struct aml_binding_single_data {
	int node;
};

#define AML_BINDING_SINGLE_DECL(name) \
	struct aml_binding_single_data __ ##name## _inner_data; \
	struct aml_binding name = { \
		&aml_binding_single_ops, \
		(struct aml_binding_data *)&__ ## name ## _inner_data, \
	};

#define AML_BINDING_SINGLE_ALLOCSIZE (sizeof(struct aml_binding_single_data) + \
				      sizeof(struct aml_binding))

/*******************************************************************************
 * Interleave Binding:
 * each page, of each tile, interleaved across nodes.
 ******************************************************************************/

extern struct aml_binding_ops aml_binding_interleave_ops;

struct aml_binding_interleave_data {
	int nodes[AML_MAX_NUMA_NODES];
	int count;
};

#define AML_BINDING_INTERLEAVE_DECL(name) \
	struct aml_binding_interleave_data __ ##name## _inner_data; \
	struct aml_binding name = { \
		&aml_binding_interleave_ops, \
		(struct aml_binding_data *)&__ ## name ## _inner_data, \
	};

#define AML_BINDING_INTERLEAVE_ALLOCSIZE \
	(sizeof(struct aml_binding_interleave_data) + \
	 sizeof(struct aml_binding))

/*******************************************************************************
 * DMA:
 * Management of low-level movement of memory.
 ******************************************************************************/

/* Internal macros used for tracking DMA request types.  */
/* Invalid request type.  Used for marking inactive requests in the vector.  */
#define AML_DMA_REQUEST_TYPE_INVALID -1
/* Copy request type.  Uses memcpy() for data migration.  */
#define AML_DMA_REQUEST_TYPE_COPY 0
/* Move request type.  Uses move_pages() for data migration.  */
#define AML_DMA_REQUEST_TYPE_MOVE 1

struct aml_dma_request;
struct aml_dma_data;

struct aml_dma_ops {
	int (*create_request)(struct aml_dma_data *dma,
			      struct aml_dma_request **req, int type,
			      va_list args);
	int (*destroy_request)(struct aml_dma_data *dma,
			       struct aml_dma_request *req);
	int (*wait_request)(struct aml_dma_data *dma,
			    struct aml_dma_request *req);
};

struct aml_dma {
	struct aml_dma_ops *ops;
	struct aml_dma_data *data;
};

/*
 * Requests a synchronous data copy between two different tiles, using
 * memcpy() or equivalent.
 * "dma": an initialized DMA structure.
 * Variadic arguments:
 * - "dt": an argument of type struct aml_tiling*; the destination tiling
 *         structure.
 * - "dptr": an argument of type void*; the start address of the complete
 *           destination user data structure.
 * - "dtid": an argument of type int; the destination tile identifier.
 * - "st": an argument of type struct aml_tiling*; the source tiling structure.
 * - "sptr": an argument of type void*; the start address of the complete
 *           source user data structure.
 * - "stid": an argument of type int; the source tile identifier.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_dma_copy(struct aml_dma *dma, ...);
/*
 * Requests a data copy between two different tiles.  This is an asynchronous
 * version of aml_dma_copy().
 * "dma": an initialized DMA structure.
 * "req": an address where the pointer to the newly assigned DMA request will be
 *        stored.
 * Variadic arguments: see aml_dma_copy().
 * Returns 0 if successful; an error code otherwise.
 */
int aml_dma_async_copy(struct aml_dma *dma, struct aml_dma_request **req, ...);
/*
 * Requests a synchronous data move of a tile to a new memory area, using
 * move_pages() or equivalent.
 * "dma": an initialized DMA structure.
 * Variadic arguments:
 * - "darea": an argument of type struct aml_area*; the destination memory area
 *         structure.
 * - "st": an argument of type struct aml_tiling*; the tiling structure.
 * - "sptr": an argument of type void*; the start address of the complete
 *           user data structure.
 * - "stid": an argument of type int; the tile identifier.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_dma_move(struct aml_dma *dma, ...);
/*
 * Requests a data move of a tile to a new memory area.  This is an asynchronous
 * version of aml_dma_move().
 * "dma": an initialized DMA structure.
 * "req": an address where the pointer to the newly assigned DMA request will be
 *        stored.
 * Variadic arguments: see aml_dma_move().
 * Returns 0 if successful; an error code otherwise.
 */
int aml_dma_async_move(struct aml_dma *dma, struct aml_dma_request **req, ...);
/*
 * Waits for an asynchronous DMA request to complete.
 * "dma": an initialized DMA structure.
 * "req": a DMA request obtained using aml_dma_async_*() calls.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_dma_wait(struct aml_dma *dma, struct aml_dma_request *req);
/*
 * Tears down an asynchronous DMA request before it completes.
 * "dma": an initialized DMA structure.
 * "req": a DMA request obtained using aml_dma_async_*() calls.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_dma_cancel(struct aml_dma *dma, struct aml_dma_request *req);

/*******************************************************************************
 * Linux Sequential DMA API:
 * DMA logic implemented based on general linux API, with the caller thread
 * used as the only execution thread.
 ******************************************************************************/

extern struct aml_dma_ops aml_dma_linux_seq_ops;

struct aml_dma_request_linux_seq {
	int type;
	void *dest;
	void *src;
	size_t size;
	int count;
	void **pages;
	int *nodes;
};

struct aml_dma_linux_seq_data {
	struct aml_vector requests;
	pthread_mutex_t lock;
};

struct aml_dma_linux_seq_ops {
	int (*do_copy)(struct aml_dma_linux_seq_data *dma,
		       struct aml_dma_request_linux_seq *req);
	int (*do_move)(struct aml_dma_linux_seq_data *dma,
		       struct aml_dma_request_linux_seq *req);
};

struct aml_dma_linux_seq {
	struct aml_dma_linux_seq_ops ops;
	struct aml_dma_linux_seq_data data;
};

#define AML_DMA_LINUX_SEQ_DECL(name) \
	struct aml_dma_linux_seq __ ##name## _inner_data; \
	struct aml_dma name = { \
		&aml_dma_linux_seq_ops, \
		(struct aml_dma_data *)&__ ## name ## _inner_data, \
	};

#define AML_DMA_LINUX_SEQ_ALLOCSIZE \
	(sizeof(struct aml_dma_linux_seq) + \
	 sizeof(struct aml_dma))

/*
 * Allocates and initializes a new sequential DMA.
 * "dma": an address where the pointer to the newly allocated DMA structure
 *        will be stored.
 * Variadic arguments:
 * - "nbreqs": an argument of type size_t; the initial number of slots for
 *             asynchronous requests that are in flight (will be increased
 *             automatically if necessary).
 * Returns 0 if successful; an error code otherwise.
 */
int aml_dma_linux_seq_create(struct aml_dma **dma, ...);
/*
 * Initializes a new sequential DMA.  This is a varargs-variant of the
 * aml_dma_linux_seq_create() routine.
 * "dma": an allocated DMA structure.
 * Variadic arguments: see aml_dma_linux_seq_create().
 * Returns 0 if successful; an error code otherwise.
 */
int aml_dma_linux_seq_init(struct aml_dma *dma, ...);
/*
 * Initializes a new sequential DMA.
 * "dma": an allocated DMA structure.
 * "args": see the variadic arguments of aml_dma_linux_seq_create().
 * Returns 0 if successful; an error code otherwise.
 */
int aml_dma_linux_seq_vinit(struct aml_dma *dma, va_list args);
/*
 * Tears down an initialized sequential DMA.
 * "dma": an initialized DMA structure.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_dma_linux_seq_destroy(struct aml_dma *dma);
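
/*
 * Example: a sketch of a sequential DMA copying tile 0 of "src" into tile 0
 * of "dst", both described by "tiling" (error checking omitted):
 *
 *   struct aml_dma *dma;
 *   struct aml_dma_request *req;
 *   aml_dma_linux_seq_create(&dma, (size_t)16);       // 16 request slots
 *   aml_dma_copy(dma, tiling, dst, 0, tiling, src, 0);     // synchronous
 *   aml_dma_async_copy(dma, &req, tiling, dst, 1, tiling, src, 1);
 *   aml_dma_wait(dma, req);                   // complete the async copy
 *   aml_dma_linux_seq_destroy(dma);
 */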

/*******************************************************************************
 * Linux Parallel DMA API:
 * DMA logic implemented based on general linux API, with multiple helper
 * threads used for execution.
 ******************************************************************************/

extern struct aml_dma_ops aml_dma_linux_par_ops;

struct aml_dma_linux_par_thread_data {
	int tid;
	pthread_t thread;
	struct aml_dma_linux_par *dma;
	struct aml_dma_request_linux_par *req;
};

struct aml_dma_request_linux_par {
	int type;
	void *dest;
	void *src;
	size_t size;
	int count;
	void **pages;
	int *nodes;
	struct aml_dma_linux_par_thread_data *thread_data;
};

struct aml_dma_linux_par_data {
	size_t nbthreads;
	struct aml_vector requests;
	pthread_mutex_t lock;
};

struct aml_dma_linux_par_ops {
	void *(*do_thread)(void *);
	int (*do_copy)(struct aml_dma_linux_par_data *,
		       struct aml_dma_request_linux_par *, int tid);
	int (*do_move)(struct aml_dma_linux_par_data *,
		       struct aml_dma_request_linux_par *, int tid);
};

struct aml_dma_linux_par {
	struct aml_dma_linux_par_ops ops;
	struct aml_dma_linux_par_data data;
};

#define AML_DMA_LINUX_PAR_DECL(name) \
	struct aml_dma_linux_par __ ##name## _inner_data; \
	struct aml_dma name = { \
		&aml_dma_linux_par_ops, \
		(struct aml_dma_data *)&__ ## name ## _inner_data, \
	};

#define AML_DMA_LINUX_PAR_ALLOCSIZE \
	(sizeof(struct aml_dma_linux_par) + \
	 sizeof(struct aml_dma))

/*
 * Allocates and initializes a new parallel DMA.
 * "dma": an address where the pointer to the newly allocated DMA structure
 *        will be stored.
 * Variadic arguments:
 * - "nbreqs": an argument of type size_t; the initial number of slots for
 *             asynchronous requests that are in flight (will be increased
 *             automatically if necessary).
 * - "nbthreads": an argument of type size_t; the number of threads to launch
 *                for each request.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_dma_linux_par_create(struct aml_dma **dma, ...);
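/*
 * Example (sketch; the counts are placeholders):
 *
 *	struct aml_dma *dma;
 *	// 16 in-flight request slots, 4 threads per request.
 *	aml_dma_linux_par_create(&dma, (size_t)16, (size_t)4);
 */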
/*
 * Initializes a new parallel DMA.  This is a varargs-variant of the
 * aml_dma_linux_par_create() routine.
 * "dma": an allocated DMA structure.
 * Variadic arguments: see aml_dma_linux_par_create().
 * Returns 0 if successful; an error code otherwise.
 */
int aml_dma_linux_par_init(struct aml_dma *dma, ...);
/*
 * Initializes a new parallel DMA.
 * "dma": an allocated DMA structure.
 * "args": see the variadic arguments of aml_dma_linux_par_create().
 * Returns 0 if successful; an error code otherwise.
 */
int aml_dma_linux_par_vinit(struct aml_dma *dma, va_list args);
/*
 * Tears down an initialized parallel DMA.
 * "dma": an allocated DMA structure.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_dma_linux_par_destroy(struct aml_dma *dma);

/*******************************************************************************
 * Scratchpad:
 * Uses an area to stage data in and out of another area.
 * A dma handles the movement itself.
 ******************************************************************************/

struct aml_scratch_request;
struct aml_scratch_data;

/* Internal macros used for tracking scratchpad request types.  */
/* Invalid request type.  Used for marking inactive requests in the vector.  */
#define AML_SCRATCH_REQUEST_TYPE_INVALID -1
/* Push from the scratchpad to regular memory.  */
#define AML_SCRATCH_REQUEST_TYPE_PUSH 0
/* Pull from regular memory to the scratchpad.  */
#define AML_SCRATCH_REQUEST_TYPE_PULL 1

struct aml_scratch_ops {
	int (*create_request)(struct aml_scratch_data *scratch,
			      struct aml_scratch_request **req, int type,
			      va_list args);
	int (*destroy_request)(struct aml_scratch_data *scratch,
			       struct aml_scratch_request *req);
	int (*wait_request)(struct aml_scratch_data *scratch,
			    struct aml_scratch_request *req);
	void *(*baseptr)(const struct aml_scratch_data *scratch);
	int (*release)(struct aml_scratch_data *scratch, int scratchid);
};

struct aml_scratch {
	struct aml_scratch_ops *ops;
	struct aml_scratch_data *data;
};

/*
 * Requests a synchronous pull from regular memory to the scratchpad.
 * "scratch": an initialized scratchpad structure.
 * Variadic arguments:
 * - "scratchptr": an argument of type void*; the scratchpad base pointer (see
 *                 aml_scratch_baseptr()).
 * - "scratchid": an argument of type int*; gets filled with the identifier of
 *                the scratch tile into which the data will be pulled.
 * - "srcptr": an argument of type void*; the start address of the complete
 *             source user data structure.
 * - "srcid": an argument of type int; the source tile identifier.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_scratch_pull(struct aml_scratch *scratch, ...);
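/*
 * Example (sketch; assumes "scratch" stages tiles of an array "a" and that
 * tile "i" is wanted in the scratchpad):
 *
 *	void *sp = aml_scratch_baseptr(scratch);
 *	int stile;
 *	aml_scratch_pull(scratch, sp, &stile, a, i);
 *	// The data now resides in tile "stile" of the scratchpad.
 */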
/*
 * Requests a pull from regular memory to the scratchpad.  This is an
 * asynchronous version of aml_scratch_pull().
 * "scratch": an initialized scratchpad structure.
 * "req": an address where the pointer to the newly assigned scratch request
 *        will be stored.
 * Variadic arguments: see aml_scratch_pull().
 * Returns 0 if successful; an error code otherwise.
 */
int aml_scratch_async_pull(struct aml_scratch *scratch,
			   struct aml_scratch_request **req, ...);
/*
 * Requests a synchronous push from the scratchpad to regular memory.
 * "scratch": an initialized scratchpad structure.
 * Variadic arguments:
 * - "srcptr": an argument of type void*; the start address of the complete
 *             source user data structure.
 * - "srcid": an argument of type int*; gets filled with the identifier of the
 *            source tile the data will be pushed into (the tile it was
 *            originally pulled from).
 * - "scratchptr": an argument of type void*; the scratchpad base pointer (see
 *                 aml_scratch_baseptr()).
 * - "scratchid": an argument of type int; the scratchpad tile identifier.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_scratch_push(struct aml_scratch *scratch, ...);
/*
 * Requests a push from the scratchpad to regular memory.  This is an
 * asynchronous version of aml_scratch_push().
 * "scratch": an initialized scratchpad structure.
 * "req": an address where the pointer to the newly assigned scratch request
 *        will be stored.
 * Variadic arguments: see aml_scratch_push().
 * Returns 0 if successful; an error code otherwise.
 */
int aml_scratch_async_push(struct aml_scratch *scratch,
			   struct aml_scratch_request **req, ...);
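/*
 * Example (sketch pairing an asynchronous push with a wait; "a", "sp" and
 * "stile" continue the pull example above):
 *
 *	struct aml_scratch_request *req;
 *	int srcid;
 *	aml_scratch_async_push(scratch, &req, a, &srcid, sp, stile);
 *	// ... computation overlapped with the transfer ...
 *	aml_scratch_wait(scratch, req);
 */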
/*
 * Waits for an asynchronous scratch request to complete.
 * "scratch": an initialized scratchpad structure.
 * "req": a scratch request obtained using aml_scratch_async_*() calls.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_scratch_wait(struct aml_scratch *scratch,
		     struct aml_scratch_request *req);

/*
 * Tears down an asynchronous scratch request before it completes.
 * "scratch": an initialized scratch structure.
 * "req": a scratch request obtained using aml_scratch_async_*() calls.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_scratch_cancel(struct aml_scratch *scratch,
		       struct aml_scratch_request *req);
/*
 * Provides the location of the scratchpad.
 * "scratch": an initialized scratch structure.
 * Returns a base pointer to the scratchpad memory buffer.
 */
void *aml_scratch_baseptr(const struct aml_scratch *scratch);

/*
 * Releases a scratch tile for immediate reuse.
 * "scratch": an initialized scratchpad structure.
 * "scratchid": the scratchpad tile identifier.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_scratch_release(struct aml_scratch *scratch, int scratchid);
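/*
 * Example (sketch; continues the push example above):
 *
 *	// Once the push has completed, the scratch tile can be recycled:
 *	aml_scratch_release(scratch, stile);
 */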

/*******************************************************************************
 * Sequential scratchpad API:
 * Scratchpad uses the calling thread to trigger asynchronous dma movements.
 ******************************************************************************/

extern struct aml_scratch_ops aml_scratch_seq_ops;

struct aml_scratch_request_seq {
	int type;
	struct aml_tiling *tiling;
	void *srcptr;
	int srcid;
	void *dstptr;
	int dstid;
	struct aml_dma_request *dma_req;
};

struct aml_scratch_seq_data {
	struct aml_area *src_area, *sch_area;
	struct aml_tiling *tiling;
	struct aml_dma *dma;
	void *sch_ptr;
	struct aml_vector tilemap;
	struct aml_vector requests;
	pthread_mutex_t lock;
};

struct aml_scratch_seq_ops {
	int (*doit)(struct aml_scratch_seq_data *scratch,
		    struct aml_scratch_request_seq *req);
};

struct aml_scratch_seq {
	struct aml_scratch_seq_ops ops;
	struct aml_scratch_seq_data data;
};

#define AML_SCRATCH_SEQ_DECL(name) \
	struct aml_scratch_seq __ ##name## _inner_data; \
	struct aml_scratch name = { \
		&aml_scratch_seq_ops, \
		(struct aml_scratch_data *)&__ ## name ## _inner_data, \
	};

#define AML_SCRATCH_SEQ_ALLOCSIZE \
	(sizeof(struct aml_scratch_seq) + \
	 sizeof(struct aml_scratch))

/*
 * Allocates and initializes a new sequential scratchpad.
 * "scratch": an address where the pointer to the newly allocated scratchpad
 *            structure will be stored.
 * Variadic arguments:
 * - "scratch_area": an argument of type struct aml_area*; the memory area
 *                   where the scratchpad will be allocated from.
 * - "source_area": an argument of type struct aml_area*; the memory area
 *                  containing the user data structure.
 * - "dma": an argument of type struct aml_dma*; the DMA that will be used for
 *          migrating data to and from the scratchpad.
 * - "tiling": an argument of type struct aml_tiling*; the tiling to use on the
 *             user data structure and the scratchpad.
 * - "nbtiles": an argument of type size_t; number of tiles to divide the
 *              scratchpad into.
 * - "nbreqs": an argument of type size_t; the initial number of slots for
 *             asynchronous request that are in-flight (will be increased
 *             automatically if necessary).
 * Returns 0 if successful; an error code otherwise.
 */
int aml_scratch_seq_create(struct aml_scratch **scratch, ...);
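/*
 * Example (sketch; assumes a "fast" scratch area, a "slow" source area, a
 * "dma" and a "tiling" built earlier, with placeholder tile/request counts):
 *
 *	struct aml_scratch *scratch;
 *	aml_scratch_seq_create(&scratch, fast, slow, dma, tiling,
 *			       (size_t)8, (size_t)4);
 *	// ... pull/push tiles ...
 *	aml_scratch_seq_destroy(scratch);
 */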
/*
 * Initializes a new sequential scratchpad.  This is a varargs-variant of the
 * aml_scratch_seq_create() routine.
 * "scratch": an allocated scratchpad structure.
 * Variadic arguments: see aml_scratch_seq_create().
 * Returns 0 if successful; an error code otherwise.
 */
int aml_scratch_seq_init(struct aml_scratch *scratch, ...);
/*
 * Initializes a new sequential scratchpad.
 * "scratch": an allocated scratchpad structure.
 * "args": see the variadic arguments of aml_scratch_seq_create().
 * Returns 0 if successful; an error code otherwise.
 */
int aml_scratch_seq_vinit(struct aml_scratch *scratch, va_list args);
/*
 * Tears down an initialized sequential scratchpad.
 * "scratch": an initialized scratchpad structure.
 * Returns 0 if successful; an error code otherwise.
 */
int aml_scratch_seq_destroy(struct aml_scratch *scratch);