Commit 9059049c authored by Swann Perarnau

[refactor] convert area to new errors, create

Convert area/area_linux to the new error handler.
Implement #31 for area.
Run checkpatch on area.

Note that area_linux still uses a dynamically allocated data structure no
matter what: a bitmask describing the set of authorized nodes. Converting it
to aml_bitmap wouldn't help, as that type has no default value covering all
nodes (the equivalent of numa_all_nodes_ptr).
parent e243f7eb
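For reference, a minimal sketch of the calling convention the examples below are moved to. It is not part of the diff; the header paths and the aml_init()/aml_finalize() entry points are assumed from the surrounding code.

#include <assert.h>
#include <aml.h>
#include <aml/area/linux.h>

int main(int argc, char *argv[])
{
	struct aml_area *area;
	size_t size = 1UL << 20;
	void *buf;

	assert(aml_init(&argc, &argv) == AML_SUCCESS);

	/* Creation now returns an error code and hands the area back
	 * through an output parameter instead of returning a pointer. */
	assert(!aml_area_linux_create(&area, AML_AREA_LINUX_MMAP_FLAG_PRIVATE,
				      NULL, AML_AREA_LINUX_BINDING_FLAG_BIND));

	buf = aml_area_mmap(area, NULL, size);
	assert(buf != NULL);
	assert(aml_area_munmap(area, buf, size) == AML_SUCCESS);

	/* Destroy now takes the address of the pointer and resets it to NULL. */
	aml_area_linux_destroy(&area);
	aml_finalize();
	return 0;
}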
......@@ -13,6 +13,9 @@
# files
include/aml.h
include/aml/area/linux.h
include/aml/utils/error.h
src/aml.c
src/area/area.c
src/area/linux.c
src/utils/error.c
......@@ -34,10 +34,10 @@ int main(int argc, char *argv[])
long int N = atol(argv[3]);
unsigned long memsize = sizeof(double)*N*N;
slow = aml_area_linux_create(AML_AREA_LINUX_MMAP_FLAG_PRIVATE,
aml_area_linux_create(&slow, AML_AREA_LINUX_MMAP_FLAG_PRIVATE,
&slowb, AML_AREA_LINUX_BINDING_FLAG_BIND);
assert(slow != NULL);
fast = aml_area_linux_create(AML_AREA_LINUX_MMAP_FLAG_PRIVATE,
aml_area_linux_create(&fast, AML_AREA_LINUX_MMAP_FLAG_PRIVATE,
&fastb, AML_AREA_LINUX_BINDING_FLAG_BIND);
assert(fast != NULL);
......@@ -65,8 +65,8 @@ int main(int argc, char *argv[])
aml_area_munmap(slow, a, memsize);
aml_area_munmap(slow, b, memsize);
aml_area_munmap(fast, c, memsize);
aml_area_linux_destroy(slow);
aml_area_linux_destroy(fast);
aml_area_linux_destroy(&slow);
aml_area_linux_destroy(&fast);
aml_finalize();
return 0;
}
......@@ -77,10 +77,10 @@ int main(int argc, char* argv[])
assert(!aml_tiling_init(&tiling_col, AML_TILING_TYPE_2D_COLMAJOR,
tilesize, memsize, N/T , N/T));
slow = aml_area_linux_create(AML_AREA_LINUX_MMAP_FLAG_PRIVATE,
aml_area_linux_create(&slow, AML_AREA_LINUX_MMAP_FLAG_PRIVATE,
&slowb, AML_AREA_LINUX_BINDING_FLAG_BIND);
assert(slow != NULL);
fast = aml_area_linux_create(AML_AREA_LINUX_MMAP_FLAG_PRIVATE,
aml_area_linux_create(&fast, AML_AREA_LINUX_MMAP_FLAG_PRIVATE,
&fastb, AML_AREA_LINUX_BINDING_FLAG_BIND);
assert(fast != NULL);
......@@ -154,8 +154,8 @@ int main(int argc, char* argv[])
aml_area_munmap(slow, a, memsize);
aml_area_munmap(slow, b, memsize);
aml_area_munmap(fast, c, memsize);
aml_area_linux_destroy(slow);
aml_area_linux_destroy(fast);
aml_area_linux_destroy(&slow);
aml_area_linux_destroy(&fast);
aml_tiling_destroy(&tiling_row, AML_TILING_TYPE_2D_ROWMAJOR);
aml_tiling_destroy(&tiling_col, AML_TILING_TYPE_2D_ROWMAJOR);
aml_finalize();
......
......@@ -105,10 +105,10 @@ int main(int argc, char* argv[])
assert(!aml_tiling_init(&tiling_prefetch, AML_TILING_TYPE_1D,
tilesize*(N/T), memsize));
slow = aml_area_linux_create(AML_AREA_LINUX_MMAP_FLAG_PRIVATE,
aml_area_linux_create(&slow, AML_AREA_LINUX_MMAP_FLAG_PRIVATE,
&slowb, AML_AREA_LINUX_BINDING_FLAG_BIND);
assert(slow != NULL);
fast = aml_area_linux_create(AML_AREA_LINUX_MMAP_FLAG_PRIVATE,
aml_area_linux_create(&fast, AML_AREA_LINUX_MMAP_FLAG_PRIVATE,
&fastb, AML_AREA_LINUX_BINDING_FLAG_BIND);
assert(fast != NULL);
......@@ -188,8 +188,8 @@ int main(int argc, char* argv[])
aml_area_munmap(slow, a, memsize);
aml_area_munmap(slow, b, memsize);
aml_area_munmap(fast, c, memsize);
aml_area_linux_destroy(slow);
aml_area_linux_destroy(fast);
aml_area_linux_destroy(&slow);
aml_area_linux_destroy(&fast);
aml_tiling_destroy(&tiling_row, AML_TILING_TYPE_2D_ROWMAJOR);
aml_tiling_destroy(&tiling_col, AML_TILING_TYPE_2D_ROWMAJOR);
aml_tiling_destroy(&tiling_prefetch, AML_TILING_TYPE_1D);
......
......@@ -64,10 +64,10 @@ int main(int argc, char *argv[])
/* initialize all the supporting struct */
assert(!aml_tiling_init(&tiling, AML_TILING_TYPE_1D, tilesz, memsize));
slow = aml_area_linux_create(AML_AREA_LINUX_MMAP_FLAG_PRIVATE,
aml_area_linux_create(&slow, AML_AREA_LINUX_MMAP_FLAG_PRIVATE,
&slowb, AML_AREA_LINUX_BINDING_FLAG_BIND);
assert(slow != NULL);
fast = aml_area_linux_create(AML_AREA_LINUX_MMAP_FLAG_PRIVATE,
aml_area_linux_create(&fast, AML_AREA_LINUX_MMAP_FLAG_PRIVATE,
&fastb, AML_AREA_LINUX_BINDING_FLAG_BIND);
assert(fast != NULL);
assert(!aml_dma_linux_par_init(&dma, numthreads*2, numthreads));
......@@ -124,8 +124,8 @@ int main(int argc, char *argv[])
aml_area_munmap(slow, a, memsize);
aml_area_munmap(slow, b, memsize);
aml_area_munmap(fast, c, memsize);
aml_area_linux_destroy(slow);
aml_area_linux_destroy(fast);
aml_area_linux_destroy(&slow);
aml_area_linux_destroy(&fast);
aml_tiling_destroy(&tiling, AML_TILING_TYPE_1D);
aml_finalize();
return 0;
......
......@@ -95,10 +95,10 @@ int main(int argc, char *argv[])
/* initialize all the supporting struct */
assert(!aml_tiling_init(&tiling, AML_TILING_TYPE_1D, tilesz, memsize));
slow = aml_area_linux_create(AML_AREA_LINUX_MMAP_FLAG_PRIVATE,
aml_area_linux_create(&slow, AML_AREA_LINUX_MMAP_FLAG_PRIVATE,
&slowb, AML_AREA_LINUX_BINDING_FLAG_BIND);
assert(slow != NULL);
fast = aml_area_linux_create(AML_AREA_LINUX_MMAP_FLAG_PRIVATE,
aml_area_linux_create(&fast, AML_AREA_LINUX_MMAP_FLAG_PRIVATE,
&fastb, AML_AREA_LINUX_BINDING_FLAG_BIND);
assert(fast != NULL);
assert(!aml_dma_linux_seq_init(&dma, numthreads*2));
......@@ -137,8 +137,8 @@ int main(int argc, char *argv[])
aml_area_munmap(slow, a, memsize);
aml_area_munmap(slow, b, memsize);
aml_area_munmap(fast, c, memsize);
aml_area_linux_destroy(slow);
aml_area_linux_destroy(fast);
aml_area_linux_destroy(&slow);
aml_area_linux_destroy(&fast);
aml_tiling_destroy(&tiling, AML_TILING_TYPE_1D);
aml_finalize();
return 0;
......
......@@ -101,10 +101,10 @@ int main(int argc, char *argv[])
/* initialize all the supporting struct */
assert(!aml_tiling_init(&tiling, AML_TILING_TYPE_1D, tilesz, memsize));
slow = aml_area_linux_create(AML_AREA_LINUX_MMAP_FLAG_PRIVATE,
aml_area_linux_create(&slow, AML_AREA_LINUX_MMAP_FLAG_PRIVATE,
&slowb, AML_AREA_LINUX_BINDING_FLAG_BIND);
assert(slow != NULL);
fast = aml_area_linux_create(AML_AREA_LINUX_MMAP_FLAG_PRIVATE,
aml_area_linux_create(&fast, AML_AREA_LINUX_MMAP_FLAG_PRIVATE,
&fastb, AML_AREA_LINUX_BINDING_FLAG_BIND);
assert(fast != NULL);
assert(!aml_dma_linux_seq_init(&dma, (size_t)numthreads*4));
......@@ -148,8 +148,8 @@ int main(int argc, char *argv[])
aml_area_munmap(slow, a, memsize);
aml_area_munmap(slow, b, memsize);
aml_area_munmap(fast, c, memsize);
aml_area_linux_destroy(slow);
aml_area_linux_destroy(fast);
aml_area_linux_destroy(&slow);
aml_area_linux_destroy(&fast);
aml_tiling_destroy(&tiling, AML_TILING_TYPE_1D);
aml_finalize();
return 0;
......
......@@ -37,38 +37,12 @@ struct aml_area_linux_data {
};
/* Default linux area with private mapping and no binding. */
extern const struct aml_area aml_area_linux;
extern struct aml_area aml_area_linux;
#define AML_AREA_LINUX_DECL(name) \
struct aml_area_linux_data __ ##name## _inner_data; \
struct aml_area name = { \
&aml_area_linux_ops, \
(struct aml_area_data *)&__ ## name ## _inner_data, \
}
#define AML_AREA_LINUX_ALLOCSIZE \
(sizeof(struct aml_area_linux_data) + \
sizeof(struct aml_area))
/**
* Initialize area data with struct aml_area_linux_binding. Subsequent calls to
* aml_area_mmap() with this returned area will apply binding settings.
* Returns NULL on failure with aml_errno set to:
* - AML_AREA_ENOMEM if there is not enough memory available for the operation
* - AML_AREA_EINVAL flags were not one of linux area flags.
* - AML_AREA_EDOM if binding nodeset is out of allowed nodeset.
**/
struct aml_area* aml_area_linux_create(const int mmap_flags,
const struct aml_bitmap *nodemask,
const int binding_flags);
/**
* Destroy area data containing struct aml_area_linux_binding
**/
void
aml_area_linux_destroy(struct aml_area* area);
/*******************************************************************************
* Linux operators
*******************************************************************************/
/**
* Bind memory of size "size" pointed by "ptr" to binding set in "bind".
......@@ -80,10 +54,10 @@ aml_area_linux_mbind(struct aml_area_linux_data *bind,
void *ptr,
size_t size);
/**
* Function to check whether binding of a ptr obtained with
* aml_area_linux_mmap() then aml_area_linux_mbind() match area settings.
* Returns 1 if mapped memory binding in ptr match area_data binding settings,
/**
* Function to check whether binding of a ptr obtained with
* aml_area_linux_mmap() then aml_area_linux_mbind() match area settings.
* Returns 1 if mapped memory binding in ptr match area_data binding settings,
* else 0.
**/
int
......@@ -91,7 +65,7 @@ aml_area_linux_check_binding(struct aml_area_linux_data *area_data,
void *ptr,
size_t size);
/**
/**
* mmap hook for aml area.
* Fails with AML_FAILURE. On failure errno should be checked for further
* error investigations.
......@@ -101,14 +75,70 @@ aml_area_linux_mmap(const struct aml_area_data *area_data,
void *ptr,
size_t size);
/**
/**
* munmap hook for aml area, to unmap memory mapped with aml_area_linux_mmap().
* Fails with AML_FAILURE. On failure errno should be checked for further
* error investigations.
**/
int
aml_area_linux_munmap(const struct aml_area_data* area_data,
aml_area_linux_munmap(const struct aml_area_data *area_data,
void *ptr,
const size_t size);
/*******************************************************************************
* create/destroy and others
*******************************************************************************/
#define AML_AREA_LINUX_DECL(name) \
struct aml_area_linux_data __ ##name## _inner_data; \
struct aml_area name = { \
&aml_area_linux_ops, \
(struct aml_area_data *)&__ ## name ## _inner_data, \
}
#define AML_AREA_LINUX_ALLOCSIZE \
(sizeof(struct aml_area_linux_data) + \
sizeof(struct aml_area))
/**
* Allocate and initialize a struct aml_area implemented by aml_area_linux
* operations.
* @param[out] area pointer to an uninitialized struct aml_area pointer to
* receive the new area.
* @param[in] mmap_flags flags to use when retrieving virtual memory with mmap
* @param[in] binding_flags flags to use when binding memory.
* @param[in] nodemask list of memory nodes to use. Defaults to the allowed
* memory nodes if NULL.
* @return On success, returns 0 and area points to the new aml_area.
* @return On failure, sets area to NULL and returns one of the AML error codes:
* - AML_ENOMEM if there wasn't enough memory available.
* - AML_EINVAL if the input flags were invalid.
* - AML_EDOM if the provided nodemask is outside the allowed nodeset.
**/
int aml_area_linux_create(struct aml_area **area, const int mmap_flags,
const struct aml_bitmap *nodemask,
const int binding_flags);
/**
* Initialize a struct aml_area declared using the AML_AREA_LINUX_DECL macro.
* See aml_area_linux_create for details on arguments.
*/
int aml_area_linux_init(struct aml_area *area, const int mmap_flags,
const struct aml_bitmap *nodemask,
const int binding_flags);
/**
* Finalize a struct aml_area initialized with aml_area_linux_init.
*/
void aml_area_linux_fini(struct aml_area *area);
/**
* Destroy (finalize and free resources) a struct aml_area created by
* aml_area_linux_create.
*
* @param[in,out] area the area to destroy; set to NULL after this call.
**/
void aml_area_linux_destroy(struct aml_area **area);
#endif //AML_AREA_LINUX_NUMA_H
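For areas kept in static or stack storage, the header pairs the relocated AML_AREA_LINUX_DECL macro with the new init/fini calls. A hedged sketch, assuming aml_area_linux_ops is exported by this header (the macro requires it) and the same include paths as above:

#include <aml.h>
#include <aml/area/linux.h>

/* Storage comes from the macro; setup/teardown from the init/fini pair. */
AML_AREA_LINUX_DECL(myarea);

int use_static_area(const struct aml_bitmap *nodes)
{
	int err;

	err = aml_area_linux_init(&myarea, AML_AREA_LINUX_MMAP_FLAG_PRIVATE,
				  nodes, AML_AREA_LINUX_BINDING_FLAG_PREFERRED);
	if (err != AML_SUCCESS)
		return err;

	/* ... map/unmap through aml_area_mmap(&myarea, ...) here ... */

	aml_area_linux_fini(&myarea);
	return AML_SUCCESS;
}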
......@@ -8,24 +8,21 @@
* SPDX-License-Identifier: BSD-3-Clause
*******************************************************************************/
#include <stdlib.h>
#include "aml.h"
#include <stdlib.h>
void*
aml_area_mmap(const struct aml_area *area,
void **ptr,
size_t size)
void *aml_area_mmap(const struct aml_area *area, void **ptr, size_t size)
{
if(size == 0)
if (size == 0)
return NULL;
if(area == NULL){
aml_errno = AML_AREA_EINVAL;
if (area == NULL) {
aml_errno = AML_EINVAL;
return NULL;
}
if(area->ops->mmap == NULL){
aml_errno = AML_AREA_ENOTSUP;
if (area->ops->mmap == NULL) {
aml_errno = AML_ENOTSUP;
return NULL;
}
......@@ -33,20 +30,17 @@ aml_area_mmap(const struct aml_area *area,
return area->ops->mmap(area->data, ptr, size);
}
int
aml_area_munmap(const struct aml_area *area,
void *ptr,
size_t size)
int aml_area_munmap(const struct aml_area *area, void *ptr, size_t size)
{
if(ptr == NULL || size == 0)
if (ptr == NULL || size == 0)
return AML_SUCCESS;
if(area == NULL)
return AML_AREA_EINVAL;
if(area->ops->munmap == NULL)
return AML_AREA_ENOTSUP;
if (area == NULL)
return -AML_EINVAL;
if (area->ops->munmap == NULL)
return -AML_ENOTSUP;
return area->ops->munmap(area->data, ptr, size);
}
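The conversion above fixes the caller-facing convention: pointer-returning entry points report failures through aml_errno, while int-returning ones return a negated AML error code. A small caller-side sketch, not from the repository, with an illustrative error-reporting style:

#include <stdio.h>
#include <stdlib.h>
#include <aml.h>

/* Map then unmap "size" bytes from "area", checking both error paths. */
int map_unmap(struct aml_area *area, size_t size)
{
	void *p = aml_area_mmap(area, NULL, size);

	if (p == NULL) {
		/* Pointer-returning calls record the failure in aml_errno. */
		fprintf(stderr, "aml_area_mmap: aml_errno=%d\n", aml_errno);
		return 1;
	}

	int err = aml_area_munmap(area, p, size);

	if (err != AML_SUCCESS) {
		/* Int-returning calls hand back -AML_E* codes directly. */
		fprintf(stderr, "aml_area_munmap: error %d\n", -err);
		return 1;
	}
	return 0;
}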
This diff is collapsed.
......@@ -29,14 +29,13 @@ void test_binding(struct aml_bitmap *bitmap){
size_t s;
int bf, mf, i, nnodes, binding_flag, mmap_flag;
struct aml_area *area;
for(bf=0; bf<sizeof(binding_flags)/sizeof(*binding_flags); bf++){
binding_flag = binding_flags[bf];
for(mf=0; mf<sizeof(mmap_flags)/sizeof(*mmap_flags); mf++){
mmap_flag = mmap_flags[mf];
for(s = 0; s<sizeof(sizes)/sizeof(*sizes); s++){
area = aml_area_linux_create(mmap_flag, bitmap, binding_flag);
aml_area_linux_create(&area, mmap_flag, bitmap, binding_flag);
assert(area != NULL);
ptr = area->ops->mmap((struct aml_area_data*)area->data,
NULL,
......@@ -45,6 +44,7 @@ void test_binding(struct aml_bitmap *bitmap){
memset(ptr, 0, sizes[s]);
assert(aml_area_linux_check_binding((struct aml_area_linux_data*)area->data, ptr, sizes[s]) > 0);
assert(area->ops->munmap((struct aml_area_data*)area->data, ptr, sizes[s]) == AML_SUCCESS);
aml_area_linux_destroy(&area);
}
}
......@@ -56,16 +56,17 @@ void test_bind(){
struct bitmask *nodeset;
int i, num_nodes;
struct aml_bitmap bitmap;
struct aml_area *area;
nodeset = numa_get_mems_allowed();
num_nodes = numa_bitmask_weight(nodeset);
aml_bitmap_fill(&bitmap);
if(aml_bitmap_last(&bitmap) > num_nodes){
assert(aml_area_linux_create(AML_AREA_LINUX_MMAP_FLAG_PRIVATE,
assert(aml_area_linux_create(&area, AML_AREA_LINUX_MMAP_FLAG_PRIVATE,
&bitmap,
AML_AREA_LINUX_BINDING_FLAG_PREFERRED) == NULL);
assert(aml_errno == AML_AREA_EDOM);
AML_AREA_LINUX_BINDING_FLAG_PREFERRED) == -AML_EDOM);
assert(area == NULL);
}
test_binding(NULL);
......