Commit 1e0d24b8 authored by Swann Perarnau

[feature] areas can now provide their binding

Allows memory movement logic to ask a target area how memory should be
bound to it.

Note that it would be safer in the long term to have areas take a
binding at creation time, and translate to nodemasks internally.
parent 085b9762
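For illustration only, not part of the patch: a minimal sketch of how movement logic might use the new entry point. The "aml.h" include, the caller-provided binding storage, the 0-on-success and NULL-on-failure conventions, and the NULL address hint passed to aml_area_mmap() are all assumptions; only aml_area_binding() and aml_area_mmap() come from this diff, and the memcpy() stands in for whatever mechanism actually performs the move.

#include <string.h>
#include "aml.h"	/* assumed public header carrying the declarations below */

/* Hypothetical helper: ask the destination area how data placed in it should
 * be bound, then map a destination buffer and fill it. */
int move_into_area(struct aml_area *dest, struct aml_binding *b,
		void *src, size_t size)
{
	/* new in this commit: the area reports its own binding (single node,
	 * interleave, ...) so the mover can replicate that placement */
	int err = aml_area_binding(dest, b);
	if(err != 0)
		return err;

	void *dst = aml_area_mmap(dest, NULL, size);
	if(dst == NULL)
		return -1;

	/* the real movement would honor *b; a plain copy stands in here */
	memcpy(dst, src, size);
	return 0;
}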
@@ -23,6 +23,7 @@
******************************************************************************/
struct aml_area;
struct aml_binding;
/*******************************************************************************
* Arenas:
@@ -103,6 +104,7 @@ struct aml_area_ops {
	void (*release)(struct aml_area_data *, void *);
	void *(*mmap)(struct aml_area_data *, void *ptr, size_t);
	int (*available)(struct aml_area_data *);
	int (*binding)(struct aml_area_data *, struct aml_binding *);
};
struct aml_area {
@@ -174,10 +176,13 @@ struct aml_area_linux_mbind_data {
struct aml_area_linux_mbind_ops {
	int (*pre_bind)(struct aml_area_linux_mbind_data *);
	int (*post_bind)(struct aml_area_linux_mbind_data *, void *, size_t);
	int (*binding)(struct aml_area_linux_mbind_data *, struct aml_binding *);
};
int aml_area_linux_mbind_setdata(struct aml_area_linux_mbind_data *, int,
		unsigned long *);
int aml_area_linux_mbind_generic_binding(struct aml_area_linux_mbind_data *,
		struct aml_binding *);
int aml_area_linux_mbind_regular_pre_bind(struct aml_area_linux_mbind_data *);
int aml_area_linux_mbind_regular_post_bind(struct aml_area_linux_mbind_data *,
		void *, size_t);
@@ -270,6 +275,7 @@ void *aml_area_acquire(struct aml_area *, size_t);
void aml_area_release(struct aml_area *, void *);
void *aml_area_mmap(struct aml_area *, void *, size_t);
int aml_area_available(struct aml_area *);
int aml_area_binding(struct aml_area *, struct aml_binding *);
/*******************************************************************************
* DMA Engines:
@@ -54,3 +54,9 @@ int aml_area_available(struct aml_area *area)
	assert(area != NULL);
	return area->ops->available(area->data);
}
int aml_area_binding(struct aml_area *area, struct aml_binding *binding)
{
	assert(area != NULL);
	return area->ops->binding(area->data, binding);
}
@@ -28,6 +28,13 @@ int aml_area_linux_available(struct aml_area_data *a)
	return 1;
}
int aml_area_linux_binding(struct aml_area_data *a, struct aml_binding *b)
{
	assert(a != NULL);
	struct aml_area_linux *area = (struct aml_area_linux *)a;
	return area->ops.mbind.binding(&area->data.mbind, b);
}
/*******************************************************************************
* Public API:
* The actual functions that will be called on the area from users
@@ -110,6 +117,7 @@ struct aml_area_ops aml_area_linux_ops = {
	aml_area_linux_release,
	aml_area_linux_mmap,
	aml_area_linux_available,
	aml_area_linux_binding,
};
/*******************************************************************************
@@ -8,6 +8,29 @@
* Only handles the actual mbind/mempolicy calls
******************************************************************************/
/* common to both methods */
int aml_area_linux_mbind_generic_binding(struct aml_area_linux_mbind_data *data,
		struct aml_binding *b)
{
	assert(data != NULL);
	/* not exactly proper, we should inspect the nodemask to find the real
	 * binding policy.
	 */
	if(data->policy == MPOL_BIND)
	{
		for(int i = 0; i < AML_MAX_NUMA_NODES; i++)
			if(AML_NODEMASK_ISSET(data->nodemask, i))
				return aml_binding_init(b, AML_BINDING_TYPE_SINGLE, i);
	}
	else if(data->policy == MPOL_INTERLEAVE)
	{
		return aml_binding_init(b, AML_BINDING_TYPE_INTERLEAVE,
					data->nodemask);
	}
	return 0;
}
int aml_area_linux_mbind_regular_pre_bind(struct aml_area_linux_mbind_data *data)
{
	assert(data != NULL);
@@ -24,6 +47,7 @@ int aml_area_linux_mbind_regular_post_bind(struct aml_area_linux_mbind_data *dat
struct aml_area_linux_mbind_ops aml_area_linux_mbind_regular_ops = {
	aml_area_linux_mbind_regular_pre_bind,
	aml_area_linux_mbind_regular_post_bind,
	aml_area_linux_mbind_generic_binding,
};
int aml_area_linux_mbind_setdata(struct aml_area_linux_mbind_data *data,
@@ -69,6 +93,7 @@ int aml_area_linux_mbind_mempolicy_post_bind(struct aml_area_linux_mbind_data *d
struct aml_area_linux_mbind_ops aml_area_linux_mbind_mempolicy_ops = {
	aml_area_linux_mbind_mempolicy_pre_bind,
	aml_area_linux_mbind_mempolicy_post_bind,
	aml_area_linux_mbind_generic_binding,
};
int aml_area_linux_mbind_init(struct aml_area_linux_mbind_data *data,
@@ -23,6 +23,16 @@ int aml_area_posix_available(struct aml_area_data *data)
	return 1;
}
/* same thing here, it makes no sense to ask for this area to provide its
* binding scheme, as no-one should migrate to this area.
*/
int aml_area_posix_binding(struct aml_area_data *data, struct aml_binding *b)
{
	assert(data != NULL);
	/* numa node 0 should always be available */
	return aml_binding_init(b, AML_BINDING_TYPE_SINGLE, 0);
}
/*******************************************************************************
* Public API:
* The actual functions that will be called on the area from users
@@ -74,6 +84,7 @@ struct aml_area_ops aml_area_posix_ops = {
	aml_area_posix_release,
	aml_area_posix_mmap,
	aml_area_posix_available,
	aml_area_posix_binding,
};
/*******************************************************************************