Commit e04522e9 authored by Swann Perarnau

[refactor] remove move operation from DMA

This feature was poorly supported: it doesn't play well with the rest of
the library and limits what we can do in the future.
parent 6c2c6cdb
@@ -1116,8 +1116,6 @@ struct aml_binding_interleave_data {
#define AML_DMA_REQUEST_TYPE_INVALID -1
/* Copy request type. Uses memcpy() for data migration. */
#define AML_DMA_REQUEST_TYPE_COPY 0
/* Move request type. Uses move_pages() for data migration. */
#define AML_DMA_REQUEST_TYPE_MOVE 1
struct aml_dma_request;
struct aml_dma_data;
@@ -1164,31 +1162,6 @@ int aml_dma_copy(struct aml_dma *dma, ...);
* Returns 0 if successful; an error code otherwise.
*/
int aml_dma_async_copy(struct aml_dma *dma, struct aml_dma_request **req, ...);
/*
* Requests a synchronous data move of a tile to a new memory area, using
* move_pages() or equivalent.
* "dma": an initialized DMA structure.
* Variadic arguments:
* - "darea": an argument of type struct aml_area*; the destination memory area
* structure.
* - "st": an argument of type struct aml_tiling*; the tiling structure.
* - "sptr": an argument of type void*; the start address of the complete
* user data structure.
* - "stid": an argument of type int; the tile identifier.
* Returns 0 if successful; an error code otherwise.
*/
int aml_dma_move(struct aml_dma *dma, ...);
/*
* Requests a data move of a tile to a new memory area. This is an asynchronous
* version of aml_dma_move().
* "dma": an initialized DMA structure.
* "req": an address where the pointer to the newly assigned DMA request will be
* stored.
* Variadic arguments: see aml_dma_move().
* Returns 0 if successful; an error code otherwise.
 */
int aml_dma_async_move(struct aml_dma *dma, struct aml_dma_request **req, ...);
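For reference, the removed entry points were called like this. This is a sketch reconstructed from the doc comments above and the tests touched by this commit; it is not compilable on its own, and the name of the generic wait call is an assumption:

```c
#include <aml.h> /* assumed main library header */

/* Sketch: invoking the removed move API on an initialized dma,
 * destination area, tiling, and source buffer. */
void move_tile_example(struct aml_dma *dma, struct aml_area *area,
                       struct aml_tiling *tiling, void *src)
{
	struct aml_dma_request *req;

	/* synchronous: blocks until tile 0's pages are migrated */
	aml_dma_move(dma, area, tiling, src, 0);

	/* asynchronous: create the request, then wait explicitly
	 * (aml_dma_wait is assumed to be the wait entry point) */
	aml_dma_async_move(dma, &req, area, tiling, src, 0);
	aml_dma_wait(dma, req);
}
```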
/*
* Waits for an asynchronous DMA request to complete.
* "dma": an initialized DMA structure.
......
@@ -31,9 +31,6 @@ struct aml_dma_request_linux_par {
void *dest;
void *src;
size_t size;
int count;
void **pages;
int *nodes;
struct aml_dma_linux_par_thread_data *thread_data;
};
@@ -47,8 +44,6 @@ struct aml_dma_linux_par_ops {
void *(*do_thread)(void *);
int (*do_copy)(struct aml_dma_linux_par_data *,
struct aml_dma_request_linux_par *, int tid);
int (*do_move)(struct aml_dma_linux_par_data *,
struct aml_dma_request_linux_par *, int tid);
};
struct aml_dma_linux_par {
......
@@ -24,9 +24,6 @@ struct aml_dma_request_linux_seq {
void *dest;
void *src;
size_t size;
int count;
void **pages;
int *nodes;
};
struct aml_dma_linux_seq_data {
@@ -37,8 +34,6 @@ struct aml_dma_linux_seq_data {
struct aml_dma_linux_seq_ops {
int (*do_copy)(struct aml_dma_linux_seq_data *dma,
struct aml_dma_request_linux_seq *req);
int (*do_move)(struct aml_dma_linux_seq_data *dma,
struct aml_dma_request_linux_seq *req);
};
struct aml_dma_linux_seq {
@@ -98,12 +93,4 @@ int aml_dma_linux_seq_destroy(struct aml_dma *dma);
int aml_dma_linux_seq_do_copy(struct aml_dma_linux_seq_data *dma,
struct aml_dma_request_linux_seq *req);
/* Performs a move request.
* "dma" the dma_linux_seq_data associated with a linux_seq dma.
* "req" a valid linux_seq request.
* Returns 0 if successful; an error code otherwise.
*/
int aml_dma_linux_seq_do_move(struct aml_dma_linux_seq_data *dma,
struct aml_dma_request_linux_seq *req);
#endif // AML_DMA_LINUX_SEQ_H
@@ -47,34 +47,6 @@ int aml_dma_async_copy(struct aml_dma *dma, struct aml_dma_request **req, ...)
return ret;
}
int aml_dma_move(struct aml_dma *dma, ...)
{
assert(dma != NULL);
struct aml_dma_request *req;
va_list ap;
int ret;
va_start(ap, dma);
ret = dma->ops->create_request(dma->data, &req,
AML_DMA_REQUEST_TYPE_MOVE, ap);
va_end(ap);
ret = dma->ops->wait_request(dma->data, req);
return ret;
}
int aml_dma_async_move(struct aml_dma *dma, struct aml_dma_request **req, ...)
{
assert(dma != NULL);
assert(req != NULL);
va_list ap;
int ret;
va_start(ap, req);
ret = dma->ops->create_request(dma->data, req,
AML_DMA_REQUEST_TYPE_MOVE, ap);
va_end(ap);
return ret;
}
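Callers that still need NUMA page migration after this removal can go through the kernel interface that backed these wrappers, move_pages(2), directly. A minimal sketch assuming a page-aligned buffer and a single destination node; migrate_to_node is a hypothetical helper, not an AML function (link with -lnuma):

```c
#include <numaif.h> /* move_pages(), MPOL_MF_MOVE */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Hypothetical helper: migrate "count" pages starting at the
 * page-aligned address "base" to NUMA node "node". */
static int migrate_to_node(void *base, size_t count, int node)
{
	long pagesize = sysconf(_SC_PAGESIZE);
	void **pages = calloc(count, sizeof(*pages));
	int *nodes = calloc(count, sizeof(*nodes));
	int *status = calloc(count, sizeof(*status));
	int ret = -1;

	if (pages != NULL && nodes != NULL && status != NULL) {
		for (size_t i = 0; i < count; i++) {
			pages[i] = (char *)base + i * pagesize;
			nodes[i] = node;
		}
		/* pid 0 targets the calling process; 0 means full
		 * success, -1 with errno set means hard failure */
		if (move_pages(0, count, pages, nodes, status,
		               MPOL_MF_MOVE) == 0)
			ret = 0;
		else
			perror("move_pages");
	}
	free(pages);
	free(nodes);
	free(status);
	return ret;
}
```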
int aml_dma_cancel(struct aml_dma *dma, struct aml_dma_request *req)
{
assert(dma != NULL);
......
@@ -18,7 +18,7 @@
/*******************************************************************************
 * Linux-backed, parallel dma
* The dma itself is organized into several different components
- * - request types: copy or move
+ * - request types: copy
* - implementation of the request
* - user API (i.e. generic request creation and call)
* - how to init the dma
@@ -51,33 +51,6 @@ int aml_dma_request_linux_par_copy_destroy(struct aml_dma_request_linux_par *r)
return 0;
}
int aml_dma_request_linux_par_move_init(struct aml_dma_request_linux_par *req,
struct aml_area *darea,
struct aml_tiling *tiling,
void *startptr, int tileid)
{
assert(req != NULL);
struct aml_binding *binding;
req->type = AML_DMA_REQUEST_TYPE_MOVE;
aml_area_binding(darea, &binding);
req->count = aml_binding_nbpages(binding, tiling, startptr, tileid);
req->pages = calloc(req->count, sizeof(void *));
req->nodes = calloc(req->count, sizeof(int));
aml_binding_pages(binding, req->pages, tiling, startptr, tileid);
aml_binding_nodes(binding, req->nodes, tiling, startptr, tileid);
free(binding);
return 0;
}
int aml_dma_request_linux_par_move_destroy(struct aml_dma_request_linux_par *req)
{
assert(req != NULL);
free(req->pages);
free(req->nodes);
return 0;
}
/*******************************************************************************
* Internal functions
******************************************************************************/
@@ -89,8 +62,6 @@ void *aml_dma_linux_par_do_thread(void *arg)
if(data->req->type == AML_DMA_REQUEST_TYPE_COPY)
data->dma->ops.do_copy(&data->dma->data, data->req, data->tid);
else if(data->req->type == AML_DMA_REQUEST_TYPE_MOVE)
data->dma->ops.do_move(&data->dma->data, data->req, data->tid);
return NULL;
}
@@ -114,36 +85,9 @@ int aml_dma_linux_par_do_copy(struct aml_dma_linux_par_data *dma,
return 0;
}
int aml_dma_linux_par_do_move(struct aml_dma_linux_par_data *dma,
struct aml_dma_request_linux_par *req, int tid)
{
assert(dma != NULL);
assert(req != NULL);
size_t nbthreads = dma->nbthreads;
size_t chunksize = req->count / nbthreads;
size_t idx = tid * chunksize;
if(tid == nbthreads - 1 && req->count > chunksize * nbthreads)
chunksize += req->count % nbthreads;
int status[chunksize];
int err;
err = move_pages(0, chunksize, &req->pages[idx], &req->nodes[idx],
status, MPOL_MF_MOVE);
if(err)
{
perror("move_pages:");
return errno;
}
return 0;
}
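The parallel path above split the request's page array into equal per-thread chunks and folded the remainder into the last thread. A standalone illustration of that partitioning arithmetic (hypothetical names, for clarity only):

```c
#include <stdio.h>

/* Same split as the removed do_move: count/nbthreads pages per
 * thread, remainder absorbed by the last one. */
static void chunk_bounds(size_t count, size_t nbthreads, size_t tid,
                         size_t *idx, size_t *chunksize)
{
	*chunksize = count / nbthreads;
	*idx = tid * *chunksize;
	if (tid == nbthreads - 1)
		*chunksize += count % nbthreads;
}

int main(void)
{
	/* 10 pages over 4 threads -> [0,2) [2,4) [4,6) [6,10) */
	for (size_t tid = 0; tid < 4; tid++) {
		size_t idx, len;
		chunk_bounds(10, 4, tid, &idx, &len);
		printf("tid %zu: pages [%zu, %zu)\n", tid, idx, idx + len);
	}
	return 0;
}
```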
struct aml_dma_linux_par_ops aml_dma_linux_par_inner_ops = {
aml_dma_linux_par_do_thread,
aml_dma_linux_par_do_copy,
aml_dma_linux_par_do_move,
};
/*******************************************************************************
@@ -179,15 +123,6 @@ int aml_dma_linux_par_create_request(struct aml_dma_data *d,
aml_dma_request_linux_par_copy_init(req, dt, dptr, dtid,
st, sptr, stid);
}
else if(type == AML_DMA_REQUEST_TYPE_MOVE)
{
struct aml_area *darea = va_arg(ap, struct aml_area *);
struct aml_tiling *st = va_arg(ap, struct aml_tiling *);
void *sptr = va_arg(ap, void *);
int stid = va_arg(ap, int);
aml_dma_request_linux_par_move_init(req, darea, st, sptr,
stid);
}
pthread_mutex_unlock(&dma->data.lock);
for(int i = 0; i < dma->data.nbthreads; i++)
@@ -222,8 +157,6 @@ int aml_dma_linux_par_destroy_request(struct aml_dma_data *d,
if(req->type == AML_DMA_REQUEST_TYPE_COPY)
aml_dma_request_linux_par_copy_destroy(req);
else if(req->type == AML_DMA_REQUEST_TYPE_MOVE)
aml_dma_request_linux_par_move_destroy(req);
pthread_mutex_lock(&dma->data.lock);
aml_vector_remove(&dma->data.requests, req);
@@ -246,8 +179,6 @@ int aml_dma_linux_par_wait_request(struct aml_dma_data *d,
/* destroy a completed request */
if(req->type == AML_DMA_REQUEST_TYPE_COPY)
aml_dma_request_linux_par_copy_destroy(req);
else if(req->type == AML_DMA_REQUEST_TYPE_MOVE)
aml_dma_request_linux_par_move_destroy(req);
pthread_mutex_lock(&dma->data.lock);
aml_vector_remove(&dma->data.requests, req);
......
@@ -51,33 +51,6 @@ int aml_dma_request_linux_seq_copy_destroy(struct aml_dma_request_linux_seq *r)
return 0;
}
int aml_dma_request_linux_seq_move_init(struct aml_dma_request_linux_seq *req,
struct aml_area *darea,
const struct aml_tiling *tiling,
void *startptr, int tileid)
{
assert(req != NULL);
struct aml_binding *binding;
req->type = AML_DMA_REQUEST_TYPE_MOVE;
aml_area_binding(darea, &binding);
req->count = aml_binding_nbpages(binding, tiling, startptr, tileid);
req->pages = calloc(req->count, sizeof(void *));
req->nodes = calloc(req->count, sizeof(int));
aml_binding_pages(binding, req->pages, tiling, startptr, tileid);
aml_binding_nodes(binding, req->nodes, tiling, startptr, tileid);
free(binding);
return 0;
}
int aml_dma_request_linux_seq_move_destroy(struct aml_dma_request_linux_seq *req)
{
assert(req != NULL);
free(req->pages);
free(req->nodes);
return 0;
}
/*******************************************************************************
* Internal functions
******************************************************************************/
@@ -90,26 +63,8 @@ int aml_dma_linux_seq_do_copy(struct aml_dma_linux_seq_data *dma,
return 0;
}
int aml_dma_linux_seq_do_move(struct aml_dma_linux_seq_data *dma,
struct aml_dma_request_linux_seq *req)
{
assert(dma != NULL);
assert(req != NULL);
int status[req->count];
int err;
err = move_pages(0, req->count, req->pages, req->nodes, status,
MPOL_MF_MOVE);
if(err)
{
perror("move_pages:");
return errno;
}
return 0;
}
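Worth noting: move_pages() reports failure two ways. It returns -1 with errno set on a hard failure, but it can also return a positive count of pages it could not migrate, leaving a negative errno code per failed page in "status"; the removed code above treated any nonzero return as errno and never inspected "status". A sketch of the fuller check (hypothetical helper, not part of this commit):

```c
#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical helper: interpret a move_pages() return value "err"
 * together with its per-page "status" array of length "count". */
static int check_move_pages(long err, const int *status, size_t count)
{
	if (err < 0) {
		perror("move_pages"); /* hard failure, errno is valid */
		return errno;
	}
	if (err > 0) {
		/* partial failure: unmoved pages carry -errno codes */
		for (size_t i = 0; i < count; i++)
			if (status[i] < 0)
				fprintf(stderr, "page %zu: %s\n",
				        i, strerror(-status[i]));
		return EAGAIN;
	}
	return 0;
}
```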
struct aml_dma_linux_seq_ops aml_dma_linux_seq_inner_ops = {
aml_dma_linux_seq_do_copy,
aml_dma_linux_seq_do_move,
};
/*******************************************************************************
@@ -145,15 +100,6 @@ int aml_dma_linux_seq_create_request(struct aml_dma_data *d,
aml_dma_request_linux_seq_copy_init(req, dt, dptr, dtid,
st, sptr, stid);
}
else if(type == AML_DMA_REQUEST_TYPE_MOVE)
{
struct aml_area *darea = va_arg(ap, struct aml_area *);
struct aml_tiling *st = va_arg(ap, struct aml_tiling *);
void *sptr = va_arg(ap, void *);
int stid = va_arg(ap, int);
aml_dma_request_linux_seq_move_init(req, darea, st, sptr,
stid);
}
pthread_mutex_unlock(&dma->data.lock);
*r = (struct aml_dma_request *)req;
return 0;
@@ -172,8 +118,6 @@ int aml_dma_linux_seq_destroy_request(struct aml_dma_data *d,
if(req->type == AML_DMA_REQUEST_TYPE_COPY)
aml_dma_request_linux_seq_copy_destroy(req);
else if(req->type == AML_DMA_REQUEST_TYPE_MOVE)
aml_dma_request_linux_seq_move_destroy(req);
/* enough to remove from request vector */
pthread_mutex_lock(&dma->data.lock);
@@ -194,8 +138,6 @@ int aml_dma_linux_seq_wait_request(struct aml_dma_data *d,
/* execute */
if(req->type == AML_DMA_REQUEST_TYPE_COPY)
dma->ops.do_copy(&dma->data, req);
else if(req->type == AML_DMA_REQUEST_TYPE_MOVE)
dma->ops.do_move(&dma->data, req);
/* destroy a completed request */
aml_dma_linux_seq_destroy_request(d, r);
......
@@ -61,10 +61,6 @@ int main(int argc, char *argv[])
assert(!memcmp(src, dst, TILESIZE*PAGE_SIZE*NBTILES));
/* now move it by pages */
for(int i = 0; i < NBTILES; i++)
aml_dma_move(&dma, &area, &tiling, src, i);
/* delete everything */
aml_dma_linux_par_destroy(&dma);
aml_area_free(&area, dst);
......
@@ -60,10 +60,6 @@ int main(int argc, char *argv[])
assert(!memcmp(src, dst, TILESIZE*PAGE_SIZE*NBTILES));
/* now move it by pages */
for(int i = 0; i < NBTILES; i++)
aml_dma_move(&dma, &area, &tiling, src, i);
/* delete everything */
aml_dma_linux_seq_destroy(&dma);
aml_area_free(&area, dst);
......
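With the move loops gone, both tests now end at the copy verification. The surviving flow, abridged from the visible context; the variadic signature of aml_dma_copy() is inferred from copy_init's parameter order and is an assumption:

```c
/* abridged post-commit test flow (linux_seq shown; linux_par is the
 * same with the par create/destroy calls) */
for (int i = 0; i < NBTILES; i++)
	aml_dma_copy(&dma, &tiling, dst, i, &tiling, src, i);
assert(!memcmp(src, dst, TILESIZE*PAGE_SIZE*NBTILES));

/* delete everything */
aml_dma_linux_seq_destroy(&dma);
aml_area_free(&area, dst);
```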