From f43d2d6d23c4808c7440c1c56d8ee7914816c72c Mon Sep 17 00:00:00 2001 From: Swann Perarnau Date: Fri, 16 Aug 2019 13:17:17 -0500 Subject: [PATCH] [refactor/fix] harden dmas Various fixes related to dma API, missing validation on arguments, pthread management and so on. --- include/aml.h | 8 +-- include/aml/dma/linux-par.h | 2 +- src/dma/dma.c | 16 +++--- src/dma/dma_linux_par.c | 99 +++++++++++++++++++------------- src/dma/dma_linux_seq.c | 54 +++++++++++------- src/scratch/scratch_seq.c | 4 +- tests/dma/test_dma_linux_par.c | 100 +++++++++++++++++++++++---------- tests/dma/test_dma_linux_seq.c | 93 ++++++++++++++++++++---------- 8 files changed, 244 insertions(+), 132 deletions(-) diff --git a/include/aml.h b/include/aml.h index 8477334..2276134 100644 --- a/include/aml.h +++ b/include/aml.h @@ -877,7 +877,7 @@ struct aml_dma_ops { * @return an AML error code. **/ int (*destroy_request)(struct aml_dma_data *dma, - struct aml_dma_request *req); + struct aml_dma_request **req); /** * Wait for termination of a data movement and destroy the request @@ -888,7 +888,7 @@ struct aml_dma_ops { * @return an AML error code. **/ int (*wait_request)(struct aml_dma_data *dma, - struct aml_dma_request *req); + struct aml_dma_request **req); }; /** @@ -931,7 +931,7 @@ int aml_dma_async_copy(struct aml_dma *dma, struct aml_dma_request **req, * @param req: a DMA request obtained using aml_dma_async_*() calls. * @return 0 if successful; an error code otherwise. **/ -int aml_dma_wait(struct aml_dma *dma, struct aml_dma_request *req); +int aml_dma_wait(struct aml_dma *dma, struct aml_dma_request **req); /** * Tears down an asynchronous DMA request before it completes. @@ -939,7 +939,7 @@ int aml_dma_wait(struct aml_dma *dma, struct aml_dma_request *req); * @param req: a DMA request obtained using aml_dma_async_*() calls. * @return 0 if successful; an error code otherwise. **/ -int aml_dma_cancel(struct aml_dma *dma, struct aml_dma_request *req); +int aml_dma_cancel(struct aml_dma *dma, struct aml_dma_request **req); /** * Generic helper to copy from one layout to another. diff --git a/include/aml/dma/linux-par.h b/include/aml/dma/linux-par.h index 1b7fc63..f3d21e5 100644 --- a/include/aml/dma/linux-par.h +++ b/include/aml/dma/linux-par.h @@ -50,7 +50,7 @@ struct aml_dma_linux_par_request_data { pthread_t thread; }; -/** Inside of a parallel request for linux movement. **/ +/** Inside of a parallel dma for linux movement. **/ struct aml_dma_linux_par_data { struct aml_vector *requests; pthread_mutex_t lock; diff --git a/src/dma/dma.c b/src/dma/dma.c index ba8854e..d956648 100644 --- a/src/dma/dma.c +++ b/src/dma/dma.c @@ -88,7 +88,9 @@ int aml_dma_copy(struct aml_dma *dma, int type, ...) 
va_start(ap, type); ret = dma->ops->create_request(dma->data, &req, type, ap); va_end(ap); - ret = dma->ops->wait_request(dma->data, req); + if (ret != AML_SUCCESS) + return ret; + ret = dma->ops->wait_request(dma->data, &req); return ret; } @@ -107,16 +109,16 @@ int aml_dma_async_copy(struct aml_dma *dma, struct aml_dma_request **req, return ret; } -int aml_dma_cancel(struct aml_dma *dma, struct aml_dma_request *req) +int aml_dma_cancel(struct aml_dma *dma, struct aml_dma_request **req) { - assert(dma != NULL); - assert(req != NULL); + if (dma == NULL || req == NULL) + return -AML_EINVAL; return dma->ops->destroy_request(dma->data, req); } -int aml_dma_wait(struct aml_dma *dma, struct aml_dma_request *req) +int aml_dma_wait(struct aml_dma *dma, struct aml_dma_request **req) { - assert(dma != NULL); - assert(req != NULL); + if (dma == NULL || req == NULL) + return -AML_EINVAL; return dma->ops->wait_request(dma->data, req); } diff --git a/src/dma/dma_linux_par.c b/src/dma/dma_linux_par.c index d4427a1..5d487e1 100644 --- a/src/dma/dma_linux_par.c +++ b/src/dma/dma_linux_par.c @@ -68,6 +68,7 @@ void *aml_dma_linux_par_do_thread(void *arg) struct aml_dma_linux_par_request_data *req = (struct aml_dma_linux_par_request_data *)arg; + pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); if (req->type != AML_DMA_REQUEST_TYPE_INVALID) aml_copy_layout_generic(req->dest, req->src); return NULL; @@ -91,6 +92,7 @@ int aml_dma_linux_par_create_request(struct aml_dma_data *d, (struct aml_dma_linux_par *)d; struct aml_dma_request_linux_par *ret; struct aml_dma_linux_par_request_data *req; + int err = AML_SUCCESS; pthread_mutex_lock(&dma->data.lock); req = aml_vector_add(dma->data.requests); @@ -101,6 +103,10 @@ int aml_dma_linux_par_create_request(struct aml_dma_data *d, dl = va_arg(ap, struct aml_layout *); sl = va_arg(ap, struct aml_layout *); + if (dl == NULL || sl == NULL) { + err = -AML_EINVAL; + goto unlock; + } aml_dma_linux_par_request_data_init(req, AML_DMA_REQUEST_TYPE_LAYOUT, dl, sl); @@ -112,17 +118,21 @@ int aml_dma_linux_par_create_request(struct aml_dma_data *d, dp = va_arg(ap, void *); sp = va_arg(ap, void *); sz = va_arg(ap, size_t); + if (dp == NULL || sp == NULL || sz == 0) { + err = -AML_EINVAL; + goto unlock; + } /* simple 1D layout, none of the parameters really matter, as * long as the copy generates a single memcpy. 
*/ - aml_layout_dense_create(&dl, dp, 0, sizeof(size_t), 1, - &sz, NULL, NULL); - aml_layout_dense_create(&sl, sp, 0, sizeof(size_t), 1, - &sz, NULL, NULL); + aml_layout_dense_create(&dl, dp, 0, 1, 1, &sz, NULL, NULL); + aml_layout_dense_create(&sl, sp, 0, 1, 1, &sz, NULL, NULL); aml_dma_linux_par_request_data_init(req, AML_DMA_REQUEST_TYPE_PTR, dl, sl); - } + } else + err = -AML_EINVAL; +unlock: pthread_mutex_unlock(&dma->data.lock); if (req->type != AML_DMA_REQUEST_TYPE_INVALID) { int uuid = aml_vector_getid(dma->data.requests, req); @@ -131,21 +141,23 @@ int aml_dma_linux_par_create_request(struct aml_dma_data *d, aml_dma_request_linux_par_create(&ret, uuid); *r = (struct aml_dma_request *)ret; } - return 0; + return err; } int aml_dma_linux_par_destroy_request(struct aml_dma_data *d, - struct aml_dma_request *r) + struct aml_dma_request **r) { assert(d != NULL); assert(r != NULL); struct aml_dma_linux_par *dma = (struct aml_dma_linux_par *)d; - - struct aml_dma_request_linux_par *req = - (struct aml_dma_request_linux_par *)r; + struct aml_dma_request_linux_par *req; struct aml_dma_linux_par_request_data *inner_req; + if (*r == NULL) + return -AML_EINVAL; + req = (struct aml_dma_request_linux_par *)*r; + inner_req = aml_vector_get(dma->data.requests, req->uuid); if (inner_req == NULL) return -AML_EINVAL; @@ -154,44 +166,49 @@ int aml_dma_linux_par_destroy_request(struct aml_dma_data *d, if (inner_req->type != AML_DMA_REQUEST_TYPE_INVALID) { pthread_cancel(inner_req->thread); pthread_join(inner_req->thread, NULL); - if (inner_req->type == AML_DMA_REQUEST_TYPE_PTR) { - aml_layout_dense_destroy(&inner_req->dest); - aml_layout_dense_destroy(&inner_req->src); - } + } + + if (inner_req->type == AML_DMA_REQUEST_TYPE_PTR) { + aml_layout_dense_destroy(&inner_req->dest); + aml_layout_dense_destroy(&inner_req->src); } pthread_mutex_lock(&dma->data.lock); aml_vector_remove(dma->data.requests, inner_req); pthread_mutex_unlock(&dma->data.lock); aml_dma_request_linux_par_destroy(&req); + *r = NULL; return 0; } int aml_dma_linux_par_wait_request(struct aml_dma_data *d, - struct aml_dma_request *r) + struct aml_dma_request **r) { assert(d != NULL); assert(r != NULL); struct aml_dma_linux_par *dma = (struct aml_dma_linux_par *)d; - struct aml_dma_request_linux_par *req = - (struct aml_dma_request_linux_par *)r; + struct aml_dma_request_linux_par *req; struct aml_dma_linux_par_request_data *inner_req; + if (*r == NULL) + return -AML_EINVAL; + req = (struct aml_dma_request_linux_par *)*r; + inner_req = aml_vector_get(dma->data.requests, req->uuid); if (inner_req == NULL) return -AML_EINVAL; - if (inner_req->type != AML_DMA_REQUEST_TYPE_INVALID) { + if (inner_req->type != AML_DMA_REQUEST_TYPE_INVALID) pthread_join(inner_req->thread, NULL); - if (inner_req->type == AML_DMA_REQUEST_TYPE_PTR) { - aml_layout_dense_destroy(&inner_req->dest); - aml_layout_dense_destroy(&inner_req->src); - } - } + if (inner_req->type == AML_DMA_REQUEST_TYPE_PTR) { + aml_layout_dense_destroy(&inner_req->dest); + aml_layout_dense_destroy(&inner_req->src); + } pthread_mutex_lock(&dma->data.lock); aml_vector_remove(dma->data.requests, inner_req); pthread_mutex_unlock(&dma->data.lock); aml_dma_request_linux_par_destroy(&req); + *r = NULL; return 0; } @@ -236,22 +253,28 @@ int aml_dma_linux_par_create(struct aml_dma **dma, size_t nbreqs) return 0; } -void aml_dma_linux_par_destroy(struct aml_dma **dma) +void aml_dma_linux_par_destroy(struct aml_dma **d) { - struct aml_dma *d; - struct aml_dma_linux_par *l; + struct aml_dma_linux_par 
*dma; - if (dma == NULL) - return; - d = *dma; - if (d == NULL) + if (d == NULL || *d == NULL) return; - - assert(d->data != NULL); - l = (struct aml_dma_linux_par *)d->data; - aml_vector_destroy(&l->data.requests); - pthread_mutex_destroy(&l->data.lock); - - free(d); - *dma = NULL; + dma = (struct aml_dma_linux_par *)(*d)->data; + for (size_t i = 0; i < aml_vector_size(dma->data.requests); i++) { + struct aml_dma_linux_par_request_data *req; + + req = aml_vector_get(dma->data.requests, i); + if (req->type != AML_DMA_REQUEST_TYPE_INVALID) { + pthread_cancel(req->thread); + pthread_join(req->thread, NULL); + } + if (req->type == AML_DMA_REQUEST_TYPE_PTR) { + aml_layout_dense_destroy(&req->dest); + aml_layout_dense_destroy(&req->src); + } + } + aml_vector_destroy(&dma->data.requests); + pthread_mutex_destroy(&dma->data.lock); + free(*d); + *d = NULL; } diff --git a/src/dma/dma_linux_seq.c b/src/dma/dma_linux_seq.c index 71eca96..a9a8be2 100644 --- a/src/dma/dma_linux_seq.c +++ b/src/dma/dma_linux_seq.c @@ -90,6 +90,7 @@ int aml_dma_linux_seq_create_request(struct aml_dma_data *d, (struct aml_dma_linux_seq *)d; struct aml_dma_request_linux_seq *ret; struct aml_dma_linux_seq_request_data *req; + int err = AML_SUCCESS; pthread_mutex_lock(&dma->data.lock); req = aml_vector_add(dma->data.requests); @@ -100,6 +101,10 @@ int aml_dma_linux_seq_create_request(struct aml_dma_data *d, dl = va_arg(ap, struct aml_layout *); sl = va_arg(ap, struct aml_layout *); + if (dl == NULL || sl == NULL) { + err = -AML_EINVAL; + goto unlock; + } aml_dma_linux_seq_request_data_init(req, AML_DMA_REQUEST_TYPE_LAYOUT, dl, sl); @@ -111,38 +116,46 @@ int aml_dma_linux_seq_create_request(struct aml_dma_data *d, dp = va_arg(ap, void *); sp = va_arg(ap, void *); sz = va_arg(ap, size_t); + if (dp == NULL || sp == NULL || sz == 0) { + err = -AML_EINVAL; + goto unlock; + } /* simple 1D layout, none of the parameters really matter, as * long as the copy generates a single memcpy. 
*/ - aml_layout_dense_create(&dl, dp, 0, sizeof(size_t), 1, - &sz, NULL, NULL); - aml_layout_dense_create(&sl, sp, 0, sizeof(size_t), 1, - &sz, NULL, NULL); + aml_layout_dense_create(&dl, dp, 0, 1, 1, &sz, NULL, NULL); + aml_layout_dense_create(&sl, sp, 0, 1, 1, &sz, NULL, NULL); aml_dma_linux_seq_request_data_init(req, AML_DMA_REQUEST_TYPE_PTR, dl, sl); - } - int uuid = aml_vector_getid(dma->data.requests, req); - - assert(uuid != AML_DMA_REQUEST_TYPE_INVALID); - aml_dma_request_linux_seq_create(&ret, uuid); - *r = (struct aml_dma_request *)ret; + } else + err = -AML_EINVAL; +unlock: pthread_mutex_unlock(&dma->data.lock); - return 0; + if (req->type != AML_DMA_REQUEST_TYPE_INVALID) { + int uuid = aml_vector_getid(dma->data.requests, req); + + assert(uuid != AML_DMA_REQUEST_TYPE_INVALID); + aml_dma_request_linux_seq_create(&ret, uuid); + *r = (struct aml_dma_request *)ret; + } + return err; } int aml_dma_linux_seq_destroy_request(struct aml_dma_data *d, - struct aml_dma_request *r) + struct aml_dma_request **r) { assert(d != NULL); assert(r != NULL); struct aml_dma_linux_seq *dma = (struct aml_dma_linux_seq *)d; - - struct aml_dma_request_linux_seq *req = - (struct aml_dma_request_linux_seq *)r; + struct aml_dma_request_linux_seq *req; struct aml_dma_linux_seq_request_data *inner_req; + if (*r == NULL) + return -AML_EINVAL; + req = (struct aml_dma_request_linux_seq *)*r; + inner_req = aml_vector_get(dma->data.requests, req->uuid); if (inner_req == NULL) return -AML_EINVAL; @@ -153,23 +166,26 @@ int aml_dma_linux_seq_destroy_request(struct aml_dma_data *d, aml_layout_dense_destroy(&inner_req->src); } - /* enough to remove from request vector */ aml_vector_remove(dma->data.requests, inner_req); pthread_mutex_unlock(&dma->data.lock); aml_dma_request_linux_seq_destroy(&req); + *r = NULL; return 0; } int aml_dma_linux_seq_wait_request(struct aml_dma_data *d, - struct aml_dma_request *r) + struct aml_dma_request **r) { assert(d != NULL); assert(r != NULL); struct aml_dma_linux_seq *dma = (struct aml_dma_linux_seq *)d; - struct aml_dma_request_linux_seq *req = - (struct aml_dma_request_linux_seq *)r; + struct aml_dma_request_linux_seq *req; struct aml_dma_linux_seq_request_data *inner_req; + if (*r == NULL) + return -AML_EINVAL; + req = (struct aml_dma_request_linux_seq *)*r; + inner_req = aml_vector_get(dma->data.requests, req->uuid); if (inner_req == NULL) return -AML_EINVAL; diff --git a/src/scratch/scratch_seq.c b/src/scratch/scratch_seq.c index f5e68ed..bcec3b3 100644 --- a/src/scratch/scratch_seq.c +++ b/src/scratch/scratch_seq.c @@ -183,7 +183,7 @@ int aml_scratch_seq_destroy_request(struct aml_scratch_data *d, return -AML_EINVAL; if (inner_req->type != AML_SCRATCH_REQUEST_TYPE_NOOP) - aml_dma_cancel(scratch->data.dma, inner_req->dma_req); + aml_dma_cancel(scratch->data.dma, &inner_req->dma_req); /* destroy removes the tile from the scratch */ if (inner_req->type == AML_SCRATCH_REQUEST_TYPE_PUSH) @@ -215,7 +215,7 @@ int aml_scratch_seq_wait_request(struct aml_scratch_data *d, /* wait for completion of the request */ if (inner_req->type != AML_SCRATCH_REQUEST_TYPE_NOOP) - aml_dma_wait(scratch->data.dma, inner_req->dma_req); + aml_dma_wait(scratch->data.dma, &inner_req->dma_req); /* cleanup a completed request. 
In case of push, free up the tile */ pthread_mutex_lock(&scratch->data.lock); diff --git a/tests/dma/test_dma_linux_par.c b/tests/dma/test_dma_linux_par.c index ac3f67f..ebf3755 100644 --- a/tests/dma/test_dma_linux_par.c +++ b/tests/dma/test_dma_linux_par.c @@ -9,56 +9,94 @@ *******************************************************************************/ #include "aml.h" -#include "aml/area/linux.h" +#include "aml/layout/dense.h" #include "aml/dma/linux-par.h" -#include "aml/tiling/1d.h" #include -#define TILESIZE (2) -#define NBTILES (16) - int main(int argc, char *argv[]) { - struct aml_tiling *tiling; struct aml_dma *dma; - void *dst, *src; + size_t isz = 1<<16; + int idest[isz]; + int isrc[isz]; + struct aml_layout *idl, *isl; /* library initialization */ aml_init(&argc, &argv); - /* initialize all the supporting struct */ - assert(!aml_tiling_1d_create(&tiling, TILESIZE*_SC_PAGE_SIZE, - TILESIZE*_SC_PAGE_SIZE*NBTILES)); + /* support data structures */ + assert(!aml_layout_dense_create(&idl, idest, 0, sizeof(int), 1, &isz, + NULL, NULL)); + assert(!aml_layout_dense_create(&isl, isrc, 0, sizeof(int), 1, &isz, + NULL, NULL)); + for (size_t i = 0; i < isz; i++) { + idest[i] = 42; + isrc[i] = 24; + } + /* invalid create input */ + assert(aml_dma_linux_par_create(NULL, 1) == -AML_EINVAL); + + /* invalid requests */ assert(!aml_dma_linux_par_create(&dma, 1)); + assert(aml_dma_copy(dma, 42) == -AML_EINVAL); + assert(aml_dma_copy(dma, AML_DMA_REQUEST_TYPE_PTR, NULL, isrc, isz) == + -AML_EINVAL); + assert(aml_dma_copy(dma, AML_DMA_REQUEST_TYPE_PTR, idest, NULL, isz) == + -AML_EINVAL); + assert(aml_dma_copy(dma, AML_DMA_REQUEST_TYPE_PTR, idest, isrc, 0) == + -AML_EINVAL); + assert(aml_dma_copy(dma, AML_DMA_REQUEST_TYPE_LAYOUT, NULL, isl) == + -AML_EINVAL); + assert(aml_dma_copy(dma, AML_DMA_REQUEST_TYPE_LAYOUT, idl, NULL) == + -AML_EINVAL); - /* allocate some memory */ - src = aml_area_mmap(&aml_area_linux, NULL, TILESIZE*_SC_PAGE_SIZE*NBTILES); - assert(src != NULL); - dst = aml_area_mmap(&aml_area_linux, NULL, TILESIZE*_SC_PAGE_SIZE*NBTILES); - assert(dst != NULL); + struct aml_dma_request *r1, *r2; + /* force dma to increase its requests queue */ + assert(!aml_dma_async_copy(dma, &r1, AML_DMA_REQUEST_TYPE_LAYOUT, + idl, isl)); + assert(!aml_dma_async_copy(dma, &r2, AML_DMA_REQUEST_TYPE_LAYOUT, + idl, isl)); - memset(src, 42, TILESIZE*_SC_PAGE_SIZE*NBTILES); - memset(dst, 24, TILESIZE*_SC_PAGE_SIZE*NBTILES); + assert(aml_dma_wait(dma, NULL) == -AML_EINVAL); + assert(!aml_dma_wait(dma, &r1)); + assert(!aml_dma_wait(dma, &r2)); + aml_dma_linux_par_destroy(&dma); + + /* cancel a request on the fly */ + assert(!aml_dma_linux_par_create(&dma, 1)); + assert(aml_dma_cancel(dma, NULL) == -AML_EINVAL); + assert(!aml_dma_async_copy(dma, &r1, AML_DMA_REQUEST_TYPE_LAYOUT, + idl, isl)); + assert(!aml_dma_cancel(dma, &r1)); - /* move some stuff by copy */ - struct aml_dma_request *requests[NBTILES]; - for(int i = 0; i < NBTILES; i++) { - void *d = aml_tiling_tilestart(tiling, dst, i); - void *s = aml_tiling_tilestart(tiling, src, i); - aml_dma_async_copy(dma, &requests[i], AML_DMA_REQUEST_TYPE_PTR, - d, s, TILESIZE*_SC_PAGE_SIZE); + /* destroy a running dma */ + assert(!aml_dma_async_copy(dma, &r1, AML_DMA_REQUEST_TYPE_LAYOUT, + idl, isl)); + aml_dma_linux_par_destroy(&dma); + + /* move data around */ + assert(!aml_dma_linux_par_create(&dma, 1)); + struct aml_dma_request *requests[16]; + for (int i = 0; i < 16; i++) { + size_t sz = isz/16; + size_t off = i*sz; + void *dptr = (void *)&(idest[off]); + void 
*sptr = (void *)&(isrc[off]); + + assert(!aml_dma_async_copy(dma, &requests[i], + AML_DMA_REQUEST_TYPE_PTR, + dptr, sptr, sz*sizeof(int))); + assert(requests[i] != NULL); } - for(int i = 0; i < NBTILES; i++) - aml_dma_wait(dma, requests[i]); + for(int i = 0; i < 16; i++) + assert(!aml_dma_wait(dma, &requests[i])); - assert(!memcmp(src, dst, TILESIZE*_SC_PAGE_SIZE*NBTILES)); + assert(!memcmp(isrc, idest, isz*sizeof(int))); /* delete everything */ aml_dma_linux_par_destroy(&dma); - aml_area_munmap(&aml_area_linux, dst, TILESIZE*_SC_PAGE_SIZE*NBTILES); - aml_area_munmap(&aml_area_linux, src, TILESIZE*_SC_PAGE_SIZE*NBTILES); - aml_tiling_1d_destroy(&tiling); - + aml_layout_dense_destroy(&idl); + aml_layout_dense_destroy(&isl); aml_finalize(); return 0; } diff --git a/tests/dma/test_dma_linux_seq.c b/tests/dma/test_dma_linux_seq.c index 393cdde..c3e1c1a 100644 --- a/tests/dma/test_dma_linux_seq.c +++ b/tests/dma/test_dma_linux_seq.c @@ -9,58 +9,91 @@ *******************************************************************************/ #include "aml.h" -#include "aml/area/linux.h" +#include "aml/layout/dense.h" #include "aml/dma/linux-seq.h" -#include "aml/tiling/1d.h" #include -#define TILESIZE (2) -#define NBTILES (16) - int main(int argc, char *argv[]) { - struct aml_tiling *tiling; struct aml_dma *dma; - void *dst, *src; + size_t isz = 1<<16; + int idest[isz]; + int isrc[isz]; + struct aml_layout *idl, *isl; /* library initialization */ aml_init(&argc, &argv); - /* initialize all the supporting struct */ - assert(!aml_tiling_1d_create(&tiling, TILESIZE*_SC_PAGE_SIZE, - TILESIZE*_SC_PAGE_SIZE*NBTILES)); + /* support data structures */ + assert(!aml_layout_dense_create(&idl, idest, 0, sizeof(int), 1, &isz, + NULL, NULL)); + assert(!aml_layout_dense_create(&isl, isrc, 0, sizeof(int), 1, &isz, + NULL, NULL)); + for (size_t i = 0; i < isz; i++) { + idest[i] = 42; + isrc[i] = 24; + } + /* invalid create input */ + assert(aml_dma_linux_seq_create(NULL, 1) == -AML_EINVAL); + + /* invalid requests */ assert(!aml_dma_linux_seq_create(&dma, 1)); + assert(aml_dma_copy(dma, 42) == -AML_EINVAL); + assert(aml_dma_copy(dma, AML_DMA_REQUEST_TYPE_PTR, NULL, isrc, isz) == + -AML_EINVAL); + assert(aml_dma_copy(dma, AML_DMA_REQUEST_TYPE_PTR, idest, NULL, isz) == + -AML_EINVAL); + assert(aml_dma_copy(dma, AML_DMA_REQUEST_TYPE_PTR, idest, isrc, 0) == + -AML_EINVAL); + assert(aml_dma_copy(dma, AML_DMA_REQUEST_TYPE_LAYOUT, NULL, isl) == + -AML_EINVAL); + assert(aml_dma_copy(dma, AML_DMA_REQUEST_TYPE_LAYOUT, idl, NULL) == + -AML_EINVAL); + + struct aml_dma_request *r1, *r2; + /* force dma to increase its requests queue */ + assert(!aml_dma_async_copy(dma, &r1, AML_DMA_REQUEST_TYPE_LAYOUT, + idl, isl)); + assert(!aml_dma_async_copy(dma, &r2, AML_DMA_REQUEST_TYPE_LAYOUT, + idl, isl)); + + assert(aml_dma_wait(dma, NULL) == -AML_EINVAL); + assert(!aml_dma_wait(dma, &r1)); + assert(!aml_dma_wait(dma, &r2)); - /* allocate some memory */ - src = aml_area_mmap(&aml_area_linux, NULL, TILESIZE*_SC_PAGE_SIZE*NBTILES); - assert(src != NULL); - dst = aml_area_mmap(&aml_area_linux, NULL, TILESIZE*_SC_PAGE_SIZE*NBTILES); - assert(dst != NULL); + /* cancel a request on the fly */ + assert(aml_dma_cancel(dma, NULL) == -AML_EINVAL); + assert(!aml_dma_async_copy(dma, &r1, AML_DMA_REQUEST_TYPE_LAYOUT, + idl, isl)); + assert(!aml_dma_cancel(dma, &r1)); - memset(src, 42, TILESIZE*_SC_PAGE_SIZE*NBTILES); - memset(dst, 24, TILESIZE*_SC_PAGE_SIZE*NBTILES); - /* move some stuff by copy */ - struct aml_dma_request *requests[NBTILES]; - for(int i = 
0; i < NBTILES; i++) { - void *d = aml_tiling_tilestart(tiling, dst, i); - void *s = aml_tiling_tilestart(tiling, src, i); + /* destroy a running dma */ + assert(!aml_dma_async_copy(dma, &r1, AML_DMA_REQUEST_TYPE_LAYOUT, + idl, isl)); + aml_dma_linux_seq_destroy(&dma); + + /* move data around */ + assert(!aml_dma_linux_seq_create(&dma, 1)); + struct aml_dma_request *requests[16]; + for (int i = 0; i < 16; i++) { + size_t sz = isz/16; + size_t off = i*sz; + void *dptr = (void *)&(idest[off]); + void *sptr = (void *)&(isrc[off]); + assert(!aml_dma_async_copy(dma, &requests[i], AML_DMA_REQUEST_TYPE_PTR, - d, s, (size_t)TILESIZE*_SC_PAGE_SIZE)); + dptr, sptr, sz*sizeof(int))); assert(requests[i] != NULL); } - for(int i = 0; i < NBTILES; i++) - assert(!aml_dma_wait(dma, requests[i])); + for(int i = 0; i < 16; i++) + assert(!aml_dma_wait(dma, &requests[i])); - assert(!memcmp(src, dst, TILESIZE*_SC_PAGE_SIZE*NBTILES)); + assert(!memcmp(isrc, idest, isz*sizeof(int))); /* delete everything */ aml_dma_linux_seq_destroy(&dma); - aml_area_munmap(&aml_area_linux, dst, TILESIZE*_SC_PAGE_SIZE*NBTILES); - aml_area_munmap(&aml_area_linux, src, TILESIZE*_SC_PAGE_SIZE*NBTILES); - aml_tiling_1d_destroy(&tiling); - aml_finalize(); return 0; } -- 2.26.2
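
Note (not part of the patch): below is a minimal sketch of how a caller adapts to the revised wait/cancel signatures introduced here, where request handles are passed by address and reset on completion. It assumes the linux-par backend and the AML init/teardown sequence exercised by the tests above; the array size is illustrative.

#include <assert.h>
#include "aml.h"
#include "aml/dma/linux-par.h"

int main(int argc, char *argv[])
{
	struct aml_dma *dma;
	struct aml_dma_request *req;
	int src[1024], dest[1024];

	aml_init(&argc, &argv);
	assert(!aml_dma_linux_par_create(&dma, 2));

	/* asynchronous pointer-to-pointer copy; the size is in bytes */
	assert(!aml_dma_async_copy(dma, &req, AML_DMA_REQUEST_TYPE_PTR,
				   dest, src, sizeof(src)));

	/* wait now takes the address of the handle and resets it to NULL
	 * on success, so a stale handle cannot be waited on twice */
	assert(!aml_dma_wait(dma, &req));
	assert(req == NULL);

	/* NULL arguments are rejected with -AML_EINVAL instead of
	 * tripping an assert, as per the hardened aml_dma_wait() */
	assert(aml_dma_wait(dma, NULL) == -AML_EINVAL);

	aml_dma_linux_par_destroy(&dma);
	aml_finalize();
	return 0;
}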