Commit f43d2d6d authored by Swann Perarnau

[refactor/fix] harden dmas

Various fixes related to the DMA API: missing validation of arguments,
pthread management, and so on.
parent 625a0e57
Pipeline #8311 passed with stages in 5 minutes and 44 seconds
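
The crux of the diff below: aml_dma_wait() and aml_dma_cancel(), and the matching wait_request/destroy_request backend ops, now take a struct aml_dma_request ** instead of a bare pointer, so the library can destroy a finished request and reset the caller's handle. A minimal caller sketch of the new convention, assuming the linux-par backend from this commit; the example() wrapper and its layout arguments are illustrative, not part of the patch:

#include <assert.h>
#include "aml.h"
#include "aml/dma/linux-par.h"

/* Hypothetical caller: copy between two existing layouts using the
 * hardened API. The aml_* calls are the ones this commit touches. */
void example(struct aml_layout *dl, struct aml_layout *sl)
{
    struct aml_dma *dma;
    struct aml_dma_request *req;

    assert(!aml_dma_linux_par_create(&dma, 1));
    assert(!aml_dma_async_copy(dma, &req, AML_DMA_REQUEST_TYPE_LAYOUT,
                               dl, sl));
    /* wait takes &req; on success the backend destroys the request
     * and resets req to NULL */
    assert(!aml_dma_wait(dma, &req));
    aml_dma_linux_par_destroy(&dma);
}

Passing NULL where a handle is expected now returns -AML_EINVAL instead of tripping an assert, as the updated tests below exercise.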
@@ -877,7 +877,7 @@ struct aml_dma_ops {
      * @return an AML error code.
      **/
     int (*destroy_request)(struct aml_dma_data *dma,
-                           struct aml_dma_request *req);
+                           struct aml_dma_request **req);
     /**
      * Wait for termination of a data movement and destroy the request
@@ -888,7 +888,7 @@ struct aml_dma_ops {
      * @return an AML error code.
      **/
     int (*wait_request)(struct aml_dma_data *dma,
-                        struct aml_dma_request *req);
+                        struct aml_dma_request **req);
 };
 
 /**
@@ -931,7 +931,7 @@ int aml_dma_async_copy(struct aml_dma *dma, struct aml_dma_request **req,
  * @param req: a DMA request obtained using aml_dma_async_*() calls.
  * @return 0 if successful; an error code otherwise.
  **/
-int aml_dma_wait(struct aml_dma *dma, struct aml_dma_request *req);
+int aml_dma_wait(struct aml_dma *dma, struct aml_dma_request **req);
 
 /**
  * Tears down an asynchronous DMA request before it completes.
@@ -939,7 +939,7 @@ int aml_dma_wait(struct aml_dma *dma, struct aml_dma_request *req);
  * @param req: a DMA request obtained using aml_dma_async_*() calls.
  * @return 0 if successful; an error code otherwise.
  **/
-int aml_dma_cancel(struct aml_dma *dma, struct aml_dma_request *req);
+int aml_dma_cancel(struct aml_dma *dma, struct aml_dma_request **req);
 
 /**
  * Generic helper to copy from one layout to another.
...
@@ -50,7 +50,7 @@ struct aml_dma_linux_par_request_data {
     pthread_t thread;
 };
 
-/** Inside of a parallel request for linux movement. **/
+/** Inside of a parallel dma for linux movement. **/
 struct aml_dma_linux_par_data {
     struct aml_vector *requests;
     pthread_mutex_t lock;
...
@@ -88,7 +88,9 @@ int aml_dma_copy(struct aml_dma *dma, int type, ...)
     va_start(ap, type);
     ret = dma->ops->create_request(dma->data, &req, type, ap);
     va_end(ap);
-    ret = dma->ops->wait_request(dma->data, req);
+    if (ret != AML_SUCCESS)
+        return ret;
+    ret = dma->ops->wait_request(dma->data, &req);
     return ret;
 }
@@ -107,16 +109,16 @@ int aml_dma_async_copy(struct aml_dma *dma, struct aml_dma_request **req,
     return ret;
 }
 
-int aml_dma_cancel(struct aml_dma *dma, struct aml_dma_request *req)
+int aml_dma_cancel(struct aml_dma *dma, struct aml_dma_request **req)
 {
-    assert(dma != NULL);
-    assert(req != NULL);
+    if (dma == NULL || req == NULL)
+        return -AML_EINVAL;
     return dma->ops->destroy_request(dma->data, req);
 }
 
-int aml_dma_wait(struct aml_dma *dma, struct aml_dma_request *req)
+int aml_dma_wait(struct aml_dma *dma, struct aml_dma_request **req)
 {
-    assert(dma != NULL);
-    assert(req != NULL);
+    if (dma == NULL || req == NULL)
+        return -AML_EINVAL;
     return dma->ops->wait_request(dma->data, req);
 }
@@ -68,6 +68,7 @@ void *aml_dma_linux_par_do_thread(void *arg)
     struct aml_dma_linux_par_request_data *req =
         (struct aml_dma_linux_par_request_data *)arg;
 
+    pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
     if (req->type != AML_DMA_REQUEST_TYPE_INVALID)
         aml_copy_layout_generic(req->dest, req->src);
     return NULL;
@@ -91,6 +92,7 @@ int aml_dma_linux_par_create_request(struct aml_dma_data *d,
         (struct aml_dma_linux_par *)d;
     struct aml_dma_request_linux_par *ret;
     struct aml_dma_linux_par_request_data *req;
+    int err = AML_SUCCESS;
 
     pthread_mutex_lock(&dma->data.lock);
     req = aml_vector_add(dma->data.requests);
@@ -101,6 +103,10 @@ int aml_dma_linux_par_create_request(struct aml_dma_data *d,
         dl = va_arg(ap, struct aml_layout *);
         sl = va_arg(ap, struct aml_layout *);
+        if (dl == NULL || sl == NULL) {
+            err = -AML_EINVAL;
+            goto unlock;
+        }
         aml_dma_linux_par_request_data_init(req,
                                             AML_DMA_REQUEST_TYPE_LAYOUT,
                                             dl, sl);
@@ -112,17 +118,21 @@ int aml_dma_linux_par_create_request(struct aml_dma_data *d,
         dp = va_arg(ap, void *);
         sp = va_arg(ap, void *);
         sz = va_arg(ap, size_t);
+        if (dp == NULL || sp == NULL || sz == 0) {
+            err = -AML_EINVAL;
+            goto unlock;
+        }
         /* simple 1D layout, none of the parameters really matter, as
          * long as the copy generates a single memcpy.
          */
-        aml_layout_dense_create(&dl, dp, 0, sizeof(size_t), 1,
-                                &sz, NULL, NULL);
-        aml_layout_dense_create(&sl, sp, 0, sizeof(size_t), 1,
-                                &sz, NULL, NULL);
+        aml_layout_dense_create(&dl, dp, 0, 1, 1, &sz, NULL, NULL);
+        aml_layout_dense_create(&sl, sp, 0, 1, 1, &sz, NULL, NULL);
         aml_dma_linux_par_request_data_init(req,
                                             AML_DMA_REQUEST_TYPE_PTR,
                                             dl, sl);
-    }
+    } else
+        err = -AML_EINVAL;
+unlock:
     pthread_mutex_unlock(&dma->data.lock);
     if (req->type != AML_DMA_REQUEST_TYPE_INVALID) {
         int uuid = aml_vector_getid(dma->data.requests, req);
@@ -131,21 +141,23 @@ int aml_dma_linux_par_create_request(struct aml_dma_data *d,
         aml_dma_request_linux_par_create(&ret, uuid);
         *r = (struct aml_dma_request *)ret;
     }
-    return 0;
+    return err;
 }
 int aml_dma_linux_par_destroy_request(struct aml_dma_data *d,
-                                      struct aml_dma_request *r)
+                                      struct aml_dma_request **r)
 {
     assert(d != NULL);
     assert(r != NULL);
     struct aml_dma_linux_par *dma =
         (struct aml_dma_linux_par *)d;
-    struct aml_dma_request_linux_par *req =
-        (struct aml_dma_request_linux_par *)r;
+    struct aml_dma_request_linux_par *req;
     struct aml_dma_linux_par_request_data *inner_req;
 
+    if (*r == NULL)
+        return -AML_EINVAL;
+    req = (struct aml_dma_request_linux_par *)*r;
     inner_req = aml_vector_get(dma->data.requests, req->uuid);
     if (inner_req == NULL)
         return -AML_EINVAL;
@@ -154,44 +166,49 @@ int aml_dma_linux_par_destroy_request(struct aml_dma_data *d,
     if (inner_req->type != AML_DMA_REQUEST_TYPE_INVALID) {
         pthread_cancel(inner_req->thread);
         pthread_join(inner_req->thread, NULL);
-        if (inner_req->type == AML_DMA_REQUEST_TYPE_PTR) {
-            aml_layout_dense_destroy(&inner_req->dest);
-            aml_layout_dense_destroy(&inner_req->src);
-        }
     }
+
+    if (inner_req->type == AML_DMA_REQUEST_TYPE_PTR) {
+        aml_layout_dense_destroy(&inner_req->dest);
+        aml_layout_dense_destroy(&inner_req->src);
+    }
 
     pthread_mutex_lock(&dma->data.lock);
     aml_vector_remove(dma->data.requests, inner_req);
     pthread_mutex_unlock(&dma->data.lock);
     aml_dma_request_linux_par_destroy(&req);
+    *r = NULL;
     return 0;
 }
 
 int aml_dma_linux_par_wait_request(struct aml_dma_data *d,
-                                   struct aml_dma_request *r)
+                                   struct aml_dma_request **r)
 {
     assert(d != NULL);
     assert(r != NULL);
     struct aml_dma_linux_par *dma = (struct aml_dma_linux_par *)d;
-    struct aml_dma_request_linux_par *req =
-        (struct aml_dma_request_linux_par *)r;
+    struct aml_dma_request_linux_par *req;
     struct aml_dma_linux_par_request_data *inner_req;
 
+    if (*r == NULL)
+        return -AML_EINVAL;
+    req = (struct aml_dma_request_linux_par *)*r;
     inner_req = aml_vector_get(dma->data.requests, req->uuid);
     if (inner_req == NULL)
         return -AML_EINVAL;
 
-    if (inner_req->type != AML_DMA_REQUEST_TYPE_INVALID) {
+    if (inner_req->type != AML_DMA_REQUEST_TYPE_INVALID)
         pthread_join(inner_req->thread, NULL);
-        if (inner_req->type == AML_DMA_REQUEST_TYPE_PTR) {
-            aml_layout_dense_destroy(&inner_req->dest);
-            aml_layout_dense_destroy(&inner_req->src);
-        }
-    }
+
+    if (inner_req->type == AML_DMA_REQUEST_TYPE_PTR) {
+        aml_layout_dense_destroy(&inner_req->dest);
+        aml_layout_dense_destroy(&inner_req->src);
+    }
 
     pthread_mutex_lock(&dma->data.lock);
     aml_vector_remove(dma->data.requests, inner_req);
     pthread_mutex_unlock(&dma->data.lock);
     aml_dma_request_linux_par_destroy(&req);
+    *r = NULL;
     return 0;
 }
@@ -236,22 +253,28 @@ int aml_dma_linux_par_create(struct aml_dma **dma, size_t nbreqs)
     return 0;
 }
 
-void aml_dma_linux_par_destroy(struct aml_dma **dma)
+void aml_dma_linux_par_destroy(struct aml_dma **d)
 {
-    struct aml_dma *d;
-    struct aml_dma_linux_par *l;
+    struct aml_dma_linux_par *dma;
 
-    if (dma == NULL)
-        return;
-    d = *dma;
-    if (d == NULL)
+    if (d == NULL || *d == NULL)
         return;
+    dma = (struct aml_dma_linux_par *)(*d)->data;
 
-    assert(d->data != NULL);
-    l = (struct aml_dma_linux_par *)d->data;
-    aml_vector_destroy(&l->data.requests);
-    pthread_mutex_destroy(&l->data.lock);
-
-    free(d);
-    *dma = NULL;
+    for (size_t i = 0; i < aml_vector_size(dma->data.requests); i++) {
+        struct aml_dma_linux_par_request_data *req;
+
+        req = aml_vector_get(dma->data.requests, i);
+        if (req->type != AML_DMA_REQUEST_TYPE_INVALID) {
+            pthread_cancel(req->thread);
+            pthread_join(req->thread, NULL);
+        }
+        if (req->type == AML_DMA_REQUEST_TYPE_PTR) {
+            aml_layout_dense_destroy(&req->dest);
+            aml_layout_dense_destroy(&req->src);
+        }
+    }
+    aml_vector_destroy(&dma->data.requests);
+    pthread_mutex_destroy(&dma->data.lock);
+    free(*d);
+    *d = NULL;
 }
@@ -90,6 +90,7 @@ int aml_dma_linux_seq_create_request(struct aml_dma_data *d,
         (struct aml_dma_linux_seq *)d;
     struct aml_dma_request_linux_seq *ret;
     struct aml_dma_linux_seq_request_data *req;
+    int err = AML_SUCCESS;
 
     pthread_mutex_lock(&dma->data.lock);
     req = aml_vector_add(dma->data.requests);
@@ -100,6 +101,10 @@ int aml_dma_linux_seq_create_request(struct aml_dma_data *d,
         dl = va_arg(ap, struct aml_layout *);
         sl = va_arg(ap, struct aml_layout *);
+        if (dl == NULL || sl == NULL) {
+            err = -AML_EINVAL;
+            goto unlock;
+        }
         aml_dma_linux_seq_request_data_init(req,
                                             AML_DMA_REQUEST_TYPE_LAYOUT,
                                             dl, sl);
@@ -111,38 +116,46 @@ int aml_dma_linux_seq_create_request(struct aml_dma_data *d,
         dp = va_arg(ap, void *);
         sp = va_arg(ap, void *);
         sz = va_arg(ap, size_t);
+        if (dp == NULL || sp == NULL || sz == 0) {
+            err = -AML_EINVAL;
+            goto unlock;
+        }
         /* simple 1D layout, none of the parameters really matter, as
          * long as the copy generates a single memcpy.
          */
-        aml_layout_dense_create(&dl, dp, 0, sizeof(size_t), 1,
-                                &sz, NULL, NULL);
-        aml_layout_dense_create(&sl, sp, 0, sizeof(size_t), 1,
-                                &sz, NULL, NULL);
+        aml_layout_dense_create(&dl, dp, 0, 1, 1, &sz, NULL, NULL);
+        aml_layout_dense_create(&sl, sp, 0, 1, 1, &sz, NULL, NULL);
         aml_dma_linux_seq_request_data_init(req,
                                             AML_DMA_REQUEST_TYPE_PTR,
                                             dl, sl);
-    }
-    int uuid = aml_vector_getid(dma->data.requests, req);
-
-    assert(uuid != AML_DMA_REQUEST_TYPE_INVALID);
-    aml_dma_request_linux_seq_create(&ret, uuid);
-    *r = (struct aml_dma_request *)ret;
+    } else
+        err = -AML_EINVAL;
+unlock:
     pthread_mutex_unlock(&dma->data.lock);
-    return 0;
+    if (req->type != AML_DMA_REQUEST_TYPE_INVALID) {
+        int uuid = aml_vector_getid(dma->data.requests, req);
+
+        assert(uuid != AML_DMA_REQUEST_TYPE_INVALID);
+        aml_dma_request_linux_seq_create(&ret, uuid);
+        *r = (struct aml_dma_request *)ret;
+    }
+    return err;
 }
 int aml_dma_linux_seq_destroy_request(struct aml_dma_data *d,
-                                      struct aml_dma_request *r)
+                                      struct aml_dma_request **r)
 {
     assert(d != NULL);
     assert(r != NULL);
     struct aml_dma_linux_seq *dma =
         (struct aml_dma_linux_seq *)d;
-    struct aml_dma_request_linux_seq *req =
-        (struct aml_dma_request_linux_seq *)r;
+    struct aml_dma_request_linux_seq *req;
     struct aml_dma_linux_seq_request_data *inner_req;
 
+    if (*r == NULL)
+        return -AML_EINVAL;
+    req = (struct aml_dma_request_linux_seq *)*r;
     inner_req = aml_vector_get(dma->data.requests, req->uuid);
     if (inner_req == NULL)
         return -AML_EINVAL;
@@ -153,23 +166,26 @@ int aml_dma_linux_seq_destroy_request(struct aml_dma_data *d,
         aml_layout_dense_destroy(&inner_req->src);
     }
 
-    /* enough to remove from request vector */
     aml_vector_remove(dma->data.requests, inner_req);
     pthread_mutex_unlock(&dma->data.lock);
     aml_dma_request_linux_seq_destroy(&req);
+    *r = NULL;
     return 0;
 }
 
 int aml_dma_linux_seq_wait_request(struct aml_dma_data *d,
-                                   struct aml_dma_request *r)
+                                   struct aml_dma_request **r)
 {
     assert(d != NULL);
     assert(r != NULL);
     struct aml_dma_linux_seq *dma = (struct aml_dma_linux_seq *)d;
-    struct aml_dma_request_linux_seq *req =
-        (struct aml_dma_request_linux_seq *)r;
+    struct aml_dma_request_linux_seq *req;
     struct aml_dma_linux_seq_request_data *inner_req;
 
+    if (*r == NULL)
+        return -AML_EINVAL;
+    req = (struct aml_dma_request_linux_seq *)*r;
     inner_req = aml_vector_get(dma->data.requests, req->uuid);
     if (inner_req == NULL)
         return -AML_EINVAL;
...
@@ -183,7 +183,7 @@ int aml_scratch_seq_destroy_request(struct aml_scratch_data *d,
         return -AML_EINVAL;
 
     if (inner_req->type != AML_SCRATCH_REQUEST_TYPE_NOOP)
-        aml_dma_cancel(scratch->data.dma, inner_req->dma_req);
+        aml_dma_cancel(scratch->data.dma, &inner_req->dma_req);
 
     /* destroy removes the tile from the scratch */
     if (inner_req->type == AML_SCRATCH_REQUEST_TYPE_PUSH)
@@ -215,7 +215,7 @@ int aml_scratch_seq_wait_request(struct aml_scratch_data *d,
     /* wait for completion of the request */
     if (inner_req->type != AML_SCRATCH_REQUEST_TYPE_NOOP)
-        aml_dma_wait(scratch->data.dma, inner_req->dma_req);
+        aml_dma_wait(scratch->data.dma, &inner_req->dma_req);
 
     /* cleanup a completed request. In case of push, free up the tile */
     pthread_mutex_lock(&scratch->data.lock);
...
@@ -9,56 +9,94 @@
 *******************************************************************************/
 #include "aml.h"
-#include "aml/area/linux.h"
+#include "aml/layout/dense.h"
 #include "aml/dma/linux-par.h"
-#include "aml/tiling/1d.h"
 #include <assert.h>
 
-#define TILESIZE (2)
-#define NBTILES (16)
-
 int main(int argc, char *argv[])
 {
-    struct aml_tiling *tiling;
     struct aml_dma *dma;
-    void *dst, *src;
+    size_t isz = 1<<16;
+    int idest[isz];
+    int isrc[isz];
+    struct aml_layout *idl, *isl;
 
     /* library initialization */
     aml_init(&argc, &argv);
 
-    /* initialize all the supporting struct */
-    assert(!aml_tiling_1d_create(&tiling, TILESIZE*_SC_PAGE_SIZE,
-                                 TILESIZE*_SC_PAGE_SIZE*NBTILES));
+    /* support data structures */
+    assert(!aml_layout_dense_create(&idl, idest, 0, sizeof(int), 1, &isz,
+                                    NULL, NULL));
+    assert(!aml_layout_dense_create(&isl, isrc, 0, sizeof(int), 1, &isz,
+                                    NULL, NULL));
+    for (size_t i = 0; i < isz; i++) {
+        idest[i] = 42;
+        isrc[i] = 24;
+    }
+
+    /* invalid create input */
+    assert(aml_dma_linux_par_create(NULL, 1) == -AML_EINVAL);
+
+    /* invalid requests */
     assert(!aml_dma_linux_par_create(&dma, 1));
+    assert(aml_dma_copy(dma, 42) == -AML_EINVAL);
+    assert(aml_dma_copy(dma, AML_DMA_REQUEST_TYPE_PTR, NULL, isrc, isz) ==
+           -AML_EINVAL);
+    assert(aml_dma_copy(dma, AML_DMA_REQUEST_TYPE_PTR, idest, NULL, isz) ==
+           -AML_EINVAL);
+    assert(aml_dma_copy(dma, AML_DMA_REQUEST_TYPE_PTR, idest, isrc, 0) ==
+           -AML_EINVAL);
+    assert(aml_dma_copy(dma, AML_DMA_REQUEST_TYPE_LAYOUT, NULL, isl) ==
+           -AML_EINVAL);
+    assert(aml_dma_copy(dma, AML_DMA_REQUEST_TYPE_LAYOUT, idl, NULL) ==
+           -AML_EINVAL);
 
-    /* allocate some memory */
-    src = aml_area_mmap(&aml_area_linux, NULL, TILESIZE*_SC_PAGE_SIZE*NBTILES);
-    assert(src != NULL);
-    dst = aml_area_mmap(&aml_area_linux, NULL, TILESIZE*_SC_PAGE_SIZE*NBTILES);
-    assert(dst != NULL);
-
-    memset(src, 42, TILESIZE*_SC_PAGE_SIZE*NBTILES);
-    memset(dst, 24, TILESIZE*_SC_PAGE_SIZE*NBTILES);
+    struct aml_dma_request *r1, *r2;
+    /* force dma to increase its requests queue */
+    assert(!aml_dma_async_copy(dma, &r1, AML_DMA_REQUEST_TYPE_LAYOUT,
+                               idl, isl));
+    assert(!aml_dma_async_copy(dma, &r2, AML_DMA_REQUEST_TYPE_LAYOUT,
+                               idl, isl));
+    assert(aml_dma_wait(dma, NULL) == -AML_EINVAL);
+    assert(!aml_dma_wait(dma, &r1));
+    assert(!aml_dma_wait(dma, &r2));
+    aml_dma_linux_par_destroy(&dma);
+
+    /* cancel a request on the fly */
+    assert(!aml_dma_linux_par_create(&dma, 1));
+    assert(aml_dma_cancel(dma, NULL) == -AML_EINVAL);
+    assert(!aml_dma_async_copy(dma, &r1, AML_DMA_REQUEST_TYPE_LAYOUT,
+                               idl, isl));
+    assert(!aml_dma_cancel(dma, &r1));
 
-    /* move some stuff by copy */
-    struct aml_dma_request *requests[NBTILES];
-    for(int i = 0; i < NBTILES; i++) {
-        void *d = aml_tiling_tilestart(tiling, dst, i);
-        void *s = aml_tiling_tilestart(tiling, src, i);
-        aml_dma_async_copy(dma, &requests[i], AML_DMA_REQUEST_TYPE_PTR,
-                           d, s, TILESIZE*_SC_PAGE_SIZE);
+    /* destroy a running dma */
+    assert(!aml_dma_async_copy(dma, &r1, AML_DMA_REQUEST_TYPE_LAYOUT,
+                               idl, isl));
+    aml_dma_linux_par_destroy(&dma);
+
+    /* move data around */
+    assert(!aml_dma_linux_par_create(&dma, 1));
+    struct aml_dma_request *requests[16];
+    for (int i = 0; i < 16; i++) {
+        size_t sz = isz/16;
+        size_t off = i*sz;
+        void *dptr = (void *)&(idest[off]);
+        void *sptr = (void *)&(isrc[off]);
+        assert(!aml_dma_async_copy(dma, &requests[i],
+                                   AML_DMA_REQUEST_TYPE_PTR,
+                                   dptr, sptr, sz*sizeof(int)));
+        assert(requests[i] != NULL);
     }
-    for(int i = 0; i < NBTILES; i++)
-        aml_dma_wait(dma, requests[i]);
+    for(int i = 0; i < 16; i++)
+        assert(!aml_dma_wait(dma, &requests[i]));
 
-    assert(!memcmp(src, dst, TILESIZE*_SC_PAGE_SIZE*NBTILES));
+    assert(!memcmp(isrc, idest, isz*sizeof(int)));
 
     /* delete everything */
     aml_dma_linux_par_destroy(&dma);
-    aml_area_munmap(&aml_area_linux, dst, TILESIZE*_SC_PAGE_SIZE*NBTILES);
-    aml_area_munmap(&aml_area_linux, src, TILESIZE*_SC_PAGE_SIZE*NBTILES);
-    aml_tiling_1d_destroy(&tiling);
+    aml_layout_dense_destroy(&idl);
+    aml_layout_dense_destroy(&isl);
     aml_finalize();
     return 0;
 }
@@ -9,58 +9,91 @@
 *******************************************************************************/
 #include "aml.h"
-#include "aml/area/linux.h"
+#include "aml/layout/dense.h"
 #include "aml/dma/linux-seq.h"
-#include "aml/tiling/1d.h"
 #include <assert.h>
 
-#define TILESIZE (2)
-#define NBTILES (16)
-
 int main(int argc, char *argv[])
 {
-    struct aml_tiling *tiling;
     struct aml_dma *dma;
-    void *dst, *src;
+    size_t isz = 1<<16;