Commit 9a97ce3a authored by Swann Perarnau

[refactor] remove va_args from DMAs

Now that only one type of request is allowed (layouts), remove va_args
from the DMA API, making it significantly cleaner.

Fix #49.
parent 2b3a44c8
Pipeline #8318 passed with stages in 9 minutes and 26 seconds
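
For callers, the change is mechanical: drop the AML_DMA_REQUEST_TYPE_* selector and pass the two layouts as typed arguments. A before/after sketch of a call site, mirroring the test and scratch updates further down (dma, dest and src are assumed to be already initialized):

/* before: request type selector plus variadic layout arguments */
aml_dma_copy(dma, AML_DMA_REQUEST_TYPE_LAYOUT, dest, src);
/* after: destination and source layouts are explicit, typed parameters */
aml_dma_copy(dma, dest, src);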
@@ -854,14 +854,14 @@ struct aml_dma_ops {
* @param dma: the DMA implementation's internal data.
* @param req[out]: the request handle to manage termination
* of the movement.
* @param type: A valid AML_DMA_REQUEST_TYPE_* specifying the kind
* of operation to perform.
* @param args: list of variadic arguments provided to aml_dma_copy()
* @param dest: layout describing the destination.
* @param src: layout describing the source.
* @return an AML error code.
**/
int (*create_request)(struct aml_dma_data *dma,
struct aml_dma_request **req,
int type, va_list args);
struct aml_layout *dest,
struct aml_layout *src);
/**
* Destroy the request handle. If the data movement is still ongoing,
@@ -903,10 +903,12 @@ struct aml_dma {
/**
* Requests a synchronous data copy between two different buffers.
* @param dma: an initialized DMA structure.
* Variadic arguments: implementation-specific.
* @param dest: layout describing the destination.
* @param src: layout describing the source.
* @return 0 if successful; an error code otherwise.
**/
int aml_dma_copy(struct aml_dma *dma, int type, ...);
int aml_dma_copy(struct aml_dma *dma, struct aml_layout *dest,
struct aml_layout *src);
/**
* Requests a data copy between two different buffers. This is an asynchronous
@@ -914,11 +916,13 @@ int aml_dma_copy(struct aml_dma *dma, int type, ...);
* @param dma: an initialized DMA structure.
* @param req: an address where the pointer to the newly assigned DMA request
* will be stored.
* Variadic arguments: implementation-specific.
* @param dest: layout describing the destination.
* @param src: layout describing the source.
* @return 0 if successful; an error code otherwise.
**/
int aml_dma_async_copy(struct aml_dma *dma, struct aml_dma_request **req,
int type, ...);
struct aml_layout *dest,
struct aml_layout *src);
/**
* Waits for an asynchronous DMA request to complete.
......
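
Putting the updated declarations together, here is a minimal usage sketch of the new public API. It assumes the public header is <aml.h> and that the DMA and the two layouts were created elsewhere; copy_through_dma is an illustrative helper, not part of AML:

#include <aml.h>

/* Illustrative helper: move src into dest through an existing DMA,
 * first synchronously, then via the asynchronous request path. */
int copy_through_dma(struct aml_dma *dma,
                     struct aml_layout *dest, struct aml_layout *src)
{
        struct aml_dma_request *req;
        int err;

        /* synchronous: the generic layer creates the request and waits on it */
        err = aml_dma_copy(dma, dest, src);
        if (err != AML_SUCCESS)
                return err;

        /* asynchronous: obtain a request handle now, wait on it later */
        err = aml_dma_async_copy(dma, &req, dest, src);
        if (err != AML_SUCCESS)
                return err;
        return aml_dma_wait(dma, &req);
}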
@@ -76,18 +76,16 @@ int aml_copy_layout_generic(struct aml_layout *dst,
* abstract the request creation after this layer.
******************************************************************************/
int aml_dma_copy(struct aml_dma *dma, int type, ...)
int aml_dma_copy(struct aml_dma *dma, struct aml_layout *dest,
struct aml_layout *src)
{
va_list ap;
int ret;
struct aml_dma_request *req;
if (dma == NULL)
if (dma == NULL || dest == NULL || src == NULL)
return -AML_EINVAL;
va_start(ap, type);
ret = dma->ops->create_request(dma->data, &req, type, ap);
va_end(ap);
ret = dma->ops->create_request(dma->data, &req, dest, src);
if (ret != AML_SUCCESS)
return ret;
ret = dma->ops->wait_request(dma->data, &req);
@@ -95,18 +93,12 @@ int aml_dma_copy(struct aml_dma *dma, int type, ...)
}
int aml_dma_async_copy(struct aml_dma *dma, struct aml_dma_request **req,
int type, ...)
struct aml_layout *dest, struct aml_layout *src)
{
va_list ap;
int ret;
if (dma == NULL || req == NULL)
if (dma == NULL || req == NULL || dest == NULL || src == NULL)
return -AML_EINVAL;
va_start(ap, type);
ret = dma->ops->create_request(dma->data, req, type, ap);
va_end(ap);
return ret;
return dma->ops->create_request(dma->data, req, dest, src);
}
int aml_dma_cancel(struct aml_dma *dma, struct aml_dma_request **req)
......
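
On the backend side, the generic layer above now owns the NULL checks and simply forwards the two layouts, so an implementation's create_request shrinks to recording dest and src. A hypothetical skeleton of the new op signature and its wiring (the my_ names are illustrative only; the real linux_par and linux_seq versions follow below):

/* Skeleton only: shows the new create_request shape, not a working backend. */
static int my_dma_create_request(struct aml_dma_data *d,
                                 struct aml_dma_request **r,
                                 struct aml_layout *dest,
                                 struct aml_layout *src)
{
        /* The generic aml_dma_copy()/aml_dma_async_copy() entry points have
         * already rejected NULL arguments. A real backend builds a request
         * from dest/src here and publishes it through *r. */
        (void)d; (void)dest; (void)src;
        *r = NULL;
        return -AML_EINVAL; /* placeholder: this sketch creates no request */
}

struct aml_dma_ops my_dma_ops = {
        .create_request = my_dma_create_request,
        /* .destroy_request and .wait_request are omitted from this sketch */
};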
@@ -30,12 +30,11 @@
******************************************************************************/
int aml_dma_request_linux_par_copy_init(struct aml_dma_request_linux_par *req,
int type,
struct aml_layout *dest,
struct aml_layout *src)
{
assert(req != NULL);
req->type = type;
req->type = AML_DMA_REQUEST_TYPE_LAYOUT;
req->dest = dest;
req->src = src;
return 0;
@@ -72,40 +71,25 @@ struct aml_dma_linux_par_ops aml_dma_linux_par_inner_ops = {
int aml_dma_linux_par_create_request(struct aml_dma_data *d,
struct aml_dma_request **r,
int type, va_list ap)
struct aml_layout *dest,
struct aml_layout *src)
{
/* NULL checks done by the generic API */
assert(d != NULL);
assert(r != NULL);
assert(dest != NULL);
assert(src != NULL);
struct aml_dma_linux_par *dma =
(struct aml_dma_linux_par *)d;
struct aml_dma_request_linux_par *req;
int err = AML_SUCCESS;
pthread_mutex_lock(&dma->data.lock);
req = aml_vector_add(dma->data.requests);
/* init the request */
if (type == AML_DMA_REQUEST_TYPE_LAYOUT) {
struct aml_layout *dl, *sl;
dl = va_arg(ap, struct aml_layout *);
sl = va_arg(ap, struct aml_layout *);
if (dl == NULL || sl == NULL) {
err = -AML_EINVAL;
goto unlock;
}
aml_dma_request_linux_par_copy_init(req,
AML_DMA_REQUEST_TYPE_LAYOUT,
dl, sl);
} else
err = -AML_EINVAL;
unlock:
aml_dma_request_linux_par_copy_init(req, dest, src);
pthread_mutex_unlock(&dma->data.lock);
if (req->type != AML_DMA_REQUEST_TYPE_INVALID) {
pthread_create(&req->thread, NULL, dma->ops.do_thread, req);
*r = (struct aml_dma_request *)req;
}
return err;
pthread_create(&req->thread, NULL, dma->ops.do_thread, req);
*r = (struct aml_dma_request *)req;
return 0;
}
int aml_dma_linux_par_destroy_request(struct aml_dma_data *d,
......
@@ -30,12 +30,11 @@
******************************************************************************/
int aml_dma_request_linux_seq_copy_init(struct aml_dma_request_linux_seq *req,
int type,
struct aml_layout *dest,
struct aml_layout *src)
{
assert(req != NULL);
req->type = type;
req->type = AML_DMA_REQUEST_TYPE_LAYOUT;
req->dest = dest;
req->src = src;
return 0;
@@ -70,38 +69,24 @@ struct aml_dma_linux_seq_ops aml_dma_linux_seq_inner_ops = {
int aml_dma_linux_seq_create_request(struct aml_dma_data *d,
struct aml_dma_request **r,
int type, va_list ap)
struct aml_layout *dest,
struct aml_layout *src)
{
/* NULL checks done by the generic API */
assert(d != NULL);
assert(r != NULL);
assert(dest != NULL);
assert(src != NULL);
struct aml_dma_linux_seq *dma =
(struct aml_dma_linux_seq *)d;
struct aml_dma_request_linux_seq *req;
int err = AML_SUCCESS;
pthread_mutex_lock(&dma->data.lock);
req = aml_vector_add(dma->data.requests);
/* init the request */
if (type == AML_DMA_REQUEST_TYPE_LAYOUT) {
struct aml_layout *dl, *sl;
dl = va_arg(ap, struct aml_layout *);
sl = va_arg(ap, struct aml_layout *);
if (dl == NULL || sl == NULL) {
err = -AML_EINVAL;
goto unlock;
}
aml_dma_request_linux_seq_copy_init(req,
AML_DMA_REQUEST_TYPE_LAYOUT,
dl, sl);
} else
err = -AML_EINVAL;
unlock:
aml_dma_request_linux_seq_copy_init(req, dest, src);
pthread_mutex_unlock(&dma->data.lock);
if (req->type != AML_DMA_REQUEST_TYPE_INVALID)
*r = (struct aml_dma_request *)req;
return err;
*r = (struct aml_dma_request *)req;
return 0;
}
int aml_dma_linux_seq_destroy_request(struct aml_dma_data *d,
......
@@ -65,8 +65,7 @@ void *aml_scratch_par_do_thread(void *arg)
(struct aml_scratch_request_par *)arg;
struct aml_scratch_par *scratch = req->scratch;
aml_dma_copy(scratch->data.dma, AML_DMA_REQUEST_TYPE_LAYOUT,
req->dst, req->src);
aml_dma_copy(scratch->data.dma, req->dst, req->src);
return NULL;
}
......
@@ -64,7 +64,6 @@ int aml_scratch_seq_doit(struct aml_scratch_seq_data *scratch,
assert(scratch != NULL);
assert(req != NULL);
return aml_dma_async_copy(scratch->dma, &req->dma_req,
AML_DMA_REQUEST_TYPE_LAYOUT,
req->dst, req->src);
}
......
@@ -38,18 +38,13 @@ int main(int argc, char *argv[])
/* invalid requests */
assert(!aml_dma_linux_par_create(&dma, 1));
assert(aml_dma_copy(dma, 42) == -AML_EINVAL);
assert(aml_dma_copy(dma, AML_DMA_REQUEST_TYPE_LAYOUT, NULL, isl) ==
-AML_EINVAL);
assert(aml_dma_copy(dma, AML_DMA_REQUEST_TYPE_LAYOUT, idl, NULL) ==
-AML_EINVAL);
assert(aml_dma_copy(dma, NULL, isl) == -AML_EINVAL);
assert(aml_dma_copy(dma, idl, NULL) == -AML_EINVAL);
struct aml_dma_request *r1, *r2;
/* force dma to increase its requests queue */
assert(!aml_dma_async_copy(dma, &r1, AML_DMA_REQUEST_TYPE_LAYOUT,
idl, isl));
assert(!aml_dma_async_copy(dma, &r2, AML_DMA_REQUEST_TYPE_LAYOUT,
idl, isl));
assert(!aml_dma_async_copy(dma, &r1, idl, isl));
assert(!aml_dma_async_copy(dma, &r2, idl, isl));
assert(aml_dma_wait(dma, NULL) == -AML_EINVAL);
assert(!aml_dma_wait(dma, &r1));
@@ -59,13 +54,11 @@ int main(int argc, char *argv[])
/* cancel a request on the fly */
assert(!aml_dma_linux_par_create(&dma, 1));
assert(aml_dma_cancel(dma, NULL) == -AML_EINVAL);
assert(!aml_dma_async_copy(dma, &r1, AML_DMA_REQUEST_TYPE_LAYOUT,
idl, isl));
assert(!aml_dma_async_copy(dma, &r1, idl, isl));
assert(!aml_dma_cancel(dma, &r1));
/* destroy a running dma */
assert(!aml_dma_async_copy(dma, &r1, AML_DMA_REQUEST_TYPE_LAYOUT,
idl, isl));
assert(!aml_dma_async_copy(dma, &r1, idl, isl));
aml_dma_linux_par_destroy(&dma);
/* move data around */
@@ -82,7 +75,6 @@ int main(int argc, char *argv[])
aml_layout_dense_create(&layouts[i][1], sptr, 0, sizeof(int),
1, &sz, NULL, NULL);
assert(!aml_dma_async_copy(dma, &requests[i],
AML_DMA_REQUEST_TYPE_LAYOUT,
layouts[i][0], layouts[i][1]));
assert(requests[i] != NULL);
}
......
@@ -38,18 +38,13 @@ int main(int argc, char *argv[])
/* invalid requests */
assert(!aml_dma_linux_seq_create(&dma, 1));
assert(aml_dma_copy(dma, 42) == -AML_EINVAL);
assert(aml_dma_copy(dma, AML_DMA_REQUEST_TYPE_LAYOUT, NULL, isl) ==
-AML_EINVAL);
assert(aml_dma_copy(dma, AML_DMA_REQUEST_TYPE_LAYOUT, idl, NULL) ==
-AML_EINVAL);
assert(aml_dma_copy(dma, NULL, isl) == -AML_EINVAL);
assert(aml_dma_copy(dma, idl, NULL) == -AML_EINVAL);
struct aml_dma_request *r1, *r2;
/* force dma to increase its requests queue */
assert(!aml_dma_async_copy(dma, &r1, AML_DMA_REQUEST_TYPE_LAYOUT,
idl, isl));
assert(!aml_dma_async_copy(dma, &r2, AML_DMA_REQUEST_TYPE_LAYOUT,
idl, isl));
assert(!aml_dma_async_copy(dma, &r1, idl, isl));
assert(!aml_dma_async_copy(dma, &r2, idl, isl));
assert(aml_dma_wait(dma, NULL) == -AML_EINVAL);
assert(!aml_dma_wait(dma, &r1));
@@ -57,14 +52,12 @@ int main(int argc, char *argv[])
/* cancel a request on the fly */
assert(aml_dma_cancel(dma, NULL) == -AML_EINVAL);
assert(!aml_dma_async_copy(dma, &r1, AML_DMA_REQUEST_TYPE_LAYOUT,
idl, isl));
assert(!aml_dma_async_copy(dma, &r1, idl, isl));
assert(!aml_dma_cancel(dma, &r1));
/* destroy a running dma */
assert(!aml_dma_async_copy(dma, &r1, AML_DMA_REQUEST_TYPE_LAYOUT,
idl, isl));
assert(!aml_dma_async_copy(dma, &r1, idl, isl));
aml_dma_linux_seq_destroy(&dma);
/* move data around */
@@ -81,7 +74,6 @@ int main(int argc, char *argv[])
aml_layout_dense_create(&layouts[i][1], sptr, 0, sizeof(int),
1, &sz, NULL, NULL);
assert(!aml_dma_async_copy(dma, &requests[i],
AML_DMA_REQUEST_TYPE_LAYOUT,
layouts[i][0], layouts[i][1]));
assert(requests[i] != NULL);
}
......