Commit 1a4d44d0 authored by Swann Perarnau's avatar Swann Perarnau

[refactor] convert dma to new errors, create

Implement #31 for dma.
Improve error management.
Checkpatch dma
parent f6e2c2d7
......@@ -14,9 +14,14 @@
# files
include/aml.h
include/aml/area/linux.h
include/aml/dma/linux-seq.h
include/aml/dma/linux-par.h
include/aml/utils/bitmap.h
include/aml/utils/error.h
src/aml.c
src/area/area.c
src/area/linux.c
src/dma/dma.c
src/dma/dma_linux_seq.c
src/dma/dma_linux_par.c
src/utils/error.c
......@@ -184,7 +184,7 @@ int main(int argc, char* argv[])
flops/1e9);
aml_scratch_par_destroy(&sa);
aml_scratch_par_destroy(&sb);
aml_dma_linux_seq_destroy(&dma);
aml_dma_linux_seq_fini(&dma);
aml_area_munmap(slow, a, memsize);
aml_area_munmap(slow, b, memsize);
aml_area_munmap(fast, c, memsize);
......
......@@ -120,7 +120,7 @@ int main(int argc, char *argv[])
aml_scratch_seq_destroy(&sa);
aml_scratch_seq_destroy(&sb);
aml_dma_linux_par_destroy(&dma);
aml_dma_linux_par_fini(&dma);
aml_area_munmap(slow, a, memsize);
aml_area_munmap(slow, b, memsize);
aml_area_munmap(fast, c, memsize);
......
......@@ -133,7 +133,7 @@ int main(int argc, char *argv[])
aml_scratch_par_destroy(&sa);
aml_scratch_par_destroy(&sb);
aml_dma_linux_seq_destroy(&dma);
aml_dma_linux_seq_fini(&dma);
aml_area_munmap(slow, a, memsize);
aml_area_munmap(slow, b, memsize);
aml_area_munmap(fast, c, memsize);
......
......@@ -144,7 +144,7 @@ int main(int argc, char *argv[])
aml_scratch_par_destroy(&sa);
aml_scratch_par_destroy(&sb);
aml_dma_linux_seq_destroy(&dma);
aml_dma_linux_seq_fini(&dma);
aml_area_munmap(slow, a, memsize);
aml_area_munmap(slow, b, memsize);
aml_area_munmap(fast, c, memsize);
......
......@@ -41,9 +41,9 @@ struct aml_dma_linux_par_data {
};
struct aml_dma_linux_par_ops {
void *(*do_thread)(void *);
int (*do_copy)(struct aml_dma_linux_par_data *,
struct aml_dma_request_linux_par *, int tid);
void *(*do_thread)(void *thread_data);
int (*do_copy)(struct aml_dma_linux_par_data *data,
struct aml_dma_request_linux_par *request, int tid);
};
struct aml_dma_linux_par {
......@@ -62,39 +62,42 @@ struct aml_dma_linux_par {
(sizeof(struct aml_dma_linux_par) + \
sizeof(struct aml_dma))
/*
/**
* Allocates and initializes a new parallel DMA.
* "dma": an address where the pointer to the newly allocated DMA structure
* will be stored.
* Variadic arguments:
* - "nbreqs": an argument of type size_t; the initial number of slots for
* asynchronous request that are in-flight (will be increased
* automatically if necessary).
* - "nbthreads": an argument of type size_t; the number of threads to launch
* for each request.
* Returns 0 if successful; an error code otherwise.
*/
int aml_dma_linux_par_create(struct aml_dma **, ...);
/*
* Initializes a new parallel DMA. This is a varargs-variant of the
* aml_dma_linux_par_vinit() routine.
* "dma": an allocated DMA structure.
* Variadic arguments: see aml_dma_linux_par_create().
* Returns 0 if successful; an error code otherwise.
*/
int aml_dma_linux_par_init(struct aml_dma *, ...);
/*
*
* @param dma an address where the pointer to the newly allocated DMA structure
* will be stored.
* @param nbreqs the initial number of slots for asynchronous requests that are
* in-flight (will be increased automatically if necessary).
* @param nbthreads the number of threads to launch for each request.
*
* @return 0 if successful; an error code otherwise.
**/
int aml_dma_linux_par_create(struct aml_dma **dma, size_t nbreqs,
size_t nbthreads);
/**
* Initializes a new parallel DMA.
* "dma": an allocated DMA structure.
* "args": see the variadic arguments of aml_dma_linux_par_create().
* Returns 0 if successful; an error code otherwise.
*/
int aml_dma_linux_par_vinit(struct aml_dma *, va_list);
/*
* Tears down an initialized parallel DMA.
* "dma": an initialized DMA structure.
* Returns 0 if successful; an error code otherwise.
*
* @param dma a pointer to a dma declared with the AML_DMA_LINUX_PAR_DECL macro
* @param nbreqs the initial number of slots for asynchronous requests that are
* in-flight (will be increased automatically if necessary).
* @param nbthreads the number of threads to launch for each request.
*
* @return 0 if successful; an error code otherwise.
**/
int aml_dma_linux_par_init(struct aml_dma *dma, size_t nbreqs,
size_t nbthreads);
/**
* Finalize a parallel DMA
**/
void aml_dma_linux_par_fini(struct aml_dma *dma);
/**
* Tears down a parallel DMA created with aml_dma_linux_par_create.
* @param dma the address of a pointer to a parallel dma. Will be NULL after.
*/
int aml_dma_linux_par_destroy(struct aml_dma *);
void aml_dma_linux_par_destroy(struct aml_dma **dma);
#endif // AML_LINUX_DMA_LINUX_PAR_H
......@@ -52,38 +52,38 @@ struct aml_dma_linux_seq {
(sizeof(struct aml_dma_linux_seq) + \
sizeof(struct aml_dma))
/*
/**
* Allocates and initializes a new sequential DMA.
* "dma": an address where the pointer to the newly allocated DMA structure
* will be stored.
* Variadic arguments:
* - "nbreqs": an argument of type size_t; the initial number of slots for
* asynchronous request that are in-flight (will be increased
* automatically if necessary).
* Returns 0 if successful; an error code otherwise.
*/
int aml_dma_linux_seq_create(struct aml_dma **dma, ...);
/*
* Initializes a new sequential DMA. This is a varargs-variant of the
* aml_dma_linux_seq_vinit() routine.
* "dma": an allocated DMA structure.
* Variadic arguments: see aml_dma_linux_seq_create().
* Returns 0 if successful; an error code otherwise.
*/
int aml_dma_linux_seq_init(struct aml_dma *dma, ...);
/*
*
* @param dma an address where the pointer to the newly allocated DMA structure
* will be stored.
* @param nbreqs the initial number of slots for asynchronous requests that are
* in-flight (will be increased automatically if necessary).
*
* @return 0 if successful; an error code otherwise.
**/
int aml_dma_linux_seq_create(struct aml_dma **dma, size_t nbreqs);
/**
* Initializes a new sequential DMA.
* "dma": an allocated DMA structure.
* "args": see the variadic arguments of aml_dma_linux_seq_create().
* Returns 0 if successful; an error code otherwise.
*/
int aml_dma_linux_seq_vinit(struct aml_dma *dma, va_list args);
/*
* Tears down an initialized sequential DMA.
* "dma": an initialized DMA structure.
* Returns 0 if successful; an error code otherwise.
*
* @param dma a pointer to a dma declared with the AML_DMA_LINUX_SEQ_DECL macro
 * @param nbreqs the initial number of slots for asynchronous requests that are
 * in-flight (will be increased automatically if necessary).
*
* @return 0 if successful; an error code otherwise.
**/
int aml_dma_linux_seq_init(struct aml_dma *dma, size_t nbreqs);
/**
* Finalize a sequential DMA
**/
void aml_dma_linux_seq_fini(struct aml_dma *dma);
/**
* Tears down a sequential DMA created with aml_dma_linux_seq_create.
* @param dma the address of a pointer to a sequential dma. Will be NULL after.
*/
int aml_dma_linux_seq_destroy(struct aml_dma *dma);
void aml_dma_linux_seq_destroy(struct aml_dma **dma);
/* Performs a copy request.
* "dma" the dma_linux_seq_data associated with a linux_seq dma.
......
......@@ -26,6 +26,7 @@ int aml_dma_copy(struct aml_dma *dma, ...)
va_list ap;
int ret;
struct aml_dma_request *req;
va_start(ap, dma);
ret = dma->ops->create_request(dma->data, &req,
AML_DMA_REQUEST_TYPE_COPY, ap);
......@@ -40,6 +41,7 @@ int aml_dma_async_copy(struct aml_dma *dma, struct aml_dma_request **req, ...)
assert(req != NULL);
va_list ap;
int ret;
va_start(ap, req);
ret = dma->ops->create_request(dma->data, req,
AML_DMA_REQUEST_TYPE_COPY, ap);
......
......@@ -60,7 +60,7 @@ void *aml_dma_linux_par_do_thread(void *arg)
struct aml_dma_linux_par_thread_data *data =
(struct aml_dma_linux_par_thread_data *)arg;
if(data->req->type == AML_DMA_REQUEST_TYPE_COPY)
if (data->req->type == AML_DMA_REQUEST_TYPE_COPY)
data->dma->ops.do_copy(&data->dma->data, data->req, data->tid);
return NULL;
}
......@@ -75,10 +75,10 @@ int aml_dma_linux_par_do_copy(struct aml_dma_linux_par_data *dma,
size_t nbthreads = dma->nbthreads;
size_t chunksize = req->size / nbthreads;
void *dest = (void*)((intptr_t)req->dest + tid * chunksize);
void *src = (void*)((intptr_t)req->src + tid * chunksize);
void *dest = (void *)((intptr_t)req->dest + tid * chunksize);
void *src = (void *)((intptr_t)req->src + tid * chunksize);
if(tid == nbthreads - 1 && req->size > chunksize * nbthreads)
if (tid == nbthreads - 1 && req->size > chunksize * nbthreads)
chunksize += req->size % nbthreads;
memcpy(dest, src, chunksize);
......@@ -109,11 +109,11 @@ int aml_dma_linux_par_create_request(struct aml_dma_data *d,
req = aml_vector_add(&dma->data.requests);
/* init the request */
if(type == AML_DMA_REQUEST_TYPE_COPY)
{
if (type == AML_DMA_REQUEST_TYPE_COPY) {
struct aml_tiling *dt, *st;
void *dptr, *sptr;
int dtid, stid;
dt = va_arg(ap, struct aml_tiling *);
dptr = va_arg(ap, void *);
dtid = va_arg(ap, int);
......@@ -125,9 +125,9 @@ int aml_dma_linux_par_create_request(struct aml_dma_data *d,
}
pthread_mutex_unlock(&dma->data.lock);
for(int i = 0; i < dma->data.nbthreads; i++)
{
for (int i = 0; i < dma->data.nbthreads; i++) {
struct aml_dma_linux_par_thread_data *rd = &req->thread_data[i];
rd->req = req;
rd->dma = dma;
rd->tid = i;
......@@ -149,13 +149,12 @@ int aml_dma_linux_par_destroy_request(struct aml_dma_data *d,
(struct aml_dma_request_linux_par *)r;
/* we cancel and join, instead of killing, for a cleaner result */
for(int i = 0; i < dma->data.nbthreads; i++)
{
for (int i = 0; i < dma->data.nbthreads; i++) {
pthread_cancel(req->thread_data[i].thread);
pthread_join(req->thread_data[i].thread, NULL);
}
if(req->type == AML_DMA_REQUEST_TYPE_COPY)
if (req->type == AML_DMA_REQUEST_TYPE_COPY)
aml_dma_request_linux_par_copy_destroy(req);
pthread_mutex_lock(&dma->data.lock);
......@@ -173,11 +172,11 @@ int aml_dma_linux_par_wait_request(struct aml_dma_data *d,
struct aml_dma_request_linux_par *req =
(struct aml_dma_request_linux_par *)r;
for(int i = 0; i < dma->data.nbthreads; i++)
for (int i = 0; i < dma->data.nbthreads; i++)
pthread_join(req->thread_data[i].thread, NULL);
/* destroy a completed request */
if(req->type == AML_DMA_REQUEST_TYPE_COPY)
if (req->type == AML_DMA_REQUEST_TYPE_COPY)
aml_dma_request_linux_par_copy_destroy(req);
pthread_mutex_lock(&dma->data.lock);
......@@ -196,70 +195,88 @@ struct aml_dma_ops aml_dma_linux_par_ops = {
* Init functions:
******************************************************************************/
int aml_dma_linux_par_create(struct aml_dma **d, ...)
int aml_dma_linux_par_create(struct aml_dma **d, size_t nbreqs,
size_t nbthreads)
{
va_list ap;
struct aml_dma *ret = NULL;
intptr_t baseptr, dataptr;
va_start(ap, d);
int err;
if (d == NULL)
return -AML_EINVAL;
/* alloc */
baseptr = (intptr_t) calloc(1, AML_DMA_LINUX_PAR_ALLOCSIZE);
if (baseptr == 0) {
*d = NULL;
return -AML_ENOMEM;
}
dataptr = baseptr + sizeof(struct aml_dma);
ret = (struct aml_dma *)baseptr;
ret->data = (struct aml_dma_data *)dataptr;
ret->ops = &aml_dma_linux_par_ops;
aml_dma_linux_par_vinit(ret, ap);
err = aml_dma_linux_par_init(ret, nbreqs, nbthreads);
if (err) {
*d = NULL;
free(ret);
return err;
}
va_end(ap);
*d = ret;
return 0;
}
int aml_dma_linux_par_vinit(struct aml_dma *d, va_list ap)
int aml_dma_linux_par_init(struct aml_dma *d, size_t nbreqs,
size_t nbthreads)
{
d->ops = &aml_dma_linux_par_ops;
struct aml_dma_linux_par *dma = (struct aml_dma_linux_par *)d->data;
struct aml_dma_linux_par *dma;
if (d == NULL || d->data == NULL)
return -AML_EINVAL;
dma = (struct aml_dma_linux_par *)d->data;
dma->ops = aml_dma_linux_par_inner_ops;
/* allocate request array */
size_t nbreqs = va_arg(ap, size_t);
dma->data.nbthreads = va_arg(ap, size_t);
/* allocate request array */
dma->data.nbthreads = nbthreads;
aml_vector_init(&dma->data.requests, nbreqs,
sizeof(struct aml_dma_request_linux_par),
offsetof(struct aml_dma_request_linux_par, type),
AML_DMA_REQUEST_TYPE_INVALID);
for(int i = 0; i < nbreqs; i++)
{
for (int i = 0; i < nbreqs; i++) {
struct aml_dma_request_linux_par *req =
aml_vector_get(&dma->data.requests, i);
req->thread_data = calloc(dma->data.nbthreads,
sizeof(struct aml_dma_linux_par_thread_data));
sizeof(struct aml_dma_linux_par_thread_data));
}
pthread_mutex_init(&dma->data.lock, NULL);
return 0;
}
int aml_dma_linux_par_init(struct aml_dma *d, ...)
{
int err;
va_list ap;
va_start(ap, d);
err = aml_dma_linux_par_vinit(d, ap);
va_end(ap);
return err;
}
int aml_dma_linux_par_destroy(struct aml_dma *d)
void aml_dma_linux_par_fini(struct aml_dma *d)
{
struct aml_dma_linux_par *dma = (struct aml_dma_linux_par *)d->data;
for(int i = 0; i < aml_vector_size(&dma->data.requests); i++)
{
struct aml_dma_linux_par *dma;
if (d == NULL || d->data == NULL)
return;
dma = (struct aml_dma_linux_par *)d->data;
for (int i = 0; i < aml_vector_size(&dma->data.requests); i++) {
struct aml_dma_request_linux_par *req =
aml_vector_get(&dma->data.requests, i);
free(req->thread_data);
}
aml_vector_destroy(&dma->data.requests);
pthread_mutex_destroy(&dma->data.lock);
return 0;
}
/**
 * Tears down a parallel DMA created with aml_dma_linux_par_create and
 * releases its allocation.
 *
 * @param d address of the DMA pointer; set to NULL on return. A NULL
 *          address is silently ignored.
 */
void aml_dma_linux_par_destroy(struct aml_dma **d)
{
	struct aml_dma *dma;

	if (!d)
		return;
	dma = *d;
	*d = NULL;
	aml_dma_linux_par_fini(dma);
	free(dma);
}
......@@ -86,11 +86,11 @@ int aml_dma_linux_seq_create_request(struct aml_dma_data *d,
req = aml_vector_add(&dma->data.requests);
/* init the request */
if(type == AML_DMA_REQUEST_TYPE_COPY)
{
if (type == AML_DMA_REQUEST_TYPE_COPY) {
struct aml_tiling *dt, *st;
void *dptr, *sptr;
int dtid, stid;
dt = va_arg(ap, struct aml_tiling *);
dptr = va_arg(ap, void *);
dtid = va_arg(ap, int);
......@@ -116,7 +116,7 @@ int aml_dma_linux_seq_destroy_request(struct aml_dma_data *d,
struct aml_dma_request_linux_seq *req =
(struct aml_dma_request_linux_seq *)r;
if(req->type == AML_DMA_REQUEST_TYPE_COPY)
if (req->type == AML_DMA_REQUEST_TYPE_COPY)
aml_dma_request_linux_seq_copy_destroy(req);
/* enough to remove from request vector */
......@@ -136,7 +136,7 @@ int aml_dma_linux_seq_wait_request(struct aml_dma_data *d,
(struct aml_dma_request_linux_seq *)r;
/* execute */
if(req->type == AML_DMA_REQUEST_TYPE_COPY)
if (req->type == AML_DMA_REQUEST_TYPE_COPY)
dma->ops.do_copy(&dma->data, req);
/* destroy a completed request */
......@@ -154,35 +154,48 @@ struct aml_dma_ops aml_dma_linux_seq_ops = {
* Init functions:
******************************************************************************/
int aml_dma_linux_seq_create(struct aml_dma **d, ...)
int aml_dma_linux_seq_create(struct aml_dma **d, size_t nbreqs)
{
va_list ap;
struct aml_dma *ret = NULL;
intptr_t baseptr, dataptr;
va_start(ap, d);
int err;
if (d == NULL)
return -AML_EINVAL;
/* alloc */
baseptr = (intptr_t) calloc(1, AML_DMA_LINUX_SEQ_ALLOCSIZE);
if (baseptr == 0) {
*d = NULL;
return -AML_ENOMEM;
}
dataptr = baseptr + sizeof(struct aml_dma);
ret = (struct aml_dma *)baseptr;
ret->data = (struct aml_dma_data *)dataptr;
ret->ops = &aml_dma_linux_seq_ops;
aml_dma_linux_seq_vinit(ret, ap);
err = aml_dma_linux_seq_init(ret, nbreqs);
if (err) {
*d = NULL;
free(ret);
return err;
}
va_end(ap);
*d = ret;
return 0;
}
int aml_dma_linux_seq_vinit(struct aml_dma *d, va_list ap)
int aml_dma_linux_seq_init(struct aml_dma *d, size_t nbreqs)
{
d->ops = &aml_dma_linux_seq_ops;
struct aml_dma_linux_seq *dma = (struct aml_dma_linux_seq *)d->data;
struct aml_dma_linux_seq *dma;
if (d == NULL || d->data == NULL)
return -AML_EINVAL;
dma = (struct aml_dma_linux_seq *)d->data;
dma->ops = aml_dma_linux_seq_inner_ops;
/* request vector */
size_t nbreqs = va_arg(ap, size_t);
aml_vector_init(&dma->data.requests, nbreqs,
sizeof(struct aml_dma_request_linux_seq),
offsetof(struct aml_dma_request_linux_seq, type),
......@@ -190,20 +203,22 @@ int aml_dma_linux_seq_vinit(struct aml_dma *d, va_list ap)
pthread_mutex_init(&dma->data.lock, NULL);
return 0;
}
int aml_dma_linux_seq_init(struct aml_dma *d, ...)
{
int err;
va_list ap;
va_start(ap, d);
err = aml_dma_linux_seq_vinit(d, ap);
va_end(ap);
return err;
}
int aml_dma_linux_seq_destroy(struct aml_dma *d)
void aml_dma_linux_seq_fini(struct aml_dma *d)
{
if (d == NULL || d->data == NULL)
return;
struct aml_dma_linux_seq *dma = (struct aml_dma_linux_seq *)d->data;
aml_vector_destroy(&dma->data.requests);
pthread_mutex_destroy(&dma->data.lock);
return 0;
}
/**
 * Tears down a sequential DMA created with aml_dma_linux_seq_create and
 * releases its allocation.
 *
 * @param d address of the DMA pointer; set to NULL on return. A NULL
 *          address is silently ignored.
 */
void aml_dma_linux_seq_destroy(struct aml_dma **d)
{
	struct aml_dma *dma;

	if (!d)
		return;
	dma = *d;
	*d = NULL;
	aml_dma_linux_seq_fini(dma);
	free(dma);
}
......@@ -53,7 +53,7 @@ int main(int argc, char *argv[])
assert(!memcmp(src, dst, TILESIZE*PAGE_SIZE*NBTILES));
/* delete everything */
aml_dma_linux_par_destroy(&dma);
aml_dma_linux_par_fini(&dma);
aml_area_munmap(&aml_area_linux, dst, TILESIZE*PAGE_SIZE*NBTILES);
aml_area_munmap(&aml_area_linux, src, TILESIZE*PAGE_SIZE*NBTILES);
aml_tiling_destroy(&tiling, AML_TILING_TYPE_1D);
......
......@@ -52,7 +52,7 @@ int main(int argc, char *argv[])
assert(!memcmp(src, dst, TILESIZE*PAGE_SIZE*NBTILES));
/* delete everything */
aml_dma_linux_seq_destroy(&dma);
aml_dma_linux_seq_fini(&dma);
aml_area_munmap(&aml_area_linux, dst, TILESIZE*PAGE_SIZE*NBTILES);
aml_area_munmap(&aml_area_linux, src, TILESIZE*PAGE_SIZE*NBTILES);
aml_tiling_destroy(&tiling, AML_TILING_TYPE_1D);
......
......@@ -72,7 +72,7 @@ int main(int argc, char *argv[])
/* delete everything */
aml_scratch_par_destroy(&scratch);
aml_dma_linux_seq_destroy(&dma);
aml_dma_linux_seq_fini(&dma);
aml_area_munmap(&aml_area_linux, dst, TILESIZE*PAGE_SIZE*NBTILES);
aml_area_munmap(&aml_area_linux, src, TILESIZE*PAGE_SIZE*NBTILES);
aml_tiling_destroy(&tiling, AML_TILING_TYPE_1D);
......
......@@ -73,7 +73,7 @@ int main(int argc, char *argv[])
/* delete everything */
aml_scratch_seq_destroy(&scratch);
aml_dma_linux_par_destroy(&dma);
aml_dma_linux_par_fini(&dma);
aml_area_munmap(&aml_area_linux, dst, TILESIZE*PAGE_SIZE*NBTILES);
aml_area_munmap(&aml_area_linux, src, TILESIZE*PAGE_SIZE*NBTILES);
aml_tiling_destroy(&tiling, AML_TILING_TYPE_1D);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment