Commit 1a4d44d0 authored by Swann Perarnau

[refactor] convert dma to new errors, create/destroy

Implement #31 for dma.
Improve error management.
Fix checkpatch warnings in dma.
parent f6e2c2d7
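
In short, the varargs create/init entry points become functions with explicit size_t arguments and negative AML error codes, and destroy now takes the address of the pointer. The sketch below illustrates the intended call sequence; it is not part of the patch. The aml_dma_copy argument order (destination tiling, pointer, tile id, then source) is inferred from the request-creation code further down, and the include paths mirror the files listed in the build hunk, so treat both as assumptions.

#include <aml.h>
#include <aml/dma/linux-seq.h>

/* Illustrative helper: copy one tile with the refactored interface. */
int copy_one_tile(struct aml_tiling *tiling, void *dst, void *src)
{
    struct aml_dma *dma;
    int err;

    /* create now takes nbreqs directly and returns -AML_EINVAL/-AML_ENOMEM */
    err = aml_dma_linux_seq_create(&dma, 16);
    if (err != 0)
        return err;

    /* copy tile 0 of src into tile 0 of dst (argument order inferred) */
    err = aml_dma_copy(dma, tiling, dst, 0, tiling, src, 0);

    /* destroy now takes &dma and leaves it NULL */
    aml_dma_linux_seq_destroy(&dma);
    return err;
}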
@@ -14,9 +14,14 @@
 # files
 include/aml.h
 include/aml/area/linux.h
+include/aml/dma/linux-seq.h
+include/aml/dma/linux-par.h
 include/aml/utils/bitmap.h
 include/aml/utils/error.h
 src/aml.c
 src/area/area.c
 src/area/linux.c
+src/dma/dma.c
+src/dma/dma_linux_seq.c
+src/dma/dma_linux_par.c
 src/utils/error.c
@@ -184,7 +184,7 @@ int main(int argc, char* argv[])
            flops/1e9);
     aml_scratch_par_destroy(&sa);
     aml_scratch_par_destroy(&sb);
-    aml_dma_linux_seq_destroy(&dma);
+    aml_dma_linux_seq_fini(&dma);
     aml_area_munmap(slow, a, memsize);
     aml_area_munmap(slow, b, memsize);
     aml_area_munmap(fast, c, memsize);
@@ -120,7 +120,7 @@ int main(int argc, char *argv[])
     aml_scratch_seq_destroy(&sa);
     aml_scratch_seq_destroy(&sb);
-    aml_dma_linux_par_destroy(&dma);
+    aml_dma_linux_par_fini(&dma);
     aml_area_munmap(slow, a, memsize);
     aml_area_munmap(slow, b, memsize);
     aml_area_munmap(fast, c, memsize);
@@ -133,7 +133,7 @@ int main(int argc, char *argv[])
     aml_scratch_par_destroy(&sa);
     aml_scratch_par_destroy(&sb);
-    aml_dma_linux_seq_destroy(&dma);
+    aml_dma_linux_seq_fini(&dma);
     aml_area_munmap(slow, a, memsize);
     aml_area_munmap(slow, b, memsize);
     aml_area_munmap(fast, c, memsize);
@@ -144,7 +144,7 @@ int main(int argc, char *argv[])
     aml_scratch_par_destroy(&sa);
     aml_scratch_par_destroy(&sb);
-    aml_dma_linux_seq_destroy(&dma);
+    aml_dma_linux_seq_fini(&dma);
     aml_area_munmap(slow, a, memsize);
     aml_area_munmap(slow, b, memsize);
     aml_area_munmap(fast, c, memsize);
include/aml/dma/linux-par.h
@@ -41,9 +41,9 @@ struct aml_dma_linux_par_data {
 };

 struct aml_dma_linux_par_ops {
-    void *(*do_thread)(void *);
-    int (*do_copy)(struct aml_dma_linux_par_data *,
-                   struct aml_dma_request_linux_par *, int tid);
+    void *(*do_thread)(void *thread_data);
+    int (*do_copy)(struct aml_dma_linux_par_data *data,
+                   struct aml_dma_request_linux_par *request, int tid);
 };

 struct aml_dma_linux_par {
@@ -62,39 +62,42 @@ struct aml_dma_linux_par {
     (sizeof(struct aml_dma_linux_par) + \
      sizeof(struct aml_dma))

-/*
- * Allocates and initializes a new parallel DMA.
- * "dma": an address where the pointer to the newly allocated DMA structure
- *        will be stored.
- * Variadic arguments:
- * - "nbreqs": an argument of type size_t; the initial number of slots for
- *             asynchronous request that are in-flight (will be increased
- *             automatically if necessary).
- * - "nbthreads": an argument of type size_t; the number of threads to launch
- *                for each request.
- * Returns 0 if successful; an error code otherwise.
- */
-int aml_dma_linux_par_create(struct aml_dma **, ...);
-
-/*
- * Initializes a new parallel DMA. This is a varargs-variant of the
- * aml_dma_linux_par_vinit() routine.
- * "dma": an allocated DMA structure.
- * Variadic arguments: see aml_dma_linux_par_create().
- * Returns 0 if successful; an error code otherwise.
- */
-int aml_dma_linux_par_init(struct aml_dma *, ...);
-
-/*
- * Initializes a new parallel DMA.
- * "dma": an allocated DMA structure.
- * "args": see the variadic arguments of aml_dma_linux_par_create().
- * Returns 0 if successful; an error code otherwise.
- */
-int aml_dma_linux_par_vinit(struct aml_dma *, va_list);
-
-/*
- * Tears down an initialized parallel DMA.
- * "dma": an initialized DMA structure.
- * Returns 0 if successful; an error code otherwise.
- */
-int aml_dma_linux_par_destroy(struct aml_dma *);
+/**
+ * Allocates and initializes a new parallel DMA.
+ *
+ * @param dma an address where the pointer to the newly allocated DMA structure
+ * will be stored.
+ * @param nbreqs the initial number of slots for asynchronous requests that are
+ * in-flight (will be increased automatically if necessary).
+ * @param nbthreads the number of threads to launch for each request.
+ *
+ * @return 0 if successful; an error code otherwise.
+ **/
+int aml_dma_linux_par_create(struct aml_dma **dma, size_t nbreqs,
+                             size_t nbthreads);
+
+/**
+ * Initializes a new parallel DMA.
+ *
+ * @param dma a pointer to a dma declared with the AML_DMA_LINUX_PAR_DECL macro
+ * @param nbreqs the initial number of slots for asynchronous requests that are
+ * in-flight (will be increased automatically if necessary).
+ * @param nbthreads the number of threads to launch for each request.
+ *
+ * @return 0 if successful; an error code otherwise.
+ **/
+int aml_dma_linux_par_init(struct aml_dma *dma, size_t nbreqs,
+                           size_t nbthreads);
+
+/**
+ * Finalize a parallel DMA
+ **/
+void aml_dma_linux_par_fini(struct aml_dma *dma);
+
+/**
+ * Tears down a parallel DMA created with aml_dma_linux_par_create.
+ * @param dma the address of a pointer to a parallel dma. Will be NULL after.
+ */
+void aml_dma_linux_par_destroy(struct aml_dma **dma);

 #endif // AML_LINUX_DMA_LINUX_PAR_H
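
The header above now distinguishes two lifecycles: init/fini for a dma declared with AML_DMA_LINUX_PAR_DECL, and create/destroy for a heap-allocated one. A minimal sketch of both, assuming the DECL macro declares a struct aml_dma that can be passed by address (its expansion is not shown in this patch):

AML_DMA_LINUX_PAR_DECL(pardma);    /* assumed to declare a struct aml_dma named pardma */

void lifecycle_sketch(void)
{
    struct aml_dma *heapdma;

    /* declared dma: init then fini */
    if (aml_dma_linux_par_init(&pardma, 16, 4) == 0) {
        /* ... submit copy requests ... */
        aml_dma_linux_par_fini(&pardma);
    }

    /* heap dma: create then destroy (pointer is reset to NULL) */
    if (aml_dma_linux_par_create(&heapdma, 16, 4) == 0) {
        /* ... submit copy requests ... */
        aml_dma_linux_par_destroy(&heapdma);
    }
}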
include/aml/dma/linux-seq.h
@@ -52,38 +52,38 @@ struct aml_dma_linux_seq {
     (sizeof(struct aml_dma_linux_seq) + \
      sizeof(struct aml_dma))

-/*
- * Allocates and initializes a new sequential DMA.
- * "dma": an address where the pointer to the newly allocated DMA structure
- *        will be stored.
- * Variadic arguments:
- * - "nbreqs": an argument of type size_t; the initial number of slots for
- *             asynchronous request that are in-flight (will be increased
- *             automatically if necessary).
- * Returns 0 if successful; an error code otherwise.
- */
-int aml_dma_linux_seq_create(struct aml_dma **dma, ...);
-
-/*
- * Initializes a new sequential DMA. This is a varargs-variant of the
- * aml_dma_linux_seq_vinit() routine.
- * "dma": an allocated DMA structure.
- * Variadic arguments: see aml_dma_linux_seq_create().
- * Returns 0 if successful; an error code otherwise.
- */
-int aml_dma_linux_seq_init(struct aml_dma *dma, ...);
-
-/*
- * Initializes a new sequential DMA.
- * "dma": an allocated DMA structure.
- * "args": see the variadic arguments of aml_dma_linux_seq_create().
- * Returns 0 if successful; an error code otherwise.
- */
-int aml_dma_linux_seq_vinit(struct aml_dma *dma, va_list args);
-
-/*
- * Tears down an initialized sequential DMA.
- * "dma": an initialized DMA structure.
- * Returns 0 if successful; an error code otherwise.
- */
-int aml_dma_linux_seq_destroy(struct aml_dma *dma);
+/**
+ * Allocates and initializes a new sequential DMA.
+ *
+ * @param dma an address where the pointer to the newly allocated DMA structure
+ * will be stored.
+ * @param nbreqs the initial number of slots for asynchronous requests that are
+ * in-flight (will be increased automatically if necessary).
+ *
+ * @return 0 if successful; an error code otherwise.
+ **/
+int aml_dma_linux_seq_create(struct aml_dma **dma, size_t nbreqs);
+
+/**
+ * Initializes a new sequential DMA.
+ *
+ * @param dma a pointer to a dma declared with the AML_DMA_LINUX_SEQ_DECL macro
+ * @param nbreqs same as the create version.
+ *
+ * @return 0 if successful; an error code otherwise.
+ **/
+int aml_dma_linux_seq_init(struct aml_dma *dma, size_t nbreqs);
+
+/**
+ * Finalize a sequential DMA
+ **/
+void aml_dma_linux_seq_fini(struct aml_dma *dma);
+
+/**
+ * Tears down a sequential DMA created with aml_dma_linux_seq_create.
+ * @param dma the address of a pointer to a sequential dma. Will be NULL after.
+ */
+void aml_dma_linux_seq_destroy(struct aml_dma **dma);

 /* Performs a copy request.
  * "dma" the dma_linux_seq_data associated with a linux_seq dma.
src/dma/dma.c
@@ -26,6 +26,7 @@ int aml_dma_copy(struct aml_dma *dma, ...)
     va_list ap;
     int ret;
     struct aml_dma_request *req;
+
     va_start(ap, dma);
     ret = dma->ops->create_request(dma->data, &req,
                                    AML_DMA_REQUEST_TYPE_COPY, ap);

@@ -40,6 +41,7 @@ int aml_dma_async_copy(struct aml_dma *dma, struct aml_dma_request **req, ...)
     assert(req != NULL);
     va_list ap;
     int ret;
+
     va_start(ap, req);
     ret = dma->ops->create_request(dma->data, req,
                                    AML_DMA_REQUEST_TYPE_COPY, ap);
src/dma/dma_linux_par.c
@@ -60,7 +60,7 @@ void *aml_dma_linux_par_do_thread(void *arg)
     struct aml_dma_linux_par_thread_data *data =
         (struct aml_dma_linux_par_thread_data *)arg;

-    if(data->req->type == AML_DMA_REQUEST_TYPE_COPY)
+    if (data->req->type == AML_DMA_REQUEST_TYPE_COPY)
         data->dma->ops.do_copy(&data->dma->data, data->req, data->tid);
     return NULL;
 }
@@ -75,10 +75,10 @@ int aml_dma_linux_par_do_copy(struct aml_dma_linux_par_data *dma,
     size_t nbthreads = dma->nbthreads;
     size_t chunksize = req->size / nbthreads;

-    void *dest = (void*)((intptr_t)req->dest + tid * chunksize);
-    void *src = (void*)((intptr_t)req->src + tid * chunksize);
+    void *dest = (void *)((intptr_t)req->dest + tid * chunksize);
+    void *src = (void *)((intptr_t)req->src + tid * chunksize);

-    if(tid == nbthreads - 1 && req->size > chunksize * nbthreads)
+    if (tid == nbthreads - 1 && req->size > chunksize * nbthreads)
         chunksize += req->size % nbthreads;

     memcpy(dest, src, chunksize);
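
The do_copy hunk above gives each thread req->size / nbthreads bytes and lets the last thread absorb the remainder. A standalone illustration of that arithmetic, with hypothetical names that are not part of the patch:

#include <stddef.h>
#include <stdio.h>

/* For size = 10 and nbthreads = 4 this prints chunks of 2, 2, 2 and 4 bytes,
 * so the last thread takes the remainder and all 10 bytes are covered. */
static void print_chunks(size_t size, size_t nbthreads)
{
    for (size_t tid = 0; tid < nbthreads; tid++) {
        size_t chunksize = size / nbthreads;
        size_t offset = tid * chunksize;

        if (tid == nbthreads - 1 && size > chunksize * nbthreads)
            chunksize += size % nbthreads;
        printf("tid %zu copies %zu bytes at offset %zu\n",
               tid, chunksize, offset);
    }
}

int main(void)
{
    print_chunks(10, 4);
    return 0;
}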
@@ -109,11 +109,11 @@ int aml_dma_linux_par_create_request(struct aml_dma_data *d,
     req = aml_vector_add(&dma->data.requests);

     /* init the request */
-    if(type == AML_DMA_REQUEST_TYPE_COPY)
-    {
+    if (type == AML_DMA_REQUEST_TYPE_COPY) {
         struct aml_tiling *dt, *st;
         void *dptr, *sptr;
         int dtid, stid;

         dt = va_arg(ap, struct aml_tiling *);
         dptr = va_arg(ap, void *);
         dtid = va_arg(ap, int);
@@ -125,9 +125,9 @@ int aml_dma_linux_par_create_request(struct aml_dma_data *d,
     }
     pthread_mutex_unlock(&dma->data.lock);

-    for(int i = 0; i < dma->data.nbthreads; i++)
-    {
+    for (int i = 0; i < dma->data.nbthreads; i++) {
         struct aml_dma_linux_par_thread_data *rd = &req->thread_data[i];

         rd->req = req;
         rd->dma = dma;
         rd->tid = i;
@@ -149,13 +149,12 @@ int aml_dma_linux_par_destroy_request(struct aml_dma_data *d,
         (struct aml_dma_request_linux_par *)r;

     /* we cancel and join, instead of killing, for a cleaner result */
-    for(int i = 0; i < dma->data.nbthreads; i++)
-    {
+    for (int i = 0; i < dma->data.nbthreads; i++) {
         pthread_cancel(req->thread_data[i].thread);
         pthread_join(req->thread_data[i].thread, NULL);
     }

-    if(req->type == AML_DMA_REQUEST_TYPE_COPY)
+    if (req->type == AML_DMA_REQUEST_TYPE_COPY)
         aml_dma_request_linux_par_copy_destroy(req);

     pthread_mutex_lock(&dma->data.lock);
@@ -173,11 +172,11 @@ int aml_dma_linux_par_wait_request(struct aml_dma_data *d,
     struct aml_dma_request_linux_par *req =
         (struct aml_dma_request_linux_par *)r;

-    for(int i = 0; i < dma->data.nbthreads; i++)
+    for (int i = 0; i < dma->data.nbthreads; i++)
         pthread_join(req->thread_data[i].thread, NULL);

     /* destroy a completed request */
-    if(req->type == AML_DMA_REQUEST_TYPE_COPY)
+    if (req->type == AML_DMA_REQUEST_TYPE_COPY)
         aml_dma_request_linux_par_copy_destroy(req);

     pthread_mutex_lock(&dma->data.lock);
@@ -196,70 +195,88 @@ struct aml_dma_ops aml_dma_linux_par_ops = {
  * Init functions:
  ******************************************************************************/

-int aml_dma_linux_par_create(struct aml_dma **d, ...)
+int aml_dma_linux_par_create(struct aml_dma **d, size_t nbreqs,
+                             size_t nbthreads)
 {
-    va_list ap;
     struct aml_dma *ret = NULL;
     intptr_t baseptr, dataptr;
-    va_start(ap, d);
+    int err;
+
+    if (d == NULL)
+        return -AML_EINVAL;

     /* alloc */
     baseptr = (intptr_t) calloc(1, AML_DMA_LINUX_PAR_ALLOCSIZE);
+    if (baseptr == 0) {
+        *d = NULL;
+        return -AML_ENOMEM;
+    }
     dataptr = baseptr + sizeof(struct aml_dma);

     ret = (struct aml_dma *)baseptr;
     ret->data = (struct aml_dma_data *)dataptr;
+    ret->ops = &aml_dma_linux_par_ops;

-    aml_dma_linux_par_vinit(ret, ap);
+    err = aml_dma_linux_par_init(ret, nbreqs, nbthreads);
+    if (err) {
+        *d = NULL;
+        free(ret);
+        return err;
+    }

-    va_end(ap);
     *d = ret;
     return 0;
 }

-int aml_dma_linux_par_vinit(struct aml_dma *d, va_list ap)
+int aml_dma_linux_par_init(struct aml_dma *d, size_t nbreqs,
+                           size_t nbthreads)
 {
-    d->ops = &aml_dma_linux_par_ops;
-    struct aml_dma_linux_par *dma = (struct aml_dma_linux_par *)d->data;
+    struct aml_dma_linux_par *dma;
+
+    if (d == NULL || d->data == NULL)
+        return -AML_EINVAL;
+
+    dma = (struct aml_dma_linux_par *)d->data;
     dma->ops = aml_dma_linux_par_inner_ops;

     /* allocate request array */
-    size_t nbreqs = va_arg(ap, size_t);
-    dma->data.nbthreads = va_arg(ap, size_t);
+    dma->data.nbthreads = nbthreads;
     aml_vector_init(&dma->data.requests, nbreqs,
                     sizeof(struct aml_dma_request_linux_par),
                     offsetof(struct aml_dma_request_linux_par, type),
                     AML_DMA_REQUEST_TYPE_INVALID);

-    for(int i = 0; i < nbreqs; i++)
-    {
+    for (int i = 0; i < nbreqs; i++) {
         struct aml_dma_request_linux_par *req =
             aml_vector_get(&dma->data.requests, i);
         req->thread_data = calloc(dma->data.nbthreads,
                 sizeof(struct aml_dma_linux_par_thread_data));
     }
     pthread_mutex_init(&dma->data.lock, NULL);
     return 0;
 }

-int aml_dma_linux_par_init(struct aml_dma *d, ...)
-{
-    int err;
-    va_list ap;
-    va_start(ap, d);
-    err = aml_dma_linux_par_vinit(d, ap);
-    va_end(ap);
-    return err;
-}
-
-int aml_dma_linux_par_destroy(struct aml_dma *d)
+void aml_dma_linux_par_fini(struct aml_dma *d)
 {
-    struct aml_dma_linux_par *dma = (struct aml_dma_linux_par *)d->data;
-    for(int i = 0; i < aml_vector_size(&dma->data.requests); i++)
-    {
+    struct aml_dma_linux_par *dma;
+
+    if (d == NULL || d->data == NULL)
+        return;
+
+    dma = (struct aml_dma_linux_par *)d->data;
+    for (int i = 0; i < aml_vector_size(&dma->data.requests); i++) {
         struct aml_dma_request_linux_par *req =
             aml_vector_get(&dma->data.requests, i);
         free(req->thread_data);
     }
     aml_vector_destroy(&dma->data.requests);
     pthread_mutex_destroy(&dma->data.lock);
-    return 0;
+}
+
+void aml_dma_linux_par_destroy(struct aml_dma **d)
+{
+    if (d == NULL)
+        return;
+    aml_dma_linux_par_fini(*d);
+    free(*d);
+    *d = NULL;
 }
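
With the new error scheme, create reports -AML_EINVAL and -AML_ENOMEM and resets *dma to NULL on failure, so callers can branch on the failure mode. A hedged sketch; the par-then-seq fallback policy is only an example, not something the patch prescribes:

#include <aml.h>
#include <aml/dma/linux-par.h>
#include <aml/dma/linux-seq.h>

/* Try a parallel dma first, fall back to a sequential one on memory pressure. */
int make_dma(struct aml_dma **out, size_t nbreqs, size_t nbthreads)
{
    int err = aml_dma_linux_par_create(out, nbreqs, nbthreads);

    if (err == -AML_ENOMEM)
        err = aml_dma_linux_seq_create(out, nbreqs);
    /* on any failure the create functions leave *out == NULL */
    return err;
}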
src/dma/dma_linux_seq.c
@@ -86,11 +86,11 @@ int aml_dma_linux_seq_create_request(struct aml_dma_data *d,
     req = aml_vector_add(&dma->data.requests);

     /* init the request */
-    if(type == AML_DMA_REQUEST_TYPE_COPY)
-    {
+    if (type == AML_DMA_REQUEST_TYPE_COPY) {
         struct aml_tiling *dt, *st;
         void *dptr, *sptr;
         int dtid, stid;

         dt = va_arg(ap, struct aml_tiling *);
         dptr = va_arg(ap, void *);
         dtid = va_arg(ap, int);
@@ -116,7 +116,7 @@ int aml_dma_linux_seq_destroy_request(struct aml_dma_data *d,
     struct aml_dma_request_linux_seq *req =
         (struct aml_dma_request_linux_seq *)r;

-    if(req->type == AML_DMA_REQUEST_TYPE_COPY)
+    if (req->type == AML_DMA_REQUEST_TYPE_COPY)
         aml_dma_request_linux_seq_copy_destroy(req);

     /* enough to remove from request vector */
@@ -136,7 +136,7 @@ int aml_dma_linux_seq_wait_request(struct aml_dma_data *d,
         (struct aml_dma_request_linux_seq *)r;

     /* execute */
-    if(req->type == AML_DMA_REQUEST_TYPE_COPY)
+    if (req->type == AML_DMA_REQUEST_TYPE_COPY)
         dma->ops.do_copy(&dma->data, req);

     /* destroy a completed request */
@@ -154,35 +154,48 @@ struct aml_dma_ops aml_dma_linux_seq_ops = {
  * Init functions:
  ******************************************************************************/

-int aml_dma_linux_seq_create(struct aml_dma **d, ...)
+int aml_dma_linux_seq_create(struct aml_dma **d, size_t nbreqs)
 {
-    va_list ap;
     struct aml_dma *ret = NULL;
     intptr_t baseptr, dataptr;
-    va_start(ap, d);
+    int err;
+
+    if (d == NULL)
+        return -AML_EINVAL;

     /* alloc */
     baseptr = (intptr_t) calloc(1, AML_DMA_LINUX_SEQ_ALLOCSIZE);
+    if (baseptr == 0) {
+        *d = NULL;
+        return -AML_ENOMEM;
+    }
     dataptr = baseptr + sizeof(struct aml_dma);