Commit 27460a5f authored by Swann Perarnau

Merge branch 'better-unit-tests' into 'master'

Improve speed and exhaustiveness of the unit tests.

See merge request !74
parents 3eb5ca16 f43d2d6d
Pipeline #8312 passed with stages in 6 minutes and 16 seconds
@@ -877,7 +877,7 @@ struct aml_dma_ops {
* @return an AML error code.
**/
int (*destroy_request)(struct aml_dma_data *dma,
struct aml_dma_request *req);
struct aml_dma_request **req);
/**
* Wait for termination of a data movement and destroy the request
@@ -888,7 +888,7 @@ struct aml_dma_ops {
* @return an AML error code.
**/
int (*wait_request)(struct aml_dma_data *dma,
struct aml_dma_request *req);
struct aml_dma_request **req);
};
/**
@@ -931,7 +931,7 @@ int aml_dma_async_copy(struct aml_dma *dma, struct aml_dma_request **req,
* @param req: a DMA request obtained using aml_dma_async_*() calls.
* @return 0 if successful; an error code otherwise.
**/
int aml_dma_wait(struct aml_dma *dma, struct aml_dma_request *req);
int aml_dma_wait(struct aml_dma *dma, struct aml_dma_request **req);
/**
* Tears down an asynchronous DMA request before it completes.
@@ -939,7 +939,7 @@ int aml_dma_wait(struct aml_dma *dma, struct aml_dma_request *req);
* @param req: a DMA request obtained using aml_dma_async_*() calls.
* @return 0 if successful; an error code otherwise.
**/
int aml_dma_cancel(struct aml_dma *dma, struct aml_dma_request *req);
int aml_dma_cancel(struct aml_dma *dma, struct aml_dma_request **req);
/**
* Generic helper to copy from one layout to another.
......
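The hunks above all make the same interface change: the wait and destroy operations now take the address of the request handle rather than the handle itself, so the implementation can invalidate it once the request is gone. A minimal caller sketch, assuming a dma and two layouts dl and sl created earlier (names are illustrative, not from this diff):

	struct aml_dma_request *req;
	int err;

	err = aml_dma_async_copy(dma, &req, AML_DMA_REQUEST_TYPE_LAYOUT, dl, sl);
	if (err != AML_SUCCESS)
		return err;

	/* the new signature lets wait_request reset req to NULL on success,
	 * so a stale handle can no longer be waited on or cancelled twice */
	err = aml_dma_wait(dma, &req);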
@@ -50,7 +50,7 @@ struct aml_dma_linux_par_request_data {
pthread_t thread;
};
/** Inside of a parallel request for linux movement. **/
/** Inside of a parallel dma for linux movement. **/
struct aml_dma_linux_par_data {
struct aml_vector *requests;
pthread_mutex_t lock;
......
@@ -27,9 +27,9 @@
/** The type used to store bits **/
#define AML_BITMAP_TYPE unsigned long
/** The number of basic type elements used to store bits **/
#define AML_BITMAP_SIZE (AML_BITMAP_BYTES/sizeof(AML_BITMAP_TYPE))
#define AML_BITMAP_SIZE ((int)(AML_BITMAP_BYTES/sizeof(AML_BITMAP_TYPE)))
/** The number of bits held in each basic type element **/
#define AML_BITMAP_NBITS (8 * sizeof(AML_BITMAP_TYPE))
#define AML_BITMAP_NBITS ((int)(8 * sizeof(AML_BITMAP_TYPE)))
/**
* aml_bitmap is a static array of elements wrapped in a structure.
@@ -50,13 +50,13 @@ void aml_bitmap_copy(struct aml_bitmap *dst, const struct aml_bitmap *src);
* Empty a bitmap with all bits cleared.
* @param bitmap: The bitmap to set.
**/
void aml_bitmap_zero(struct aml_bitmap *bitmap);
int aml_bitmap_zero(struct aml_bitmap *bitmap);
/**
* Fill a bitmap with all bits set.
* @param bitmap: The bitmap to set.
**/
void aml_bitmap_fill(struct aml_bitmap *bitmap);
int aml_bitmap_fill(struct aml_bitmap *bitmap);
/**
* Check whether a bit in bitmap is set.
@@ -135,7 +135,7 @@ int aml_bitmap_clear_range(struct aml_bitmap *bitmap,
* @param bitmap: The bitmap to inspect.
* @return The number of bits set in bitmap.
**/
unsigned long aml_bitmap_nset(const struct aml_bitmap *bitmap);
int aml_bitmap_nset(const struct aml_bitmap *bitmap);
/**
* Copy an unsigned long array used as a bitmap into an actual bitmap.
......
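With these declarations, every bitmap primitive reports errors through its return value: the former void setters now return an int, and aml_bitmap_nset trades its unsigned long result for an int so it can signal -AML_EINVAL on a NULL bitmap. A hedged usage sketch of the new contract:

	struct aml_bitmap map;

	assert(!aml_bitmap_zero(&map));     /* setters now return 0 on success */
	assert(!aml_bitmap_set(&map, 3));
	assert(aml_bitmap_nset(&map) == 1); /* int result, errors fit in-band */
	assert(aml_bitmap_zero(NULL) == -AML_EINVAL);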
@@ -88,7 +88,9 @@ int aml_dma_copy(struct aml_dma *dma, int type, ...)
va_start(ap, type);
ret = dma->ops->create_request(dma->data, &req, type, ap);
va_end(ap);
ret = dma->ops->wait_request(dma->data, req);
if (ret != AML_SUCCESS)
return ret;
ret = dma->ops->wait_request(dma->data, &req);
return ret;
}
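The added check keeps a failed create_request from falling through to wait_request on an uninitialized handle; the error now surfaces directly to the caller. A sketch of the synchronous path (dest, src and size are illustrative names):

	int err = aml_dma_copy(dma, AML_DMA_REQUEST_TYPE_PTR, dest, src, size);
	if (err != AML_SUCCESS)
		return err; /* e.g. -AML_EINVAL for a NULL pointer or zero size */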
@@ -107,16 +109,16 @@ int aml_dma_async_copy(struct aml_dma *dma, struct aml_dma_request **req,
return ret;
}
int aml_dma_cancel(struct aml_dma *dma, struct aml_dma_request *req)
int aml_dma_cancel(struct aml_dma *dma, struct aml_dma_request **req)
{
assert(dma != NULL);
assert(req != NULL);
if (dma == NULL || req == NULL)
return -AML_EINVAL;
return dma->ops->destroy_request(dma->data, req);
}
int aml_dma_wait(struct aml_dma *dma, struct aml_dma_request *req)
int aml_dma_wait(struct aml_dma *dma, struct aml_dma_request **req)
{
assert(dma != NULL);
assert(req != NULL);
if (dma == NULL || req == NULL)
return -AML_EINVAL;
return dma->ops->wait_request(dma->data, req);
}
@@ -68,6 +68,7 @@ void *aml_dma_linux_par_do_thread(void *arg)
struct aml_dma_linux_par_request_data *req =
(struct aml_dma_linux_par_request_data *)arg;
pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
if (req->type != AML_DMA_REQUEST_TYPE_INVALID)
aml_copy_layout_generic(req->dest, req->src);
return NULL;
@@ -91,6 +92,7 @@ int aml_dma_linux_par_create_request(struct aml_dma_data *d,
(struct aml_dma_linux_par *)d;
struct aml_dma_request_linux_par *ret;
struct aml_dma_linux_par_request_data *req;
int err = AML_SUCCESS;
pthread_mutex_lock(&dma->data.lock);
req = aml_vector_add(dma->data.requests);
@@ -101,6 +103,10 @@ int aml_dma_linux_par_create_request(struct aml_dma_data *d,
dl = va_arg(ap, struct aml_layout *);
sl = va_arg(ap, struct aml_layout *);
if (dl == NULL || sl == NULL) {
err = -AML_EINVAL;
goto unlock;
}
aml_dma_linux_par_request_data_init(req,
AML_DMA_REQUEST_TYPE_LAYOUT,
dl, sl);
@@ -112,17 +118,21 @@ int aml_dma_linux_par_create_request(struct aml_dma_data *d,
dp = va_arg(ap, void *);
sp = va_arg(ap, void *);
sz = va_arg(ap, size_t);
if (dp == NULL || sp == NULL || sz == 0) {
err = -AML_EINVAL;
goto unlock;
}
/* simple 1D layout, none of the parameters really matter, as
* long as the copy generates a single memcpy.
*/
aml_layout_dense_create(&dl, dp, 0, sizeof(size_t), 1,
&sz, NULL, NULL);
aml_layout_dense_create(&sl, sp, 0, sizeof(size_t), 1,
&sz, NULL, NULL);
aml_layout_dense_create(&dl, dp, 0, 1, 1, &sz, NULL, NULL);
aml_layout_dense_create(&sl, sp, 0, 1, 1, &sz, NULL, NULL);
aml_dma_linux_par_request_data_init(req,
AML_DMA_REQUEST_TYPE_PTR,
dl, sl);
}
} else
err = -AML_EINVAL;
unlock:
pthread_mutex_unlock(&dma->data.lock);
if (req->type != AML_DMA_REQUEST_TYPE_INVALID) {
int uuid = aml_vector_getid(dma->data.requests, req);
@@ -131,21 +141,23 @@ int aml_dma_linux_par_create_request(struct aml_dma_data *d,
aml_dma_request_linux_par_create(&ret, uuid);
*r = (struct aml_dma_request *)ret;
}
return 0;
return err;
}
int aml_dma_linux_par_destroy_request(struct aml_dma_data *d,
struct aml_dma_request *r)
struct aml_dma_request **r)
{
assert(d != NULL);
assert(r != NULL);
struct aml_dma_linux_par *dma =
(struct aml_dma_linux_par *)d;
struct aml_dma_request_linux_par *req =
(struct aml_dma_request_linux_par *)r;
struct aml_dma_request_linux_par *req;
struct aml_dma_linux_par_request_data *inner_req;
if (*r == NULL)
return -AML_EINVAL;
req = (struct aml_dma_request_linux_par *)*r;
inner_req = aml_vector_get(dma->data.requests, req->uuid);
if (inner_req == NULL)
return -AML_EINVAL;
@@ -154,44 +166,49 @@ int aml_dma_linux_par_destroy_request(struct aml_dma_data *d,
if (inner_req->type != AML_DMA_REQUEST_TYPE_INVALID) {
pthread_cancel(inner_req->thread);
pthread_join(inner_req->thread, NULL);
if (inner_req->type == AML_DMA_REQUEST_TYPE_PTR) {
aml_layout_dense_destroy(&inner_req->dest);
aml_layout_dense_destroy(&inner_req->src);
}
}
if (inner_req->type == AML_DMA_REQUEST_TYPE_PTR) {
aml_layout_dense_destroy(&inner_req->dest);
aml_layout_dense_destroy(&inner_req->src);
}
pthread_mutex_lock(&dma->data.lock);
aml_vector_remove(dma->data.requests, inner_req);
pthread_mutex_unlock(&dma->data.lock);
aml_dma_request_linux_par_destroy(&req);
*r = NULL;
return 0;
}
int aml_dma_linux_par_wait_request(struct aml_dma_data *d,
struct aml_dma_request *r)
struct aml_dma_request **r)
{
assert(d != NULL);
assert(r != NULL);
struct aml_dma_linux_par *dma = (struct aml_dma_linux_par *)d;
struct aml_dma_request_linux_par *req =
(struct aml_dma_request_linux_par *)r;
struct aml_dma_request_linux_par *req;
struct aml_dma_linux_par_request_data *inner_req;
if (*r == NULL)
return -AML_EINVAL;
req = (struct aml_dma_request_linux_par *)*r;
inner_req = aml_vector_get(dma->data.requests, req->uuid);
if (inner_req == NULL)
return -AML_EINVAL;
if (inner_req->type != AML_DMA_REQUEST_TYPE_INVALID) {
if (inner_req->type != AML_DMA_REQUEST_TYPE_INVALID)
pthread_join(inner_req->thread, NULL);
if (inner_req->type == AML_DMA_REQUEST_TYPE_PTR) {
aml_layout_dense_destroy(&inner_req->dest);
aml_layout_dense_destroy(&inner_req->src);
}
}
if (inner_req->type == AML_DMA_REQUEST_TYPE_PTR) {
aml_layout_dense_destroy(&inner_req->dest);
aml_layout_dense_destroy(&inner_req->src);
}
pthread_mutex_lock(&dma->data.lock);
aml_vector_remove(dma->data.requests, inner_req);
pthread_mutex_unlock(&dma->data.lock);
aml_dma_request_linux_par_destroy(&req);
*r = NULL;
return 0;
}
@@ -236,22 +253,28 @@ int aml_dma_linux_par_create(struct aml_dma **dma, size_t nbreqs)
return 0;
}
void aml_dma_linux_par_destroy(struct aml_dma **dma)
void aml_dma_linux_par_destroy(struct aml_dma **d)
{
struct aml_dma *d;
struct aml_dma_linux_par *l;
struct aml_dma_linux_par *dma;
if (dma == NULL)
return;
d = *dma;
if (d == NULL)
if (d == NULL || *d == NULL)
return;
assert(d->data != NULL);
l = (struct aml_dma_linux_par *)d->data;
aml_vector_destroy(&l->data.requests);
pthread_mutex_destroy(&l->data.lock);
free(d);
*dma = NULL;
dma = (struct aml_dma_linux_par *)(*d)->data;
for (size_t i = 0; i < aml_vector_size(dma->data.requests); i++) {
struct aml_dma_linux_par_request_data *req;
req = aml_vector_get(dma->data.requests, i);
if (req->type != AML_DMA_REQUEST_TYPE_INVALID) {
pthread_cancel(req->thread);
pthread_join(req->thread, NULL);
}
if (req->type == AML_DMA_REQUEST_TYPE_PTR) {
aml_layout_dense_destroy(&req->dest);
aml_layout_dense_destroy(&req->src);
}
}
aml_vector_destroy(&dma->data.requests);
pthread_mutex_destroy(&dma->data.lock);
free(*d);
*d = NULL;
}
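The rewritten destroy above now cancels and joins any worker thread still running, and frees the temporary layouts of pointer requests, before tearing down the request vector and the lock. That makes the following pattern from the new unit test legal (idl and isl are layouts created beforehand):

	assert(!aml_dma_linux_par_create(&dma, 1));
	assert(!aml_dma_async_copy(dma, &r1, AML_DMA_REQUEST_TYPE_LAYOUT,
				   idl, isl));
	aml_dma_linux_par_destroy(&dma); /* joins in-flight work, then frees */
	assert(dma == NULL);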
@@ -90,6 +90,7 @@ int aml_dma_linux_seq_create_request(struct aml_dma_data *d,
(struct aml_dma_linux_seq *)d;
struct aml_dma_request_linux_seq *ret;
struct aml_dma_linux_seq_request_data *req;
int err = AML_SUCCESS;
pthread_mutex_lock(&dma->data.lock);
req = aml_vector_add(dma->data.requests);
@@ -100,6 +101,10 @@ int aml_dma_linux_seq_create_request(struct aml_dma_data *d,
dl = va_arg(ap, struct aml_layout *);
sl = va_arg(ap, struct aml_layout *);
if (dl == NULL || sl == NULL) {
err = -AML_EINVAL;
goto unlock;
}
aml_dma_linux_seq_request_data_init(req,
AML_DMA_REQUEST_TYPE_LAYOUT,
dl, sl);
@@ -111,38 +116,46 @@ int aml_dma_linux_seq_create_request(struct aml_dma_data *d,
dp = va_arg(ap, void *);
sp = va_arg(ap, void *);
sz = va_arg(ap, size_t);
if (dp == NULL || sp == NULL || sz == 0) {
err = -AML_EINVAL;
goto unlock;
}
/* simple 1D layout, none of the parameters really matter, as
* long as the copy generates a single memcpy.
*/
aml_layout_dense_create(&dl, dp, 0, sizeof(size_t), 1,
&sz, NULL, NULL);
aml_layout_dense_create(&sl, sp, 0, sizeof(size_t), 1,
&sz, NULL, NULL);
aml_layout_dense_create(&dl, dp, 0, 1, 1, &sz, NULL, NULL);
aml_layout_dense_create(&sl, sp, 0, 1, 1, &sz, NULL, NULL);
aml_dma_linux_seq_request_data_init(req,
AML_DMA_REQUEST_TYPE_PTR,
dl, sl);
}
int uuid = aml_vector_getid(dma->data.requests, req);
assert(uuid != AML_DMA_REQUEST_TYPE_INVALID);
aml_dma_request_linux_seq_create(&ret, uuid);
*r = (struct aml_dma_request *)ret;
} else
err = -AML_EINVAL;
unlock:
pthread_mutex_unlock(&dma->data.lock);
return 0;
if (req->type != AML_DMA_REQUEST_TYPE_INVALID) {
int uuid = aml_vector_getid(dma->data.requests, req);
assert(uuid != AML_DMA_REQUEST_TYPE_INVALID);
aml_dma_request_linux_seq_create(&ret, uuid);
*r = (struct aml_dma_request *)ret;
}
return err;
}
int aml_dma_linux_seq_destroy_request(struct aml_dma_data *d,
struct aml_dma_request *r)
struct aml_dma_request **r)
{
assert(d != NULL);
assert(r != NULL);
struct aml_dma_linux_seq *dma =
(struct aml_dma_linux_seq *)d;
struct aml_dma_request_linux_seq *req =
(struct aml_dma_request_linux_seq *)r;
struct aml_dma_request_linux_seq *req;
struct aml_dma_linux_seq_request_data *inner_req;
if (*r == NULL)
return -AML_EINVAL;
req = (struct aml_dma_request_linux_seq *)*r;
inner_req = aml_vector_get(dma->data.requests, req->uuid);
if (inner_req == NULL)
return -AML_EINVAL;
@@ -153,23 +166,26 @@ int aml_dma_linux_seq_destroy_request(struct aml_dma_data *d,
aml_layout_dense_destroy(&inner_req->src);
}
/* enough to remove from request vector */
aml_vector_remove(dma->data.requests, inner_req);
pthread_mutex_unlock(&dma->data.lock);
aml_dma_request_linux_seq_destroy(&req);
*r = NULL;
return 0;
}
int aml_dma_linux_seq_wait_request(struct aml_dma_data *d,
struct aml_dma_request *r)
struct aml_dma_request **r)
{
assert(d != NULL);
assert(r != NULL);
struct aml_dma_linux_seq *dma = (struct aml_dma_linux_seq *)d;
struct aml_dma_request_linux_seq *req =
(struct aml_dma_request_linux_seq *)r;
struct aml_dma_request_linux_seq *req;
struct aml_dma_linux_seq_request_data *inner_req;
if (*r == NULL)
return -AML_EINVAL;
req = (struct aml_dma_request_linux_seq *)*r;
inner_req = aml_vector_get(dma->data.requests, req->uuid);
if (inner_req == NULL)
return -AML_EINVAL;
......
@@ -87,7 +87,8 @@ int aml_layout_dense_create(struct aml_layout **layout,
struct aml_layout_dense *data;
int err;
if (layout == NULL)
if (layout == NULL || ptr == NULL || !element_size || !ndims ||
dims == NULL)
return -AML_EINVAL;
err = aml_layout_dense_alloc(&l, ndims);
@@ -111,8 +112,6 @@ int aml_layout_dense_create(struct aml_layout **layout,
else
data->pitch[i] = dims[ndims-i-1];
}
for (size_t i = 1; i <= ndims; i++)
data->cpitch[i] = data->cpitch[i-1]*pitch[ndims-i];
break;
case AML_LAYOUT_ORDER_COLUMN_MAJOR:
@@ -124,14 +123,14 @@ int aml_layout_dense_create(struct aml_layout **layout,
memcpy(data->pitch, pitch, ndims * sizeof(size_t));
else
memcpy(data->pitch, dims, ndims * sizeof(size_t));
for (size_t i = 1; i <= ndims; i++)
data->cpitch[i] = data->cpitch[i-1]*data->pitch[i-1];
break;
default:
free(l);
return -AML_EINVAL;
}
for (size_t i = 1; i <= ndims; i++)
data->cpitch[i] = data->cpitch[i-1]*data->pitch[i-1];
*layout = l;
return AML_SUCCESS;
......
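Hoisting the cumulative-pitch loop below the switch works because both branches leave data->pitch in column-major element order by that point. A worked example of the shared loop, assuming cpitch[0] was already initialized to the element size earlier in the function (that initialization is outside this hunk):

	/* element_size = 8 bytes, pitch = {4, 5}:
	 *   cpitch[1] = cpitch[0] * pitch[0] = 8 * 4  = 32
	 *   cpitch[2] = cpitch[1] * pitch[1] = 32 * 5 = 160
	 * element (i, j) then sits at byte offset i*8 + j*32 from ptr. */
	for (size_t i = 1; i <= ndims; i++)
		data->cpitch[i] = data->cpitch[i-1] * data->pitch[i-1];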
@@ -183,7 +183,7 @@ int aml_scratch_seq_destroy_request(struct aml_scratch_data *d,
return -AML_EINVAL;
if (inner_req->type != AML_SCRATCH_REQUEST_TYPE_NOOP)
aml_dma_cancel(scratch->data.dma, inner_req->dma_req);
aml_dma_cancel(scratch->data.dma, &inner_req->dma_req);
/* destroy removes the tile from the scratch */
if (inner_req->type == AML_SCRATCH_REQUEST_TYPE_PUSH)
@@ -215,7 +215,7 @@ int aml_scratch_seq_wait_request(struct aml_scratch_data *d,
/* wait for completion of the request */
if (inner_req->type != AML_SCRATCH_REQUEST_TYPE_NOOP)
aml_dma_wait(scratch->data.dma, inner_req->dma_req);
aml_dma_wait(scratch->data.dma, &inner_req->dma_req);
/* cleanup a completed request. In case of push, free up the tile */
pthread_mutex_lock(&scratch->data.lock);
......
@@ -50,13 +50,18 @@ void aml_bitmap_copy_to_ulong(const struct aml_bitmap *dst,
src[AML_BITMAP_NTH(i)] |= (1UL << AML_BITMAP_ITH(i));
}
void aml_bitmap_zero(struct aml_bitmap *bitmap)
int aml_bitmap_zero(struct aml_bitmap *bitmap)
{
if (bitmap == NULL)
return -AML_EINVAL;
memset(bitmap, 0, sizeof(struct aml_bitmap));
return 0;
}
int aml_bitmap_iszero(const struct aml_bitmap *bitmap)
{
if (bitmap == NULL)
return -AML_EINVAL;
for (unsigned int i = 0; i < AML_BITMAP_SIZE; i++)
if (bitmap->mask[i] != AML_BITMAP_EMPTY)
return 0;
@@ -65,35 +70,46 @@ int aml_bitmap_iszero(const struct aml_bitmap *bitmap)
int aml_bitmap_isfull(const struct aml_bitmap *bitmap)
{
if (bitmap == NULL)
return -AML_EINVAL;
for (unsigned int i = 0; i < AML_BITMAP_SIZE; i++)
if (bitmap->mask[i] != AML_BITMAP_FULL)
return 0;
return 1;
}
void aml_bitmap_fill(struct aml_bitmap *bitmap)
int aml_bitmap_fill(struct aml_bitmap *bitmap)
{
if (bitmap == NULL)
return -AML_EINVAL;
memset(bitmap, ~0, sizeof(struct aml_bitmap));
return 0;
}
int aml_bitmap_isset(const struct aml_bitmap *bitmap, const unsigned int i)
{
if (bitmap == NULL)
return -AML_EINVAL;
if (i >= AML_BITMAP_MAX)
return -1;
return -AML_EINVAL;
return (bitmap->mask[AML_BITMAP_NTH(i)] &
(1UL << AML_BITMAP_ITH(i))) > 0UL;
}
int aml_bitmap_set(struct aml_bitmap *bitmap, const unsigned int i)
{
if (bitmap == NULL)
return -AML_EINVAL;
if (i >= AML_BITMAP_MAX)
return -1;
return -AML_EINVAL;
bitmap->mask[AML_BITMAP_NTH(i)] |= (1UL << AML_BITMAP_ITH(i));
return 0;
}
int aml_bitmap_isequal(const struct aml_bitmap *a, const struct aml_bitmap *b)
{
if (a == NULL || b == NULL)
return -AML_EINVAL;
for (unsigned int i = 0; i < AML_BITMAP_SIZE; i++)
if (a->mask[i] != b->mask[i])
return 0;
@@ -102,8 +118,10 @@ int aml_bitmap_isequal(const struct aml_bitmap *a, const struct aml_bitmap *b)
int aml_bitmap_clear(struct aml_bitmap *bitmap, const unsigned int i)
{
if (bitmap == NULL)
return -AML_EINVAL;
if (i >= AML_BITMAP_MAX)
return -1;
return -AML_EINVAL;
bitmap->mask[AML_BITMAP_NTH(i)] &= ~(1UL << AML_BITMAP_ITH(i));
return 0;
}
@@ -111,8 +129,10 @@ int aml_bitmap_clear(struct aml_bitmap *bitmap, const unsigned int i)
int aml_bitmap_set_range(struct aml_bitmap *bitmap,
const unsigned int i, const unsigned int ii)
{
if (bitmap == NULL)
return -AML_EINVAL;
if (i >= AML_BITMAP_MAX || ii >= AML_BITMAP_MAX || i > ii)
return -1;
return -AML_EINVAL;
if (i == ii)
return aml_bitmap_set(bitmap, i);
@@ -136,8 +156,10 @@ int aml_bitmap_set_range(struct aml_bitmap *bitmap,
int aml_bitmap_clear_range(struct aml_bitmap *bitmap,
const unsigned int i, const unsigned int ii)
{
if (bitmap == NULL)
return -AML_EINVAL;
if (i >= AML_BITMAP_MAX || ii >= AML_BITMAP_MAX || i > ii)
return -1;
return -AML_EINVAL;
if (i == ii)
return aml_bitmap_clear(bitmap, i);
@@ -158,15 +180,18 @@ int aml_bitmap_clear_range(struct aml_bitmap *bitmap,
return 0;
}
unsigned long aml_bitmap_nset(const struct aml_bitmap *bitmap)
int aml_bitmap_nset(const struct aml_bitmap *bitmap)
{
unsigned long i, b, n;
unsigned long test = 1UL;
unsigned long nset = 0;
int nset = 0;
for (n = 0; n < AML_BITMAP_SIZE; n++) {
b = bitmap->mask[n];
for (i = 0; i < AML_BITMAP_NBITS; i++) {
if (bitmap == NULL)
return -AML_EINVAL;
for (int n = 0; n < AML_BITMAP_SIZE; n++) {
unsigned long b = bitmap->mask[n];
for (int i = 0; i < AML_BITMAP_NBITS; i++) {
nset += b & test ? 1 : 0;
b = b >> 1;
}
@@ -176,11 +201,12 @@ unsigned long aml_bitmap_nset(const struct aml_bitmap *bitmap)
int aml_bitmap_last(const struct aml_bitmap *bitmap)
{
if (bitmap == NULL)
return -1;
int n;
unsigned int i = 0;
if (bitmap == NULL)
return -AML_EINVAL;
for (n = AML_BITMAP_SIZE - 1; n >= 0 && bitmap->mask[n] == 0; n--)
;
@@ -301,6 +327,9 @@ int aml_bitmap_from_string(struct aml_bitmap *bitmap, const char *bitmap_str)
int aml_bitmap_create(struct aml_bitmap **map)
{
if (map == NULL)
return -AML_EINVAL;
struct aml_bitmap *b = calloc(1, sizeof(struct aml_bitmap));
if (b == NULL) {
@@ -319,6 +348,9 @@ int aml_bitmap_create(struct aml_bitmap **map)
**/
void aml_bitmap_destroy(struct aml_bitmap **map)
{
if (map == NULL)
return;
free(*map);
*map = NULL;
}
......
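Taken together, the bitmap.c changes make every entry point safe to call with NULL and keep the create/destroy pair symmetric. A round-trip sketch of the hardened allocation API:

	struct aml_bitmap *map;

	assert(aml_bitmap_create(NULL) == -AML_EINVAL);
	assert(!aml_bitmap_create(&map));
	assert(!aml_bitmap_fill(map));
	assert(aml_bitmap_nset(map) == AML_BITMAP_MAX); /* all bits set */
	aml_bitmap_destroy(&map);
	assert(map == NULL);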
@@ -9,56 +9,94 @@
*******************************************************************************/
#include "aml.h"
#include "aml/area/linux.h"
#include "aml/layout/dense.h"
#include "aml/dma/linux-par.h"
#include "aml/tiling/1d.h"
#include <assert.h>
#define TILESIZE (2)
#define NBTILES (16)
int main(int argc, char *argv[])
{
struct aml_tiling *tiling;
struct aml_dma *dma;
void *dst, *src;
size_t isz = 1<<16;
int idest[isz];
int isrc[isz];
struct aml_layout *idl, *isl;
/* library initialization */
aml_init(&argc, &argv);
/* initialize all the supporting struct */
assert(!aml_tiling_1d_create(&tiling, TILESIZE*_SC_PAGE_SIZE,
TILESIZE*_SC_PAGE_SIZE*NBTILES));
/* support data structures */
assert(!aml_layout_dense_create(&idl, idest, 0, sizeof(int), 1, &isz,
NULL, NULL));
assert(!aml_layout_dense_create(&isl, isrc, 0, sizeof(int), 1, &isz,
NULL, NULL));
for (size_t i = 0; i < isz; i++) {
idest[i] = 42;
isrc[i] = 24;
}
/* invalid create input */
assert(aml_dma_linux_par_create(NULL, 1) == -AML_EINVAL);
/* invalid requests */
assert(!aml_dma_linux_par_create(&dma, 1));
assert(aml_dma_copy(dma, 42) == -AML_EINVAL);
assert(aml_dma_copy(dma, AML_DMA_REQUEST_TYPE_PTR, NULL, isrc, isz) ==
-AML_EINVAL);
assert(aml_dma_copy(dma, AML_DMA_REQUEST_TYPE_PTR, idest, NULL, isz) ==
-AML_EINVAL);
assert(aml_dma_copy(dma, AML_DMA_REQUEST_TYPE_PTR, idest, isrc, 0) ==
-AML_EINVAL);
assert(aml_dma_copy(dma, AML_DMA_REQUEST_TYPE_LAYOUT, NULL, isl) ==
-AML_EINVAL);
assert(aml_dma_copy(dma, AML_DMA_REQUEST_TYPE_LAYOUT, idl, NULL) ==
-AML_EINVAL);
/* allocate some memory */
src = aml_area_mmap(&aml_area_linux, NULL, TILESIZE*_SC_PAGE_SIZE*NBTILES);
assert(src != NULL);
dst = aml_area_mmap(&aml_area_linux, NULL, TILESIZE*_SC_PAGE_SIZE*NBTILES);
assert(dst != NULL);
struct aml_dma_request *r1, *r2;
/* force dma to increase its requests queue */
assert(!aml_dma_async_copy(dma, &r1, AML_DMA_REQUEST_TYPE_LAYOUT,
idl, isl));
assert(!aml_dma_async_copy(dma, &r2, AML_DMA_REQUEST_TYPE_LAYOUT,
idl, isl));
memset(src, 42, TILESIZE*_SC_PAGE_SIZE*NBTILES);
memset(dst, 24, TILESIZE*_SC_PAGE_SIZE*NBTILES);
assert(aml_dma_wait(dma, NULL) == -AML_EINVAL);
assert(!aml_dma_wait(dma, &r1));
assert(!aml_dma_wait(dma, &r2));
aml_dma_linux_par_destroy(&dma);
/* cancel a request on the fly */
assert(!aml_dma_linux_par_create(&dma, 1));
assert(aml_dma_cancel(dma, NULL) == -AML_EINVAL);
assert(!aml_dma_async_copy(dma, &r1, AML_DMA_REQUEST_TYPE_LAYOUT,
idl, isl));
assert(!aml_dma_cancel(dma, &r1));
/* move some stuff by copy */
struct aml_dma_request *requests[NBTILES];
for(int i = 0; i < NBTILES; i++) {
void *d = aml_tiling_tilestart(tiling, dst, i);
void *s = aml_tiling_tilestart(tiling, src, i);
aml_dma_async_copy(dma, &requests[i], AML_DMA_REQUEST_TYPE_PTR,
d, s, TILESIZE*_SC_PAGE_SIZE);
/* destroy a running dma */
assert(!aml_dma_async_copy(dma, &r1, AML_DMA_REQUEST_TYPE_LAYOUT,
idl, isl));
aml_dma_linux_par_destroy(&dma);
/* move data around */
assert(!aml_dma_linux_par_create(&dma, 1));
struct aml_dma_request *requests[16];
for (int i = 0; i <