Commit 4b129cab authored by ndenoyelle's avatar ndenoyelle

Separation of concerns: move Linux-specific code into the Linux header and source files.

parent 66af48e8
Pipeline #8929 passed with stages
in 6 minutes and 59 seconds
......@@ -744,17 +744,6 @@ struct aml_layout *aml_tiling_index_byid(const struct aml_tiling *tiling,
////////////////////////////////////////////////////////////////////////////////
/**
* Internal macros used for tracking DMA request types.
* Invalid request type. Used for marking inactive requests in the vector.
**/
#define AML_DMA_REQUEST_TYPE_INVALID -1
/**
* The request is in the format (dest layout, src layout)
**/
#define AML_DMA_REQUEST_TYPE_LAYOUT 0
/**
* aml_dma is mainly used to asynchronously move data.
* aml_dma_request is an opaque structure containing information
......@@ -777,7 +766,9 @@ struct aml_dma_data;
* @param arg: extra argument needed by the operator
**/
typedef int (*aml_dma_operator)(struct aml_layout *dst,
const struct aml_layout *src, void *arg);
const struct aml_layout *src,
void *arg,
void **out);
/**
aml_dma_ops is a structure containing operations for a specific
......@@ -854,7 +845,7 @@ struct aml_dma {
* @param op_arg: optional argument to the operator
* @return 0 if successful; an error code otherwise.
**/
int aml_dma_copy_custom(struct aml_dma *dma, struct aml_layout *dest,
int aml_dma_copy(struct aml_dma *dma, struct aml_layout *dest,
struct aml_layout *src, aml_dma_operator op, void *op_arg);
/**
......@@ -869,14 +860,14 @@ int aml_dma_copy_custom(struct aml_dma *dma, struct aml_layout *dest,
* @param op_arg: optional argument to the operator
* @return 0 if successful; an error code otherwise.
**/
int aml_dma_async_copy_custom(struct aml_dma *dma, struct aml_dma_request **req,
int aml_dma_async_copy(struct aml_dma *dma, struct aml_dma_request **req,
struct aml_layout *dest,
struct aml_layout *src,
aml_dma_operator op, void *op_arg);
#define aml_dma_copy(dma, d, s) aml_dma_copy_custom(dma, d, s, NULL, NULL)
#define aml_dma_async_copy(dma, r, d, s) \
aml_dma_async_copy_custom(dma, r, d, s, NULL, NULL)
#define aml_dma_copy_helper(dma, d, s) aml_dma_copy(dma, d, s, NULL, NULL)
#define aml_dma_async_copy_helper(dma, r, d, s) \
aml_dma_async_copy(dma, r, d, s, NULL, NULL)
/**
* Waits for an asynchronous DMA request to complete.
......@@ -894,16 +885,6 @@ int aml_dma_wait(struct aml_dma *dma, struct aml_dma_request **req);
**/
int aml_dma_cancel(struct aml_dma *dma, struct aml_dma_request **req);
/**
* Generic helper to copy from one layout to another.
* @param[out] dst: destination layout
* @param[in] src: source layout
* @param[in] arg: unused (should be NULL)
*/
int aml_copy_layout_generic(struct aml_layout *dst,
const struct aml_layout *src, void *arg);
////////////////////////////////////////////////////////////////////////////////
/**
......
......@@ -21,6 +21,17 @@
* @{
**/
/**
* Internal macros used for tracking DMA request types.
* Invalid request type. Used for marking inactive requests in the vector.
**/
#define AML_DMA_REQUEST_TYPE_INVALID -1
/**
* The request is in the format (dest layout, src layout)
**/
#define AML_DMA_REQUEST_TYPE_LAYOUT 0
/**
* Default table of dma request operations for linux
* sequential dma.
......@@ -112,6 +123,18 @@ void aml_dma_linux_seq_destroy(struct aml_dma **dma);
int aml_dma_linux_seq_do_copy(struct aml_dma_linux_seq_data *dma,
struct aml_dma_request_linux_seq *req);
/**
* Generic helper to copy from one layout to another.
* @param[out] dst: destination layout
* @param[in] src: source layout
* @param[in] arg: unused (should be NULL)
* @param[out] out: A pointer where to store output of the function.
*/
int aml_copy_layout_generic(struct aml_layout *dst,
const struct aml_layout *src,
void *arg,
void **out);
/**
* @}
**/
......
......@@ -13,61 +13,6 @@
#include <assert.h>
/*******************************************************************************
* Generic DMA Copy implementations
*
* Needed by most DMAs. We don't provide introspection or any fancy API to it at
* this point.
******************************************************************************/
/*
 * Recursive worker for aml_copy_layout_generic().
 * Walks the d-dimensional element space and copies one element at a time
 * between the two layouts via their native-order accessors.
 *
 * d:           number of dimensions still to iterate (recursion depth).
 * dst:         destination layout, written through aml_layout_deref_native.
 * src:         source layout, read through aml_layout_deref_native.
 * elem_number: per-dimension element counts (native order).
 * elem_size:   size in bytes of one element.
 * coords:      scratch coordinate vector of length >= d, filled in as we
 *              recurse; only fully populated at the innermost level.
 */
static inline void aml_copy_layout_generic_helper(size_t d,
struct aml_layout *dst,
const struct aml_layout *src,
const size_t *elem_number,
size_t elem_size,
size_t *coords)
{
if (d == 1) {
/* Base case: innermost dimension — copy each element. */
for (size_t i = 0; i < elem_number[0]; i += 1) {
coords[0] = i;
memcpy(aml_layout_deref_native(dst, coords),
aml_layout_deref_native(src, coords),
elem_size);
}
} else {
/* Fix one coordinate of the outermost remaining dimension
 * and recurse over the d-1 inner dimensions. */
for (size_t i = 0; i < elem_number[d - 1]; i += 1) {
coords[d - 1] = i;
aml_copy_layout_generic_helper(d - 1, dst, src,
elem_number, elem_size,
coords);
}
}
}
/*
 * Element-wise copy between two layouts of identical shape (old 3-argument
 * signature; this commit replaces it with a 4-argument form taking an extra
 * "out" pointer).
 * Preconditions — same dimension count, same element size, and identical
 * native dimensions — are checked with assert() only, so they vanish under
 * NDEBUG.
 * arg is unused and should be NULL. Always returns 0.
 */
int aml_copy_layout_generic(struct aml_layout *dst,
const struct aml_layout *src, void *arg)
{
size_t d;
size_t elem_size;
(void)arg;
assert(aml_layout_ndims(dst) == aml_layout_ndims(src));
d = aml_layout_ndims(dst);
assert(aml_layout_element_size(dst) == aml_layout_element_size(src));
elem_size = aml_layout_element_size(dst);
/* VLAs sized by the runtime dimension count. */
size_t coords[d];
size_t elem_number[d];
size_t elem_number2[d];
aml_layout_dims_native(src, elem_number);
aml_layout_dims_native(dst, elem_number2);
/* Both layouts must agree on every native dimension. */
for (size_t i = 0; i < d; i += 1)
assert(elem_number[i] == elem_number2[i]);
aml_copy_layout_generic_helper(d, dst, src, elem_number, elem_size,
coords);
return 0;
}
/*******************************************************************************
* Generic DMA API:
* Most of the stuff is dispatched to a different layer, using type-specific
......@@ -77,7 +22,7 @@ int aml_copy_layout_generic(struct aml_layout *dst,
* abstract the request creation after this layer.
******************************************************************************/
int aml_dma_copy_custom(struct aml_dma *dma, struct aml_layout *dest,
int aml_dma_copy(struct aml_dma *dma, struct aml_layout *dest,
struct aml_layout *src, aml_dma_operator op, void *op_arg)
{
int ret;
......@@ -93,7 +38,7 @@ int aml_dma_copy_custom(struct aml_dma *dma, struct aml_layout *dest,
return ret;
}
int aml_dma_async_copy_custom(struct aml_dma *dma, struct aml_dma_request **req,
int aml_dma_async_copy(struct aml_dma *dma, struct aml_dma_request **req,
struct aml_layout *dest, struct aml_layout *src,
aml_dma_operator op, void *op_arg)
{
......
......@@ -9,6 +9,7 @@
*******************************************************************************/
#include "aml.h"
#include "aml/dma/linux-seq.h"
#include "aml/dma/linux-par.h"
#include "aml/layout/dense.h"
......@@ -60,7 +61,7 @@ void *aml_dma_linux_par_do_thread(void *arg)
pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
if (req->type != AML_DMA_REQUEST_TYPE_INVALID)
req->op(req->dest, req->src, req->op_arg);
req->op(req->dest, req->src, req->op_arg, NULL);
return NULL;
}
......
......@@ -11,11 +11,70 @@
#include "aml.h"
#include "aml/dma/linux-seq.h"
#include "aml/layout/dense.h"
#include "aml/layout/native.h"
#include <assert.h>
#include <errno.h>
#include <sys/mman.h>
/*******************************************************************************
* Generic DMA Copy implementations
*
* Needed by most DMAs. We don't provide introspection or any fancy API to it at
* this point.
******************************************************************************/
/*
 * Recursive worker for aml_copy_layout_generic().
 * Descends through the d-dimensional index space, fixing one coordinate per
 * recursion level, and copies a single element per memcpy at the innermost
 * level via the layouts' native-order accessors.
 *
 * d:           number of dimensions left to iterate (recursion depth).
 * dst:         destination layout, written through aml_layout_deref_native.
 * src:         source layout, read through aml_layout_deref_native.
 * elem_number: per-dimension element counts (native order).
 * elem_size:   size in bytes of one element.
 * coords:      scratch coordinate vector of length >= d.
 */
static inline void aml_copy_layout_generic_helper(size_t d,
						  struct aml_layout *dst,
						  const struct aml_layout *src,
						  const size_t *elem_number,
						  size_t elem_size,
						  size_t *coords)
{
	if (d == 1) {
		/* Innermost dimension: copy element by element. */
		for (size_t idx = 0; idx < elem_number[0]; idx++) {
			coords[0] = idx;
			memcpy(aml_layout_deref_native(dst, coords),
			       aml_layout_deref_native(src, coords),
			       elem_size);
		}
		return;
	}

	/* Outer dimension: pin one coordinate, recurse on the rest. */
	const size_t dim = d - 1;

	for (size_t idx = 0; idx < elem_number[dim]; idx++) {
		coords[dim] = idx;
		aml_copy_layout_generic_helper(dim, dst, src, elem_number,
					       elem_size, coords);
	}
}
/*
 * Generic element-wise copy between two layouts of identical shape.
 * Preconditions — same dimension count, same element size, identical native
 * dimensions — are enforced with assert() only, so they compile out under
 * NDEBUG.
 *
 * dst: destination layout (written).
 * src: source layout (read).
 * arg: unused; should be NULL.
 * out: unused output slot required by the aml_dma_operator signature.
 * Always returns 0.
 */
int aml_copy_layout_generic(struct aml_layout *dst,
			    const struct aml_layout *src,
			    void *arg,
			    void **out)
{
	(void)arg;
	(void)out;

	const size_t ndims = aml_layout_ndims(dst);

	assert(ndims == aml_layout_ndims(src));

	const size_t esize = aml_layout_element_size(dst);

	assert(esize == aml_layout_element_size(src));

	/* VLAs sized by the runtime dimension count. */
	size_t coords[ndims];
	size_t dims_src[ndims];
	size_t dims_dst[ndims];

	aml_layout_dims_native(src, dims_src);
	aml_layout_dims_native(dst, dims_dst);

	/* Both layouts must agree on every native dimension. */
	for (size_t k = 0; k < ndims; k++)
		assert(dims_src[k] == dims_dst[k]);

	aml_copy_layout_generic_helper(ndims, dst, src, dims_src, esize,
				       coords);
	return 0;
}
/*******************************************************************************
* Linux-backed, sequential dma
* The dma itself is organized into several different components
......@@ -60,7 +119,7 @@ int aml_dma_linux_seq_do_copy(struct aml_dma_linux_seq_data *dma,
assert(dma != NULL);
assert(req != NULL);
assert(req->op != NULL);
return req->op(req->dest, req->src, req->op_arg);
return req->op(req->dest, req->src, req->op_arg, NULL);
}
struct aml_dma_linux_seq_inner_ops aml_dma_linux_seq_inner_ops = {
......
......@@ -58,7 +58,7 @@ void *aml_scratch_par_do_thread(void *arg)
struct aml_scratch_par *scratch = req->scratch;
pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
aml_dma_copy(scratch->data.dma, req->dst, req->src);
aml_dma_copy(scratch->data.dma, req->dst, req->src, NULL, NULL);
return NULL;
}
......
......@@ -57,7 +57,7 @@ int aml_scratch_seq_doit(struct aml_scratch_seq_data *scratch,
assert(scratch != NULL);
assert(req != NULL);
return aml_dma_async_copy(scratch->dma, &req->dma_req,
req->dst, req->src);
req->dst, req->src, NULL, NULL);
}
struct aml_scratch_seq_inner_ops aml_scratch_seq_inner_ops = {
......
......@@ -38,13 +38,13 @@ int main(int argc, char *argv[])
/* invalid requests */
assert(!aml_dma_linux_par_create(&dma, 1, NULL, NULL));
assert(aml_dma_copy(dma, NULL, isl) == -AML_EINVAL);
assert(aml_dma_copy(dma, idl, NULL) == -AML_EINVAL);
assert(aml_dma_copy(dma, NULL, isl, NULL, NULL) == -AML_EINVAL);
assert(aml_dma_copy(dma, idl, NULL, NULL, NULL) == -AML_EINVAL);
struct aml_dma_request *r1, *r2;
/* force dma to increase its requests queue */
assert(!aml_dma_async_copy(dma, &r1, idl, isl));
assert(!aml_dma_async_copy(dma, &r2, idl, isl));
assert(!aml_dma_async_copy(dma, &r1, idl, isl, NULL, NULL));
assert(!aml_dma_async_copy(dma, &r2, idl, isl, NULL, NULL));
assert(aml_dma_wait(dma, NULL) == -AML_EINVAL);
assert(!aml_dma_wait(dma, &r1));
......@@ -54,11 +54,11 @@ int main(int argc, char *argv[])
/* cancel a request on the fly */
assert(!aml_dma_linux_par_create(&dma, 1, NULL, NULL));
assert(aml_dma_cancel(dma, NULL) == -AML_EINVAL);
assert(!aml_dma_async_copy(dma, &r1, idl, isl));
assert(!aml_dma_async_copy(dma, &r1, idl, isl, NULL, NULL));
assert(!aml_dma_cancel(dma, &r1));
/* destroy a running dma */
assert(!aml_dma_async_copy(dma, &r1, idl, isl));
assert(!aml_dma_async_copy(dma, &r1, idl, isl, NULL, NULL));
aml_dma_linux_par_destroy(&dma);
/* move data around */
......@@ -77,7 +77,8 @@ int main(int argc, char *argv[])
aml_layout_dense_create(&layouts[i][1], sptr, 0, sizeof(int),
1, &sz, NULL, NULL);
assert(!aml_dma_async_copy(dma, &requests[i],
layouts[i][0], layouts[i][1]));
layouts[i][0], layouts[i][1],
NULL, NULL));
assert(requests[i] != NULL);
}
for (int i = 0; i < 16; i++) {
......
......@@ -38,13 +38,13 @@ int main(int argc, char *argv[])
/* invalid requests */
assert(!aml_dma_linux_seq_create(&dma, 1, NULL, NULL));
assert(aml_dma_copy(dma, NULL, isl) == -AML_EINVAL);
assert(aml_dma_copy(dma, idl, NULL) == -AML_EINVAL);
assert(aml_dma_copy(dma, NULL, isl, NULL, NULL) == -AML_EINVAL);
assert(aml_dma_copy(dma, idl, NULL, NULL, NULL) == -AML_EINVAL);
struct aml_dma_request *r1, *r2;
/* force dma to increase its requests queue */
assert(!aml_dma_async_copy(dma, &r1, idl, isl));
assert(!aml_dma_async_copy(dma, &r2, idl, isl));
assert(!aml_dma_async_copy(dma, &r1, idl, isl, NULL, NULL));
assert(!aml_dma_async_copy(dma, &r2, idl, isl, NULL, NULL));
assert(aml_dma_wait(dma, NULL) == -AML_EINVAL);
assert(!aml_dma_wait(dma, &r1));
......@@ -52,12 +52,12 @@ int main(int argc, char *argv[])
/* cancel a request on the fly */
assert(aml_dma_cancel(dma, NULL) == -AML_EINVAL);
assert(!aml_dma_async_copy(dma, &r1, idl, isl));
assert(!aml_dma_async_copy(dma, &r1, idl, isl, NULL, NULL));
assert(!aml_dma_cancel(dma, &r1));
/* destroy a running dma */
assert(!aml_dma_async_copy(dma, &r1, idl, isl));
assert(!aml_dma_async_copy(dma, &r1, idl, isl, NULL, NULL));
aml_dma_linux_seq_destroy(&dma);
/* move data around */
......@@ -76,7 +76,8 @@ int main(int argc, char *argv[])
aml_layout_dense_create(&layouts[i][1], sptr, 0, sizeof(int),
1, &sz, NULL, NULL);
assert(!aml_dma_async_copy(dma, &requests[i],
layouts[i][0], layouts[i][1]));
layouts[i][0], layouts[i][1],
NULL, NULL));
assert(requests[i] != NULL);
}
for (int i = 0; i < 16; i++) {
......
......@@ -9,6 +9,7 @@
*******************************************************************************/
#include "aml.h"
#include "aml/dma/linux-seq.h"
#include "aml/layout/dense.h"
#include "aml/layout/native.h"
#include "aml/tiling/resize.h"
......@@ -66,7 +67,7 @@ void test_tiling_even_mixed(void)
b = aml_tiling_index(t, (size_t[]){k, j, i});
bres = aml_tiling_index(tres,
(size_t[]){i, j, k});
aml_copy_layout_generic(bres, b, NULL);
aml_copy_layout_generic(bres, b, NULL, NULL);
free(b);
free(bres);
}
......@@ -103,7 +104,7 @@ void test_tiling_even_mixed(void)
b = aml_tiling_index(t, (size_t[]){i, j, k});
bres = aml_tiling_index(tres,
(size_t[]){k, j, i});
aml_copy_layout_generic(bres, b, NULL);
aml_copy_layout_generic(bres, b, NULL, NULL);
free(b);
free(bres);
}
......@@ -176,7 +177,7 @@ void test_tiling_even(void)
b = aml_tiling_index(t, (size_t[]){k, j, i});
bres = aml_tiling_index(tres,
(size_t[]){k, j, i});
aml_copy_layout_generic(bres, b, NULL);
aml_copy_layout_generic(bres, b, NULL, NULL);
free(b);
free(bres);
}
......@@ -222,7 +223,7 @@ void test_tiling_even(void)
bres = aml_tiling_index(tres,
(size_t[]){i, j, k});
assert(b != NULL && bres != NULL);
aml_copy_layout_generic(bres, b, NULL);
aml_copy_layout_generic(bres, b, NULL, NULL);
free(b);
free(bres);
}
......@@ -296,7 +297,7 @@ void test_tiling_uneven(void)
b = aml_tiling_index(t, (size_t[]){k, j, i});
bres = aml_tiling_index(tres,
(size_t[]){k, j, i});
aml_copy_layout_generic(bres, b, NULL);
aml_copy_layout_generic(bres, b, NULL, NULL);
free(b);
free(bres);
}
......@@ -342,7 +343,7 @@ void test_tiling_uneven(void)
b = aml_tiling_index(t, (size_t[]){i, j, k});
bres = aml_tiling_index(tres,
(size_t[]){i, j, k});
aml_copy_layout_generic(bres, b, NULL);
aml_copy_layout_generic(bres, b, NULL, NULL);
free(b);
free(bres);
}
......@@ -415,7 +416,7 @@ void test_tiling_pad_even(void)
b = aml_tiling_index(t, (size_t[]){k, j, i});
bres = aml_tiling_index(tres,
(size_t[]){k, j, i});
aml_copy_layout_generic(bres, b, NULL);
aml_copy_layout_generic(bres, b, NULL, NULL);
free(b);
free(bres);
}
......@@ -459,7 +460,7 @@ void test_tiling_pad_even(void)
b = aml_tiling_index(t, (size_t[]){i, j, k});
bres = aml_tiling_index(tres,
(size_t[]){i, j, k});
aml_copy_layout_generic(bres, b, NULL);
aml_copy_layout_generic(bres, b, NULL, NULL);
free(b);
free(bres);
}
......@@ -539,7 +540,7 @@ void test_tiling_pad_uneven(void)
b = aml_tiling_index(t, (size_t[]){k, j, i});
bres = aml_tiling_index(tres,
(size_t[]){k, j, i});
aml_copy_layout_generic(bres, b, NULL);
aml_copy_layout_generic(bres, b, NULL, NULL);
free(b);
free(bres);
}
......@@ -594,7 +595,7 @@ void test_tiling_pad_uneven(void)
b = aml_tiling_index(t, (size_t[]){i, j, k});
bres = aml_tiling_index(tres,
(size_t[]){i, j, k});
aml_copy_layout_generic(bres, b, NULL);
aml_copy_layout_generic(bres, b, NULL, NULL);
free(b);
free(bres);
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.