Commit a97d2d96 authored by Swann Perarnau's avatar Swann Perarnau

Merge branch 'remove-dma-va-args' into 'master'

Remove dma va args

Closes #49

See merge request !77
parents 27460a5f 9a97ce3a
Pipeline #8320 passed with stages
in 6 minutes and 31 seconds
......@@ -820,11 +820,6 @@ int aml_layout_slice(const struct aml_layout *layout,
**/
#define AML_DMA_REQUEST_TYPE_LAYOUT 0
/**
* The request is in the format (dest ptr, src ptr, size)
*/
#define AML_DMA_REQUEST_TYPE_PTR 1
/**
* aml_dma is mainly used to asynchronously move data.
* aml_dma_request is an opaque structure containing information
......@@ -859,14 +854,14 @@ struct aml_dma_ops {
* @param dma: dma_implementation internal data.
* @param req[out]: the request handle to manage termination
* of the movement.
* @param type: A valid AML_DMA_REQUEST_TYPE_* specifying the kind
* of operation to perform.
* @param args: list of variadic arguments provided to aml_dma_copy()
* @param dest: layout describing the destination.
* @param src: layout describing the source.
* @return an AML error code.
**/
int (*create_request)(struct aml_dma_data *dma,
struct aml_dma_request **req,
int type, va_list args);
struct aml_layout *dest,
struct aml_layout *src);
/**
* Destroy the request handle. If the data movement is still ongoing,
......@@ -908,10 +903,12 @@ struct aml_dma {
/**
* Requests a synchronous data copy between two different buffers.
* @param dma: an initialized DMA structure.
* Variadic arguments: implementation-specific.
* @param dest: layout describing the destination.
* @param src: layout describing the source.
* @return 0 if successful; an error code otherwise.
**/
int aml_dma_copy(struct aml_dma *dma, int type, ...);
int aml_dma_copy(struct aml_dma *dma, struct aml_layout *dest,
struct aml_layout *src);
/**
 * Requests a data copy between two different buffers. This is an asynchronous
......@@ -919,11 +916,13 @@ int aml_dma_copy(struct aml_dma *dma, int type, ...);
* @param dma: an initialized DMA structure.
* @param req: an address where the pointer to the newly assigned DMA request
* will be stored.
* Variadic arguments: implementation-specific.
* @param dest: layout describing the destination.
* @param src: layout describing the source.
* @return 0 if successful; an error code otherwise.
**/
int aml_dma_async_copy(struct aml_dma *dma, struct aml_dma_request **req,
int type, ...);
struct aml_layout *dest,
struct aml_layout *src);
/**
* Waits for an asynchronous DMA request to complete.
......
......@@ -27,14 +27,8 @@
**/
extern struct aml_dma_ops aml_dma_linux_par_ops;
/** Request handle for clients of the DMA. **/
struct aml_dma_request_linux_par {
/** internal request uuid, index in the request vector. **/
int uuid;
};
/** Inside of a parallel request for linux movement. **/
struct aml_dma_linux_par_request_data {
struct aml_dma_request_linux_par {
/**
* The type of dma request
* @see <aml.h>
......
......@@ -27,14 +27,8 @@
**/
extern struct aml_dma_ops aml_dma_linux_seq_ops;
/** Request handle for clients of the DMA. **/
struct aml_dma_request_linux_seq {
/** internal request uuid, index in the request vector. **/
int uuid;
};
/** Inside of a sequential request for linux movement. **/
struct aml_dma_linux_seq_request_data {
struct aml_dma_request_linux_seq {
/**
* The type of dma request
* @see <aml.h>
......@@ -66,7 +60,7 @@ struct aml_dma_linux_seq_ops {
* @see aml_area
**/
int (*do_copy)(struct aml_dma_linux_seq_data *dma,
struct aml_dma_linux_seq_request_data *req);
struct aml_dma_request_linux_seq *req);
};
/**
......@@ -105,7 +99,7 @@ void aml_dma_linux_seq_destroy(struct aml_dma **dma);
* @return 0 if successful; an error code otherwise.
**/
int aml_dma_linux_seq_do_copy(struct aml_dma_linux_seq_data *dma,
struct aml_dma_linux_seq_request_data *req);
struct aml_dma_request_linux_seq *req);
/**
* @}
......
......@@ -25,25 +25,19 @@
**/
extern struct aml_scratch_ops aml_scratch_par_ops;
/** Request handle for clients of the scratch. **/
struct aml_scratch_request_par {
/** internal request uuid, index in the request vector. **/
int uuid;
};
/** Inside of a parallel scratch request with linux dma. **/
struct aml_scratch_par_request_data {
struct aml_scratch_request_par {
/**
* The type of scratchpad request
* @see <aml.h>
**/
int type;
/** The source pointer of the data movement **/
void *srcptr;
struct aml_layout *src;
/** The tile identifier in source pointer **/
int srcid;
/** The destination pointer of the data movement **/
void *dstptr;
struct aml_layout *dst;
/** The tile identifier in destination pointer **/
int dstid;
/** The scratchpad handling this request **/
......
......@@ -25,14 +25,8 @@
**/
extern struct aml_scratch_ops aml_scratch_seq_ops;
/** Request handle for clients of the scratch. **/
/** Inside of a sequential scratch request with linux dma. **/
struct aml_scratch_request_seq {
/** internal request uuid, index in the request vector. **/
int uuid;
};
/** Inside of a sequential scratch request. **/
struct aml_scratch_seq_request_data {
/**
* The type of scratchpad request
* @see <aml.h>
......@@ -40,12 +34,12 @@ struct aml_scratch_seq_request_data {
int type;
/** The tiling used for data organization in source and destination **/
struct aml_tiling *tiling;
/** The source pointer of the data movement **/
void *srcptr;
/** The source layout of the data movement **/
struct aml_layout *src;
/** The identifier of the source tile **/
int srcid;
/** The destination pointer of the data movement **/
void *dstptr;
struct aml_layout *dst;
/** The identifier of the destination tile **/
int dstid;
/** The request used for movement **/
......@@ -85,7 +79,7 @@ struct aml_scratch_seq_ops {
* @param req: The request to execute.
**/
int (*doit)(struct aml_scratch_seq_data *scratch,
struct aml_scratch_seq_request_data *req);
struct aml_scratch_request_seq *req);
};
/** Sequential implementation of a scratchpad **/
......
......@@ -76,18 +76,16 @@ int aml_copy_layout_generic(struct aml_layout *dst,
* abstract the request creation after this layer.
******************************************************************************/
int aml_dma_copy(struct aml_dma *dma, int type, ...)
int aml_dma_copy(struct aml_dma *dma, struct aml_layout *dest,
struct aml_layout *src)
{
va_list ap;
int ret;
struct aml_dma_request *req;
if (dma == NULL)
if (dma == NULL || dest == NULL || src == NULL)
return -AML_EINVAL;
va_start(ap, type);
ret = dma->ops->create_request(dma->data, &req, type, ap);
va_end(ap);
ret = dma->ops->create_request(dma->data, &req, dest, src);
if (ret != AML_SUCCESS)
return ret;
ret = dma->ops->wait_request(dma->data, &req);
......@@ -95,18 +93,12 @@ int aml_dma_copy(struct aml_dma *dma, int type, ...)
}
int aml_dma_async_copy(struct aml_dma *dma, struct aml_dma_request **req,
int type, ...)
struct aml_layout *dest, struct aml_layout *src)
{
va_list ap;
int ret;
if (dma == NULL || req == NULL)
if (dma == NULL || req == NULL || dest == NULL || src == NULL)
return -AML_EINVAL;
va_start(ap, type);
ret = dma->ops->create_request(dma->data, req, type, ap);
va_end(ap);
return ret;
return dma->ops->create_request(dma->data, req, dest, src);
}
int aml_dma_cancel(struct aml_dma *dma, struct aml_dma_request **req)
......
......@@ -28,34 +28,21 @@
/*******************************************************************************
* Requests:
******************************************************************************/
int aml_dma_request_linux_par_create(struct aml_dma_request_linux_par **req,
int uuid)
{
assert(req != NULL);
*req = calloc(1, sizeof(struct aml_dma_request_linux_par));
if (*req == NULL)
return -AML_ENOMEM;
(*req)->uuid = uuid;
return 0;
}
void aml_dma_request_linux_par_destroy(struct aml_dma_request_linux_par **req)
int aml_dma_request_linux_par_copy_init(struct aml_dma_request_linux_par *req,
struct aml_layout *dest,
struct aml_layout *src)
{
assert(req != NULL);
free(*req);
*req = NULL;
req->type = AML_DMA_REQUEST_TYPE_LAYOUT;
req->dest = dest;
req->src = src;
return 0;
}
int aml_dma_linux_par_request_data_init(
struct aml_dma_linux_par_request_data *req,
int type,
struct aml_layout *dest,
struct aml_layout *src)
int aml_dma_request_linux_par_copy_destroy(struct aml_dma_request_linux_par *r)
{
assert(req != NULL);
req->type = type;
req->dest = dest;
req->src = src;
assert(r != NULL);
return 0;
}
......@@ -65,8 +52,8 @@ int aml_dma_linux_par_request_data_init(
void *aml_dma_linux_par_do_thread(void *arg)
{
struct aml_dma_linux_par_request_data *req =
(struct aml_dma_linux_par_request_data *)arg;
struct aml_dma_request_linux_par *req =
(struct aml_dma_request_linux_par *)arg;
pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
if (req->type != AML_DMA_REQUEST_TYPE_INVALID)
......@@ -84,64 +71,25 @@ struct aml_dma_linux_par_ops aml_dma_linux_par_inner_ops = {
int aml_dma_linux_par_create_request(struct aml_dma_data *d,
struct aml_dma_request **r,
int type, va_list ap)
struct aml_layout *dest,
struct aml_layout *src)
{
/* NULL checks done by the generic API */
assert(d != NULL);
assert(r != NULL);
assert(dest != NULL);
assert(src != NULL);
struct aml_dma_linux_par *dma =
(struct aml_dma_linux_par *)d;
struct aml_dma_request_linux_par *ret;
struct aml_dma_linux_par_request_data *req;
int err = AML_SUCCESS;
struct aml_dma_request_linux_par *req;
pthread_mutex_lock(&dma->data.lock);
req = aml_vector_add(dma->data.requests);
/* init the request */
if (type == AML_DMA_REQUEST_TYPE_LAYOUT) {
struct aml_layout *dl, *sl;
dl = va_arg(ap, struct aml_layout *);
sl = va_arg(ap, struct aml_layout *);
if (dl == NULL || sl == NULL) {
err = -AML_EINVAL;
goto unlock;
}
aml_dma_linux_par_request_data_init(req,
AML_DMA_REQUEST_TYPE_LAYOUT,
dl, sl);
} else if (type == AML_DMA_REQUEST_TYPE_PTR) {
struct aml_layout *dl, *sl;
void *dp, *sp;
size_t sz;
dp = va_arg(ap, void *);
sp = va_arg(ap, void *);
sz = va_arg(ap, size_t);
if (dp == NULL || sp == NULL || sz == 0) {
err = -AML_EINVAL;
goto unlock;
}
/* simple 1D layout, none of the parameters really matter, as
* long as the copy generates a single memcpy.
*/
aml_layout_dense_create(&dl, dp, 0, 1, 1, &sz, NULL, NULL);
aml_layout_dense_create(&sl, sp, 0, 1, 1, &sz, NULL, NULL);
aml_dma_linux_par_request_data_init(req,
AML_DMA_REQUEST_TYPE_PTR,
dl, sl);
} else
err = -AML_EINVAL;
unlock:
aml_dma_request_linux_par_copy_init(req, dest, src);
pthread_mutex_unlock(&dma->data.lock);
if (req->type != AML_DMA_REQUEST_TYPE_INVALID) {
int uuid = aml_vector_getid(dma->data.requests, req);
pthread_create(&req->thread, NULL, dma->ops.do_thread, req);
aml_dma_request_linux_par_create(&ret, uuid);
*r = (struct aml_dma_request *)ret;
}
return err;
pthread_create(&req->thread, NULL, dma->ops.do_thread, req);
*r = (struct aml_dma_request *)req;
return 0;
}
int aml_dma_linux_par_destroy_request(struct aml_dma_data *d,
......@@ -152,30 +100,24 @@ int aml_dma_linux_par_destroy_request(struct aml_dma_data *d,
struct aml_dma_linux_par *dma =
(struct aml_dma_linux_par *)d;
struct aml_dma_request_linux_par *req;
struct aml_dma_linux_par_request_data *inner_req;
if (*r == NULL)
return -AML_EINVAL;
req = (struct aml_dma_request_linux_par *)*r;
inner_req = aml_vector_get(dma->data.requests, req->uuid);
if (inner_req == NULL)
return -AML_EINVAL;
/* we cancel and join, instead of killing, for a cleaner result */
if (inner_req->type != AML_DMA_REQUEST_TYPE_INVALID) {
pthread_cancel(inner_req->thread);
pthread_join(inner_req->thread, NULL);
if (req->type != AML_DMA_REQUEST_TYPE_INVALID) {
pthread_cancel(req->thread);
pthread_join(req->thread, NULL);
}
if (inner_req->type == AML_DMA_REQUEST_TYPE_PTR) {
aml_layout_dense_destroy(&inner_req->dest);
aml_layout_dense_destroy(&inner_req->src);
}
/* make sure to destroy layouts before putting the request back in the
* vector
*/
aml_dma_request_linux_par_copy_destroy(req);
pthread_mutex_lock(&dma->data.lock);
aml_vector_remove(dma->data.requests, inner_req);
aml_vector_remove(dma->data.requests, req);
pthread_mutex_unlock(&dma->data.lock);
aml_dma_request_linux_par_destroy(&req);
*r = NULL;
return 0;
}
......@@ -187,27 +129,18 @@ int aml_dma_linux_par_wait_request(struct aml_dma_data *d,
assert(r != NULL);
struct aml_dma_linux_par *dma = (struct aml_dma_linux_par *)d;
struct aml_dma_request_linux_par *req;
struct aml_dma_linux_par_request_data *inner_req;
if (*r == NULL)
return -AML_EINVAL;
req = (struct aml_dma_request_linux_par *)*r;
inner_req = aml_vector_get(dma->data.requests, req->uuid);
if (inner_req == NULL)
return -AML_EINVAL;
if (inner_req->type != AML_DMA_REQUEST_TYPE_INVALID)
pthread_join(inner_req->thread, NULL);
if (req->type != AML_DMA_REQUEST_TYPE_INVALID)
pthread_join(req->thread, NULL);
if (inner_req->type == AML_DMA_REQUEST_TYPE_PTR) {
aml_layout_dense_destroy(&inner_req->dest);
aml_layout_dense_destroy(&inner_req->src);
}
aml_dma_request_linux_par_copy_destroy(req);
pthread_mutex_lock(&dma->data.lock);
aml_vector_remove(dma->data.requests, inner_req);
aml_vector_remove(dma->data.requests, req);
pthread_mutex_unlock(&dma->data.lock);
aml_dma_request_linux_par_destroy(&req);
*r = NULL;
return 0;
}
......@@ -244,8 +177,8 @@ int aml_dma_linux_par_create(struct aml_dma **dma, size_t nbreqs)
/* allocate request array */
aml_vector_create(&d->data.requests, nbreqs,
sizeof(struct aml_dma_linux_par_request_data),
offsetof(struct aml_dma_linux_par_request_data, type),
sizeof(struct aml_dma_request_linux_par),
offsetof(struct aml_dma_request_linux_par, type),
AML_DMA_REQUEST_TYPE_INVALID);
pthread_mutex_init(&d->data.lock, NULL);
......@@ -261,17 +194,14 @@ void aml_dma_linux_par_destroy(struct aml_dma **d)
return;
dma = (struct aml_dma_linux_par *)(*d)->data;
for (size_t i = 0; i < aml_vector_size(dma->data.requests); i++) {
struct aml_dma_linux_par_request_data *req;
struct aml_dma_request_linux_par *req;
req = aml_vector_get(dma->data.requests, i);
if (req->type != AML_DMA_REQUEST_TYPE_INVALID) {
pthread_cancel(req->thread);
pthread_join(req->thread, NULL);
}
if (req->type == AML_DMA_REQUEST_TYPE_PTR) {
aml_layout_dense_destroy(&req->dest);
aml_layout_dense_destroy(&req->src);
}
aml_dma_request_linux_par_copy_destroy(req);
}
aml_vector_destroy(&dma->data.requests);
pthread_mutex_destroy(&dma->data.lock);
......
......@@ -29,34 +29,21 @@
* Requests:
******************************************************************************/
int aml_dma_request_linux_seq_create(struct aml_dma_request_linux_seq **req,
int uuid)
int aml_dma_request_linux_seq_copy_init(struct aml_dma_request_linux_seq *req,
struct aml_layout *dest,
struct aml_layout *src)
{
assert(req != NULL);
*req = calloc(1, sizeof(struct aml_dma_request_linux_seq));
if (*req == NULL)
return -AML_ENOMEM;
(*req)->uuid = uuid;
req->type = AML_DMA_REQUEST_TYPE_LAYOUT;
req->dest = dest;
req->src = src;
return 0;
}
void aml_dma_request_linux_seq_destroy(struct aml_dma_request_linux_seq **req)
{
assert(req != NULL);
free(*req);
*req = NULL;
}
void aml_dma_linux_seq_request_data_init(
struct aml_dma_linux_seq_request_data *req,
int type,
struct aml_layout *dest,
struct aml_layout *src)
int aml_dma_request_linux_seq_copy_destroy(struct aml_dma_request_linux_seq *r)
{
assert(req != NULL);
req->type = type;
req->dest = dest;
req->src = src;
assert(r != NULL);
return 0;
}
/*******************************************************************************
......@@ -64,7 +51,7 @@ void aml_dma_linux_seq_request_data_init(
******************************************************************************/
int aml_dma_linux_seq_do_copy(struct aml_dma_linux_seq_data *dma,
struct aml_dma_linux_seq_request_data *req)
struct aml_dma_request_linux_seq *req)
{
assert(dma != NULL);
assert(req != NULL);
......@@ -82,64 +69,24 @@ struct aml_dma_linux_seq_ops aml_dma_linux_seq_inner_ops = {
int aml_dma_linux_seq_create_request(struct aml_dma_data *d,
struct aml_dma_request **r,
int type, va_list ap)
struct aml_layout *dest,
struct aml_layout *src)
{
/* NULL checks done by the generic API */
assert(d != NULL);
assert(r != NULL);
assert(dest != NULL);
assert(src != NULL);
struct aml_dma_linux_seq *dma =
(struct aml_dma_linux_seq *)d;
struct aml_dma_request_linux_seq *ret;
struct aml_dma_linux_seq_request_data *req;
int err = AML_SUCCESS;
struct aml_dma_request_linux_seq *req;
pthread_mutex_lock(&dma->data.lock);
req = aml_vector_add(dma->data.requests);
/* init the request */
if (type == AML_DMA_REQUEST_TYPE_LAYOUT) {
struct aml_layout *dl, *sl;
dl = va_arg(ap, struct aml_layout *);
sl = va_arg(ap, struct aml_layout *);
if (dl == NULL || sl == NULL) {
err = -AML_EINVAL;
goto unlock;
}
aml_dma_linux_seq_request_data_init(req,
AML_DMA_REQUEST_TYPE_LAYOUT,
dl, sl);
} else if (type == AML_DMA_REQUEST_TYPE_PTR) {
struct aml_layout *dl, *sl;
void *dp, *sp;
size_t sz;
dp = va_arg(ap, void *);
sp = va_arg(ap, void *);
sz = va_arg(ap, size_t);
if (dp == NULL || sp == NULL || sz == 0) {
err = -AML_EINVAL;
goto unlock;
}
/* simple 1D layout, none of the parameters really matter, as
* long as the copy generates a single memcpy.
*/
aml_layout_dense_create(&dl, dp, 0, 1, 1, &sz, NULL, NULL);
aml_layout_dense_create(&sl, sp, 0, 1, 1, &sz, NULL, NULL);
aml_dma_linux_seq_request_data_init(req,
AML_DMA_REQUEST_TYPE_PTR,
dl, sl);
} else
err = -AML_EINVAL;
unlock:
aml_dma_request_linux_seq_copy_init(req, dest, src);
pthread_mutex_unlock(&dma->data.lock);
if (req->type != AML_DMA_REQUEST_TYPE_INVALID) {
int uuid = aml_vector_getid(dma->data.requests, req);
assert(uuid != AML_DMA_REQUEST_TYPE_INVALID);
aml_dma_request_linux_seq_create(&ret, uuid);
*r = (struct aml_dma_request *)ret;
}
return err;
*r = (struct aml_dma_request *)req;
return 0;
}
int aml_dma_linux_seq_destroy_request(struct aml_dma_data *d,
......@@ -150,25 +97,15 @@ int aml_dma_linux_seq_destroy_request(struct aml_dma_data *d,
struct aml_dma_linux_seq *dma =
(struct aml_dma_linux_seq *)d;
struct aml_dma_request_linux_seq *req;
struct aml_dma_linux_seq_request_data *inner_req;
if (*r == NULL)
return -AML_EINVAL;
req = (struct aml_dma_request_linux_seq *)*r;
inner_req = aml_vector_get(dma->data.requests, req->uuid);
if (inner_req == NULL)
return -AML_EINVAL;
aml_dma_request_linux_seq_copy_destroy(req);
pthread_mutex_lock(&dma->data.lock);
if (inner_req->type == AML_DMA_REQUEST_TYPE_PTR) {
aml_layout_dense_destroy(&inner_req->dest);
aml_layout_dense_destroy(&inner_req->src);
}
aml_vector_remove(dma->data.requests, inner_req);
aml_vector_remove(dma->data.requests, req);
pthread_mutex_unlock(&dma->data.lock);
aml_dma_request_linux_seq_destroy(&req);
*r = NULL;
return 0;
}
......@@ -180,19 +117,14 @@ int aml_dma_linux_seq_wait_request(struct aml_dma_data *d,
assert(r != NULL);
struct aml_dma_linux_seq *dma = (struct aml_dma_linux_seq *)d;
struct aml_dma_request_linux_seq *req;
struct aml_dma_linux_seq_request_data *inner_req;
if (*r == NULL)
return -AML_EINVAL;
req = (struct aml_dma_request_linux_seq *)*r;
inner_req = aml_vector_get(dma->data.requests, req->uuid);
if (inner_req == NULL)
return -AML_EINVAL;
/* execute */
if (inner_req->type != AML_DMA_REQUEST_TYPE_INVALID)
dma->ops.do_copy(&dma->data, inner_req);
if (req->type != AML_DMA_REQUEST_TYPE_INVALID)
dma->ops.do_copy(&dma->data, req);
/* destroy a completed request */
aml_dma_linux_seq_destroy_request(d, r);
......@@ -230,8 +162,8 @@ int aml_dma_linux_seq_create(struct aml_dma **dma, size_t nbreqs)
d->ops = aml_dma_linux_seq_inner_ops;
aml_vector_create(&d->data.requests, nbreqs,
sizeof(struct aml_dma_linux_seq_request_data),
offsetof(struct aml_dma_linux_seq_request_data, type),
sizeof(struct aml_dma_request_linux_seq),
offsetof(struct aml_dma_request_linux_seq, type),
AML_DMA_REQUEST_TYPE_INVALID);
pthread_mutex_init(&d->data.lock, NULL);
......
......@@ -9,6 +9,7 @@
*******************************************************************************/
#include "aml.h"
#include "aml/layout/dense.h"
#include "aml/scratch/par.h"
#include <assert.h>
......@@ -24,36 +25,34 @@
/*******************************************************************************
* Requests:
******************************************************************************/
int aml_scratch_request_par_create(struct aml_scratch_request_par **req,
int uuid)
{
assert(req != NULL);
*req = calloc(1, sizeof(struct aml_scratch_request_par));
if (*req == NULL)
return -AML_ENOMEM;
(*req)->uuid = uuid;
return 0;
}
void aml_scratch_request_par_destroy(struct aml_scratch_request_par **req)
{
assert(req != NULL);
free(*req);
*req = NULL;
}
int aml_scratch_request_par_init(struct aml_scratch_request_par *req, int type,
struct aml_scratch_par *scratch,
void *dstptr, int dstid, void *srcptr,
int srcid)
int aml_scratch_par_request_data_init(struct aml_scratch_par_request_data *req,
int type, struct aml_scratch_par *scratch,
void *dstptr, int dstid, void *srcptr,
int srcid)
{
assert(req != NULL);
void *dp, *sp;
size_t size;
req->type = type;
req->scratch = scratch;
req->srcptr = srcptr;
req->srcid = srcid;
req->dstptr = dstptr;
req->dstid = dstid;
dp = aml_tiling_tilestart(scratch->data.tiling, dstptr, dstid);
sp = aml_tiling_tilestart(scratch->data.tiling, srcptr, srcid);
size = aml_tiling_tilesize(scratch->data.tiling, srcid);
aml_layout_dense_create(&req->dst, dp, 0, 1, 1, &size, NULL, NULL);
aml_layout_dense_create(&req->src, sp, 0, 1, 1, &size, NULL, NULL);
return 0;
}
int aml_scratch_request_par_destroy(struct aml_scratch_request_par *r)
{
assert(r != NULL);
aml_layout_dense_destroy(&r->dst);
aml_layout_dense_destroy(&r->src);
return 0;
}
......@@ -62,21 +61,11 @@ int aml_scratch_par_request_data_init(struct aml_scratch_par_request_data *req,
******************************************************************************/
void *aml_scratch_par_do_thread(void *arg)
{
struct aml_scratch_par_request_data *req =
(struct aml_scratch_par_request_data *)arg;
struct aml_scratch_request_par *req =
(struct aml_scratch_request_par *)arg;
struct aml_scratch_par *scratch = req->scratch;
void *dest, *src;
size_t size;
dest = aml_tiling_tilestart(scratch->data.tiling,