Commit 04315e6e authored by Swann Perarnau

[refactor/fix] use uuid in user-side request type

Since a vector resize can cause pointers into the vector to become
invalid, this patch introduces a level of indirection for DMAs and
scratches, using a uuid on the user side of a request to index into
the request vector.
parent e964e69e
Pipeline #8300 passed with stages
in 26 minutes and 29 seconds
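The hazard the commit message describes can be reproduced with any growable array: a reallocation may move the storage, so a pointer taken before the resize can dangle afterwards, while an integer index keeps naming the same logical slot. The sketch below only illustrates that point; it uses a hypothetical grow() helper on a plain array, not AML's vector API, and omits error handling for brevity.

#include <stdlib.h>

struct slot { int payload; };

struct vec {
	struct slot *data;
	size_t len, cap;
};

/* Hypothetical helper: may realloc and therefore move v->data. */
static struct slot *grow(struct vec *v)
{
	if (v->len == v->cap) {
		v->cap = v->cap ? v->cap * 2 : 4;
		v->data = realloc(v->data, v->cap * sizeof(*v->data));
	}
	return &v->data[v->len++];
}

int main(void)
{
	struct vec v = {0};
	struct slot *p = grow(&v);	/* pointer into the vector */
	size_t id = v.len - 1;		/* index into the vector */

	for (int i = 0; i < 100; i++)
		grow(&v);		/* may move v.data */

	/* p may now dangle; v.data[id] still names the same slot. */
	v.data[id].payload = 42;
	(void)p;
	free(v.data);
	return 0;
}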
@@ -27,8 +27,14 @@
  **/
 extern struct aml_dma_ops aml_dma_linux_par_ops;
 
-/** Inside of a parallel request for linux movement. **/
+/** Request handle for clients of the DMA. **/
 struct aml_dma_request_linux_par {
+	/** internal request uuid, index in the request vector. **/
+	int uuid;
+};
+
+/** Inside of a parallel request for linux movement. **/
+struct aml_dma_linux_par_request_data {
 	/**
 	 * The type of dma request
 	 * @see <aml.h>
...
@@ -27,8 +27,14 @@
  **/
 extern struct aml_dma_ops aml_dma_linux_seq_ops;
 
-/** Inside of a sequential request for linux movement. **/
+/** Request handle for clients of the DMA. **/
 struct aml_dma_request_linux_seq {
+	/** internal request uuid, index in the request vector. **/
+	int uuid;
+};
+
+/** Inside of a sequential request for linux movement. **/
+struct aml_dma_linux_seq_request_data {
 	/**
 	 * The type of dma request
 	 * @see <aml.h>
@@ -60,7 +66,7 @@ struct aml_dma_linux_seq_ops {
 	 * @see aml_area
 	 **/
 	int (*do_copy)(struct aml_dma_linux_seq_data *dma,
-		       struct aml_dma_request_linux_seq *req);
+		       struct aml_dma_linux_seq_request_data *req);
 };
 
 /**
@@ -99,7 +105,7 @@ void aml_dma_linux_seq_destroy(struct aml_dma **dma);
  * @return 0 if successful; an error code otherwise.
  **/
 int aml_dma_linux_seq_do_copy(struct aml_dma_linux_seq_data *dma,
-			      struct aml_dma_request_linux_seq *req);
+			      struct aml_dma_linux_seq_request_data *req);
 
 /**
  * @}
...
@@ -25,8 +25,14 @@
  **/
 extern struct aml_scratch_ops aml_scratch_par_ops;
 
-/** Inside of a parallel scratch request with linux dma. **/
+/** Request handle for clients of the scratch. **/
 struct aml_scratch_request_par {
+	/** internal request uuid, index in the request vector. **/
+	int uuid;
+};
+
+/** Inside of a parallel scratch request with linux dma. **/
+struct aml_scratch_par_request_data {
 	/**
 	 * The type of scratchpad request
 	 * @see <aml.h>
...
@@ -25,8 +25,14 @@
  **/
 extern struct aml_scratch_ops aml_scratch_seq_ops;
 
-/** Inside of a sequential scratch request with linux dma. **/
+/** Request handle for clients of the scratch. **/
 struct aml_scratch_request_seq {
+	/** internal request uuid, index in the request vector. **/
+	int uuid;
+};
+
+/** Inside of a sequential scratch request. **/
+struct aml_scratch_seq_request_data {
 	/**
 	 * The type of scratchpad request
 	 * @see <aml.h>
@@ -79,7 +85,7 @@ struct aml_scratch_seq_ops {
 	 * @param req: The request to execute.
 	 **/
 	int (*doit)(struct aml_scratch_seq_data *scratch,
-		    struct aml_scratch_request_seq *req);
+		    struct aml_scratch_seq_request_data *req);
 };
 
 /** Sequential implementation of a scratchpad **/
...
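The .c hunks that follow apply the same split in every backend: create_request stores the real request state in the backend's vector, records its index with aml_vector_getid, and returns a small heap-allocated handle carrying only that uuid; wait_request and destroy_request later resolve the handle back to the internal state with aml_vector_get. A minimal self-contained sketch of that handle/lookup pattern, with a fixed-size array standing in for AML's vector and all names hypothetical:

#include <assert.h>
#include <stdlib.h>

/* Internal per-request state, owned by the backend (stays in the table). */
struct request_data { int type; };

/* Public handle returned to the caller: an index, never a pointer. */
struct request_handle { int uuid; };

#define NBREQS 16
static struct request_data requests[NBREQS];	/* stand-in for aml_vector */

static struct request_handle *create_request(int slot, int type)
{
	struct request_handle *h = calloc(1, sizeof(*h));

	if (h == NULL)
		return NULL;
	requests[slot].type = type;
	h->uuid = slot;			/* record the index, not the address */
	return h;
}

static struct request_data *resolve(const struct request_handle *h)
{
	assert(h != NULL && h->uuid >= 0 && h->uuid < NBREQS);
	return &requests[h->uuid];	/* always re-resolve by id */
}

static void destroy_request(struct request_handle **h)
{
	resolve(*h)->type = 0;		/* mark the slot free */
	free(*h);
	*h = NULL;
}

int main(void)
{
	struct request_handle *h = create_request(3, 1);

	assert(h != NULL && resolve(h)->type == 1);
	destroy_request(&h);
	return 0;
}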
@@ -28,26 +28,34 @@
 /*******************************************************************************
  * Requests:
  ******************************************************************************/
 
-int aml_dma_request_linux_par_copy_init(struct aml_dma_request_linux_par *req,
-					int type,
-					struct aml_layout *dest,
-					struct aml_layout *src)
+int aml_dma_request_linux_par_create(struct aml_dma_request_linux_par **req,
+				     int uuid)
 {
 	assert(req != NULL);
-	req->type = type;
-	req->dest = dest;
-	req->src = src;
+	*req = calloc(1, sizeof(struct aml_dma_request_linux_par));
+	if (*req == NULL)
+		return -AML_ENOMEM;
+	(*req)->uuid = uuid;
 	return 0;
 }
 
-int aml_dma_request_linux_par_copy_destroy(struct aml_dma_request_linux_par *r)
+void aml_dma_request_linux_par_destroy(struct aml_dma_request_linux_par **req)
 {
-	assert(r != NULL);
-	if (r->type == AML_DMA_REQUEST_TYPE_PTR) {
-		aml_layout_dense_destroy(&r->dest);
-		aml_layout_dense_destroy(&r->src);
-	}
+	assert(req != NULL);
+	free(*req);
+	*req = NULL;
+}
+
+int aml_dma_linux_par_request_data_init(
+		struct aml_dma_linux_par_request_data *req,
+		int type,
+		struct aml_layout *dest,
+		struct aml_layout *src)
+{
+	assert(req != NULL);
+	req->type = type;
+	req->dest = dest;
+	req->src = src;
 	return 0;
 }
@@ -57,8 +65,8 @@ int aml_dma_request_linux_par_copy_destroy(struct aml_dma_request_linux_par *r)
 
 void *aml_dma_linux_par_do_thread(void *arg)
 {
-	struct aml_dma_request_linux_par *req =
-		(struct aml_dma_request_linux_par *)arg;
+	struct aml_dma_linux_par_request_data *req =
+		(struct aml_dma_linux_par_request_data *)arg;
 
 	if (req->type != AML_DMA_REQUEST_TYPE_INVALID)
 		aml_copy_layout_generic(req->dest, req->src);
@@ -81,8 +89,8 @@ int aml_dma_linux_par_create_request(struct aml_dma_data *d,
 	assert(r != NULL);
 	struct aml_dma_linux_par *dma =
 		(struct aml_dma_linux_par *)d;
-	struct aml_dma_request_linux_par *req;
+	struct aml_dma_request_linux_par *ret;
+	struct aml_dma_linux_par_request_data *req;
 
 	pthread_mutex_lock(&dma->data.lock);
 	req = aml_vector_add(dma->data.requests);
@@ -93,7 +101,7 @@ int aml_dma_linux_par_create_request(struct aml_dma_data *d,
 		dl = va_arg(ap, struct aml_layout *);
 		sl = va_arg(ap, struct aml_layout *);
 
-		aml_dma_request_linux_par_copy_init(req,
+		aml_dma_linux_par_request_data_init(req,
 						AML_DMA_REQUEST_TYPE_LAYOUT,
 						dl, sl);
 	} else if (type == AML_DMA_REQUEST_TYPE_PTR) {
@@ -111,15 +119,18 @@ int aml_dma_linux_par_create_request(struct aml_dma_data *d,
 					 &sz, NULL, NULL);
 		aml_layout_dense_create(&sl, sp, 0, sizeof(size_t), 1,
 					&sz, NULL, NULL);
-		aml_dma_request_linux_par_copy_init(req,
+		aml_dma_linux_par_request_data_init(req,
 						AML_DMA_REQUEST_TYPE_PTR,
 						dl, sl);
 	}
 	pthread_mutex_unlock(&dma->data.lock);
 
-	if (req->type != AML_DMA_REQUEST_TYPE_INVALID)
+	if (req->type != AML_DMA_REQUEST_TYPE_INVALID) {
+		int uuid = aml_vector_getid(dma->data.requests, req);
+
 		pthread_create(&req->thread, NULL, dma->ops.do_thread, req);
-	*r = (struct aml_dma_request *)req;
+		aml_dma_request_linux_par_create(&ret, uuid);
+		*r = (struct aml_dma_request *)ret;
+	}
 	return 0;
 }
@@ -133,17 +144,25 @@ int aml_dma_linux_par_destroy_request(struct aml_dma_data *d,
 	struct aml_dma_request_linux_par *req =
 		(struct aml_dma_request_linux_par *)r;
+	struct aml_dma_linux_par_request_data *inner_req;
+
+	inner_req = aml_vector_get(dma->data.requests, req->uuid);
+	if (inner_req == NULL)
+		return -AML_EINVAL;
 
 	/* we cancel and join, instead of killing, for a cleaner result */
-	if (req->type != AML_DMA_REQUEST_TYPE_INVALID) {
-		pthread_cancel(req->thread);
-		pthread_join(req->thread, NULL);
-		aml_dma_request_linux_par_copy_destroy(req);
+	if (inner_req->type != AML_DMA_REQUEST_TYPE_INVALID) {
+		pthread_cancel(inner_req->thread);
+		pthread_join(inner_req->thread, NULL);
+		if (inner_req->type == AML_DMA_REQUEST_TYPE_PTR) {
+			aml_layout_dense_destroy(&inner_req->dest);
+			aml_layout_dense_destroy(&inner_req->src);
+		}
 	}
 
 	pthread_mutex_lock(&dma->data.lock);
-	aml_vector_remove(dma->data.requests, req);
+	aml_vector_remove(dma->data.requests, inner_req);
 	pthread_mutex_unlock(&dma->data.lock);
+	aml_dma_request_linux_par_destroy(&req);
 	return 0;
 }
@@ -155,15 +174,24 @@ int aml_dma_linux_par_wait_request(struct aml_dma_data *d,
 	struct aml_dma_linux_par *dma = (struct aml_dma_linux_par *)d;
 	struct aml_dma_request_linux_par *req =
 		(struct aml_dma_request_linux_par *)r;
+	struct aml_dma_linux_par_request_data *inner_req;
 
-	if (req->type != AML_DMA_REQUEST_TYPE_INVALID) {
-		pthread_join(req->thread, NULL);
-		aml_dma_request_linux_par_copy_destroy(req);
+	inner_req = aml_vector_get(dma->data.requests, req->uuid);
+	if (inner_req == NULL)
+		return -AML_EINVAL;
+
+	if (inner_req->type != AML_DMA_REQUEST_TYPE_INVALID) {
+		pthread_join(inner_req->thread, NULL);
+		if (inner_req->type == AML_DMA_REQUEST_TYPE_PTR) {
+			aml_layout_dense_destroy(&inner_req->dest);
+			aml_layout_dense_destroy(&inner_req->src);
+		}
 	}
 
 	pthread_mutex_lock(&dma->data.lock);
-	aml_vector_remove(dma->data.requests, req);
+	aml_vector_remove(dma->data.requests, inner_req);
 	pthread_mutex_unlock(&dma->data.lock);
+	aml_dma_request_linux_par_destroy(&req);
 	return 0;
 }
@@ -199,8 +227,8 @@ int aml_dma_linux_par_create(struct aml_dma **dma, size_t nbreqs)
 
 	/* allocate request array */
 	aml_vector_create(&d->data.requests, nbreqs,
-			  sizeof(struct aml_dma_request_linux_par),
-			  offsetof(struct aml_dma_request_linux_par, type),
+			  sizeof(struct aml_dma_linux_par_request_data),
+			  offsetof(struct aml_dma_linux_par_request_data, type),
 			  AML_DMA_REQUEST_TYPE_INVALID);
 	pthread_mutex_init(&d->data.lock, NULL);
...
@@ -29,23 +29,34 @@
  * Requests:
  ******************************************************************************/
 
-int aml_dma_request_linux_seq_copy_init(struct aml_dma_request_linux_seq *req,
-					int type,
-					struct aml_layout *dest,
-					struct aml_layout *src)
+int aml_dma_request_linux_seq_create(struct aml_dma_request_linux_seq **req,
+				     int uuid)
 {
 	assert(req != NULL);
-	req->type = type;
-	/* figure out pointers */
-	req->dest = dest;
-	req->src = src;
+	*req = calloc(1, sizeof(struct aml_dma_request_linux_seq));
+	if (*req == NULL)
+		return -AML_ENOMEM;
+	(*req)->uuid = uuid;
 	return 0;
 }
 
-int aml_dma_request_linux_seq_copy_destroy(struct aml_dma_request_linux_seq *r)
+void aml_dma_request_linux_seq_destroy(struct aml_dma_request_linux_seq **req)
 {
-	assert(r != NULL);
-	return 0;
+	assert(req != NULL);
+	free(*req);
+	*req = NULL;
+}
+
+void aml_dma_linux_seq_request_data_init(
+		struct aml_dma_linux_seq_request_data *req,
+		int type,
+		struct aml_layout *dest,
+		struct aml_layout *src)
+{
+	assert(req != NULL);
+	req->type = type;
+	req->dest = dest;
+	req->src = src;
 }
 
 /*******************************************************************************
@@ -53,7 +64,7 @@ int aml_dma_request_linux_seq_copy_destroy(struct aml_dma_request_linux_seq *r)
  ******************************************************************************/
 
 int aml_dma_linux_seq_do_copy(struct aml_dma_linux_seq_data *dma,
-			      struct aml_dma_request_linux_seq *req)
+			      struct aml_dma_linux_seq_request_data *req)
 {
 	assert(dma != NULL);
 	assert(req != NULL);
@@ -77,8 +88,8 @@ int aml_dma_linux_seq_create_request(struct aml_dma_data *d,
 	assert(r != NULL);
 	struct aml_dma_linux_seq *dma =
 		(struct aml_dma_linux_seq *)d;
-	struct aml_dma_request_linux_seq *req;
+	struct aml_dma_request_linux_seq *ret;
+	struct aml_dma_linux_seq_request_data *req;
 
 	pthread_mutex_lock(&dma->data.lock);
 	req = aml_vector_add(dma->data.requests);
@@ -89,7 +100,7 @@ int aml_dma_linux_seq_create_request(struct aml_dma_data *d,
 		dl = va_arg(ap, struct aml_layout *);
 		sl = va_arg(ap, struct aml_layout *);
 
-		aml_dma_request_linux_seq_copy_init(req,
+		aml_dma_linux_seq_request_data_init(req,
 						AML_DMA_REQUEST_TYPE_LAYOUT,
 						dl, sl);
 	} else if (type == AML_DMA_REQUEST_TYPE_PTR) {
@@ -107,12 +118,16 @@ int aml_dma_linux_seq_create_request(struct aml_dma_data *d,
 					 &sz, NULL, NULL);
 		aml_layout_dense_create(&sl, sp, 0, sizeof(size_t), 1,
 					&sz, NULL, NULL);
-		aml_dma_request_linux_seq_copy_init(req,
+		aml_dma_linux_seq_request_data_init(req,
 						AML_DMA_REQUEST_TYPE_PTR,
 						dl, sl);
 	}
+	int uuid = aml_vector_getid(dma->data.requests, req);
+
+	assert(uuid != AML_DMA_REQUEST_TYPE_INVALID);
+	aml_dma_request_linux_seq_create(&ret, uuid);
+	*r = (struct aml_dma_request *)ret;
 	pthread_mutex_unlock(&dma->data.lock);
-	*r = (struct aml_dma_request *)req;
 	return 0;
 }
@@ -126,19 +141,22 @@ int aml_dma_linux_seq_destroy_request(struct aml_dma_data *d,
 	struct aml_dma_request_linux_seq *req =
 		(struct aml_dma_request_linux_seq *)r;
+	struct aml_dma_linux_seq_request_data *inner_req;
 
-	if (req->type == AML_DMA_REQUEST_TYPE_LAYOUT)
-		aml_dma_request_linux_seq_copy_destroy(req);
-	else if (req->type == AML_DMA_REQUEST_TYPE_PTR) {
-		aml_layout_dense_destroy(&req->dest);
-		aml_layout_dense_destroy(&req->src);
-		aml_dma_request_linux_seq_copy_destroy(req);
+	inner_req = aml_vector_get(dma->data.requests, req->uuid);
+	if (inner_req == NULL)
+		return -AML_EINVAL;
+
+	pthread_mutex_lock(&dma->data.lock);
+	if (inner_req->type == AML_DMA_REQUEST_TYPE_PTR) {
+		aml_layout_dense_destroy(&inner_req->dest);
+		aml_layout_dense_destroy(&inner_req->src);
 	}
 
 	/* enough to remove from request vector */
-	pthread_mutex_lock(&dma->data.lock);
-	aml_vector_remove(dma->data.requests, req);
+	aml_vector_remove(dma->data.requests, inner_req);
 	pthread_mutex_unlock(&dma->data.lock);
+	aml_dma_request_linux_seq_destroy(&req);
 	return 0;
 }
@@ -150,10 +168,15 @@ int aml_dma_linux_seq_wait_request(struct aml_dma_data *d,
 	struct aml_dma_linux_seq *dma = (struct aml_dma_linux_seq *)d;
 	struct aml_dma_request_linux_seq *req =
 		(struct aml_dma_request_linux_seq *)r;
+	struct aml_dma_linux_seq_request_data *inner_req;
+
+	inner_req = aml_vector_get(dma->data.requests, req->uuid);
+	if (inner_req == NULL)
+		return -AML_EINVAL;
 
 	/* execute */
-	if (req->type != AML_DMA_REQUEST_TYPE_INVALID)
-		dma->ops.do_copy(&dma->data, req);
+	if (inner_req->type != AML_DMA_REQUEST_TYPE_INVALID)
+		dma->ops.do_copy(&dma->data, inner_req);
 
 	/* destroy a completed request */
 	aml_dma_linux_seq_destroy_request(d, r);
@@ -191,8 +214,8 @@ int aml_dma_linux_seq_create(struct aml_dma **dma, size_t nbreqs)
 	d->ops = aml_dma_linux_seq_inner_ops;
 
 	aml_vector_create(&d->data.requests, nbreqs,
-			  sizeof(struct aml_dma_request_linux_seq),
-			  offsetof(struct aml_dma_request_linux_seq, type),
+			  sizeof(struct aml_dma_linux_seq_request_data),
+			  offsetof(struct aml_dma_linux_seq_request_data, type),
 			  AML_DMA_REQUEST_TYPE_INVALID);
 	pthread_mutex_init(&d->data.lock, NULL);
...
@@ -24,12 +24,28 @@
 /*******************************************************************************
  * Requests:
  ******************************************************************************/
 
-int aml_scratch_request_par_init(struct aml_scratch_request_par *req, int type,
-				 struct aml_scratch_par *scratch,
-				 void *dstptr, int dstid, void *srcptr,
-				 int srcid)
+int aml_scratch_request_par_create(struct aml_scratch_request_par **req,
+				   int uuid)
+{
+	assert(req != NULL);
+	*req = calloc(1, sizeof(struct aml_scratch_request_par));
+	if (*req == NULL)
+		return -AML_ENOMEM;
+	(*req)->uuid = uuid;
+	return 0;
+}
+
+void aml_scratch_request_par_destroy(struct aml_scratch_request_par **req)
+{
+	assert(req != NULL);
+	free(*req);
+	*req = NULL;
+}
+
+int aml_scratch_par_request_data_init(struct aml_scratch_par_request_data *req,
+				      int type, struct aml_scratch_par *scratch,
+				      void *dstptr, int dstid, void *srcptr,
+				      int srcid)
 {
 	assert(req != NULL);
 	req->type = type;
@@ -41,19 +57,13 @@ int aml_scratch_request_par_init(struct aml_scratch_request_par *req, int type,
 	return 0;
 }
 
-int aml_scratch_request_par_destroy(struct aml_scratch_request_par *r)
-{
-	assert(r != NULL);
-	return 0;
-}
-
 /*******************************************************************************
 * Internal functions
 ******************************************************************************/
 
 void *aml_scratch_par_do_thread(void *arg)
 {
-	struct aml_scratch_request_par *req =
-		(struct aml_scratch_request_par *)arg;
+	struct aml_scratch_par_request_data *req =
+		(struct aml_scratch_par_request_data *)arg;
 	struct aml_scratch_par *scratch = req->scratch;
 	void *dest, *src;
@@ -86,11 +96,12 @@ int aml_scratch_par_create_request(struct aml_scratch_data *d,
 	assert(r != NULL);
 	struct aml_scratch_par *scratch =
 		(struct aml_scratch_par *)d;
-	struct aml_scratch_request_par *req;
+	struct aml_scratch_request_par *ret;
+	struct aml_scratch_par_request_data *req;
 
 	pthread_mutex_lock(&scratch->data.lock);
 	req = aml_vector_add(scratch->data.requests);
 	/* init the request */
 	if (type == AML_SCRATCH_REQUEST_TYPE_PUSH) {
 		int scratchid;
@@ -110,8 +121,9 @@ int aml_scratch_par_create_request(struct aml_scratch_data *d,
 		*srcid = *slot;
 
 		/* init request */
-		aml_scratch_request_par_init(req, type, scratch, srcptr, *srcid,
-					     scratchptr, scratchid);
+		aml_scratch_par_request_data_init(req, type, scratch, srcptr,
+						  *srcid, scratchptr,
+						  scratchid);
 	} else if (type == AML_SCRATCH_REQUEST_TYPE_PULL) {
 		int *scratchid;
 		int srcid;
@@ -140,46 +152,54 @@ int aml_scratch_par_create_request(struct aml_scratch_data *d,
 		*scratchid = slot;
 
 		/* init request */
-		aml_scratch_request_par_init(req, type, scratch,
-					     scratchptr, *scratchid,
-					     srcptr, srcid);
+		aml_scratch_par_request_data_init(req, type, scratch,
						  scratchptr, *scratchid,
						  srcptr, srcid);
 	}
+	int uuid = aml_vector_getid(scratch->data.requests, req);
+
+	assert(uuid != AML_SCRATCH_REQUEST_TYPE_INVALID);
+	aml_scratch_request_par_create(&ret, uuid);
 	pthread_mutex_unlock(&scratch->data.lock);
 
 	/* thread creation */
 	if (req->type != AML_SCRATCH_REQUEST_TYPE_NOOP)
 		pthread_create(&req->thread, NULL, scratch->ops.do_thread, req);
-	*r = (struct aml_scratch_request *)req;
+	*r = (struct aml_scratch_request *)ret;
 	return 0;
 }
 
 int aml_scratch_par_destroy_request(struct aml_scratch_data *d,
 				    struct aml_scratch_request *r)
 {
 	assert(d != NULL);
 	assert(r != NULL);
 	struct aml_scratch_par *scratch =
 		(struct aml_scratch_par *)d;
+	struct aml_scratch_par_request_data *inner_req;
 	struct aml_scratch_request_par *req =
 		(struct aml_scratch_request_par *)r;
 	int *tile;
 
-	if (req->type != AML_SCRATCH_REQUEST_TYPE_NOOP) {
-		pthread_cancel(req->thread);
-		pthread_join(req->thread, NULL);
+	inner_req = aml_vector_get(scratch->data.requests, req->uuid);
+	if (inner_req == NULL)
+		return -AML_EINVAL;
+
+	if (inner_req->type != AML_SCRATCH_REQUEST_TYPE_NOOP) {
+		pthread_cancel(inner_req->thread);
+		pthread_join(inner_req->thread, NULL);
 	}
-	aml_scratch_request_par_destroy(req);
 
 	/* destroy removes the tile from the scratch */