/*******************************************************************************
 * Copyright 2019 UChicago Argonne, LLC.
 * (c.f. AUTHORS, LICENSE)
 *
 * This file is part of the AML project.
 * For more info, see https://xgitlab.cels.anl.gov/argo/aml
 *
 * SPDX-License-Identifier: BSD-3-Clause
*******************************************************************************/

#include "aml.h"

#include "aml/dma/linux-par.h"

#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

/*******************************************************************************
 * Linux-backed, parallel dma
 * The dma itself is organized into several different components
 * - request types: copy
 * - implementation of the request
 * - user API (i.e. generic request creation and call)
 * - how to init the dma
 ******************************************************************************/

/*******************************************************************************
 * Requests:
 ******************************************************************************/

int aml_dma_request_linux_par_copy_init(struct aml_dma_request_linux_par *req,
					struct aml_tiling *dt,
					void *dptr, int dtid,
					struct aml_tiling *st,
					void *sptr, int stid)
{
	assert(req != NULL);

	/* Mark the request as a copy and resolve the raw source/destination
	 * pointers from their respective tilings. */
	req->type = AML_DMA_REQUEST_TYPE_COPY;
	req->src = aml_tiling_tilestart(st, sptr, stid);
	req->dest = aml_tiling_tilestart(dt, dptr, dtid);
	/* the transfer size is taken from the source tile;
	 * TODO: assert that it matches the destination tile size */
	req->size = aml_tiling_tilesize(st, stid);
	return 0;
}

int aml_dma_request_linux_par_copy_destroy(struct aml_dma_request_linux_par *r)
{
	/* A copy request owns no resources of its own: nothing to release. */
	assert(r != NULL);
	return 0;
}

/*******************************************************************************
 * Internal functions
 ******************************************************************************/

void *aml_dma_linux_par_do_thread(void *arg)
{
	struct aml_dma_linux_par_thread_data *data =
		(struct aml_dma_linux_par_thread_data *)arg;

63
	if (data->req->type == AML_DMA_REQUEST_TYPE_COPY)
64 65 66 67 68 69 70 71 72
		data->dma->ops.do_copy(&data->dma->data, data->req, data->tid);
	return NULL;
}

int aml_dma_linux_par_do_copy(struct aml_dma_linux_par_data *dma,
			      struct aml_dma_request_linux_par *req, int tid)
{
	assert(dma != NULL);
	assert(req != NULL);
73 74 75 76 77

	/* chunk memory */
	size_t nbthreads = dma->nbthreads;
	size_t chunksize = req->size / nbthreads;

78 79
	void *dest = (void *)((intptr_t)req->dest + tid * chunksize);
	void *src = (void *)((intptr_t)req->src + tid * chunksize);
80

81
	if (tid == nbthreads - 1 && req->size > chunksize * nbthreads)
82 83 84
		chunksize += req->size % nbthreads;

	memcpy(dest, src, chunksize);
85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107
	return 0;
}

/* Internal ops table: the thread entry point and the per-thread copy
 * worker it dispatches to. Order follows struct aml_dma_linux_par_ops. */
struct aml_dma_linux_par_ops aml_dma_linux_par_inner_ops = {
	aml_dma_linux_par_do_thread,
	aml_dma_linux_par_do_copy,
};

/*******************************************************************************
 * Public API
 ******************************************************************************/

int aml_dma_linux_par_create_request(struct aml_dma_data *d,
				     struct aml_dma_request **r,
				     int type, va_list ap)
{
	assert(d != NULL);
	assert(r != NULL);
	struct aml_dma_linux_par *dma =
		(struct aml_dma_linux_par *)d;

	struct aml_dma_request_linux_par *req;

108
	pthread_mutex_lock(&dma->data.lock);
109
	req = aml_vector_add(&dma->data.requests);
110 111

	/* init the request */
112
	if (type == AML_DMA_REQUEST_TYPE_COPY) {
113 114 115
		struct aml_tiling *dt, *st;
		void *dptr, *sptr;
		int dtid, stid;
116

117 118 119 120 121 122 123 124 125
		dt = va_arg(ap, struct aml_tiling *);
		dptr = va_arg(ap, void *);
		dtid = va_arg(ap, int);
		st = va_arg(ap, struct aml_tiling *);
		sptr = va_arg(ap, void *);
		stid = va_arg(ap, int);
		aml_dma_request_linux_par_copy_init(req, dt, dptr, dtid,
						    st, sptr, stid);
	}
126
	pthread_mutex_unlock(&dma->data.lock);
127

128
	for (int i = 0; i < dma->data.nbthreads; i++) {
129
		struct aml_dma_linux_par_thread_data *rd = &req->thread_data[i];
130

131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151
		rd->req = req;
		rd->dma = dma;
		rd->tid = i;
		pthread_create(&rd->thread, NULL, dma->ops.do_thread, rd);
	}
	*r = (struct aml_dma_request *)req;
	return 0;
}

int aml_dma_linux_par_destroy_request(struct aml_dma_data *d,
				      struct aml_dma_request *r)
{
	assert(d != NULL);
	assert(r != NULL);
	struct aml_dma_linux_par *dma =
		(struct aml_dma_linux_par *)d;

	struct aml_dma_request_linux_par *req =
		(struct aml_dma_request_linux_par *)r;

	/* we cancel and join, instead of killing, for a cleaner result */
152
	for (int i = 0; i < dma->data.nbthreads; i++) {
153 154 155 156
		pthread_cancel(req->thread_data[i].thread);
		pthread_join(req->thread_data[i].thread, NULL);
	}

157
	if (req->type == AML_DMA_REQUEST_TYPE_COPY)
158 159
		aml_dma_request_linux_par_copy_destroy(req);

160
	pthread_mutex_lock(&dma->data.lock);
161
	aml_vector_remove(&dma->data.requests, req);
162
	pthread_mutex_unlock(&dma->data.lock);
163 164 165 166 167 168 169 170 171 172 173 174
	return 0;
}

int aml_dma_linux_par_wait_request(struct aml_dma_data *d,
				   struct aml_dma_request *r)
{
	assert(d != NULL);
	assert(r != NULL);
	struct aml_dma_linux_par *dma = (struct aml_dma_linux_par *)d;
	struct aml_dma_request_linux_par *req =
		(struct aml_dma_request_linux_par *)r;

175
	for (int i = 0; i < dma->data.nbthreads; i++)
176 177 178
		pthread_join(req->thread_data[i].thread, NULL);

	/* destroy a completed request */
179
	if (req->type == AML_DMA_REQUEST_TYPE_COPY)
180
		aml_dma_request_linux_par_copy_destroy(req);
181

182
	pthread_mutex_lock(&dma->data.lock);
183
	aml_vector_remove(&dma->data.requests, req);
184
	pthread_mutex_unlock(&dma->data.lock);
185 186 187 188 189 190 191 192 193 194 195 196 197
	return 0;
}

/* Public ops table exposed through struct aml_dma: request creation,
 * destruction, and wait. Order follows struct aml_dma_ops. */
struct aml_dma_ops aml_dma_linux_par_ops = {
	aml_dma_linux_par_create_request,
	aml_dma_linux_par_destroy_request,
	aml_dma_linux_par_wait_request,
};

/*******************************************************************************
 * Init functions:
 ******************************************************************************/

198 199
int aml_dma_linux_par_create(struct aml_dma **d, size_t nbreqs,
			     size_t nbthreads)
200 201 202
{
	struct aml_dma *ret = NULL;
	intptr_t baseptr, dataptr;
203 204 205 206
	int err;

	if (d == NULL)
		return -AML_EINVAL;
207 208 209

	/* alloc */
	baseptr = (intptr_t) calloc(1, AML_DMA_LINUX_PAR_ALLOCSIZE);
210 211 212 213
	if (baseptr == 0) {
		*d = NULL;
		return -AML_ENOMEM;
	}
214 215 216 217
	dataptr = baseptr + sizeof(struct aml_dma);

	ret = (struct aml_dma *)baseptr;
	ret->data = (struct aml_dma_data *)dataptr;
218
	ret->ops = &aml_dma_linux_par_ops;
219

220 221 222 223 224 225
	err = aml_dma_linux_par_init(ret, nbreqs, nbthreads);
	if (err) {
		*d = NULL;
		free(ret);
		return err;
	}
226 227 228 229

	*d = ret;
	return 0;
}
230 231 232

int aml_dma_linux_par_init(struct aml_dma *d, size_t nbreqs,
			   size_t nbthreads)
233
{
234
	struct aml_dma_linux_par *dma;
235

236 237 238
	if (d == NULL || d->data == NULL)
		return -AML_EINVAL;
	dma = (struct aml_dma_linux_par *)d->data;
239
	dma->ops = aml_dma_linux_par_inner_ops;
240

241 242
	/* allocate request array */
	dma->data.nbthreads = nbthreads;
243 244 245 246
	aml_vector_init(&dma->data.requests, nbreqs,
			sizeof(struct aml_dma_request_linux_par),
			offsetof(struct aml_dma_request_linux_par, type),
			AML_DMA_REQUEST_TYPE_INVALID);
247
	for (int i = 0; i < nbreqs; i++) {
248 249
		struct aml_dma_request_linux_par *req =
			aml_vector_get(&dma->data.requests, i);
250

251
		req->thread_data = calloc(dma->data.nbthreads,
252
				sizeof(struct aml_dma_linux_par_thread_data));
253
	}
254
	pthread_mutex_init(&dma->data.lock, NULL);
255 256 257
	return 0;
}

258
void aml_dma_linux_par_fini(struct aml_dma *d)
259
{
260 261 262 263 264 265
	struct aml_dma_linux_par *dma;

	if (d == NULL || d->data == NULL)
		return;
	dma = (struct aml_dma_linux_par *)d->data;
	for (int i = 0; i < aml_vector_size(&dma->data.requests); i++) {
266 267
		struct aml_dma_request_linux_par *req =
			aml_vector_get(&dma->data.requests, i);
268

269 270
		free(req->thread_data);
	}
271
	aml_vector_fini(&dma->data.requests);
272
	pthread_mutex_destroy(&dma->data.lock);
273 274 275 276 277 278 279 280 281
}

/* Finalizes and frees a dma created by aml_dma_linux_par_create, then
 * clears the caller's pointer to guard against reuse. */
void aml_dma_linux_par_destroy(struct aml_dma **d)
{
	if (d == NULL)
		return;
	/* fini tolerates a NULL dma, and free(NULL) is a no-op */
	aml_dma_linux_par_fini(*d);
	free(*d);
	*d = NULL;
}