/*******************************************************************************
 * Copyright 2019 UChicago Argonne, LLC.
 * (c.f. AUTHORS, LICENSE)
 *
 * This file is part of the AML project.
 * For more info, see https://xgitlab.cels.anl.gov/argo/aml
 *
 * SPDX-License-Identifier: BSD-3-Clause
*******************************************************************************/

#include "aml.h"

#include <assert.h>
#include <errno.h>
#include <numaif.h>
#include <pthread.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

/*******************************************************************************
 * Linux-backed, sequential dma
 * The dma itself is organized into several different components
 * - request types: copy or move
 * - implementation of the request
 * - user API (i.e. generic request creation and call)
 * - how to init the dma
 ******************************************************************************/

/*******************************************************************************
 * Requests:
 ******************************************************************************/

29
int aml_dma_request_linux_seq_copy_init(struct aml_dma_request_linux_seq *req,
Kamil Iskra's avatar
Kamil Iskra committed
30
					const struct aml_tiling *dt,
31
					void *dptr, int dtid,
Kamil Iskra's avatar
Kamil Iskra committed
32 33
					const struct aml_tiling *st,
					const void *sptr, int stid)
34
{
35
	assert(req != NULL);
36

37
	req->type = AML_DMA_REQUEST_TYPE_COPY;
38
	/* figure out pointers */
39 40 41
	req->dest = aml_tiling_tilestart(dt, dptr, dtid);
	req->src = aml_tiling_tilestart(st, sptr, stid);
	req->size = aml_tiling_tilesize(st, stid);
42 43 44 45
	/* TODO: assert size match */
	return 0;
}

46
int aml_dma_request_linux_seq_copy_destroy(struct aml_dma_request_linux_seq *r)
47 48 49 50 51
{
	assert(r != NULL);
	return 0;
}

52
int aml_dma_request_linux_seq_move_init(struct aml_dma_request_linux_seq *req,
53
					struct aml_area *darea,
Kamil Iskra's avatar
Kamil Iskra committed
54
					const struct aml_tiling *tiling,
55 56
					void *startptr, int tileid)
{
57
	assert(req != NULL);
58 59
	struct aml_binding *binding;

60
	req->type = AML_DMA_REQUEST_TYPE_MOVE;
61
	aml_area_binding(darea, &binding);
62 63 64 65 66
	req->count = aml_binding_nbpages(binding, tiling, startptr, tileid);
	req->pages = calloc(req->count, sizeof(void *));
	req->nodes = calloc(req->count, sizeof(int));
	aml_binding_pages(binding, req->pages, tiling, startptr, tileid);
	aml_binding_nodes(binding, req->nodes, tiling, startptr, tileid);
67 68 69 70
	free(binding);
	return 0;
}

71
int aml_dma_request_linux_seq_move_destroy(struct aml_dma_request_linux_seq *req)
72
{
73 74 75
	assert(req != NULL);
	free(req->pages);
	free(req->nodes);
76 77 78 79 80 81
	return 0;
}

/*******************************************************************************
 * Internal functions
 ******************************************************************************/
82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107
int aml_dma_linux_seq_do_copy(struct aml_dma_linux_seq_data *dma,
			      struct aml_dma_request_linux_seq *req)
{
	assert(dma != NULL);
	assert(req != NULL);
	memcpy(req->dest, req->src, req->size);
	return 0;
}

int aml_dma_linux_seq_do_move(struct aml_dma_linux_seq_data *dma,
			      struct aml_dma_request_linux_seq *req)
{
	assert(dma != NULL);
	assert(req != NULL);
	int status[req->count];
	int err;
	err = move_pages(0, req->count, req->pages, req->nodes, status,
			 MPOL_MF_MOVE);
	if(err)
	{
		perror("move_pages:");
		return errno;
	}
	return 0;
}

108
struct aml_dma_linux_seq_ops aml_dma_linux_seq_inner_ops = {
109 110
	aml_dma_linux_seq_do_copy,
	aml_dma_linux_seq_do_move,
111 112 113 114 115 116 117
};

/*******************************************************************************
 * Public API
 ******************************************************************************/

int aml_dma_linux_seq_create_request(struct aml_dma_data *d,
118
				     struct aml_dma_request **r,
119 120 121 122 123 124 125
				     int type, va_list ap)
{
	assert(d != NULL);
	assert(r != NULL);
	struct aml_dma_linux_seq *dma =
		(struct aml_dma_linux_seq *)d;

126
	struct aml_dma_request_linux_seq *req;
127

128
	pthread_mutex_lock(&dma->data.lock);
129
	req = aml_vector_add(&dma->data.requests);
130

131 132 133 134 135 136 137 138 139 140 141 142
	/* init the request */
	if(type == AML_DMA_REQUEST_TYPE_COPY)
	{
		struct aml_tiling *dt, *st;
		void *dptr, *sptr;
		int dtid, stid;
		dt = va_arg(ap, struct aml_tiling *);
		dptr = va_arg(ap, void *);
		dtid = va_arg(ap, int);
		st = va_arg(ap, struct aml_tiling *);
		sptr = va_arg(ap, void *);
		stid = va_arg(ap, int);
143
		aml_dma_request_linux_seq_copy_init(req, dt, dptr, dtid,
144 145 146 147 148 149 150 151
						    st, sptr, stid);
	}
	else if(type == AML_DMA_REQUEST_TYPE_MOVE)
	{
		struct aml_area *darea = va_arg(ap, struct aml_area *);
		struct aml_tiling *st = va_arg(ap, struct aml_tiling *);
		void *sptr = va_arg(ap, void *);
		int stid = va_arg(ap, int);
152 153
		aml_dma_request_linux_seq_move_init(req, darea, st, sptr,
						    stid);
154
	}
155
	pthread_mutex_unlock(&dma->data.lock);
156
	*r = (struct aml_dma_request *)req;
157 158 159 160 161 162 163 164 165 166 167
	return 0;
}

int aml_dma_linux_seq_destroy_request(struct aml_dma_data *d,
				      struct aml_dma_request *r)
{
	assert(d != NULL);
	assert(r != NULL);
	struct aml_dma_linux_seq *dma =
		(struct aml_dma_linux_seq *)d;

168 169
	struct aml_dma_request_linux_seq *req =
		(struct aml_dma_request_linux_seq *)r;
170 171

	if(req->type == AML_DMA_REQUEST_TYPE_COPY)
172
		aml_dma_request_linux_seq_copy_destroy(req);
173
	else if(req->type == AML_DMA_REQUEST_TYPE_MOVE)
174
		aml_dma_request_linux_seq_move_destroy(req);
175

176
	/* enough to remove from request vector */
177
	pthread_mutex_lock(&dma->data.lock);
178
	aml_vector_remove(&dma->data.requests, req);
179
	pthread_mutex_unlock(&dma->data.lock);
180 181 182 183 184 185 186 187
	return 0;
}

int aml_dma_linux_seq_wait_request(struct aml_dma_data *d,
				   struct aml_dma_request *r)
{
	assert(d != NULL);
	assert(r != NULL);
188 189 190
	struct aml_dma_linux_seq *dma = (struct aml_dma_linux_seq *)d;
	struct aml_dma_request_linux_seq *req =
		(struct aml_dma_request_linux_seq *)r;
191 192 193

	/* execute */
	if(req->type == AML_DMA_REQUEST_TYPE_COPY)
194
		dma->ops.do_copy(&dma->data, req);
195
	else if(req->type == AML_DMA_REQUEST_TYPE_MOVE)
196
		dma->ops.do_move(&dma->data, req);
197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238

	/* destroy a completed request */
	aml_dma_linux_seq_destroy_request(d, r);
	return 0;
}

struct aml_dma_ops aml_dma_linux_seq_ops = {
	aml_dma_linux_seq_create_request,
	aml_dma_linux_seq_destroy_request,
	aml_dma_linux_seq_wait_request,
};

/*******************************************************************************
 * Init functions:
 ******************************************************************************/

int aml_dma_linux_seq_create(struct aml_dma **d, ...)
{
	va_list ap;
	struct aml_dma *ret = NULL;
	intptr_t baseptr, dataptr;
	va_start(ap, d);

	/* alloc */
	baseptr = (intptr_t) calloc(1, AML_DMA_LINUX_SEQ_ALLOCSIZE);
	dataptr = baseptr + sizeof(struct aml_dma);

	ret = (struct aml_dma *)baseptr;
	ret->data = (struct aml_dma_data *)dataptr;

	aml_dma_linux_seq_vinit(ret, ap);

	va_end(ap);
	*d = ret;
	return 0;
}
int aml_dma_linux_seq_vinit(struct aml_dma *d, va_list ap)
{
	d->ops = &aml_dma_linux_seq_ops;
	struct aml_dma_linux_seq *dma = (struct aml_dma_linux_seq *)d->data;

	dma->ops = aml_dma_linux_seq_inner_ops;
239 240 241 242 243 244 245

	/* request vector */
	size_t nbreqs = va_arg(ap, size_t);
	aml_vector_init(&dma->data.requests, nbreqs,
			sizeof(struct aml_dma_request_linux_seq),
			offsetof(struct aml_dma_request_linux_seq, type),
			AML_DMA_REQUEST_TYPE_INVALID);
246
	pthread_mutex_init(&dma->data.lock, NULL);
247 248 249 250 251 252 253 254 255 256 257 258 259 260 261
	return 0;
}
int aml_dma_linux_seq_init(struct aml_dma *d, ...)
{
	int err;
	va_list ap;
	va_start(ap, d);
	err = aml_dma_linux_seq_vinit(d, ap);
	va_end(ap);
	return err;
}

int aml_dma_linux_seq_destroy(struct aml_dma *d)
{
	struct aml_dma_linux_seq *dma = (struct aml_dma_linux_seq *)d->data;
262
	aml_vector_destroy(&dma->data.requests);
263
	pthread_mutex_destroy(&dma->data.lock);
264 265
	return 0;
}