dma_linux_seq.c 6.68 KB
Newer Older
Swann Perarnau's avatar
Swann Perarnau committed
1 2 3 4 5 6 7 8 9 10
/*******************************************************************************
 * Copyright 2019 UChicago Argonne, LLC.
 * (c.f. AUTHORS, LICENSE)
 *
 * This file is part of the AML project.
 * For more info, see https://xgitlab.cels.anl.gov/argo/aml
 *
 * SPDX-License-Identifier: BSD-3-Clause
*******************************************************************************/

11
#include "aml.h"
12
#include "aml/dma/linux-seq.h"
13
#include "aml/layout/dense.h"
14

15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
#include <assert.h>
#include <errno.h>
#include <sys/mman.h>

/*******************************************************************************
 * Linux-backed, sequential dma
 * The dma itself is organized into several different components
 * - request types: copy or move
 * - implementation of the request
 * - user API (i.e. generic request creation and call)
 * - how to init the dma
 ******************************************************************************/

/*******************************************************************************
 * Requests:
 ******************************************************************************/

32 33
int aml_dma_request_linux_seq_create(struct aml_dma_request_linux_seq **req,
				     int uuid)
34
{
35
	assert(req != NULL);
36 37 38 39
	*req = calloc(1, sizeof(struct aml_dma_request_linux_seq));
	if (*req == NULL)
		return -AML_ENOMEM;
	(*req)->uuid = uuid;
40 41 42
	return 0;
}

43
/**
 * Releases a handle created by aml_dma_request_linux_seq_create and
 * clears the caller's pointer (free(NULL) is a no-op, so an already-NULL
 * handle is accepted).
 */
void aml_dma_request_linux_seq_destroy(struct aml_dma_request_linux_seq **req)
{
	struct aml_dma_request_linux_seq *handle;

	assert(req != NULL);
	handle = *req;
	*req = NULL;
	free(handle);
}

/**
 * Fills an internal request slot with the parameters of one transfer.
 * @param req: slot inside the dma's request vector (must not be NULL)
 * @param type: one of the AML_DMA_REQUEST_TYPE_* constants
 * @param dest: destination layout of the copy
 * @param src: source layout of the copy
 */
void aml_dma_linux_seq_request_data_init(
				struct aml_dma_linux_seq_request_data *req,
				int type,
				struct aml_layout *dest,
				struct aml_layout *src)
{
	assert(req != NULL);
	req->src = src;
	req->dest = dest;
	req->type = type;
}

/*******************************************************************************
 * Internal functions
 ******************************************************************************/
65

66
int aml_dma_linux_seq_do_copy(struct aml_dma_linux_seq_data *dma,
67
			      struct aml_dma_linux_seq_request_data *req)
68 69 70
{
	assert(dma != NULL);
	assert(req != NULL);
71
	aml_copy_layout_generic(req->dest, req->src);
72 73 74
	return 0;
}

75
struct aml_dma_linux_seq_ops aml_dma_linux_seq_inner_ops = {
76
	aml_dma_linux_seq_do_copy,
77 78 79 80 81 82 83
};

/*******************************************************************************
 * Public API
 ******************************************************************************/

int aml_dma_linux_seq_create_request(struct aml_dma_data *d,
84
				     struct aml_dma_request **r,
85 86 87 88 89 90
				     int type, va_list ap)
{
	assert(d != NULL);
	assert(r != NULL);
	struct aml_dma_linux_seq *dma =
		(struct aml_dma_linux_seq *)d;
91 92
	struct aml_dma_request_linux_seq *ret;
	struct aml_dma_linux_seq_request_data *req;
93

94
	pthread_mutex_lock(&dma->data.lock);
95
	req = aml_vector_add(dma->data.requests);
96

97
	/* init the request */
98 99 100 101 102
	if (type == AML_DMA_REQUEST_TYPE_LAYOUT) {
		struct aml_layout *dl, *sl;

		dl = va_arg(ap, struct aml_layout *);
		sl = va_arg(ap, struct aml_layout *);
103
		aml_dma_linux_seq_request_data_init(req,
104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120
						    AML_DMA_REQUEST_TYPE_LAYOUT,
						    dl, sl);
	} else if (type == AML_DMA_REQUEST_TYPE_PTR) {
		struct aml_layout *dl, *sl;
		void *dp, *sp;
		size_t sz;

		dp = va_arg(ap, void *);
		sp = va_arg(ap, void *);
		sz = va_arg(ap, size_t);
		/* simple 1D layout, none of the parameters really matter, as
		 * long as the copy generates a single memcpy.
		 */
		aml_layout_dense_create(&dl, dp, 0, sizeof(size_t), 1,
					&sz, NULL, NULL);
		aml_layout_dense_create(&sl, sp, 0, sizeof(size_t), 1,
					&sz, NULL, NULL);
121
		aml_dma_linux_seq_request_data_init(req,
122 123
						    AML_DMA_REQUEST_TYPE_PTR,
						    dl, sl);
124
	}
125 126 127 128 129
	int uuid = aml_vector_getid(dma->data.requests, req);

	assert(uuid != AML_DMA_REQUEST_TYPE_INVALID);
	aml_dma_request_linux_seq_create(&ret, uuid);
	*r = (struct aml_dma_request *)ret;
130
	pthread_mutex_unlock(&dma->data.lock);
131 132 133 134 135 136 137 138 139 140 141
	return 0;
}

int aml_dma_linux_seq_destroy_request(struct aml_dma_data *d,
				      struct aml_dma_request *r)
{
	assert(d != NULL);
	assert(r != NULL);
	struct aml_dma_linux_seq *dma =
		(struct aml_dma_linux_seq *)d;

142 143
	struct aml_dma_request_linux_seq *req =
		(struct aml_dma_request_linux_seq *)r;
144
	struct aml_dma_linux_seq_request_data *inner_req;
145

146 147 148 149 150 151 152 153
	inner_req = aml_vector_get(dma->data.requests, req->uuid);
	if (inner_req == NULL)
		return -AML_EINVAL;

	pthread_mutex_lock(&dma->data.lock);
	if (inner_req->type == AML_DMA_REQUEST_TYPE_PTR) {
		aml_layout_dense_destroy(&inner_req->dest);
		aml_layout_dense_destroy(&inner_req->src);
154
	}
155

156
	/* enough to remove from request vector */
157
	aml_vector_remove(dma->data.requests, inner_req);
158
	pthread_mutex_unlock(&dma->data.lock);
159
	aml_dma_request_linux_seq_destroy(&req);
160 161 162 163 164 165 166 167
	return 0;
}

int aml_dma_linux_seq_wait_request(struct aml_dma_data *d,
				   struct aml_dma_request *r)
{
	assert(d != NULL);
	assert(r != NULL);
168 169 170
	struct aml_dma_linux_seq *dma = (struct aml_dma_linux_seq *)d;
	struct aml_dma_request_linux_seq *req =
		(struct aml_dma_request_linux_seq *)r;
171 172 173 174 175
	struct aml_dma_linux_seq_request_data *inner_req;

	inner_req = aml_vector_get(dma->data.requests, req->uuid);
	if (inner_req == NULL)
		return -AML_EINVAL;
176 177

	/* execute */
178 179
	if (inner_req->type != AML_DMA_REQUEST_TYPE_INVALID)
		dma->ops.do_copy(&dma->data, inner_req);
180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195

	/* destroy a completed request */
	aml_dma_linux_seq_destroy_request(d, r);
	return 0;
}

/* Public method table plugging this backend into the generic aml_dma
 * front-end; initializers are positional (create, destroy, wait — field
 * names presumed from the function roles; confirm against aml.h). */
struct aml_dma_ops aml_dma_linux_seq_ops = {
	aml_dma_linux_seq_create_request,
	aml_dma_linux_seq_destroy_request,
	aml_dma_linux_seq_wait_request,
};

/*******************************************************************************
 * Init functions:
 ******************************************************************************/

196
int aml_dma_linux_seq_create(struct aml_dma **dma, size_t nbreqs)
197 198
{
	struct aml_dma *ret = NULL;
199
	struct aml_dma_linux_seq *d;
200

201
	if (dma == NULL)
202
		return -AML_EINVAL;
203

204 205
	*dma = NULL;

206
	ret = AML_INNER_MALLOC_2(struct aml_dma, struct aml_dma_linux_seq);
207
	if (ret == NULL)
208
		return -AML_ENOMEM;
209

210 211
	ret->data = AML_INNER_MALLOC_NEXTPTR(ret, struct aml_dma,
					     struct aml_dma_linux_seq);
212 213
	ret->ops = &aml_dma_linux_seq_ops;
	d = (struct aml_dma_linux_seq *)ret->data;
214

215 216
	d->ops = aml_dma_linux_seq_inner_ops;
	aml_vector_create(&d->data.requests, nbreqs,
217 218
			  sizeof(struct aml_dma_linux_seq_request_data),
			  offsetof(struct aml_dma_linux_seq_request_data, type),
219 220
			  AML_DMA_REQUEST_TYPE_INVALID);
	pthread_mutex_init(&d->data.lock, NULL);
221

222
	*dma = ret;
223 224 225
	return 0;
}

226
void aml_dma_linux_seq_destroy(struct aml_dma **dma)
227
{
228 229
	struct aml_dma *d;
	struct aml_dma_linux_seq *l;
230

231 232 233
	if (dma == NULL)
		return;
	d = *dma;
234
	if (d == NULL)
235
		return;
236 237

	assert(d->data != NULL);
238 239 240 241 242
	l = (struct aml_dma_linux_seq *)d->data;
	aml_vector_destroy(&l->data.requests);
	pthread_mutex_destroy(&l->data.lock);
	free(d);
	*dma = NULL;
243
}