/*******************************************************************************
 * Copyright 2019 UChicago Argonne, LLC.
 * (c.f. AUTHORS, LICENSE)
 *
 * This file is part of the AML project.
 * For more info, see https://xgitlab.cels.anl.gov/argo/aml
 *
 * SPDX-License-Identifier: BSD-3-Clause
*******************************************************************************/

#include "aml.h"
#include "aml/dma/linux-seq.h"
#include "aml/layout/dense.h"

#include <assert.h>
#include <errno.h>
#include <sys/mman.h>

/*******************************************************************************
 * Linux-backed, sequential dma
 * The dma itself is organized into several different components
 * - request types: copy or move
 * - implementation of the request
 * - user API (i.e. generic request creation and call)
 * - how to init the dma
 ******************************************************************************/

/*******************************************************************************
 * Requests:
 ******************************************************************************/

32
int aml_dma_request_linux_seq_copy_init(struct aml_dma_request_linux_seq *req,
33 34 35
					int type,
					struct aml_layout *dest,
					struct aml_layout *src)
36
{
37
	assert(req != NULL);
38
	req->type = type;
39
	/* figure out pointers */
40 41
	req->dest = dest;
	req->src = src;
42 43 44
	return 0;
}

45
int aml_dma_request_linux_seq_copy_destroy(struct aml_dma_request_linux_seq *r)
46 47 48 49 50 51 52 53
{
	assert(r != NULL);
	return 0;
}

/*******************************************************************************
 * Internal functions
 ******************************************************************************/
54

55 56 57 58 59
int aml_dma_linux_seq_do_copy(struct aml_dma_linux_seq_data *dma,
			      struct aml_dma_request_linux_seq *req)
{
	assert(dma != NULL);
	assert(req != NULL);
60
	aml_copy_layout_generic(req->dest, req->src);
61 62 63
	return 0;
}

64
struct aml_dma_linux_seq_ops aml_dma_linux_seq_inner_ops = {
65
	aml_dma_linux_seq_do_copy,
66 67 68 69 70 71 72
};

/*******************************************************************************
 * Public API
 ******************************************************************************/

int aml_dma_linux_seq_create_request(struct aml_dma_data *d,
73
				     struct aml_dma_request **r,
74 75 76 77 78 79 80
				     int type, va_list ap)
{
	assert(d != NULL);
	assert(r != NULL);
	struct aml_dma_linux_seq *dma =
		(struct aml_dma_linux_seq *)d;

81
	struct aml_dma_request_linux_seq *req;
82

83
	pthread_mutex_lock(&dma->data.lock);
84
	req = aml_vector_add(dma->data.requests);
85

86
	/* init the request */
87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112
	if (type == AML_DMA_REQUEST_TYPE_LAYOUT) {
		struct aml_layout *dl, *sl;

		dl = va_arg(ap, struct aml_layout *);
		sl = va_arg(ap, struct aml_layout *);
		aml_dma_request_linux_seq_copy_init(req,
						    AML_DMA_REQUEST_TYPE_LAYOUT,
						    dl, sl);
	} else if (type == AML_DMA_REQUEST_TYPE_PTR) {
		struct aml_layout *dl, *sl;
		void *dp, *sp;
		size_t sz;

		dp = va_arg(ap, void *);
		sp = va_arg(ap, void *);
		sz = va_arg(ap, size_t);
		/* simple 1D layout, none of the parameters really matter, as
		 * long as the copy generates a single memcpy.
		 */
		aml_layout_dense_create(&dl, dp, 0, sizeof(size_t), 1,
					&sz, NULL, NULL);
		aml_layout_dense_create(&sl, sp, 0, sizeof(size_t), 1,
					&sz, NULL, NULL);
		aml_dma_request_linux_seq_copy_init(req,
						    AML_DMA_REQUEST_TYPE_PTR,
						    dl, sl);
113
	}
114
	pthread_mutex_unlock(&dma->data.lock);
115
	*r = (struct aml_dma_request *)req;
116 117 118 119 120 121 122 123 124 125 126
	return 0;
}

int aml_dma_linux_seq_destroy_request(struct aml_dma_data *d,
				      struct aml_dma_request *r)
{
	assert(d != NULL);
	assert(r != NULL);
	struct aml_dma_linux_seq *dma =
		(struct aml_dma_linux_seq *)d;

127 128
	struct aml_dma_request_linux_seq *req =
		(struct aml_dma_request_linux_seq *)r;
129

130
	if (req->type == AML_DMA_REQUEST_TYPE_LAYOUT)
131
		aml_dma_request_linux_seq_copy_destroy(req);
132 133 134 135 136
	else if (req->type == AML_DMA_REQUEST_TYPE_PTR) {
		aml_layout_dense_destroy(&req->dest);
		aml_layout_dense_destroy(&req->src);
		aml_dma_request_linux_seq_copy_destroy(req);
	}
137

138
	/* enough to remove from request vector */
139
	pthread_mutex_lock(&dma->data.lock);
140
	aml_vector_remove(dma->data.requests, req);
141
	pthread_mutex_unlock(&dma->data.lock);
142 143 144 145 146 147 148 149
	return 0;
}

int aml_dma_linux_seq_wait_request(struct aml_dma_data *d,
				   struct aml_dma_request *r)
{
	assert(d != NULL);
	assert(r != NULL);
150 151 152
	struct aml_dma_linux_seq *dma = (struct aml_dma_linux_seq *)d;
	struct aml_dma_request_linux_seq *req =
		(struct aml_dma_request_linux_seq *)r;
153 154

	/* execute */
155
	if (req->type != AML_DMA_REQUEST_TYPE_INVALID)
156
		dma->ops.do_copy(&dma->data, req);
157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172

	/* destroy a completed request */
	aml_dma_linux_seq_destroy_request(d, r);
	return 0;
}

struct aml_dma_ops aml_dma_linux_seq_ops = {
	aml_dma_linux_seq_create_request,
	aml_dma_linux_seq_destroy_request,
	aml_dma_linux_seq_wait_request,
};

/*******************************************************************************
 * Init functions:
 ******************************************************************************/

173
int aml_dma_linux_seq_create(struct aml_dma **dma, size_t nbreqs)
174 175
{
	struct aml_dma *ret = NULL;
176
	struct aml_dma_linux_seq *d;
177

178
	if (dma == NULL)
179
		return -AML_EINVAL;
180

181 182
	*dma = NULL;

183
	ret = AML_INNER_MALLOC_2(struct aml_dma, struct aml_dma_linux_seq);
184
	if (ret == NULL)
185
		return -AML_ENOMEM;
186

187 188
	ret->data = AML_INNER_MALLOC_NEXTPTR(ret, struct aml_dma,
					     struct aml_dma_linux_seq);
189 190
	ret->ops = &aml_dma_linux_seq_ops;
	d = (struct aml_dma_linux_seq *)ret->data;
191

192 193 194 195 196 197
	d->ops = aml_dma_linux_seq_inner_ops;
	aml_vector_create(&d->data.requests, nbreqs,
			  sizeof(struct aml_dma_request_linux_seq),
			  offsetof(struct aml_dma_request_linux_seq, type),
			  AML_DMA_REQUEST_TYPE_INVALID);
	pthread_mutex_init(&d->data.lock, NULL);
198

199
	*dma = ret;
200 201 202
	return 0;
}

203
void aml_dma_linux_seq_destroy(struct aml_dma **dma)
204
{
205 206
	struct aml_dma *d;
	struct aml_dma_linux_seq *l;
207

208 209 210
	if (dma == NULL)
		return;
	d = *dma;
211
	if (d == NULL)
212
		return;
213 214

	assert(d->data != NULL);
215 216 217 218 219
	l = (struct aml_dma_linux_seq *)d->data;
	aml_vector_destroy(&l->data.requests);
	pthread_mutex_destroy(&l->data.lock);
	free(d);
	*dma = NULL;
220
}