/*******************************************************************************
 * Copyright 2019 UChicago Argonne, LLC.
 * (c.f. AUTHORS, LICENSE)
 *
 * This file is part of the AML project.
 * For more info, see https://xgitlab.cels.anl.gov/argo/aml
 *
 * SPDX-License-Identifier: BSD-3-Clause
 ******************************************************************************/

#include "aml.h"
#include "aml/dma/linux-seq.h"
#include "aml/layout/dense.h"

#include <assert.h>
#include <errno.h>
#include <sys/mman.h>

/*******************************************************************************
 * Linux-backed, sequential dma
 * The dma itself is organized into several different components
 * - request types: layout-based copy
 * - implementation of the request
 * - user API (i.e. generic request creation and call)
 * - how to init the dma
 ******************************************************************************/
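
/*
 * Rough control flow for a single request, as implemented below.  This is a
 * sketch: the generic variadic front-end (e.g. aml_dma_copy() from aml.h) is
 * an assumption about the caller and is not defined in this file.
 *
 *   caller, e.g. aml_dma_copy(dma, AML_DMA_REQUEST_TYPE_LAYOUT, dest, src)
 *     -> dma->ops->create_request()  = aml_dma_linux_seq_create_request()
 *     -> dma->ops->wait_request()    = aml_dma_linux_seq_wait_request()
 *          -> dma->ops.do_copy()     = aml_dma_linux_seq_do_copy()
 *          -> aml_dma_linux_seq_destroy_request()
 */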

/*******************************************************************************
 * Requests:
 ******************************************************************************/

int aml_dma_request_linux_seq_copy_init(struct aml_dma_request_linux_seq *req,
					int type,
					struct aml_layout *dest,
					struct aml_layout *src)
{
	assert(req != NULL);
	req->type = type;
	req->dest = dest;
	req->src = src;
	return 0;
}

int aml_dma_request_linux_seq_copy_destroy(struct aml_dma_request_linux_seq *r)
{
	assert(r != NULL);
	return 0;
}

/*******************************************************************************
 * Internal functions
 ******************************************************************************/

int aml_dma_linux_seq_do_copy(struct aml_dma_linux_seq_data *dma,
			      struct aml_dma_request_linux_seq *req)
{
	assert(dma != NULL);
	assert(req != NULL);
	aml_copy_layout_generic(req->dest, req->src);
	return 0;
}

struct aml_dma_linux_seq_ops aml_dma_linux_seq_inner_ops = {
	aml_dma_linux_seq_do_copy,
};

/*******************************************************************************
 * Public API
 ******************************************************************************/

int aml_dma_linux_seq_create_request(struct aml_dma_data *d,
				     struct aml_dma_request **r,
				     int type, va_list ap)
{
	assert(d != NULL);
	assert(r != NULL);
	struct aml_dma_linux_seq *dma =
		(struct aml_dma_linux_seq *)d;
	struct aml_dma_request_linux_seq *req;
	int err = AML_SUCCESS;

	pthread_mutex_lock(&dma->data.lock);
	req = aml_vector_add(dma->data.requests);

	/* init the request */
	if (type == AML_DMA_REQUEST_TYPE_LAYOUT) {
		struct aml_layout *dl, *sl;

		dl = va_arg(ap, struct aml_layout *);
		sl = va_arg(ap, struct aml_layout *);
		if (dl == NULL || sl == NULL) {
			err = -AML_EINVAL;
			goto unlock;
		}
		aml_dma_request_linux_seq_copy_init(req,
						    AML_DMA_REQUEST_TYPE_LAYOUT,
						    dl, sl);
	} else
		err = -AML_EINVAL;
unlock:
	pthread_mutex_unlock(&dma->data.lock);
	if (req->type != AML_DMA_REQUEST_TYPE_INVALID)
		*r = (struct aml_dma_request *)req;
	return err;
}

int aml_dma_linux_seq_destroy_request(struct aml_dma_data *d,
				      struct aml_dma_request **r)
{
	assert(d != NULL);
	assert(r != NULL);
	struct aml_dma_linux_seq *dma =
		(struct aml_dma_linux_seq *)d;
	struct aml_dma_request_linux_seq *req;

	if (*r == NULL)
		return -AML_EINVAL;
	req = (struct aml_dma_request_linux_seq *)*r;

	aml_dma_request_linux_seq_copy_destroy(req);
	pthread_mutex_lock(&dma->data.lock);
	aml_vector_remove(dma->data.requests, req);
	pthread_mutex_unlock(&dma->data.lock);
	*r = NULL;
	return 0;
}

int aml_dma_linux_seq_wait_request(struct aml_dma_data *d,
				   struct aml_dma_request **r)
{
	assert(d != NULL);
	assert(r != NULL);
	struct aml_dma_linux_seq *dma = (struct aml_dma_linux_seq *)d;
	struct aml_dma_request_linux_seq *req;

	if (*r == NULL)
		return -AML_EINVAL;
	req = (struct aml_dma_request_linux_seq *)*r;

	/* execute */
	if (req->type != AML_DMA_REQUEST_TYPE_INVALID)
		dma->ops.do_copy(&dma->data, req);

	/* destroy a completed request */
	aml_dma_linux_seq_destroy_request(d, r);
	return 0;
}

struct aml_dma_ops aml_dma_linux_seq_ops = {
	aml_dma_linux_seq_create_request,
	aml_dma_linux_seq_destroy_request,
	aml_dma_linux_seq_wait_request,
};
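
/*
 * Sketch of how a caller is expected to drive this ops table.  This block is
 * an illustration, not part of the library: the AML_DMA_LINUX_SEQ_EXAMPLE
 * guard and the example_* name are ours, and the ops field names
 * (create_request/wait_request) are taken from the generic dma declarations
 * in aml.h.
 */
#ifdef AML_DMA_LINUX_SEQ_EXAMPLE
static int example_dma_copy(struct aml_dma *dma, int type, ...)
{
	va_list ap;
	struct aml_dma_request *req;
	int err;

	/* pack the variadic layout arguments for create_request */
	va_start(ap, type);
	err = dma->ops->create_request(dma->data, &req, type, ap);
	va_end(ap);
	if (err != AML_SUCCESS)
		return err;

	/* wait_request performs the copy and reclaims the request slot */
	return dma->ops->wait_request(dma->data, &req);
}
#endif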

/*******************************************************************************
 * Init functions:
 ******************************************************************************/

int aml_dma_linux_seq_create(struct aml_dma **dma, size_t nbreqs)
{
	struct aml_dma *ret = NULL;
	struct aml_dma_linux_seq *d;

	if (dma == NULL)
		return -AML_EINVAL;

	*dma = NULL;

	ret = AML_INNER_MALLOC_2(struct aml_dma, struct aml_dma_linux_seq);
	if (ret == NULL)
		return -AML_ENOMEM;

	ret->data = AML_INNER_MALLOC_NEXTPTR(ret, struct aml_dma,
					     struct aml_dma_linux_seq);
	ret->ops = &aml_dma_linux_seq_ops;
	d = (struct aml_dma_linux_seq *)ret->data;

	d->ops = aml_dma_linux_seq_inner_ops;
	aml_vector_create(&d->data.requests, nbreqs,
			  sizeof(struct aml_dma_request_linux_seq),
			  offsetof(struct aml_dma_request_linux_seq, type),
			  AML_DMA_REQUEST_TYPE_INVALID);
	pthread_mutex_init(&d->data.lock, NULL);

	*dma = ret;
	return 0;
}

void aml_dma_linux_seq_destroy(struct aml_dma **dma)
{
	struct aml_dma *d;
	struct aml_dma_linux_seq *l;

	if (dma == NULL)
		return;
	d = *dma;
	if (d == NULL)
		return;

	assert(d->data != NULL);
	l = (struct aml_dma_linux_seq *)d->data;
	aml_vector_destroy(&l->data.requests);
	pthread_mutex_destroy(&l->data.lock);
	free(d);
	*dma = NULL;
}
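
/*
 * Minimal lifecycle sketch, assuming the generic variadic front-end
 * aml_dma_copy(dma, type, ...) from aml.h (check the header for the exact
 * signature) and two already-constructed layouts, e.g. built with the
 * helpers in aml/layout/dense.h.  The guard macro and function name are
 * hypothetical; this only illustrates how the pieces above fit together.
 */
#ifdef AML_DMA_LINUX_SEQ_EXAMPLE
static int example_lifecycle(struct aml_layout *dest, struct aml_layout *src)
{
	struct aml_dma *dma;
	int err;

	/* one pre-allocated request slot is enough for a blocking copy */
	err = aml_dma_linux_seq_create(&dma, 1);
	if (err != 0)
		return err;

	/* dispatches to create_request + wait_request through the ops table */
	err = aml_dma_copy(dma, AML_DMA_REQUEST_TYPE_LAYOUT, dest, src);

	aml_dma_linux_seq_destroy(&dma);
	return err;
}
#endif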