Commit 4bbc38cf authored by Swann Perarnau's avatar Swann Perarnau

[ci/style] activate stylecheck on unit tests

Quite a few changes needed.
parent 3cea828c
Pipeline #8506 passed with stages
in 16 minutes and 18 seconds
......@@ -25,7 +25,7 @@ checkpatch:
tags:
- integration
script:
- git ls-files *.c *.h | grep -v -e tests -e benchmarks >> .checkpatch.conf
- git ls-files *.c *.h | grep -v -e benchmarks >> .checkpatch.conf
- nix run -f "$ARGOPKGS" checkpatch --command checkpatch.pl
make:generic:
......
......@@ -15,7 +15,8 @@
#endif
#include <assert.h>
void test_map(const struct aml_area *area){
void test_map(const struct aml_area *area)
{
assert(area != NULL);
assert(area->ops->mmap != NULL);
assert(area->ops->munmap != NULL);
......@@ -24,7 +25,7 @@ void test_map(const struct aml_area *area){
size_t s;
const size_t sizes[4] = {1, 32, 4096, 1<<20};
for(s = 0; s<sizeof(sizes)/sizeof(*sizes); s++){
for (s = 0; s < sizeof(sizes)/sizeof(*sizes); s++) {
aml_errno = AML_SUCCESS;
ptr = aml_area_mmap(area, sizes[s], NULL);
assert(aml_errno == AML_SUCCESS);
......@@ -33,14 +34,16 @@ void test_map(const struct aml_area *area){
}
}
void test_aml_area(struct aml_area *area){
void test_aml_area(struct aml_area *area)
{
test_map(area);
}
int main(int argc, char** argv){
int main(int argc, char **argv)
{
aml_init(&argc, &argv);
test_map(&aml_area_linux);
#if RUN_CUDA
#if RUN_CUDA
test_map(&aml_area_cuda);
#endif
aml_finalize();
......
......@@ -6,7 +6,7 @@
* For more info, see https://xgitlab.cels.anl.gov/argo/aml
*
* SPDX-License-Identifier: BSD-3-Clause
*******************************************************************************/
*******************************************************************************/
#include "aml.h"
#include "config.h"
#include "aml/area/cuda.h"
......@@ -27,28 +27,31 @@ void test_device_mmap(const int device)
int err;
size_t size;
assert( aml_area_cuda_create(&area, device, AML_AREA_CUDA_FLAG_DEFAULT) == AML_SUCCESS );
assert(!aml_area_cuda_create(&area, device,
AML_AREA_CUDA_FLAG_DEFAULT));
host_data = malloc(sizes[ns-1]);
assert(host_data != NULL);
host_copy = malloc(sizes[ns-1]);
host_copy = malloc(sizes[ns-1]);
assert(host_copy != NULL);
for(size_t i=0; i<ns; i++){
for (size_t i = 0; i < ns; i++) {
size = sizes[i];
memset(host_data, 0, size);
memset(host_copy, 1, size);
device_data = aml_area_mmap(area, size, NULL);
assert(device_data);
err = cudaMemcpy(device_data, host_data, size, cudaMemcpyHostToDevice);
err = cudaMemcpy(device_data, host_data, size,
cudaMemcpyHostToDevice);
assert(err == cudaSuccess);
err = cudaMemcpy(host_copy, device_data, size, cudaMemcpyDeviceToHost);
err = cudaMemcpy(host_copy, device_data, size,
cudaMemcpyDeviceToHost);
assert(err == cudaSuccess);
assert(!memcmp(host_data, host_copy, size));
assert(aml_area_munmap(area, device_data, size) == AML_SUCCESS);
assert(!memcmp(host_data, host_copy, size));
assert(aml_area_munmap(area, device_data, size) == AML_SUCCESS);
}
free(host_data);
free(host_copy);
aml_area_cuda_destroy(&area);
......@@ -62,20 +65,21 @@ void test_host_mmap(const int device)
int ns = sizeof(sizes) / sizeof(*sizes);
size_t size;
assert( aml_area_cuda_create(&area, device, AML_AREA_CUDA_FLAG_ALLOC_HOST) == AML_SUCCESS );
host_copy = malloc(sizes[ns-1]);
assert(!aml_area_cuda_create(&area, device,
AML_AREA_CUDA_FLAG_ALLOC_HOST));
host_copy = malloc(sizes[ns-1]);
assert(host_copy != NULL);
for(size_t i=0; i<ns; i++){
for (size_t i = 0; i < ns; i++) {
size = sizes[i];
memset(host_copy, 1, size);
memset(host_copy, 1, size);
host_data = aml_area_mmap(area, size, NULL);
assert(host_data);
memcpy(host_data, host_copy, size);
assert(!memcmp(host_data, host_copy, size));
assert(aml_area_munmap(area, host_data, size) == AML_SUCCESS);
assert(aml_area_munmap(area, host_data, size) == AML_SUCCESS);
}
free(host_copy);
aml_area_cuda_destroy(&area);
}
......@@ -88,62 +92,70 @@ void test_mapped_mmap(const int device)
struct aml_area *area;
int ns = sizeof(sizes) / sizeof(*sizes);
size_t size;
struct aml_area_cuda_mmap_options options = { .device = device, .ptr = NULL, };
struct aml_area_cuda_mmap_options options = {.device = device,
.ptr = NULL, };
// Data initialization
host_copy = malloc(sizes[ns-1]);
host_copy = malloc(sizes[ns-1]);
assert(host_copy != NULL);
// Map existing host data.
host_data = malloc(sizes[ns-1]);
assert(host_data != NULL);
assert( aml_area_cuda_create(&area, device, AML_AREA_CUDA_FLAG_ALLOC_MAPPED) == AML_SUCCESS );
options.ptr = host_data;
for(size_t i=0; i<ns; i++){
assert(!aml_area_cuda_create(&area, device,
AML_AREA_CUDA_FLAG_ALLOC_MAPPED));
options.ptr = host_data;
for (size_t i = 0; i < ns; i++) {
size = sizes[i];
assert(aml_area_mmap(area, size, (struct aml_area_mmap_options *)&options) != NULL);
assert(cudaHostGetDevicePointer(&device_data, host_data, 0) == cudaSuccess);
assert(!aml_area_mmap(area, size,
(struct aml_area_mmap_options *)&options));
assert(!cudaHostGetDevicePointer(&device_data, host_data, 0));
assert(device_data);
memset(host_data, 0, size);
memset(host_copy, 1, size);
assert(cudaMemcpy(host_copy, device_data, size, cudaMemcpyDeviceToHost) == cudaSuccess);
memset(host_copy, 1, size);
assert(!cudaMemcpy(host_copy, device_data, size,
cudaMemcpyDeviceToHost));
assert(!memcmp(host_data, host_copy, size));
memset(host_data, 0, size);
memset(host_copy, 1, size);
assert(cudaMemcpy(host_copy, device_data, size, cudaMemcpyHostToDevice) == cudaSuccess);
memset(host_copy, 1, size);
assert(!cudaMemcpy(host_copy, device_data, size,
cudaMemcpyHostToDevice));
assert(!memcmp(host_data, host_copy, size));
assert(aml_area_munmap(area, host_data, size) == AML_SUCCESS);
assert(!aml_area_munmap(area, host_data, size));
}
free(host_data);
aml_area_cuda_destroy(&area);
// Map new host data.
assert( aml_area_cuda_create(&area,
assert(!aml_area_cuda_create(&area,
device,
AML_AREA_CUDA_FLAG_ALLOC_MAPPED |
AML_AREA_CUDA_FLAG_ALLOC_HOST) == AML_SUCCESS );
AML_AREA_CUDA_FLAG_ALLOC_HOST));
options.ptr = NULL;
for(size_t i=0; i<ns; i++){
for (size_t i = 0; i < ns; i++) {
size = sizes[i];
host_data = aml_area_mmap(area, size, (struct aml_area_mmap_options *)&options);
host_data = aml_area_mmap(area, size,
(struct aml_area_mmap_options *)&options);
assert(host_data != NULL);
assert(cudaHostGetDevicePointer(&device_data, host_data, 0) == cudaSuccess);
assert(!cudaHostGetDevicePointer(&device_data, host_data, 0));
assert(device_data);
memset(host_data, 0, size);
memset(host_copy, 1, size);
assert(cudaMemcpy(host_copy, device_data, size, cudaMemcpyDeviceToHost) == cudaSuccess);
memset(host_copy, 1, size);
assert(!cudaMemcpy(host_copy, device_data, size,
cudaMemcpyDeviceToHost));
assert(!memcmp(host_data, host_copy, size));
memset(host_data, 0, size);
memset(host_copy, 1, size);
assert(cudaMemcpy(host_copy, device_data, size, cudaMemcpyHostToDevice) == cudaSuccess);
memset(host_copy, 1, size);
assert(!cudaMemcpy(host_copy, device_data, size,
cudaMemcpyHostToDevice));
assert(!memcmp(host_data, host_copy, size));
assert(aml_area_munmap(area, host_data, size) == AML_SUCCESS);
assert(!aml_area_munmap(area, host_data, size));
}
free(host_copy);
aml_area_cuda_destroy(&area);
......@@ -158,34 +170,38 @@ void test_unified_mmap(const int device)
size_t size;
// Data initialization
host_copy = malloc(sizes[ns-1]);
host_copy = malloc(sizes[ns-1]);
assert(host_copy != NULL);
// Map existing host data.
assert( aml_area_cuda_create(&area, device, AML_AREA_CUDA_FLAG_ALLOC_UNIFIED) == AML_SUCCESS );
for(size_t i=0; i<ns; i++){
assert(!aml_area_cuda_create(&area, device,
AML_AREA_CUDA_FLAG_ALLOC_UNIFIED));
for (size_t i = 0; i < ns; i++) {
size = sizes[i];
unified_data = aml_area_mmap(area, size, NULL);
unified_data = aml_area_mmap(area, size, NULL);
assert(unified_data != NULL);
memset(unified_data, 0, size);
memset(host_copy, 1, size);
cudaDeviceSynchronize();
assert(cudaMemcpy(host_copy, unified_data, size, cudaMemcpyDeviceToHost) == cudaSuccess);
assert(!cudaMemcpy(host_copy, unified_data, size,
cudaMemcpyDeviceToHost));
assert(!memcmp(unified_data, host_copy, size));
memset(unified_data, 0, size);
memset(host_copy, 1, size);
cudaDeviceSynchronize();
assert(cudaMemcpy(unified_data, host_copy, size, cudaMemcpyHostToDevice) == cudaSuccess);
assert(!cudaMemcpy(unified_data, host_copy, size,
cudaMemcpyHostToDevice));
assert(!memcmp(unified_data, host_copy, size));
assert(aml_area_munmap(area, unified_data, size) == AML_SUCCESS);
assert(!aml_area_munmap(area, unified_data, size));
}
free(host_copy);
}
int main(){
int main(void)
{
int num_devices;
int flags;
int has_device_map;
......@@ -195,25 +211,23 @@ int main(){
assert(cudaGetDeviceCount(&num_devices) == cudaSuccess);
assert(cudaGetDevice(&current_device) == cudaSuccess);
for(int i = 0; i<num_devices; i++){
for (int i = 0; i < num_devices; i++) {
// check device features
assert(cudaSetDevice(i) == cudaSuccess);
assert(cudaGetDeviceFlags(&flags) == cudaSuccess);
assert(!cudaSetDevice(i));
assert(!cudaGetDeviceFlags(&flags));
has_device_map = flags & cudaDeviceMapHost;
assert(cudaSetDevice(current_device) == cudaSuccess);
assert(cudaDeviceGetAttribute(&has_unified_mem,
cudaDevAttrManagedMemory,
i) == cudaSuccess);
assert(cudaDeviceGetAttribute(&has_register_ptr,
cudaDevAttrCanUseHostPointerForRegisteredMem,
i) == cudaSuccess);
assert(!cudaSetDevice(current_device));
assert(!cudaDeviceGetAttribute(&has_unified_mem,
cudaDevAttrManagedMemory, i));
assert(!cudaDeviceGetAttribute(&has_register_ptr,
cudaDevAttrCanUseHostPointerForRegisteredMem, i));
test_device_mmap(i);
test_host_mmap(i);
if ( has_device_map && has_register_ptr )
if (has_device_map && has_register_ptr)
test_mapped_mmap(i);
if ( has_unified_mem )
if (has_unified_mem)
test_unified_mmap(i);
}
......
......@@ -27,15 +27,15 @@ const size_t sizes[3] = { 1, 1 << 12, 1 << 20 };
int num_nodes;
void test_area(struct aml_area *area, struct aml_area_mmap_options *options)
{
{
void *ptr;
for (size_t s = 0; s < sizeof(sizes) / sizeof(*sizes); s++) {
ptr = aml_area_mmap(area, sizes[s], options);
assert(ptr != NULL);
memset(ptr, 1, sizes[s]);
assert(aml_area_munmap(area, ptr, sizes[s]) == AML_SUCCESS);
}
}
}
......@@ -44,29 +44,30 @@ void test_case(const struct aml_bitmap *nodemask,
const int flags)
{
struct aml_area_linux_mmap_options options = {
.ptr = NULL,
.ptr = NULL,
.flags = MAP_ANONYMOUS | flags,
.mode = PROT_READ | PROT_WRITE,
.fd = fd,
.offset = 0,
};
struct aml_area * area;
struct aml_area *area;
if( aml_bitmap_last(nodemask) >= num_nodes ) {
assert(aml_area_linux_create(&area, nodemask, policy) == -AML_EDOM);
if (aml_bitmap_last(nodemask) >= num_nodes) {
assert(aml_area_linux_create(&area, nodemask, policy)
== -AML_EDOM);
return;
}
assert( aml_area_linux_create(&area, nodemask, policy) == AML_SUCCESS );
assert(!aml_area_linux_create(&area, nodemask, policy));
// Map anonymous test.
test_area(area, (struct aml_area_mmap_options *)(&options));
// Map file test.
options.flags = flags;
test_area(area, (struct aml_area_mmap_options *)(&options));
aml_area_linux_destroy(&area);
}
......@@ -102,7 +103,7 @@ void test_multiple_nodes(void)
aml_bitmap_zero(&bitmap);
aml_bitmap_set(&bitmap, 0);
for (int i = 1; i <= num_nodes; i++) {
aml_bitmap_set(&bitmap, i);
test_flags(&bitmap);
......@@ -115,7 +116,7 @@ int main(void)
char tmp_name[] = "test_area_linux_XXXXXX";
size_t size = sizes[sizeof(sizes)/sizeof(*sizes) - 1];
ssize_t nw = 0;
char *buf;
char *buf;
buf = malloc(size);
assert(buf);
......@@ -129,6 +130,7 @@ int main(void)
free(buf);
struct bitmask *nodeset = numa_get_mems_allowed();
num_nodes = numa_bitmask_weight(nodeset);
test_single_node();
......
......@@ -65,11 +65,13 @@ int main(int argc, char *argv[])
assert(!aml_dma_linux_par_create(&dma, 1, NULL, NULL));
struct aml_dma_request *requests[16];
struct aml_layout *layouts[16][2];
for (int i = 0; i < 16; i++) {
size_t sz = isz/16;
size_t off = i*sz;
void *dptr = (void *)&(idest[off]);
void *sptr = (void *)&(isrc[off]);
aml_layout_dense_create(&layouts[i][0], dptr, 0, sizeof(int),
1, &sz, NULL, NULL);
aml_layout_dense_create(&layouts[i][1], sptr, 0, sizeof(int),
......@@ -78,7 +80,7 @@ int main(int argc, char *argv[])
layouts[i][0], layouts[i][1]));
assert(requests[i] != NULL);
}
for(int i = 0; i < 16; i++) {
for (int i = 0; i < 16; i++) {
assert(!aml_dma_wait(dma, &requests[i]));
aml_layout_dense_destroy(&layouts[i][0]);
aml_layout_dense_destroy(&layouts[i][1]);
......
......@@ -64,11 +64,13 @@ int main(int argc, char *argv[])
assert(!aml_dma_linux_seq_create(&dma, 1, NULL, NULL));
struct aml_dma_request *requests[16];
struct aml_layout *layouts[16][2];
for (int i = 0; i < 16; i++) {
size_t sz = isz/16;
size_t off = i*sz;
void *dptr = (void *)&(idest[off]);
void *sptr = (void *)&(isrc[off]);
aml_layout_dense_create(&layouts[i][0], dptr, 0, sizeof(int),
1, &sz, NULL, NULL);
aml_layout_dense_create(&layouts[i][1], sptr, 0, sizeof(int),
......@@ -77,7 +79,7 @@ int main(int argc, char *argv[])
layouts[i][0], layouts[i][1]));
assert(requests[i] != NULL);
}
for(int i = 0; i < 16; i++) {
for (int i = 0; i < 16; i++) {
assert(!aml_dma_wait(dma, &requests[i]));
aml_layout_dense_destroy(&layouts[i][0]);
aml_layout_dense_destroy(&layouts[i][1]);
......
This diff is collapsed.
......@@ -35,6 +35,7 @@ int main(int argc, char *argv[])
/* initialize all the supporting struct */
size_t size = TILESIZE*_SC_PAGE_SIZE*NBTILES;
size_t tsize = TILESIZE*_SC_PAGE_SIZE;
src = aml_area_mmap(&aml_area_linux, size, NULL);
dst = aml_area_mmap(&aml_area_linux, size, NULL);
assert(src != NULL && dst != NULL);
......@@ -52,11 +53,13 @@ int main(int argc, char *argv[])
AML_TILING_ORDER_COLUMN_MAJOR,
scratch_layout, 1, &tsize));
size_t maxrequests = NBTILES;
assert(!aml_dma_linux_par_create(&dma, maxrequests, NULL, NULL));
/* setup some initial values in the memory */
for (size_t i = 0; i < NBTILES; i++) {
char *s, *d;
s = &((char *)src)[i * tsize];
d = &((char *)dst)[i * tsize];
memset((void *)s, (char)i, TILESIZE*_SC_PAGE_SIZE);
......@@ -67,7 +70,7 @@ int main(int argc, char *argv[])
assert(!aml_scratch_par_create(&scratch, dma, src_tiling,
scratch_tiling, maxrequests));
/* move some stuff */
for(size_t i = 0; i < NBTILES; i++) {
for (size_t i = 0; i < NBTILES; i++) {
int di, si;
void *dp, *sp;
struct aml_layout *sl, *dl;
......
......@@ -35,6 +35,7 @@ int main(int argc, char *argv[])
/* initialize all the supporting struct */
size_t size = TILESIZE*_SC_PAGE_SIZE*NBTILES;
size_t tsize = TILESIZE*_SC_PAGE_SIZE;
src = aml_area_mmap(&aml_area_linux, size, NULL);
dst = aml_area_mmap(&aml_area_linux, size, NULL);
assert(src != NULL && dst != NULL);
......@@ -52,11 +53,13 @@ int main(int argc, char *argv[])
AML_TILING_ORDER_COLUMN_MAJOR,
scratch_layout, 1, &tsize));
size_t maxrequests = NBTILES;
assert(!aml_dma_linux_par_create(&dma, maxrequests, NULL, NULL));
/* setup some initial values in the memory */
for (size_t i = 0; i < NBTILES; i++) {
char *s, *d;
s = &((char *)src)[i * tsize];
d = &((char *)dst)[i * tsize];
memset((void *)s, (char)i, TILESIZE*_SC_PAGE_SIZE);
......@@ -67,7 +70,7 @@ int main(int argc, char *argv[])
assert(!aml_scratch_seq_create(&scratch, dma, src_tiling,
scratch_tiling, maxrequests));
/* move some stuff */
for(size_t i = 0; i < NBTILES; i++) {
for (size_t i = 0; i < NBTILES; i++) {
int di, si;
void *dp, *sp;
struct aml_layout *sl, *dl;
......
This diff is collapsed.
......@@ -14,8 +14,10 @@
int main(void)
{
intptr_t *ptr = AML_INNER_MALLOC_2(void *, void *);
assert(ptr != NULL);
void *b = AML_INNER_MALLOC_NEXTPTR(ptr, void *, void *);
assert(b == &ptr[1]);
free(ptr);
return 0;
......
......@@ -23,6 +23,7 @@ int main(void)
unsigned long unused;
int key;
};
assert(!aml_vector_create(&v, 1, sizeof(struct test),
offsetof(struct test, key), -1));
......@@ -31,6 +32,7 @@ int main(void)
/* add an element and look for some */
struct test *e = aml_vector_get(v, 0);
assert(e != NULL);
e->unused = 42;
e->key = 24;
......@@ -39,6 +41,7 @@ int main(void)
/* add a second element, trigger a resize, and check it */
struct test *f = aml_vector_add(v);
assert(f != NULL && f->key == -1);
assert(aml_vector_find(v, 42) == -1);
assert(aml_vector_find(v, -1) == 1);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment