Commit e5d7b7cb authored by Swann Perarnau

Merge branch 'better-inner-malloc' into 'master'

Add inner-malloc utils

Closes #45

See merge request !69
parents d69071fc 82e6a73e
Pipeline #8188 passed with stages
in 27 minutes and 28 seconds
......@@ -28,7 +28,7 @@ make:generic:
- /^wip.*/
- /^WIP.*/
variables:
CFLAGS: "-std=c99 -pedantic -Wall -Wextra -Werror -Wno-pointer-arith -Wno-unused-but-set-parameter"
CFLAGS: "-std=c99 -pedantic -Wall -Wextra -Werror -Wno-unused-but-set-parameter"
script:
- ./autogen.sh
- mkdir build
......@@ -48,7 +48,7 @@ make:out-of-tree:
- /^wip.*/
- /^WIP.*/
variables:
CFLAGS: "-std=c99 -pedantic -Wall -Wextra -Werror -Wno-pointer-arith -Wno-unused-but-set-parameter"
CFLAGS: "-std=c99 -pedantic -Wall -Wextra -Werror -Wno-unused-but-set-parameter"
script:
- ./autogen.sh
- mkdir out
......
......@@ -28,8 +28,9 @@ include_aml_tiling_HEADERS = \
include_amlutilsdir=$(includedir)/aml/utils
include_amlutils_HEADERS = \
aml/utils/vector.h \
aml/utils/bitmap.h \
aml/utils/version.h \
aml/utils/error.h
aml/utils/error.h \
aml/utils/inner-malloc.h \
aml/utils/vector.h \
aml/utils/version.h
......@@ -35,6 +35,7 @@
#include "aml/utils/bitmap.h"
#include "aml/utils/error.h"
#include "aml/utils/inner-malloc.h"
#include "aml/utils/vector.h"
#include "aml/utils/version.h"
......
/*******************************************************************************
* Copyright 2019 UChicago Argonne, LLC.
* (c.f. AUTHORS, LICENSE)
*
* This file is part of the AML project.
* For more info, see https://xgitlab.cels.anl.gov/argo/aml
*
* SPDX-License-Identifier: BSD-3-Clause
*******************************************************************************/
#ifndef AML_INNER_MALLOC_H
#define AML_INNER_MALLOC_H

/* This header uses offsetof(), calloc() and intptr_t; include their headers
 * so the file is self-contained instead of relying on includers to pull
 * them in first. */
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

/**
 * @defgroup aml_inner_malloc "AML Internal Allocation Management"
 * @brief AML helper functions to handle inner allocations
 * @{
 *
 * Set of macros to create properly sized allocations of our internal complex
 * objects. In particular, help with the generic handle and implementation
 * specific data allocation as a single allocation, with all pointers properly
 * aligned.
 *
 * This code is all macros to handle the type specific logic we need.
 **/

/** Returns the allocation size required to handle two objects side-by-side.
 *
 * Uses an anonymous struct to ask the compiler what size an allocation should
 * be so that the second object is properly aligned too.
 *
 * Note: member names avoid a leading "__" — identifiers starting with two
 * underscores are reserved for the implementation (C11 7.1.3).
 */
#define AML_SIZEOF_ALIGNED(a, b) \
	(sizeof(struct { a e1_; b e2_; }))

/** Returns the offset of the second object when allocated side-by-side.
 *
 * Uses the same anonymous struct trick to figure out what offset the second
 * member sits at.
 */
#define AML_OFFSETOF_ALIGNED(a, b) \
	(offsetof(struct { a e1_; b e2_; }, e2_))

/** Allocates a zero-initialized region sized to contain two types
 * side-by-side, with the second one properly aligned.
 **/
#define AML_INNER_MALLOC_2(a, b) calloc(1, AML_SIZEOF_ALIGNED(a, b))

/** Allocates a zero-initialized region sized to contain two types plus an
 * extra trailing array of (sz) elements of a third type, aligned for that
 * type.
 *
 * (sz) is parenthesized so expression arguments (e.g. 4*ndims) expand
 * correctly.
 **/
#define AML_INNER_MALLOC_EXTRA(a, b, c, sz) \
	calloc(1, AML_SIZEOF_ALIGNED(struct { a f1_; b f2_; }, c) + \
	       (sizeof(c) * (sz)))

/** Returns the pointer to the second object inside an AML_INNER_MALLOC_2
 * allocation.
 *
 * Can be used to iterate over the pointers we need, using the last two types
 * as parameters. (ptr) is parenthesized so expression arguments are cast as
 * a whole.
 **/
#define AML_INNER_MALLOC_NEXTPTR(ptr, a, b) \
	((void *)(((intptr_t)(ptr)) + AML_OFFSETOF_ALIGNED(a, b)))

/** Returns a pointer to element (off) of the extra zone of an
 * AML_INNER_MALLOC_EXTRA allocation.
 *
 * The inner anonymous struct must match the one used by
 * AML_INNER_MALLOC_EXTRA so the computed offset points at the start of the
 * trailing array.
 **/
#define AML_INNER_MALLOC_EXTRA_NEXTPTR(ptr, a, b, c, off) \
	((void *)(((intptr_t)(ptr)) + \
		  AML_OFFSETOF_ALIGNED(struct { a f1_; b f2_; }, c) + \
		  ((off) * sizeof(c))))

/**
 * @}
 **/
#endif /* AML_INNER_MALLOC_H */
......@@ -197,16 +197,13 @@ int aml_area_linux_create(struct aml_area **area, const int mmap_flags,
return -AML_EINVAL;
}
ret = calloc(1, sizeof(struct aml_area));
ret = AML_INNER_MALLOC_2(struct aml_area, struct aml_area_linux_data);
if (ret == NULL)
return -AML_ENOMEM;
ret->data = AML_INNER_MALLOC_NEXTPTR(ret, struct aml_area,
struct aml_area_linux_data);
ret->ops = &aml_area_linux_ops;
ret->data = calloc(1, sizeof(struct aml_area_linux_data));
if (ret->data == NULL) {
err = -AML_ENOMEM;
goto err_f_ret;
}
data = (struct aml_area_linux_data *)ret->data;
/* set area_data and area */
......@@ -217,7 +214,7 @@ int aml_area_linux_create(struct aml_area **area, const int mmap_flags,
data->nodeset = numa_get_mems_allowed();
if (data->nodeset == NULL) {
err = -AML_ENOMEM;
goto err_f_data;
goto err_f_ret;
}
/* check if the nodemask is compatible with the nodeset */
......@@ -240,8 +237,6 @@ int aml_area_linux_create(struct aml_area **area, const int mmap_flags,
return AML_SUCCESS;
err_f_node:
numa_free_nodemask(data->nodeset);
err_f_data:
free(ret->data);
err_f_ret:
free(ret);
return err;
......@@ -255,12 +250,13 @@ void aml_area_linux_destroy(struct aml_area **area)
if (area == NULL)
return;
a = *area;
if (a == NULL || a->data == NULL)
if (a == NULL)
return;
/* with our creators it should not happen */
assert(a->data != NULL);
data = (struct aml_area_linux_data *) a->data;
numa_free_nodemask(data->nodeset);
free(data);
free(a);
*area = NULL;
}
......
......@@ -206,17 +206,13 @@ int aml_dma_linux_par_create(struct aml_dma **dma, size_t nbreqs,
*dma = NULL;
/* alloc */
ret = calloc(1, sizeof(struct aml_dma));
ret = AML_INNER_MALLOC_2(struct aml_dma, struct aml_dma_linux_par);
if (ret == NULL)
return -AML_ENOMEM;
ret->data = AML_INNER_MALLOC_NEXTPTR(ret, struct aml_dma,
struct aml_dma_linux_par);
ret->ops = &aml_dma_linux_par_ops;
ret->data = calloc(1, sizeof(struct aml_dma_linux_par));
if (ret->data == NULL) {
free(ret);
return -AML_ENOMEM;
}
d = (struct aml_dma_linux_par *)ret->data;
d->ops = aml_dma_linux_par_inner_ops;
......@@ -247,8 +243,10 @@ void aml_dma_linux_par_destroy(struct aml_dma **dma)
if (dma == NULL)
return;
d = *dma;
if (d == NULL || d->data == NULL)
if (d == NULL)
return;
assert(d->data != NULL);
l = (struct aml_dma_linux_par *)d->data;
for (size_t i = 0; i < aml_vector_size(l->data.requests); i++) {
struct aml_dma_request_linux_par *req =
......@@ -259,7 +257,6 @@ void aml_dma_linux_par_destroy(struct aml_dma **dma)
aml_vector_destroy(&l->data.requests);
pthread_mutex_destroy(&l->data.lock);
free(l);
free(d);
*dma = NULL;
}
......@@ -164,16 +164,12 @@ int aml_dma_linux_seq_create(struct aml_dma **dma, size_t nbreqs)
*dma = NULL;
/* alloc */
ret = calloc(1, sizeof(struct aml_dma));
ret = AML_INNER_MALLOC_2(struct aml_dma, struct aml_dma_linux_seq);
if (ret == NULL)
return -AML_ENOMEM;
ret->data = calloc(1, sizeof(struct aml_dma_linux_seq));
if (ret->data == NULL) {
free(ret);
return -AML_ENOMEM;
}
ret->data = AML_INNER_MALLOC_NEXTPTR(ret, struct aml_dma,
struct aml_dma_linux_seq);
ret->ops = &aml_dma_linux_seq_ops;
d = (struct aml_dma_linux_seq *)ret->data;
......@@ -196,12 +192,13 @@ void aml_dma_linux_seq_destroy(struct aml_dma **dma)
if (dma == NULL)
return;
d = *dma;
if (d == NULL || d->data == NULL)
if (d == NULL)
return;
assert(d->data != NULL);
l = (struct aml_dma_linux_seq *)d->data;
aml_vector_destroy(&l->data.requests);
pthread_mutex_destroy(&l->data.lock);
free(l);
free(d);
*dma = NULL;
}
......@@ -12,41 +12,47 @@
#include "aml/layout/native.h"
#include "aml/layout/dense.h"
static struct aml_layout *aml_layout_dense_alloc(const size_t ndims)
static int aml_layout_dense_alloc(struct aml_layout **ret,
const size_t ndims)
{
struct aml_layout_dense *data;
struct aml_layout *layout;
char *l;
layout = malloc((sizeof(struct aml_layout) +
sizeof(struct aml_layout_dense) +
(ndims * 4) * sizeof(size_t)));
struct aml_layout_dense *data;
layout = AML_INNER_MALLOC_EXTRA(struct aml_layout,
struct aml_layout_dense,
size_t, 4*ndims);
if (layout == NULL) {
perror("malloc");
return NULL;
*ret = NULL;
return -AML_ENOMEM;
}
l = (char *) layout;
l += sizeof(*layout);
data = (struct aml_layout_dense *) l;
l += sizeof(*data);
data->dims = (size_t *) l;
l += sizeof(*data->dims) * ndims;
data->stride = (size_t *) l;
data = AML_INNER_MALLOC_NEXTPTR(layout,
struct aml_layout,
struct aml_layout_dense);
layout->data = (struct aml_layout_data *) data;
data->dims = AML_INNER_MALLOC_EXTRA_NEXTPTR(layout,
struct aml_layout,
struct aml_layout_dense,
size_t, 0);
data->stride = AML_INNER_MALLOC_EXTRA_NEXTPTR(layout,
struct aml_layout,
struct aml_layout_dense,
size_t, ndims);
for (size_t i = 0; i < ndims; i++)
data->stride[i] = 1;
l += sizeof(*data->stride) * ndims;
data->pitch = (size_t *) l;
l += sizeof(*data->pitch) * ndims;
data->cpitch = (size_t *) l;
data->pitch = AML_INNER_MALLOC_EXTRA_NEXTPTR(layout,
struct aml_layout,
struct aml_layout_dense,
size_t, ndims*2);
data->cpitch = AML_INNER_MALLOC_EXTRA_NEXTPTR(layout,
struct aml_layout,
struct aml_layout_dense,
size_t, ndims*3);
data->ptr = NULL;
data->ndims = ndims;
layout->data = (struct aml_layout_data *) data;
return layout;
*ret = layout;
return AML_SUCCESS;
}
static
......@@ -79,16 +85,16 @@ int aml_layout_dense_create(struct aml_layout **layout,
struct aml_layout *l;
struct aml_layout_dense *data;
int err;
if (layout == NULL)
return -AML_EINVAL;
l = aml_layout_dense_alloc(ndims);
if (l == NULL)
return -AML_ENOMEM;
err = aml_layout_dense_alloc(&l, ndims);
if (err)
return err;
data = (struct aml_layout_dense *)l->data;
data->ptr = ptr;
data->cpitch[0] = element_size;
......@@ -305,9 +311,9 @@ int aml_layout_column_reshape(const struct aml_layout_data *data,
d = (const struct aml_layout_dense *)data;
layout = aml_layout_dense_alloc(ndims);
if (layout == NULL)
return -AML_ENOMEM;
err = aml_layout_dense_alloc(&layout, ndims);
if (err)
return err;
err = reshape_dims(d, ndims, dims, stride, cpitch);
if (err != AML_SUCCESS) {
......@@ -336,13 +342,14 @@ int aml_layout_column_slice(const struct aml_layout_data *data,
struct aml_layout *layout;
const struct aml_layout_dense *d;
void *ptr;
int err;
d = (const struct aml_layout_dense *)data;
ptr = aml_layout_column_deref(data, offsets);
layout = aml_layout_dense_alloc(d->ndims);
if (layout == NULL)
return -AML_ENOMEM;
err = aml_layout_dense_alloc(&layout, d->ndims);
if (err)
return err;
size_t cpitch[d->ndims + 1];
size_t new_strides[d->ndims];
......@@ -432,9 +439,9 @@ int aml_layout_row_reshape(const struct aml_layout_data *data,
int err;
d = (const struct aml_layout_dense *)data;
layout = aml_layout_dense_alloc(ndims);
if (layout == NULL)
return -AML_ENOMEM;
err = aml_layout_dense_alloc(&layout, ndims);
if (err)
return err;
for (size_t i = 0; i < ndims; i++)
n_dims[ndims - i - 1] = dims[i];
......@@ -466,6 +473,7 @@ int aml_layout_row_slice(const struct aml_layout_data *data,
struct aml_layout *layout;
const struct aml_layout_dense *d;
void *ptr;
int err;
d = (const struct aml_layout_dense *)data;
......@@ -474,9 +482,9 @@ int aml_layout_row_slice(const struct aml_layout_data *data,
size_t n_dims[d->ndims];
size_t n_strides[d->ndims];
layout = aml_layout_dense_alloc(d->ndims);
if (layout == NULL)
return -AML_ENOMEM;
err = aml_layout_dense_alloc(&layout, d->ndims);
if (err)
return err;
for (size_t i = 0; i < d->ndims; i++) {
n_offsets[i] = offsets[d->ndims - i - 1];
......@@ -513,15 +521,16 @@ int aml_layout_row_slice_native(const struct aml_layout_data *data,
struct aml_layout *layout;
const struct aml_layout_dense *d;
void *ptr;
int err;
d = (const struct aml_layout_dense *)data;
size_t cpitch[d->ndims + 1];
size_t new_strides[d->ndims];
layout = aml_layout_dense_alloc(d->ndims);
if (layout == NULL)
return -AML_ENOMEM;
err = aml_layout_dense_alloc(&layout, d->ndims);
if (err)
return err;
cpitch[d->ndims] = d->cpitch[d->ndims];
for (size_t i = 0; i < d->ndims; i++) {
......
......@@ -14,35 +14,43 @@
#include "aml/layout/dense.h"
#include "aml/layout/reshape.h"
static struct aml_layout *aml_layout_reshape_alloc(const size_t ndims,
const size_t target_ndims)
static int aml_layout_reshape_alloc(struct aml_layout **ret,
const size_t ndims,
const size_t target_ndims)
{
struct aml_layout *l;
struct aml_layout_data_reshape *r;
char *c;
l = malloc(sizeof(struct aml_layout) +
sizeof(struct aml_layout_data_reshape) +
2 * ndims * sizeof(size_t) +
target_ndims * sizeof(size_t));
if (l == NULL)
return NULL;
c = (char *)l;
c += sizeof(*l);
r = (struct aml_layout_data_reshape *)c;
c += sizeof(*r);
r->dims = (size_t *)c;
c += ndims * sizeof(*r->dims);
r->coffsets = (size_t *)c;
c += ndims * sizeof(*r->coffsets);
r->target_dims = (size_t *)c;
r->target = NULL;
r->target_ndims = target_ndims;
r->ndims = ndims;
l->data = (struct aml_layout_data *) r;
return l;
struct aml_layout *layout;
struct aml_layout_data_reshape *data;
layout = AML_INNER_MALLOC_EXTRA(struct aml_layout,
struct aml_layout_data_reshape,
size_t, (2*ndims)+target_ndims);
if (layout == NULL) {
*ret = NULL;
return -AML_ENOMEM;
}
data = AML_INNER_MALLOC_NEXTPTR(layout,
struct aml_layout,
struct aml_layout_data_reshape);
layout->data = (struct aml_layout_data *)data;
data->dims = AML_INNER_MALLOC_EXTRA_NEXTPTR(layout,
struct aml_layout,
struct aml_layout_data_reshape,
size_t, 0);
data->coffsets = AML_INNER_MALLOC_EXTRA_NEXTPTR(layout,
struct aml_layout,
struct aml_layout_data_reshape,
size_t, ndims);
data->target_dims = AML_INNER_MALLOC_EXTRA_NEXTPTR(layout,
struct aml_layout,
struct aml_layout_data_reshape,
size_t, 2*ndims);
data->target = NULL;
data->target_ndims = target_ndims;
data->ndims = ndims;
*ret = layout;
return AML_SUCCESS;
}
int aml_layout_reshape_create(struct aml_layout **layout,
......@@ -56,14 +64,15 @@ int aml_layout_reshape_create(struct aml_layout **layout,
size_t target_ndims;
size_t prod;
size_t target_prod;
int err;
if (layout == NULL || target == NULL || ndims == 0)
return -AML_EINVAL;
target_ndims = aml_layout_ndims(target);
output = aml_layout_reshape_alloc(ndims, target_ndims);
if (output == NULL)
return -AML_ENOMEM;
err = aml_layout_reshape_alloc(&output, ndims, target_ndims);
if (err)
return err;
data = (struct aml_layout_data_reshape *)output->data;
data->target = target;
......
......@@ -253,17 +253,13 @@ int aml_scratch_par_create(struct aml_scratch **scratch,
*scratch = NULL;
/* alloc */
ret = calloc(1, sizeof(struct aml_scratch));
ret = AML_INNER_MALLOC_2(struct aml_scratch, struct aml_scratch_par);
if (ret == NULL)
return -AML_ENOMEM;
ret->ops = &aml_scratch_par_ops;
ret->data = calloc(1, sizeof(struct aml_scratch_par));
if (ret->data == NULL) {
free(ret);
return -AML_ENOMEM;
}
ret->data = AML_INNER_MALLOC_NEXTPTR(ret, struct aml_scratch,
struct aml_scratch_par);
s = (struct aml_scratch_par *)ret->data;
s->ops = aml_scratch_par_inner_ops;
......@@ -300,8 +296,10 @@ void aml_scratch_par_destroy(struct aml_scratch **scratch)
if (scratch == NULL)
return;
s = *scratch;
if (s == NULL || s->data == NULL)
if (s == NULL)
return;
assert(s->data != NULL);
inner = (struct aml_scratch_par *)s->data;
aml_vector_destroy(&inner->data.requests);
aml_vector_destroy(&inner->data.tilemap);
......@@ -309,7 +307,6 @@ void aml_scratch_par_destroy(struct aml_scratch **scratch)
inner->data.sch_ptr,
inner->data.scratch_size);
pthread_mutex_destroy(&inner->data.lock);
free(inner);
free(s);
*scratch = NULL;
}
......@@ -253,17 +253,13 @@ int aml_scratch_seq_create(struct aml_scratch **scratch,
*scratch = NULL;
/* alloc */
ret = calloc(1, sizeof(struct aml_scratch));
ret = AML_INNER_MALLOC_2(struct aml_scratch, struct aml_scratch_seq);
if (ret == NULL)
return -AML_ENOMEM;
ret->ops = &aml_scratch_seq_ops;
ret->data = calloc(1, sizeof(struct aml_scratch_seq));
if (ret->data == NULL) {
free(ret);
return -AML_ENOMEM;
}
ret->data = AML_INNER_MALLOC_NEXTPTR(ret, struct aml_scratch,
struct aml_scratch_seq);
s = (struct aml_scratch_seq *)ret->data;
s->ops = aml_scratch_seq_inner_ops;
......@@ -300,8 +296,10 @@ void aml_scratch_seq_destroy(struct aml_scratch **scratch)
if (scratch == NULL)
return;
s = *scratch;
if (s == NULL || s->data == NULL)
if (s == NULL)
return;
assert(s->data != NULL);
inner = (struct aml_scratch_seq *)s->data;
aml_vector_destroy(&inner->data.requests);
aml_vector_destroy(&inner->data.tilemap);
......@@ -309,7 +307,6 @@ void aml_scratch_seq_destroy(struct aml_scratch **scratch)
inner->data.sch_ptr,
inner->data.scratch_size);
pthread_mutex_destroy(&inner->data.lock);
free(inner);
free(s);
*scratch = NULL;
}
......@@ -128,16 +128,14 @@ int aml_tiling_1d_create_iterator(struct aml_tiling_data *tiling,
*it = NULL;
ret = calloc(1, sizeof(struct aml_tiling_iterator));
ret = AML_INNER_MALLOC_2(struct aml_tiling_iterator,
struct aml_tiling_iterator_1d_data);
if (ret == NULL)
return -AML_ENOMEM;
ret->ops = &aml_tiling_iterator_1d_ops;
ret->data = calloc(1, sizeof(struct aml_tiling_iterator_1d_data));
if (ret->data == NULL) {
free(ret);
return -AML_ENOMEM;
}
ret->data = AML_INNER_MALLOC_NEXTPTR(ret, struct aml_tiling_iterator,
struct aml_tiling_iterator_1d_data);
data = (struct aml_tiling_iterator_1d_data *)ret->data;
data->i = 0;
data->tiling = (struct aml_tiling_1d_data *)tiling;
......@@ -154,9 +152,8 @@ int aml_tiling_1d_destroy_iterator(struct aml_tiling_data *t,
if (iter == NULL)
return -AML_EINVAL;
it = *iter;
if (it == NULL || it->data == NULL)
if (it == NULL)
return -AML_EINVAL;
free(it->data);
free(it);
*iter = NULL;
return AML_SUCCESS;
......@@ -186,17 +183,13 @@ int aml_tiling_1d_create(struct aml_tiling **tiling,
*tiling = NULL;
/* alloc */
ret = calloc(1, sizeof(struct aml_tiling));
ret = AML_INNER_MALLOC_2(struct aml_tiling, struct aml_tiling_1d_data);
if (ret == NULL)
return -AML_ENOMEM;
ret->ops = &aml_tiling_1d_ops;
ret->data = calloc(1, sizeof(struct aml_tiling_1d_data));
if (ret->data == NULL) {
free(ret);
return -AML_ENOMEM;
}
ret->data = AML_INNER_MALLOC_NEXTPTR(ret, struct aml_tiling,
struct aml_tiling_1d_data);
t = (struct aml_tiling_1d_data *) ret->data;
t->blocksize = tilesize;
......@@ -213,9 +206,8 @@ void aml_tiling_1d_destroy(struct aml_tiling **tiling)
if (tiling == NULL)
return;
t = *tiling;
if (t == NULL || t->data == NULL)
if (t == NULL)
return;
free(t->data);
free(t);
*tiling = NULL;
}
......
......@@ -136,16 +136,14 @@ int aml_tiling_2d_create_iterator(struct aml_tiling_data *tiling,
*it = NULL;
ret = calloc(1, sizeof(struct aml_tiling_iterator));
ret = AML_INNER_MALLOC_2(struct aml_tiling_iterator,
struct aml_tiling_iterator_2d_data);
if (ret == NULL)
return -AML_ENOMEM;
ret->ops = &aml_tiling_iterator_2d_ops;
ret->data = calloc(1, sizeof(struct aml_tiling_iterator_2d_data));
if (ret->data == NULL) {
free(ret);
return -AML_ENOMEM;
}
ret->data = AML_INNER_MALLOC_NEXTPTR(ret, struct aml_tiling_iterator,
struct aml_tiling_iterator_2d_data);
data = (struct aml_tiling_iterator_2d_data *)ret->data;
data->i = 0;
data->tiling = (struct aml_tiling_2d_data *)tiling;
......@@ -162,9 +160,8 @@ int aml_tiling_2d_destroy_iterator(struct aml_tiling_data *t,
if (iter == NULL)
return -AML_EINVAL;
it = *iter;
if (it == NULL || it->data == NULL)
if (it == NULL)
return -AML_EINVAL;
free(it->data);
free(it);
*iter = NULL;
return AML_SUCCESS;
......@@ -211,8 +208,7 @@ int aml_tiling_2d_create(struct aml_tiling **tiling, int type,
*tiling = NULL;
/* alloc */
ret = calloc(1, sizeof(struct aml_tiling));
ret = AML_INNER_MALLOC_2(struct aml_tiling, struct aml_tiling_2d_data);
if (ret == NULL)
return -AML_ENOMEM;
......@@ -221,11 +217,8 @@ int aml_tiling_2d_create(struct aml_tiling **tiling, int type,
else
ret->ops = &aml_tiling_2d_colmajor_ops;
ret->data = calloc(1, sizeof(struct aml_tiling_2d_data));
if (ret->data == NULL) {
free(ret);
return -AML_ENOMEM;
}
ret->data = AML_INNER_MALLOC_NEXTPTR(ret, struct aml_tiling,
struct aml_tiling_2d_data);
data = (struct aml_tiling_2d_data *)ret->data;
data->blocksize = tilesize;
......@@ -244,9 +237,8 @@ void aml_tiling_2d_destroy(struct aml_tiling **tiling)
if (tiling == NULL)
return;
t = *tiling;
if (t == NULL || t->data == NULL)
if (t