author    | Tavian Barnes <tavianator@tavianator.com> | 2023-11-23 13:08:04 -0500
committer | Tavian Barnes <tavianator@tavianator.com> | 2023-11-23 13:56:03 -0500
commit    | ae18c20d5a585ae4bc1e9ee6859230fee7f73ed8 (patch)
tree      | 9b83e03091def0002360327a102080e1fd69e6a7 /src
parent    | f9f43fe44f4a013aac94d5787cf827ec04b4c861 (diff)
download  | bfs-ae18c20d5a585ae4bc1e9ee6859230fee7f73ed8.tar.xz
alloc: New helpers for aligned reallocation
There is no aligned_realloc(), so the new xrealloc() function emulates
it by manually reallocating and copying for over-aligned types. The new
REALLOC_ARRAY() and REALLOC_FLEX() macros wrap xrealloc().
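As a hedged usage sketch (not part of the commit itself), here is how a caller might grow a dynamic array with the new REALLOC_ARRAY() macro. It assumes bfs's src/alloc.h is on the include path; the `ints` array and the loop are invented for illustration:

```c
#include "alloc.h"

#include <stdio.h>
#include <stdlib.h>

int main(void) {
	int *ints = NULL;
	size_t count = 0;

	// Grow the array one element at a time, like slab_alloc() below
	for (int i = 0; i < 8; ++i) {
		int *next = REALLOC_ARRAY(int, ints, count, count + 1);
		if (!next) {
			free(ints);
			return EXIT_FAILURE;
		}
		ints = next;
		ints[count++] = i;
	}

	for (size_t i = 0; i < count; ++i) {
		printf("ints[%zu] = %d\n", i, ints[i]);
	}

	free(ints);
	return EXIT_SUCCESS;
}
```

Unlike plain realloc(), the macro takes both the old and new counts: for over-aligned types, xrealloc() cannot rely on realloc()'s internal size bookkeeping, so the caller must say how many bytes to copy into the new allocation.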
Diffstat (limited to 'src')
-rw-r--r-- | src/alloc.c | 42
-rw-r--r-- | src/alloc.h | 24
2 files changed, 62 insertions(+), 4 deletions(-)
```diff
diff --git a/src/alloc.c b/src/alloc.c
index 8c88813..e83b273 100644
--- a/src/alloc.c
+++ b/src/alloc.c
@@ -69,6 +69,38 @@ void *zalloc(size_t align, size_t size) {
 	return ret;
 }
 
+void *xrealloc(void *ptr, size_t align, size_t old_size, size_t new_size) {
+	bfs_assert(has_single_bit(align));
+	bfs_assert(is_aligned(align, old_size));
+	bfs_assert(is_aligned(align, new_size));
+
+	if (new_size == 0) {
+		free(ptr);
+		return NULL;
+	} else if (new_size > ALLOC_MAX) {
+		errno = EOVERFLOW;
+		return NULL;
+	}
+
+	if (align <= alignof(max_align_t)) {
+		return realloc(ptr, new_size);
+	}
+
+	// There is no aligned_realloc(), so reallocate and copy manually
+	void *ret = xmemalign(align, new_size);
+	if (!ret) {
+		return NULL;
+	}
+
+	size_t min_size = old_size < new_size ? old_size : new_size;
+	if (min_size) {
+		memcpy(ret, ptr, min_size);
+	}
+
+	free(ptr);
+	return ret;
+}
+
 /**
  * An arena allocator chunk.
  */
@@ -118,7 +150,8 @@ void arena_init(struct arena *arena, size_t align, size_t size) {
 /** Allocate a new slab. */
 attr_cold
 static int slab_alloc(struct arena *arena) {
-	void **slabs = realloc(arena->slabs, sizeof_array(void *, arena->nslabs + 1));
+	size_t nslabs = arena->nslabs;
+	void **slabs = REALLOC_ARRAY(void *, arena->slabs, nslabs, nslabs + 1);
 	if (!slabs) {
 		return -1;
 	}
@@ -132,7 +165,7 @@ static int slab_alloc(struct arena *arena) {
 	// Trim off the excess
 	size -= size % arena->size;
 	// Double the size for every slab
-	size <<= arena->nslabs;
+	size <<= nslabs;
 
 	// Allocate the slab
 	void *slab = zalloc(arena->align, size);
@@ -147,7 +180,8 @@ static int slab_alloc(struct arena *arena) {
 	// We can rely on zero-initialized slabs, but others shouldn't
 	sanitize_uninit(slab, size);
 
-	arena->chunks = arena->slabs[arena->nslabs++] = slab;
+	arena->chunks = arena->slabs[nslabs] = slab;
+	++arena->nslabs;
 
 	return 0;
 }
@@ -220,7 +254,7 @@ static struct arena *varena_get(struct varena *varena, size_t count) {
 
 	if (i >= varena->narenas) {
 		size_t narenas = i + 1;
-		struct arena *arenas = realloc(varena->arenas, sizeof_array(struct arena, narenas));
+		struct arena *arenas = REALLOC_ARRAY(struct arena, varena->arenas, varena->narenas, narenas);
 		if (!arenas) {
 			return NULL;
 		}
diff --git a/src/alloc.h b/src/alloc.h
index 15e4983..a6dee99 100644
--- a/src/alloc.h
+++ b/src/alloc.h
@@ -156,6 +156,30 @@ void *zalloc(size_t align, size_t size);
 	(type *)zalloc(alignof(type), sizeof_flex(type, member, count))
 
 /**
+ * Alignment-aware realloc().
+ *
+ * @param ptr
+ *         The pointer to reallocate.
+ * @param align
+ *         The required alignment.
+ * @param old_size
+ *         The previous allocation size.
+ * @param new_size
+ *         The new allocation size.
+ * @return
+ *         The reallocated memory, or NULL on failure.
+ */
+void *xrealloc(void *ptr, size_t align, size_t old_size, size_t new_size);
+
+/** Reallocate memory for an array. */
+#define REALLOC_ARRAY(type, ptr, old_count, new_count) \
+	(type *)xrealloc((ptr), alignof(type), sizeof_array(type, old_count), sizeof_array(type, new_count))
+
+/** Reallocate memory for a flexible struct. */
+#define REALLOC_FLEX(type, member, ptr, old_count, new_count) \
+	(type *)xrealloc((ptr), alignof(type), sizeof_flex(type, member, old_count), sizeof_flex(type, member, new_count))
+
+/**
  * An arena allocator for fixed-size types.
  *
  * Arena allocators are intentionally not thread safe.
```
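The REALLOC_FLEX() counterpart resizes a struct with a flexible array member. Another hedged sketch, again assuming bfs's src/alloc.h is available; the `struct id_list` type and `id_list_grow()` helper are hypothetical, invented for illustration, and only REALLOC_FLEX() itself comes from this commit:

```c
#include "alloc.h"

#include <stddef.h>

// Hypothetical flexible struct: a length-prefixed list of IDs
struct id_list {
	size_t count;
	int ids[];  // flexible array member
};

// Resize the list to hold new_count IDs; on failure, NULL is
// returned and the old list is left untouched
static struct id_list *id_list_grow(struct id_list *list, size_t new_count) {
	size_t old_count = list ? list->count : 0;

	struct id_list *next = REALLOC_FLEX(struct id_list, ids, list, old_count, new_count);
	if (next) {
		next->count = new_count;
	}
	return next;
}
```

sizeof_flex() computes the size of the header plus the flexible member, so the macro avoids the manual offsetof() arithmetic such resizes usually require, and the ALLOC_MAX check inside xrealloc() catches overflowing sizes with EOVERFLOW rather than a short allocation.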