author    Tavian Barnes <tavianator@tavianator.com>  2023-11-23 13:08:04 -0500
committer Tavian Barnes <tavianator@tavianator.com>  2023-11-23 13:56:03 -0500
commit    ae18c20d5a585ae4bc1e9ee6859230fee7f73ed8 (patch)
tree      9b83e03091def0002360327a102080e1fd69e6a7 /src/alloc.c
parent    f9f43fe44f4a013aac94d5787cf827ec04b4c861 (diff)
alloc: New helpers for aligned reallocation
There is no aligned_realloc(), so the new xrealloc() function emulates it by manually reallocating and copying for over-aligned types. The new REALLOC_ARRAY() and REALLOC_FLEX() macros wrap xrealloc().
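For reference, a minimal sketch of how REALLOC_ARRAY() and REALLOC_FLEX() might expand to xrealloc() calls, based on their usage in this diff. The sizeof_flex() helper and the exact definitions are assumptions; the real macros live in the alloc header and may differ:

/* Sketch only: resize an array from old_count to new_count elements of the given type. */
#define REALLOC_ARRAY(type, ptr, old_count, new_count) \
	(type *)xrealloc((ptr), alignof(type), sizeof_array(type, old_count), sizeof_array(type, new_count))

/* Sketch only: resize a struct whose flexible array member grows from old_count
 * to new_count elements.  sizeof_flex() is assumed to compute that size, rounded
 * up to a multiple of alignof(type) so that xrealloc()'s alignment assertions hold. */
#define REALLOC_FLEX(type, member, ptr, old_count, new_count) \
	(type *)xrealloc((ptr), alignof(type), sizeof_flex(type, member, old_count), sizeof_flex(type, member, new_count))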
Diffstat (limited to 'src/alloc.c')
-rw-r--r--  src/alloc.c | 42
1 file changed, 38 insertions(+), 4 deletions(-)
diff --git a/src/alloc.c b/src/alloc.c
index 8c88813..e83b273 100644
--- a/src/alloc.c
+++ b/src/alloc.c
@@ -69,6 +69,38 @@ void *zalloc(size_t align, size_t size) {
 	return ret;
 }
 
+void *xrealloc(void *ptr, size_t align, size_t old_size, size_t new_size) {
+	bfs_assert(has_single_bit(align));
+	bfs_assert(is_aligned(align, old_size));
+	bfs_assert(is_aligned(align, new_size));
+
+	if (new_size == 0) {
+		free(ptr);
+		return NULL;
+	} else if (new_size > ALLOC_MAX) {
+		errno = EOVERFLOW;
+		return NULL;
+	}
+
+	if (align <= alignof(max_align_t)) {
+		return realloc(ptr, new_size);
+	}
+
+	// There is no aligned_realloc(), so reallocate and copy manually
+	void *ret = xmemalign(align, new_size);
+	if (!ret) {
+		return NULL;
+	}
+
+	size_t min_size = old_size < new_size ? old_size : new_size;
+	if (min_size) {
+		memcpy(ret, ptr, min_size);
+	}
+
+	free(ptr);
+	return ret;
+}
+
 /**
  * An arena allocator chunk.
  */
@@ -118,7 +150,8 @@ void arena_init(struct arena *arena, size_t align, size_t size) {
 /** Allocate a new slab. */
 attr_cold
 static int slab_alloc(struct arena *arena) {
-	void **slabs = realloc(arena->slabs, sizeof_array(void *, arena->nslabs + 1));
+	size_t nslabs = arena->nslabs;
+	void **slabs = REALLOC_ARRAY(void *, arena->slabs, nslabs, nslabs + 1);
 	if (!slabs) {
 		return -1;
 	}
@@ -132,7 +165,7 @@ static int slab_alloc(struct arena *arena) {
 	// Trim off the excess
 	size -= size % arena->size;
 	// Double the size for every slab
-	size <<= arena->nslabs;
+	size <<= nslabs;
 
 	// Allocate the slab
 	void *slab = zalloc(arena->align, size);
@@ -147,7 +180,8 @@ static int slab_alloc(struct arena *arena) {
 	// We can rely on zero-initialized slabs, but others shouldn't
 	sanitize_uninit(slab, size);
 
-	arena->chunks = arena->slabs[arena->nslabs++] = slab;
+	arena->chunks = arena->slabs[nslabs] = slab;
+	++arena->nslabs;
 
 	return 0;
 }
@@ -220,7 +254,7 @@ static struct arena *varena_get(struct varena *varena, size_t count) {
 
 	if (i >= varena->narenas) {
 		size_t narenas = i + 1;
-		struct arena *arenas = realloc(varena->arenas, sizeof_array(struct arena, narenas));
+		struct arena *arenas = REALLOC_ARRAY(struct arena, varena->arenas, varena->narenas, narenas);
		if (!arenas) {
 			return NULL;
 		}
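
As a usage illustration (hypothetical code, not part of this commit; the struct and function names are made up), growing a flexible-array struct through REALLOC_FLEX() could look like this, given the macro sketch above:

struct name_buf {
	size_t len;
	char chars[]; // flexible array member
};

// Grow the character buffer from old_cap to new_cap elements.  On failure this
// returns NULL and the original allocation stays valid (except when new_cap is
// 0, in which case xrealloc() frees it).
struct name_buf *name_buf_grow(struct name_buf *buf, size_t old_cap, size_t new_cap) {
	return REALLOC_FLEX(struct name_buf, chars, buf, old_cap, new_cap);
}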