Diffstat (limited to 'src')
 src/alloc.c | 66 ++++++++++++++++++++++++++++++++++++++++++++----------------------
 src/alloc.h | 36 ++++++++++++++++++++++++++++++++++++
 2 files changed, 80 insertions(+), 22 deletions(-)
diff --git a/src/alloc.c b/src/alloc.c
index e83b273..467f0f0 100644
--- a/src/alloc.c
+++ b/src/alloc.c
@@ -101,6 +101,34 @@ void *xrealloc(void *ptr, size_t align, size_t old_size, size_t new_size) {
return ret;
}
+void *reserve(void *ptr, size_t align, size_t size, size_t count) {
+ // No need to overflow-check the current size
+ size_t old_size = size * count;
+
+ // Capacity is doubled at every power of two: 0→1, 1→2, 2→4, etc.
+ // If we stayed within the same size class, re-use ptr.
+ if (count & (count - 1)) {
+ // Tell sanitizers about the new array element
+ sanitize_alloc((char *)ptr + old_size, size);
+ errno = 0;
+ return ptr;
+ }
+
+ // No need to overflow-check; xrealloc() will fail before we overflow
+ size_t new_size = count ? 2 * old_size : size;
+ void *ret = xrealloc(ptr, align, old_size, new_size);
+ if (!ret) {
+ // errno is used to communicate success/failure to the RESERVE() macro
+ bfs_assert(errno != 0);
+ return ptr;
+ }
+
+ // Pretend we only allocated one more element
+ sanitize_free((char *)ret + old_size + size, new_size - old_size - size);
+ errno = 0;
+ return ret;
+}
+
/**
* An arena allocator chunk.
*/
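The new reserve() call implements the doubling growth policy described in its comments, signalling success or failure through errno rather than through the return value (which is usable in both cases). A minimal sketch of calling it directly, using a hypothetical push_42() helper on an int array (real callers should prefer the RESERVE() macro added to alloc.h below):

    int push_42(int **parray, size_t *pcount) {
        // reserve() returns a usable pointer on success *and* failure;
        // errno distinguishes the two cases
        *parray = reserve(*parray, alignof(int), sizeof(int), *pcount);
        if (errno != 0) {
            // Allocation failed; the old pointer was returned unchanged
            return -1;
        }
        // Success: there is now room for element [*pcount]
        (*parray)[(*pcount)++] = 42;
        return 0;
    }
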
@@ -150,13 +178,6 @@ void arena_init(struct arena *arena, size_t align, size_t size) {
/** Allocate a new slab. */
attr_cold
static int slab_alloc(struct arena *arena) {
- size_t nslabs = arena->nslabs;
- void **slabs = REALLOC_ARRAY(void *, arena->slabs, nslabs, nslabs + 1);
- if (!slabs) {
- return -1;
- }
- arena->slabs = slabs;
-
// Make the initial allocation size ~4K
size_t size = 4096;
if (size < arena->size) {
@@ -165,7 +186,7 @@ static int slab_alloc(struct arena *arena) {
// Trim off the excess
size -= size % arena->size;
// Double the size for every slab
- size <<= nslabs;
+ size <<= arena->nslabs;
// Allocate the slab
void *slab = zalloc(arena->align, size);
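To illustrate the sizing logic: with arena->size == 24, the base size is trimmed to 4096 - (4096 % 24) == 4080 bytes, so slab 0 holds 170 chunks; slab 1 is then 8160 bytes (340 chunks), slab 2 is 16320 bytes, and so on, roughly doubling the arena's total capacity with each new slab.
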
@@ -173,6 +194,13 @@ static int slab_alloc(struct arena *arena) {
return -1;
}
+ // Grow the slab array
+ void **pslab = RESERVE(void *, &arena->slabs, &arena->nslabs);
+ if (!pslab) {
+ free(slab);
+ return -1;
+ }
+
// Fix the last chunk->next offset
void *last = (char *)slab + size - arena->size;
chunk_set_next(arena, last, arena->chunks);
@@ -180,8 +208,7 @@ static int slab_alloc(struct arena *arena) {
// We can rely on zero-initialized slabs, but others shouldn't
sanitize_uninit(slab, size);
- arena->chunks = arena->slabs[nslabs] = slab;
- ++arena->nslabs;
+ arena->chunks = *pslab = slab;
return 0;
}
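Note the ordering here: the slab is allocated before the slab array is grown, so the only cleanup needed when RESERVE() fails is to free the fresh slab, and the arena itself is left in its original state.
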
@@ -252,21 +279,16 @@ static size_t varena_exact_size(const struct varena *varena, size_t count) {
static struct arena *varena_get(struct varena *varena, size_t count) {
size_t i = varena_size_class(varena, count);
- if (i >= varena->narenas) {
- size_t narenas = i + 1;
- struct arena *arenas = REALLOC_ARRAY(struct arena, varena->arenas, varena->narenas, narenas);
- if (!arenas) {
+ while (i >= varena->narenas) {
+ size_t j = varena->narenas;
+ struct arena *arena = RESERVE(struct arena, &varena->arenas, &varena->narenas);
+ if (!arena) {
return NULL;
}
- for (size_t j = varena->narenas; j < narenas; ++j) {
- size_t shift = j + varena->shift;
- size_t size = varena_exact_size(varena, (size_t)1 << shift);
- arena_init(&arenas[j], varena->align, size);
- }
-
- varena->narenas = narenas;
- varena->arenas = arenas;
+ size_t shift = j + varena->shift;
+ size_t size = varena_exact_size(varena, (size_t)1 << shift);
+ arena_init(arena, varena->align, size);
}
return &varena->arenas[i];
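Growing one size class per iteration keeps the loop simple: if varena->narenas is 2 and the request maps to size class 4, RESERVE() succeeds three times, initializing arenas 2, 3, and 4 before the function returns &varena->arenas[4]. If an iteration fails partway through, the arenas initialized so far remain valid.
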
diff --git a/src/alloc.h b/src/alloc.h
index a6dee99..7132470 100644
--- a/src/alloc.h
+++ b/src/alloc.h
@@ -9,6 +9,7 @@
#define BFS_ALLOC_H
#include "config.h"
+#include <errno.h>
#include <stdlib.h>
/** Check if a size is properly aligned. */
@@ -180,6 +181,41 @@ void *xrealloc(void *ptr, size_t align, size_t old_size, size_t new_size);
(type *)xrealloc((ptr), alignof(type), sizeof_flex(type, member, old_count), sizeof_flex(type, member, new_count))
/**
+ * Reserve space for one more element in a dynamic array.
+ *
+ * @param ptr
+ * The pointer to reallocate.
+ * @param align
+ * The required alignment.
+ * @param size
+ * The size of each array element.
+ * @param count
+ * The current size of the array.
+ * @return
+ * The reallocated memory, on both success *and* failure. On success,
+ * errno will be set to zero, and the returned pointer will have room
+ * for (count + 1) elements. On failure, errno will be non-zero, and
+ * ptr will be returned unchanged.
+ */
+void *reserve(void *ptr, size_t align, size_t size, size_t count);
+
+/**
+ * Convenience macro to grow a dynamic array.
+ *
+ * @param type
+ * The array element type.
+ * @param type **ptr
+ * A pointer to the array.
+ * @param size_t *count
+ * A pointer to the array's size.
+ * @return
+ * On success, a pointer to the newly reserved array element, i.e.
+ * `*ptr + *count++`. On failure, NULL is returned, and both *ptr and
+ * *count remain unchanged.
+ */
+#define RESERVE(type, ptr, count) \
+ ((*ptr) = reserve((*ptr), alignof(type), sizeof(type), (*count)), \
+ errno ? NULL : (*ptr) + (*count)++)
+
+/**
* An arena allocator for fixed-size types.
*
* Arena allocators are intentionally not thread safe.
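
Taken together, reserve() and RESERVE() turn the push-one-element pattern into a single call. A minimal usage sketch, with a hypothetical fill() function growing an int array:

    int fill(void) {
        size_t count = 0;
        int *array = NULL;

        for (int i = 0; i < 10; ++i) {
            // RESERVE() yields a pointer to the new element and bumps
            // count, or yields NULL and leaves both operands unchanged
            int *elem = RESERVE(int, &array, &count);
            if (!elem) {
                free(array);
                return -1;
            }
            *elem = i;
        }

        free(array);
        return 0;
    }

Because reserve() returns the old pointer on failure, the macro's comma expression can assign the result back to *ptr unconditionally; errno alone decides whether the new element slot is handed out and the count incremented.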