Diffstat (limited to 'src/alloc.c')
 src/alloc.c | 158 ++++++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 111 insertions(+), 47 deletions(-)
diff --git a/src/alloc.c b/src/alloc.c
index 0b978ba..f505eda 100644
--- a/src/alloc.c
+++ b/src/alloc.c
@@ -2,33 +2,42 @@
// SPDX-License-Identifier: 0BSD
#include "alloc.h"
+
+#include "bfs.h"
#include "bit.h"
#include "diag.h"
#include "sanity.h"
+
#include <errno.h>
#include <stdlib.h>
+#include <stdint.h>
#include <string.h>
-/** Portable aligned_alloc()/posix_memalign(). */
+/** The largest possible allocation size. */
+#if PTRDIFF_MAX < SIZE_MAX / 2
+# define ALLOC_MAX ((size_t)PTRDIFF_MAX)
+#else
+# define ALLOC_MAX (SIZE_MAX / 2)
+#endif
+
+/** posix_memalign() wrapper. */
static void *xmemalign(size_t align, size_t size) {
bfs_assert(has_single_bit(align));
bfs_assert(align >= sizeof(void *));
- bfs_assert((size & (align - 1)) == 0);
-#if __APPLE__
+ // Since https://www.open-std.org/jtc1/sc22/wg14/www/docs/n2072.htm,
+ // aligned_alloc() doesn't require the size to be a multiple of align.
+ // But the sanitizers don't know about that yet, so always use
+ // posix_memalign().
void *ptr = NULL;
errno = posix_memalign(&ptr, align, size);
return ptr;
-#else
- return aligned_alloc(align, size);
-#endif
}
void *alloc(size_t align, size_t size) {
bfs_assert(has_single_bit(align));
- bfs_assert((size & (align - 1)) == 0);
- if (size >> (SIZE_WIDTH - 1)) {
+ if (size > ALLOC_MAX) {
errno = EOVERFLOW;
return NULL;
}
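
The new ALLOC_MAX bound replaces the old top-bit test (size >> (SIZE_WIDTH - 1)): objects larger than PTRDIFF_MAX make pointer subtraction undefined, and capping at SIZE_MAX / 2 keeps size-doubling arithmetic from wrapping. Callers see the cap as EOVERFLOW rather than a huge request reaching the allocator. A minimal usage sketch (the record type and n are hypothetical; the size multiplication itself is still the caller's job to check, e.g. with sizeof_array()):

    struct record { int id; char name[60]; };

    size_t n = 1000;
    // 64-byte-aligned array of records; sizes above ALLOC_MAX fail with
    // errno == EOVERFLOW, real allocation failures with errno == ENOMEM.
    struct record *records = alloc(64, n * sizeof(struct record));
    if (!records) {
        perror("alloc");
    }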
@@ -42,9 +51,8 @@ void *alloc(size_t align, size_t size) {
void *zalloc(size_t align, size_t size) {
bfs_assert(has_single_bit(align));
- bfs_assert((size & (align - 1)) == 0);
- if (size >> (SIZE_WIDTH - 1)) {
+ if (size > ALLOC_MAX) {
errno = EOVERFLOW;
return NULL;
}
@@ -60,6 +68,64 @@ void *zalloc(size_t align, size_t size) {
return ret;
}
+void *xrealloc(void *ptr, size_t align, size_t old_size, size_t new_size) {
+ bfs_assert(has_single_bit(align));
+
+ if (new_size == 0) {
+ free(ptr);
+ return NULL;
+ } else if (new_size > ALLOC_MAX) {
+ errno = EOVERFLOW;
+ return NULL;
+ }
+
+ if (align <= alignof(max_align_t)) {
+ return realloc(ptr, new_size);
+ }
+
+ // There is no aligned_realloc(), so reallocate and copy manually
+ void *ret = xmemalign(align, new_size);
+ if (!ret) {
+ return NULL;
+ }
+
+ size_t min_size = old_size < new_size ? old_size : new_size;
+ if (min_size) {
+ memcpy(ret, ptr, min_size);
+ }
+
+ free(ptr);
+ return ret;
+}
+
+void *reserve(void *ptr, size_t align, size_t size, size_t count) {
+ // No need to overflow-check the current size
+ size_t old_size = size * count;
+
+ // Capacity doubles whenever count reaches a power of two: 0→1, 1→2, 2→4, etc.
+ // If count is not a power of two, we are still in the same size class, so reuse ptr.
+ if (count & (count - 1)) {
+ // Tell sanitizers about the new array element
+ sanitize_resize(ptr, old_size, old_size + size, bit_ceil(count) * size);
+ errno = 0;
+ return ptr;
+ }
+
+ // No need to overflow-check 2 * old_size: the previous allocation
+ // succeeded, so old_size <= ALLOC_MAX <= SIZE_MAX / 2
+ size_t new_size = count ? 2 * old_size : size;
+ void *ret = xrealloc(ptr, align, old_size, new_size);
+ if (!ret) {
+ // errno is used to communicate success/failure to the RESERVE() macro
+ bfs_assert(errno != 0);
+ return ptr;
+ }
+
+ // Pretend we only allocated one more element
+ sanitize_resize(ret, new_size, old_size + size, new_size);
+ errno = 0;
+ return ret;
+}
+
/**
* An arena allocator chunk.
*/
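
reserve() grows an array by one element with amortized doubling: the capacity is bumped to the next power of two only when count itself is a power of two, and errno reports the outcome so a wrapper macro can both increment the count and return a pointer to the freshly reserved slot. Mirroring the call sites later in this diff, the intended pattern looks roughly like this (RESERVE() itself is declared in alloc.h, not shown here):

    size_t count = 0;
    int *array = NULL;

    // Reserve space for one more int; on success count is incremented,
    // on failure the old array and count survive untouched.
    int *elem = RESERVE(int, &array, &count);
    if (elem) {
        *elem = 42;
    } else {
        // handle allocation failure; errno is nonzero
    }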
@@ -89,7 +155,7 @@ static void chunk_set_next(const struct arena *arena, union chunk *chunk, union
void arena_init(struct arena *arena, size_t align, size_t size) {
bfs_assert(has_single_bit(align));
- bfs_assert((size & (align - 1)) == 0);
+ bfs_assert(is_aligned(align, size));
if (align < alignof(union chunk)) {
align = alignof(union chunk);
@@ -97,7 +163,7 @@ void arena_init(struct arena *arena, size_t align, size_t size) {
if (size < sizeof(union chunk)) {
size = sizeof(union chunk);
}
- bfs_assert((size & (align - 1)) == 0);
+ bfs_assert(is_aligned(align, size));
arena->chunks = NULL;
arena->nslabs = 0;
@@ -107,13 +173,8 @@ void arena_init(struct arena *arena, size_t align, size_t size) {
}
/** Allocate a new slab. */
+_cold
static int slab_alloc(struct arena *arena) {
- void **slabs = realloc(arena->slabs, sizeof_array(void *, arena->nslabs + 1));
- if (!slabs) {
- return -1;
- }
- arena->slabs = slabs;
-
// Make the initial allocation size ~4K
size_t size = 4096;
if (size < arena->size) {
@@ -130,6 +191,13 @@ static int slab_alloc(struct arena *arena) {
return -1;
}
+ // Grow the slab array
+ void **pslab = RESERVE(void *, &arena->slabs, &arena->nslabs);
+ if (!pslab) {
+ free(slab);
+ return -1;
+ }
+
// Fix the last chunk->next offset
void *last = (char *)slab + size - arena->size;
chunk_set_next(arena, last, arena->chunks);
@@ -137,7 +205,7 @@ static int slab_alloc(struct arena *arena) {
// We can rely on zero-initialized slabs, but others shouldn't
sanitize_uninit(slab, size);
- arena->chunks = arena->slabs[arena->nslabs++] = slab;
+ arena->chunks = *pslab = slab;
return 0;
}
@@ -160,6 +228,7 @@ void arena_free(struct arena *arena, void *ptr) {
union chunk *chunk = ptr;
chunk_set_next(arena, chunk, arena->chunks);
arena->chunks = chunk;
+ sanitize_uninit(chunk, arena->size);
sanitize_free(chunk, arena->size);
}
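
In arena_free(), the added sanitize_uninit() marks the recycled chunk as uninitialized for MSan before sanitize_free() poisons it for ASan, so both stale reads and use-after-free get caught. The sanitize_resize(ptr, old_size, new_size, capacity) helper used throughout this commit is also new; its shape suggests a wrapper over the sanitizers' contiguous-container annotation. A sketch of one plausible definition, assuming that interface (the real helper belongs to sanity.h):

    #include <sanitizer/common_interface_defs.h>

    // Plausible sketch: within a buffer [ptr, ptr + capacity), move the
    // valid/poisoned boundary from ptr + old_size to ptr + new_size.
    static void sanitize_resize(const void *ptr, size_t old_size, size_t new_size, size_t capacity) {
        const char *beg = ptr;
        __sanitizer_annotate_contiguous_container(
            beg, beg + capacity, beg + old_size, beg + new_size);
    }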
@@ -179,7 +248,7 @@ void arena_destroy(struct arena *arena) {
sanitize_uninit(arena);
}
-void varena_init(struct varena *varena, size_t align, size_t min, size_t offset, size_t size) {
+void varena_init(struct varena *varena, size_t align, size_t offset, size_t size) {
varena->align = align;
varena->offset = offset;
varena->size = size;
@@ -188,7 +257,7 @@ void varena_init(struct varena *varena, size_t align, size_t min, size_t offset,
// The smallest size class holds at least as many elements as fit in the
// smallest aligned allocation
- size_t min_count = (flex_size(align, min, offset, size, 1) - offset + size - 1) / size;
+ size_t min_count = (flex_size(align, offset, size, 1) - offset + size - 1) / size;
varena->shift = bit_width(min_count - 1);
}
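
With the min parameter gone, the smallest size class is derived purely from flex_size(align, offset, size, count), which computes the (alignment-padded) allocation size of a flexible-array struct from the array's offset, element size, and element count. A hypothetical call site under the new signature:

    #include <stdalign.h>
    #include <stddef.h>

    struct ichain {
        size_t length;
        int nums[]; // flexible array member
    };

    // Hypothetical: one varena managing ichains of varying lengths.
    struct varena varena;
    varena_init(&varena, alignof(struct ichain),
                offsetof(struct ichain, nums), sizeof(int));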
@@ -201,28 +270,23 @@ static size_t varena_size_class(struct varena *varena, size_t count) {
/** Get the exact size of a flexible struct. */
static size_t varena_exact_size(const struct varena *varena, size_t count) {
- return flex_size(varena->align, 0, varena->offset, varena->size, count);
+ return flex_size(varena->align, varena->offset, varena->size, count);
}
/** Get the arena for the given array length. */
static struct arena *varena_get(struct varena *varena, size_t count) {
size_t i = varena_size_class(varena, count);
- if (i >= varena->narenas) {
- size_t narenas = i + 1;
- struct arena *arenas = realloc(varena->arenas, sizeof_array(struct arena, narenas));
- if (!arenas) {
+ while (i >= varena->narenas) {
+ size_t j = varena->narenas;
+ struct arena *arena = RESERVE(struct arena, &varena->arenas, &varena->narenas);
+ if (!arena) {
return NULL;
}
- for (size_t j = varena->narenas; j < narenas; ++j) {
- size_t shift = j + varena->shift;
- size_t size = varena_exact_size(varena, (size_t)1 << shift);
- arena_init(&arenas[j], varena->align, size);
- }
-
- varena->narenas = narenas;
- varena->arenas = arenas;
+ size_t shift = j + varena->shift;
+ size_t size = varena_exact_size(varena, (size_t)1 << shift);
+ arena_init(arena, varena->align, size);
}
return &varena->arenas[i];
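
Size-class arenas are now created one at a time via RESERVE() instead of a bulk realloc(), and arena i is sized for exactly 1 << (i + shift) elements. That is consistent with varena_size_class() (defined just above this hunk) mapping a count to its doubling bucket, presumably along these lines:

    // Assumed semantics, matching the 1 << (j + shift) sizing above:
    // counts up to 1 << shift map to class 0, and each class doubles.
    static size_t varena_size_class(struct varena *varena, size_t count) {
        return bit_width((count - 1) >> varena->shift);
    }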
@@ -240,8 +304,7 @@ void *varena_alloc(struct varena *varena, size_t count) {
}
// Tell the sanitizers the exact size of the allocated struct
- sanitize_free(ret, arena->size);
- sanitize_alloc(ret, varena_exact_size(varena, count));
+ sanitize_resize(ret, arena->size, varena_exact_size(varena, count), arena->size);
return ret;
}
@@ -253,15 +316,14 @@ void *varena_realloc(struct varena *varena, void *ptr, size_t old_count, size_t
return NULL;
}
- size_t new_exact_size = varena_exact_size(varena, new_count);
- size_t old_exact_size = varena_exact_size(varena, old_count);
+ size_t old_size = old_arena->size;
+ size_t new_size = new_arena->size;
if (new_arena == old_arena) {
- if (new_count < old_count) {
- sanitize_free((char *)ptr + new_exact_size, old_exact_size - new_exact_size);
- } else if (new_count > old_count) {
- sanitize_alloc((char *)ptr + old_exact_size, new_exact_size - old_exact_size);
- }
+ sanitize_resize(ptr,
+ varena_exact_size(varena, old_count),
+ varena_exact_size(varena, new_count),
+ new_size);
return ptr;
}
@@ -270,16 +332,18 @@ void *varena_realloc(struct varena *varena, void *ptr, size_t old_count, size_t
return NULL;
}
- size_t old_size = old_arena->size;
- sanitize_alloc((char *)ptr + old_exact_size, old_size - old_exact_size);
+ // Non-sanitized builds don't bother computing exact sizes, and just use
+ // the potentially-larger arena size for each size class instead. To
+ // allow the memcpy() below to work with these less-precise sizes, expand
+ // the old allocation to its full capacity.
+ sanitize_resize(ptr, varena_exact_size(varena, old_count), old_size, old_size);
- size_t new_size = new_arena->size;
size_t min_size = new_size < old_size ? new_size : old_size;
memcpy(ret, ptr, min_size);
arena_free(old_arena, ptr);
- sanitize_free((char *)ret + new_exact_size, new_size - new_exact_size);
+ sanitize_resize(ret, new_size, varena_exact_size(varena, new_count), new_size);
return ret;
}
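
The net effect: varena_realloc() copies memory only when the count crosses a size-class boundary; within a class it merely re-annotates the valid region for the sanitizers. Reusing the hypothetical ichain from above, a caller growing an array by one element would write:

    // Grow a chain from 8 to 9 trailing ints.  If 8 and 9 land in
    // different size classes, the struct moves to a larger arena;
    // otherwise only the sanitizer annotations change.
    struct ichain *chain = varena_alloc(&varena, 8);
    if (chain) {
        chain = varena_realloc(&varena, chain, 8, 9);
    }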