author    Tavian Barnes <tavianator@tavianator.com>  2023-06-19 13:43:46 -0400
committer Tavian Barnes <tavianator@tavianator.com>  2023-06-20 14:26:09 -0400
commit    eb83f2a91a615c5fa3788d41c3ec80b43bf5ed28 (patch)
tree      089ba5ee90f6b0e2805c6b8cfe2378a5c9d045b7 /src/alloc.c
parent    90ded13e589b0089167ef25ca3d26be599dfec9b (diff)
alloc: Implement an arena allocator
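The new interface is arena_init(), arena_alloc(), arena_free(), and
arena_destroy(), presumably declared in alloc.h (not shown here, since this
diff is limited to src/alloc.c). A minimal usage sketch; struct node and
example() are hypothetical caller-side names, not part of this commit:

    #include "alloc.h"

    #include <stdalign.h>

    // A hypothetical object type that gets allocated a lot
    struct node {
        struct node *next;
        int value;
    };

    int example(void) {
        // One arena hands out fixed-size chunks sized and aligned for struct node
        struct arena nodes;
        arena_init(&nodes, alignof(struct node), sizeof(struct node));

        struct node *n = arena_alloc(&nodes); // NULL on allocation failure
        if (!n) {
            arena_destroy(&nodes);
            return -1;
        }
        n->value = 42;

        arena_free(&nodes, n);  // push the chunk back onto the free list
        arena_destroy(&nodes);  // free every slab (and thus every chunk) at once
        return 0;
    }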
Diffstat (limited to 'src/alloc.c')
-rw-r--r--  src/alloc.c  111

1 file changed, 111 insertions, 0 deletions
diff --git a/src/alloc.c b/src/alloc.c
index 0003108..b437975 100644
--- a/src/alloc.c
+++ b/src/alloc.c
@@ -4,6 +4,7 @@
#include "alloc.h"
#include "bit.h"
#include "diag.h"
+#include "sanity.h"
#include <errno.h>
#include <stdlib.h>
#include <string.h>
@@ -48,3 +49,113 @@ void *zalloc(size_t align, size_t size) {
	}
	return ret;
}
+
+/**
+ * An arena allocator chunk.
+ */
+union chunk {
+	/**
+	 * Free chunks are stored in a singly linked list. The pointer to the
+	 * next chunk is represented by an offset from the chunk immediately
+	 * after this one in memory, so that zalloc() correctly initializes a
+	 * linked list of chunks (except for the last one).
+	 */
+	uintptr_t next;
+
+	// char object[];
+};
+
+/** Decode the next chunk. */
+static union chunk *chunk_next(const struct arena *arena, const union chunk *chunk) {
+	uintptr_t base = (uintptr_t)chunk + arena->size;
+	return (union chunk *)(base + chunk->next);
+}
+
+/** Encode the next chunk. */
+static void chunk_set_next(const struct arena *arena, union chunk *chunk, union chunk *next) {
+	uintptr_t base = (uintptr_t)chunk + arena->size;
+	chunk->next = (uintptr_t)next - base;
+}
+
+void arena_init(struct arena *arena, size_t align, size_t size) {
+	bfs_assert(has_single_bit(align));
+	bfs_assert((size & (align - 1)) == 0);
+
+	if (align < alignof(union chunk)) {
+		align = alignof(union chunk);
+	}
+	if (size < sizeof(union chunk)) {
+		size = sizeof(union chunk);
+	}
+	bfs_assert((size & (align - 1)) == 0);
+
+	arena->chunks = NULL;
+	arena->nslabs = 0;
+	arena->slabs = NULL;
+	arena->align = align;
+	arena->size = size;
+}
+
+/** Allocate a new slab. */
+static int slab_alloc(struct arena *arena) {
+	void **slabs = realloc(arena->slabs, sizeof_array(void *, arena->nslabs + 1));
+	if (!slabs) {
+		return -1;
+	}
+	arena->slabs = slabs;
+
+	// Make the initial allocation size ~4K
+	size_t size = 4096;
+	if (size < arena->size) {
+		size = arena->size;
+	}
+	// Trim off the excess
+	size -= size % arena->size;
+	// Double the size for every slab
+	size <<= arena->nslabs;
+
+	// Allocate the slab
+	void *slab = zalloc(arena->align, size);
+	if (!slab) {
+		return -1;
+	}
+
+	// Fix the last chunk->next offset
+	void *last = (char *)slab + size - arena->size;
+	chunk_set_next(arena, last, arena->chunks);
+
+	// We can rely on the slab being zero-initialized, but other code
+	// shouldn't, so poison it for the sanitizers
+	sanitize_uninit(slab, size);
+
+	arena->chunks = arena->slabs[arena->nslabs++] = slab;
+	return 0;
+}
+
+void *arena_alloc(struct arena *arena) {
+	if (!arena->chunks && slab_alloc(arena) != 0) {
+		return NULL;
+	}
+
+	union chunk *chunk = arena->chunks;
+	sanitize_alloc(chunk, arena->size);
+
+	sanitize_init(chunk);
+	arena->chunks = chunk_next(arena, chunk);
+	sanitize_uninit(chunk, arena->size);
+
+	return chunk;
+}
+
+void arena_free(struct arena *arena, void *ptr) {
+	union chunk *chunk = ptr;
+	chunk_set_next(arena, chunk, arena->chunks);
+	arena->chunks = chunk;
+	sanitize_free(chunk, arena->size);
+}
+
+void arena_destroy(struct arena *arena) {
+	for (size_t i = 0; i < arena->nslabs; ++i) {
+		free(arena->slabs[i]);
+	}
+	sanitize_uninit(arena);
+}
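
A note on the offset encoding above: chunk->next stores the next free chunk as
an offset from the end of the current chunk, so a zero-filled chunk decodes to
the chunk immediately following it in the slab. That is why zalloc()'s zeroed
memory already forms most of the free list. With hypothetical numbers, say
arena->size == 16 and a slab starting at address 0x1000, the zero-initialized
chunk at 0x1000 decodes as

    base = (uintptr_t)chunk + arena->size = 0x1000 + 16 = 0x1010
    next = base + chunk->next             = 0x1010 + 0  = 0x1010

i.e. the chunk right after it. Only the last chunk in the slab needs
chunk_set_next() to splice in the previous free list (or NULL), which is
exactly what slab_alloc() does.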
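
Slab sizes also grow geometrically: slab_alloc() starts from ~4 KiB (or one
chunk, whichever is larger), rounds down to a whole number of chunks, then
doubles that size for every slab already allocated. For example, with a
hypothetical arena->size of 24 bytes:

    slab 0: 4096 - (4096 % 24) = 4080 bytes  -> 170 chunks
    slab 1: 4080 << 1          = 8160 bytes  -> 340 chunks
    slab 2: 4080 << 2          = 16320 bytes -> 680 chunks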