author     Tavian Barnes <tavianator@tavianator.com>   2023-06-19 13:43:46 -0400
committer  Tavian Barnes <tavianator@tavianator.com>   2023-06-20 14:26:09 -0400
commit     eb83f2a91a615c5fa3788d41c3ec80b43bf5ed28
tree       089ba5ee90f6b0e2805c6b8cfe2378a5c9d045b7
parent     90ded13e589b0089167ef25ca3d26be599dfec9b
alloc: Implement an arena allocator
 src/alloc.c | 111 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 src/alloc.h |  44 ++++++++++++++++++++
 2 files changed, 155 insertions(+), 0 deletions(-)
diff --git a/src/alloc.c b/src/alloc.c
index 0003108..b437975 100644
--- a/src/alloc.c
+++ b/src/alloc.c
@@ -4,6 +4,7 @@
#include "alloc.h"
#include "bit.h"
#include "diag.h"
+#include "sanity.h"
#include <errno.h>
#include <stdlib.h>
#include <string.h>
@@ -48,3 +49,113 @@ void *zalloc(size_t align, size_t size) {
}
return ret;
}
+
+/**
+ * An arena allocator chunk.
+ */
+union chunk {
+	/**
+	 * Free chunks are stored in a singly linked list. The pointer to the
+	 * next chunk is represented by an offset from the chunk immediately
+	 * after this one in memory, so that zalloc() correctly initializes a
+	 * linked list of chunks (except for the last one).
+	 */
+	uintptr_t next;
+
+	// char object[];
+};
+
+/** Decode the next chunk. */
+static union chunk *chunk_next(const struct arena *arena, const union chunk *chunk) {
+	uintptr_t base = (uintptr_t)chunk + arena->size;
+	return (union chunk *)(base + chunk->next);
+}
+
+/** Encode the next chunk. */
+static void chunk_set_next(const struct arena *arena, union chunk *chunk, union chunk *next) {
+	uintptr_t base = (uintptr_t)chunk + arena->size;
+	chunk->next = (uintptr_t)next - base;
+}
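+
+// Example: in a slab fresh from zalloc(), every chunk's next field is 0, so
+// chunk_next() returns (union chunk *)((uintptr_t)chunk + arena->size), the
+// chunk immediately after it in memory.  Zeroed memory therefore already
+// encodes a free list of consecutive chunks.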
+
+void arena_init(struct arena *arena, size_t align, size_t size) {
+	bfs_assert(has_single_bit(align));
+	bfs_assert((size & (align - 1)) == 0);
+
+	if (align < alignof(union chunk)) {
+		align = alignof(union chunk);
+	}
+	if (size < sizeof(union chunk)) {
+		size = sizeof(union chunk);
+	}
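+	// e.g. ARENA_INIT(&arena, short) bumps both align and size from 2 up
+	// to 8, the alignment and size of union chunk on typical 64-bit
+	// targets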
+	bfs_assert((size & (align - 1)) == 0);
+
+	arena->chunks = NULL;
+	arena->nslabs = 0;
+	arena->slabs = NULL;
+	arena->align = align;
+	arena->size = size;
+}
+
+/** Allocate a new slab. */
+static int slab_alloc(struct arena *arena) {
+	void **slabs = realloc(arena->slabs, sizeof_array(void *, arena->nslabs + 1));
+	if (!slabs) {
+		return -1;
+	}
+	arena->slabs = slabs;
+
+	// Make the initial allocation size ~4K
+	size_t size = 4096;
+	if (size < arena->size) {
+		size = arena->size;
+	}
+	// Trim off the excess
+	size -= size % arena->size;
+	// Double the size for every slab
+	size <<= arena->nslabs;
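+	// e.g. with arena->size == 24: slab 0 holds 4080 bytes (170 chunks),
+	// slab 1 holds 8160 bytes, slab 2 holds 16320 bytes, ...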
+
+	// Allocate the slab
+	void *slab = zalloc(arena->align, size);
+	if (!slab) {
+		return -1;
+	}
+
+	// Fix the last chunk->next offset
+	void *last = (char *)slab + size - arena->size;
+	chunk_set_next(arena, last, arena->chunks);
+
+	// We can rely on zero-initialized slabs, but others shouldn't
+	sanitize_uninit(slab, size);
+
+	arena->chunks = arena->slabs[arena->nslabs++] = slab;
+	return 0;
+}
+
+void *arena_alloc(struct arena *arena) {
+	if (!arena->chunks && slab_alloc(arena) != 0) {
+		return NULL;
+	}
+
+	union chunk *chunk = arena->chunks;
+	sanitize_alloc(chunk, arena->size);
+
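+	// Mark just the next pointer as initialized so chunk_next() can read
+	// it, then hand the caller a fully uninitialized chunk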
+	sanitize_init(chunk);
+	arena->chunks = chunk_next(arena, chunk);
+	sanitize_uninit(chunk, arena->size);
+
+	return chunk;
+}
+
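+// Note: freed chunks go onto the head of the free list, so the next
+// arena_alloc() will return the most recently freed chunk (LIFO order)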
+void arena_free(struct arena *arena, void *ptr) {
+	union chunk *chunk = ptr;
+	chunk_set_next(arena, chunk, arena->chunks);
+	arena->chunks = chunk;
+	sanitize_free(chunk, arena->size);
+}
+
+void arena_destroy(struct arena *arena) {
+	for (size_t i = 0; i < arena->nslabs; ++i) {
+		free(arena->slabs[i]);
+	}
+	// Free the slab array itself, not just the individual slabs
+	free(arena->slabs);
+	sanitize_uninit(arena);
+}
diff --git a/src/alloc.h b/src/alloc.h
index 899a4ec..65edb92 100644
--- a/src/alloc.h
+++ b/src/alloc.h
@@ -146,4 +146,48 @@ void *zalloc(size_t align, size_t size);
#define ZALLOC_FLEX(type, member, count) \
	(type *)zalloc(alignof(type), sizeof_flex(type, member, count))
+/**
+ * An arena allocator for fixed-size types.
+ *
+ * Arena allocators are intentionally not thread safe.
+ */
+struct arena {
+	/** The list of free chunks. */
+	void *chunks;
+	/** The number of allocated slabs. */
+	size_t nslabs;
+	/** The array of slabs. */
+	void **slabs;
+	/** Chunk alignment. */
+	size_t align;
+	/** Chunk size. */
+	size_t size;
+};
+
+/**
+ * Initialize an arena for chunks of the given size and alignment.
+ */
+void arena_init(struct arena *arena, size_t align, size_t size);
+
+/**
+ * Initialize an arena for the given type.
+ */
+#define ARENA_INIT(arena, type) \
+	arena_init((arena), alignof(type), sizeof(type))
+
+/**
+ * Allocate an object out of the arena.
+ */
+void *arena_alloc(struct arena *arena);
+
+/**
+ * Free an object from the arena.
+ */
+void arena_free(struct arena *arena, void *ptr);
+
+/**
+ * Destroy an arena, freeing all allocations.
+ */
+void arena_destroy(struct arena *arena);
+
#endif // BFS_ALLOC_H
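
A minimal usage sketch of the new API (illustrative only, not from this patch; struct point is a hypothetical caller-defined type):

	#include "alloc.h"

	struct point {
		int x, y;
	};

	int main(void) {
		struct arena arena;
		ARENA_INIT(&arena, struct point);

		// Chunks are handed out from the free list, which grows on demand
		struct point *p = arena_alloc(&arena);
		if (!p) {
			return 1;
		}
		p->x = 1;
		p->y = 2;

		// Freeing pushes the chunk back onto the free list for reuse
		arena_free(&arena, p);

		// Destroying the arena frees every slab, and thus every chunk
		arena_destroy(&arena);
		return 0;
	}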