// Copyright © Tavian Barnes <tavianator@tavianator.com>
// SPDX-License-Identifier: 0BSD

/**
 * Memory allocation.
 */

#ifndef BFS_ALLOC_H
#define BFS_ALLOC_H

#include "bfs.h"

#include <errno.h>
#include <stddef.h>
#include <stdlib.h>

#define IS_ALIGNED(align, size) \
	(((size) & ((align) - 1)) == 0)

/** Check if a size is properly aligned. */
static inline bool is_aligned(size_t align, size_t size) {
	return IS_ALIGNED(align, size);
}

#define ALIGN_FLOOR(align, size) \
	((size) & ~((align) - 1))

/** Round down to a multiple of an alignment. */
static inline size_t align_floor(size_t align, size_t size) {
	return ALIGN_FLOOR(align, size);
}

#define ALIGN_CEIL(align, size) \
	((((size) - 1) | ((align) - 1)) + 1)

/** Round up to a multiple of an alignment. */
static inline size_t align_ceil(size_t align, size_t size) {
	return ALIGN_CEIL(align, size);
}
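
/*
 * Illustrative behavior (these bit tricks assume align is a power of two):
 *
 *     align_floor(8, 13) == 8
 *     align_ceil(8, 13)  == 16
 *     is_aligned(8, 16)  == true
 */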

/**
 * Saturating size addition.
 */
static inline size_t size_add(size_t lhs, size_t rhs) {
	size_t ret = lhs + rhs;
	return ret >= lhs ? ret : (size_t)-1;
}

/**
 * Saturating size multiplication.
 */
static inline size_t size_mul(size_t size, size_t count) {
	size_t ret = size * count;
	return ret / size == count ? ret : (size_t)-1;
}

/** Saturating array sizeof. */
#define sizeof_array(type, count) \
	size_mul(sizeof(type), count)
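
/*
 * Illustrative behavior: on overflow, these saturate to SIZE_MAX (from
 * <stdint.h>) rather than wrapping, so a later allocation of the result
 * fails cleanly instead of being too small:
 *
 *     size_add(SIZE_MAX - 1, 2) == SIZE_MAX
 *     size_mul(SIZE_MAX / 2, 3) == SIZE_MAX
 *     sizeof_array(int, 100)    == 100 * sizeof(int)
 */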

/** Size of a struct/union field. */
#define sizeof_member(type, member) \
	sizeof(((type *)NULL)->member)

/**
 * @internal
 * Our flexible struct size calculations assume that structs have the minimum
 * trailing padding to align the type properly.  A pathological ABI that adds
 * extra padding would result in us under-allocating space for those structs,
 * so we static_assert() that no such padding exists.
 */
#define ASSERT_FLEX_ABI(type, member) \
	ASSERT_FLEX_ABI_( \
		ALIGN_CEIL(alignof(type), offsetof(type, member)) >= sizeof(type), \
		"Unexpected tail padding in " #type)

/**
 * @internal
 * The contortions here allow static_assert() to be used in expressions, rather
 * than just declarations.
 */
#define ASSERT_FLEX_ABI_(...) \
	((void)sizeof(struct { char _; static_assert(__VA_ARGS__); }))

/**
 * Saturating flexible struct size.
 *
 * @align
 *         Struct alignment.
 * @offset
 *         Flexible array member offset.
 * @size
 *         Flexible array element size.
 * @count
 *         Flexible array element count.
 * @return
 *         The size of the struct with count flexible array elements.  Saturates
 *         to the maximum aligned value on overflow.
 */
static inline size_t flex_size(size_t align, size_t offset, size_t size, size_t count) {
	size_t ret = size_mul(size, count);
	ret = size_add(ret, offset + align - 1);
	ret = align_floor(align, ret);
	return ret;
}
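
/*
 * Worked example (illustrative): with align == 8, offset == 12, size == 4,
 * and count == 3, the flexible array needs 12 bytes after the 12-byte header:
 *
 *     flex_size(8, 12, 4, 3)
 *         == align_floor(8, 12 + 12 + 7)
 *         == 24
 *
 * On overflow, size_add() saturates to SIZE_MAX, and align_floor() rounds
 * that down to the largest aligned value.
 */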

/**
 * Computes the size of a flexible struct.
 *
 * @type
 *         The type of the struct containing the flexible array.
 * @member
 *         The name of the flexible array member.
 * @count
 *         The length of the flexible array.
 * @return
 *         The size of the struct with count flexible array elements.  Saturates
 *         to the maximum aligned value on overflow.
 */
#define sizeof_flex(type, member, count) \
	(ASSERT_FLEX_ABI(type, member), flex_size( \
		alignof(type), offsetof(type, member), sizeof_member(type, member[0]), count))
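
/*
 * Illustrative usage (struct name here is a hypothetical example type, not
 * part of this header):
 *
 *     struct name {
 *         size_t len;
 *         char str[];
 *     };
 *
 *     // Space for the header plus 10 chars, padded to alignof(struct name)
 *     size_t size = sizeof_flex(struct name, str, 10);
 */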

/**
 * General memory allocator.
 *
 * @align
 *         The required alignment.
 * @size
 *         The size of the allocation.
 * @return
 *         The allocated memory, or NULL on failure.
 */
_malloc(free, 1)
_aligned_alloc(1, 2)
void *alloc(size_t align, size_t size);

/**
 * Zero-initialized memory allocator.
 *
 * @align
 *         The required alignment.
 * @size
 *         The size of the allocation.
 * @return
 *         The allocated memory, or NULL on failure.
 */
_malloc(free, 1)
_aligned_alloc(1, 2)
void *zalloc(size_t align, size_t size);

/** Allocate memory for the given type. */
#define ALLOC(type) \
	(type *)alloc(alignof(type), sizeof(type))

/** Allocate zeroed memory for the given type. */
#define ZALLOC(type) \
	(type *)zalloc(alignof(type), sizeof(type))

/** Allocate memory for an array. */
#define ALLOC_ARRAY(type, count) \
	(type *)alloc(alignof(type), sizeof_array(type, count))

/** Allocate zeroed memory for an array. */
#define ZALLOC_ARRAY(type, count) \
	(type *)zalloc(alignof(type), sizeof_array(type, count))

/** Allocate memory for a flexible struct. */
#define ALLOC_FLEX(type, member, count) \
	(type *)alloc(alignof(type), sizeof_flex(type, member, count))

/** Allocate zeroed memory for a flexible struct. */
#define ZALLOC_FLEX(type, member, count) \
	(type *)zalloc(alignof(type), sizeof_flex(type, member, count))
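
/*
 * Illustrative usage (continuing the hypothetical struct name from above):
 *
 *     int *one = ALLOC(int);
 *     int *ten = ZALLOC_ARRAY(int, 10);
 *     struct name *name = ALLOC_FLEX(struct name, str, 10);
 *
 *     // Each of these allocations is released with free()
 *     free(one);
 *     free(ten);
 *     free(name);
 */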

/**
 * Alignment-aware realloc().
 *
 * @ptr
 *         The pointer to reallocate.
 * @align
 *         The required alignment.
 * @old_size
 *         The previous allocation size.
 * @new_size
 *         The new allocation size.
 * @return
 *         The reallocated memory, or NULL on failure.
 */
_aligned_alloc(2, 4)
_nodiscard
void *xrealloc(void *ptr, size_t align, size_t old_size, size_t new_size);

/** Reallocate memory for an array. */
#define REALLOC_ARRAY(type, ptr, old_count, new_count) \
	(type *)xrealloc((ptr), alignof(type), sizeof_array(type, old_count), sizeof_array(type, new_count))

/** Reallocate memory for a flexible struct. */
#define REALLOC_FLEX(type, member, ptr, old_count, new_count) \
	(type *)xrealloc((ptr), alignof(type), sizeof_flex(type, member, old_count), sizeof_flex(type, member, new_count))
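
/*
 * Illustrative sketch of growing an array (assuming realloc()-like semantics,
 * i.e. the old allocation remains valid if the call fails):
 *
 *     int *ints = ALLOC_ARRAY(int, 10);
 *     ...
 *     int *bigger = REALLOC_ARRAY(int, ints, 10, 20);
 *     if (bigger) {
 *         ints = bigger;
 *     }
 */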

/**
 * Reserve space for one more element in a dynamic array.
 *
 * @ptr
 *         The pointer to reallocate.
 * @align
 *         The required alignment.
 * @size
 *         The size of each array element.
 * @count
 *         The current size of the array.
 * @return
 *         The reallocated memory, on both success *and* failure.  On success,
 *         errno will be set to zero, and the returned pointer will have room
 *         for (count + 1) elements.  On failure, errno will be non-zero, and
 *         ptr will be returned unchanged.
 */
_nodiscard
void *reserve(void *ptr, size_t align, size_t size, size_t count);

/**
 * Convenience macro to grow a dynamic array.
 *
 * @type
 *         The array element type.
 * @type **ptr
 *         A pointer to the array.
 * @size_t *count
 *         A pointer to the array's size.
 * @return
 *         On success, a pointer to the newly reserved array element, i.e.
 *         `*ptr + *count++`.  On failure, NULL is returned, and both *ptr and
 *         *count remain unchanged.
 */
#define RESERVE(type, ptr, count) \
	((*ptr) = reserve((*ptr), alignof(type), sizeof(type), (*count)), \
	 errno ? NULL : (*ptr) + (*count)++)
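
/*
 * Illustrative usage, appending to a dynamic array (names are hypothetical):
 *
 *     int *array = NULL;
 *     size_t count = 0;
 *
 *     int *elem = RESERVE(int, &array, &count);
 *     if (elem) {
 *         *elem = 42;  // count is now 1
 *     }
 */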

/**
 * An arena allocator for fixed-size types.
 *
 * Arena allocators are intentionally not thread safe.
 */
struct arena {
	/** The number of allocated slabs. */
	size_t nslabs;
	/** The array of slabs. */
	struct slab **slabs;
	/** Chunk alignment. */
	size_t align;
	/** Chunk size. */
	size_t size;
};

/**
 * Initialize an arena for chunks of the given size and alignment.
 */
void arena_init(struct arena *arena, size_t align, size_t size);

/**
 * Initialize an arena for the given type.
 */
#define ARENA_INIT(arena, type) \
	arena_init((arena), alignof(type), sizeof(type))

/**
 * Free an object from the arena.
 */
void arena_free(struct arena *arena, void *ptr);

/**
 * Allocate an object out of the arena.
 */
_malloc(arena_free, 2)
void *arena_alloc(struct arena *arena);

/**
 * Free all allocations from an arena.
 */
void arena_clear(struct arena *arena);

/**
 * Destroy an arena, freeing all allocations.
 */
void arena_destroy(struct arena *arena);
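
/*
 * Illustrative arena lifecycle:
 *
 *     struct arena arena;
 *     ARENA_INIT(&arena, struct timespec);
 *
 *     struct timespec *ts = arena_alloc(&arena);
 *     ...
 *     arena_free(&arena, ts);   // free a single object, or ...
 *     arena_destroy(&arena);    // ... free everything at once
 */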

/**
 * Loop over every allocated object in an arena.
 *
 * @type
 *         The object type.
 * @item
 *         The induction variable name.
 * @arena
 *         The arena to loop over.
 */
#define for_arena(type, item, arena) \
	for_arena_ (type, _i_##item, item, (arena))

#define for_arena_(type, i, item, arena) \
	for (size_t i = 0; i < arena->nslabs; ++i) \
		for (type *item = NULL; (item = slab_next(arena->slabs[i], item));)
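
/*
 * Illustrative iteration (continuing the arena sketch above):
 *
 *     for_arena (struct timespec, ts, &arena) {
 *         ts->tv_nsec = 0;
 *     }
 */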

/**
 * @internal
 * Get the next allocated object in a slab.
 */
void *slab_next(struct slab *slab, void *ptr);

/**
 * An arena allocator for flexibly-sized types.
 */
struct varena {
	/** The alignment of the struct. */
	size_t align;
	/** The offset of the flexible array. */
	size_t offset;
	/** The size of the flexible array elements. */
	size_t size;
	/** Shift amount for the smallest size class. */
	size_t shift;
	/** The number of arenas of different sizes. */
	size_t narenas;
	/** The array of differently-sized arenas. */
	struct arena *arenas;
};

/**
 * Initialize a varena for a struct with the given layout.
 *
 * @varena
 *         The varena to initialize.
 * @align
 *         alignof(type)
 * @offset
 *         offsetof(type, flexible_array)
 * @size
 *         sizeof(flexible_array[i])
 */
void varena_init(struct varena *varena, size_t align, size_t offset, size_t size);

/**
 * Initialize a varena for the given type and flexible array.
 *
 * @varena
 *         The varena to initialize.
 * @type
 *         A struct type containing a flexible array.
 * @member
 *         The name of the flexible array member.
 */
#define VARENA_INIT(varena, type, member) \
	(ASSERT_FLEX_ABI(type, member), varena_init( \
		varena, alignof(type), offsetof(type, member), sizeof_member(type, member[0])))

/**
 * Free an arena-allocated flexible struct.
 *
 * @varena
 *         The varena that allocated the object.
 * @ptr
 *         The object to free.
 */
void varena_free(struct varena *varena, void *ptr);

/**
 * Arena-allocate a flexible struct.
 *
 * @varena
 *         The varena to allocate from.
 * @count
 *         The length of the flexible array.
 * @return
 *         The allocated struct, or NULL on failure.
 */
_malloc(varena_free, 2)
void *varena_alloc(struct varena *varena, size_t count);

/**
 * Resize a flexible struct.
 *
 * @varena
 *         The varena to allocate from.
 * @ptr
 *         The object to resize.
 * @count
 *         The new array length.
 * @return
 *         The resized struct, or NULL on failure.
 */
_nodiscard
void *varena_realloc(struct varena *varena, void *ptr, size_t count);

/**
 * Grow a flexible struct by an arbitrary amount.
 *
 * @varena
 *         The varena to allocate from.
 * @ptr
 *         The object to resize.
 * @count[in,out]
 *         Pointer to the flexible array length.
 * @return
 *         The resized struct, or NULL on failure.
 */
_nodiscard
void *varena_grow(struct varena *varena, void *ptr, size_t *count);

/**
 * Free all allocations from a varena.
 */
void varena_clear(struct varena *varena);

/**
 * Destroy a varena, freeing all allocations.
 */
void varena_destroy(struct varena *varena);
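
/*
 * Illustrative varena lifecycle (reusing the hypothetical struct name from
 * above):
 *
 *     struct varena varena;
 *     VARENA_INIT(&varena, struct name, str);
 *
 *     struct name *name = varena_alloc(&varena, 16);
 *     struct name *bigger = varena_realloc(&varena, name, 64);
 *     if (bigger) {
 *         name = bigger;
 *     }
 *     ...
 *     varena_destroy(&varena);
 */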

/**
 * Loop over every allocated object in a varena.
 *
 * @type
 *         The object type.
 * @item
 *         The induction variable name.
 * @varena
 *         The varena to loop over.
 */
#define for_varena(type, item, varena) \
	for_varena_ (type, _j_##item, item, (varena))

#define for_varena_(type, j, item, varena) \
	for (size_t j = 0; j < varena->narenas; ++j) \
		for_arena (type, item, &varena->arenas[j])

#endif // BFS_ALLOC_H