author     Tavian Barnes <tavianator@tavianator.com>    2014-06-02 00:05:18 -0400
committer  Tavian Barnes <tavianator@tavianator.com>    2014-06-02 00:05:18 -0400
commit     9cc0cdecd8ac7f504400596d0227ca9033d87af9 (patch)
tree       b418604ab02420bfec1fab382ec017f518514a8d /libdimension
parent     aca752532bf343a31268d96f99d431c77842eff5 (diff)
download   dimension-9cc0cdecd8ac7f504400596d0227ca9033d87af9.tar.xz
Use C11 atomics.
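
This commit moves the allocation counter and the pool's global block chain from GCC's legacy __sync builtins (plus a pthread mutex) to the standard C11 <stdatomic.h> interface. A minimal standalone sketch of the counter half of the change, using a hypothetical counter rather than the dmnsn_allocs variable from the diff:

    #include <stdatomic.h>
    #include <stddef.h>

    static atomic_size_t counter = 0;    /* was: static size_t counter = 0; */

    void touch(void)
    {
      /* was: __sync_fetch_and_add(&counter, 1); */
      atomic_fetch_add(&counter, 1);     /* sequentially consistent read-modify-write */
    }

    size_t peek(void)
    {
      /* a relaxed load is enough for a best-effort diagnostic read */
      return atomic_load_explicit(&counter, memory_order_relaxed);
    }

The call sites stay nearly identical: atomic_fetch_add and atomic_fetch_sub default to sequentially consistent ordering, so for this counter they are drop-in replacements for the __sync builtins.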
Diffstat (limited to 'libdimension')
-rw-r--r--   libdimension/malloc.c | 11
-rw-r--r--   libdimension/pool.c   | 22
2 files changed, 16 insertions, 17 deletions
diff --git a/libdimension/malloc.c b/libdimension/malloc.c
index d7816c3..6868802 100644
--- a/libdimension/malloc.c
+++ b/libdimension/malloc.c
@@ -26,9 +26,10 @@
 #include "dimension-internal.h"
 #include <stdlib.h>
 #include <string.h>
+#include <stdatomic.h>
 
 #ifndef NDEBUG
-static size_t dmnsn_allocs = 0;
+static atomic_size_t dmnsn_allocs = 0;
 #endif
 
 void *
@@ -40,7 +41,7 @@ dmnsn_malloc(size_t size)
   }
 
 #ifndef NDEBUG
-  __sync_fetch_and_add(&dmnsn_allocs, 1);
+  atomic_fetch_add(&dmnsn_allocs, 1);
 #endif
 
   return ptr;
@@ -51,7 +52,7 @@ dmnsn_realloc(void *ptr, size_t size)
 {
 #ifndef NDEBUG
   if (!ptr) {
-    __sync_fetch_and_add(&dmnsn_allocs, 1);
+    atomic_fetch_add(&dmnsn_allocs, 1);
   }
 #endif
 
@@ -75,7 +76,7 @@ dmnsn_free(void *ptr)
 {
 #ifndef NDEBUG
   if (ptr) {
-    __sync_fetch_and_sub(&dmnsn_allocs, 1);
+    atomic_fetch_sub(&dmnsn_allocs, 1);
   }
 #endif
 
@@ -86,7 +87,7 @@ dmnsn_free(void *ptr)
 DMNSN_LATE_DESTRUCTOR static void
 dmnsn_leak_check(void)
 {
-  if (dmnsn_allocs > 0) {
+  if (atomic_load_explicit(&dmnsn_allocs, memory_order_relaxed) > 0) {
     dmnsn_warning("Leaking memory.");
   }
 }
diff --git a/libdimension/pool.c b/libdimension/pool.c
index db6be14..c969bca 100644
--- a/libdimension/pool.c
+++ b/libdimension/pool.c
@@ -24,6 +24,7 @@
  */
 
 #include "dimension-internal.h"
+#include <stdatomic.h>
 
 /** A single allocation and associated destructor. */
 typedef struct dmnsn_allocation {
@@ -48,11 +49,8 @@ typedef struct dmnsn_pool_block {
 
 struct dmnsn_pool {
   /** Thread-local block. */
   pthread_key_t thread_block;
-  /** Global chain of pools. */
-  dmnsn_pool_block *chain;
-  /** Mutex guarding the global chain. */
-  pthread_mutex_t mutex;
+  _Atomic(dmnsn_pool_block *) chain;
 };
 
 dmnsn_pool *
@@ -60,8 +58,7 @@ dmnsn_new_pool(void)
 {
   dmnsn_pool *pool = DMNSN_MALLOC(dmnsn_pool);
   dmnsn_key_create(&pool->thread_block, NULL);
-  pool->chain = NULL;
-  dmnsn_initialize_mutex(&pool->mutex);
+  atomic_init(&pool->chain, NULL);
 
   return pool;
 }
@@ -90,10 +87,12 @@ dmnsn_palloc_tidy(dmnsn_pool *pool, size_t size, dmnsn_callback_fn *cleanup_fn)
   if (dmnsn_unlikely(new_block != old_block)) {
     dmnsn_setspecific(pool->thread_block, new_block);
 
-    dmnsn_lock_mutex(&pool->mutex);
-    new_block->prev = pool->chain;
-    pool->chain = new_block;
-    dmnsn_unlock_mutex(&pool->mutex);
+    /* Atomically update pool->chain */
+    dmnsn_pool_block *chain;
+    do {
+      chain = atomic_load(&pool->chain);
+    } while (!atomic_compare_exchange_weak(&pool->chain, &chain, new_block));
+    new_block->prev = chain;
   }
 
   return result;
@@ -106,7 +105,7 @@ dmnsn_delete_pool(dmnsn_pool *pool)
     return;
   }
 
-  dmnsn_pool_block *block = pool->chain;
+  dmnsn_pool_block *block = atomic_load_explicit(&pool->chain, memory_order_relaxed);
   while (block) {
     /* Free all the allocations in reverse order */
     for (size_t i = block->i; i-- > 0;) {
@@ -123,7 +122,6 @@ dmnsn_delete_pool(dmnsn_pool *pool)
     dmnsn_free(saved);
   }
 
-  dmnsn_destroy_mutex(&pool->mutex);
   dmnsn_key_delete(pool->thread_block);
   dmnsn_free(pool);
 }
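
In pool.c the mutex-guarded push onto pool->chain becomes a lock-free compare-and-swap loop. A minimal sketch of the same idiom on a hypothetical linked chain (the names below are illustrative, not libdimension code):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct node {
      struct node *prev;
      int value;
    };

    static _Atomic(struct node *) chain = NULL;

    /* Push a node onto the shared chain without taking a lock. */
    void push(int value)
    {
      struct node *node = malloc(sizeof(*node));
      if (!node) {
        abort();
      }
      node->value = value;

      struct node *head;
      do {
        /* Snapshot the current head; the CAS fails if another thread pushed first. */
        head = atomic_load(&chain);
        node->prev = head;
      } while (!atomic_compare_exchange_weak(&chain, &head, node));
    }

Note that on failure atomic_compare_exchange_weak also writes the freshly observed head back into its expected argument, so the explicit atomic_load on each iteration is a readability choice rather than a requirement.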