path: root/tests/ioq.c
author     Tavian Barnes <tavianator@tavianator.com>    2024-05-07 15:42:46 -0400
committer  Tavian Barnes <tavianator@tavianator.com>    2024-05-07 15:42:46 -0400
commit     452d6697e0f92326ab139eed4eadd9c2fd8b55ca (patch)
tree       0feeb3722dcf6debb6c33c5175342bf1d70a1dba /tests/ioq.c
parent     a4299f9bc1d3e60a7e628561e8d650c2a241e1c2 (diff)
parent     c5cf2cf90834f2f56b2940d2a499a1a614ebfd21 (diff)
download   bfs-find2fd.tar.xz
Merge branch 'main' into find2fd
Diffstat (limited to 'tests/ioq.c')
-rw-r--r--    tests/ioq.c    77
1 file changed, 77 insertions, 0 deletions
diff --git a/tests/ioq.c b/tests/ioq.c
new file mode 100644
index 0000000..ef5ee3b
--- /dev/null
+++ b/tests/ioq.c
@@ -0,0 +1,77 @@
+// Copyright © Tavian Barnes <tavianator@tavianator.com>
+// SPDX-License-Identifier: 0BSD
+
+#include "prelude.h"
+#include "tests.h"
+#include "ioq.h"
+#include "bfstd.h"
+#include "diag.h"
+#include "dir.h"
+#include <errno.h>
+#include <fcntl.h>
+#include <stdlib.h>
+
+/**
+ * Test for blocking within ioq_slot_push().
+ *
+ * struct ioqq only supports non-blocking reads; if a write encounters a full
+ * slot, it must block until someone pops from that slot:
+ *
+ * Reader                        Writer
+ * ──────────────────────────    ─────────────────────────
+ *                               tail: 0 → 1
+ *                               slots[0]: empty → full
+ *                               tail: 1 → 0
+ *                               slots[1]: empty → full
+ *                               tail: 0 → 1
+ *                               slots[0]: full → full* (IOQ_BLOCKED)
+ *                               ioq_slot_wait() ...
+ * head: 0 → 1
+ * slots[0]: full* → empty
+ * ioq_slot_wake()
+ *                               ...
+ *                               slots[0]: empty → full
+ *
+ * To reproduce this unlikely scenario, we must fill up the ready queue, then
+ * call ioq_cancel(), which pushes an additional sentinel IOQ_STOP operation.
+ */
+static void check_ioq_push_block(void) {
+	// Must be a power of two to fill the entire queue
+	const size_t depth = 2;
+
+	struct ioq *ioq = ioq_create(depth, 1);
+	bfs_verify(ioq, "ioq_create(): %s", xstrerror(errno));
+
+	// Push enough operations to fill the queue
+	for (size_t i = 0; i < depth; ++i) {
+		struct bfs_dir *dir = bfs_allocdir();
+		bfs_verify(dir, "bfs_allocdir(): %s", xstrerror(errno));
+
+		int ret = ioq_opendir(ioq, dir, AT_FDCWD, ".", 0, NULL);
+		bfs_verify(ret == 0, "ioq_opendir(): %s", xstrerror(errno));
+	}
+	bfs_verify(ioq_capacity(ioq) == 0);
+
+	// Now cancel the queue, pushing an additional IOQ_STOP message
+	ioq_cancel(ioq);
+
+	// Drain the queue
+	for (size_t i = 0; i < depth; ++i) {
+		struct ioq_ent *ent = ioq_pop(ioq, true);
+		bfs_verify(ent && ent->op == IOQ_OPENDIR);
+
+		if (ent->result >= 0) {
+			bfs_closedir(ent->opendir.dir);
+		}
+		free(ent->opendir.dir);
+		ioq_free(ioq, ent);
+	}
+	bfs_verify(!ioq_pop(ioq, true));
+
+	ioq_destroy(ioq);
+}
+
+bool check_ioq(void) {
+	check_ioq_push_block();
+	return true;
+}
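
For readers following the scenario in the comment above, here is a self-contained analogue of the blocked push in plain C. It is only an illustrative sketch: bfs's real struct ioqq is a lock-free ring that parks writers with futex-style ioq_slot_wait()/ioq_slot_wake() calls, whereas this toy uses a mutex and condition variable, handles a single reader and a single writer, and the toy_queue, toy_push(), toy_pop(), and writer() names are invented for the example.

// Toy analogue of the ioqq blocked-push scenario (not bfs code).
#include <pthread.h>
#include <stdio.h>

#define DEPTH 2 // must be a power of two, like the test's queue depth

struct toy_queue {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	unsigned head, tail;
	int full[DEPTH];  // full[i] != 0 means slots[i] holds a value
	int slots[DEPTH];
};

// Analogue of ioq_slot_push(): blocks while the claimed slot is still full.
static void toy_push(struct toy_queue *q, int value) {
	pthread_mutex_lock(&q->lock);
	unsigned i = q->tail++ % DEPTH;
	while (q->full[i]) {
		// Analogue of ioq_slot_wait(): wait for a pop on this slot
		pthread_cond_wait(&q->cond, &q->lock);
	}
	q->slots[i] = value;
	q->full[i] = 1;
	pthread_cond_broadcast(&q->cond);
	pthread_mutex_unlock(&q->lock);
}

// Analogue of a pop: empties the slot and wakes any blocked writer.
static int toy_pop(struct toy_queue *q) {
	pthread_mutex_lock(&q->lock);
	unsigned i = q->head++ % DEPTH;
	while (!q->full[i]) {
		pthread_cond_wait(&q->cond, &q->lock);
	}
	int value = q->slots[i];
	q->full[i] = 0;
	// Analogue of ioq_slot_wake()
	pthread_cond_broadcast(&q->cond);
	pthread_mutex_unlock(&q->lock);
	return value;
}

static void *writer(void *arg) {
	struct toy_queue *q = arg;
	// DEPTH pushes fill the ring; the (DEPTH + 1)-th push lands on a full
	// slot and blocks, just like the extra IOQ_STOP pushed by ioq_cancel()
	// in the test above.
	for (int i = 0; i <= DEPTH; ++i) {
		toy_push(q, i);
	}
	return NULL;
}

int main(void) {
	struct toy_queue q = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cond = PTHREAD_COND_INITIALIZER,
	};

	pthread_t thread;
	pthread_create(&thread, NULL, writer, &q);

	for (int i = 0; i <= DEPTH; ++i) {
		printf("popped %d\n", toy_pop(&q));
	}

	pthread_join(thread, NULL);
	return 0;
}

With DEPTH set to 2, the writer's third push targets slot 0 while it is still full and blocks, exactly as in the Reader/Writer table above; the first pop empties that slot and wakes the writer, so the program prints 0, 1, 2 and exits.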