author | Tavian Barnes <tavianator@tavianator.com> | 2023-07-18 11:56:41 -0400 |
---|---|---|
committer | Tavian Barnes <tavianator@tavianator.com> | 2023-07-18 12:02:02 -0400 |
commit | 0d5bcc9e5c53f64024afbad19b1a01ae9b2937af (patch) | |
tree | b45cda7d4507f7beba43e00365e0447137002468 /src | |
parent | 815798e1eea7fc8dacd5acab40202ec4d251d517 (diff) | |
download | bfs-0d5bcc9e5c53f64024afbad19b1a01ae9b2937af.tar.xz |
bftw: Use a larger ioq depth
Now that the dirlimit provides backpressure on the number of open
directories, we can use a uniformly larger queue depth for increased
performance. The current parameters were tuned with a small grid search
on my workstation.
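For context, the sketch below (not bfs source; `IOQ_DEPTH`, `DIRLIMIT_MAX`, and the `dirlimit()` helper are illustrative names) shows the sizing scheme this commit switches to: a fixed I/O queue depth, with the open-directory count still capped at min(args->nopenfd - 1, 1024), exactly as in the diff further down.

```c
#include <stddef.h>
#include <stdio.h>

/* Illustrative constants matching the values in the diff below. */
#define IOQ_DEPTH 4096     /* fixed I/O queue depth, no longer derived from nopenfd */
#define DIRLIMIT_MAX 1024  /* cap on concurrently open directories */

/* Hypothetical helper: the dirlimit that provides backpressure.  In the
 * real code this computation is inlined in bftw_state_init(). */
static size_t dirlimit(size_t nopenfd) {
	size_t limit = nopenfd - 1;  /* the diff reserves one fd; nopenfd >= 2 is checked earlier */
	if (limit > DIRLIMIT_MAX) {
		limit = DIRLIMIT_MAX;
	}
	return limit;
}

int main(void) {
	/* Before this commit, the queue depth itself was min(nopenfd - 1, 1024)
	 * and nthreads was clamped to it; now only the dirlimit depends on nopenfd. */
	for (size_t nopenfd = 64; nopenfd <= 4096; nopenfd *= 4) {
		printf("nopenfd=%zu -> ioq depth=%d, dirlimit=%zu\n",
		       nopenfd, IOQ_DEPTH, dirlimit(nopenfd));
	}
	return 0;
}
```

The practical effect visible in the diff is that `args->nopenfd` now bounds only the number of open directories, while the queue depth and thread count are chosen independently of it.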
Diffstat (limited to 'src')
-rw-r--r-- | src/bftw.c | 34 |
1 files changed, 12 insertions, 22 deletions
```diff
@@ -462,42 +462,32 @@ static int bftw_state_init(struct bftw_state *state, const struct bftw_args *args)
 		errno = EMFILE;
 		return -1;
 	}
-
-	state->path = dstralloc(0);
-	if (!state->path) {
-		return -1;
-	}
-
 	bftw_cache_init(&state->cache, args->nopenfd);
 
-	size_t qdepth = args->nopenfd - 1;
-	if (qdepth > 1024) {
-		qdepth = 1024;
-	}
-
-	size_t nthreads = args->nthreads;
-	if (nthreads > qdepth) {
-		nthreads = qdepth;
-	}
-
-	state->ioq = NULL;
-	if (nthreads > 0) {
-		state->ioq = ioq_create(qdepth, nthreads);
+	state->nthreads = args->nthreads;
+	if (state->nthreads > 0) {
+		state->ioq = ioq_create(4096, state->nthreads);
 		if (!state->ioq) {
-			dstrfree(state->path);
 			return -1;
 		}
+	} else {
+		state->ioq = NULL;
 	}
-	state->nthreads = nthreads;
 
 	SLIST_INIT(&state->to_open);
 	SLIST_INIT(&state->to_read);
 	SLIST_INIT(&state->to_close);
-	state->dirlimit = qdepth;
+
+	size_t dirlimit = args->nopenfd - 1;
+	if (dirlimit > 1024) {
+		dirlimit = 1024;
+	}
+	state->dirlimit = dirlimit;
 
 	SLIST_INIT(&state->to_visit);
 	SLIST_INIT(&state->batch);
 
+	state->path = NULL;
 	state->file = NULL;
 	state->previous = NULL;
```