Diffstat (limited to 'libdimension/bvh')
-rw-r--r--  libdimension/bvh/bvh.c     402
-rw-r--r--  libdimension/bvh/prtree.c  372
2 files changed, 774 insertions, 0 deletions
diff --git a/libdimension/bvh/bvh.c b/libdimension/bvh/bvh.c
new file mode 100644
index 0000000..eab2c28
--- /dev/null
+++ b/libdimension/bvh/bvh.c
@@ -0,0 +1,402 @@
+/*************************************************************************
+ * Copyright (C) 2012-2014 Tavian Barnes <tavianator@tavianator.com> *
+ * *
+ * This file is part of The Dimension Library. *
+ * *
+ * The Dimension Library is free software; you can redistribute it and/ *
+ * or modify it under the terms of the GNU Lesser General Public License *
+ * as published by the Free Software Foundation; either version 3 of the *
+ * License, or (at your option) any later version. *
+ * *
+ * The Dimension Library is distributed in the hope that it will be *
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty *
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *
+ * Lesser General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU Lesser General Public *
+ * License along with this program. If not, see *
+ * <http://www.gnu.org/licenses/>. *
+ *************************************************************************/
+
+/**
+ * @file
+ * BVH implementation. These are the hottest code paths in libdimension.
+ */
+
+#include "internal/bvh.h"
+#include "internal/concurrency.h"
+#include "internal/prtree.h"
+#include <pthread.h>
+
+/// Implementation for DMNSN_BVH_NONE: just stick all objects in one node.
+static dmnsn_bvh_node *
+dmnsn_new_stupid_bvh(const dmnsn_array *objects)
+{
+ dmnsn_bvh_node *root = dmnsn_new_bvh_node(dmnsn_array_size(objects));
+
+ DMNSN_ARRAY_FOREACH (dmnsn_object **, object, objects) {
+ dmnsn_bvh_node *leaf = dmnsn_new_bvh_leaf_node(*object);
+ dmnsn_bvh_node_add(root, leaf);
+ }
+
+ return root;
+}
+
+// Implementation of opaque dmnsn_bvh type.
+struct dmnsn_bvh {
+ dmnsn_array *unbounded; ///< The unbounded objects.
+ dmnsn_array *bounded; ///< The BVH of the bounded objects.
+ pthread_key_t intersection_cache; ///< The thread-local intersection cache.
+};
+
+/// A flat BVH node, stored in an array for fast pre-order traversal.
+typedef struct dmnsn_flat_bvh_node {
+ dmnsn_aabb aabb; ///< The bounding box of this node.
+ dmnsn_object *object; ///< The referenced object, for leaf nodes.
+ ptrdiff_t skip; ///< Displacement to the next sibling.
+} dmnsn_flat_bvh_node;
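+
+// The flat nodes are laid out in pre-order: each node is immediately followed
+// by its descendants, and node->skip equals the size of its subtree.
+// Traversal therefore needs no stack: on a bounding-box hit, step to the next
+// node (the first child); on a miss, jump forward by skip to the next sibling.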
+
+/// Add an object or its children, if any, to an array.
+static void
+dmnsn_split_add_object(dmnsn_array *objects, const dmnsn_object *object)
+{
+ if (object->split_children) {
+ DMNSN_ARRAY_FOREACH (const dmnsn_object **, child, object->children) {
+ dmnsn_split_add_object(objects, *child);
+ }
+ } else {
+ dmnsn_array_push(objects, &object);
+ }
+}
+
+/// Split unions to create the input for the BVH.
+static dmnsn_array *
+dmnsn_split_objects(const dmnsn_array *objects)
+{
+ dmnsn_array *split = DMNSN_NEW_ARRAY(dmnsn_object *);
+ DMNSN_ARRAY_FOREACH (const dmnsn_object **, object, objects) {
+ dmnsn_split_add_object(split, *object);
+ }
+ return split;
+}
+
+/// Split unbounded objects into a new array.
+static dmnsn_array *
+dmnsn_split_unbounded(dmnsn_array *objects)
+{
+ dmnsn_array *unbounded = DMNSN_NEW_ARRAY(dmnsn_object *);
+
+ dmnsn_object **array = dmnsn_array_first(objects);
+ size_t i, skip;
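+ // Compact the bounded objects towards the front of the array in place;
+ // unbounded objects are pushed onto the new array and counted in skip.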
+ for (i = 0, skip = 0; i < dmnsn_array_size(objects); ++i) {
+ if (dmnsn_aabb_is_infinite(array[i]->aabb)) {
+ dmnsn_array_push(unbounded, &array[i]);
+ ++skip;
+ } else {
+ array[i - skip] = array[i];
+ }
+ }
+ dmnsn_array_resize(objects, i - skip);
+
+ return unbounded;
+}
+
+/// Recursively flatten a BVH into an array of flat nodes.
+static void
+dmnsn_flatten_bvh_recursive(dmnsn_bvh_node *node, dmnsn_array *flat)
+{
+ size_t currenti = dmnsn_array_size(flat);
+ dmnsn_array_resize(flat, currenti + 1);
+ dmnsn_flat_bvh_node *flatnode = dmnsn_array_at(flat, currenti);
+
+ flatnode->aabb = node->aabb;
+ flatnode->object = node->object;
+
+ for (size_t i = 0; i < node->nchildren && node->children[i]; ++i) {
+ dmnsn_flatten_bvh_recursive(node->children[i], flat);
+ }
+
+ // Array could have been realloc()'d somewhere else above
+ flatnode = dmnsn_array_at(flat, currenti);
+ flatnode->skip = dmnsn_array_size(flat) - currenti;
+}
+
+/// Flatten a BVH into an array of flat nodes.
+static dmnsn_array *
+dmnsn_flatten_bvh(dmnsn_bvh_node *root)
+{
+ dmnsn_array *flat = DMNSN_NEW_ARRAY(dmnsn_flat_bvh_node);
+ if (root) {
+ dmnsn_flatten_bvh_recursive(root, flat);
+ }
+ return flat;
+}
+
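+// Typical usage (illustrative sketch; `objects` and `ray` supplied by the
+// caller):
+//
+//   dmnsn_bvh *bvh = dmnsn_new_bvh(objects, DMNSN_BVH_PRTREE);
+//   dmnsn_intersection intersection;
+//   if (dmnsn_bvh_intersection(bvh, ray, &intersection, true)) {
+//     // hit: intersection.t is the distance of the closest hit along ray
+//   }
+//   dmnsn_delete_bvh(bvh);
+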
+dmnsn_bvh *dmnsn_new_bvh(const dmnsn_array *objects, dmnsn_bvh_kind kind)
+{
+ dmnsn_bvh *bvh = DMNSN_MALLOC(dmnsn_bvh);
+
+ dmnsn_array *bounded = dmnsn_split_objects(objects);
+ bvh->unbounded = dmnsn_split_unbounded(bounded);
+
+ dmnsn_bvh_node *root = NULL;
+ if (dmnsn_array_size(bounded) > 0) {
+ switch (kind) {
+ case DMNSN_BVH_NONE:
+ root = dmnsn_new_stupid_bvh(bounded);
+ break;
+ case DMNSN_BVH_PRTREE:
+ root = dmnsn_new_prtree(bounded);
+ break;
+ default:
+ dmnsn_unreachable("Invalid BVH kind.");
+ }
+ }
+ bvh->bounded = dmnsn_flatten_bvh(root);
+
+ dmnsn_delete_bvh_node(root);
+ dmnsn_delete_array(bounded);
+
+ dmnsn_key_create(&bvh->intersection_cache, dmnsn_free);
+
+ return bvh;
+}
+
+void
+dmnsn_delete_bvh(dmnsn_bvh *bvh)
+{
+ if (bvh) {
+ dmnsn_free(pthread_getspecific(bvh->intersection_cache));
+ dmnsn_key_delete(bvh->intersection_cache);
+ dmnsn_delete_array(bvh->bounded);
+ dmnsn_delete_array(bvh->unbounded);
+ dmnsn_free(bvh);
+ }
+}
+
+/// A ray with pre-calculated reciprocals to avoid divisions.
+typedef struct dmnsn_optimized_ray {
+ dmnsn_vector x0; ///< The origin of the ray.
+ dmnsn_vector n_inv; ///< The inverse of each component of the ray's slope.
+} dmnsn_optimized_ray;
+
+/// Precompute inverses for faster ray-box intersection tests.
+static inline dmnsn_optimized_ray
+dmnsn_optimize_ray(dmnsn_ray ray)
+{
+ dmnsn_optimized_ray optray = {
+ .x0 = ray.x0,
+ .n_inv = dmnsn_new_vector(1.0/ray.n.x, 1.0/ray.n.y, 1.0/ray.n.z)
+ };
+ return optray;
+}
+
+/// Ray-AABB intersection test, by the slab method. Highly optimized.
+static inline bool
+dmnsn_ray_box_intersection(dmnsn_optimized_ray optray, dmnsn_aabb box, double t)
+{
+ // This is actually correct, even though it appears not to handle edge cases
+ // (ray.n.{x,y,z} == 0). It works because the infinities that result from
+ // dividing by zero will still behave correctly in the comparisons. Rays
+ // which are parallel to an axis and outside the box will have tmin == inf
+ // or tmax == -inf, while rays inside the box will have tmin and tmax
+ // unchanged.
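+ //
+ // For example, if ray.n.x == +0.0 then n_inv.x == +inf; a ray starting
+ // outside the x slab (x0.x < box.min.x) gets tx1 == tx2 == +inf, so tmin
+ // becomes +inf and the test fails. A ray starting inside the x slab gets
+ // tx1 == -inf and tx2 == +inf, which leave tmin and tmax unchanged.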
+
+ double tx1 = (box.min.x - optray.x0.x)*optray.n_inv.x;
+ double tx2 = (box.max.x - optray.x0.x)*optray.n_inv.x;
+
+ double tmin = dmnsn_min(tx1, tx2);
+ double tmax = dmnsn_max(tx1, tx2);
+
+ double ty1 = (box.min.y - optray.x0.y)*optray.n_inv.y;
+ double ty2 = (box.max.y - optray.x0.y)*optray.n_inv.y;
+
+ tmin = dmnsn_max(tmin, dmnsn_min(ty1, ty2));
+ tmax = dmnsn_min(tmax, dmnsn_max(ty1, ty2));
+
+ double tz1 = (box.min.z - optray.x0.z)*optray.n_inv.z;
+ double tz2 = (box.max.z - optray.x0.z)*optray.n_inv.z;
+
+ tmin = dmnsn_max(tmin, dmnsn_min(tz1, tz2));
+ tmax = dmnsn_min(tmax, dmnsn_max(tz1, tz2));
+
+ return tmax >= dmnsn_max(0.0, tmin) && tmin < t;
+}
+
+/// The number of intersections to cache.
+#define DMNSN_INTERSECTION_CACHE_SIZE 32
+
+/// An array of cached intersections.
+typedef struct dmnsn_intersection_cache {
+ size_t i;
+ dmnsn_object *objects[DMNSN_INTERSECTION_CACHE_SIZE];
+} dmnsn_intersection_cache;
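+
+// The cache is thread-local and indexed by position in the trace: a reset
+// (primary) ray uses slot 0, and each subsequent call during the same trace
+// uses the next slot. The idea is that consecutive traces tend to hit the
+// same objects, so testing the previously found object first shrinks t early
+// and lets the box test cull most BVH nodes.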
+
+static dmnsn_intersection_cache *
+dmnsn_get_intersection_cache(const dmnsn_bvh *bvh)
+{
+ dmnsn_intersection_cache *cache
+ = pthread_getspecific(bvh->intersection_cache);
+
+ if (!cache) {
+ cache = DMNSN_MALLOC(dmnsn_intersection_cache);
+ cache->i = 0;
+ for (size_t i = 0; i < DMNSN_INTERSECTION_CACHE_SIZE; ++i) {
+ cache->objects[i] = NULL;
+ }
+ dmnsn_setspecific(bvh->intersection_cache, cache);
+ }
+
+ return cache;
+}
+
+/// Test for a closer object intersection than we've found so far.
+static inline bool
+dmnsn_closer_intersection(dmnsn_object *object, dmnsn_ray ray, dmnsn_intersection *intersection, double *t)
+{
+ dmnsn_intersection local_intersection;
+ if (dmnsn_object_intersection(object, ray, &local_intersection)) {
+ if (local_intersection.t < *t) {
+ *intersection = local_intersection;
+ *t = local_intersection.t;
+ return true;
+ }
+ }
+ return false;
+}
+
+DMNSN_HOT bool
+dmnsn_bvh_intersection(const dmnsn_bvh *bvh, dmnsn_ray ray, dmnsn_intersection *intersection, bool reset)
+{
+ double t = INFINITY;
+
+ // Search the unbounded objects
+ DMNSN_ARRAY_FOREACH (dmnsn_object **, object, bvh->unbounded) {
+ dmnsn_closer_intersection(*object, ray, intersection, &t);
+ }
+
+ // Precalculate 1.0/ray.n.{x,y,z} to save time in intersection tests
+ dmnsn_optimized_ray optray = dmnsn_optimize_ray(ray);
+
+ // Search the intersection cache
+ dmnsn_intersection_cache *cache = dmnsn_get_intersection_cache(bvh);
+ if (dmnsn_unlikely(reset)) {
+ cache->i = 0;
+ }
+ dmnsn_object *cached = NULL, *found = NULL;
+ if (dmnsn_likely(cache->i < DMNSN_INTERSECTION_CACHE_SIZE)) {
+ cached = cache->objects[cache->i];
+ }
+ if (cached && dmnsn_ray_box_intersection(optray, cached->aabb, t)) {
+ if (dmnsn_closer_intersection(cached, ray, intersection, &t)) {
+ found = cached;
+ }
+ }
+
+ // Search the bounded objects
+ dmnsn_flat_bvh_node *node = dmnsn_array_first(bvh->bounded);
+ dmnsn_flat_bvh_node *last = dmnsn_array_last(bvh->bounded);
+ while (node <= last) {
+ if (dmnsn_ray_box_intersection(optray, node->aabb, t)) {
+ if (node->object && node->object != cached) {
+ if (dmnsn_closer_intersection(node->object, ray, intersection, &t)) {
+ found = node->object;
+ }
+ }
+ ++node;
+ } else {
+ node += node->skip;
+ }
+ }
+
+ // Update the cache
+ if (dmnsn_likely(cache->i < DMNSN_INTERSECTION_CACHE_SIZE)) {
+ cache->objects[cache->i] = found;
+ ++cache->i;
+ }
+
+ return !isinf(t);
+}
+
+DMNSN_HOT bool
+dmnsn_bvh_inside(const dmnsn_bvh *bvh, dmnsn_vector point)
+{
+ // Search the unbounded objects
+ DMNSN_ARRAY_FOREACH (dmnsn_object **, object, bvh->unbounded) {
+ if (dmnsn_object_inside(*object, point))
+ return true;
+ }
+
+ // Search the bounded objects
+ dmnsn_flat_bvh_node *node = dmnsn_array_first(bvh->bounded);
+ dmnsn_flat_bvh_node *last = dmnsn_array_last(bvh->bounded);
+ while (node <= last) {
+ if (dmnsn_aabb_contains(node->aabb, point)) {
+ if (node->object && dmnsn_object_inside(node->object, point)) {
+ return true;
+ }
+ ++node;
+ } else {
+ node += node->skip;
+ }
+ }
+
+ return false;
+}
+
+dmnsn_aabb
+dmnsn_bvh_aabb(const dmnsn_bvh *bvh)
+{
+ if (dmnsn_array_size(bvh->unbounded) > 0) {
+ return dmnsn_infinite_aabb();
+ } else if (dmnsn_array_size(bvh->bounded) > 0) {
+ dmnsn_flat_bvh_node *root = dmnsn_array_first(bvh->bounded);
+ return root->aabb;
+ } else {
+ return dmnsn_zero_aabb();
+ }
+}
+
+dmnsn_bvh_node *
+dmnsn_new_bvh_node(unsigned int max_children)
+{
+ dmnsn_bvh_node *node = dmnsn_malloc(sizeof(dmnsn_bvh_node) + max_children*sizeof(dmnsn_bvh_node *));
+ node->aabb = dmnsn_zero_aabb();
+ node->object = NULL;
+ node->nchildren = 0;
+ node->max_children = max_children;
+ return node;
+}
+
+dmnsn_bvh_node *
+dmnsn_new_bvh_leaf_node(dmnsn_object *object)
+{
+ dmnsn_bvh_node *node = DMNSN_MALLOC(dmnsn_bvh_node);
+ node->aabb = object->aabb;
+ node->object = object;
+ node->nchildren = 0;
+ node->max_children = 0;
+ return node;
+}
+
+void
+dmnsn_delete_bvh_node(dmnsn_bvh_node *node)
+{
+ if (node) {
+ for (size_t i = 0; i < node->nchildren; ++i) {
+ dmnsn_delete_bvh_node(node->children[i]);
+ }
+ dmnsn_free(node);
+ }
+}
+
+void
+dmnsn_bvh_node_add(dmnsn_bvh_node *parent, dmnsn_bvh_node *child)
+{
+ dmnsn_assert(parent->nchildren < parent->max_children,
+              "Too many BVH children inserted.");
+
+ parent->aabb.min = dmnsn_vector_min(parent->aabb.min, child->aabb.min);
+ parent->aabb.max = dmnsn_vector_max(parent->aabb.max, child->aabb.max);
+ parent->children[parent->nchildren++] = child;
+}
diff --git a/libdimension/bvh/prtree.c b/libdimension/bvh/prtree.c
new file mode 100644
index 0000000..c8e4e54
--- /dev/null
+++ b/libdimension/bvh/prtree.c
@@ -0,0 +1,372 @@
+/*************************************************************************
+ * Copyright (C) 2010-2014 Tavian Barnes <tavianator@tavianator.com> *
+ * *
+ * This file is part of The Dimension Library. *
+ * *
+ * The Dimension Library is free software; you can redistribute it and/ *
+ * or modify it under the terms of the GNU Lesser General Public License *
+ * as published by the Free Software Foundation; either version 3 of the *
+ * License, or (at your option) any later version. *
+ * *
+ * The Dimension Library is distributed in the hope that it will be *
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty *
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *
+ * Lesser General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU Lesser General Public *
+ * License along with this program. If not, see *
+ * <http://www.gnu.org/licenses/>. *
+ *************************************************************************/
+
+/**
+ * @file
+ * Priority R-tree implementation.
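+ *
+ * Rough outline of the construction implemented below: the current set of
+ * leaves is sorted six ways (by min and max along each axis). Up to
+ * DMNSN_PRTREE_B extreme nodes from each of the six orders become the
+ * "priority leaves" of new nodes, the remaining nodes are split in half along
+ * a rotating dimension, and the process recurses. Each full pass over the
+ * leaves produces the next level of the final tree.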
+ */
+
+#include "internal/platform.h"
+#include "internal/prtree.h"
+#include "internal/concurrency.h"
+#include <stdlib.h>
+
+/// Number of children per PR-node.
+#define DMNSN_PRTREE_B 8
+/// Number of priority leaves per pseudo-PR-node (must be 2*ndimensions).
+#define DMNSN_PSEUDO_B 6
+
+/// The side of the split that a node ended up on.
+typedef enum dmnsn_prnode_color {
+ DMNSN_PRTREE_LEAF, ///< Priority leaf.
+ DMNSN_PRTREE_LEFT, ///< Left child.
+ DMNSN_PRTREE_RIGHT ///< Right child.
+} dmnsn_prnode_color;
+
+/**
+ * A BVH node with associated color. Compared to storing the color in the
+ * \p dmnsn_bvh_node itself, this method has decreased cache performance during
+ * sorting (due to an extra pointer chase), but increased performance during
+ * tree building (because it's much smaller than a full \p dmnsn_bvh_node).
+ * Overall it gives about a 25% improvement.
+ */
+typedef struct dmnsn_colored_prnode {
+ dmnsn_prnode_color color;
+ dmnsn_bvh_node *node;
+} dmnsn_colored_prnode;
+
+/// Construct an empty PR-node.
+static inline dmnsn_bvh_node *
+dmnsn_new_prnode(void)
+{
+ return dmnsn_new_bvh_node(DMNSN_PRTREE_B);
+}
+
+/// Comparator types.
+enum {
+ DMNSN_XMIN,
+ DMNSN_YMIN,
+ DMNSN_ZMIN,
+ DMNSN_XMAX,
+ DMNSN_YMAX,
+ DMNSN_ZMAX
+};
+
+// List sorting comparators
+
+static int
+dmnsn_xmin_comp(const void *l, const void *r)
+{
+ double lval = (*(const dmnsn_colored_prnode **)l)->node->aabb.min.x;
+ double rval = (*(const dmnsn_colored_prnode **)r)->node->aabb.min.x;
+ return (lval > rval) - (lval < rval);
+}
+
+static int
+dmnsn_ymin_comp(const void *l, const void *r)
+{
+ double lval = (*(const dmnsn_colored_prnode **)l)->node->aabb.min.y;
+ double rval = (*(const dmnsn_colored_prnode **)r)->node->aabb.min.y;
+ return (lval > rval) - (lval < rval);
+}
+
+static int
+dmnsn_zmin_comp(const void *l, const void *r)
+{
+ double lval = (*(const dmnsn_colored_prnode **)l)->node->aabb.min.z;
+ double rval = (*(const dmnsn_colored_prnode **)r)->node->aabb.min.z;
+ return (lval > rval) - (lval < rval);
+}
+
+static int
+dmnsn_xmax_comp(const void *l, const void *r)
+{
+ double lval = (*(const dmnsn_colored_prnode **)l)->node->aabb.max.x;
+ double rval = (*(const dmnsn_colored_prnode **)r)->node->aabb.max.x;
+ return (lval < rval) - (lval > rval);
+}
+
+static int
+dmnsn_ymax_comp(const void *l, const void *r)
+{
+ double lval = (*(const dmnsn_colored_prnode **)l)->node->aabb.max.y;
+ double rval = (*(const dmnsn_colored_prnode **)r)->node->aabb.max.y;
+ return (lval < rval) - (lval > rval);
+}
+
+static int
+dmnsn_zmax_comp(const void *l, const void *r)
+{
+ double lval = (*(const dmnsn_colored_prnode **)l)->node->aabb.max.z;
+ double rval = (*(const dmnsn_colored_prnode **)r)->node->aabb.max.z;
+ return (lval < rval) - (lval > rval);
+}
+
+/// All comparators.
+static dmnsn_array_comparator_fn *const dmnsn_comparators[DMNSN_PSEUDO_B] = {
+ [DMNSN_XMIN] = dmnsn_xmin_comp,
+ [DMNSN_YMIN] = dmnsn_ymin_comp,
+ [DMNSN_ZMIN] = dmnsn_zmin_comp,
+ [DMNSN_XMAX] = dmnsn_xmax_comp,
+ [DMNSN_YMAX] = dmnsn_ymax_comp,
+ [DMNSN_ZMAX] = dmnsn_zmax_comp,
+};
+
+/// Add the priority leaves for this level.
+static void
+dmnsn_add_priority_leaves(dmnsn_colored_prnode **sorted_leaves[DMNSN_PSEUDO_B],
+                          size_t start, size_t end,
+                          dmnsn_array *new_leaves)
+{
+ for (size_t i = 0; i < DMNSN_PSEUDO_B; ++i) {
+ dmnsn_bvh_node *leaf = NULL;
+ dmnsn_colored_prnode **leaves = sorted_leaves[i];
+
+ for (size_t j = start;
+      j < end && (!leaf || leaf->nchildren < DMNSN_PRTREE_B);
+      ++j) {
+ // Skip all the previously found extreme nodes
+ if (leaves[j]->color == DMNSN_PRTREE_LEAF) {
+ continue;
+ }
+
+ if (!leaf) {
+ leaf = dmnsn_new_prnode();
+ }
+ leaves[j]->color = DMNSN_PRTREE_LEAF;
+ dmnsn_bvh_node_add(leaf, leaves[j]->node);
+ }
+
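+ // If no uncolored node was found in this list, none remain in any of the
+ // lists (they all contain the same nodes), so stop early.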
+ if (leaf) {
+ dmnsn_array_push(new_leaves, &leaf);
+ } else {
+ return;
+ }
+ }
+}
+
+/// Filter out the extreme nodes that were already made priority leaves.
+static void
+dmnsn_filter_priority_leaves(dmnsn_colored_prnode **leaves, size_t start, size_t *endp)
+{
+ size_t i, skip, end;
+ for (i = start, skip = 0, end = *endp; i < end; ++i) {
+ if (leaves[i]->color == DMNSN_PRTREE_LEAF) {
+ ++skip;
+ } else {
+ leaves[i - skip] = leaves[i];
+ }
+ }
+
+ *endp -= skip;
+}
+
+/// Split the leaves and mark the left and right child nodes.
+static void
+dmnsn_split_sorted_leaves_easy(dmnsn_colored_prnode **leaves, size_t start, size_t *midp, size_t end)
+{
+ size_t i, mid = start + (end - start + 1)/2;
+ for (i = start; i < mid; ++i) {
+ leaves[i]->color = DMNSN_PRTREE_LEFT;
+ }
+ for (; i < end; ++i) {
+ leaves[i]->color = DMNSN_PRTREE_RIGHT;
+ }
+
+ *midp = mid;
+}
+
+/// Split the leaves using the coloring from dmnsn_split_sorted_leaves_easy().
+static void
+dmnsn_split_sorted_leaves_hard(dmnsn_colored_prnode **leaves, dmnsn_colored_prnode **buffer, size_t start, size_t end)
+{
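+ // Stable partition using the colors from dmnsn_split_sorted_leaves_easy():
+ // LEFT nodes are compacted to the front, RIGHT nodes are staged in buffer
+ // and copied back after them, and LEAF (already extracted) nodes are dropped.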
+ size_t i, j, skip;
+ for (i = start, j = 0, skip = 0; i < end; ++i) {
+ if (leaves[i]->color == DMNSN_PRTREE_LEFT) {
+ leaves[i - skip] = leaves[i];
+ } else {
+ if (leaves[i]->color == DMNSN_PRTREE_RIGHT) {
+ buffer[j] = leaves[i];
+ ++j;
+ }
+ ++skip;
+ }
+ }
+
+ size_t mid = i - skip;
+ for (i = 0; i < j; ++i) {
+ leaves[mid + i] = buffer[i];
+ }
+}
+
+/// Split the sorted lists into the left and right subtrees.
+static void
+dmnsn_split_sorted_leaves(dmnsn_colored_prnode **sorted_leaves[DMNSN_PSEUDO_B],
+                          size_t start, size_t *midp, size_t *endp,
+                          dmnsn_colored_prnode **buffer, int i)
+{
+ size_t orig_end = *endp;
+
+ // Filter the extreme nodes in the ith list
+ dmnsn_filter_priority_leaves(sorted_leaves[i], start, endp);
+
+ // Split the ith list
+ dmnsn_split_sorted_leaves_easy(sorted_leaves[i], start, midp, *endp);
+
+ // Split the rest of the lists
+ for (size_t j = 0; j < DMNSN_PSEUDO_B; ++j) {
+ if (j == i) {
+ continue;
+ }
+
+ dmnsn_split_sorted_leaves_hard(sorted_leaves[j], buffer, start, orig_end);
+ }
+}
+
+/// Recursively constructs an implicit pseudo-PR-tree and collects the priority
+/// leaves.
+static void
+dmnsn_priority_leaves_recursive(dmnsn_colored_prnode **sorted_leaves[DMNSN_PSEUDO_B],
+                                size_t start, size_t end,
+                                dmnsn_colored_prnode **buffer,
+                                dmnsn_array *new_leaves,
+                                int comparator)
+{
+ dmnsn_add_priority_leaves(sorted_leaves, start, end, new_leaves);
+
+ size_t mid;
+ dmnsn_split_sorted_leaves(sorted_leaves, start, &mid, &end, buffer, comparator);
+
+ int next = (comparator + 1)%DMNSN_PSEUDO_B;
+
+ if (start < mid) {
+ dmnsn_priority_leaves_recursive(sorted_leaves, start, mid, buffer, new_leaves, next);
+ }
+
+ if (mid < end) {
+ dmnsn_priority_leaves_recursive(sorted_leaves, mid, end, buffer, new_leaves, next);
+ }
+}
+
+/// Sort the six leaf orderings in parallel when there are at least this many leaves.
+#define DMNSN_PARALLEL_SORT_THRESHOLD 1024
+
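+/// Payload for dmnsn_sort_leaves().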
+typedef struct {
+ dmnsn_colored_prnode *colored_leaves;
+ dmnsn_colored_prnode ***sorted_leaves;
+ size_t nleaves;
+} dmnsn_sort_leaves_payload;
+
+static dmnsn_colored_prnode **
+dmnsn_sort_leaf_array(dmnsn_colored_prnode *colored_leaves, size_t nleaves, int comparator)
+{
+ dmnsn_colored_prnode **sorted_leaves = dmnsn_malloc(nleaves*sizeof(dmnsn_colored_prnode *));
+
+ for (size_t i = 0; i < nleaves; ++i) {
+ sorted_leaves[i] = colored_leaves + i;
+ }
+
+ qsort(sorted_leaves, nleaves, sizeof(dmnsn_colored_prnode *), dmnsn_comparators[comparator]);
+
+ return sorted_leaves;
+}
+
+static int
+dmnsn_sort_leaves(void *ptr, unsigned int thread, unsigned int nthreads)
+{
+ dmnsn_sort_leaves_payload *payload = ptr;
+
+ for (unsigned int i = thread; i < DMNSN_PSEUDO_B; i += nthreads) {
+ payload->sorted_leaves[i] = dmnsn_sort_leaf_array(payload->colored_leaves, payload->nleaves, i);
+ }
+
+ return 0;
+}
+
+/// Constructs an implicit pseudo-PR-tree and returns the priority leaves.
+static dmnsn_array *
+dmnsn_priority_leaves(const dmnsn_array *leaves, unsigned int nthreads)
+{
+ dmnsn_bvh_node **leaves_arr = dmnsn_array_first(leaves);
+ size_t nleaves = dmnsn_array_size(leaves);
+
+ dmnsn_colored_prnode *colored_leaves = dmnsn_malloc(nleaves*sizeof(dmnsn_colored_prnode));
+ for (size_t i = 0; i < nleaves; ++i) {
+ colored_leaves[i].color = DMNSN_PRTREE_LEFT; // Mustn't be _LEAF
+ colored_leaves[i].node = leaves_arr[i];
+ }
+
+ dmnsn_colored_prnode **sorted_leaves[DMNSN_PSEUDO_B];
+
+ if (nleaves >= DMNSN_PARALLEL_SORT_THRESHOLD && nthreads > 1) {
+ dmnsn_sort_leaves_payload payload = {
+ .colored_leaves = colored_leaves,
+ .sorted_leaves = sorted_leaves,
+ .nleaves = nleaves,
+ };
+ dmnsn_execute_concurrently(NULL, dmnsn_sort_leaves, &payload, nthreads);
+ } else {
+ for (size_t i = 0; i < DMNSN_PSEUDO_B; ++i) {
+ sorted_leaves[i] = dmnsn_sort_leaf_array(colored_leaves, nleaves, i);
+ }
+ }
+
+ size_t buffer_size = nleaves/2;
+ dmnsn_colored_prnode **buffer = dmnsn_malloc(buffer_size*sizeof(dmnsn_colored_prnode *));
+
+ dmnsn_array *new_leaves = DMNSN_NEW_ARRAY(dmnsn_bvh_node *);
+
+ dmnsn_priority_leaves_recursive(sorted_leaves, 0, nleaves, buffer, new_leaves, 0);
+
+ dmnsn_free(buffer);
+ for (size_t i = 0; i < DMNSN_PSEUDO_B; ++i) {
+ dmnsn_free(sorted_leaves[i]);
+ }
+ dmnsn_free(colored_leaves);
+
+ return new_leaves;
+}
+
+dmnsn_bvh_node *
+dmnsn_new_prtree(const dmnsn_array *objects)
+{
+ if (dmnsn_array_size(objects) == 0) {
+ return NULL;
+ }
+
+ // Make the initial array of leaves
+ dmnsn_array *leaves = DMNSN_NEW_ARRAY(dmnsn_bvh_node *);
+ DMNSN_ARRAY_FOREACH (dmnsn_object **, object, objects) {
+ dmnsn_bvh_node *node = dmnsn_new_bvh_leaf_node(*object);
+ dmnsn_array_push(leaves, &node);
+ }
+
+ unsigned int ncpus = dmnsn_ncpus();
+ unsigned int nthreads = ncpus < DMNSN_PSEUDO_B ? ncpus : DMNSN_PSEUDO_B;
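+ // Each pass bundles the current leaves into priority-leaf nodes with at most
+ // DMNSN_PRTREE_B children each, so the number of leaves shrinks geometrically
+ // until a single root remains.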
+ while (dmnsn_array_size(leaves) > 1) {
+ dmnsn_array *new_leaves = dmnsn_priority_leaves(leaves, nthreads);
+ dmnsn_delete_array(leaves);
+ leaves = new_leaves;
+ }
+
+ dmnsn_bvh_node *root = *(dmnsn_bvh_node **)dmnsn_array_first(leaves);
+ dmnsn_delete_array(leaves);
+ return root;
+}