// bvh_cull.inc
// BVH culling routines, included inline into the body of the BVH tree template class.
public:
// cull parameters is a convenient way of passing a bunch
// of arguments through the culling functions without
// writing loads of code. Not all members are used for some cull checks
struct CullParams {
	int result_count_overall; // running total of results written, across both trees
	int result_count; // results from this tree only (set by _cull_translate_hits)
	int result_max; // capacity of result_array / subindex_array
	T **result_array; // caller-provided output buffer of item userdata
	int *subindex_array; // optional parallel output of item subindices (may be null)
	// nobody truly understands how masks are intended to work.
	uint32_t mask; // pairing mask of the cull query, tested against item types
	uint32_t pairable_type; // type of the cull query, tested against item masks
	// optional components for different tests
	Vector3 point; // used by cull_point
	BVHABB_CLASS abb; // used by cull_aabb
	typename BVHABB_CLASS::ConvexHull hull; // used by cull_convex
	typename BVHABB_CLASS::Segment segment; // used by cull_segment
	// when collision testing, non pairable moving items
	// only need to be tested against the pairable tree.
	// collisions with other non pairable items are irrelevant.
	bool test_pairable_only;
};
private:
  25. void _cull_translate_hits(CullParams &p) {
  26. int num_hits = _cull_hits.size();
  27. int left = p.result_max - p.result_count_overall;
  28. if (num_hits > left) {
  29. num_hits = left;
  30. }
  31. int out_n = p.result_count_overall;
  32. for (int n = 0; n < num_hits; n++) {
  33. uint32_t ref_id = _cull_hits[n];
  34. const ItemExtra &ex = _extra[ref_id];
  35. p.result_array[out_n] = ex.userdata;
  36. if (p.subindex_array) {
  37. p.subindex_array[out_n] = ex.subindex;
  38. }
  39. out_n++;
  40. }
  41. p.result_count = num_hits;
  42. p.result_count_overall += num_hits;
  43. }
  44. public:
  45. int cull_convex(CullParams &r_params, bool p_translate_hits = true) {
  46. _cull_hits.clear();
  47. r_params.result_count = 0;
  48. for (int n = 0; n < NUM_TREES; n++) {
  49. if (_root_node_id[n] == BVHCommon::INVALID) {
  50. continue;
  51. }
  52. _cull_convex_iterative(_root_node_id[n], r_params);
  53. }
  54. if (p_translate_hits) {
  55. _cull_translate_hits(r_params);
  56. }
  57. return r_params.result_count;
  58. }
  59. int cull_segment(CullParams &r_params, bool p_translate_hits = true) {
  60. _cull_hits.clear();
  61. r_params.result_count = 0;
  62. for (int n = 0; n < NUM_TREES; n++) {
  63. if (_root_node_id[n] == BVHCommon::INVALID) {
  64. continue;
  65. }
  66. _cull_segment_iterative(_root_node_id[n], r_params);
  67. }
  68. if (p_translate_hits) {
  69. _cull_translate_hits(r_params);
  70. }
  71. return r_params.result_count;
  72. }
  73. int cull_point(CullParams &r_params, bool p_translate_hits = true) {
  74. _cull_hits.clear();
  75. r_params.result_count = 0;
  76. for (int n = 0; n < NUM_TREES; n++) {
  77. if (_root_node_id[n] == BVHCommon::INVALID) {
  78. continue;
  79. }
  80. _cull_point_iterative(_root_node_id[n], r_params);
  81. }
  82. if (p_translate_hits) {
  83. _cull_translate_hits(r_params);
  84. }
  85. return r_params.result_count;
  86. }
  87. int cull_aabb(CullParams &r_params, bool p_translate_hits = true) {
  88. _cull_hits.clear();
  89. r_params.result_count = 0;
  90. for (int n = 0; n < NUM_TREES; n++) {
  91. if (_root_node_id[n] == BVHCommon::INVALID) {
  92. continue;
  93. }
  94. if ((n == 0) && r_params.test_pairable_only) {
  95. continue;
  96. }
  97. _cull_aabb_iterative(_root_node_id[n], r_params);
  98. }
  99. if (p_translate_hits) {
  100. _cull_translate_hits(r_params);
  101. }
  102. return r_params.result_count;
  103. }
  104. bool _cull_hits_full(const CullParams &p) {
  105. // instead of checking every hit, we can do a lazy check for this condition.
  106. // it isn't a problem if we write too much _cull_hits because they only the
  107. // result_max amount will be translated and outputted. But we might as
  108. // well stop our cull checks after the maximum has been reached.
  109. return (int)_cull_hits.size() >= p.result_max;
  110. }
  111. // write this logic once for use in all routines
  112. // double check this as a possible source of bugs in future.
  113. bool _cull_pairing_mask_test_hit(uint32_t p_maskA, uint32_t p_typeA, uint32_t p_maskB, uint32_t p_typeB) const {
  114. // double check this as a possible source of bugs in future.
  115. bool A_match_B = p_maskA & p_typeB;
  116. if (!A_match_B) {
  117. bool B_match_A = p_maskB & p_typeA;
  118. if (!B_match_A) {
  119. return false;
  120. }
  121. }
  122. return true;
  123. }
  124. void _cull_hit(uint32_t p_ref_id, CullParams &p) {
  125. // take into account masks etc
  126. // this would be more efficient to do before plane checks,
  127. // but done here for ease to get started
  128. if (USE_PAIRS) {
  129. const ItemExtra &ex = _extra[p_ref_id];
  130. if (!_cull_pairing_mask_test_hit(p.mask, p.pairable_type, ex.pairable_mask, ex.pairable_type)) {
  131. return;
  132. }
  133. }
  134. _cull_hits.push_back(p_ref_id);
  135. }
  136. bool _cull_segment_iterative(uint32_t p_node_id, CullParams &r_params) {
  137. // our function parameters to keep on a stack
  138. struct CullSegParams {
  139. uint32_t node_id;
  140. };
  141. // most of the iterative functionality is contained in this helper class
  142. BVH_IterativeInfo<CullSegParams> ii;
  143. // alloca must allocate the stack from this function, it cannot be allocated in the
  144. // helper class
  145. ii.stack = (CullSegParams *)alloca(ii.get_alloca_stacksize());
  146. // seed the stack
  147. ii.get_first()->node_id = p_node_id;
  148. CullSegParams csp;
  149. // while there are still more nodes on the stack
  150. while (ii.pop(csp)) {
  151. TNode &tnode = _nodes[csp.node_id];
  152. if (tnode.is_leaf()) {
  153. // lazy check for hits full up condition
  154. if (_cull_hits_full(r_params)) {
  155. return false;
  156. }
  157. TLeaf &leaf = _node_get_leaf(tnode);
  158. // test children individually
  159. for (int n = 0; n < leaf.num_items; n++) {
  160. const BVHABB_CLASS &aabb = leaf.get_aabb(n);
  161. if (aabb.intersects_segment(r_params.segment)) {
  162. uint32_t child_id = leaf.get_item_ref_id(n);
  163. // register hit
  164. _cull_hit(child_id, r_params);
  165. }
  166. }
  167. } else {
  168. // test children individually
  169. for (int n = 0; n < tnode.num_children; n++) {
  170. uint32_t child_id = tnode.children[n];
  171. const BVHABB_CLASS &child_abb = _nodes[child_id].aabb;
  172. if (child_abb.intersects_segment(r_params.segment)) {
  173. // add to the stack
  174. CullSegParams *child = ii.request();
  175. child->node_id = child_id;
  176. }
  177. }
  178. }
  179. } // while more nodes to pop
  180. // true indicates results are not full
  181. return true;
  182. }
  183. bool _cull_point_iterative(uint32_t p_node_id, CullParams &r_params) {
  184. // our function parameters to keep on a stack
  185. struct CullPointParams {
  186. uint32_t node_id;
  187. };
  188. // most of the iterative functionality is contained in this helper class
  189. BVH_IterativeInfo<CullPointParams> ii;
  190. // alloca must allocate the stack from this function, it cannot be allocated in the
  191. // helper class
  192. ii.stack = (CullPointParams *)alloca(ii.get_alloca_stacksize());
  193. // seed the stack
  194. ii.get_first()->node_id = p_node_id;
  195. CullPointParams cpp;
  196. // while there are still more nodes on the stack
  197. while (ii.pop(cpp)) {
  198. TNode &tnode = _nodes[cpp.node_id];
  199. // no hit with this node?
  200. if (!tnode.aabb.intersects_point(r_params.point)) {
  201. continue;
  202. }
  203. if (tnode.is_leaf()) {
  204. // lazy check for hits full up condition
  205. if (_cull_hits_full(r_params)) {
  206. return false;
  207. }
  208. TLeaf &leaf = _node_get_leaf(tnode);
  209. // test children individually
  210. for (int n = 0; n < leaf.num_items; n++) {
  211. if (leaf.get_aabb(n).intersects_point(r_params.point)) {
  212. uint32_t child_id = leaf.get_item_ref_id(n);
  213. // register hit
  214. _cull_hit(child_id, r_params);
  215. }
  216. }
  217. } else {
  218. // test children individually
  219. for (int n = 0; n < tnode.num_children; n++) {
  220. uint32_t child_id = tnode.children[n];
  221. // add to the stack
  222. CullPointParams *child = ii.request();
  223. child->node_id = child_id;
  224. }
  225. }
  226. } // while more nodes to pop
  227. // true indicates results are not full
  228. return true;
  229. }
// Iterative AABB cull of the subtree rooted at p_node_id.
// Returns false when the hit list filled up (caller may stop early),
// true when traversal completed with room to spare.
// p_fully_within marks the root as already known to be entirely inside
// the query AABB, letting descendants skip intersection tests.
bool _cull_aabb_iterative(uint32_t p_node_id, CullParams &r_params, bool p_fully_within = false) {
	// our function parameters to keep on a stack
	struct CullAABBParams {
		uint32_t node_id;
		bool fully_within; // subtree already known to be entirely inside the query AABB
	};
	// most of the iterative functionality is contained in this helper class
	BVH_IterativeInfo<CullAABBParams> ii;
	// alloca must allocate the stack from this function, it cannot be allocated in the
	// helper class
	ii.stack = (CullAABBParams *)alloca(ii.get_alloca_stacksize());
	// seed the stack
	ii.get_first()->node_id = p_node_id;
	ii.get_first()->fully_within = p_fully_within;
	CullAABBParams cap;
	// while there are still more nodes on the stack
	while (ii.pop(cap)) {
		TNode &tnode = _nodes[cap.node_id];
		if (tnode.is_leaf()) {
			// lazy check for hits full up condition
			if (_cull_hits_full(r_params)) {
				return false;
			}
			TLeaf &leaf = _node_get_leaf(tnode);
			// if fully within we can just add all items
			// as long as they pass mask checks
			if (cap.fully_within) {
				for (int n = 0; n < leaf.num_items; n++) {
					uint32_t child_id = leaf.get_item_ref_id(n);
					// register hit
					_cull_hit(child_id, r_params);
				}
			} else {
				// partial overlap : each item must be tested individually
				for (int n = 0; n < leaf.num_items; n++) {
					const BVHABB_CLASS &aabb = leaf.get_aabb(n);
					if (aabb.intersects(r_params.abb)) {
						uint32_t child_id = leaf.get_item_ref_id(n);
						// register hit
						_cull_hit(child_id, r_params);
					}
				}
			} // not fully within
		} else {
			if (!cap.fully_within) {
				// test children individually
				for (int n = 0; n < tnode.num_children; n++) {
					uint32_t child_id = tnode.children[n];
					const BVHABB_CLASS &child_abb = _nodes[child_id].aabb;
					if (child_abb.intersects(r_params.abb)) {
						// is the node totally within the aabb?
						// if so, descendants inherit fully_within and skip their tests
						bool fully_within = r_params.abb.is_other_within(child_abb);
						// add to the stack
						CullAABBParams *child = ii.request();
						// should always return valid child
						child->node_id = child_id;
						child->fully_within = fully_within;
					}
				}
			} else {
				// already fully within : all children are inside too,
				// push them without any intersection testing
				for (int n = 0; n < tnode.num_children; n++) {
					uint32_t child_id = tnode.children[n];
					// add to the stack
					CullAABBParams *child = ii.request();
					// should always return valid child
					child->node_id = child_id;
					child->fully_within = true;
				}
			}
		}
	} // while more nodes to pop
	// true indicates results are not full
	return true;
}
  303. // returns full up with results
  304. bool _cull_convex_iterative(uint32_t p_node_id, CullParams &r_params, bool p_fully_within = false) {
  305. // our function parameters to keep on a stack
  306. struct CullConvexParams {
  307. uint32_t node_id;
  308. bool fully_within;
  309. };
  310. // most of the iterative functionality is contained in this helper class
  311. BVH_IterativeInfo<CullConvexParams> ii;
  312. // alloca must allocate the stack from this function, it cannot be allocated in the
  313. // helper class
  314. ii.stack = (CullConvexParams *)alloca(ii.get_alloca_stacksize());
  315. // seed the stack
  316. ii.get_first()->node_id = p_node_id;
  317. ii.get_first()->fully_within = p_fully_within;
  318. // preallocate these as a once off to be reused
  319. uint32_t max_planes = r_params.hull.num_planes;
  320. uint32_t *plane_ids = (uint32_t *)alloca(sizeof(uint32_t) * max_planes);
  321. CullConvexParams ccp;
  322. // while there are still more nodes on the stack
  323. while (ii.pop(ccp)) {
  324. const TNode &tnode = _nodes[ccp.node_id];
  325. if (!ccp.fully_within) {
  326. typename BVHABB_CLASS::IntersectResult res = tnode.aabb.intersects_convex(r_params.hull);
  327. switch (res) {
  328. default: {
  329. continue; // miss, just move on to the next node in the stack
  330. } break;
  331. case BVHABB_CLASS::IR_PARTIAL: {
  332. } break;
  333. case BVHABB_CLASS::IR_FULL: {
  334. ccp.fully_within = true;
  335. } break;
  336. }
  337. } // if not fully within already
  338. if (tnode.is_leaf()) {
  339. // lazy check for hits full up condition
  340. if (_cull_hits_full(r_params)) {
  341. return false;
  342. }
  343. const TLeaf &leaf = _node_get_leaf(tnode);
  344. // if fully within, simply add all items to the result
  345. // (taking into account masks)
  346. if (ccp.fully_within) {
  347. for (int n = 0; n < leaf.num_items; n++) {
  348. uint32_t child_id = leaf.get_item_ref_id(n);
  349. // register hit
  350. _cull_hit(child_id, r_params);
  351. }
  352. } else {
  353. // we can either use a naive check of all the planes against the AABB,
  354. // or an optimized check, which finds in advance which of the planes can possibly
  355. // cut the AABB, and only tests those. This can be much faster.
  356. #define BVH_CONVEX_CULL_OPTIMIZED
  357. #ifdef BVH_CONVEX_CULL_OPTIMIZED
  358. // first find which planes cut the aabb
  359. uint32_t num_planes = tnode.aabb.find_cutting_planes(r_params.hull, plane_ids);
  360. BVH_ASSERT(num_planes <= max_planes);
  361. //#define BVH_CONVEX_CULL_OPTIMIZED_RIGOR_CHECK
  362. #ifdef BVH_CONVEX_CULL_OPTIMIZED_RIGOR_CHECK
  363. // rigorous check
  364. uint32_t results[MAX_ITEMS];
  365. uint32_t num_results = 0;
  366. #endif
  367. // test children individually
  368. for (int n = 0; n < leaf.num_items; n++) {
  369. //const Item &item = leaf.get_item(n);
  370. const BVHABB_CLASS &aabb = leaf.get_aabb(n);
  371. if (aabb.intersects_convex_optimized(r_params.hull, plane_ids, num_planes)) {
  372. uint32_t child_id = leaf.get_item_ref_id(n);
  373. #ifdef BVH_CONVEX_CULL_OPTIMIZED_RIGOR_CHECK
  374. results[num_results++] = child_id;
  375. #endif
  376. // register hit
  377. _cull_hit(child_id, r_params);
  378. }
  379. }
  380. #ifdef BVH_CONVEX_CULL_OPTIMIZED_RIGOR_CHECK
  381. uint32_t test_count = 0;
  382. for (int n = 0; n < leaf.num_items; n++) {
  383. const BVHABB_CLASS &aabb = leaf.get_aabb(n);
  384. if (aabb.intersects_convex_partial(r_params.hull)) {
  385. uint32_t child_id = leaf.get_item_ref_id(n);
  386. CRASH_COND(child_id != results[test_count++]);
  387. CRASH_COND(test_count > num_results);
  388. }
  389. }
  390. #endif
  391. #else
  392. // not BVH_CONVEX_CULL_OPTIMIZED
  393. // test children individually
  394. for (int n = 0; n < leaf.num_items; n++) {
  395. const BVHABB_CLASS &aabb = leaf.get_aabb(n);
  396. if (aabb.intersects_convex_partial(r_params.hull)) {
  397. uint32_t child_id = leaf.get_item_ref_id(n);
  398. // full up with results? exit early, no point in further testing
  399. if (!_cull_hit(child_id, r_params))
  400. return false;
  401. }
  402. }
  403. #endif // BVH_CONVEX_CULL_OPTIMIZED
  404. } // if not fully within
  405. } else {
  406. for (int n = 0; n < tnode.num_children; n++) {
  407. uint32_t child_id = tnode.children[n];
  408. // add to the stack
  409. CullConvexParams *child = ii.request();
  410. // should always return valid child
  411. child->node_id = child_id;
  412. child->fully_within = ccp.fully_within;
  413. }
  414. }
  415. } // while more nodes to pop
  416. // true indicates results are not full
  417. return true;
  418. }