bvh_intersector_stream.cpp

// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#include "bvh_intersector_stream.h"

#include "../geometry/intersector_iterators.h"
#include "../geometry/triangle_intersector.h"
#include "../geometry/trianglev_intersector.h"
#include "../geometry/trianglev_mb_intersector.h"
#include "../geometry/trianglei_intersector.h"
#include "../geometry/quadv_intersector.h"
#include "../geometry/quadi_intersector.h"
#include "../geometry/linei_intersector.h"
#include "../geometry/subdivpatch1_intersector.h"
#include "../geometry/object_intersector.h"
#include "../geometry/instance_intersector.h"

#include "../common/scene.h"

#include <bitset>

namespace embree
{
  namespace isa
  {
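    /* table of single-bit masks (1 << i); handed to traverseIncoherentStream() below,
       presumably so per-ray masks can be built by table lookup instead of variable shifts */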
    __aligned(64) static const int shiftTable[32] = {
      (int)1 << 0,  (int)1 << 1,  (int)1 << 2,  (int)1 << 3,  (int)1 << 4,  (int)1 << 5,  (int)1 << 6,  (int)1 << 7,
      (int)1 << 8,  (int)1 << 9,  (int)1 << 10, (int)1 << 11, (int)1 << 12, (int)1 << 13, (int)1 << 14, (int)1 << 15,
      (int)1 << 16, (int)1 << 17, (int)1 << 18, (int)1 << 19, (int)1 << 20, (int)1 << 21, (int)1 << 22, (int)1 << 23,
      (int)1 << 24, (int)1 << 25, (int)1 << 26, (int)1 << 27, (int)1 << 28, (int)1 << 29, (int)1 << 30, (int)1 << 31
    };
    template<int N, int types, bool robust, typename PrimitiveIntersector>
    __forceinline void BVHNIntersectorStream<N, types, robust, PrimitiveIntersector>::intersect(Accel::Intersectors* __restrict__ This,
                                                                                                RayHitN** inputPackets,
                                                                                                size_t numOctantRays,
                                                                                                IntersectContext* context)
    {
      /* we may traverse an empty BVH in case all geometry was invalid */
      BVH* __restrict__ bvh = (BVH*) This->ptr;
      if (bvh->root == BVH::emptyNode)
        return;

      // Only the coherent code path is implemented
      assert(context->isCoherent());
      intersectCoherent(This, (RayHitK<VSIZEL>**)inputPackets, numOctantRays, context);
    }
    template<int N, int types, bool robust, typename PrimitiveIntersector>
    template<int K>
    __forceinline void BVHNIntersectorStream<N, types, robust, PrimitiveIntersector>::intersectCoherent(Accel::Intersectors* __restrict__ This,
                                                                                                         RayHitK<K>** inputPackets,
                                                                                                         size_t numOctantRays,
                                                                                                         IntersectContext* context)
    {
      assert(context->isCoherent());

      BVH* __restrict__ bvh = (BVH*) This->ptr;
      __aligned(64) StackItemMaskCoherent stack[stackSizeSingle]; // stack of nodes
      assert(numOctantRays <= MAX_INTERNAL_STREAM_SIZE);

      __aligned(64) TravRayKStream<K, robust> packets[MAX_INTERNAL_STREAM_SIZE/K];
      __aligned(64) Frustum<robust> frustum;

      bool commonOctant = true;
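      /* build K-wide traversal packets and a frustum bounding all rays; commonOctant is
         cleared when the ray directions do not share a direction octant, in which case
         we fall back to per-packet intersection below */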
      const size_t m_active = initPacketsAndFrustum((RayK<K>**)inputPackets, numOctantRays, packets, frustum, commonOctant);
      if (unlikely(m_active == 0)) return;

      /* case of non-common origin */
      if (unlikely(!commonOctant))
      {
        const size_t numPackets = (numOctantRays+K-1)/K;
        for (size_t i = 0; i < numPackets; i++)
          This->intersect(inputPackets[i]->tnear() <= inputPackets[i]->tfar, *inputPackets[i], context);
        return;
      }

      stack[0].mask   = m_active;
      stack[0].parent = 0;
      stack[0].child  = bvh->root;

      ///////////////////////////////////////////////////////////////////////////////////
      ///////////////////////////////////////////////////////////////////////////////////
      ///////////////////////////////////////////////////////////////////////////////////

      StackItemMaskCoherent* stackPtr = stack + 1;
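      /* main traversal loop: each stack entry pairs a node with the mask of rays that
         may still hit it; inner nodes are culled against the shared frustum, leaves are
         intersected packet by packet */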
      while (1) pop:
      {
        if (unlikely(stackPtr == stack)) break;

        STAT3(normal.trav_stack_pop,1,1,1);
        stackPtr--;

        /*! pop next node */
        NodeRef cur = NodeRef(stackPtr->child);
        size_t m_trav_active = stackPtr->mask;
        assert(m_trav_active);
        NodeRef parent = stackPtr->parent;

        while (1)
        {
          if (unlikely(cur.isLeaf())) break;
          const AABBNode* __restrict__ const node = cur.getAABBNode();
          parent = cur;

          __aligned(64) size_t maskK[N];
          for (size_t i = 0; i < N; i++)
            maskK[i] = m_trav_active;

          vfloat<N> dist;
          const size_t m_node_hit = traverseCoherentStream(m_trav_active, packets, node, frustum, maskK, dist);
          if (unlikely(m_node_hit == 0)) goto pop;

          BVHNNodeTraverserStreamHitCoherent<N, types>::traverseClosestHit(cur, m_trav_active, vbool<N>((int)m_node_hit), dist, (size_t*)maskK, stackPtr);
          assert(m_trav_active);
        }

        /* non-root and leaf => full culling test for all rays */
        if (unlikely(parent != 0 && cur.isLeaf()))
        {
          const AABBNode* __restrict__ const node = parent.getAABBNode();
          size_t boxID = 0xff;
          for (size_t i = 0; i < N; i++)
            if (node->child(i) == cur) { boxID = i; break; }
          assert(boxID < N);
          assert(cur == node->child(boxID));
          m_trav_active = intersectAABBNodePacket(m_trav_active, packets, node, boxID, frustum.nf);
        }

        /*! this is a leaf node */
        assert(cur != BVH::emptyNode);
        STAT3(normal.trav_leaves, 1, 1, 1);
        size_t num; PrimitiveK<K>* prim = (PrimitiveK<K>*)cur.leaf(num);

        size_t bits = m_trav_active;

        /*! intersect stream of rays with all primitives */
        size_t lazy_node = 0;
#if defined(__SSE4_2__)
        STAT_USER(1,(popcnt(bits)+K-1)/K*4);
#endif
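        /* 'bits' holds one bit per ray; each iteration clears the K bits belonging to
           packet i and intersects that whole packet against the leaf primitives */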
        while (bits)
        {
          size_t i = bsf(bits) / K;
          const size_t m_isec = ((((size_t)1 << K)-1) << (i*K));
          assert(m_isec & bits);
          bits &= ~m_isec;

          TravRayKStream<K, robust>& p = packets[i];
          vbool<K> m_valid = p.tnear <= p.tfar;
          PrimitiveIntersectorK<K>::intersectK(m_valid, This, *inputPackets[i], context, prim, num, lazy_node);
          p.tfar = min(p.tfar, inputPackets[i]->tfar);
        }
      } // traversal + intersection
    }
    template<int N, int types, bool robust, typename PrimitiveIntersector>
    __forceinline void BVHNIntersectorStream<N, types, robust, PrimitiveIntersector>::occluded(Accel::Intersectors* __restrict__ This,
                                                                                               RayN** inputPackets,
                                                                                               size_t numOctantRays,
                                                                                               IntersectContext* context)
    {
      /* we may traverse an empty BVH in case all geometry was invalid */
      BVH* __restrict__ bvh = (BVH*) This->ptr;
      if (bvh->root == BVH::emptyNode)
        return;

      if (unlikely(context->isCoherent()))
        occludedCoherent(This, (RayK<VSIZEL>**)inputPackets, numOctantRays, context);
      else
        occludedIncoherent(This, (RayK<VSIZEX>**)inputPackets, numOctantRays, context);
    }
    template<int N, int types, bool robust, typename PrimitiveIntersector>
    template<int K>
    __noinline void BVHNIntersectorStream<N, types, robust, PrimitiveIntersector>::occludedCoherent(Accel::Intersectors* __restrict__ This,
                                                                                                     RayK<K>** inputPackets,
                                                                                                     size_t numOctantRays,
                                                                                                     IntersectContext* context)
    {
      assert(context->isCoherent());

      BVH* __restrict__ bvh = (BVH*)This->ptr;
      __aligned(64) StackItemMaskCoherent stack[stackSizeSingle]; // stack of nodes
      assert(numOctantRays <= MAX_INTERNAL_STREAM_SIZE);

      /* inactive rays should have been filtered out before */
      __aligned(64) TravRayKStream<K, robust> packets[MAX_INTERNAL_STREAM_SIZE/K];
      __aligned(64) Frustum<robust> frustum;

      bool commonOctant = true;
      size_t m_active = initPacketsAndFrustum(inputPackets, numOctantRays, packets, frustum, commonOctant);

      /* valid rays */
      if (unlikely(m_active == 0)) return;

      /* case of non-common origin */
      if (unlikely(!commonOctant))
      {
        const size_t numPackets = (numOctantRays+K-1)/K;
        for (size_t i = 0; i < numPackets; i++)
          This->occluded(inputPackets[i]->tnear() <= inputPackets[i]->tfar, *inputPackets[i], context);
        return;
      }

      stack[0].mask   = m_active;
      stack[0].parent = 0;
      stack[0].child  = bvh->root;

      ///////////////////////////////////////////////////////////////////////////////////
      ///////////////////////////////////////////////////////////////////////////////////
      ///////////////////////////////////////////////////////////////////////////////////

      StackItemMaskCoherent* stackPtr = stack + 1;
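      /* same traversal structure as intersectCoherent(), but rays that hit any occluder
         are removed from m_active so later stack entries skip them */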
      while (1) pop:
      {
        if (unlikely(stackPtr == stack)) break;

        STAT3(normal.trav_stack_pop,1,1,1);
        stackPtr--;

        /*! pop next node */
        NodeRef cur = NodeRef(stackPtr->child);
        size_t m_trav_active = stackPtr->mask & m_active;
        if (unlikely(!m_trav_active)) continue;
        assert(m_trav_active);
        NodeRef parent = stackPtr->parent;

        while (1)
        {
          if (unlikely(cur.isLeaf())) break;
          const AABBNode* __restrict__ const node = cur.getAABBNode();
          parent = cur;

          __aligned(64) size_t maskK[N];
          for (size_t i = 0; i < N; i++)
            maskK[i] = m_trav_active;

          vfloat<N> dist;
          const size_t m_node_hit = traverseCoherentStream(m_trav_active, packets, node, frustum, maskK, dist);
          if (unlikely(m_node_hit == 0)) goto pop;

          BVHNNodeTraverserStreamHitCoherent<N, types>::traverseAnyHit(cur, m_trav_active, vbool<N>((int)m_node_hit), (size_t*)maskK, stackPtr);
          assert(m_trav_active);
        }

        /* non-root and leaf => full culling test for all rays */
        if (unlikely(parent != 0 && cur.isLeaf()))
        {
          const AABBNode* __restrict__ const node = parent.getAABBNode();
          size_t boxID = 0xff;
          for (size_t i = 0; i < N; i++)
            if (node->child(i) == cur) { boxID = i; break; }
          assert(boxID < N);
          assert(cur == node->child(boxID));
          m_trav_active = intersectAABBNodePacket(m_trav_active, packets, node, boxID, frustum.nf);
        }

        /*! this is a leaf node */
        assert(cur != BVH::emptyNode);
        STAT3(normal.trav_leaves, 1, 1, 1);
        size_t num; PrimitiveK<K>* prim = (PrimitiveK<K>*)cur.leaf(num);

        size_t bits = m_trav_active & m_active;

        /*! intersect stream of rays with all primitives */
        size_t lazy_node = 0;
#if defined(__SSE4_2__)
        STAT_USER(1,(popcnt(bits)+K-1)/K*4);
#endif
        while (bits)
        {
          size_t i = bsf(bits) / K;
          const size_t m_isec = ((((size_t)1 << K)-1) << (i*K));
          assert(m_isec & bits);
          bits &= ~m_isec;

          TravRayKStream<K, robust>& p = packets[i];
          vbool<K> m_valid = p.tnear <= p.tfar;
          vbool<K> m_hit = PrimitiveIntersectorK<K>::occludedK(m_valid, This, *inputPackets[i], context, prim, num, lazy_node);
          inputPackets[i]->tfar = select(m_hit & m_valid, vfloat<K>(neg_inf), inputPackets[i]->tfar);
          m_active &= ~((size_t)movemask(m_hit) << (i*K));
        }
      } // traversal + intersection
    }
    template<int N, int types, bool robust, typename PrimitiveIntersector>
    template<int K>
    __forceinline void BVHNIntersectorStream<N, types, robust, PrimitiveIntersector>::occludedIncoherent(Accel::Intersectors* __restrict__ This,
                                                                                                          RayK<K>** inputPackets,
                                                                                                          size_t numOctantRays,
                                                                                                          IntersectContext* context)
    {
      assert(!context->isCoherent());
      assert(types & BVH_FLAG_ALIGNED_NODE);

      __aligned(64) TravRayKStream<K,robust> packet[MAX_INTERNAL_STREAM_SIZE/K];

      assert(numOctantRays <= 32);
      const size_t numPackets = (numOctantRays+K-1)/K;
      size_t m_active = 0;
      for (size_t i = 0; i < numPackets; i++)
      {
        const vfloat<K> tnear = inputPackets[i]->tnear();
        const vfloat<K> tfar  = inputPackets[i]->tfar;
        vbool<K> m_valid = (tnear <= tfar) & (tnear >= 0.0f);
        m_active |= (size_t)movemask(m_valid) << (K*i);
        const Vec3vf<K>& org = inputPackets[i]->org;
        const Vec3vf<K>& dir = inputPackets[i]->dir;
        vfloat<K> packet_min_dist = max(tnear, 0.0f);
        vfloat<K> packet_max_dist = select(m_valid, tfar, neg_inf);
        new (&packet[i]) TravRayKStream<K,robust>(org, dir, packet_min_dist, packet_max_dist);
      }

      BVH* __restrict__ bvh = (BVH*)This->ptr;

      StackItemMaskT<NodeRef> stack[stackSizeSingle];  // stack of nodes
      StackItemMaskT<NodeRef>* stackPtr = stack + 1;   // current stack pointer
      stack[0].ptr  = bvh->root;
      stack[0].mask = m_active;

      size_t terminated = ~m_active;
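      /* 'terminated' starts with all initially inactive rays set; once every active ray
         is occluded it becomes all ones and traversal stops early */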
      /* near/far offsets based on first ray */
      const NearFarPrecalculations nf(Vec3fa(packet[0].rdir.x[0], packet[0].rdir.y[0], packet[0].rdir.z[0]), N);

      while (1) pop:
      {
        if (unlikely(stackPtr == stack)) break;

        STAT3(shadow.trav_stack_pop,1,1,1);
        stackPtr--;
        NodeRef cur = NodeRef(stackPtr->ptr);
        size_t cur_mask = stackPtr->mask & (~terminated);
        if (unlikely(cur_mask == 0)) continue;

        while (true)
        {
          /*! stop if we found a leaf node */
          if (unlikely(cur.isLeaf())) break;

          const AABBNode* __restrict__ const node = cur.getAABBNode();
          const vint<N> vmask = traverseIncoherentStream(cur_mask, packet, node, nf, shiftTable);
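          /* lane i of vmask appears to hold the subset of cur_mask whose rays overlap
             child i (stored to child_mask[] below and reused as that child's mask) */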
          size_t mask = movemask(vmask != vint<N>(zero));
          if (unlikely(mask == 0)) goto pop;

          __aligned(64) unsigned int child_mask[N];
          vint<N>::storeu(child_mask, vmask); // this explicit store here causes much better code generation

          /*! one child is hit, continue with that child */
          size_t r = bscf(mask);
          assert(r < N);
          cur = node->child(r);
          BVHN<N>::prefetch(cur,types);

          cur_mask = child_mask[r];

          /* simple in order sequence */
          assert(cur != BVH::emptyNode);
          if (likely(mask == 0)) continue;
          stackPtr->ptr  = cur;
          stackPtr->mask = cur_mask;
          stackPtr++;

          for (; ;)
          {
            r = bscf(mask);
            assert(r < N);

            cur = node->child(r);
            BVHN<N>::prefetch(cur,types);
            cur_mask = child_mask[r];
            assert(cur != BVH::emptyNode);
            if (likely(mask == 0)) break;
            stackPtr->ptr  = cur;
            stackPtr->mask = cur_mask;
            stackPtr++;
          }
        }

        /*! this is a leaf node */
        assert(cur != BVH::emptyNode);
        STAT3(shadow.trav_leaves,1,1,1);
        size_t num; PrimitiveK<K>* prim = (PrimitiveK<K>*)cur.leaf(num);

        size_t bits = cur_mask;
        size_t lazy_node = 0;

        for (; bits != 0;)
        {
          const size_t rayID = bscf(bits);

          RayK<K> &ray = *inputPackets[rayID / K];
          const size_t k = rayID % K;
          if (PrimitiveIntersectorK<K>::occluded(This, ray, k, context, prim, num, lazy_node))
          {
            ray.tfar[k] = neg_inf;
            terminated |= (size_t)1 << rayID;
          }

          /* lazy node */
          if (unlikely(lazy_node))
          {
            stackPtr->ptr  = lazy_node;
            stackPtr->mask = cur_mask;
            stackPtr++;
          }
        }

        if (unlikely(terminated == (size_t)-1)) break;
      }
    }
    ////////////////////////////////////////////////////////////////////////////////
    /// ArrayIntersectorKStream Definitions
    ////////////////////////////////////////////////////////////////////////////////
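    /* per-primitive wrappers: each struct selects the ArrayIntersectorKStream
       instantiation for a given SIMD width K */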
    template<bool filter>
    struct Triangle4IntersectorStreamMoeller {
      template<int K> using Type = ArrayIntersectorKStream<K,TriangleMIntersectorKMoeller<4 COMMA K COMMA true>>;
    };

    template<bool filter>
    struct Triangle4vIntersectorStreamPluecker {
      template<int K> using Type = ArrayIntersectorKStream<K,TriangleMvIntersectorKPluecker<4 COMMA K COMMA true>>;
    };

    template<bool filter>
    struct Triangle4iIntersectorStreamMoeller {
      template<int K> using Type = ArrayIntersectorKStream<K,TriangleMiIntersectorKMoeller<4 COMMA K COMMA true>>;
    };

    template<bool filter>
    struct Triangle4iIntersectorStreamPluecker {
      template<int K> using Type = ArrayIntersectorKStream<K,TriangleMiIntersectorKPluecker<4 COMMA K COMMA true>>;
    };

    template<bool filter>
    struct Quad4vIntersectorStreamMoeller {
      template<int K> using Type = ArrayIntersectorKStream<K,QuadMvIntersectorKMoeller<4 COMMA K COMMA true>>;
    };

    template<bool filter>
    struct Quad4iIntersectorStreamMoeller {
      template<int K> using Type = ArrayIntersectorKStream<K,QuadMiIntersectorKMoeller<4 COMMA K COMMA true>>;
    };

    template<bool filter>
    struct Quad4vIntersectorStreamPluecker {
      template<int K> using Type = ArrayIntersectorKStream<K,QuadMvIntersectorKPluecker<4 COMMA K COMMA true>>;
    };

    template<bool filter>
    struct Quad4iIntersectorStreamPluecker {
      template<int K> using Type = ArrayIntersectorKStream<K,QuadMiIntersectorKPluecker<4 COMMA K COMMA true>>;
    };

    struct ObjectIntersectorStream {
      template<int K> using Type = ArrayIntersectorKStream<K,ObjectIntersectorK<K COMMA false>>;
    };

    struct InstanceIntersectorStream {
      template<int K> using Type = ArrayIntersectorKStream<K,InstanceIntersectorK<K>>;
    };
    // =====================================================================================================
    // =====================================================================================================
    // =====================================================================================================
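    /* packet fallback: the ray stream is simply split into K-wide packets and forwarded
       to the regular packet intersect/occluded paths */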
    template<int N>
    void BVHNIntersectorStreamPacketFallback<N>::intersect(Accel::Intersectors* __restrict__ This,
                                                           RayHitN** inputRays,
                                                           size_t numTotalRays,
                                                           IntersectContext* context)
    {
      if (unlikely(context->isCoherent()))
        intersectK(This, (RayHitK<VSIZEL>**)inputRays, numTotalRays, context);
      else
        intersectK(This, (RayHitK<VSIZEX>**)inputRays, numTotalRays, context);
    }

    template<int N>
    void BVHNIntersectorStreamPacketFallback<N>::occluded(Accel::Intersectors* __restrict__ This,
                                                          RayN** inputRays,
                                                          size_t numTotalRays,
                                                          IntersectContext* context)
    {
      if (unlikely(context->isCoherent()))
        occludedK(This, (RayK<VSIZEL>**)inputRays, numTotalRays, context);
      else
        occludedK(This, (RayK<VSIZEX>**)inputRays, numTotalRays, context);
    }

    template<int N>
    template<int K>
    __noinline void BVHNIntersectorStreamPacketFallback<N>::intersectK(Accel::Intersectors* __restrict__ This,
                                                                       RayHitK<K>** inputRays,
                                                                       size_t numTotalRays,
                                                                       IntersectContext* context)
    {
      /* fallback to packets */
      for (size_t i = 0; i < numTotalRays; i += K)
      {
        const vint<K> vi = vint<K>(int(i)) + vint<K>(step);
        vbool<K> valid = vi < vint<K>(int(numTotalRays));
        RayHitK<K>& ray = *(inputRays[i / K]);
        valid &= ray.tnear() <= ray.tfar;
        This->intersect(valid, ray, context);
      }
    }

    template<int N>
    template<int K>
    __noinline void BVHNIntersectorStreamPacketFallback<N>::occludedK(Accel::Intersectors* __restrict__ This,
                                                                      RayK<K>** inputRays,
                                                                      size_t numTotalRays,
                                                                      IntersectContext* context)
    {
      /* fallback to packets */
      for (size_t i = 0; i < numTotalRays; i += K)
      {
        const vint<K> vi = vint<K>(int(i)) + vint<K>(step);
        vbool<K> valid = vi < vint<K>(int(numTotalRays));
        RayK<K>& ray = *(inputRays[i / K]);
        valid &= ray.tnear() <= ray.tfar;
        This->occluded(valid, ray, context);
      }
    }
  }
}