// subdivpatch1cached_intersector.h
  1. // ======================================================================== //
  2. // Copyright 2009-2017 Intel Corporation //
  3. // //
  4. // Licensed under the Apache License, Version 2.0 (the "License"); //
  5. // you may not use this file except in compliance with the License. //
  6. // You may obtain a copy of the License at //
  7. // //
  8. // http://www.apache.org/licenses/LICENSE-2.0 //
  9. // //
  10. // Unless required by applicable law or agreed to in writing, software //
  11. // distributed under the License is distributed on an "AS IS" BASIS, //
  12. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. //
  13. // See the License for the specific language governing permissions and //
  14. // limitations under the License. //
  15. // ======================================================================== //
  16. #pragma once
  17. #include "subdivpatch1cached.h"
  18. #include "grid_soa_intersector1.h"
  19. #include "grid_soa_intersector.h"
  20. #include "../common/ray.h"
  21. namespace embree
  22. {
  23. namespace isa
  24. {
  25. template<typename T, bool cached>
  26. class SubdivPatch1CachedPrecalculations : public T
  27. {
  28. public:
  29. __forceinline SubdivPatch1CachedPrecalculations (const Ray& ray, const void* ptr, unsigned numTimeSteps)
  30. : T(ray,ptr,numTimeSteps) {}
  31. __forceinline ~SubdivPatch1CachedPrecalculations() {
  32. if (cached && this->grid) SharedLazyTessellationCache::sharedLazyTessellationCache.unlock();
  33. }
  34. };
  35. template<int K, typename T, bool cached>
  36. class SubdivPatch1CachedPrecalculationsK : public T
  37. {
  38. public:
  39. __forceinline SubdivPatch1CachedPrecalculationsK (const vbool<K>& valid, RayK<K>& ray, unsigned numTimeSteps)
  40. : T(valid,ray,numTimeSteps) {}
  41. __forceinline ~SubdivPatch1CachedPrecalculationsK() {
  42. if (cached && this->grid) SharedLazyTessellationCache::sharedLazyTessellationCache.unlock();
  43. }
  44. };
/*! Single-ray intersector for cached subdivision patches (static time step). */
template<bool cached>
class SubdivPatch1CachedIntersector1
{
public:
  typedef SubdivPatch1Cached Primitive;
  typedef SubdivPatch1CachedPrecalculations<GridSOAIntersector1::Precalculations,cached> Precalculations;

  /*! Resolves the tessellation grid for the patch and publishes its root
   *  node in 'lazy_node' for the traverser to descend into.
   *  Always returns false (no hit is produced here). */
  static __forceinline bool processLazyNode(Precalculations& pre, IntersectContext* context, const Primitive* prim_i, size_t& lazy_node)
  {
    Primitive* prim = (Primitive*) prim_i;
    GridSOA* grid = nullptr;
    if (cached)
    {
      Scene* scene = context->scene;
      /* release the lock still held for the previously visited grid before
         acquiring a new cache entry — order matters here */
      if (pre.grid) SharedLazyTessellationCache::sharedLazyTessellationCache.unlock();
      /* lookup builds the grid lazily on a cache miss; NOTE(review): lookup
         appears to return with the cache locked — the unlock above and the
         destructor of Precalculations rely on that; confirm against
         SharedLazyTessellationCache */
      grid = (GridSOA*) SharedLazyTessellationCache::lookup(prim->entry(),scene->commitCounterSubdiv,[&] () {
        auto alloc = [] (const size_t bytes) { return SharedLazyTessellationCache::sharedLazyTessellationCache.malloc(bytes); };
        return GridSOA::create((SubdivPatch1Base*)prim,1,1,scene,alloc);
      });
    }
    else {
      /* non-cached mode: the grid was pre-built and is referenced directly by the primitive */
      grid = (GridSOA*) prim->root_ref.get();
    }
    lazy_node = grid->root(0); /* single time step -> root of time slice 0 */
    pre.grid = grid;           /* record grid so the destructor knows to unlock */
    return false;
  }

  /*! Intersect a ray with the primitive. */
  static __forceinline void intersect(Precalculations& pre, Ray& ray, IntersectContext* context, const Primitive* prim, size_t ty, size_t& lazy_node)
  {
    /* ty == 0 is a leaf with grid data; otherwise resolve the lazy subtree */
    if (likely(ty == 0)) GridSOAIntersector1::intersect(pre,ray,context,prim,ty,lazy_node);
    else processLazyNode(pre,context,prim,lazy_node);
  }

  /* overload with extra 'ty0' argument (ignored); forwards to the version above */
  static __forceinline void intersect(Precalculations& pre, Ray& ray, IntersectContext* context, size_t ty0, const Primitive* prim, size_t ty, size_t& lazy_node) {
    intersect(pre,ray,context,prim,ty,lazy_node);
  }

  /*! Test if the ray is occluded by the primitive */
  static __forceinline bool occluded(Precalculations& pre, Ray& ray, IntersectContext* context, const Primitive* prim, size_t ty, size_t& lazy_node)
  {
    if (likely(ty == 0)) return GridSOAIntersector1::occluded(pre,ray,context,prim,ty,lazy_node);
    else return processLazyNode(pre,context,prim,lazy_node);
  }

  /* overload with extra 'ty0' argument (ignored); forwards to the version above */
  static __forceinline bool occluded(Precalculations& pre, Ray& ray, IntersectContext* context, size_t ty0, const Primitive* prim, size_t ty, size_t& lazy_node) {
    return occluded(pre,ray,context,prim,ty,lazy_node);
  }
};
/*! Single-ray intersector for cached subdivision patches with motion blur
 *  (multiple time steps). */
template<bool cached>
class SubdivPatch1MBlurCachedIntersector1
{
public:
  typedef SubdivPatch1Cached Primitive;
  typedef SubdivPatch1CachedPrecalculations<GridSOAMBlurIntersector1::Precalculations,cached> Precalculations;

  /*! Resolves the tessellation grid for the patch and publishes the root of
   *  the time slice selected by the ray's time in 'lazy_node'.
   *  Always returns false (no hit is produced here). The 'ray' parameter is
   *  not read directly here — presumably pre.itime() was derived from
   *  ray.time by the Precalculations; confirm against GridSOAMBlurIntersector1. */
  static __forceinline bool processLazyNode(Precalculations& pre, Ray& ray, IntersectContext* context, const Primitive* prim_i, size_t& lazy_node)
  {
    Primitive* prim = (Primitive*) prim_i;
    GridSOA* grid = nullptr;
    if (cached)
    {
      Scene* scene = context->scene;
      /* release the lock still held for the previously visited grid before
         acquiring a new cache entry — order matters here */
      if (pre.grid) SharedLazyTessellationCache::sharedLazyTessellationCache.unlock();
      /* lookup builds the grid lazily on a cache miss, covering all of the
         mesh's time steps; NOTE(review): lookup appears to return with the
         cache locked — the unlock above relies on that */
      grid = (GridSOA*) SharedLazyTessellationCache::lookup(prim->entry(),scene->commitCounterSubdiv,[&] () {
        auto alloc = [] (const size_t bytes) { return SharedLazyTessellationCache::sharedLazyTessellationCache.malloc(bytes); };
        return GridSOA::create((SubdivPatch1Base*)prim,(unsigned)scene->get<SubdivMesh>(prim->geom)->numTimeSteps,pre.numTimeSteps(),scene,alloc);
      });
    }
    else {
      /* non-cached mode: the grid was pre-built and is referenced directly by the primitive */
      grid = (GridSOA*) prim->root_ref.get();
    }
    lazy_node = grid->root(pre.itime()); /* root of the time slice for this ray */
    pre.grid = grid;                     /* record grid so the destructor knows to unlock */
    return false;
  }

  /*! Intersect a ray with the primitive. */
  static __forceinline void intersect(Precalculations& pre, Ray& ray, IntersectContext* context, const Primitive* prim, size_t ty, size_t& lazy_node)
  {
    /* ty == 0 is a leaf with grid data; otherwise resolve the lazy subtree */
    if (likely(ty == 0)) GridSOAMBlurIntersector1::intersect(pre,ray,context,prim,ty,lazy_node);
    else processLazyNode(pre,ray,context,prim,lazy_node);
  }

  /* overload with extra 'ty0' argument (ignored); forwards to the version above */
  static __forceinline void intersect(Precalculations& pre, Ray& ray, IntersectContext* context, size_t ty0, const Primitive* prim, size_t ty, size_t& lazy_node) {
    intersect(pre,ray,context,prim,ty,lazy_node);
  }

  /*! Test if the ray is occluded by the primitive */
  static __forceinline bool occluded(Precalculations& pre, Ray& ray, IntersectContext* context, const Primitive* prim, size_t ty, size_t& lazy_node)
  {
    if (likely(ty == 0)) return GridSOAMBlurIntersector1::occluded(pre,ray,context,prim,ty,lazy_node);
    else return processLazyNode(pre,ray,context,prim,lazy_node);
  }

  /* overload with extra 'ty0' argument (ignored); forwards to the version above */
  static __forceinline bool occluded(Precalculations& pre, Ray& ray, IntersectContext* context, size_t ty0, const Primitive* prim, size_t ty, size_t& lazy_node) {
    return occluded(pre,ray,context,prim,ty,lazy_node);
  }
};
/*! K-wide ray packet intersector for cached subdivision patches
 *  (static time step). */
template <int K, bool cached>
struct SubdivPatch1CachedIntersectorK
{
  typedef SubdivPatch1Cached Primitive;
  typedef SubdivPatch1CachedPrecalculationsK<K,typename GridSOAIntersectorK<K>::Precalculations,cached> Precalculations;

  /*! Resolves the tessellation grid for the patch and publishes its root
   *  node in 'lazy_node' for the traverser to descend into.
   *  Always returns false (no hit is produced here). */
  static __forceinline bool processLazyNode(Precalculations& pre, IntersectContext* context, const Primitive* prim_i, size_t& lazy_node)
  {
    Primitive* prim = (Primitive*) prim_i;
    GridSOA* grid = nullptr;
    if (cached)
    {
      Scene* scene = context->scene;
      /* release the lock still held for the previously visited grid before
         acquiring a new cache entry — order matters here */
      if (pre.grid) SharedLazyTessellationCache::sharedLazyTessellationCache.unlock();
      /* lookup builds the grid lazily on a cache miss; NOTE(review): lookup
         appears to return with the cache locked — the unlock above relies on that */
      grid = (GridSOA*) SharedLazyTessellationCache::lookup(prim->entry(),scene->commitCounterSubdiv,[&] () {
        auto alloc = [] (const size_t bytes) { return SharedLazyTessellationCache::sharedLazyTessellationCache.malloc(bytes); };
        return GridSOA::create((SubdivPatch1Base*)prim,1,1,scene,alloc);
      });
    }
    else {
      /* non-cached mode: the grid was pre-built and is referenced directly by the primitive */
      grid = (GridSOA*) prim->root_ref.get();
    }
    lazy_node = grid->root(0); /* single time step -> root of time slice 0 */
    pre.grid = grid;           /* record grid so the destructor knows to unlock */
    return false;
  }

  /*! Intersect the active rays of a packet with the primitive. */
  static __forceinline void intersect(const vbool<K>& valid, Precalculations& pre, RayK<K>& ray, IntersectContext* context, const Primitive* prim, size_t ty, size_t& lazy_node)
  {
    /* ty == 0 is a leaf with grid data; otherwise resolve the lazy subtree */
    if (likely(ty == 0)) GridSOAIntersectorK<K>::intersect(valid,pre,ray,context,prim,ty,lazy_node);
    else processLazyNode(pre,context,prim,lazy_node);
  }

  /*! Occlusion test for the active rays of a packet. */
  static __forceinline vbool<K> occluded(const vbool<K>& valid, Precalculations& pre, RayK<K>& ray, IntersectContext* context, const Primitive* prim, size_t ty, size_t& lazy_node)
  {
    if (likely(ty == 0)) return GridSOAIntersectorK<K>::occluded(valid,pre,ray,context,prim,ty,lazy_node);
    else return processLazyNode(pre,context,prim,lazy_node);
  }

  /*! Intersect a single ray (lane k) of a packet with the primitive. */
  static __forceinline void intersect(Precalculations& pre, RayK<K>& ray, size_t k, IntersectContext* context, const Primitive* prim, size_t ty, size_t& lazy_node)
  {
    if (likely(ty == 0)) GridSOAIntersectorK<K>::intersect(pre,ray,k,context,prim,ty,lazy_node);
    else processLazyNode(pre,context,prim,lazy_node);
  }

  /*! Occlusion test for a single ray (lane k) of a packet. */
  static __forceinline bool occluded(Precalculations& pre, RayK<K>& ray, size_t k, IntersectContext* context, const Primitive* prim, size_t ty, size_t& lazy_node)
  {
    if (likely(ty == 0)) return GridSOAIntersectorK<K>::occluded(pre,ray,k,context,prim,ty,lazy_node);
    else return processLazyNode(pre,context,prim,lazy_node);
  }
};
  181. typedef SubdivPatch1CachedIntersectorK<4,false> SubdivPatch1Intersector4;
  182. typedef SubdivPatch1CachedIntersectorK<8,false> SubdivPatch1Intersector8;
  183. typedef SubdivPatch1CachedIntersectorK<16,false> SubdivPatch1Intersector16;
  184. typedef SubdivPatch1CachedIntersectorK<4,true> SubdivPatch1CachedIntersector4;
  185. typedef SubdivPatch1CachedIntersectorK<8,true> SubdivPatch1CachedIntersector8;
  186. typedef SubdivPatch1CachedIntersectorK<16,true> SubdivPatch1CachedIntersector16;
/*! K-wide ray packet intersector for cached subdivision patches with motion
 *  blur (multiple time steps); only single-lane traversal entry points exist. */
template <int K, bool cached>
struct SubdivPatch1MBlurCachedIntersectorK
{
  typedef SubdivPatch1Cached Primitive;
  typedef SubdivPatch1CachedPrecalculationsK<K,typename GridSOAMBlurIntersectorK<K>::Precalculations,cached> Precalculations;

  /*! Resolves the tessellation grid for the patch and publishes the root of
   *  the time slice selected for lane k in 'lazy_node'.
   *  Always returns false (no hit is produced here). The 'ray' parameter is
   *  not read directly here — presumably pre.itime(k) was derived from
   *  ray.time[k] by the Precalculations; confirm against GridSOAMBlurIntersectorK. */
  static __forceinline bool processLazyNode(Precalculations& pre, RayK<K>& ray, size_t k, IntersectContext* context, const Primitive* prim_i, size_t& lazy_node)
  {
    Primitive* prim = (Primitive*) prim_i;
    GridSOA* grid = nullptr;
    if (cached)
    {
      Scene* scene = context->scene;
      /* release the lock still held for the previously visited grid before
         acquiring a new cache entry — order matters here */
      if (pre.grid) SharedLazyTessellationCache::sharedLazyTessellationCache.unlock();
      /* lookup builds the grid lazily on a cache miss, covering all of the
         mesh's time steps; NOTE(review): lookup appears to return with the
         cache locked — the unlock above relies on that */
      grid = (GridSOA*) SharedLazyTessellationCache::lookup(prim->entry(),scene->commitCounterSubdiv,[&] () {
        auto alloc = [] (const size_t bytes) { return SharedLazyTessellationCache::sharedLazyTessellationCache.malloc(bytes); };
        return GridSOA::create((SubdivPatch1Base*)prim,(unsigned)scene->get<SubdivMesh>(prim->geom)->numTimeSteps,pre.numTimeSteps(),scene,alloc);
      });
    }
    else {
      /* non-cached mode: the grid was pre-built and is referenced directly by the primitive */
      grid = (GridSOA*) prim->root_ref.get();
    }
    lazy_node = grid->root(pre.itime(k)); /* root of the time slice for lane k */
    pre.grid = grid;                      /* record grid so the destructor knows to unlock */
    return false;
  }

  /*! Intersect a single ray (lane k) of a packet with the primitive. */
  static __forceinline void intersect(Precalculations& pre, RayK<K>& ray, size_t k, IntersectContext* context, const Primitive* prim, size_t ty, size_t& lazy_node)
  {
    /* ty == 0 is a leaf with grid data; otherwise resolve the lazy subtree */
    if (likely(ty == 0)) GridSOAMBlurIntersectorK<K>::intersect(pre,ray,k,context,prim,ty,lazy_node);
    else processLazyNode(pre,ray,k,context,prim,lazy_node);
  }

  /*! Occlusion test for a single ray (lane k) of a packet. */
  static __forceinline bool occluded(Precalculations& pre, RayK<K>& ray, size_t k, IntersectContext* context, const Primitive* prim, size_t ty, size_t& lazy_node)
  {
    if (likely(ty == 0)) return GridSOAMBlurIntersectorK<K>::occluded(pre,ray,k,context,prim,ty,lazy_node);
    else return processLazyNode(pre,ray,k,context,prim,lazy_node);
  }
};
  223. typedef SubdivPatch1MBlurCachedIntersectorK<4,false> SubdivPatch1MBlurIntersector4;
  224. typedef SubdivPatch1MBlurCachedIntersectorK<8,false> SubdivPatch1MBlurIntersector8;
  225. typedef SubdivPatch1MBlurCachedIntersectorK<16,false> SubdivPatch1MBlurIntersector16;
  226. typedef SubdivPatch1MBlurCachedIntersectorK<4,true> SubdivPatch1MBlurCachedIntersector4;
  227. typedef SubdivPatch1MBlurCachedIntersectorK<8,true> SubdivPatch1MBlurCachedIntersector8;
  228. typedef SubdivPatch1MBlurCachedIntersectorK<16,true> SubdivPatch1MBlurCachedIntersector16;
  229. }
  230. }