//********************************** Banshee Engine (www.banshee3d.com) **************************************************//
//**************** Copyright (c) 2016 Marko Pintera (marko.pintera@gmail.com). All rights reserved. **********************//
#include "BsRendererCamera.h"
#include "BsCamera.h"
#include "BsRenderable.h"
#include "BsMaterial.h"
#include "BsShader.h"
#include "BsRenderTargets.h"
#include "BsRenderAPI.h" // Needed for RenderAPICore/RenderAPIInfo used below

namespace BansheeEngine
{
	RendererCamera::RendererCamera()
		:mCamera(nullptr), mUsingRenderTargets(false)
	{ }
	RendererCamera::RendererCamera(const CameraCore* camera, StateReduction reductionMode)
		:mCamera(camera), mUsingRenderTargets(false)
	{
		update(reductionMode);
	}
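
	// Note: Sorting opaque geometry by material lets the renderer minimize render state switches, while transparent
	// geometry must be drawn back to front for alpha blending to produce correct results. update() enforces this by
	// forcing the transparent queue to distance-based sorting whenever material sorting is requested.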
	void RendererCamera::update(StateReduction reductionMode)
	{
		mOpaqueQueue = bs_shared_ptr_new<RenderQueue>(reductionMode);

		StateReduction transparentStateReduction = reductionMode;
		if (transparentStateReduction == StateReduction::Material)
			transparentStateReduction = StateReduction::Distance; // Transparent objects MUST be sorted by distance

		mTransparentQueue = bs_shared_ptr_new<RenderQueue>(transparentStateReduction);

		updatePP();
	}
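
	// Pulls the latest post-process settings from the camera and flags them as dirty so the post-processing pass
	// knows they changed.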
	void RendererCamera::updatePP()
	{
		mPostProcessInfo.settings = mCamera->getPostProcessSettings();
		mPostProcessInfo.settingDirty = true;
	}
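
	// The g-buffer is created lazily and only rebuilt when the camera's HDR or MSAA settings no longer match the
	// existing targets, so consecutive frames with unchanged settings reuse the same targets.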
	void RendererCamera::beginRendering(bool useGBuffer)
	{
		if (useGBuffer)
		{
			SPtr<ViewportCore> viewport = mCamera->getViewport();
			bool useHDR = mCamera->getFlags().isSet(CameraFlag::HDR);
			UINT32 msaaCount = mCamera->getMSAACount();

			// Render scene objects to g-buffer
			bool createGBuffer = mRenderTargets == nullptr ||
				mRenderTargets->getHDR() != useHDR ||
				mRenderTargets->getNumSamples() != msaaCount;

			if (createGBuffer)
				mRenderTargets = RenderTargets::create(viewport, useHDR, msaaCount);

			mRenderTargets->allocate();
			mUsingRenderTargets = true;
		}
	}
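
	// Counterpart to beginRendering(): empties both render queues and releases the targets allocated for this frame.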
	void RendererCamera::endRendering()
	{
		mOpaqueQueue->clear();
		mTransparentQueue->clear();

		if(mUsingRenderTargets)
		{
			mRenderTargets->release();
			mUsingRenderTargets = false;
		}
	}
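
	// Culling proceeds in increasingly precise (and increasingly expensive) steps: a layer mask check first, then a
	// frustum vs. bounding sphere test, and only then a frustum vs. AABB test. Elements that survive are queued for
	// rendering, keyed by their distance to the camera.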
	void RendererCamera::determineVisible(Vector<RendererObject>& renderables, const Vector<Bounds>& renderableBounds)
	{
		bool isOverlayCamera = mCamera->getFlags().isSet(CameraFlag::Overlay);
		if (isOverlayCamera)
			return;

		UINT64 cameraLayers = mCamera->getLayers();
		ConvexVolume worldFrustum = mCamera->getWorldFrustum();

		// Update per-object param buffers and queue render elements
		for (auto& renderableData : renderables)
		{
			RenderableCore* renderable = renderableData.renderable;
			UINT32 rendererId = renderable->getRendererId();

			if ((renderable->getLayer() & cameraLayers) == 0)
				continue;

			// Do frustum culling
			// Note: This is bound to be a bottleneck at some point. When it is, ensure that the intersect methods use
			// vector operations, as it is trivial to update them. Also consider spatial partitioning.
			const Sphere& boundingSphere = renderableBounds[rendererId].getSphere();
			if (worldFrustum.intersects(boundingSphere))
			{
				// More precise test with the axis-aligned box
				const AABox& boundingBox = renderableBounds[rendererId].getBox();
				if (worldFrustum.intersects(boundingBox))
				{
					float distanceToCamera = (mCamera->getPosition() - boundingBox.getCenter()).length();

					for (auto& renderElem : renderableData.elements)
					{
						bool isTransparent = (renderElem.material->getShader()->getFlags() & (UINT32)ShaderFlags::Transparent) != 0;

						if (isTransparent)
							mTransparentQueue->add(&renderElem, distanceToCamera);
						else
							mOpaqueQueue->add(&renderElem, distanceToCamera);
					}
				}
			}
		}

		mOpaqueQueue->sort();
		mTransparentQueue->sort();
	}
	Vector2 RendererCamera::getDeviceZTransform(const Matrix4& projMatrix)
	{
		// Returns a set of values that will transform depth buffer values (e.g. [0, 1] in DX, [-1, 1] in GL) to a
		// distance in world space. This involves applying the inverse projection transform to the depth value. When
		// you multiply a vector with the projection matrix you get [clipX, clipY, Az + B, C * z], where we don't care
		// about clipX/clipY. A is the [2, 2], B the [2, 3] and C the [3, 2] element of the projection matrix (the only
		// ones that matter for our depth value). The hardware will also automatically divide the z value by w to get
		// the depth, therefore the final formula is:
		// depth = (Az + B) / (C * z)

		// To get the z coordinate back we simply do the opposite:
		// z = B / (depth * C - A)

		// However some APIs will also do a transformation on the depth values before storing them to the texture
		// (e.g. OpenGL will transform from [-1, 1] to [0, 1]). We need to reverse that as well, therefore the final
		// formula is:
		// z = B / ((depth * (maxDepth - minDepth) + minDepth) * C - A)

		// We reorganize it so it fits the "(1.0f / (depth + y)) * x" format used in the shader:
		// z = 1.0f / (depth + minDepth/(maxDepth - minDepth) - A/((maxDepth - minDepth) * C)) * B/((maxDepth - minDepth) * C)

		RenderAPICore& rapi = RenderAPICore::instance();
		const RenderAPIInfo& rapiInfo = rapi.getAPIInfo();

		float depthRange = rapiInfo.getMaximumDepthInputValue() - rapiInfo.getMinimumDepthInputValue();
		float minDepth = rapiInfo.getMinimumDepthInputValue();

		float a = projMatrix[2][2];
		float b = projMatrix[2][3];
		float c = projMatrix[3][2];

		Vector2 output;
		output.x = b / (depthRange * c);
		output.y = minDepth / depthRange - a / (depthRange * c);

		return output;
	}
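
	// An HLSL-style sketch of how the shader side is expected to consume these values, per the
	// "(1.0f / (depth + y)) * x" format noted above (names here are illustrative, not the actual shader code):
	//
	//   // deviceZToWorldZ is the Vector2 returned by getDeviceZTransform()
	//   float worldZ = (1.0f / (deviceZ + deviceZToWorldZ.y)) * deviceZToWorldZ.x;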
	CameraShaderData RendererCamera::getShaderData()
	{
		CameraShaderData data;
		data.proj = mCamera->getProjectionMatrixRS();
		data.view = mCamera->getViewMatrix();
		data.viewProj = data.proj * data.view;
		data.invProj = data.proj.inverse();
		data.invViewProj = data.viewProj.inverse(); // Note: Calculate inverses separately (better precision possibly)

		// Construct a special inverse view-projection matrix with the projection entries that affect z and w
		// eliminated. Used to transform a vector (clip_x, clip_y, view_z, view_w), where clip_x/clip_y are in clip
		// space and view_z/view_w in view space, into world space.

		// Only projects z/w coordinates
		Matrix4 projZ = Matrix4::IDENTITY;
		projZ[2][2] = data.proj[2][2];
		projZ[2][3] = data.proj[2][3];
		projZ[3][2] = data.proj[3][2];
		projZ[3][3] = 0.0f;

		data.screenToWorld = data.invViewProj * projZ;
		data.viewDir = mCamera->getForward();
		data.viewOrigin = mCamera->getPosition();
		data.deviceZToWorldZ = getDeviceZTransform(data.proj);

		SPtr<ViewportCore> viewport = mCamera->getViewport();
		SPtr<RenderTargetCore> rt = viewport->getTarget();

		float halfWidth = viewport->getWidth() * 0.5f;
		float halfHeight = viewport->getHeight() * 0.5f;

		float rtWidth = (float)rt->getProperties().getWidth();
		float rtHeight = (float)rt->getProperties().getHeight();

		RenderAPICore& rapi = RenderAPICore::instance();
		const RenderAPIInfo& rapiInfo = rapi.getAPIInfo();
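
		// clipToUVScaleOffset maps clip-space x/y (in [-1, 1]) to UV coordinates within the viewport's region of the
		// render target, accounting for the API's half-texel offset and NDC Y direction. A sketch of the intended
		// shader-side use (illustrative, not the actual shader code):
		//
		//   float2 uv = clipPos.xy * clipToUVScaleOffset.xy + clipToUVScaleOffset.zw;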
		data.clipToUVScaleOffset.x = halfWidth / rtWidth;
		data.clipToUVScaleOffset.y = -halfHeight / rtHeight;
		data.clipToUVScaleOffset.z = viewport->getX() / rtWidth + (halfWidth + rapiInfo.getHorizontalTexelOffset()) / rtWidth;
		data.clipToUVScaleOffset.w = viewport->getY() / rtHeight + (halfHeight + rapiInfo.getVerticalTexelOffset()) / rtHeight;

		if (!rapiInfo.getNDCYAxisDown())
			data.clipToUVScaleOffset.y = -data.clipToUVScaleOffset.y;

		return data;
	}
}