// BsRendererCamera.cpp
  1. //********************************** Banshee Engine (www.banshee3d.com) **************************************************//
  2. //**************** Copyright (c) 2016 Marko Pintera ([email protected]). All rights reserved. **********************//
  3. #include "BsRendererCamera.h"
  4. #include "BsCamera.h"
  5. #include "BsRenderable.h"
  6. #include "BsMaterial.h"
  7. #include "BsShader.h"
  8. #include "BsRenderTargets.h"
  9. #include "BsRendererUtility.h"
  10. #include "BsGpuParamsSet.h"
namespace bs { namespace ct
{
	// Single shared definition of the PerCamera GPU parameter block layout;
	// every RendererCamera allocates its own buffer from this definition.
	PerCameraParamDef gPerCameraParamDef;
  14. SkyboxMat::SkyboxMat()
  15. {
  16. SPtr<GpuParams> params = mParamsSet->getGpuParams();
  17. params->getTextureParam(GPT_FRAGMENT_PROGRAM, "gSkyTex", mSkyTextureParam);
  18. }
void SkyboxMat::_initDefines(ShaderDefines& defines)
{
	// The skybox shader needs no compile-time defines; intentionally empty.
}
void SkyboxMat::bind(const SPtr<GpuParamBlockBuffer>& perCamera)
{
	// Attach the view's camera parameter block, then make this material's
	// first (and only referenced) pass the active one.
	mParamsSet->setParamBlockBuffer("PerCamera", perCamera, true);
	gRendererUtility().setPass(mMaterial, 0);
}
void SkyboxMat::setParams(const SPtr<Texture>& texture)
{
	// Assign the sky texture located in the constructor, then flush the whole
	// parameter set to the pipeline.
	mSkyTextureParam.set(texture);
	gRendererUtility().setPassParams(mParamsSet);
}
RendererCamera::RendererCamera()
	: mUsingRenderTargets(false)
{
	// Allocate the per-camera GPU parameter buffer from the shared definition.
	// Note: render queues are not created here; see the descriptor constructor
	// and setStateReductionMode().
	mParamBuffer = gPerCameraParamDef.createBuffer();
}
RendererCamera::RendererCamera(const RENDERER_VIEW_DESC& desc)
	: mViewDesc(desc), mUsingRenderTargets(false)
{
	// Allocate the per-camera GPU parameter buffer from the shared definition.
	mParamBuffer = gPerCameraParamDef.createBuffer();

	// Create the opaque/transparent render queues per the view's sort mode.
	setStateReductionMode(desc.stateReduction);
}
  44. void RendererCamera::setStateReductionMode(StateReduction reductionMode)
  45. {
  46. mOpaqueQueue = bs_shared_ptr_new<RenderQueue>(reductionMode);
  47. StateReduction transparentStateReduction = reductionMode;
  48. if (transparentStateReduction == StateReduction::Material)
  49. transparentStateReduction = StateReduction::Distance; // Transparent object MUST be sorted by distance
  50. mTransparentQueue = bs_shared_ptr_new<RenderQueue>(transparentStateReduction);
  51. }
  52. void RendererCamera::setPostProcessSettings(const SPtr<PostProcessSettings>& ppSettings)
  53. {
  54. if (mPostProcessInfo.settings == nullptr)
  55. mPostProcessInfo.settings = bs_shared_ptr_new<StandardPostProcessSettings>();
  56. SPtr<StandardPostProcessSettings> stdPPSettings = std::static_pointer_cast<StandardPostProcessSettings>(ppSettings);
  57. if (stdPPSettings != nullptr)
  58. *mPostProcessInfo.settings = *stdPPSettings;
  59. else
  60. *mPostProcessInfo.settings = StandardPostProcessSettings();
  61. mPostProcessInfo.settingDirty = true;
  62. }
  63. void RendererCamera::setTransform(const Vector3& origin, const Vector3& direction, const Matrix4& view, const Matrix4& proj)
  64. {
  65. mViewDesc.viewOrigin = origin;
  66. mViewDesc.viewDirection = direction;
  67. mViewDesc.viewTransform = view;
  68. mViewDesc.projTransform = proj;
  69. }
void RendererCamera::setView(const RENDERER_VIEW_DESC& desc)
{
	// Replaces the entire view descriptor.
	// NOTE(review): unlike the descriptor constructor, this does not re-apply
	// desc.stateReduction to the render queues — confirm callers invoke
	// setStateReductionMode() separately when the sort mode changes.
	mViewDesc = desc;
}
  74. void RendererCamera::beginRendering(bool useGBuffer)
  75. {
  76. if (useGBuffer)
  77. {
  78. // Render scene objects to g-buffer
  79. bool createGBuffer = mRenderTargets == nullptr ||
  80. mRenderTargets->getHDR() != mViewDesc.isHDR ||
  81. mRenderTargets->getNumSamples() != mViewDesc.target.numSamples;
  82. if (createGBuffer)
  83. mRenderTargets = RenderTargets::create(mViewDesc.target, mViewDesc.isHDR);
  84. mRenderTargets->allocate();
  85. mUsingRenderTargets = true;
  86. }
  87. }
  88. void RendererCamera::endRendering()
  89. {
  90. mOpaqueQueue->clear();
  91. mTransparentQueue->clear();
  92. if(mUsingRenderTargets)
  93. {
  94. mRenderTargets->release();
  95. mUsingRenderTargets = false;
  96. }
  97. }
void RendererCamera::determineVisible(const Vector<RendererObject*>& renderables, const Vector<Bounds>& renderableBounds,
	Vector<bool>& visibility)
{
	// Culls every renderable against this view's layers and frustum, records
	// the results in both the caller's 'visibility' array and the internal
	// per-view set, and queues visible elements for rendering.
	// NOTE(review): 'visibility' is written by index without being resized here —
	// presumably the caller pre-sizes it to renderables.size(); verify.
	mVisibility.clear();
	mVisibility.resize(renderables.size(), false);

	// Overlay views draw no scene geometry, so nothing becomes visible.
	if (mViewDesc.isOverlay)
		return;

	UINT64 cameraLayers = mViewDesc.visibleLayers;
	const ConvexVolume& worldFrustum = mViewDesc.cullFrustum;

	// Update per-object param buffers and queue render elements
	for(UINT32 i = 0; i < (UINT32)renderables.size(); i++)
	{
		Renderable* renderable = renderables[i]->renderable;
		UINT32 rendererId = renderable->getRendererId();

		// Skip objects on layers this camera doesn't render.
		if ((renderable->getLayer() & cameraLayers) == 0)
			continue;

		// Do frustum culling
		// Note: This is bound to be a bottleneck at some point. When it is ensure that intersect methods use vector
		// operations, as it is trivial to update them. Also consider spatial partitioning.
		// NOTE(review): bounds are indexed by rendererId while the visibility
		// arrays use loop position i — this assumes both orderings agree;
		// confirm against the caller that fills 'renderables'.
		const Sphere& boundingSphere = renderableBounds[rendererId].getSphere();
		if (worldFrustum.intersects(boundingSphere))
		{
			// More precise with the box
			const AABox& boundingBox = renderableBounds[rendererId].getBox();
			if (worldFrustum.intersects(boundingBox))
			{
				visibility[i] = true;
				mVisibility[i] = true;

				// Camera distance is the sort key used by the render queues.
				float distanceToCamera = (mViewDesc.viewOrigin - boundingBox.getCenter()).length();
				for (auto& renderElem : renderables[i]->elements)
				{
					// Transparent elements go to the distance-sorted queue.
					bool isTransparent = (renderElem.material->getShader()->getFlags() & (UINT32)ShaderFlags::Transparent) != 0;

					if (isTransparent)
						mTransparentQueue->add(&renderElem, distanceToCamera);
					else
						mOpaqueQueue->add(&renderElem, distanceToCamera);
				}
			}
		}
	}

	// Finalize the draw order within each queue.
	mOpaqueQueue->sort();
	mTransparentQueue->sort();
}
Vector2 RendererCamera::getDeviceZTransform(const Matrix4& projMatrix) const
{
	// Returns a set of values that will transform depth buffer values (e.g. [0, 1] in DX, [-1, 1] in GL) to a distance
	// in world space. This involves applying the inverse projection transform to the depth value. When you multiply
	// a vector with the projection matrix you get [clipX, clipY, Az + B, C * z], where we don't care about clipX/clipY.
	// A is [2, 2], B is [2, 3] and C is [3, 2] elements of the projection matrix (only ones that matter for our depth
	// value). The hardware will also automatically divide the z value with w to get the depth, therefore the final
	// formula is:
	// depth = (Az + B) / (C * z)

	// To get the z coordinate back we simply do the opposite:
	// z = B / (depth * C - A)

	// However some APIs will also do a transformation on the depth values before storing them to the texture
	// (e.g. OpenGL will transform from [-1, 1] to [0, 1]). And we need to reverse that as well. Therefore the final
	// formula is:
	// z = B / ((depth * (maxDepth - minDepth) + minDepth) * C - A)

	// Finally we reorganize it because it needs to fit the "(1.0f / (depth + y)) * x" format used in the shader:
	// z = 1.0f / (depth + minDepth/(maxDepth - minDepth) - A/((maxDepth - minDepth) * C)) * B/((maxDepth - minDepth) * C)

	RenderAPI& rapi = RenderAPI::instance();
	const RenderAPIInfo& rapiInfo = rapi.getAPIInfo();

	// Depth range the current API stores in the depth buffer (1 for DX, 2 for GL).
	float depthRange = rapiInfo.getMaximumDepthInputValue() - rapiInfo.getMinimumDepthInputValue();
	float minDepth = rapiInfo.getMinimumDepthInputValue();

	float a = projMatrix[2][2];
	float b = projMatrix[2][3];
	float c = projMatrix[3][2];

	// Per the derivation above:
	// x = B/((maxDepth - minDepth) * C)
	// y = minDepth/(maxDepth - minDepth) - A/((maxDepth - minDepth) * C)
	Vector2 output;
	output.x = b / (depthRange * c);
	output.y = minDepth / depthRange - a / (depthRange * c);

	return output;
}
void RendererCamera::updatePerViewBuffer()
{
	// Refreshes the PerCamera GPU parameter block with this view's current
	// transforms, origin/direction, device-Z conversion factors and
	// clip-space-to-UV mapping.
	Matrix4 viewProj = mViewDesc.projTransform * mViewDesc.viewTransform;
	Matrix4 invViewProj = viewProj.inverse();

	gPerCameraParamDef.gMatProj.set(mParamBuffer, mViewDesc.projTransform);
	gPerCameraParamDef.gMatView.set(mParamBuffer, mViewDesc.viewTransform);
	gPerCameraParamDef.gMatViewProj.set(mParamBuffer, viewProj);
	gPerCameraParamDef.gMatInvViewProj.set(mParamBuffer, invViewProj); // Note: Calculate inverses separately (better precision possibly)
	gPerCameraParamDef.gMatInvProj.set(mParamBuffer, mViewDesc.projTransform.inverse());

	// Construct a special inverse view-projection matrix that had projection entries that affect z and w eliminated.
	// Used to transform a vector(clip_x, clip_y, view_z, view_w), where clip_x/clip_y are in clip space, and
	// view_z/view_w in view space, into world space.

	// Only projects z/w coordinates
	Matrix4 projZ = Matrix4::IDENTITY;
	projZ[2][2] = mViewDesc.projTransform[2][2];
	projZ[2][3] = mViewDesc.projTransform[2][3];
	projZ[3][2] = mViewDesc.projTransform[3][2];
	projZ[3][3] = 0.0f;

	gPerCameraParamDef.gMatScreenToWorld.set(mParamBuffer, invViewProj * projZ);
	gPerCameraParamDef.gViewDir.set(mParamBuffer, mViewDesc.viewDirection);
	gPerCameraParamDef.gViewOrigin.set(mParamBuffer, mViewDesc.viewOrigin);
	gPerCameraParamDef.gDeviceZToWorldZ.set(mParamBuffer, getDeviceZTransform(mViewDesc.projTransform));

	const Rect2I& viewRect = mViewDesc.target.viewRect;
	float halfWidth = viewRect.width * 0.5f;
	float halfHeight = viewRect.height * 0.5f;

	// NOTE(review): a zero target size falls back to 20 — presumably just an
	// arbitrary non-zero guard against division by zero; confirm intent.
	float rtWidth = mViewDesc.target.targetWidth != 0 ? (float)mViewDesc.target.targetWidth : 20.0f;
	float rtHeight = mViewDesc.target.targetHeight != 0 ? (float)mViewDesc.target.targetHeight : 20.0f;

	RenderAPI& rapi = RenderAPI::instance();
	const RenderAPIInfo& rapiInfo = rapi.getAPIInfo();

	// Scale/offset mapping clip-space coordinates into the viewport's UV
	// rectangle, including the API's half-texel offset where applicable.
	Vector4 clipToUVScaleOffset;
	clipToUVScaleOffset.x = halfWidth / rtWidth;
	clipToUVScaleOffset.y = -halfHeight / rtHeight;
	clipToUVScaleOffset.z = viewRect.x / rtWidth + (halfWidth + rapiInfo.getHorizontalTexelOffset()) / rtWidth;
	clipToUVScaleOffset.w = viewRect.y / rtHeight + (halfHeight + rapiInfo.getVerticalTexelOffset()) / rtHeight;

	// Either of these flips the Y axis, but if they're both true they cancel out
	if (rapiInfo.getUVYAxisUp() ^ rapiInfo.getNDCYAxisDown())
		clipToUVScaleOffset.y = -clipToUVScaleOffset.y;

	gPerCameraParamDef.gClipToUVScaleOffset.set(mParamBuffer, clipToUVScaleOffset);
}
  209. }}