@@ -556,6 +556,24 @@ namespace BansheeEngine
 		return ndcToWorldPoint(ndcPoint, depth);
 	}
 
+	Vector3 CameraBase::screenToWorldPointDeviceDepth(const Vector2I& screenPoint, float deviceDepth) const
+	{
+		Vector2 ndcPoint = screenToNdcPoint(screenPoint);
+		Vector4 worldPoint(ndcPoint.x, ndcPoint.y, deviceDepth, 1.0f);
+		worldPoint = getProjectionMatrixRS().inverse().multiply(worldPoint);
+		Vector3 worldPoint3D;
+
+		if (Math::abs(worldPoint.w) > 1e-7f)
+		{
+			float invW = 1.0f / worldPoint.w;
+
+			worldPoint3D.x = worldPoint.x * invW;
+			worldPoint3D.y = worldPoint.y * invW;
+			worldPoint3D.z = worldPoint.z * invW;
+		}
+
+		return viewToWorldPoint(worldPoint3D);
+	}
+
 	Vector3 CameraBase::screenToViewPoint(const Vector2I& screenPoint, float depth) const
 	{
 		Vector2 ndcPoint = screenToNdcPoint(screenPoint);
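
The new screenToWorldPointDeviceDepth() complements the existing screenToWorldPoint(): instead of a linear depth along the camera's view direction, it accepts a raw device depth, i.e. a value read straight out of the depth buffer. It builds a clip-space point from the NDC coordinates and that depth, applies the render-system-specific inverse projection (getProjectionMatrixRS()), and guards the perspective divide against a near-zero w before transforming the result from view space to world space. A minimal usage sketch follows; the camera handle and the sampled depth value are assumptions for illustration, since how the depth buffer is read back is outside the scope of this patch:

    // Reconstruct the world-space position under a screen pixel from a sampled
    // depth-buffer value. `camera` and `sampledDeviceDepth` are hypothetical;
    // any readback of the scene depth target would supply the latter.
    Vector2I pixel(512, 384);          // screen-space point, in pixels
    float sampledDeviceDepth = 0.73f;  // raw device depth (e.g. [0, 1] in DX)

    // Unlike screenToWorldPoint(), no linearization of the depth is needed:
    // this variant consumes the device depth directly.
    Vector3 worldPos = camera->screenToWorldPointDeviceDepth(pixel, sampledDeviceDepth);
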
@@ -704,45 +722,6 @@ namespace BansheeEngine
 		return Vector3(0.0f, 0.0f, 0.0f);
 	}
 
-	Vector2 CameraBase::getDeviceZTransform() const
-	{
-		// Returns a set of values that will transform depth buffer values (e.g. [0, 1] in DX, [-1, 1] in GL) to a distance
-		// in world space. This involves applying the inverse projection transform to the depth value. When you multiply
-		// a vector with the projection matrix you get [clipX, clipY, Az + B, C * z], where we don't care about clipX/clipY.
-		// A is [2, 2], B is [2, 3] and C is [3, 2] elements of the projection matrix (the only ones that matter for our depth
-		// value). The hardware will also automatically divide the z value by w to get the depth, therefore the final
-		// formula is:
-		// depth = (Az + B) / (C * z)
-
-		// To get the z coordinate back we simply do the opposite:
-		// z = B / (depth * C - A)
-
-		// However some APIs will also do a transformation on the depth values before storing them to the texture
-		// (e.g. OpenGL will transform from [-1, 1] to [0, 1]), and we need to reverse that as well. Therefore the final
-		// formula is:
-		// z = B / ((depth * (maxDepth - minDepth) + minDepth) * C - A)
-
-		// We reorganize it because it needs to fit the "(1.0f / (depth + y)) * x" format used in the shader:
-		// z = 1.0f / (depth + minDepth/(maxDepth - minDepth) - A/((maxDepth - minDepth) * C)) * B/((maxDepth - minDepth) * C)
-
-		RenderAPICore& rapi = RenderAPICore::instance();
-		const RenderAPIInfo& rapiInfo = rapi.getAPIInfo();
-		Matrix4 projMatrix = mProjMatrix;
-
-		float depthRange = rapiInfo.getMaximumDepthInputValue() - rapiInfo.getMinimumDepthInputValue();
-		float minDepth = rapiInfo.getMinimumDepthInputValue();
-
-		float a = projMatrix[2][2];
-		float b = projMatrix[2][3];
-		float c = projMatrix[3][2];
-
-		Vector2 output;
-		output.x = b / (depthRange * c);
-		output.y = minDepth / depthRange - a / (depthRange * c);
-
-		return output;
-	}
-
 	CameraCore::~CameraCore()
 	{
 		RendererManager::instance().getActive()->notifyCameraRemoved(this);
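
The removed getDeviceZTransform() packed the inverse of the projection's depth mapping into two shader constants, x and y, consumed as "(1.0f / (depth + y)) * x". The derivation in its comment block can be sanity-checked in isolation. Below is a minimal, standalone round trip assuming a D3D-style [0, 1] depth range and a standard left-handed perspective projection; the near/far values and matrix elements are assumptions for illustration, not engine code:

    // Round-trip check: view-space z -> device depth -> view-space z, using the
    // shader-friendly form derived in the removed comments.
    #include <cassert>
    #include <cmath>
    #include <cstdio>

    int main()
    {
        const float nearZ = 0.1f, farZ = 100.0f;

        // Depth-relevant elements of an assumed D3D-style perspective matrix:
        // proj * (x, y, z, 1) = (clipX, clipY, A*z + B, C*z)
        const float A = farZ / (farZ - nearZ);          // proj[2][2]
        const float B = -nearZ * farZ / (farZ - nearZ); // proj[2][3]
        const float C = 1.0f;                           // proj[3][2]

        const float depthRange = 1.0f; // maxDepth - minDepth ([0, 1] in DX)
        const float minDepth = 0.0f;

        // The two constants getDeviceZTransform() produced:
        const float x = B / (depthRange * C);
        const float y = minDepth / depthRange - A / (depthRange * C);

        // Forward: the depth value the hardware writes for a view-space z.
        const float viewZ = 25.0f;
        const float depth = (A * viewZ + B) / (C * viewZ);

        // Inverse, in the "(1.0f / (depth + y)) * x" form used by the shader.
        const float recoveredZ = (1.0f / (depth + y)) * x;

        std::printf("viewZ = %f, recovered = %f\n", viewZ, recoveredZ);
        assert(std::fabs(viewZ - recoveredZ) < 1e-3f);
        return 0;
    }
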