
Merge remote-tracking branch 'refs/remotes/raysan5/develop' into develop

victorfisac committed 9 years ago
commit c9c1263e6f

+ 3 - 1
.gitignore

@@ -66,8 +66,10 @@ src/libraylib.a
 src/libraylib.bc
 
 # oculus example
-!examples/oculus_glfw_sample/LibOVRRT32_1.dll
+!examples/oculus_glfw_sample/
+
 
 # external libraries DLLs
 !src/external/glfw3/lib/win32/glfw3.dll
 !src/external/openal_soft/lib/win32/OpenAL32.dll
+!src/external/OculusSDK/LibOVR/LibOVRRT32_1.dll

+ 26 - 14
examples/core_oculus_rift.c

@@ -2,6 +2,9 @@
 *
 *   raylib [core] example - Oculus Rift CV1
 *
+*   Compile example using:
+*   gcc -o $(NAME_PART).exe $(FILE_NAME) -L. -L..\src\external\OculusSDK\LibOVR -lLibOVRRT32_1 -lraylib -lglfw3 -lopengl32 -lgdi32 -std=c99
+*
 *   This example has been created using raylib 1.5 (www.raylib.com)
 *   raylib is licensed under an unmodified zlib/libpng license (View raylib.h for details)
 *
@@ -21,8 +24,8 @@ int main()
     InitWindow(screenWidth, screenHeight, "raylib [core] example - oculus rift");
     
     InitOculusDevice();
-
-    // Define the camera to look into our 3d world   
+    
+    // Define the camera to look into our 3d world
     Camera camera;
     camera.position = (Vector3){ 5.0f, 5.0f, 5.0f };    // Camera position
     camera.target = (Vector3){ 0.0f, 0.0f, 0.0f };      // Camera looking at point
@@ -30,8 +33,8 @@ int main()
     camera.fovy = 45.0f;                                // Camera field-of-view Y
     
     Vector3 cubePosition = { 0.0f, 0.0f, 0.0f };
-
-    SetTargetFPS(90);                   // Set our game to run at 90 frames-per-second
+    
+    //SetTargetFPS(90);                   // Set our game to run at 90 frames-per-second
     //--------------------------------------------------------------------------------------
 
     // Main game loop
@@ -47,15 +50,24 @@ int main()
         BeginDrawing();
         
             ClearBackground(RAYWHITE);
-
-            Begin3dMode(camera);
-
-                DrawCube(cubePosition, 2.0f, 2.0f, 2.0f, RED);
-                DrawCubeWires(cubePosition, 2.0f, 2.0f, 2.0f, MAROON);
-
-                DrawGrid(10, 1.0f);
-
-            End3dMode();
+            
+            BeginOculusDrawing();
+            
+                for (int eye = 0; eye < 2; eye++)
+                {
+                    Begin3dMode(camera);
+                
+                        SetOculusMatrix(eye);
+                        
+                        DrawCube(cubePosition, 2.0f, 2.0f, 2.0f, RED);
+                        DrawCubeWires(cubePosition, 2.0f, 2.0f, 2.0f, MAROON);
+                        
+                        DrawGrid(10, 1.0f);
+                    
+                    End3dMode();
+                }
+            
+            EndOculusDrawing();
 
         EndDrawing();
         //----------------------------------------------------------------------------------
@@ -63,7 +75,7 @@ int main()
 
     // De-Initialization
     //--------------------------------------------------------------------------------------
-    CloseOculusdevice();    // Close Oculus Rift device
+    CloseOculusDevice();    // Close Oculus Rift device
     
     CloseWindow();          // Close window and OpenGL context
     //--------------------------------------------------------------------------------------
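
The example above drives stereo rendering entirely through the new rlgl Oculus API. Below is a minimal sketch of the intended per-frame flow, assuming raylib is built with RLGL_OCULUS_SUPPORT; the UpdateOculusTracking() call is an assumption based on the declarations added to rlgl.h in this commit (the example above does not call it), and the window size is arbitrary.

    #include "raylib.h"     // Oculus functions are declared in rlgl.h in this commit;
                            // their exposure through raylib.h is assumed here

    int main(void)
    {
        InitWindow(1080, 600, "oculus frame flow sketch");
        InitOculusDevice();                         // Create session, swap chain, mirror, layer

        Camera camera = { { 5.0f, 5.0f, 5.0f }, { 0.0f, 0.0f, 0.0f }, { 0.0f, 1.0f, 0.0f }, 45.0f };

        while (!WindowShouldClose())
        {
            UpdateOculusTracking();                 // Assumed: refresh per-eye head poses

            BeginDrawing();
                ClearBackground(RAYWHITE);
                BeginOculusDrawing();               // Bind current swap-chain texture as render target
                    for (int eye = 0; eye < 2; eye++)
                    {
                        Begin3dMode(camera);
                            SetOculusMatrix(eye);   // Per-eye viewport, modelview and projection
                            DrawGrid(10, 1.0f);
                        End3dMode();
                    }
                EndOculusDrawing();                 // Commit chain, submit frame, blit desktop mirror
            EndDrawing();
        }

        CloseOculusDevice();
        CloseWindow();
        return 0;
    }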

+ 22 - 28
examples/oculus_glfw_sample/oculus_glfw_sample.c

@@ -23,8 +23,7 @@
 #include <string.h>
 #include <math.h>
 
-#define GLAD_IMPLEMENTATION
-#include "glad.h"               // Extensions loading library
+#include "glad.h"
 #include <GLFW/glfw3.h>         // Windows/Context and inputs management
 
 #define RLGL_STANDALONE
@@ -148,33 +147,34 @@ int main(void)
     glfwSwapInterval(0);
 
     // Load OpenGL 3.3 extensions
-    if (!gladLoadGLLoader((GLADloadproc)glfwGetProcAddress))
-    {
-        TraceLog(WARNING, "GLAD: Cannot load OpenGL extensions");
-        return 3;
-    }
-    else TraceLog(INFO, "GLAD: OpenGL extensions loaded successfully");
+    rlglLoadExtensions(glfwGetProcAddress);
+    
+    // Initialize rlgl internal buffers and OpenGL state
+    rlglInit();
+    rlglInitGraphics(0, 0, screenWidth, screenHeight);
+    rlClearColor(245, 245, 245, 255);   // Define clear color
+    rlEnableDepthTest();                // Enable DEPTH_TEST for 3D
     //--------------------------------------------------------
     
 #if defined(PLATFORM_OCULUS)
     ovrResult result = ovr_Initialize(NULL);
-    if (OVR_FAILURE(result)) TraceLog(LOG_ERROR, "OVR: Could not initialize Oculus device");
+    if (OVR_FAILURE(result)) TraceLog(ERROR, "OVR: Could not initialize Oculus device");
 
     result = ovr_Create(&session, &luid);
     if (OVR_FAILURE(result))
     {
-        TraceLog(LOG_WARNING, "OVR: Could not create Oculus session");
+        TraceLog(WARNING, "OVR: Could not create Oculus session");
         ovr_Shutdown();
     }
 
     hmdDesc = ovr_GetHmdDesc(session);
     
-    TraceLog(LOG_INFO, "OVR: Product Name: %s", hmdDesc.ProductName);
-    TraceLog(LOG_INFO, "OVR: Manufacturer: %s", hmdDesc.Manufacturer);
-    TraceLog(LOG_INFO, "OVR: Product ID: %i", hmdDesc.ProductId);
-    TraceLog(LOG_INFO, "OVR: Product Type: %i", hmdDesc.Type);
-    TraceLog(LOG_INFO, "OVR: Serian Number: %s", hmdDesc.SerialNumber);
-    TraceLog(LOG_INFO, "OVR: Resolution: %ix%i", hmdDesc.Resolution.w, hmdDesc.Resolution.h);
+    TraceLog(INFO, "OVR: Product Name: %s", hmdDesc.ProductName);
+    TraceLog(INFO, "OVR: Manufacturer: %s", hmdDesc.Manufacturer);
+    TraceLog(INFO, "OVR: Product ID: %i", hmdDesc.ProductId);
+    TraceLog(INFO, "OVR: Product Type: %i", hmdDesc.Type);
+    TraceLog(INFO, "OVR: Serian Number: %s", hmdDesc.SerialNumber);
+    TraceLog(INFO, "OVR: Resolution: %ix%i", hmdDesc.Resolution.w, hmdDesc.Resolution.h);
     
     //screenWidth = hmdDesc.Resolution.w/2;
     //screenHeight = hmdDesc.Resolution.h/2;
@@ -188,20 +188,14 @@ int main(void)
     // Recenter OVR tracking origin
     ovr_RecenterTrackingOrigin(session);
 #endif
-
-    // Initialize rlgl internal buffers and OpenGL state
-    rlglInit();
-    rlglInitGraphics(0, 0, screenWidth, screenHeight);
-    rlClearColor(245, 245, 245, 255);   // Define clear color
-    rlEnableDepthTest();                // Enable DEPTH_TEST for 3D
-    
-    Vector3 cubePosition = { 0.0f, 0.0f, 0.0f };
     
     Camera camera;
     camera.position = (Vector3){ 5.0f, 5.0f, 5.0f };    // Camera position
     camera.target = (Vector3){ 0.0f, 0.0f, 0.0f };      // Camera looking at point
     camera.up = (Vector3){ 0.0f, 1.0f, 0.0f };          // Camera up vector (rotation towards target)
     camera.fovy = 45.0f;                                // Camera field-of-view Y
+    
+    Vector3 cubePosition = { 0.0f, 0.0f, 0.0f };
     //--------------------------------------------------------------------------------------    
 
     // Main game loop    
@@ -293,7 +287,7 @@ int main(void)
         // Get session status information
         ovrSessionStatus sessionStatus;
         ovr_GetSessionStatus(session, &sessionStatus);
-        if (sessionStatus.ShouldQuit) TraceLog(LOG_WARNING, "OVR: Session should quit...");
+        if (sessionStatus.ShouldQuit) TraceLog(WARNING, "OVR: Session should quit...");
         if (sessionStatus.ShouldRecenter) ovr_RecenterTrackingOrigin(session);
 #endif
 
@@ -581,12 +575,12 @@ static OculusBuffer LoadOculusBuffer(ovrSession session, int width, int height)
 
     ovrResult result = ovr_CreateTextureSwapChainGL(session, &desc, &buffer.textureChain);
     
-    if (!OVR_SUCCESS(result)) TraceLog(LOG_WARNING, "OVR: Failed to create swap textures buffer");
+    if (!OVR_SUCCESS(result)) TraceLog(WARNING, "OVR: Failed to create swap textures buffer");
 
     int textureCount = 0;
     ovr_GetTextureSwapChainLength(session, buffer.textureChain, &textureCount);
     
-    if (!OVR_SUCCESS(result) || !textureCount) TraceLog(LOG_WARNING, "OVR: Unable to count swap chain textures");
+    if (!OVR_SUCCESS(result) || !textureCount) TraceLog(WARNING, "OVR: Unable to count swap chain textures");
 
     for (int i = 0; i < textureCount; ++i)
     {
@@ -682,7 +676,7 @@ static OculusMirror LoadOculusMirror(ovrSession session, int width, int height)
     mirrorDesc.Width = mirror.width;
     mirrorDesc.Height = mirror.height;
     
-    if (!OVR_SUCCESS(ovr_CreateMirrorTextureGL(session, &mirrorDesc, &mirror.texture))) TraceLog(LOG_WARNING, "Could not create mirror texture");
+    if (!OVR_SUCCESS(ovr_CreateMirrorTextureGL(session, &mirrorDesc, &mirror.texture))) TraceLog(WARNING, "Could not create mirror texture");
 
     glGenFramebuffers(1, &mirror.fboId);
 

+ 8 - 10
examples/oculus_glfw_sample/raymath.h

@@ -47,10 +47,16 @@
     #include "raylib.h"             // Required for structs: Vector3, Matrix
 #endif
 
+#ifdef __cplusplus
+    #define RMEXTERN extern "C"     // Functions visible from other files (no name mangling of functions in C++)
+#else
+    #define RMEXTERN extern         // Functions visible from other files
+#endif
+
 #if defined(RAYMATH_EXTERN_INLINE)
-    #define RMDEF extern inline
+    #define RMDEF RMEXTERN inline   // Functions are embedded inline (compiler generated code)
 #else
-    #define RMDEF extern
+    #define RMDEF RMEXTERN
 #endif
 
 //----------------------------------------------------------------------------------
@@ -105,10 +111,6 @@ typedef struct Quaternion {
 
 #ifndef RAYMATH_EXTERN_INLINE
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 //------------------------------------------------------------------------------------
 // Functions Declaration to work with Vector3
 //------------------------------------------------------------------------------------
@@ -166,10 +168,6 @@ RMDEF Quaternion QuaternionFromAxisAngle(Vector3 axis, float angle);  // Returns
 RMDEF void QuaternionToAxisAngle(Quaternion q, Vector3 *outAxis, float *outAngle); // Returns the rotation angle and axis for a given quaternion
 RMDEF void QuaternionTransform(Quaternion *q, Matrix mat);            // Transform a quaternion given a transformation matrix
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif  // notdef RAYMATH_EXTERN_INLINE
 
 #endif  // RAYMATH_H
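
The extern "C" block around the declarations is gone because linkage is now carried by RMDEF itself. As an illustration, using one of the prototypes listed above:

    // What RMDEF resolves to for, e.g.:
    //     RMDEF Quaternion QuaternionFromAxisAngle(Vector3 axis, float angle);
    //
    // Compiled as C:
    //     extern        Quaternion QuaternionFromAxisAngle(Vector3 axis, float angle);
    //     extern inline Quaternion QuaternionFromAxisAngle(...)   // with RAYMATH_EXTERN_INLINE
    //
    // Compiled as C++:
    //     extern "C"        Quaternion QuaternionFromAxisAngle(Vector3 axis, float angle);
    //     extern "C" inline Quaternion QuaternionFromAxisAngle(...)   // with RAYMATH_EXTERN_INLINE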

+ 413 - 8
examples/oculus_glfw_sample/rlgl.c

@@ -72,6 +72,10 @@
     #include "standard_shader.h"    // Standard shader to embed
 #endif
 
+#if defined(RLGL_OCULUS_SUPPORT)
+    #include "external/OculusSDK/LibOVR/Include/OVR_CAPI_GL.h"    // Oculus SDK for OpenGL
+#endif
+
 //----------------------------------------------------------------------------------
 // Defines and Macros
 //----------------------------------------------------------------------------------
@@ -159,11 +163,45 @@ typedef struct {
 // Draw call type
 // NOTE: Used to track required draw-calls, organized by texture
 typedef struct {
-    GLuint textureId;
     int vertexCount;
-    // TODO: Store draw state -> blending mode, shader
+    GLuint vaoId;
+    GLuint textureId;
+    GLuint shaderId;
+
+    Matrix projection;
+    Matrix modelview;
+
+    // TODO: Store additional draw state data
+    //int blendMode;
+    //GLuint fboId;
 } DrawCall;
 
+#if defined(RLGL_OCULUS_SUPPORT)
+typedef struct OculusBuffer {
+    ovrTextureSwapChain textureChain;
+    GLuint depthId;
+    GLuint fboId;
+    int width;
+    int height;
+} OculusBuffer;
+
+typedef struct OculusMirror {
+    ovrMirrorTexture texture;
+    GLuint fboId;
+    int width;
+    int height;
+} OculusMirror;
+
+typedef struct OculusLayer {
+    ovrViewScaleDesc viewScaleDesc;
+    ovrLayerEyeFov eyeLayer;      // layer 0
+    //ovrLayerQuad quadLayer;     // TODO: layer 1: '2D' quad for GUI
+    Matrix eyeProjections[2];
+    int width;
+    int height;
+} OculusLayer;
+#endif
+
 //----------------------------------------------------------------------------------
 // Global Variables Definition
 //----------------------------------------------------------------------------------
@@ -213,6 +251,17 @@ static Light lights[MAX_LIGHTS];            // Lights pool
 static int lightsCount;                     // Counts current enabled physic objects
 #endif
 
+#if defined(RLGL_OCULUS_SUPPORT)
+// OVR device variables
+static ovrSession session;              // Oculus session (pointer to ovrHmdStruct)
+static ovrHmdDesc hmdDesc;              // Oculus device descriptor parameters
+static ovrGraphicsLuid luid;            // Oculus locally unique identifier for the program (64 bit)
+static OculusLayer layer;               // Oculus drawing layer (similar to photoshop)
+static OculusBuffer buffer;             // Oculus internal buffers (texture chain and fbo)
+static OculusMirror mirror;             // Oculus mirror texture and fbo
+static unsigned int frameIndex = 0;     // Oculus frames counter, used to discard frames from chain
+#endif
+
 // Compressed textures support flags
 static bool texCompDXTSupported = false;    // DDS texture compression support
 static bool npotSupported = false;          // NPOT textures full support
@@ -228,15 +277,14 @@ static PFNGLDELETEVERTEXARRAYSOESPROC glDeleteVertexArrays;
 static int blendMode = 0;
 
 // White texture useful for plain color polys (required by shader)
-// NOTE: It's required in shapes and models modules!
-unsigned int whiteTexture;
+static unsigned int whiteTexture;
 
 //----------------------------------------------------------------------------------
 // Module specific Functions Declaration
 //----------------------------------------------------------------------------------
 #if defined(GRAPHICS_API_OPENGL_33) || defined(GRAPHICS_API_OPENGL_ES2)
 static void LoadCompressedTexture(unsigned char *data, int width, int height, int mipmapCount, int compressedFormat);
-static unsigned int LoadShaderProgram(char *vShaderStr, char *fShaderStr);  // Load custom shader strings and return program id
+static unsigned int LoadShaderProgram(const char *vShaderStr, const char *fShaderStr);  // Load custom shader strings and return program id
 
 static Shader LoadDefaultShader(void);      // Load default shader (just vertex positioning and texture coloring)
 static Shader LoadStandardShader(void);     // Load standard shader (support materials and lighting)
@@ -254,6 +302,16 @@ static void SetShaderLights(Shader shader); // Sets shader uniform values for li
 static char *ReadTextFile(const char *fileName);
 #endif
 
+#if defined(RLGL_OCULUS_SUPPORT)            // Oculus Rift functions
+static OculusBuffer LoadOculusBuffer(ovrSession session, int width, int height);    // Load Oculus required buffers
+static void UnloadOculusBuffer(ovrSession session, OculusBuffer buffer);            // Unload texture required buffers
+static OculusMirror LoadOculusMirror(ovrSession session, int width, int height);    // Load Oculus mirror buffers
+static void UnloadOculusMirror(ovrSession session, OculusMirror mirror);            // Unload Oculus mirror buffers
+static void BlitOculusMirror(ovrSession session, OculusMirror mirror);              // Copy Oculus screen buffer to mirror texture
+static OculusLayer InitOculusLayer(ovrSession session);                             // Init Oculus layer (similar to photoshop)
+static Matrix FromOvrMatrix(ovrMatrix4f ovrM);  // Convert from Oculus ovrMatrix4f struct to raymath Matrix struct
+#endif
+
 #if defined(GRAPHICS_API_OPENGL_11)
 static int GenerateMipmaps(unsigned char *data, int baseWidth, int baseHeight);
 static Color *GenNextMipmap(Color *srcData, int srcWidth, int srcHeight);
@@ -1146,6 +1204,23 @@ void rlglInitGraphics(int offsetX, int offsetY, int width, int height)
     TraceLog(INFO, "OpenGL graphic device initialized successfully");
 }
 
+// Load OpenGL extensions
+// NOTE: External loader function could be passed as a pointer
+void rlglLoadExtensions(void *loader)
+{
+#if defined(GRAPHICS_API_OPENGL_33)
+    // NOTE: glad is generated and contains only required OpenGL 3.3 Core extensions
+    if (!gladLoadGLLoader((GLADloadproc)loader)) TraceLog(WARNING, "GLAD: Cannot load OpenGL extensions");
+    else TraceLog(INFO, "GLAD: OpenGL extensions loaded successfully");
+
+    if (GLAD_GL_VERSION_3_3) TraceLog(INFO, "OpenGL 3.3 Core profile supported");
+    else TraceLog(ERROR, "OpenGL 3.3 Core profile not supported");
+
+    // With GLAD, we can check if an extension is supported using the GLAD_GL_xxx booleans
+    //if (GLAD_GL_ARB_vertex_array_object) // Use GL_ARB_vertex_array_object
+#endif
+}
+
 // Get world coordinates from screen coordinates
 Vector3 rlglUnproject(Vector3 source, Matrix proj, Matrix view)
 {
@@ -1177,11 +1252,13 @@ unsigned int rlglLoadTexture(void *data, int width, int height, int textureForma
     GLuint id = 0;
     
     // Check texture format support by OpenGL 1.1 (compressed textures not supported)
-    if ((rlGetVersion() == OPENGL_11) && (textureFormat >= 8))
+#if defined(GRAPHICS_API_OPENGL_11) 
+    if (textureFormat >= 8)
     {
         TraceLog(WARNING, "OpenGL 1.1 does not support GPU compressed texture formats");
         return id;
     }
+#endif
     
     if ((!texCompDXTSupported) && ((textureFormat == COMPRESSED_DXT1_RGB) || (textureFormat == COMPRESSED_DXT1_RGBA) ||
         (textureFormat == COMPRESSED_DXT3_RGBA) || (textureFormat == COMPRESSED_DXT5_RGBA)))
@@ -1795,8 +1872,13 @@ void rlglDrawMesh(Mesh mesh, Material material, Matrix transform)
     // NOTE: standard shader specific locations are got at render time to keep Shader struct as simple as possible (with just default shader locations)
     if (material.shader.id == standardShader.id)
     {
+        // Transpose and inverse model transformations matrix for fragment normal calculations
+        Matrix transInvTransform = transform;
+        MatrixTranspose(&transInvTransform);
+        MatrixInvert(&transInvTransform);
+        
         // Send model transformations matrix to shader
-        glUniformMatrix4fv(glGetUniformLocation(material.shader.id, "modelMatrix"), 1, false, MatrixToFloat(transform));
+        glUniformMatrix4fv(glGetUniformLocation(material.shader.id, "modelMatrix"), 1, false, MatrixToFloat(transInvTransform));
         
         // Send view transformation matrix to shader. View matrix 8, 9 and 10 are view direction vector axis values (target - position)
         glUniform3f(glGetUniformLocation(material.shader.id, "viewDir"), matView.m8, matView.m9, matView.m10);
@@ -2095,6 +2177,24 @@ void *rlglReadTexturePixels(Texture2D texture)
     return pixels;
 }
 
+/*
+// TODO: Record draw calls to be processed in batch
+// NOTE: Global state must be kept
+void rlglRecordDraw(void)
+{
+    // TODO: Before adding a new draw, check if anything changed from last stored draw
+#if defined(GRAPHICS_API_OPENGL_33) || defined(GRAPHICS_API_OPENGL_ES2)
+    draws[drawsCounter].vaoId = currentState.vaoId;             // lines.id, triangles.id, quads.id?
+    draws[drawsCounter].textureId = currentState.textureId;     // whiteTexture?
+    draws[drawsCounter].shaderId = currentState.shaderId;       // defaultShader.id
+    draws[drawsCounter].projection = projection;
+    draws[drawsCounter].modelview = modelview;
+    draws[drawsCounter].vertexCount = currentState.vertexCount;
+    
+    drawsCounter++;
+#endif
+}
+*/
 
 //----------------------------------------------------------------------------------
 // Module Functions Definition - Shaders Functions
@@ -2361,6 +2461,130 @@ void DestroyLight(Light light)
 #endif
 }
 
+#if defined(RLGL_OCULUS_SUPPORT)
+// Init Oculus Rift device
+// NOTE: Device initialization should be done before window creation?
+void InitOculusDevice(void)
+{
+    // Initialize Oculus device
+    ovrResult result = ovr_Initialize(NULL);
+    if (OVR_FAILURE(result)) TraceLog(WARNING, "OVR: Could not initialize Oculus device");
+
+    result = ovr_Create(&session, &luid);
+    if (OVR_FAILURE(result))
+    {
+        TraceLog(WARNING, "OVR: Could not create Oculus session");
+        ovr_Shutdown();
+    }
+
+    hmdDesc = ovr_GetHmdDesc(session);
+    
+    TraceLog(INFO, "OVR: Product Name: %s", hmdDesc.ProductName);
+    TraceLog(INFO, "OVR: Manufacturer: %s", hmdDesc.Manufacturer);
+    TraceLog(INFO, "OVR: Product ID: %i", hmdDesc.ProductId);
+    TraceLog(INFO, "OVR: Product Type: %i", hmdDesc.Type);
+    //TraceLog(INFO, "OVR: Serial Number: %s", hmdDesc.SerialNumber);
+    TraceLog(INFO, "OVR: Resolution: %ix%i", hmdDesc.Resolution.w, hmdDesc.Resolution.h);
+    
+    // NOTE: Oculus mirror is created at (hmdDesc.Resolution.w/2, hmdDesc.Resolution.h/2)
+    // (see the hardcoded LoadOculusMirror() call below)
+    
+    // Initialize Oculus Buffers
+    layer = InitOculusLayer(session);   
+    buffer = LoadOculusBuffer(session, layer.width, layer.height);
+    mirror = LoadOculusMirror(session, hmdDesc.Resolution.w/2, hmdDesc.Resolution.h/2);     // NOTE: hardcoded...
+    layer.eyeLayer.ColorTexture[0] = buffer.textureChain;     //SetOculusLayerTexture(eyeLayer, buffer.textureChain);
+    
+    // Recenter OVR tracking origin
+    ovr_RecenterTrackingOrigin(session);
+}
+
+// Close Oculus Rift device
+void CloseOculusDevice(void)
+{
+    UnloadOculusMirror(session, mirror);    // Unload Oculus mirror buffer
+    UnloadOculusBuffer(session, buffer);    // Unload Oculus texture buffers
+
+    ovr_Destroy(session);   // Free Oculus session data
+    ovr_Shutdown();         // Close Oculus device connection
+}
+
+// Update Oculus Rift tracking (position and orientation)
+void UpdateOculusTracking(void)
+{
+    frameIndex++;
+
+    ovrPosef eyePoses[2];
+    ovr_GetEyePoses(session, frameIndex, ovrTrue, layer.viewScaleDesc.HmdToEyeOffset, eyePoses, &layer.eyeLayer.SensorSampleTime);
+    
+    layer.eyeLayer.RenderPose[0] = eyePoses[0];
+    layer.eyeLayer.RenderPose[1] = eyePoses[1];
+}
+
+void SetOculusMatrix(int eye)
+{
+    rlViewport(layer.eyeLayer.Viewport[eye].Pos.x, layer.eyeLayer.Viewport[eye].Pos.y, layer.eyeLayer.Viewport[eye].Size.w, layer.eyeLayer.Viewport[eye].Size.h);
+
+    Quaternion eyeRPose = (Quaternion){ layer.eyeLayer.RenderPose[eye].Orientation.x, 
+                                        layer.eyeLayer.RenderPose[eye].Orientation.y, 
+                                        layer.eyeLayer.RenderPose[eye].Orientation.z, 
+                                        layer.eyeLayer.RenderPose[eye].Orientation.w };
+    QuaternionInvert(&eyeRPose);
+    Matrix eyeOrientation = QuaternionToMatrix(eyeRPose);
+    Matrix eyeTranslation = MatrixTranslate(-layer.eyeLayer.RenderPose[eye].Position.x, 
+                                            -layer.eyeLayer.RenderPose[eye].Position.y, 
+                                            -layer.eyeLayer.RenderPose[eye].Position.z);
+
+    Matrix eyeView = MatrixMultiply(eyeTranslation, eyeOrientation);
+    Matrix modelEyeView = MatrixMultiply(modelview, eyeView);  // Using internal camera modelview matrix
+
+    SetMatrixModelview(modelEyeView);
+    SetMatrixProjection(layer.eyeProjections[eye]);
+}
+
+void BeginOculusDrawing(void)
+{
+    GLuint currentTexId;
+    int currentIndex;
+    
+    ovr_GetTextureSwapChainCurrentIndex(session, buffer.textureChain, &currentIndex);
+    ovr_GetTextureSwapChainBufferGL(session, buffer.textureChain, currentIndex, &currentTexId);
+
+    glBindFramebuffer(GL_DRAW_FRAMEBUFFER, buffer.fboId);
+    glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, currentTexId, 0);
+    //glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, buffer.depthId, 0);    // Already bound
+
+    //glViewport(0, 0, buffer.width, buffer.height);        // Useful if rendering to separate framebuffers (every eye)
+    //glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);   // Same as rlClearScreenBuffers()
+    
+    // NOTE: If your application is configured to treat the texture as a linear format (e.g. GL_RGBA) 
+    // and performs linear-to-gamma conversion in GLSL or does not care about gamma-correction, then:
+    //     - Require OculusBuffer format to be OVR_FORMAT_R8G8B8A8_UNORM_SRGB
+    //     - Do NOT enable GL_FRAMEBUFFER_SRGB
+    //glEnable(GL_FRAMEBUFFER_SRGB);
+}
+
+void EndOculusDrawing(void)
+{
+    glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0);
+    glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
+    
+    ovr_CommitTextureSwapChain(session, buffer.textureChain);
+    
+    ovrLayerHeader *layers = &layer.eyeLayer.Header;
+    ovr_SubmitFrame(session, frameIndex, &layer.viewScaleDesc, &layers, 1);
+
+    // Blit mirror texture to back buffer
+    BlitOculusMirror(session, mirror);
+
+    // Get session status information
+    ovrSessionStatus sessionStatus;
+    ovr_GetSessionStatus(session, &sessionStatus);
+    if (sessionStatus.ShouldQuit) TraceLog(WARNING, "OVR: Session should quit...");
+    if (sessionStatus.ShouldRecenter) ovr_RecenterTrackingOrigin(session);
+}
+#endif
+
 //----------------------------------------------------------------------------------
 // Module specific Functions Definition
 //----------------------------------------------------------------------------------
@@ -2403,7 +2627,7 @@ static void LoadCompressedTexture(unsigned char *data, int width, int height, in
 }
 
 // Load custom shader strings and return program id
-static unsigned int LoadShaderProgram(char *vShaderStr, char *fShaderStr)
+static unsigned int LoadShaderProgram(const char *vShaderStr, const char *fShaderStr)
 {
     unsigned int program = 0;
 
@@ -3341,6 +3565,187 @@ static Color *GenNextMipmap(Color *srcData, int srcWidth, int srcHeight)
 }
 #endif
 
+#if defined(RLGL_OCULUS_SUPPORT)
+// Load Oculus required buffers: texture-swap-chain, fbo, texture-depth
+static OculusBuffer LoadOculusBuffer(ovrSession session, int width, int height)
+{
+    OculusBuffer buffer;
+    buffer.width = width;
+    buffer.height = height;
+    
+    // Create OVR texture chain
+    ovrTextureSwapChainDesc desc = {};
+    desc.Type = ovrTexture_2D;
+    desc.ArraySize = 1;
+    desc.Width = width;
+    desc.Height = height;
+    desc.MipLevels = 1;
+    desc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;   // Requires glEnable(GL_FRAMEBUFFER_SRGB);
+    desc.SampleCount = 1;
+    desc.StaticImage = ovrFalse;
+
+    ovrResult result = ovr_CreateTextureSwapChainGL(session, &desc, &buffer.textureChain);
+    
+    if (!OVR_SUCCESS(result)) TraceLog(WARNING, "OVR: Failed to create swap textures buffer");
+
+    int textureCount = 0;
+    ovr_GetTextureSwapChainLength(session, buffer.textureChain, &textureCount);
+    
+    if (!OVR_SUCCESS(result) || !textureCount) TraceLog(WARNING, "OVR: Unable to count swap chain textures");
+
+    for (int i = 0; i < textureCount; ++i)
+    {
+        GLuint chainTexId;
+        ovr_GetTextureSwapChainBufferGL(session, buffer.textureChain, i, &chainTexId);
+        glBindTexture(GL_TEXTURE_2D, chainTexId);
+        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+    }
+    
+    glBindTexture(GL_TEXTURE_2D, 0);
+    
+    /*
+    // Setup framebuffer object (using depth texture)
+    glGenFramebuffers(1, &buffer.fboId);
+    glGenTextures(1, &buffer.depthId);
+    glBindTexture(GL_TEXTURE_2D, buffer.depthId);
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+    glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT16, buffer.width, buffer.height, 0, GL_DEPTH_COMPONENT, GL_UNSIGNED_INT, NULL);
+    */
+    
+    // Setup framebuffer object (using depth renderbuffer)
+    glGenFramebuffers(1, &buffer.fboId);
+    glGenRenderbuffers(1, &buffer.depthId);
+    glBindFramebuffer(GL_DRAW_FRAMEBUFFER, buffer.fboId);
+    glBindRenderbuffer(GL_RENDERBUFFER, buffer.depthId);
+    glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT16, buffer.width, buffer.height);
+    glBindRenderbuffer(GL_RENDERBUFFER, 0);
+    glFramebufferRenderbuffer(GL_DRAW_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, buffer.depthId);
+    glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
+
+    return buffer;
+}
+
+// Unload texture required buffers
+static void UnloadOculusBuffer(ovrSession session, OculusBuffer buffer)
+{
+    if (buffer.textureChain)
+    {
+        ovr_DestroyTextureSwapChain(session, buffer.textureChain);
+        buffer.textureChain = NULL;
+    }
+
+    if (buffer.depthId != 0) glDeleteTextures(1, &buffer.depthId);
+    if (buffer.fboId != 0) glDeleteFramebuffers(1, &buffer.fboId);
+}
+
+// Load Oculus mirror buffers
+static OculusMirror LoadOculusMirror(ovrSession session, int width, int height)
+{
+    OculusMirror mirror;
+    mirror.width = width;
+    mirror.height = height;
+    
+    ovrMirrorTextureDesc mirrorDesc;
+    memset(&mirrorDesc, 0, sizeof(mirrorDesc));
+    mirrorDesc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
+    mirrorDesc.Width = mirror.width;
+    mirrorDesc.Height = mirror.height;
+    
+    if (!OVR_SUCCESS(ovr_CreateMirrorTextureGL(session, &mirrorDesc, &mirror.texture))) TraceLog(WARNING, "Could not create mirror texture");
+
+    glGenFramebuffers(1, &mirror.fboId);
+
+    return mirror;
+}
+
+// Unload Oculus mirror buffers
+static void UnloadOculusMirror(ovrSession session, OculusMirror mirror)
+{
+    if (mirror.fboId != 0) glDeleteFramebuffers(1, &mirror.fboId);
+    if (mirror.texture) ovr_DestroyMirrorTexture(session, mirror.texture);
+}
+
+// Copy Oculus screen buffer to mirror texture
+static void BlitOculusMirror(ovrSession session, OculusMirror mirror)
+{
+    GLuint mirrorTextureId;
+    
+    ovr_GetMirrorTextureBufferGL(session, mirror.texture, &mirrorTextureId);
+    
+    glBindFramebuffer(GL_READ_FRAMEBUFFER, mirror.fboId);
+    glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, mirrorTextureId, 0);
+    glBlitFramebuffer(0, 0, mirror.width, mirror.height, 0, mirror.height, mirror.width, 0, GL_COLOR_BUFFER_BIT, GL_NEAREST);
+    glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
+}
+
+// Init Oculus layer (similar to photoshop)
+static OculusLayer InitOculusLayer(ovrSession session)
+{
+    OculusLayer layer = { 0 };
+    
+    layer.viewScaleDesc.HmdSpaceToWorldScaleInMeters = 1.0f;
+
+    memset(&layer.eyeLayer, 0, sizeof(ovrLayerEyeFov));
+    layer.eyeLayer.Header.Type = ovrLayerType_EyeFov;
+    layer.eyeLayer.Header.Flags = ovrLayerFlag_TextureOriginAtBottomLeft;
+
+    ovrEyeRenderDesc eyeRenderDescs[2];
+    
+    for (int eye = 0; eye < 2; eye++)
+    {
+        eyeRenderDescs[eye] = ovr_GetRenderDesc(session, eye, hmdDesc.DefaultEyeFov[eye]);
+        ovrMatrix4f ovrPerspectiveProjection = ovrMatrix4f_Projection(eyeRenderDescs[eye].Fov, 0.01f, 10000.0f, ovrProjection_None); //ovrProjection_ClipRangeOpenGL);
+        layer.eyeProjections[eye] = FromOvrMatrix(ovrPerspectiveProjection);      // NOTE: struct ovrMatrix4f { float M[4][4] } --> struct Matrix
+
+        layer.viewScaleDesc.HmdToEyeOffset[eye] = eyeRenderDescs[eye].HmdToEyeOffset;
+        layer.eyeLayer.Fov[eye] = eyeRenderDescs[eye].Fov;
+        
+        ovrSizei eyeSize = ovr_GetFovTextureSize(session, eye, layer.eyeLayer.Fov[eye], 1.0f);
+        layer.eyeLayer.Viewport[eye].Size = eyeSize;
+        layer.eyeLayer.Viewport[eye].Pos.x = layer.width;
+        layer.eyeLayer.Viewport[eye].Pos.y = 0;
+
+        layer.height = eyeSize.h;     //std::max(renderTargetSize.y, (uint32_t)eyeSize.h);
+        layer.width += eyeSize.w;
+    }
+    
+    return layer;
+}
+
+// Convert from Oculus ovrMatrix4f struct to raymath Matrix struct
+static Matrix FromOvrMatrix(ovrMatrix4f ovrmat)
+{
+    Matrix rmat;
+    
+    rmat.m0 = ovrmat.M[0][0];
+    rmat.m1 = ovrmat.M[1][0];
+    rmat.m2 = ovrmat.M[2][0];
+    rmat.m3 = ovrmat.M[3][0];
+    rmat.m4 = ovrmat.M[0][1];
+    rmat.m5 = ovrmat.M[1][1];
+    rmat.m6 = ovrmat.M[2][1];
+    rmat.m7 = ovrmat.M[3][1];
+    rmat.m8 = ovrmat.M[0][2];
+    rmat.m9 = ovrmat.M[1][2];
+    rmat.m10 = ovrmat.M[2][2];
+    rmat.m11 = ovrmat.M[3][2];
+    rmat.m12 = ovrmat.M[0][3];
+    rmat.m13 = ovrmat.M[1][3];
+    rmat.m14 = ovrmat.M[2][3];
+    rmat.m15 = ovrmat.M[3][3];
+    
+    MatrixTranspose(&rmat);
+    
+    return rmat;
+}
+#endif
+
 #if defined(RLGL_STANDALONE)
 // Output a trace log message
 // NOTE: Expected msgType: (0)Info, (1)Error, (2)Warning

+ 11 - 1
examples/oculus_glfw_sample/rlgl.h

@@ -48,7 +48,7 @@
 
 // Choose opengl version here or just define it at compile time: -DGRAPHICS_API_OPENGL_33
 //#define GRAPHICS_API_OPENGL_11     // Only available on PLATFORM_DESKTOP
-//#define GRAPHICS_API_OPENGL_33     // Only available on PLATFORM_DESKTOP
+//#define GRAPHICS_API_OPENGL_33     // Only available on PLATFORM_DESKTOP or Oculus Rift CV1
 //#define GRAPHICS_API_OPENGL_ES2    // Only available on PLATFORM_ANDROID or PLATFORM_RPI or PLATFORM_WEB
 
 // Security check in case no GRAPHICS_API_OPENGL_* defined
@@ -296,6 +296,7 @@ void rlglInit(void);                            // Initialize rlgl (shaders, VAO
 void rlglClose(void);                           // De-init rlgl
 void rlglDraw(void);                            // Draw VAO/VBO
 void rlglInitGraphics(int offsetX, int offsetY, int width, int height);  // Initialize Graphics (OpenGL stuff)
+void rlglLoadExtensions(void *loader);          // Load OpenGL extensions
 
 unsigned int rlglLoadTexture(void *data, int width, int height, int textureFormat, int mipmapCount);    // Load texture in GPU
 RenderTexture2D rlglLoadRenderTexture(int width, int height);   // Load a texture to be used for rendering (fbo with color and depth attachments)
@@ -346,6 +347,15 @@ void DestroyLight(Light light);                                     // Destroy a
 void TraceLog(int msgType, const char *text, ...);
 #endif
 
+#if defined(RLGL_OCULUS_SUPPORT)
+void InitOculusDevice(void);                // Init Oculus Rift device
+void CloseOculusDevice(void);               // Close Oculus Rift device
+void UpdateOculusTracking(void);            // Update Oculus Rift tracking (position and orientation)
+void SetOculusMatrix(int eye);              // Set internal projection and modelview matrix depending on eyes tracking data
+void BeginOculusDrawing(void);              // Begin Oculus drawing configuration
+void EndOculusDrawing(void);                // End Oculus drawing process (and desktop mirror)
+#endif
+
 #ifdef __cplusplus
 }
 #endif

+ 27 - 19
examples/oculus_glfw_sample/standard_shader.h

@@ -1,6 +1,6 @@
 
 // Vertex shader definition to embed, no external file required
-const static unsigned char vStandardShaderStr[] = 
+static const char vStandardShaderStr[] = 
 #if defined(GRAPHICS_API_OPENGL_21)
 "#version 120                       \n"
 #elif defined(GRAPHICS_API_OPENGL_ES2)
@@ -37,7 +37,7 @@ const static unsigned char vStandardShaderStr[] =
 "}                                  \n";
 
 // Fragment shader definition to embed, no external file required
-const static unsigned char fStandardShaderStr[] = 
+static const char fStandardShaderStr[] = 
 #if defined(GRAPHICS_API_OPENGL_21)
 "#version 120                       \n"
 #elif defined(GRAPHICS_API_OPENGL_ES2)
@@ -85,13 +85,13 @@ const static unsigned char fStandardShaderStr[] =
 "{\n"
 "    vec3 surfacePos = vec3(modelMatrix*vec4(fragPosition, 1));\n"
 "    vec3 surfaceToLight = l.position - surfacePos;\n"
-"    float brightness = clamp(dot(n, surfaceToLight)/(length(surfaceToLight)*length(n)), 0, 1);\n"
+"    float brightness = clamp(float(dot(n, surfaceToLight)/(length(surfaceToLight)*length(n))), 0.0, 1.0);\n"
 "    float diff = 1.0/dot(surfaceToLight/l.radius, surfaceToLight/l.radius)*brightness*l.intensity;\n"
 "    float spec = 0.0;\n"
 "    if (diff > 0.0)\n"
 "    {\n"
 "        vec3 h = normalize(-l.direction + v);\n"
-"        spec = pow(dot(n, h), 3 + glossiness)*s;\n"
+"        spec = pow(dot(n, h), 3.0 + glossiness)*s;\n"
 "    }\n"
 "    return (diff*l.diffuse.rgb + spec*colSpecular.rgb);\n"
 "}\n"
@@ -99,23 +99,23 @@ const static unsigned char fStandardShaderStr[] =
 "vec3 CalcDirectionalLight(Light l, vec3 n, vec3 v, float s)\n"
 "{\n"
 "    vec3 lightDir = normalize(-l.direction);\n"
-"    float diff = clamp(dot(n, lightDir), 0.0, 1.0)*l.intensity;\n"
+"    float diff = clamp(float(dot(n, lightDir)), 0.0, 1.0)*l.intensity;\n"
 "    float spec = 0.0;\n"
 "    if (diff > 0.0)\n"
 "    {\n"
 "        vec3 h = normalize(lightDir + v);\n"
-"        spec = pow(dot(n, h), 3 + glossiness)*s;\n"
+"        spec = pow(dot(n, h), 3.0 + glossiness)*s;\n"
 "    }\n"
 "    return (diff*l.intensity*l.diffuse.rgb + spec*colSpecular.rgb);\n"
 "}\n"
 "\n"
 "vec3 CalcSpotLight(Light l, vec3 n, vec3 v, float s)\n"
 "{\n"
-"    vec3 surfacePos = vec3(modelMatrix*vec4(fragPosition, 1));\n"
+"    vec3 surfacePos = vec3(modelMatrix*vec4(fragPosition, 1.0));\n"
 "    vec3 lightToSurface = normalize(surfacePos - l.position);\n"
 "    vec3 lightDir = normalize(-l.direction);\n"
-"    float diff = clamp(dot(n, lightDir), 0.0, 1.0)*l.intensity;\n"
-"    float attenuation = clamp(dot(n, lightToSurface), 0.0, 1.0);\n"
+"    float diff = clamp(float(dot(n, lightDir)), 0.0, 1.0)*l.intensity;\n"
+"    float attenuation = clamp(float(dot(n, lightToSurface)), 0.0, 1.0);\n"
 "    attenuation = dot(lightToSurface, -lightDir);\n"
 "    float lightToSurfaceAngle = degrees(acos(attenuation));\n"
 "    if (lightToSurfaceAngle > l.coneAngle) attenuation = 0.0;\n"
@@ -125,37 +125,45 @@ const static unsigned char fStandardShaderStr[] =
 "    if (diffAttenuation > 0.0)\n"
 "    {\n"
 "        vec3 h = normalize(lightDir + v);\n"
-"        spec = pow(dot(n, h), 3 + glossiness)*s;\n"
+"        spec = pow(dot(n, h), 3.0 + glossiness)*s;\n"
 "    }\n"
 "    return (falloff*(diffAttenuation*l.diffuse.rgb + spec*colSpecular.rgb));\n"
 "}\n"
 "\n"
 "void main()\n"
 "{\n"
-"    mat3 normalMatrix = transpose(inverse(mat3(modelMatrix)));\n"
+"    mat3 normalMatrix = mat3(modelMatrix);\n"
 "    vec3 normal = normalize(normalMatrix*fragNormal);\n"
 "    vec3 n = normalize(normal);\n"
 "    vec3 v = normalize(viewDir);\n"
+#if defined(GRAPHICS_API_OPENGL_ES2) || defined(GRAPHICS_API_OPENGL_21)
+"    vec4 texelColor = texture2D(texture0, fragTexCoord);\n"
+#elif defined(GRAPHICS_API_OPENGL_33)
 "    vec4 texelColor = texture(texture0, fragTexCoord);\n"
+#endif
 "    vec3 lighting = colAmbient.rgb;\n"
 "    if (useNormal == 1)\n"
 "    {\n"
+#if defined(GRAPHICS_API_OPENGL_ES2) || defined(GRAPHICS_API_OPENGL_21)
+"        n *= texture2D(texture1, fragTexCoord).rgb;\n"
+#elif defined(GRAPHICS_API_OPENGL_33)
 "        n *= texture(texture1, fragTexCoord).rgb;\n"
+#endif
 "        n = normalize(n);\n"
 "    }\n"
 "    float spec = 1.0;\n"
+#if defined(GRAPHICS_API_OPENGL_ES2) || defined(GRAPHICS_API_OPENGL_21)
+"    if (useSpecular == 1) spec *= normalize(texture2D(texture2, fragTexCoord).r);\n"
+#elif defined(GRAPHICS_API_OPENGL_33)
 "    if (useSpecular == 1) spec *= normalize(texture(texture2, fragTexCoord).r);\n"
+#endif
 "    for (int i = 0; i < lightsCount; i++)\n"
 "    {\n"
 "        if (lights[i].enabled == 1)\n"
 "        {\n"
-"            switch (lights[i].type)\n"
-"            {\n"
-"                case 0: lighting += CalcPointLight(lights[i], n, v, spec); break;\n"
-"                case 1: lighting += CalcDirectionalLight(lights[i], n, v, spec); break;\n"
-"                case 2: lighting += CalcSpotLight(lights[i], n, v, spec); break;\n"
-"                default: break;\n"
-"            }\n"
+"            if(lights[i].type == 0) lighting += CalcPointLight(lights[i], n, v, spec);\n"
+"            else if(lights[i].type == 1) lighting += CalcDirectionalLight(lights[i], n, v, spec);\n"
+"            else if(lights[i].type == 2) lighting += CalcSpotLight(lights[i], n, v, spec);\n"
 "        }\n"
 "    }\n"
 #if defined(GRAPHICS_API_OPENGL_33)
@@ -163,4 +171,4 @@ const static unsigned char fStandardShaderStr[] =
 #elif defined(GRAPHICS_API_OPENGL_ES2) || defined(GRAPHICS_API_OPENGL_21)
 "   gl_FragColor = vec4(texelColor.rgb*lighting*colDiffuse.rgb, texelColor.a*colDiffuse.a); \n"
 #endif
-"}                                                        \n";
+"}\n";

+ 8 - 398
src/core.c

@@ -9,7 +9,7 @@
 *       PLATFORM_ANDROID - Only OpenGL ES 2.0 devices
 *       PLATFORM_RPI - Rapsberry Pi (tested on Raspbian)
 *       PLATFORM_WEB - Emscripten, HTML5
-*       PLATFORM_OCULUS - Oculus Rift CV1 (with desktop mirror)
+*       Oculus Rift CV1 (with desktop mirror) - See the [rlgl] module to enable it
 *
 *   On PLATFORM_DESKTOP, the external lib GLFW3 (www.glfw.com) is used to manage graphic
 *   device, OpenGL context and input on multiple operating systems (Windows, Linux, OSX).
@@ -54,18 +54,6 @@
 #include <string.h>         // String function definitions, memset()
 #include <errno.h>          // Macros for reporting and retrieving error conditions through error codes
 
-#if defined(PLATFORM_OCULUS)
-    #define PLATFORM_DESKTOP      // Enable PLATFORM_DESKTOP code-base
-#endif
-
-#if defined(PLATFORM_DESKTOP)
-    #include "external/glad.h"    // GLAD library: Manage OpenGL headers and extensions
-#endif
-
-#if defined(PLATFORM_OCULUS)
-    #include "../examples/oculus_glfw_sample/OculusSDK/LibOVR/Include/OVR_CAPI_GL.h"    // Oculus SDK for OpenGL
-#endif
-
 #if defined(PLATFORM_DESKTOP) || defined(PLATFORM_WEB)
     //#define GLFW_INCLUDE_NONE   // Disable the standard OpenGL header inclusion on GLFW3
     #include <GLFW/glfw3.h>       // GLFW3 library: Windows, OpenGL context and Input management
@@ -138,31 +126,7 @@
 //----------------------------------------------------------------------------------
 // Types and Structures Definition
 //----------------------------------------------------------------------------------
-#if defined(PLATFORM_OCULUS)
-typedef struct OculusBuffer {
-    ovrTextureSwapChain textureChain;
-    GLuint depthId;
-    GLuint fboId;
-    int width;
-    int height;
-} OculusBuffer;
-
-typedef struct OculusMirror {
-    ovrMirrorTexture texture;
-    GLuint fboId;
-    int width;
-    int height;
-} OculusMirror;
-
-typedef struct OculusLayer {
-    ovrViewScaleDesc viewScaleDesc;
-    ovrLayerEyeFov eyeLayer;      // layer 0
-    //ovrLayerQuad quadLayer;     // TODO: layer 1: '2D' quad for GUI
-    Matrix eyeProjections[2];
-    int width;
-    int height;
-} OculusLayer;
-#endif
+// ...
 
 //----------------------------------------------------------------------------------
 // Global Variables Definition
@@ -217,17 +181,6 @@ static uint64_t baseTime;               // Base time measure for hi-res timer
 static bool windowShouldClose = false;  // Flag to set window for closing
 #endif
 
-#if defined(PLATFORM_OCULUS)
-// OVR device variables
-static ovrSession session;              // Oculus session (pointer to ovrHmdStruct)
-static ovrHmdDesc hmdDesc;              // Oculus device descriptor parameters
-static ovrGraphicsLuid luid;            // Oculus locally unique identifier for the program (64 bit)
-static OculusLayer layer;               // Oculus drawing layer (similar to photoshop)
-static OculusBuffer buffer;             // Oculus internal buffers (texture chain and fbo)
-static OculusMirror mirror;             // Oculus mirror texture and fbo
-static unsigned int frameIndex = 0;     // Oculus frames counter, used to discard frames from chain
-#endif
-
 static unsigned int displayWidth, displayHeight;     // Display width and height (monitor, device-screen, LCD, ...)
 static int screenWidth, screenHeight;       // Screen width and height (used render area)
 static int renderWidth, renderHeight;       // Framebuffer width and height (render area)
@@ -237,7 +190,6 @@ static int renderOffsetX = 0;               // Offset X from render area (must b
 static int renderOffsetY = 0;               // Offset Y from render area (must be divided by 2)
 static bool fullscreen = false;             // Fullscreen mode (useful only for PLATFORM_DESKTOP)
 static Matrix downscaleView;                // Matrix to downscale view (in case screen size bigger than display size)
-static Matrix cameraView;                   // Store camera view matrix (required for Oculus Rift)
 
 #if defined(PLATFORM_DESKTOP) || defined(PLATFORM_RPI) || defined(PLATFORM_WEB)
 static const char *windowTitle;             // Window text title...
@@ -336,19 +288,6 @@ static void InitGamepad(void);                          // Init raw gamepad inpu
 static void *GamepadThread(void *arg);                  // Mouse reading thread
 #endif
 
-#if defined(PLATFORM_OCULUS)
-// Oculus Rift functions
-static Matrix FromOvrMatrix(ovrMatrix4f ovrM);
-static OculusBuffer LoadOculusBuffer(ovrSession session, int width, int height);
-static void UnloadOculusBuffer(ovrSession session, OculusBuffer buffer);
-static void SetOculusBuffer(ovrSession session, OculusBuffer buffer);
-static void UnsetOculusBuffer(OculusBuffer buffer);
-static OculusMirror LoadOculusMirror(ovrSession session, int width, int height);    // Load Oculus mirror buffers
-static void UnloadOculusMirror(ovrSession session, OculusMirror mirror);            // Unload Oculus mirror buffers
-static void BlitOculusMirror(ovrSession session, OculusMirror mirror);
-static OculusLayer InitOculusLayer(ovrSession session);
-#endif
-
 //----------------------------------------------------------------------------------
 // Module Functions Definition - Window and OpenGL Context Functions
 //----------------------------------------------------------------------------------
@@ -397,11 +336,6 @@ void InitWindow(int width, int height, const char *title)
     //emscripten_set_gamepaddisconnected_callback(NULL, 1, EmscriptenInputCallback);
 #endif
 
-#if defined(PLATFORM_OCULUS)
-    // Recenter OVR tracking origin
-    ovr_RecenterTrackingOrigin(session);
-#endif
-
     mousePosition.x = (float)screenWidth/2.0f;
     mousePosition.y = (float)screenHeight/2.0f;
 
@@ -516,64 +450,6 @@ void CloseWindow(void)
     TraceLog(INFO, "Window closed successfully");
 }
 
-#if defined(PLATFORM_OCULUS)
-// Init Oculus Rift device
-// NOTE: Device initialization should be done before window creation?
-void InitOculusDevice(void)
-{
-    // Initialize Oculus device
-    ovrResult result = ovr_Initialize(NULL);
-    if (OVR_FAILURE(result)) TraceLog(WARNING, "OVR: Could not initialize Oculus device");
-
-    result = ovr_Create(&session, &luid);
-    if (OVR_FAILURE(result))
-    {
-        TraceLog(WARNING, "OVR: Could not create Oculus session");
-        ovr_Shutdown();
-    }
-
-    hmdDesc = ovr_GetHmdDesc(session);
-    
-    TraceLog(INFO, "OVR: Product Name: %s", hmdDesc.ProductName);
-    TraceLog(INFO, "OVR: Manufacturer: %s", hmdDesc.Manufacturer);
-    TraceLog(INFO, "OVR: Product ID: %i", hmdDesc.ProductId);
-    TraceLog(INFO, "OVR: Product Type: %i", hmdDesc.Type);
-    //TraceLog(INFO, "OVR: Serial Number: %s", hmdDesc.SerialNumber);
-    TraceLog(INFO, "OVR: Resolution: %ix%i", hmdDesc.Resolution.w, hmdDesc.Resolution.h);
-    
-    // NOTE: Oculus mirror is set to defined screenWidth and screenHeight...
-    // ...ideally, it should be (hmdDesc.Resolution.w/2, hmdDesc.Resolution.h/2)
-    
-    // Initialize Oculus Buffers
-    layer = InitOculusLayer(session);   
-    buffer = LoadOculusBuffer(session, layer.width, layer.height);
-    mirror = LoadOculusMirror(session, screenWidth, screenHeight);
-    layer.eyeLayer.ColorTexture[0] = buffer.textureChain;     //SetOculusLayerTexture(eyeLayer, buffer.textureChain);
-}
-
-// Close Oculus Rift device
-void CloseOculusDevice(void)
-{
-    UnloadOculusMirror(session, mirror);    // Unload Oculus mirror buffer
-    UnloadOculusBuffer(session, buffer);    // Unload Oculus texture buffers
-
-    ovr_Destroy(session);   // Free Oculus session data
-    ovr_Shutdown();         // Close Oculus device connection
-}
-
-// Update Oculus Rift tracking (position and orientation)
-void UpdateOculusTracking(void)
-{
-    frameIndex++;
-
-    ovrPosef eyePoses[2];
-    ovr_GetEyePoses(session, frameIndex, ovrTrue, layer.viewScaleDesc.HmdToEyeOffset, eyePoses, &layer.eyeLayer.SensorSampleTime);
-    
-    layer.eyeLayer.RenderPose[0] = eyePoses[0];
-    layer.eyeLayer.RenderPose[1] = eyePoses[1];
-}
-#endif
-
 // Detect if KEY_ESCAPE pressed or Close icon pressed
 bool WindowShouldClose(void)
 {
@@ -642,10 +518,6 @@ void BeginDrawing(void)
     currentTime = GetTime();            // Number of elapsed seconds since InitTimer() was called
     updateTime = currentTime - previousTime;
     previousTime = currentTime;
-    
-#if defined(PLATFORM_OCULUS)
-    SetOculusBuffer(session, buffer);
-#endif
 
     rlClearScreenBuffers();             // Clear current framebuffers
     rlLoadIdentity();                   // Reset current matrix (MODELVIEW)
@@ -658,49 +530,7 @@ void BeginDrawing(void)
 // End canvas drawing and Swap Buffers (Double Buffering)
 void EndDrawing(void)
 {
-#if defined(PLATFORM_OCULUS)
-    for (int eye = 0; eye < 2; eye++)
-    {
-        rlViewport(layer.eyeLayer.Viewport[eye].Pos.x, layer.eyeLayer.Viewport[eye].Pos.y, layer.eyeLayer.Viewport[eye].Size.w, layer.eyeLayer.Viewport[eye].Size.h);
-
-        Quaternion eyeRPose = (Quaternion){ layer.eyeLayer.RenderPose[eye].Orientation.x, 
-                                            layer.eyeLayer.RenderPose[eye].Orientation.y, 
-                                            layer.eyeLayer.RenderPose[eye].Orientation.z, 
-                                            layer.eyeLayer.RenderPose[eye].Orientation.w };
-        QuaternionInvert(&eyeRPose);
-        Matrix eyeOrientation = QuaternionToMatrix(eyeRPose);
-        Matrix eyeTranslation = MatrixTranslate(-layer.eyeLayer.RenderPose[eye].Position.x, 
-                                                -layer.eyeLayer.RenderPose[eye].Position.y, 
-                                                -layer.eyeLayer.RenderPose[eye].Position.z);
-
-        Matrix eyeView = MatrixMultiply(eyeTranslation, eyeOrientation);
-        Matrix modelEyeView = MatrixMultiply(cameraView, eyeView);  // Using internal camera modelview matrix
-
-        SetMatrixModelview(modelEyeView);
-        SetMatrixProjection(layer.eyeProjections[eye]);
-#endif
-    
-        rlglDraw();                     // Draw Buffers (Only OpenGL 3+ and ES2)
-
-#if defined(PLATFORM_OCULUS)
-    }
-    
-    UnsetOculusBuffer(buffer);
-    
-    ovr_CommitTextureSwapChain(session, buffer.textureChain);
-    
-    ovrLayerHeader *layers = &layer.eyeLayer.Header;
-    ovr_SubmitFrame(session, frameIndex, &layer.viewScaleDesc, &layers, 1);
-
-    // Blit mirror texture to back buffer
-    BlitOculusMirror(session, mirror);
-
-    // Get session status information
-    ovrSessionStatus sessionStatus;
-    ovr_GetSessionStatus(session, &sessionStatus);
-    if (sessionStatus.ShouldQuit) TraceLog(WARNING, "OVR: Session should quit...");
-    if (sessionStatus.ShouldRecenter) ovr_RecenterTrackingOrigin(session);
-#endif
+    rlglDraw();                     // Draw Buffers (Only OpenGL 3+ and ES2)
 
     SwapBuffers();                  // Copy back buffer to front buffer
     PollInputEvents();              // Poll user events
@@ -772,7 +602,7 @@ void Begin3dMode(Camera camera)
     rlLoadIdentity();                   // Reset current matrix (MODELVIEW)
 
     // Setup Camera view
-    cameraView = MatrixLookAt(camera.position, camera.target, camera.up);
+    Matrix cameraView = MatrixLookAt(camera.position, camera.target, camera.up);
     rlMultMatrixf(MatrixToFloat(cameraView));      // Multiply MODELVIEW matrix by view matrix (camera)
     
     rlEnableDepthTest();                // Enable DEPTH_TEST for 3D
@@ -1742,24 +1572,12 @@ static void InitDisplay(int width, int height)
 #endif
 
     glfwMakeContextCurrent(window);
-#if defined(PLATFORM_OCULUS)
-    glfwSwapInterval(0);
-#endif
+    glfwSwapInterval(0);                // Disable VSync by default
 
 #if defined(PLATFORM_DESKTOP)
-    // Load OpenGL 3.3 extensions using GLAD
-    if (rlGetVersion() == OPENGL_33)
-    {
-        // NOTE: glad is generated and contains only required OpenGL 3.3 Core extensions
-        if (!gladLoadGLLoader((GLADloadproc)glfwGetProcAddress)) TraceLog(WARNING, "GLAD: Cannot load OpenGL extensions");
-        else TraceLog(INFO, "GLAD: OpenGL extensions loaded successfully");
-
-        if (GLAD_GL_VERSION_3_3) TraceLog(INFO, "OpenGL 3.3 Core profile supported");
-        else TraceLog(ERROR, "OpenGL 3.3 Core profile not supported");
-        
-        // With GLAD, we can check if an extension is supported using the GLAD_GL_xxx booleans
-        //if (GLAD_GL_ARB_vertex_array_object) // Use GL_ARB_vertex_array_object
-    }
+    // Load OpenGL 3.3 extensions
+    // NOTE: GLFW loader function is passed as parameter
+    rlglLoadExtensions(glfwGetProcAddress);
 #endif
     
     // Enables GPU v-sync, so frames are not limited to screen refresh rate (60Hz -> 60 FPS)
@@ -2954,214 +2772,6 @@ static void *GamepadThread(void *arg)
 }
 #endif
 
-
-#if defined(PLATFORM_OCULUS)
-// Convert from Oculus ovrMatrix4f struct to raymath Matrix struct
-static Matrix FromOvrMatrix(ovrMatrix4f ovrmat)
-{
-    Matrix rmat;
-    
-    rmat.m0 = ovrmat.M[0][0];
-    rmat.m1 = ovrmat.M[1][0];
-    rmat.m2 = ovrmat.M[2][0];
-    rmat.m3 = ovrmat.M[3][0];
-    rmat.m4 = ovrmat.M[0][1];
-    rmat.m5 = ovrmat.M[1][1];
-    rmat.m6 = ovrmat.M[2][1];
-    rmat.m7 = ovrmat.M[3][1];
-    rmat.m8 = ovrmat.M[0][2];
-    rmat.m9 = ovrmat.M[1][2];
-    rmat.m10 = ovrmat.M[2][2];
-    rmat.m11 = ovrmat.M[3][2];
-    rmat.m12 = ovrmat.M[0][3];
-    rmat.m13 = ovrmat.M[1][3];
-    rmat.m14 = ovrmat.M[2][3];
-    rmat.m15 = ovrmat.M[3][3];
-    
-    MatrixTranspose(&rmat);
-    
-    return rmat;
-}
-
-// Load Oculus required buffers: texture-swap-chain, fbo, texture-depth
-static OculusBuffer LoadOculusBuffer(ovrSession session, int width, int height)
-{
-    OculusBuffer buffer;
-    buffer.width = width;
-    buffer.height = height;
-    
-    // Create OVR texture chain
-    ovrTextureSwapChainDesc desc = {};
-    desc.Type = ovrTexture_2D;
-    desc.ArraySize = 1;
-    desc.Width = width;
-    desc.Height = height;
-    desc.MipLevels = 1;
-    desc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;   // Requires glEnable(GL_FRAMEBUFFER_SRGB);
-    desc.SampleCount = 1;
-    desc.StaticImage = ovrFalse;
-
-    ovrResult result = ovr_CreateTextureSwapChainGL(session, &desc, &buffer.textureChain);
-    
-    if (!OVR_SUCCESS(result)) TraceLog(WARNING, "OVR: Failed to create swap textures buffer");
-
-    int textureCount = 0;
-    ovr_GetTextureSwapChainLength(session, buffer.textureChain, &textureCount);
-    
-    if (!OVR_SUCCESS(result) || !textureCount) TraceLog(WARNING, "OVR: Unable to count swap chain textures");
-
-    for (int i = 0; i < textureCount; ++i)
-    {
-        GLuint chainTexId;
-        ovr_GetTextureSwapChainBufferGL(session, buffer.textureChain, i, &chainTexId);
-        glBindTexture(GL_TEXTURE_2D, chainTexId);
-        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
-        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
-        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
-        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
-    }
-    
-    glBindTexture(GL_TEXTURE_2D, 0);
-    
-    /*
-    // Setup framebuffer object (using depth texture)
-    glGenFramebuffers(1, &buffer.fboId);
-    glGenTextures(1, &buffer.depthId);
-    glBindTexture(GL_TEXTURE_2D, buffer.depthId);
-    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
-    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
-    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
-    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
-    glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT16, buffer.width, buffer.height, 0, GL_DEPTH_COMPONENT, GL_UNSIGNED_INT, NULL);
-    */
-    
-    // Setup framebuffer object (using depth renderbuffer)
-    glGenFramebuffers(1, &buffer.fboId);
-    glGenRenderbuffers(1, &buffer.depthId);
-    glBindFramebuffer(GL_DRAW_FRAMEBUFFER, buffer.fboId);
-    glBindRenderbuffer(GL_RENDERBUFFER, buffer.depthId);
-    glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT16, buffer.width, buffer.height);
-    glBindRenderbuffer(GL_RENDERBUFFER, 0);
-    glFramebufferRenderbuffer(GL_DRAW_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, buffer.depthId);
-    glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
-
-    return buffer;
-}
-
-// Unload texture required buffers
-static void UnloadOculusBuffer(ovrSession session, OculusBuffer buffer)
-{
-    if (buffer.textureChain)
-    {
-        ovr_DestroyTextureSwapChain(session, buffer.textureChain);
-        buffer.textureChain = NULL;
-    }
-
-    if (buffer.depthId != 0) glDeleteTextures(1, &buffer.depthId);
-    if (buffer.fboId != 0) glDeleteFramebuffers(1, &buffer.fboId);
-}
-
-// Set current Oculus buffer
-static void SetOculusBuffer(ovrSession session, OculusBuffer buffer)
-{
-    GLuint currentTexId;
-    int currentIndex;
-    
-    ovr_GetTextureSwapChainCurrentIndex(session, buffer.textureChain, &currentIndex);
-    ovr_GetTextureSwapChainBufferGL(session, buffer.textureChain, currentIndex, &currentTexId);
-
-    glBindFramebuffer(GL_DRAW_FRAMEBUFFER, buffer.fboId);
-    glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, currentTexId, 0);
-    //glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, buffer.depthId, 0);    // Already bound
-
-    //glViewport(0, 0, buffer.width, buffer.height);        // Useful if rendering to separate framebuffers (every eye)
-    //glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
-    
-    // Required if OculusBuffer format is OVR_FORMAT_R8G8B8A8_UNORM_SRGB
-    glEnable(GL_FRAMEBUFFER_SRGB);
-}
-
-// Unset Oculus buffer
-static void UnsetOculusBuffer(OculusBuffer buffer)
-{
-    glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0);
-    glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
-}
-
-// Load Oculus mirror buffers
-static OculusMirror LoadOculusMirror(ovrSession session, int width, int height)
-{
-    OculusMirror mirror;
-    mirror.width = width;
-    mirror.height = height;
-    
-    ovrMirrorTextureDesc mirrorDesc;
-    memset(&mirrorDesc, 0, sizeof(mirrorDesc));
-    mirrorDesc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
-    mirrorDesc.Width = mirror.width;
-    mirrorDesc.Height = mirror.height;
-    
-    if (!OVR_SUCCESS(ovr_CreateMirrorTextureGL(session, &mirrorDesc, &mirror.texture))) TraceLog(WARNING, "Could not create mirror texture");
-
-    glGenFramebuffers(1, &mirror.fboId);
-
-    return mirror;
-}
-
-// Unload Oculus mirror buffers
-static void UnloadOculusMirror(ovrSession session, OculusMirror mirror)
-{
-    if (mirror.fboId != 0) glDeleteFramebuffers(1, &mirror.fboId);
-    if (mirror.texture) ovr_DestroyMirrorTexture(session, mirror.texture);
-}
-
-static void BlitOculusMirror(ovrSession session, OculusMirror mirror)
-{
-    GLuint mirrorTextureId;
-    
-    ovr_GetMirrorTextureBufferGL(session, mirror.texture, &mirrorTextureId);
-    
-    glBindFramebuffer(GL_READ_FRAMEBUFFER, mirror.fboId);
-    glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, mirrorTextureId, 0);
-    glBlitFramebuffer(0, 0, mirror.width, mirror.height, 0, mirror.height, mirror.width, 0, GL_COLOR_BUFFER_BIT, GL_NEAREST);
-    glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
-}
-
-// Requires: session, hmdDesc
-static OculusLayer InitOculusLayer(ovrSession session)
-{
-    OculusLayer layer = { 0 };
-    
-    layer.viewScaleDesc.HmdSpaceToWorldScaleInMeters = 1.0f;
-
-    memset(&layer.eyeLayer, 0, sizeof(ovrLayerEyeFov));
-    layer.eyeLayer.Header.Type = ovrLayerType_EyeFov;
-    layer.eyeLayer.Header.Flags = ovrLayerFlag_TextureOriginAtBottomLeft;
-
-    ovrEyeRenderDesc eyeRenderDescs[2];
-    
-    for (int eye = 0; eye < 2; eye++)
-    {
-        eyeRenderDescs[eye] = ovr_GetRenderDesc(session, eye, hmdDesc.DefaultEyeFov[eye]);
-        ovrMatrix4f ovrPerspectiveProjection = ovrMatrix4f_Projection(eyeRenderDescs[eye].Fov, 0.01f, 10000.0f, ovrProjection_None); //ovrProjection_ClipRangeOpenGL);
-        layer.eyeProjections[eye] = FromOvrMatrix(ovrPerspectiveProjection);      // NOTE: struct ovrMatrix4f { float M[4][4] } --> struct Matrix
-
-        layer.viewScaleDesc.HmdToEyeOffset[eye] = eyeRenderDescs[eye].HmdToEyeOffset;
-        layer.eyeLayer.Fov[eye] = eyeRenderDescs[eye].Fov;
-        
-        ovrSizei eyeSize = ovr_GetFovTextureSize(session, eye, layer.eyeLayer.Fov[eye], 1.0f);
-        layer.eyeLayer.Viewport[eye].Size = eyeSize;
-        layer.eyeLayer.Viewport[eye].Pos.x = layer.width;
-        layer.eyeLayer.Viewport[eye].Pos.y = 0;
-
-        layer.height = eyeSize.h;     //std::max(renderTargetSize.y, (uint32_t)eyeSize.h);
-        layer.width += eyeSize.w;
-    }
-    
-    return layer;
-}
-#endif
-
 // Plays raylib logo appearing animation
 static void LogoAnimation(void)
 {

+ 196 - 0
src/external/OculusSDK/LibOVR/Include/Extras/OVR_CAPI_Util.h

@@ -0,0 +1,196 @@
+/********************************************************************************//**
+\file      OVR_CAPI_Util.h
+\brief     This header provides LibOVR utility function declarations
+\copyright Copyright 2015-2016 Oculus VR, LLC All Rights reserved.
+*************************************************************************************/
+
+#ifndef OVR_CAPI_Util_h
+#define OVR_CAPI_Util_h
+
+
+#include "../OVR_CAPI.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/// Enumerates modifications to the projection matrix based on the application's needs.
+///
+/// \see ovrMatrix4f_Projection
+///
+typedef enum ovrProjectionModifier_
+{
+    /// Use for generating a default projection matrix that is:
+    /// * Right-handed.
+    /// * Near depth values stored in the depth buffer are smaller than far depth values.
+    /// * Both near and far are explicitly defined.
+    /// * With a clipping range that is (0 to w).
+    ovrProjection_None = 0x00,
+
+    /// Enable if using left-handed transformations in your application.
+    ovrProjection_LeftHanded = 0x01,
+
+    /// After the projection transform is applied, far values stored in the depth buffer will be less than closer depth values.
+    /// NOTE: Enable only if the application is using a floating-point depth buffer for proper precision.
+    ovrProjection_FarLessThanNear = 0x02,
+
+    /// When this flag is used, the zfar value pushed into ovrMatrix4f_Projection() will be ignored
+    /// NOTE: Enable only if ovrProjection_FarLessThanNear is also enabled where the far clipping plane will be pushed to infinity.
+    ovrProjection_FarClipAtInfinity = 0x04,
+
+    /// Enable if the application is rendering with OpenGL and expects a projection matrix with a clipping range of (-w to w).
+    /// Ignore this flag if your application already handles the conversion from D3D range (0 to w) to OpenGL.
+    ovrProjection_ClipRangeOpenGL = 0x08,
+} ovrProjectionModifier;
+
+
+/// Return values for ovr_Detect.
+///
+/// \see ovr_Detect
+///
+typedef struct OVR_ALIGNAS(8) ovrDetectResult_
+{
+    /// Is ovrFalse when the Oculus Service is not running.
+    ///   This means that the Oculus Service is either uninstalled or stopped.
+    ///   IsOculusHMDConnected will be ovrFalse in this case.
+    /// Is ovrTrue when the Oculus Service is running.
+    ///   This means that the Oculus Service is installed and running.
+    ///   IsOculusHMDConnected will reflect the state of the HMD.
+    ovrBool IsOculusServiceRunning;
+
+    /// Is ovrFalse when an Oculus HMD is not detected.
+    ///   If the Oculus Service is not running, this will be ovrFalse.
+    /// Is ovrTrue when an Oculus HMD is detected.
+    ///   This implies that the Oculus Service is also installed and running.
+    ovrBool IsOculusHMDConnected;
+
+    OVR_UNUSED_STRUCT_PAD(pad0, 6) ///< \internal struct padding
+
+} ovrDetectResult;
+
+OVR_STATIC_ASSERT(sizeof(ovrDetectResult) == 8, "ovrDetectResult size mismatch");
+
+
+/// Detects Oculus Runtime and Device Status
+///
+/// Checks for Oculus Runtime and Oculus HMD device status without loading the LibOVRRT
+/// shared library.  This may be called before ovr_Initialize() to help decide whether or
+/// not to initialize LibOVR.
+///
+/// \param[in] timeoutMilliseconds Specifies a timeout to wait for HMD to be attached or 0 to poll.
+///
+/// \return Returns an ovrDetectResult object indicating the result of detection.
+///
+/// \see ovrDetectResult
+///
+OVR_PUBLIC_FUNCTION(ovrDetectResult) ovr_Detect(int timeoutMilliseconds);
+
+// On the Windows platform:
+#ifdef _WIN32
+    /// This is the Windows Named Event name that is used to check for HMD connected state.
+    #define OVR_HMD_CONNECTED_EVENT_NAME L"OculusHMDConnected"
+#endif // _WIN32
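/* Illustrative usage sketch (not part of this header), assuming the standard LibOVR
   entry point ovr_Initialize() declared in OVR_CAPI.h; the helper name
   DetectAndInitSketch is hypothetical. */
static inline ovrBool DetectAndInitSketch(void)
{
    ovrDetectResult detection = ovr_Detect(0);      /* 0 = poll without waiting */

    if (!detection.IsOculusServiceRunning || !detection.IsOculusHMDConnected)
        return ovrFalse;                            /* skip loading LibOVRRT entirely */

    /* Only now pay the cost of initializing the runtime */
    return OVR_SUCCESS(ovr_Initialize(NULL)) ? ovrTrue : ovrFalse;
}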
+
+
+/// Used to generate projection from ovrEyeDesc::Fov.
+///
+/// \param[in] fov Specifies the ovrFovPort to use.
+/// \param[in] znear Distance to near Z limit.
+/// \param[in] zfar Distance to far Z limit.
+/// \param[in] projectionModFlags A combination of the ovrProjectionModifier flags.
+///
+/// \return Returns the calculated projection matrix.
+/// 
+/// \see ovrProjectionModifier
+///
+OVR_PUBLIC_FUNCTION(ovrMatrix4f) ovrMatrix4f_Projection(ovrFovPort fov, float znear, float zfar, unsigned int projectionModFlags);
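/* Illustrative usage sketch (not part of this header): building an OpenGL-friendly eye
   projection. session and hmdDesc are assumed to come from ovr_Create()/ovr_GetHmdDesc();
   the helper name and the 0.1/1000 clip planes are arbitrary example values. */
static inline ovrMatrix4f GetEyeProjectionSketch(ovrSession session, ovrHmdDesc hmdDesc, ovrEyeType eye)
{
    ovrEyeRenderDesc renderDesc = ovr_GetRenderDesc(session, eye, hmdDesc.DefaultEyeFov[eye]);

    /* ovrProjection_ClipRangeOpenGL requests the (-w, w) clip range expected by OpenGL */
    return ovrMatrix4f_Projection(renderDesc.Fov, 0.1f, 1000.0f, ovrProjection_ClipRangeOpenGL);
}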
+
+
+/// Extracts the required data from the result of ovrMatrix4f_Projection.
+///
+/// \param[in] projection Specifies the projection matrix from which to extract ovrTimewarpProjectionDesc.
+/// \param[in] projectionModFlags A combination of the ovrProjectionModifier flags.
+/// \return Returns the extracted ovrTimewarpProjectionDesc.
+/// \see ovrTimewarpProjectionDesc
+///
+OVR_PUBLIC_FUNCTION(ovrTimewarpProjectionDesc) ovrTimewarpProjectionDesc_FromProjection(ovrMatrix4f projection, unsigned int projectionModFlags);
+
+
+/// Generates an orthographic sub-projection.
+///
+/// Used for 2D rendering, Y is down.
+///
+/// \param[in] projection The perspective matrix that the orthographic matrix is derived from.
+/// \param[in] orthoScale Equal to 1.0f / pixelsPerTanAngleAtCenter.
+/// \param[in] orthoDistance Equal to the distance from the camera in meters, such as 0.8m.
+/// \param[in] HmdToEyeOffsetX Specifies the offset of the eye from the center.
+///
+/// \return Returns the calculated projection matrix.
+///
+OVR_PUBLIC_FUNCTION(ovrMatrix4f) ovrMatrix4f_OrthoSubProjection(ovrMatrix4f projection, ovrVector2f orthoScale,
+                                                                float orthoDistance, float HmdToEyeOffsetX);
+
+
+
+/// Computes offset eye poses based on headPose returned by ovrTrackingState.
+///
+/// \param[in] headPose Indicates the HMD position and orientation to use for the calculation.
+/// \param[in] hmdToEyeOffset Can be ovrEyeRenderDesc.HmdToEyeOffset returned from
+///            ovr_GetRenderDesc. For monoscopic rendering, use a vector that is the average 
+///            of the two vectors for both eyes.
+/// \param[out] outEyePoses If outEyePoses are used for rendering, they should be passed to 
+///             ovr_SubmitFrame in ovrLayerEyeFov::RenderPose or ovrLayerEyeFovDepth::RenderPose.
+///
+OVR_PUBLIC_FUNCTION(void) ovr_CalcEyePoses(ovrPosef headPose,
+                                           const ovrVector3f hmdToEyeOffset[2],
+                                           ovrPosef outEyePoses[2]);
+
+
+/// Returns the predicted head pose in outHmdTrackingState and offset eye poses in outEyePoses.
+///
+/// This is a thread-safe function where the caller should increment frameIndex with every frame
+/// and pass that index where applicable to functions called on the rendering thread.
+/// Assuming outEyePoses are used for rendering, they should be passed as part of ovrLayerEyeFov.
+/// The caller does not need to worry about applying HmdToEyeOffset to the returned outEyePoses variables.
+///
+/// \param[in]  session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in]  frameIndex Specifies the targeted frame index, or 0 to refer to one frame after 
+///             the last time ovr_SubmitFrame was called.
+/// \param[in]  latencyMarker Specifies that this call is the point in time where
+///             the "App-to-Mid-Photon" latency timer starts from. If a given ovrLayer
+///             provides "SensorSampleTimestamp", that will override the value stored here.
+/// \param[in]  hmdToEyeOffset Can be ovrEyeRenderDesc.HmdToEyeOffset returned from
+///             ovr_GetRenderDesc. For monoscopic rendering, use a vector that is the average
+///             of the two vectors for both eyes.
+/// \param[out] outEyePoses The predicted eye poses.
+/// \param[out] outSensorSampleTime The time when this function was called. May be NULL, in which case it is ignored.
+///
+OVR_PUBLIC_FUNCTION(void) ovr_GetEyePoses(ovrSession session, long long frameIndex, ovrBool latencyMarker,
+                                             const ovrVector3f hmdToEyeOffset[2],
+                                             ovrPosef outEyePoses[2],
+                                             double* outSensorSampleTime);
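/* Illustrative usage sketch (not part of this header): per-frame pose query for both eyes.
   eyeRenderDescs[2] is assumed to have been filled from ovr_GetRenderDesc(); the helper
   name is hypothetical. */
static inline void QueryEyePosesSketch(ovrSession session, long long frameIndex,
                                       const ovrEyeRenderDesc eyeRenderDescs[2],
                                       ovrPosef outEyePoses[2], double* outSampleTime)
{
    ovrVector3f hmdToEyeOffset[2] = { eyeRenderDescs[0].HmdToEyeOffset,
                                      eyeRenderDescs[1].HmdToEyeOffset };

    /* ovrTrue: this call marks the start of the "App-to-Mid-Photon" latency timer */
    ovr_GetEyePoses(session, frameIndex, ovrTrue, hmdToEyeOffset, outEyePoses, outSampleTime);
}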
+
+
+
+/// Tracking poses provided by the SDK come in a right-handed coordinate system. If an application
+/// is passing in ovrProjection_LeftHanded into ovrMatrix4f_Projection, then it should also use
+/// this function to flip the HMD tracking poses to be left-handed.
+///
+/// While this utility function is intended to convert a right-handed ovrPosef into a left-handed
+/// coordinate system, it will also work for converting left-handed to right-handed, since the
+/// flip operation is the same in both cases.
+///
+/// \param[in]  inPose The pose to convert, given in a right-handed coordinate system.
+/// \param[out] outPose The flipped, left-handed pose (can be the same pointer as inPose).
+///
+OVR_PUBLIC_FUNCTION(void) ovrPosef_FlipHandedness(const ovrPosef* inPose, ovrPosef* outPose);
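/* Illustrative usage sketch (not part of this header): when rendering with
   ovrProjection_LeftHanded, flip each tracked pose before building the view matrix.
   The helper name is hypothetical. */
static inline ovrPosef ToLeftHandedPoseSketch(ovrPosef trackedPose)
{
    ovrPosef leftHanded;
    ovrPosef_FlipHandedness(&trackedPose, &leftHanded);   /* in and out may be the same pointer */
    return leftHanded;
}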
+
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+
+#endif // Header include guard

+ 3785 - 0
src/external/OculusSDK/LibOVR/Include/Extras/OVR_Math.h

@@ -0,0 +1,3785 @@
+/********************************************************************************//**
+\file      OVR_Math.h
+\brief     Implementation of 3D primitives such as vectors, matrices.
+\copyright Copyright 2014-2016 Oculus VR, LLC All Rights reserved.
+*************************************************************************************/
+
+#ifndef OVR_Math_h
+#define OVR_Math_h
+
+
+// This file is intended to be independent of the rest of LibOVR and LibOVRKernel and thus 
+// has no #include dependencies on either.
+
+#include <math.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <float.h>
+#include "../OVR_CAPI.h" // Currently required due to a dependence on the ovrFovPort_ declaration.
+
+#if defined(_MSC_VER)
+    #pragma warning(push)
+    #pragma warning(disable: 4127) // conditional expression is constant
+#endif
+
+
+#if defined(_MSC_VER)
+    #define OVRMath_sprintf sprintf_s
+#else
+    #define OVRMath_sprintf snprintf
+#endif
+
+
+//-------------------------------------------------------------------------------------
+// ***** OVR_MATH_DEBUG_BREAK
+//
+// Independent debug break implementation for OVR_Math.h.
+
+#if !defined(OVR_MATH_DEBUG_BREAK)
+    #if defined(_DEBUG)
+        #if defined(_MSC_VER)
+            #define OVR_MATH_DEBUG_BREAK __debugbreak()
+        #else
+            #define OVR_MATH_DEBUG_BREAK __builtin_trap()
+        #endif
+    #else
+        #define OVR_MATH_DEBUG_BREAK ((void)0)
+    #endif
+#endif
+
+
+//-------------------------------------------------------------------------------------
+// ***** OVR_MATH_ASSERT
+//
+// Independent OVR_MATH_ASSERT implementation for OVR_Math.h.
+
+#if !defined(OVR_MATH_ASSERT)
+    #if defined(_DEBUG)
+        #define OVR_MATH_ASSERT(p) if (!(p)) { OVR_MATH_DEBUG_BREAK; }
+    #else
+        #define OVR_MATH_ASSERT(p) ((void)0)
+    #endif
+#endif
+
+
+//-------------------------------------------------------------------------------------
+// ***** OVR_MATH_STATIC_ASSERT
+//
+// Independent OVR_MATH_STATIC_ASSERT implementation for OVR_Math.h.
+
+#if !defined(OVR_MATH_STATIC_ASSERT)
+    #if defined(__cplusplus) && ((defined(_MSC_VER) && (_MSC_VER >= 1600)) || defined(__GXX_EXPERIMENTAL_CXX0X__) || (__cplusplus >= 201103L))
+        #define OVR_MATH_STATIC_ASSERT static_assert
+    #else
+        #if !defined(OVR_SA_UNUSED)
+            #if defined(__GNUC__) || defined(__clang__)
+                #define OVR_SA_UNUSED __attribute__((unused))
+            #else
+                #define OVR_SA_UNUSED
+            #endif
+            #define OVR_SA_PASTE(a,b) a##b
+            #define OVR_SA_HELP(a,b)  OVR_SA_PASTE(a,b)
+        #endif
+
+        #define OVR_MATH_STATIC_ASSERT(expression, msg) typedef char OVR_SA_HELP(compileTimeAssert, __LINE__) [((expression) != 0) ? 1 : -1] OVR_SA_UNUSED
+    #endif
+#endif
+
+
+
+namespace OVR {
+
+template<class T>
+const T OVRMath_Min(const T a, const T b)
+{ return (a < b) ? a : b; }
+
+template<class T>
+const T OVRMath_Max(const T a, const T b)
+{ return (b < a) ? a : b; }
+
+template<class T>
+void OVRMath_Swap(T& a, T& b) 
+{  T temp(a); a = b; b = temp; }
+
+
+//-------------------------------------------------------------------------------------
+// ***** Constants for 3D world/axis definitions.
+
+// Definitions of axes for coordinate and rotation conversions.
+enum Axis
+{
+    Axis_X = 0, Axis_Y = 1, Axis_Z = 2
+};
+
+// RotateDirection describes the rotation direction around an axis, interpreted as follows:
+//  CW  - Clockwise while looking "down" from positive axis towards the origin.
+//  CCW - Counter-clockwise while looking from the positive axis towards the origin,
+//        which is in the negative axis direction.
+//  CCW is the default for the RHS coordinate system. Oculus standard RHS coordinate
+//  system defines Y up, X right, and Z back (pointing out from the screen). In this
+//  system, Rotate_CCW around Z specifies counter-clockwise rotation in the XY plane.
+enum RotateDirection
+{
+    Rotate_CCW = 1,
+    Rotate_CW  = -1 
+};
+
+// Constants for right handed and left handed coordinate systems
+enum HandedSystem
+{
+    Handed_R = 1, Handed_L = -1
+};
+
+// AxisDirection describes which way the coordinate axis points. Used by WorldAxes.
+enum AxisDirection
+{
+    Axis_Up    =  2,
+    Axis_Down  = -2,
+    Axis_Right =  1,
+    Axis_Left  = -1,
+    Axis_In    =  3,
+    Axis_Out   = -3
+};
+
+struct WorldAxes
+{
+    AxisDirection XAxis, YAxis, ZAxis;
+
+    WorldAxes(AxisDirection x, AxisDirection y, AxisDirection z)
+        : XAxis(x), YAxis(y), ZAxis(z) 
+    { OVR_MATH_ASSERT(abs(x) != abs(y) && abs(y) != abs(z) && abs(z) != abs(x));}
+};
+
+} // namespace OVR
+
+
+//------------------------------------------------------------------------------------//
+// ***** C Compatibility Types
+
+// These declarations are used to support conversion between C types used in
+// LibOVR C interfaces and their C++ versions. As an example, they allow passing
+// Vector3f into a function that expects ovrVector3f.
+
+typedef struct ovrQuatf_ ovrQuatf;
+typedef struct ovrQuatd_ ovrQuatd;
+typedef struct ovrSizei_ ovrSizei;
+typedef struct ovrSizef_ ovrSizef;
+typedef struct ovrSized_ ovrSized;
+typedef struct ovrRecti_ ovrRecti;
+typedef struct ovrVector2i_ ovrVector2i;
+typedef struct ovrVector2f_ ovrVector2f;
+typedef struct ovrVector2d_ ovrVector2d;
+typedef struct ovrVector3f_ ovrVector3f;
+typedef struct ovrVector3d_ ovrVector3d;
+typedef struct ovrVector4f_ ovrVector4f;
+typedef struct ovrVector4d_ ovrVector4d;
+typedef struct ovrMatrix2f_ ovrMatrix2f;
+typedef struct ovrMatrix2d_ ovrMatrix2d;
+typedef struct ovrMatrix3f_ ovrMatrix3f;
+typedef struct ovrMatrix3d_ ovrMatrix3d;
+typedef struct ovrMatrix4f_ ovrMatrix4f;
+typedef struct ovrMatrix4d_ ovrMatrix4d;
+typedef struct ovrPosef_ ovrPosef;
+typedef struct ovrPosed_ ovrPosed;
+typedef struct ovrPoseStatef_ ovrPoseStatef;
+typedef struct ovrPoseStated_ ovrPoseStated;
+
+namespace OVR {
+
+// Forward-declare our templates.
+template<class T> class Quat;
+template<class T> class Size;
+template<class T> class Rect;
+template<class T> class Vector2;
+template<class T> class Vector3;
+template<class T> class Vector4;
+template<class T> class Matrix2;
+template<class T> class Matrix3;
+template<class T> class Matrix4;
+template<class T> class Pose;
+template<class T> class PoseState;
+
+// CompatibleTypes::Type is used to lookup a compatible C-version of a C++ class.
+template<class C>
+struct CompatibleTypes
+{    
+    // Declaration here seems necessary for MSVC; specializations are
+    // used instead.
+    typedef struct {} Type;
+};
+
+// Specializations providing CompatibleTypes::Type value.
+template<> struct CompatibleTypes<Quat<float> >     { typedef ovrQuatf Type; };
+template<> struct CompatibleTypes<Quat<double> >    { typedef ovrQuatd Type; };
+template<> struct CompatibleTypes<Matrix2<float> >  { typedef ovrMatrix2f Type; };
+template<> struct CompatibleTypes<Matrix2<double> > { typedef ovrMatrix2d Type; };
+template<> struct CompatibleTypes<Matrix3<float> >  { typedef ovrMatrix3f Type; };
+template<> struct CompatibleTypes<Matrix3<double> > { typedef ovrMatrix3d Type; };
+template<> struct CompatibleTypes<Matrix4<float> >  { typedef ovrMatrix4f Type; };
+template<> struct CompatibleTypes<Matrix4<double> > { typedef ovrMatrix4d Type; };
+template<> struct CompatibleTypes<Size<int> >       { typedef ovrSizei Type; };
+template<> struct CompatibleTypes<Size<float> >     { typedef ovrSizef Type; };
+template<> struct CompatibleTypes<Size<double> >    { typedef ovrSized Type; };
+template<> struct CompatibleTypes<Rect<int> >       { typedef ovrRecti Type; };
+template<> struct CompatibleTypes<Vector2<int> >    { typedef ovrVector2i Type; };
+template<> struct CompatibleTypes<Vector2<float> >  { typedef ovrVector2f Type; };
+template<> struct CompatibleTypes<Vector2<double> > { typedef ovrVector2d Type; };
+template<> struct CompatibleTypes<Vector3<float> >  { typedef ovrVector3f Type; };
+template<> struct CompatibleTypes<Vector3<double> > { typedef ovrVector3d Type; };
+template<> struct CompatibleTypes<Vector4<float> >  { typedef ovrVector4f Type; };
+template<> struct CompatibleTypes<Vector4<double> > { typedef ovrVector4d Type; };
+template<> struct CompatibleTypes<Pose<float> >     { typedef ovrPosef Type; };
+template<> struct CompatibleTypes<Pose<double> >    { typedef ovrPosed Type; };
+
+//------------------------------------------------------------------------------------//
+// ***** Math
+//
+// Math class contains constants and functions. This class is a template specialized
+// per type, with Math<float> and Math<double> being distinct.
+template<class T>
+class Math
+{  
+public:
+    // By default, support explicit conversion to float. This allows Vector2<int> to
+    // compile, for example.
+    typedef float OtherFloatType;
+
+    static int Tolerance() { return 0; }  // Default value so integer types compile
+};
+
+
+//------------------------------------------------------------------------------------//
+// ***** double constants
+#define MATH_DOUBLE_PI              3.14159265358979323846
+#define MATH_DOUBLE_TWOPI           (2*MATH_DOUBLE_PI)
+#define MATH_DOUBLE_PIOVER2         (0.5*MATH_DOUBLE_PI)
+#define MATH_DOUBLE_PIOVER4         (0.25*MATH_DOUBLE_PI)
+#define MATH_FLOAT_MAXVALUE             (FLT_MAX) 
+
+#define MATH_DOUBLE_RADTODEGREEFACTOR (360.0 / MATH_DOUBLE_TWOPI)
+#define MATH_DOUBLE_DEGREETORADFACTOR (MATH_DOUBLE_TWOPI / 360.0)
+
+#define MATH_DOUBLE_E               2.71828182845904523536
+#define MATH_DOUBLE_LOG2E           1.44269504088896340736
+#define MATH_DOUBLE_LOG10E          0.434294481903251827651
+#define MATH_DOUBLE_LN2             0.693147180559945309417
+#define MATH_DOUBLE_LN10            2.30258509299404568402
+
+#define MATH_DOUBLE_SQRT2           1.41421356237309504880
+#define MATH_DOUBLE_SQRT1_2         0.707106781186547524401
+
+#define MATH_DOUBLE_TOLERANCE       1e-12   // a default number for value equality tolerance: about 4500*Epsilon;
+#define MATH_DOUBLE_SINGULARITYRADIUS 1e-12 // about 1-cos(.0001 degree), for gimbal lock numerical problems    
+
+//------------------------------------------------------------------------------------//
+// ***** float constants
+#define MATH_FLOAT_PI               float(MATH_DOUBLE_PI)
+#define MATH_FLOAT_TWOPI            float(MATH_DOUBLE_TWOPI)
+#define MATH_FLOAT_PIOVER2          float(MATH_DOUBLE_PIOVER2)
+#define MATH_FLOAT_PIOVER4          float(MATH_DOUBLE_PIOVER4)
+
+#define MATH_FLOAT_RADTODEGREEFACTOR float(MATH_DOUBLE_RADTODEGREEFACTOR)
+#define MATH_FLOAT_DEGREETORADFACTOR float(MATH_DOUBLE_DEGREETORADFACTOR)
+
+#define MATH_FLOAT_E                float(MATH_DOUBLE_E)
+#define MATH_FLOAT_LOG2E            float(MATH_DOUBLE_LOG2E)
+#define MATH_FLOAT_LOG10E           float(MATH_DOUBLE_LOG10E)
+#define MATH_FLOAT_LN2              float(MATH_DOUBLE_LN2)
+#define MATH_FLOAT_LN10             float(MATH_DOUBLE_LN10)
+
+#define MATH_FLOAT_SQRT2            float(MATH_DOUBLE_SQRT2)
+#define MATH_FLOAT_SQRT1_2          float(MATH_DOUBLE_SQRT1_2)
+
+#define MATH_FLOAT_TOLERANCE        1e-5f   // a default number for value equality tolerance: 1e-5, about 84*EPSILON;
+#define MATH_FLOAT_SINGULARITYRADIUS 1e-7f  // about 1-cos(.025 degree), for gimbal lock numerical problems   
+
+
+
+// Single-precision Math constants class.
+template<>
+class Math<float>
+{
+public:
+     typedef double OtherFloatType;
+
+    static inline float Tolerance()         { return MATH_FLOAT_TOLERANCE; }; // a default number for value equality tolerance
+    static inline float SingularityRadius() { return MATH_FLOAT_SINGULARITYRADIUS; };    // for gimbal lock numerical problems    
+};
+
+// Double-precision Math constants class
+template<>
+class Math<double>
+{
+public:
+    typedef float OtherFloatType;
+
+    static inline double Tolerance()         { return MATH_DOUBLE_TOLERANCE; }; // a default number for value equality tolerance
+    static inline double SingularityRadius() { return MATH_DOUBLE_SINGULARITYRADIUS; };    // for gimbal lock numerical problems    
+};
+
+typedef Math<float>  Mathf;
+typedef Math<double> Mathd;
+
+// Conversion functions between degrees and radians
+// (non-templated to ensure passing int arguments causes warning)
+inline float  RadToDegree(float rad)         { return rad * MATH_FLOAT_RADTODEGREEFACTOR; }
+inline double RadToDegree(double rad)        { return rad * MATH_DOUBLE_RADTODEGREEFACTOR; }
+
+inline float  DegreeToRad(float deg)         { return deg * MATH_FLOAT_DEGREETORADFACTOR; }
+inline double DegreeToRad(double deg)        { return deg * MATH_DOUBLE_DEGREETORADFACTOR; }
+
+// Square function
+template<class T>
+inline T Sqr(T x) { return x*x; }
+
+// Sign: returns 0 if x == 0, -1 if x < 0, and 1 if x > 0
+template<class T>
+inline T Sign(T x) { return (x != T(0)) ? (x < T(0) ? T(-1) : T(1)) : T(0); }
+
+// Numerically stable acos function
+inline float Acos(float x)   { return (x > 1.0f) ? 0.0f : (x < -1.0f) ? MATH_FLOAT_PI : acosf(x); }
+inline double Acos(double x) { return (x > 1.0) ? 0.0 : (x < -1.0) ? MATH_DOUBLE_PI : acos(x); }
+
+// Numerically stable asin function
+inline float Asin(float x)   { return (x > 1.0f) ? MATH_FLOAT_PIOVER2 : (x < -1.0f) ? -MATH_FLOAT_PIOVER2 : asinf(x); }
+inline double Asin(double x) { return (x > 1.0) ? MATH_DOUBLE_PIOVER2 : (x < -1.0) ? -MATH_DOUBLE_PIOVER2 : asin(x); }
+
+#if defined(_MSC_VER)
+    inline int isnan(double x) { return ::_isnan(x); }
+#elif !defined(isnan) // Some libraries #define isnan.
+    inline int isnan(double x) { return ::isnan(x); }
+#endif
+
+template<class T>
+class Quat;
+
+
+//-------------------------------------------------------------------------------------
+// ***** Vector2<>
+
+// Vector2f (Vector2d) represents a 2-dimensional vector or point in space,
+// consisting of coordinates x and y
+
+template<class T>
+class Vector2
+{
+public:
+    typedef T ElementType;
+    static const size_t ElementCount = 2;
+
+    T x, y;
+
+    Vector2() : x(0), y(0) { }
+    Vector2(T x_, T y_) : x(x_), y(y_) { }
+    explicit Vector2(T s) : x(s), y(s) { }
+    explicit Vector2(const Vector2<typename Math<T>::OtherFloatType> &src)
+        : x((T)src.x), y((T)src.y) { }
+
+    static Vector2 Zero() { return Vector2(0, 0); }
+
+    // C-interop support.
+    typedef  typename CompatibleTypes<Vector2<T> >::Type CompatibleType;
+
+    Vector2(const CompatibleType& s) : x(s.x), y(s.y) {  }
+
+    operator const CompatibleType& () const
+    {
+        OVR_MATH_STATIC_ASSERT(sizeof(Vector2<T>) == sizeof(CompatibleType), "sizeof(Vector2<T>) failure");
+        return reinterpret_cast<const CompatibleType&>(*this);
+    }
+
+        
+    bool     operator== (const Vector2& b) const  { return x == b.x && y == b.y; }
+    bool     operator!= (const Vector2& b) const  { return x != b.x || y != b.y; }
+             
+    Vector2  operator+  (const Vector2& b) const  { return Vector2(x + b.x, y + b.y); }
+    Vector2& operator+= (const Vector2& b)        { x += b.x; y += b.y; return *this; }
+    Vector2  operator-  (const Vector2& b) const  { return Vector2(x - b.x, y - b.y); }
+    Vector2& operator-= (const Vector2& b)        { x -= b.x; y -= b.y; return *this; }
+    Vector2  operator- () const                   { return Vector2(-x, -y); }
+
+    // Scalar multiplication/division scales vector.
+    Vector2  operator*  (T s) const               { return Vector2(x*s, y*s); }
+    Vector2& operator*= (T s)                     { x *= s; y *= s; return *this; }
+
+    Vector2  operator/  (T s) const               { T rcp = T(1)/s;
+                                                    return Vector2(x*rcp, y*rcp); }
+    Vector2& operator/= (T s)                     { T rcp = T(1)/s;
+                                                    x *= rcp; y *= rcp;
+                                                    return *this; }
+
+    static Vector2  Min(const Vector2& a, const Vector2& b) { return Vector2((a.x < b.x) ? a.x : b.x,
+                                                                             (a.y < b.y) ? a.y : b.y); }
+    static Vector2  Max(const Vector2& a, const Vector2& b) { return Vector2((a.x > b.x) ? a.x : b.x,
+                                                                             (a.y > b.y) ? a.y : b.y); }
+
+    Vector2 Clamped(T maxMag) const
+    {
+        T magSquared = LengthSq();
+        if (magSquared <= Sqr(maxMag))
+            return *this;
+        else
+            return *this * (maxMag / sqrt(magSquared));
+    }
+
+    // Compare two vectors for equality with tolerance. Returns true if vectors match within tolerance.
+    bool IsEqual(const Vector2& b, T tolerance =Math<T>::Tolerance()) const
+    {
+        return (fabs(b.x-x) <= tolerance) &&
+               (fabs(b.y-y) <= tolerance);
+    }
+    bool Compare(const Vector2& b, T tolerance = Math<T>::Tolerance()) const 
+    {
+        return IsEqual(b, tolerance);
+    }
+
+    // Access element by index
+    T& operator[] (int idx)
+    {
+        OVR_MATH_ASSERT(0 <= idx && idx < 2);
+        return *(&x + idx);
+    }
+    const T& operator[] (int idx) const
+    {
+        OVR_MATH_ASSERT(0 <= idx && idx < 2);
+        return *(&x + idx);
+    }
+
+    // Entry-wise product of two vectors
+    Vector2    EntrywiseMultiply(const Vector2& b) const    { return Vector2(x * b.x, y * b.y);}
+
+
+    // Multiply and divide operators do entry-wise math. Use Dot() for the dot product.
+    Vector2  operator*  (const Vector2& b) const        { return Vector2(x * b.x,  y * b.y); }
+    Vector2  operator/  (const Vector2& b) const        { return Vector2(x / b.x,  y / b.y); }
+
+    // Dot product
+    // Used to calculate angle q between two vectors among other things,
+    // as (A dot B) = |a||b|cos(q).
+    T        Dot(const Vector2& b) const                 { return x*b.x + y*b.y; }
+
+    // Returns the angle from this vector to b, in radians.
+    T       Angle(const Vector2& b) const        
+    { 
+        T div = LengthSq()*b.LengthSq();
+        OVR_MATH_ASSERT(div != T(0));
+        T result = Acos((this->Dot(b))/sqrt(div));
+        return result;
+    }
+
+    // Return Length of the vector squared.
+    T       LengthSq() const                     { return (x * x + y * y); }
+
+    // Return vector length.
+    T       Length() const                       { return sqrt(LengthSq()); }
+
+    // Returns squared distance between two points represented by vectors.
+    T       DistanceSq(const Vector2& b) const   { return (*this - b).LengthSq(); }
+
+    // Returns distance between two points represented by vectors.
+    T       Distance(const Vector2& b) const     { return (*this - b).Length(); }
+
+    // Determine if this a unit vector.
+    bool    IsNormalized() const                 { return fabs(LengthSq() - T(1)) < Math<T>::Tolerance(); }
+
+    // Normalize, converting vector length to 1.
+    void    Normalize()                          
+    {
+        T s = Length();
+        if (s != T(0))
+            s = T(1) / s;
+        *this *= s;
+    }
+
+    // Returns normalized (unit) version of the vector without modifying itself.
+    Vector2 Normalized() const                   
+    { 
+        T s = Length();
+        if (s != T(0))
+            s = T(1) / s;
+        return *this * s; 
+    }
+
+    // Linearly interpolates from this vector to another.
+    // Factor should be between 0.0 and 1.0, with 0 giving full value to this.
+    Vector2 Lerp(const Vector2& b, T f) const    { return *this*(T(1) - f) + b*f; }
+
+    // Projects this vector onto the argument; in other words,
+    // A.Project(B) returns projection of vector A onto B.
+    Vector2 ProjectTo(const Vector2& b) const    
+    { 
+        T l2 = b.LengthSq();
+        OVR_MATH_ASSERT(l2 != T(0));
+        return b * ( Dot(b) / l2 ); 
+    }
+    
+    // returns true if vector b is clockwise from this vector
+    bool IsClockwise(const Vector2& b) const
+    {
+        return (x * b.y - y * b.x) < 0;
+    }
+};
+
+
+typedef Vector2<float>  Vector2f;
+typedef Vector2<double> Vector2d;
+typedef Vector2<int>    Vector2i;
+
+typedef Vector2<float>  Point2f;
+typedef Vector2<double> Point2d;
+typedef Vector2<int>    Point2i;
+
+//-------------------------------------------------------------------------------------
+// ***** Vector3<> - 3D vector of {x, y, z}
+
+//
+// Vector3f (Vector3d) represents a 3-dimensional vector or point in space,
+// consisting of coordinates x, y and z.
+
+template<class T>
+class Vector3
+{
+public:
+    typedef T ElementType;
+    static const size_t ElementCount = 3;
+
+    T x, y, z;
+
+    // FIXME: default initialization of a vector class can be very expensive in a full-blown
+    // application.  A few hundred thousand vector constructions is not unlikely and can add
+    // up to milliseconds of time on processors like the PS3 PPU.
+    Vector3() : x(0), y(0), z(0) { }
+    Vector3(T x_, T y_, T z_ = 0) : x(x_), y(y_), z(z_) { }
+    explicit Vector3(T s) : x(s), y(s), z(s) { }
+    explicit Vector3(const Vector3<typename Math<T>::OtherFloatType> &src)
+        : x((T)src.x), y((T)src.y), z((T)src.z) { }
+
+    static Vector3 Zero() { return Vector3(0, 0, 0); }
+
+    // C-interop support.
+    typedef  typename CompatibleTypes<Vector3<T> >::Type CompatibleType;
+
+    Vector3(const CompatibleType& s) : x(s.x), y(s.y), z(s.z) {  }
+
+    operator const CompatibleType& () const
+    {
+        OVR_MATH_STATIC_ASSERT(sizeof(Vector3<T>) == sizeof(CompatibleType), "sizeof(Vector3<T>) failure");
+        return reinterpret_cast<const CompatibleType&>(*this);
+    }
+
+    bool     operator== (const Vector3& b) const  { return x == b.x && y == b.y && z == b.z; }
+    bool     operator!= (const Vector3& b) const  { return x != b.x || y != b.y || z != b.z; }
+             
+    Vector3  operator+  (const Vector3& b) const  { return Vector3(x + b.x, y + b.y, z + b.z); }
+    Vector3& operator+= (const Vector3& b)        { x += b.x; y += b.y; z += b.z; return *this; }
+    Vector3  operator-  (const Vector3& b) const  { return Vector3(x - b.x, y - b.y, z - b.z); }
+    Vector3& operator-= (const Vector3& b)        { x -= b.x; y -= b.y; z -= b.z; return *this; }
+    Vector3  operator- () const                   { return Vector3(-x, -y, -z); }
+
+    // Scalar multiplication/division scales vector.
+    Vector3  operator*  (T s) const               { return Vector3(x*s, y*s, z*s); }
+    Vector3& operator*= (T s)                     { x *= s; y *= s; z *= s; return *this; }
+
+    Vector3  operator/  (T s) const               { T rcp = T(1)/s;
+                                                    return Vector3(x*rcp, y*rcp, z*rcp); }
+    Vector3& operator/= (T s)                     { T rcp = T(1)/s;
+                                                    x *= rcp; y *= rcp; z *= rcp;
+                                                    return *this; }
+
+    static Vector3  Min(const Vector3& a, const Vector3& b)
+    {
+        return Vector3((a.x < b.x) ? a.x : b.x,
+                       (a.y < b.y) ? a.y : b.y,
+                       (a.z < b.z) ? a.z : b.z);
+    }
+    static Vector3  Max(const Vector3& a, const Vector3& b)
+    { 
+        return Vector3((a.x > b.x) ? a.x : b.x,
+                       (a.y > b.y) ? a.y : b.y,
+                       (a.z > b.z) ? a.z : b.z);
+    }        
+
+    Vector3 Clamped(T maxMag) const
+    {
+        T magSquared = LengthSq();
+        if (magSquared <= Sqr(maxMag))
+            return *this;
+        else
+            return *this * (maxMag / sqrt(magSquared));
+    }
+
+    // Compare two vectors for equality with tolerance. Returns true if vectors match within tolerance.
+    bool IsEqual(const Vector3& b, T tolerance = Math<T>::Tolerance()) const
+    {
+        return (fabs(b.x-x) <= tolerance) && 
+               (fabs(b.y-y) <= tolerance) && 
+               (fabs(b.z-z) <= tolerance);
+    }
+    bool Compare(const Vector3& b, T tolerance = Math<T>::Tolerance()) const
+    {
+        return IsEqual(b, tolerance);
+    }
+
+    T& operator[] (int idx)
+    {
+        OVR_MATH_ASSERT(0 <= idx && idx < 3);
+        return *(&x + idx);
+    }
+
+    const T& operator[] (int idx) const
+    {
+        OVR_MATH_ASSERT(0 <= idx && idx < 3);
+        return *(&x + idx);
+    }
+
+    // Entrywise product of two vectors
+    Vector3    EntrywiseMultiply(const Vector3& b) const    { return Vector3(x * b.x, 
+                                                                         y * b.y, 
+                                                                         z * b.z);}
+
+    // Multiply and divide operators do entry-wise math
+    Vector3  operator*  (const Vector3& b) const        { return Vector3(x * b.x, 
+                                                                         y * b.y, 
+                                                                         z * b.z); }
+
+    Vector3  operator/  (const Vector3& b) const        { return Vector3(x / b.x, 
+                                                                         y / b.y, 
+                                                                         z / b.z); }
+
+
+    // Dot product
+    // Used to calculate angle q between two vectors among other things,
+    // as (A dot B) = |a||b|cos(q).
+     T      Dot(const Vector3& b) const          { return x*b.x + y*b.y + z*b.z; }
+
+    // Compute cross product, which generates a normal vector.
+    // Direction vector can be determined by the right-hand rule: pointing the index finger in
+    // direction a and the middle finger in direction b, the thumb will point along a.Cross(b).
+    Vector3 Cross(const Vector3& b) const        { return Vector3(y*b.z - z*b.y,
+                                                                  z*b.x - x*b.z,
+                                                                  x*b.y - y*b.x); }
+
+    // Returns the angle from this vector to b, in radians.
+    T       Angle(const Vector3& b) const 
+    {
+        T div = LengthSq()*b.LengthSq();
+        OVR_MATH_ASSERT(div != T(0));
+        T result = Acos((this->Dot(b))/sqrt(div));
+        return result;
+    }
+
+    // Return Length of the vector squared.
+    T       LengthSq() const                     { return (x * x + y * y + z * z); }
+
+    // Return vector length.
+    T       Length() const                       { return (T)sqrt(LengthSq()); }
+
+    // Returns squared distance between two points represented by vectors.
+    T       DistanceSq(Vector3 const& b) const         { return (*this - b).LengthSq(); }
+
+    // Returns distance between two points represented by vectors.
+    T       Distance(Vector3 const& b) const     { return (*this - b).Length(); }
+    
+    bool    IsNormalized() const                 { return fabs(LengthSq() - T(1)) < Math<T>::Tolerance(); }
+
+    // Normalize, converting vector length to 1.
+    void    Normalize()                          
+    {
+        T s = Length();
+        if (s != T(0))
+            s = T(1) / s;
+        *this *= s;
+    }
+
+    // Returns normalized (unit) version of the vector without modifying itself.
+    Vector3 Normalized() const                   
+    { 
+        T s = Length();
+        if (s != T(0))
+            s = T(1) / s;
+        return *this * s;
+    }
+
+    // Linearly interpolates from this vector to another.
+    // Factor should be between 0.0 and 1.0, with 0 giving full value to this.
+    Vector3 Lerp(const Vector3& b, T f) const    { return *this*(T(1) - f) + b*f; }
+
+    // Projects this vector onto the argument; in other words,
+    // A.Project(B) returns projection of vector A onto B.
+    Vector3 ProjectTo(const Vector3& b) const    
+    { 
+        T l2 = b.LengthSq();
+        OVR_MATH_ASSERT(l2 != T(0));
+        return b * ( Dot(b) / l2 ); 
+    }
+
+    // Projects this vector onto a plane defined by a normal vector
+    Vector3 ProjectToPlane(const Vector3& normal) const { return *this - this->ProjectTo(normal); }
+};
+
+typedef Vector3<float>  Vector3f;
+typedef Vector3<double> Vector3d;
+typedef Vector3<int32_t>  Vector3i;
+    
+OVR_MATH_STATIC_ASSERT((sizeof(Vector3f) == 3*sizeof(float)), "sizeof(Vector3f) failure");
+OVR_MATH_STATIC_ASSERT((sizeof(Vector3d) == 3*sizeof(double)), "sizeof(Vector3d) failure");
+OVR_MATH_STATIC_ASSERT((sizeof(Vector3i) == 3*sizeof(int32_t)), "sizeof(Vector3i) failure");
+
+typedef Vector3<float>   Point3f;
+typedef Vector3<double>  Point3d;
+typedef Vector3<int32_t>  Point3i;
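// Illustrative sketch (not part of this header): the right-hand-rule Cross() described
// above, plus the implicit Vector3f -> ovrVector3f conversion provided by CompatibleTypes.
// The function name is hypothetical.
inline ovrVector3f ExampleUpVectorSketch()
{
    Vector3f back (0.0f, 0.0f, 1.0f);   // +Z, pointing out of the screen (Oculus RHS)
    Vector3f right(1.0f, 0.0f, 0.0f);   // +X
    Vector3f up = back.Cross(right);    // right-hand rule: Z x X = +Y

    return up;                          // converted via operator const ovrVector3f& ()
}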
+
+
+//-------------------------------------------------------------------------------------
+// ***** Vector4<> - 4D vector of {x, y, z, w}
+
+//
+// Vector4f (Vector4d) represents a 4-dimensional vector or point in space,
+// consisting of coordinates x, y, z and w.
+
+template<class T>
+class Vector4
+{
+public:
+    typedef T ElementType;
+    static const size_t ElementCount = 4;
+
+    T x, y, z, w;
+
+    // FIXME: default initialization of a vector class can be very expensive in a full-blown
+    // application.  A few hundred thousand vector constructions is not unlikely and can add
+    // up to milliseconds of time on processors like the PS3 PPU.
+    Vector4() : x(0), y(0), z(0), w(0) { }
+    Vector4(T x_, T y_, T z_, T w_) : x(x_), y(y_), z(z_), w(w_) { }
+    explicit Vector4(T s) : x(s), y(s), z(s), w(s) { }
+    explicit Vector4(const Vector3<T>& v, const T w_=T(1)) : x(v.x), y(v.y), z(v.z), w(w_) { }
+    explicit Vector4(const Vector4<typename Math<T>::OtherFloatType> &src)
+        : x((T)src.x), y((T)src.y), z((T)src.z), w((T)src.w) { }
+
+    static Vector4 Zero() { return Vector4(0, 0, 0, 0); }
+
+    // C-interop support.
+    typedef  typename CompatibleTypes< Vector4<T> >::Type CompatibleType;
+
+    Vector4(const CompatibleType& s) : x(s.x), y(s.y), z(s.z), w(s.w) {  }
+
+    operator const CompatibleType& () const
+    {
+        OVR_MATH_STATIC_ASSERT(sizeof(Vector4<T>) == sizeof(CompatibleType), "sizeof(Vector4<T>) failure");
+        return reinterpret_cast<const CompatibleType&>(*this);
+    }
+
+    Vector4& operator= (const Vector3<T>& other)  { x=other.x; y=other.y; z=other.z; w=1; return *this; }
+    bool     operator== (const Vector4& b) const  { return x == b.x && y == b.y && z == b.z && w == b.w; }
+    bool     operator!= (const Vector4& b) const  { return x != b.x || y != b.y || z != b.z || w != b.w; }
+             
+    Vector4  operator+  (const Vector4& b) const  { return Vector4(x + b.x, y + b.y, z + b.z, w + b.w); }
+    Vector4& operator+= (const Vector4& b)        { x += b.x; y += b.y; z += b.z; w += b.w; return *this; }
+    Vector4  operator-  (const Vector4& b) const  { return Vector4(x - b.x, y - b.y, z - b.z, w - b.w); }
+    Vector4& operator-= (const Vector4& b)        { x -= b.x; y -= b.y; z -= b.z; w -= b.w; return *this; }
+    Vector4  operator- () const                   { return Vector4(-x, -y, -z, -w); }
+
+    // Scalar multiplication/division scales vector.
+    Vector4  operator*  (T s) const               { return Vector4(x*s, y*s, z*s, w*s); }
+    Vector4& operator*= (T s)                     { x *= s; y *= s; z *= s; w *= s;return *this; }
+
+    Vector4  operator/  (T s) const               { T rcp = T(1)/s;
+                                                    return Vector4(x*rcp, y*rcp, z*rcp, w*rcp); }
+    Vector4& operator/= (T s)                     { T rcp = T(1)/s;
+                                                    x *= rcp; y *= rcp; z *= rcp; w *= rcp;
+                                                    return *this; }
+
+    static Vector4  Min(const Vector4& a, const Vector4& b)
+    {
+        return Vector4((a.x < b.x) ? a.x : b.x,
+                       (a.y < b.y) ? a.y : b.y,
+                       (a.z < b.z) ? a.z : b.z,
+                       (a.w < b.w) ? a.w : b.w);
+    }
+    static Vector4  Max(const Vector4& a, const Vector4& b)
+    { 
+        return Vector4((a.x > b.x) ? a.x : b.x,
+                       (a.y > b.y) ? a.y : b.y,
+                       (a.z > b.z) ? a.z : b.z,
+                       (a.w > b.w) ? a.w : b.w);
+    }        
+
+    Vector4 Clamped(T maxMag) const
+    {
+        T magSquared = LengthSq();
+        if (magSquared <= Sqr(maxMag))
+            return *this;
+        else
+            return *this * (maxMag / sqrt(magSquared));
+    }
+
+    // Compare two vectors for equality with tolerance. Returns true if vectors match within tolerance.
+    bool IsEqual(const Vector4& b, T tolerance = Math<T>::Tolerance()) const
+    {
+        return (fabs(b.x-x) <= tolerance) && 
+               (fabs(b.y-y) <= tolerance) && 
+               (fabs(b.z-z) <= tolerance) &&
+               (fabs(b.w-w) <= tolerance);
+    }
+    bool Compare(const Vector4& b, T tolerance = Math<T>::Tolerance()) const
+    {
+        return IsEqual(b, tolerance);
+    }
+    
+    T& operator[] (int idx)
+    {
+        OVR_MATH_ASSERT(0 <= idx && idx < 4);
+        return *(&x + idx);
+    }
+
+    const T& operator[] (int idx) const
+    {
+        OVR_MATH_ASSERT(0 <= idx && idx < 4);
+        return *(&x + idx);
+    }
+
+    // Entry-wise product of two vectors
+    Vector4    EntrywiseMultiply(const Vector4& b) const    { return Vector4(x * b.x, 
+                                                                         y * b.y, 
+                                                                         z * b.z,
+                                                                         w * b.w);}
+
+    // Multiply and divide operators do entry-wise math
+    Vector4  operator*  (const Vector4& b) const        { return Vector4(x * b.x, 
+                                                                         y * b.y, 
+                                                                         z * b.z,
+                                                                         w * b.w); }
+
+    Vector4  operator/  (const Vector4& b) const        { return Vector4(x / b.x, 
+                                                                         y / b.y, 
+                                                                         z / b.z,
+                                                                         w / b.w); }
+
+
+    // Dot product
+    T       Dot(const Vector4& b) const          { return x*b.x + y*b.y + z*b.z + w*b.w; }
+
+    // Return Length of the vector squared.
+    T       LengthSq() const                     { return (x * x + y * y + z * z + w * w); }
+
+    // Return vector length.
+    T       Length() const                       { return sqrt(LengthSq()); }
+    
+    bool    IsNormalized() const                 { return fabs(LengthSq() - T(1)) < Math<T>::Tolerance(); }
+
+    // Normalize, converting vector length to 1.
+    void    Normalize()                          
+    {
+        T s = Length();
+        if (s != T(0))
+            s = T(1) / s;
+        *this *= s;
+    }
+
+    // Returns normalized (unit) version of the vector without modifying itself.
+    Vector4 Normalized() const                   
+    { 
+        T s = Length();
+        if (s != T(0))
+            s = T(1) / s;
+        return *this * s;
+    }
+
+    // Linearly interpolates from this vector to another.
+    // Factor should be between 0.0 and 1.0, with 0 giving full value to this.
+    Vector4 Lerp(const Vector4& b, T f) const    { return *this*(T(1) - f) + b*f; }
+};
+
+typedef Vector4<float>  Vector4f;
+typedef Vector4<double> Vector4d;
+typedef Vector4<int>    Vector4i;
+
+
+//-------------------------------------------------------------------------------------
+// ***** Bounds3
+
+// Bounds class used to describe a 3D axis aligned bounding box.
+
+template<class T>
+class Bounds3
+{
+public:
+    Vector3<T>    b[2];
+
+    Bounds3()
+    {
+    }
+
+    Bounds3( const Vector3<T> & mins, const Vector3<T> & maxs )
+    {
+        b[0] = mins;
+        b[1] = maxs;
+    }
+
+    void Clear()
+    {
+        b[0].x = b[0].y = b[0].z = Math<T>::MaxValue;
+        b[1].x = b[1].y = b[1].z = -Math<T>::MaxValue;
+    }
+
+    void AddPoint( const Vector3<T> & v )
+    {
+        b[0].x = (b[0].x < v.x ? b[0].x : v.x);
+        b[0].y = (b[0].y < v.y ? b[0].y : v.y);
+        b[0].z = (b[0].z < v.z ? b[0].z : v.z);
+        b[1].x = (v.x < b[1].x ? b[1].x : v.x);
+        b[1].y = (v.y < b[1].y ? b[1].y : v.y);
+        b[1].z = (v.z < b[1].z ? b[1].z : v.z);
+    }
+
+    const Vector3<T> & GetMins() const { return b[0]; }
+    const Vector3<T> & GetMaxs() const { return b[1]; }
+
+    Vector3<T> & GetMins() { return b[0]; }
+    Vector3<T> & GetMaxs() { return b[1]; }
+};
+
+typedef Bounds3<float>    Bounds3f;
+typedef Bounds3<double>    Bounds3d;
+
+
+//-------------------------------------------------------------------------------------
+// ***** Size
+
+// Size class represents 2D size with Width, Height components.
+// Used to describe dimensions of render targets, etc.
+
+template<class T>
+class Size
+{
+public:
+    T   w, h;
+
+    Size()              : w(0), h(0)   { }
+    Size(T w_, T h_)    : w(w_), h(h_) { }
+    explicit Size(T s)  : w(s), h(s)   { }
+    explicit Size(const Size<typename Math<T>::OtherFloatType> &src)
+        : w((T)src.w), h((T)src.h) { }
+
+    // C-interop support.
+    typedef  typename CompatibleTypes<Size<T> >::Type CompatibleType;
+
+    Size(const CompatibleType& s) : w(s.w), h(s.h) {  }
+
+    operator const CompatibleType& () const
+    {
+        OVR_MATH_STATIC_ASSERT(sizeof(Size<T>) == sizeof(CompatibleType), "sizeof(Size<T>) failure");
+        return reinterpret_cast<const CompatibleType&>(*this);
+    }
+
+    bool     operator== (const Size& b) const  { return w == b.w && h == b.h; }
+    bool     operator!= (const Size& b) const  { return w != b.w || h != b.h; }
+             
+    Size  operator+  (const Size& b) const  { return Size(w + b.w, h + b.h); }
+    Size& operator+= (const Size& b)        { w += b.w; h += b.h; return *this; }
+    Size  operator-  (const Size& b) const  { return Size(w - b.w, h - b.h); }
+    Size& operator-= (const Size& b)        { w -= b.w; h -= b.h; return *this; }
+    Size  operator- () const                { return Size(-w, -h); }
+    Size  operator*  (const Size& b) const  { return Size(w * b.w, h * b.h); }
+    Size& operator*= (const Size& b)        { w *= b.w; h *= b.h; return *this; }
+    Size  operator/  (const Size& b) const  { return Size(w / b.w, h / b.h); }
+    Size& operator/= (const Size& b)        { w /= b.w; h /= b.h; return *this; }
+
+    // Scalar multiplication/division scales both components.
+    Size  operator*  (T s) const            { return Size(w*s, h*s); }
+    Size& operator*= (T s)                  { w *= s; h *= s; return *this; }    
+    Size  operator/  (T s) const            { return Size(w/s, h/s); }
+    Size& operator/= (T s)                  { w /= s; h /= s; return *this; }
+
+    static Size Min(const Size& a, const Size& b)  { return Size((a.w  < b.w)  ? a.w  : b.w,
+                                                                 (a.h < b.h) ? a.h : b.h); }
+    static Size Max(const Size& a, const Size& b)  { return Size((a.w  > b.w)  ? a.w  : b.w,
+                                                                 (a.h > b.h) ? a.h : b.h); }
+    
+    T       Area() const                    { return w * h; }
+
+    inline  Vector2<T> ToVector() const     { return Vector2<T>(w, h); }
+};
+
+
+typedef Size<int>       Sizei;
+typedef Size<unsigned>  Sizeu;
+typedef Size<float>     Sizef;
+typedef Size<double>    Sized;
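// Illustrative sketch (not part of this header): Sizei interoperates with the C type
// ovrSizei, e.g. when sizing an eye render target via ovr_GetFovTextureSize().
// The function name and the 1.0f pixel density are example choices.
inline Sizei ExampleEyeTextureSizeSketch(ovrSession session, ovrEyeType eye, ovrFovPort fov)
{
    Sizei texSize = ovr_GetFovTextureSize(session, eye, fov, 1.0f);   // ovrSizei -> Sizei
    return texSize;
}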
+
+
+
+//-----------------------------------------------------------------------------------
+// ***** Rect
+
+// Rect describes a rectangular area for rendering that includes position and size.
+template<class T>
+class Rect
+{
+public:
+    T x, y;
+    T w, h;
+
+    Rect() { }
+    Rect(T x1, T y1, T w1, T h1)                   : x(x1), y(y1), w(w1), h(h1) { }    
+    Rect(const Vector2<T>& pos, const Size<T>& sz) : x(pos.x), y(pos.y), w(sz.w), h(sz.h) { }
+    Rect(const Size<T>& sz)                        : x(0), y(0), w(sz.w), h(sz.h) { }
+    
+    // C-interop support.
+    typedef  typename CompatibleTypes<Rect<T> >::Type CompatibleType;
+
+    Rect(const CompatibleType& s) : x(s.Pos.x), y(s.Pos.y), w(s.Size.w), h(s.Size.h) {  }
+
+    operator const CompatibleType& () const
+    {
+        OVR_MATH_STATIC_ASSERT(sizeof(Rect<T>) == sizeof(CompatibleType), "sizeof(Rect<T>) failure");
+        return reinterpret_cast<const CompatibleType&>(*this);
+    }
+
+    Vector2<T> GetPos() const                { return Vector2<T>(x, y); }
+    Size<T>    GetSize() const               { return Size<T>(w, h); }
+    void       SetPos(const Vector2<T>& pos) { x = pos.x; y = pos.y; }
+    void       SetSize(const Size<T>& sz)    { w = sz.w; h = sz.h; }
+
+    bool operator == (const Rect& vp) const
+    { return (x == vp.x) && (y == vp.y) && (w == vp.w) && (h == vp.h); }
+    bool operator != (const Rect& vp) const
+    { return !operator == (vp); }
+};
+
+typedef Rect<int> Recti;
+
+
+//-------------------------------------------------------------------------------------//
+// ***** Quat
+//
+// Quatf represents a quaternion class used for rotations.
+// 
+// Quaternion multiplications are done in right-to-left order, to match the
+// behavior of matrices.
+
+
+template<class T>
+class Quat
+{
+public:
+    typedef T ElementType;
+    static const size_t ElementCount = 4;
+
+    // x,y,z = axis*sin(angle/2), w = cos(angle/2)
+    T x, y, z, w;    
+
+    Quat() : x(0), y(0), z(0), w(1) { }
+    Quat(T x_, T y_, T z_, T w_) : x(x_), y(y_), z(z_), w(w_) { }
+    explicit Quat(const Quat<typename Math<T>::OtherFloatType> &src)
+        : x((T)src.x), y((T)src.y), z((T)src.z), w((T)src.w)
+    {
+        // NOTE: Converting a normalized Quat<float> to Quat<double>
+        // will generally result in an un-normalized quaternion.
+        // But we don't normalize here in case the quaternion
+        // being converted is not a normalized rotation quaternion.
+    }
+
+    typedef  typename CompatibleTypes<Quat<T> >::Type CompatibleType;
+
+    // C-interop support.
+    Quat(const CompatibleType& s) : x(s.x), y(s.y), z(s.z), w(s.w) { }
+
+    operator CompatibleType () const
+    {
+        CompatibleType result;
+        result.x = x;
+        result.y = y;
+        result.z = z;
+        result.w = w;
+        return result;
+    }
+
+    // Constructs quaternion for rotation around the axis by an angle.
+    Quat(const Vector3<T>& axis, T angle)
+    {
+        // Make sure we don't divide by zero. 
+        if (axis.LengthSq() == T(0))
+        {
+            // Assert if the axis is zero, but the angle isn't
+            OVR_MATH_ASSERT(angle == T(0));
+            x = y = z = T(0); w = T(1);
+            return;
+        }
+
+        Vector3<T> unitAxis = axis.Normalized();
+        T          sinHalfAngle = sin(angle * T(0.5));
+
+        w = cos(angle * T(0.5));
+        x = unitAxis.x * sinHalfAngle;
+        y = unitAxis.y * sinHalfAngle;
+        z = unitAxis.z * sinHalfAngle;
+    }
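+
+    // Illustrative sketch (not part of the SDK header): a 90-degree CCW rotation about +Y:
+    //     Quatf yaw90(Vector3f(0.0f, 1.0f, 0.0f), ((float)MATH_DOUBLE_PIOVER2));
+    //     // yaw90.Rotate(Vector3f(1, 0, 0)) =~ Vector3f(0, 0, -1) in the RH convention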
+
+    // Constructs quaternion for rotation around one of the coordinate axis by an angle.
+    Quat(Axis A, T angle, RotateDirection d = Rotate_CCW, HandedSystem s = Handed_R)
+    {
+        T sinHalfAngle = s * d *sin(angle * T(0.5));
+        T v[3];
+        v[0] = v[1] = v[2] = T(0);
+        v[A] = sinHalfAngle;
+
+        w = cos(angle * T(0.5));
+        x = v[0];
+        y = v[1];
+        z = v[2];
+    }
+
+    Quat operator-() { return Quat(-x, -y, -z, -w); }   // unary minus
+
+    static Quat Identity() { return Quat(0, 0, 0, 1); }
+
+    // Compute axis and angle from quaternion
+    void GetAxisAngle(Vector3<T>* axis, T* angle) const
+    {
+        if ( x*x + y*y + z*z > Math<T>::Tolerance() * Math<T>::Tolerance() ) {
+            *axis  = Vector3<T>(x, y, z).Normalized();
+            *angle = 2 * Acos(w);
+            if (*angle > ((T)MATH_DOUBLE_PI)) // Reduce the magnitude of the angle, if necessary
+            {
+                *angle = ((T)MATH_DOUBLE_TWOPI) - *angle;
+                *axis = *axis * (-1);
+            }
+        }
+        else 
+        {
+            *axis = Vector3<T>(1, 0, 0);
+            *angle= T(0);
+        }
+    }
+
+    // Convert a quaternion to a rotation vector, also known as
+    // Rodrigues vector, AxisAngle vector, SORA vector, exponential map.
+    // A rotation vector describes a rotation about an axis:
+    // the axis of rotation is the vector normalized,
+    // the angle of rotation is the magnitude of the vector.
+    Vector3<T> ToRotationVector() const
+    {
+        OVR_MATH_ASSERT(IsNormalized() || LengthSq() == 0);
+        T s = T(0);
+        T sinHalfAngle = sqrt(x*x + y*y + z*z);
+        if (sinHalfAngle > T(0))
+        {
+            T cosHalfAngle = w;
+            T halfAngle = atan2(sinHalfAngle, cosHalfAngle);
+
+            // Ensure minimum rotation magnitude
+            if (cosHalfAngle < 0)
+                halfAngle -= T(MATH_DOUBLE_PI);
+
+            s = T(2) * halfAngle / sinHalfAngle;
+        }
+        return Vector3<T>(x*s, y*s, z*s);
+    }
+
+    // Faster version of the above, optimized for use with small rotations, where rotation angle ~= sin(angle)
+    inline OVR::Vector3<T> FastToRotationVector() const
+    {
+        OVR_MATH_ASSERT(IsNormalized());
+        T s;
+        T sinHalfSquared = x*x + y*y + z*z;
+        if (sinHalfSquared < T(.0037)) // =~ sin(7/2 degrees)^2
+        {
+            // Max rotation magnitude error is about .062% at 7 degrees rotation, or about .0043 degrees
+            s = T(2) * Sign(w);
+        }
+        else
+        {
+            T sinHalfAngle = sqrt(sinHalfSquared);
+            T cosHalfAngle = w;
+            T halfAngle = atan2(sinHalfAngle, cosHalfAngle);
+
+            // Ensure minimum rotation magnitude
+            if (cosHalfAngle < 0)
+                halfAngle -= T(MATH_DOUBLE_PI);
+
+            s = T(2) * halfAngle / sinHalfAngle;
+        }
+        return Vector3<T>(x*s, y*s, z*s);
+    }
+
+    // Given a rotation vector of form unitRotationAxis * angle,
+    // returns the equivalent quaternion (unitRotationAxis * sin(angle), cos(Angle)).
+    static Quat FromRotationVector(const Vector3<T>& v)
+    {
+        T angleSquared = v.LengthSq();
+        T s = T(0);
+        T c = T(1);
+        if (angleSquared > T(0))
+        {
+            T angle = sqrt(angleSquared);
+            s = sin(angle * T(0.5)) / angle;    // normalize
+            c = cos(angle * T(0.5));
+        }
+        return Quat(s*v.x, s*v.y, s*v.z, c);
+    }
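+
+    // Illustrative round-trip sketch (not part of the SDK header); 'q' is a hypothetical quaternion:
+    //     Quatf q(Vector3f(1.0f, 0.0f, 0.0f), 0.3f);     // 0.3 rad about +X
+    //     Vector3f rv = q.ToRotationVector();            // =~ (0.3, 0, 0)
+    //     Quatf back  = Quatf::FromRotationVector(rv);   // =~ q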
+
+    // Faster version of above, optimized for use with small rotation magnitudes, where rotation angle =~ sin(angle).
+    // If normalize is false, small-angle quaternions are returned un-normalized.
+    inline static Quat FastFromRotationVector(const OVR::Vector3<T>& v, bool normalize = true)
+    {
+        T s, c;
+        T angleSquared = v.LengthSq();
+        if (angleSquared < T(0.0076))   // =~ (5 degrees*pi/180)^2
+        {
+            s = T(0.5);
+            c = T(1.0);
+            // Max rotation magnitude error (after normalization) is about .064% at 5 degrees rotation, or .0032 degrees
+            if (normalize && angleSquared > 0)
+            {
+                // sin(angle/2)^2 ~= (angle/2)^2 and cos(angle/2)^2 ~= 1
+                T invLen = T(1) / sqrt(angleSquared * T(0.25) + T(1)); // normalize
+                s = s * invLen;
+                c = c * invLen;
+            }
+        }
+        else
+        {
+            T angle = sqrt(angleSquared);
+            s = sin(angle * T(0.5)) / angle;
+            c = cos(angle * T(0.5));
+        }
+        return Quat(s*v.x, s*v.y, s*v.z, c);
+    }
+
+    // Constructs the quaternion from a rotation matrix
+    explicit Quat(const Matrix4<T>& m)
+    {
+        T trace = m.M[0][0] + m.M[1][1] + m.M[2][2];
+
+        // In almost all cases, the first part is executed.
+        // However, if the trace is not positive, the other
+        // cases arise.
+        if (trace > T(0)) 
+        {
+            T s = sqrt(trace + T(1)) * T(2); // s=4*qw
+            w = T(0.25) * s;
+            x = (m.M[2][1] - m.M[1][2]) / s;
+            y = (m.M[0][2] - m.M[2][0]) / s;
+            z = (m.M[1][0] - m.M[0][1]) / s; 
+        } 
+        else if ((m.M[0][0] > m.M[1][1])&&(m.M[0][0] > m.M[2][2])) 
+        {
+            T s = sqrt(T(1) + m.M[0][0] - m.M[1][1] - m.M[2][2]) * T(2);
+            w = (m.M[2][1] - m.M[1][2]) / s;
+            x = T(0.25) * s;
+            y = (m.M[0][1] + m.M[1][0]) / s;
+            z = (m.M[2][0] + m.M[0][2]) / s;
+        } 
+        else if (m.M[1][1] > m.M[2][2]) 
+        {
+            T s = sqrt(T(1) + m.M[1][1] - m.M[0][0] - m.M[2][2]) * T(2); // S=4*qy
+            w = (m.M[0][2] - m.M[2][0]) / s;
+            x = (m.M[0][1] + m.M[1][0]) / s;
+            y = T(0.25) * s;
+            z = (m.M[1][2] + m.M[2][1]) / s;
+        } 
+        else 
+        {
+            T s = sqrt(T(1) + m.M[2][2] - m.M[0][0] - m.M[1][1]) * T(2); // S=4*qz
+            w = (m.M[1][0] - m.M[0][1]) / s;
+            x = (m.M[0][2] + m.M[2][0]) / s;
+            y = (m.M[1][2] + m.M[2][1]) / s;
+            z = T(0.25) * s;
+        }
+        OVR_MATH_ASSERT(IsNormalized());    // Ensure input matrix is orthogonal
+    }
+
+    // Constructs the quaternion from a rotation matrix
+    explicit Quat(const Matrix3<T>& m)
+    {
+        T trace = m.M[0][0] + m.M[1][1] + m.M[2][2];
+
+        // In almost all cases, the first part is executed.
+        // However, if the trace is not positive, the other
+        // cases arise.
+        if (trace > T(0)) 
+        {
+            T s = sqrt(trace + T(1)) * T(2); // s=4*qw
+            w = T(0.25) * s;
+            x = (m.M[2][1] - m.M[1][2]) / s;
+            y = (m.M[0][2] - m.M[2][0]) / s;
+            z = (m.M[1][0] - m.M[0][1]) / s; 
+        } 
+        else if ((m.M[0][0] > m.M[1][1])&&(m.M[0][0] > m.M[2][2])) 
+        {
+            T s = sqrt(T(1) + m.M[0][0] - m.M[1][1] - m.M[2][2]) * T(2);
+            w = (m.M[2][1] - m.M[1][2]) / s;
+            x = T(0.25) * s;
+            y = (m.M[0][1] + m.M[1][0]) / s;
+            z = (m.M[2][0] + m.M[0][2]) / s;
+        } 
+        else if (m.M[1][1] > m.M[2][2]) 
+        {
+            T s = sqrt(T(1) + m.M[1][1] - m.M[0][0] - m.M[2][2]) * T(2); // S=4*qy
+            w = (m.M[0][2] - m.M[2][0]) / s;
+            x = (m.M[0][1] + m.M[1][0]) / s;
+            y = T(0.25) * s;
+            z = (m.M[1][2] + m.M[2][1]) / s;
+        } 
+        else 
+        {
+            T s = sqrt(T(1) + m.M[2][2] - m.M[0][0] - m.M[1][1]) * T(2); // S=4*qz
+            w = (m.M[1][0] - m.M[0][1]) / s;
+            x = (m.M[0][2] + m.M[2][0]) / s;
+            y = (m.M[1][2] + m.M[2][1]) / s;
+            z = T(0.25) * s;
+        }
+        OVR_MATH_ASSERT(IsNormalized());    // Ensure input matrix is orthogonal
+    }
+
+    bool operator== (const Quat& b) const   { return x == b.x && y == b.y && z == b.z && w == b.w; }
+    bool operator!= (const Quat& b) const   { return x != b.x || y != b.y || z != b.z || w != b.w; }
+
+    Quat  operator+  (const Quat& b) const  { return Quat(x + b.x, y + b.y, z + b.z, w + b.w); }
+    Quat& operator+= (const Quat& b)        { w += b.w; x += b.x; y += b.y; z += b.z; return *this; }
+    Quat  operator-  (const Quat& b) const  { return Quat(x - b.x, y - b.y, z - b.z, w - b.w); }
+    Quat& operator-= (const Quat& b)        { w -= b.w; x -= b.x; y -= b.y; z -= b.z; return *this; }
+
+    Quat  operator*  (T s) const            { return Quat(x * s, y * s, z * s, w * s); }
+    Quat& operator*= (T s)                  { w *= s; x *= s; y *= s; z *= s; return *this; }
+    Quat  operator/  (T s) const            { T rcp = T(1)/s; return Quat(x * rcp, y * rcp, z * rcp, w *rcp); }
+    Quat& operator/= (T s)                  { T rcp = T(1)/s; w *= rcp; x *= rcp; y *= rcp; z *= rcp; return *this; }
+
+    // Compare two quats for equality within tolerance. Returns true if quats match within tolerance.
+    bool IsEqual(const Quat& b, T tolerance = Math<T>::Tolerance()) const
+    {
+        return Abs(Dot(b)) >= T(1) - tolerance;
+    }
+
+    static T Abs(const T v)                 { return (v >= 0) ? v : -v; }
+
+    // Get Imaginary part vector
+    Vector3<T> Imag() const                 { return Vector3<T>(x,y,z); }
+
+    // Get quaternion length.
+    T       Length() const                  { return sqrt(LengthSq()); }
+
+    // Get quaternion length squared.
+    T       LengthSq() const                { return (x * x + y * y + z * z + w * w); }
+
+    // Simple Euclidean distance in R^4 (not SLERP distance, but at least respects Haar measure)
+    T       Distance(const Quat& q) const    
+    { 
+        T d1 = (*this - q).Length();
+        T d2 = (*this + q).Length(); // Antipodal point check
+        return (d1 < d2) ? d1 : d2;
+    }
+
+    T       DistanceSq(const Quat& q) const
+    {
+        T d1 = (*this - q).LengthSq();
+        T d2 = (*this + q).LengthSq(); // Antipodal point check
+        return (d1 < d2) ? d1 : d2;
+    }
+
+    T       Dot(const Quat& q) const
+    {
+        return x * q.x + y * q.y + z * q.z + w * q.w;
+    }
+
+    // Angle between two quaternions in radians
+    T Angle(const Quat& q) const
+    {
+        return T(2) * Acos(Abs(Dot(q)));
+    }
+
+    // Angle of quaternion
+    T Angle() const
+    {
+        return T(2) * Acos(Abs(w));
+    }
+
+    // Normalize
+    bool    IsNormalized() const            { return fabs(LengthSq() - T(1)) < Math<T>::Tolerance(); }
+
+    void    Normalize()
+    {
+        T s = Length();
+        if (s != T(0))
+            s = T(1) / s;
+        *this *= s;
+    }
+
+    Quat    Normalized() const
+    { 
+        T s = Length();
+        if (s != T(0))
+            s = T(1) / s;
+        return *this * s;
+    }
+
+    inline void EnsureSameHemisphere(const Quat& o)
+    {
+        if (Dot(o) < T(0))
+        {
+            x = -x;
+            y = -y;
+            z = -z;
+            w = -w;
+        }
+    }
+
+    // Returns conjugate of the quaternion. Produces inverse rotation if quaternion is normalized.
+    Quat    Conj() const                    { return Quat(-x, -y, -z, w); }
+
+    // Quaternion multiplication. Combines quaternion rotations, performing the one on the 
+    // right hand side first.
+    Quat  operator* (const Quat& b) const   { return Quat(w * b.x + x * b.w + y * b.z - z * b.y,
+                                                          w * b.y - x * b.z + y * b.w + z * b.x,
+                                                          w * b.z + x * b.y - y * b.x + z * b.w,
+                                                          w * b.w - x * b.x - y * b.y - z * b.z); }
+    const Quat& operator*= (const Quat& b)  { *this = *this * b;  return *this; }
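+
+    // Composition sketch (not part of the SDK header); a, b, v are hypothetical normalized
+    // quaternions and a vector. (a * b) applies b first, then a, matching the matrix convention:
+    //     Vector3f v1 = (a * b).Rotate(v);
+    //     Vector3f v2 = a.Rotate(b.Rotate(v));    // same result up to floating-point error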
+
+    // this^p normalized; same as rotating by this quaternion p times.
+    Quat PowNormalized(T p) const
+    {
+        Vector3<T> v;
+        T          a;
+        GetAxisAngle(&v, &a);
+        return Quat(v, a * p);
+    }
+
+    // Compute quaternion that rotates v into alignTo: alignTo = Quat::Align(alignTo, v).Rotate(v).
+    // NOTE: alignTo and v must be normalized.
+    static Quat Align(const Vector3<T>& alignTo, const Vector3<T>& v)
+    {
+        OVR_MATH_ASSERT(alignTo.IsNormalized() && v.IsNormalized());
+        Vector3<T> bisector = (v + alignTo);
+        bisector.Normalize();
+        T cosHalfAngle = v.Dot(bisector); // 0..1
+        if (cosHalfAngle > T(0))
+        {
+            Vector3<T> imag = v.Cross(bisector);
+            return Quat(imag.x, imag.y, imag.z, cosHalfAngle);
+        }
+        else
+        {
+            // cosHalfAngle == 0: a 180 degree rotation.
+            // sinHalfAngle == 1, rotation axis is any axis perpendicular
+            // to alignTo.  Choose axis to include largest magnitude components
+            if (fabs(v.x) > fabs(v.y))
+            {
+                // x or z is max magnitude component
+                // = Cross(v, (0,1,0)).Normalized();
+                T invLen = sqrt(v.x*v.x + v.z*v.z);
+                if (invLen > T(0))
+                    invLen = T(1) / invLen;
+                return Quat(-v.z*invLen, 0, v.x*invLen, 0);
+            }
+            else
+            {
+                // y or z is max magnitude component
+                // = Cross(v, (1,0,0)).Normalized();
+                T invLen = sqrt(v.y*v.y + v.z*v.z);
+                if (invLen > T(0))
+                    invLen = T(1) / invLen;
+                return Quat(0, v.z*invLen, -v.y*invLen, 0);
+            }
+        }
+    }
+
+    // Normalized linear interpolation of quaternions
+    // NOTE: This function is a bad approximation of Slerp()
+    // when the angle between the *this and b is large.
+    // Use FastSlerp() or Slerp() instead.
+    Quat Lerp(const Quat& b, T s) const
+    {
+        return (*this * (T(1) - s) + b * (Dot(b) < 0 ? -s : s)).Normalized();
+    }
+
+    // Spherical linear interpolation between rotations
+    Quat Slerp(const Quat& b, T s) const
+    {
+        Vector3<T> delta = (b * this->Inverted()).ToRotationVector();
+        return FromRotationVector(delta * s) * *this;
+    }
+
+    // Spherical linear interpolation: much faster for small rotations, accurate for large rotations. See FastTo/FromRotationVector
+    Quat FastSlerp(const Quat& b, T s) const
+    {
+        Vector3<T> delta = (b * this->Inverted()).FastToRotationVector();
+        return (FastFromRotationVector(delta * s, false) * *this).Normalized();
+    }
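+
+    // Interpolation sketch (not part of the SDK header); qa, qb are hypothetical normalized quaternions:
+    //     Quatf halfway = qa.Slerp(qb, 0.5f);     // constant angular velocity path
+    //     Quatf approx  = qa.Lerp(qb, 0.5f);      // cheaper; acceptable only for small angles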
+
+    // Rotate transforms a vector in a manner that matches Matrix rotations (counter-clockwise
+    // when looking in the negative axis direction). Standard formula: q(t) * V * q(t)^-1.
+    Vector3<T> Rotate(const Vector3<T>& v) const
+    {
+        OVR_MATH_ASSERT(isnan(w) || IsNormalized());
+
+        // rv = q * (v,0) * q'
+        // Same as rv = v + real * cross(imag,v)*2 + cross(imag, cross(imag,v)*2);
+
+        // uv = 2 * Imag().Cross(v);
+        T uvx = T(2) * (y*v.z - z*v.y);
+        T uvy = T(2) * (z*v.x - x*v.z);
+        T uvz = T(2) * (x*v.y - y*v.x);
+
+        // return v + Real()*uv + Imag().Cross(uv);
+        return Vector3<T>(v.x + w*uvx + y*uvz - z*uvy,
+                          v.y + w*uvy + z*uvx - x*uvz,
+                          v.z + w*uvz + x*uvy - y*uvx);
+    }
+
+    // Rotation by inverse of *this
+    Vector3<T> InverseRotate(const Vector3<T>& v) const
+    {
+        OVR_MATH_ASSERT(IsNormalized());
+
+        // rv = q' * (v,0) * q
+        // Same as rv = v + real * cross(-imag,v)*2 + cross(-imag, cross(-imag,v)*2);
+        //      or rv = v - real * cross(imag,v)*2 + cross(imag, cross(imag,v)*2);
+
+        // uv = 2 * Imag().Cross(v);
+        T uvx = T(2) * (y*v.z - z*v.y);
+        T uvy = T(2) * (z*v.x - x*v.z);
+        T uvz = T(2) * (x*v.y - y*v.x);
+
+        // return v - Real()*uv + Imag().Cross(uv);
+        return Vector3<T>(v.x - w*uvx + y*uvz - z*uvy,
+                          v.y - w*uvy + z*uvx - x*uvz,
+                          v.z - w*uvz + x*uvy - y*uvx);
+    }
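+
+    // Sketch (not part of the SDK header): InverseRotate undoes Rotate for a normalized quaternion q:
+    //     Vector3f v2 = q.InverseRotate(q.Rotate(v));    // v2 =~ v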
+    
+    // The inverted quaternion rotates in the opposite direction.
+    Quat        Inverted() const
+    {
+        return Quat(-x, -y, -z, w);
+    }
+
+    Quat        Inverse() const
+    {
+        return Quat(-x, -y, -z, w);
+    }
+
+    // Sets this quaternion to the one that rotates in the opposite direction.
+    void        Invert()
+    {
+        *this = Quat(-x, -y, -z, w);
+    }
+    
+    // Time integration of constant angular velocity over dt
+    Quat TimeIntegrate(Vector3<T> angularVelocity, T dt) const
+    {
+        // solution is: this * exp( omega*dt/2 ); FromRotationVector(v) gives exp(v*.5).
+        return (*this * FastFromRotationVector(angularVelocity * dt, false)).Normalized();
+    }
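+
+    // Sketch (not part of the SDK header): advance an orientation by a constant angular velocity
+    // of ~1 rad/s about +Y over one 60 Hz frame; 'q' is a hypothetical normalized quaternion:
+    //     Quatf next = q.TimeIntegrate(Vector3f(0.0f, 1.0f, 0.0f), 1.0f / 60.0f);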
+
+    // Time integration of constant angular acceleration and velocity over dt
+    // These are the first two terms of the "Magnus expansion" of the solution
+    //
+    //   o = o * exp( W=(W1 + W2 + W3+...) * 0.5 );
+    //
+    //  omega1 = (omega + omegaDot*dt)
+    //  W1 = (omega + omega1)*dt/2              
+    //  W2 = cross(omega, omega1)/12*dt^2 % (= -cross(omega_dot, omega)/12*dt^3)
+    // Terms 3 and beyond are vanishingly small:
+    //  W3 = cross(omega_dot, cross(omega_dot, omega))/240*dt^5 
+    //
+    Quat TimeIntegrate(Vector3<T> angularVelocity, Vector3<T> angularAcceleration, T dt) const
+    {
+        const Vector3<T>& omega = angularVelocity;
+        const Vector3<T>& omegaDot = angularAcceleration;
+
+        Vector3<T> omega1 = (omega + omegaDot * dt);
+        Vector3<T> W = ( (omega + omega1) + omega.Cross(omega1) * (dt/T(6)) ) * (dt/T(2));
+
+        // FromRotationVector(v) is exp(v*.5)
+        return (*this * FastFromRotationVector(W, false)).Normalized();
+    }
+
+    // Decompose rotation into three rotations:
+    // roll radians about Z axis, then pitch radians about X axis, then yaw radians about Y axis.
+    // Call with nullptr if a return value is not needed.
+    void GetYawPitchRoll(T* yaw, T* pitch, T* roll) const
+    {
+        return GetEulerAngles<Axis_Y, Axis_X, Axis_Z, Rotate_CCW, Handed_R>(yaw, pitch, roll);
+    }
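+
+    // Sketch (not part of the SDK header): extract only the yaw angle, per the nullptr note above:
+    //     float yaw;
+    //     q.GetYawPitchRoll(&yaw, nullptr, nullptr);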
+
+    // GetEulerAngles extracts Euler angles from the quaternion, in the specified order of
+    // axis rotations and the specified coordinate system. Right-handed coordinate system
+    // is the default, with CCW rotations while looking in the negative axis direction.
+    // Here a,b,c, are the Yaw/Pitch/Roll angles to be returned.
+    // Rotation order is c, b, a:
+    // rotation c around axis A3
+    // is followed by rotation b around axis A2
+    // is followed by rotation a around axis A1
+    // rotations are CCW or CW (D) in LH or RH coordinate system (S)
+    // 
+    template <Axis A1, Axis A2, Axis A3, RotateDirection D, HandedSystem S>
+    void GetEulerAngles(T *a, T *b, T *c) const 
+    {
+        OVR_MATH_ASSERT(IsNormalized());
+        OVR_MATH_STATIC_ASSERT((A1 != A2) && (A2 != A3) && (A1 != A3), "(A1 != A2) && (A2 != A3) && (A1 != A3)");
+
+        T Q[3] = { x, y, z };  //Quaternion components x,y,z
+
+        T ww  = w*w;
+        T Q11 = Q[A1]*Q[A1];
+        T Q22 = Q[A2]*Q[A2];
+        T Q33 = Q[A3]*Q[A3];
+
+        T psign = T(-1);
+        // Determine whether even permutation
+        if (((A1 + 1) % 3 == A2) && ((A2 + 1) % 3 == A3))
+            psign = T(1);
+        
+        T s2 = psign * T(2) * (psign*w*Q[A2] + Q[A1]*Q[A3]);
+
+        T singularityRadius = Math<T>::SingularityRadius();
+        if (s2 < T(-1) + singularityRadius)
+        { // South pole singularity
+            if (a) *a = T(0);
+            if (b) *b = -S*D*((T)MATH_DOUBLE_PIOVER2);
+            if (c) *c = S*D*atan2(T(2)*(psign*Q[A1] * Q[A2] + w*Q[A3]), ww + Q22 - Q11 - Q33 );
+        }
+        else if (s2 > T(1) - singularityRadius)
+        {  // North pole singularity
+            if (a) *a = T(0);
+            if (b) *b = S*D*((T)MATH_DOUBLE_PIOVER2);
+            if (c) *c = S*D*atan2(T(2)*(psign*Q[A1] * Q[A2] + w*Q[A3]), ww + Q22 - Q11 - Q33);
+        }
+        else
+        {
+            if (a) *a = -S*D*atan2(T(-2)*(w*Q[A1] - psign*Q[A2] * Q[A3]), ww + Q33 - Q11 - Q22);
+            if (b) *b = S*D*asin(s2);
+            if (c) *c = S*D*atan2(T(2)*(w*Q[A3] - psign*Q[A1] * Q[A2]), ww + Q11 - Q22 - Q33);
+        }      
+    }
+
+    template <Axis A1, Axis A2, Axis A3, RotateDirection D>
+    void GetEulerAngles(T *a, T *b, T *c) const
+    { GetEulerAngles<A1, A2, A3, D, Handed_R>(a, b, c); }
+
+    template <Axis A1, Axis A2, Axis A3>
+    void GetEulerAngles(T *a, T *b, T *c) const
+    { GetEulerAngles<A1, A2, A3, Rotate_CCW, Handed_R>(a, b, c); }
+
+    // GetEulerAnglesABA extracts Euler angles from the quaternion, in the specified order of
+    // axis rotations and the specified coordinate system. Right-handed coordinate system
+    // is the default, with CCW rotations while looking in the negative axis direction.
+    // Here a,b,c, are the Yaw/Pitch/Roll angles to be returned.
+    // rotation a around axis A1
+    // is followed by rotation b around axis A2
+    // is followed by rotation c around axis A1
+    // Rotations are CCW or CW (D) in LH or RH coordinate system (S)
+    template <Axis A1, Axis A2, RotateDirection D, HandedSystem S>
+    void GetEulerAnglesABA(T *a, T *b, T *c) const
+    {
+        OVR_MATH_ASSERT(IsNormalized());
+        OVR_MATH_STATIC_ASSERT(A1 != A2, "A1 != A2");
+
+        T Q[3] = {x, y, z}; // Quaternion components
+
+        // Determine the missing axis that was not supplied
+        int m = 3 - A1 - A2;
+
+        T ww = w*w;
+        T Q11 = Q[A1]*Q[A1];
+        T Q22 = Q[A2]*Q[A2];
+        T Qmm = Q[m]*Q[m];
+
+        T psign = T(-1);
+        if ((A1 + 1) % 3 == A2) // Determine whether even permutation
+        {
+            psign = T(1);
+        }
+
+        T c2 = ww + Q11 - Q22 - Qmm;
+        T singularityRadius = Math<T>::SingularityRadius();
+        if (c2 < T(-1) + singularityRadius)
+        { // South pole singularity
+            if (a) *a = T(0);
+            if (b) *b = S*D*((T)MATH_DOUBLE_PI);
+            if (c) *c = S*D*atan2(T(2)*(w*Q[A1] - psign*Q[A2] * Q[m]),
+                            ww + Q22 - Q11 - Qmm);
+        }
+        else if (c2 > T(1) - singularityRadius)
+        {  // North pole singularity
+            if (a) *a = T(0);
+            if (b) *b = T(0);
+            if (c) *c = S*D*atan2(T(2)*(w*Q[A1] - psign*Q[A2] * Q[m]),
+                           ww + Q22 - Q11 - Qmm);
+        }
+        else
+        {
+            if (a) *a = S*D*atan2(psign*w*Q[m] + Q[A1] * Q[A2],
+                           w*Q[A2] -psign*Q[A1]*Q[m]);
+            if (b) *b = S*D*acos(c2);
+            if (c) *c = S*D*atan2(-psign*w*Q[m] + Q[A1] * Q[A2],
+                           w*Q[A2] + psign*Q[A1]*Q[m]);
+        }
+    }
+};
+
+typedef Quat<float>  Quatf;
+typedef Quat<double> Quatd;
+
+OVR_MATH_STATIC_ASSERT((sizeof(Quatf) == 4*sizeof(float)), "sizeof(Quatf) failure");
+OVR_MATH_STATIC_ASSERT((sizeof(Quatd) == 4*sizeof(double)), "sizeof(Quatd) failure");
+
+//-------------------------------------------------------------------------------------
+// ***** Pose
+//
+// Position and orientation combined.
+//
+// This structure needs to be the same size and layout on 32-bit and 64-bit arch.
+// Update OVR_PadCheck.cpp when updating this object.
+template<class T>
+class Pose
+{
+public:
+    typedef typename CompatibleTypes<Pose<T> >::Type CompatibleType;
+
+    Pose() { }
+    Pose(const Quat<T>& orientation, const Vector3<T>& pos)
+        : Rotation(orientation), Translation(pos) {  }
+    Pose(const Pose& s)
+        : Rotation(s.Rotation), Translation(s.Translation) {  }
+    Pose(const Matrix3<T>& R, const Vector3<T>& t)
+        : Rotation((Quat<T>)R), Translation(t) {  }
+    Pose(const CompatibleType& s)
+        : Rotation(s.Orientation), Translation(s.Position) {  }
+
+    explicit Pose(const Pose<typename Math<T>::OtherFloatType> &s)
+        : Rotation(s.Rotation), Translation(s.Translation)
+    {
+        // Ensure normalized rotation if converting from float to double
+        if (sizeof(T) > sizeof(typename Math<T>::OtherFloatType))
+            Rotation.Normalize();
+    }
+
+    static Pose Identity() { return Pose(Quat<T>(0, 0, 0, 1), Vector3<T>(0, 0, 0)); }
+
+    void SetIdentity() { Rotation = Quat<T>(0, 0, 0, 1); Translation = Vector3<T>(0, 0, 0); }
+
+    // used to make things obviously broken if someone tries to use the value
+    void SetInvalid() { Rotation = Quat<T>(NAN, NAN, NAN, NAN); Translation = Vector3<T>(NAN, NAN, NAN); }
+
+    bool IsEqual(const Pose&b, T tolerance = Math<T>::Tolerance()) const
+    {
+        return Translation.IsEqual(b.Translation, tolerance) && Rotation.IsEqual(b.Rotation, tolerance);
+    }
+
+    operator typename CompatibleTypes<Pose<T> >::Type () const
+    {
+        typename CompatibleTypes<Pose<T> >::Type result;
+        result.Orientation = Rotation;
+        result.Position = Translation;
+        return result;
+    }
+
+    Quat<T>    Rotation;
+    Vector3<T> Translation;
+    
+    OVR_MATH_STATIC_ASSERT((sizeof(T) == sizeof(double) || sizeof(T) == sizeof(float)), "(sizeof(T) == sizeof(double) || sizeof(T) == sizeof(float))");
+
+    void ToArray(T* arr) const
+    {
+        T temp[7] =  { Rotation.x, Rotation.y, Rotation.z, Rotation.w, Translation.x, Translation.y, Translation.z };
+        for (int i = 0; i < 7; i++) arr[i] = temp[i];
+    }
+
+    static Pose<T> FromArray(const T* v)
+    {
+        Quat<T> rotation(v[0], v[1], v[2], v[3]);
+        Vector3<T> translation(v[4], v[5], v[6]);
+        // Ensure rotation is normalized, in case it was originally a float, stored in a .json file, etc.
+        return Pose<T>(rotation.Normalized(), translation);
+    }
+
+    Vector3<T> Rotate(const Vector3<T>& v) const
+    {
+        return Rotation.Rotate(v);
+    }
+
+    Vector3<T> InverseRotate(const Vector3<T>& v) const
+    {
+        return Rotation.InverseRotate(v);
+    }
+
+    Vector3<T> Translate(const Vector3<T>& v) const
+    {
+        return v + Translation;
+    }
+
+    Vector3<T> Transform(const Vector3<T>& v) const
+    {
+        return Rotate(v) + Translation;
+    }
+
+    Vector3<T> InverseTransform(const Vector3<T>& v) const
+    {
+        return InverseRotate(v - Translation);
+    }
+
+
+    Vector3<T> Apply(const Vector3<T>& v) const
+    {
+        return Transform(v);
+    }
+
+    Pose operator*(const Pose& other) const   
+    {
+        return Pose(Rotation * other.Rotation, Apply(other.Translation));
+    }
+
+    Pose Inverted() const   
+    {
+        Quat<T> inv = Rotation.Inverted();
+        return Pose(inv, inv.Rotate(-Translation));
+    }
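+
+    // Sketch (not part of the SDK header); worldFromHead, headFromEye and pEye are hypothetical:
+    //     Posef worldFromEye = worldFromHead * headFromEye;              // right-to-left, like matrices
+    //     Vector3f pWorld    = worldFromEye.Transform(pEye);             // rotate, then translate
+    //     Vector3f pEyeAgain = worldFromEye.InverseTransform(pWorld);    // =~ pEye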
+
+    // Interpolation between two poses: translation is interpolated with Lerp(),
+    // and rotations are interpolated with Slerp().
+    Pose Lerp(const Pose& b, T s)
+    {
+        return Pose(Rotation.Slerp(b.Rotation, s), Translation.Lerp(b.Translation, s));
+    }
+
+    // Similar to Lerp above, except faster in case of small rotation differences.  See Quat<T>::FastSlerp.
+    Pose FastLerp(const Pose& b, T s)
+    {
+        return Pose(Rotation.FastSlerp(b.Rotation, s), Translation.Lerp(b.Translation, s));
+    }
+
+    Pose TimeIntegrate(const Vector3<T>& linearVelocity, const Vector3<T>& angularVelocity, T dt) const
+    {
+        return Pose(
+                (Rotation * Quat<T>::FastFromRotationVector(angularVelocity * dt, false)).Normalized(),
+                Translation + linearVelocity * dt);
+    }
+
+    Pose TimeIntegrate(const Vector3<T>& linearVelocity, const Vector3<T>& linearAcceleration,
+                       const Vector3<T>& angularVelocity, const Vector3<T>& angularAcceleration,
+                       T dt) const
+    {
+        return Pose(Rotation.TimeIntegrate(angularVelocity, angularAcceleration, dt),
+                    Translation + linearVelocity*dt + linearAcceleration*dt*dt * T(0.5));
+    }
+};
+
+typedef Pose<float>  Posef;
+typedef Pose<double> Posed;
+
+OVR_MATH_STATIC_ASSERT((sizeof(Posed) == sizeof(Quatd) + sizeof(Vector3d)), "sizeof(Posed) failure");
+OVR_MATH_STATIC_ASSERT((sizeof(Posef) == sizeof(Quatf) + sizeof(Vector3f)), "sizeof(Posef) failure");
+    
+
+//-------------------------------------------------------------------------------------
+// ***** Matrix4
+//
+// Matrix4 is a 4x4 matrix used for 3d transformations and projections.
+// The translation is stored in the last column.
+// The matrix is stored in row-major order in memory, meaning that values
+// of the first row are stored before those of the next one.
+//
+// The arrangement of the matrix assumes a right-handed coordinate system
+// and counterclockwise rotations when looking down the axis.
+//
+// Transformation Order:
+//   - Transformations are applied from right to left, so the expression
+//     M1 * M2 * M3 * V means that the vector V is transformed by M3 first,
+//     followed by M2 and M1. 
+//
+// Coordinate system: Right Handed
+//
+// Rotations: Counterclockwise when looking down the axis. All angles are in radians.
+//    
+//  | sx   01   02   tx |    // First column  (sx, 10, 20): Axis X basis vector.
+//  | 10   sy   12   ty |    // Second column (01, sy, 21): Axis Y basis vector.
+//  | 20   21   sz   tz |    // Third column  (02, 12, sz): Axis Z basis vector.
+//  | 30   31   32   33 |
+//
+//  The basis vectors are first three columns.
+
+template<class T>
+class Matrix4
+{
+public:
+    typedef T ElementType;
+    static const size_t Dimension = 4;
+
+    T M[4][4];
+
+    enum NoInitType { NoInit };
+
+    // Construct with no memory initialization.
+    Matrix4(NoInitType) { }
+
+    // By default, we construct identity matrix.
+    Matrix4()
+    {
+        M[0][0] = M[1][1] = M[2][2] = M[3][3] = T(1);
+        M[0][1] = M[1][0] = M[2][3] = M[3][1] = T(0);
+        M[0][2] = M[1][2] = M[2][0] = M[3][2] = T(0);
+        M[0][3] = M[1][3] = M[2][1] = M[3][0] = T(0);
+    }
+
+    Matrix4(T m11, T m12, T m13, T m14,
+            T m21, T m22, T m23, T m24,
+            T m31, T m32, T m33, T m34,
+            T m41, T m42, T m43, T m44)
+    {
+        M[0][0] = m11; M[0][1] = m12; M[0][2] = m13; M[0][3] = m14;
+        M[1][0] = m21; M[1][1] = m22; M[1][2] = m23; M[1][3] = m24;
+        M[2][0] = m31; M[2][1] = m32; M[2][2] = m33; M[2][3] = m34;
+        M[3][0] = m41; M[3][1] = m42; M[3][2] = m43; M[3][3] = m44;
+    }
+
+    Matrix4(T m11, T m12, T m13,
+            T m21, T m22, T m23,
+            T m31, T m32, T m33)
+    {
+        M[0][0] = m11; M[0][1] = m12; M[0][2] = m13; M[0][3] = T(0);
+        M[1][0] = m21; M[1][1] = m22; M[1][2] = m23; M[1][3] = T(0);
+        M[2][0] = m31; M[2][1] = m32; M[2][2] = m33; M[2][3] = T(0);
+        M[3][0] = T(0);   M[3][1] = T(0);   M[3][2] = T(0);   M[3][3] = T(1);
+    }
+
+    explicit Matrix4(const Matrix3<T>& m)
+    {
+        M[0][0] = m.M[0][0]; M[0][1] = m.M[0][1]; M[0][2] = m.M[0][2]; M[0][3] = T(0);
+        M[1][0] = m.M[1][0]; M[1][1] = m.M[1][1]; M[1][2] = m.M[1][2]; M[1][3] = T(0);
+        M[2][0] = m.M[2][0]; M[2][1] = m.M[2][1]; M[2][2] = m.M[2][2]; M[2][3] = T(0);
+        M[3][0] = T(0);         M[3][1] = T(0);         M[3][2] = T(0);         M[3][3] = T(1);
+    }
+
+    explicit Matrix4(const Quat<T>& q)
+    {
+        OVR_MATH_ASSERT(q.IsNormalized());
+        T ww = q.w*q.w;
+        T xx = q.x*q.x;
+        T yy = q.y*q.y;
+        T zz = q.z*q.z;
+
+        M[0][0] = ww + xx - yy - zz;       M[0][1] = 2 * (q.x*q.y - q.w*q.z); M[0][2] = 2 * (q.x*q.z + q.w*q.y); M[0][3] = T(0);
+        M[1][0] = 2 * (q.x*q.y + q.w*q.z); M[1][1] = ww - xx + yy - zz;       M[1][2] = 2 * (q.y*q.z - q.w*q.x); M[1][3] = T(0);
+        M[2][0] = 2 * (q.x*q.z - q.w*q.y); M[2][1] = 2 * (q.y*q.z + q.w*q.x); M[2][2] = ww - xx - yy + zz;       M[2][3] = T(0);
+        M[3][0] = T(0);                       M[3][1] = T(0);                       M[3][2] = T(0);                       M[3][3] = T(1);
+    }
+
+    explicit Matrix4(const Pose<T>& p)
+    {
+        Matrix4 result(p.Rotation);
+        result.SetTranslation(p.Translation);
+        *this = result;
+    }
+
+
+    // C-interop support
+    explicit Matrix4(const Matrix4<typename Math<T>::OtherFloatType> &src)
+    {
+        for (int i = 0; i < 4; i++)
+            for (int j = 0; j < 4; j++)
+                M[i][j] = (T)src.M[i][j];
+    }
+
+    // C-interop support.
+    Matrix4(const typename CompatibleTypes<Matrix4<T> >::Type& s) 
+    {
+        OVR_MATH_STATIC_ASSERT(sizeof(s) == sizeof(Matrix4), "sizeof(s) == sizeof(Matrix4)");
+        memcpy(M, s.M, sizeof(M));
+    }
+
+    operator typename CompatibleTypes<Matrix4<T> >::Type () const
+    {
+        typename CompatibleTypes<Matrix4<T> >::Type result;
+        OVR_MATH_STATIC_ASSERT(sizeof(result) == sizeof(Matrix4), "sizeof(result) == sizeof(Matrix4)");
+        memcpy(result.M, M, sizeof(M));
+        return result;
+    }
+
+    void ToString(char* dest, size_t destsize) const
+    {
+        size_t pos = 0;
+        for (int r=0; r<4; r++)
+        {
+            for (int c=0; c<4; c++)
+            {
+                pos += OVRMath_sprintf(dest+pos, destsize-pos, "%g ", M[r][c]);
+            }
+        }
+    }
+
+    static Matrix4 FromString(const char* src)
+    {
+        Matrix4 result;
+        if (src)
+        {
+            for (int r = 0; r < 4; r++)
+            {
+                for (int c = 0; c < 4; c++)
+                {
+                    result.M[r][c] = (T)atof(src);
+                    while (*src && *src != ' ')
+                    {
+                        src++;
+                    }
+                    while (*src && *src == ' ')
+                    {
+                        src++;
+                    }
+                }
+            }
+        }
+        return result;
+    }
+
+    static Matrix4 Identity()  { return Matrix4(); }
+
+    void SetIdentity()
+    {
+        M[0][0] = M[1][1] = M[2][2] = M[3][3] = T(1);
+        M[0][1] = M[1][0] = M[2][3] = M[3][1] = T(0);
+        M[0][2] = M[1][2] = M[2][0] = M[3][2] = T(0);
+        M[0][3] = M[1][3] = M[2][1] = M[3][0] = T(0);
+    }
+
+    void SetXBasis(const Vector3<T>& v)
+    {
+        M[0][0] = v.x;
+        M[1][0] = v.y;
+        M[2][0] = v.z;
+    }
+    Vector3<T> GetXBasis() const
+    {
+        return Vector3<T>(M[0][0], M[1][0], M[2][0]);
+    }
+
+    void SetYBasis(const Vector3<T> & v)
+    {
+        M[0][1] = v.x;
+        M[1][1] = v.y;
+        M[2][1] = v.z;
+    }
+    Vector3<T> GetYBasis() const
+    {
+        return Vector3<T>(M[0][1], M[1][1], M[2][1]);
+    }
+
+    void SetZBasis(const Vector3<T> & v)
+    {
+        M[0][2] = v.x;
+        M[1][2] = v.y;
+        M[2][2] = v.z;
+    }
+    Vector3<T> GetZBasis() const
+    {
+        return Vector3<T>(M[0][2], M[1][2], M[2][2]);
+    }
+
+    bool operator== (const Matrix4& b) const
+    {
+        bool isEqual = true;
+        for (int i = 0; i < 4; i++)
+            for (int j = 0; j < 4; j++)
+                isEqual &= (M[i][j] == b.M[i][j]);
+
+        return isEqual;
+    }
+
+    Matrix4 operator+ (const Matrix4& b) const
+    {
+        Matrix4 result(*this);
+        result += b;
+        return result;
+    }
+
+    Matrix4& operator+= (const Matrix4& b)
+    {
+        for (int i = 0; i < 4; i++)
+            for (int j = 0; j < 4; j++)
+                M[i][j] += b.M[i][j];
+        return *this;
+    }
+
+    Matrix4 operator- (const Matrix4& b) const
+    {
+        Matrix4 result(*this);
+        result -= b;
+        return result;
+    }
+
+    Matrix4& operator-= (const Matrix4& b)
+    {
+        for (int i = 0; i < 4; i++)
+            for (int j = 0; j < 4; j++)
+                M[i][j] -= b.M[i][j];
+        return *this;
+    }
+
+    // Multiplies two matrices into destination with minimum copying.
+    static Matrix4& Multiply(Matrix4* d, const Matrix4& a, const Matrix4& b)
+    {
+        OVR_MATH_ASSERT((d != &a) && (d != &b));
+        int i = 0;
+        do {
+            d->M[i][0] = a.M[i][0] * b.M[0][0] + a.M[i][1] * b.M[1][0] + a.M[i][2] * b.M[2][0] + a.M[i][3] * b.M[3][0];
+            d->M[i][1] = a.M[i][0] * b.M[0][1] + a.M[i][1] * b.M[1][1] + a.M[i][2] * b.M[2][1] + a.M[i][3] * b.M[3][1];
+            d->M[i][2] = a.M[i][0] * b.M[0][2] + a.M[i][1] * b.M[1][2] + a.M[i][2] * b.M[2][2] + a.M[i][3] * b.M[3][2];
+            d->M[i][3] = a.M[i][0] * b.M[0][3] + a.M[i][1] * b.M[1][3] + a.M[i][2] * b.M[2][3] + a.M[i][3] * b.M[3][3];
+        } while((++i) < 4);
+
+        return *d;
+    }
+
+    Matrix4 operator* (const Matrix4& b) const
+    {
+        Matrix4 result(Matrix4::NoInit);
+        Multiply(&result, *this, b);
+        return result;
+    }
+
+    Matrix4& operator*= (const Matrix4& b)
+    {
+        return Multiply(this, Matrix4(*this), b);
+    }
+
+    Matrix4 operator* (T s) const
+    {
+        Matrix4 result(*this);
+        result *= s;
+        return result;
+    }
+
+    Matrix4& operator*= (T s)
+    {
+        for (int i = 0; i < 4; i++)
+            for (int j = 0; j < 4; j++)
+                M[i][j] *= s;
+        return *this;
+    }
+
+
+    Matrix4 operator/ (T s) const
+    {
+        Matrix4 result(*this);
+        result /= s;
+        return result;
+    }
+
+    Matrix4& operator/= (T s)
+    {
+        for (int i = 0; i < 4; i++)
+            for (int j = 0; j < 4; j++)
+                M[i][j] /= s;
+        return *this;
+    }
+
+    Vector3<T> Transform(const Vector3<T>& v) const
+    {
+        const T rcpW = T(1) / (M[3][0] * v.x + M[3][1] * v.y + M[3][2] * v.z + M[3][3]);
+        return Vector3<T>((M[0][0] * v.x + M[0][1] * v.y + M[0][2] * v.z + M[0][3]) * rcpW,
+                          (M[1][0] * v.x + M[1][1] * v.y + M[1][2] * v.z + M[1][3]) * rcpW,
+                          (M[2][0] * v.x + M[2][1] * v.y + M[2][2] * v.z + M[2][3]) * rcpW);
+    }
+
+    Vector4<T> Transform(const Vector4<T>& v) const
+    {
+        return Vector4<T>(M[0][0] * v.x + M[0][1] * v.y + M[0][2] * v.z + M[0][3] * v.w,
+                          M[1][0] * v.x + M[1][1] * v.y + M[1][2] * v.z + M[1][3] * v.w,
+                          M[2][0] * v.x + M[2][1] * v.y + M[2][2] * v.z + M[2][3] * v.w,
+                          M[3][0] * v.x + M[3][1] * v.y + M[3][2] * v.z + M[3][3] * v.w);
+    }
+
+    Matrix4 Transposed() const
+    {
+        return Matrix4(M[0][0], M[1][0], M[2][0], M[3][0],
+                        M[0][1], M[1][1], M[2][1], M[3][1],
+                        M[0][2], M[1][2], M[2][2], M[3][2],
+                        M[0][3], M[1][3], M[2][3], M[3][3]);
+    }
+
+    void     Transpose()
+    {
+        *this = Transposed();
+    }
+
+
+    T SubDet (const size_t* rows, const size_t* cols) const
+    {
+        return M[rows[0]][cols[0]] * (M[rows[1]][cols[1]] * M[rows[2]][cols[2]] - M[rows[1]][cols[2]] * M[rows[2]][cols[1]])
+             - M[rows[0]][cols[1]] * (M[rows[1]][cols[0]] * M[rows[2]][cols[2]] - M[rows[1]][cols[2]] * M[rows[2]][cols[0]])
+             + M[rows[0]][cols[2]] * (M[rows[1]][cols[0]] * M[rows[2]][cols[1]] - M[rows[1]][cols[1]] * M[rows[2]][cols[0]]);
+    }
+
+    T Cofactor(size_t I, size_t J) const
+    {
+        const size_t indices[4][3] = {{1,2,3},{0,2,3},{0,1,3},{0,1,2}};
+        return ((I+J)&1) ? -SubDet(indices[I],indices[J]) : SubDet(indices[I],indices[J]);
+    }
+
+    T    Determinant() const
+    {
+        return M[0][0] * Cofactor(0,0) + M[0][1] * Cofactor(0,1) + M[0][2] * Cofactor(0,2) + M[0][3] * Cofactor(0,3);
+    }
+
+    Matrix4 Adjugated() const
+    {
+        return Matrix4(Cofactor(0,0), Cofactor(1,0), Cofactor(2,0), Cofactor(3,0), 
+                        Cofactor(0,1), Cofactor(1,1), Cofactor(2,1), Cofactor(3,1), 
+                        Cofactor(0,2), Cofactor(1,2), Cofactor(2,2), Cofactor(3,2),
+                        Cofactor(0,3), Cofactor(1,3), Cofactor(2,3), Cofactor(3,3));
+    }
+
+    Matrix4 Inverted() const
+    {
+        T det = Determinant();
+        OVR_MATH_ASSERT(det != 0);
+        return Adjugated() * (T(1)/det);
+    }
+
+    void Invert()
+    {
+        *this = Inverted();
+    }
+
+    // This is more efficient than general inverse, but ONLY works
+    // correctly if it is a homogeneous transform matrix (rot + trans)
+    Matrix4 InvertedHomogeneousTransform() const
+    {
+        // Make the inverse rotation matrix
+        Matrix4 rinv = this->Transposed();
+        rinv.M[3][0] = rinv.M[3][1] = rinv.M[3][2] = T(0);
+        // Make the inverse translation matrix
+        Vector3<T> tvinv(-M[0][3],-M[1][3],-M[2][3]);
+        Matrix4 tinv = Matrix4::Translation(tvinv);
+        return rinv * tinv;  // "untranslate", then "unrotate"
+    }
+
+    // This is more efficient than general inverse, but ONLY works
+    // correctly if it is a homogeneous transform matrix (rot + trans)
+    void InvertHomogeneousTransform()
+    {
+        *this = InvertedHomogeneousTransform();
+    }
+
+    // Matrix to Euler Angles conversion
+    // a,b,c, are the YawPitchRoll angles to be returned
+    // rotation a around axis A1
+    // is followed by rotation b around axis A2
+    // is followed by rotation c around axis A3
+    // rotations are CCW or CW (D) in LH or RH coordinate system (S)
+    template <Axis A1, Axis A2, Axis A3, RotateDirection D, HandedSystem S>
+    void ToEulerAngles(T *a, T *b, T *c) const
+    {
+        OVR_MATH_STATIC_ASSERT((A1 != A2) && (A2 != A3) && (A1 != A3), "(A1 != A2) && (A2 != A3) && (A1 != A3)");
+
+        T psign = T(-1);
+        if (((A1 + 1) % 3 == A2) && ((A2 + 1) % 3 == A3)) // Determine whether even permutation
+            psign = T(1);
+        
+        T pm = psign*M[A1][A3];
+        T singularityRadius = Math<T>::SingularityRadius();
+        if (pm < T(-1) + singularityRadius)
+        { // South pole singularity
+            *a = T(0);
+            *b = -S*D*((T)MATH_DOUBLE_PIOVER2);
+            *c = S*D*atan2( psign*M[A2][A1], M[A2][A2] );
+        }
+        else if (pm > T(1) - singularityRadius)
+        { // North pole singularity
+            *a = T(0);
+            *b = S*D*((T)MATH_DOUBLE_PIOVER2);
+            *c = S*D*atan2( psign*M[A2][A1], M[A2][A2] );
+        }
+        else
+        { // Normal case (nonsingular)
+            *a = S*D*atan2( -psign*M[A2][A3], M[A3][A3] );
+            *b = S*D*asin(pm);
+            *c = S*D*atan2( -psign*M[A1][A2], M[A1][A1] );
+        }
+    }
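+
+    // Sketch (not part of the SDK header): recover yaw/pitch/roll from a Matrix4f rotation 'm'
+    // in the default right-handed, CCW convention:
+    //     float yaw, pitch, roll;
+    //     m.ToEulerAngles<Axis_Y, Axis_X, Axis_Z, Rotate_CCW, Handed_R>(&yaw, &pitch, &roll);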
+
+    // Matrix to Euler Angles conversion
+    // a,b,c, are the YawPitchRoll angles to be returned
+    // rotation a around axis A1
+    // is followed by rotation b around axis A2
+    // is followed by rotation c around axis A1
+    // rotations are CCW or CW (D) in LH or RH coordinate system (S)
+    template <Axis A1, Axis A2, RotateDirection D, HandedSystem S>
+    void ToEulerAnglesABA(T *a, T *b, T *c) const
+    {        
+         OVR_MATH_STATIC_ASSERT(A1 != A2, "A1 != A2");
+  
+        // Determine the axis that was not supplied
+        int m = 3 - A1 - A2;
+
+        T psign = T(-1);
+        if ((A1 + 1) % 3 == A2) // Determine whether even permutation
+            psign = T(1);
+
+        T c2 = M[A1][A1];
+        T singularityRadius = Math<T>::SingularityRadius();
+        if (c2 < T(-1) + singularityRadius)
+        { // South pole singularity
+            *a = T(0);
+            *b = S*D*((T)MATH_DOUBLE_PI);
+            *c = S*D*atan2( -psign*M[A2][m],M[A2][A2]);
+        }
+        else if (c2 > T(1) - singularityRadius)
+        { // North pole singularity
+            *a = T(0);
+            *b = T(0);
+            *c = S*D*atan2( -psign*M[A2][m],M[A2][A2]);
+        }
+        else
+        { // Normal case (nonsingular)
+            *a = S*D*atan2( M[A2][A1],-psign*M[m][A1]);
+            *b = S*D*acos(c2);
+            *c = S*D*atan2( M[A1][A2],psign*M[A1][m]);
+        }
+    }
+  
+    // Creates a matrix that converts the vertices from one coordinate system
+    // to another.
+    static Matrix4 AxisConversion(const WorldAxes& to, const WorldAxes& from)
+    {        
+        // Holds axis values from the 'to' structure
+        int toArray[3] = { to.XAxis, to.YAxis, to.ZAxis };
+
+        // The inverse of the toArray
+        int inv[4]; 
+        inv[0] = inv[abs(to.XAxis)] = 0;
+        inv[abs(to.YAxis)] = 1;
+        inv[abs(to.ZAxis)] = 2;
+
+        Matrix4 m(0,  0,  0, 
+                  0,  0,  0,
+                  0,  0,  0);
+
+        // Only three values in the matrix need to be changed to 1 or -1.
+        m.M[inv[abs(from.XAxis)]][0] = T(from.XAxis/toArray[inv[abs(from.XAxis)]]);
+        m.M[inv[abs(from.YAxis)]][1] = T(from.YAxis/toArray[inv[abs(from.YAxis)]]);
+        m.M[inv[abs(from.ZAxis)]][2] = T(from.ZAxis/toArray[inv[abs(from.ZAxis)]]);
+        return m;
+    } 
+
+
+    // Creates a matrix for translation by vector
+    static Matrix4 Translation(const Vector3<T>& v)
+    {
+        Matrix4 t;
+        t.M[0][3] = v.x;
+        t.M[1][3] = v.y;
+        t.M[2][3] = v.z;
+        return t;
+    }
+
+    // Creates a matrix for translation by vector
+    static Matrix4 Translation(T x, T y, T z = T(0))
+    {
+        Matrix4 t;
+        t.M[0][3] = x;
+        t.M[1][3] = y;
+        t.M[2][3] = z;
+        return t;
+    }
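+
+    // Sketch (not part of the SDK header): translation sits in the last column, and transforms
+    // compose right-to-left, so the rotation below is applied before the translation:
+    //     Matrix4f model = Matrix4f::Translation(0.0f, 1.0f, 0.0f) * Matrix4f::RotationY(0.5f);
+    //     Vector3f p = model.Transform(Vector3f(1.0f, 0.0f, 0.0f));   // rotate about Y, then raise by 1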
+
+    // Sets the translation part
+    void SetTranslation(const Vector3<T>& v)
+    {
+        M[0][3] = v.x;
+        M[1][3] = v.y;
+        M[2][3] = v.z;
+    }
+
+    Vector3<T> GetTranslation() const
+    {
+        return Vector3<T>( M[0][3], M[1][3], M[2][3] );
+    }
+
+    // Creates a matrix for scaling by vector
+    static Matrix4 Scaling(const Vector3<T>& v)
+    {
+        Matrix4 t;
+        t.M[0][0] = v.x;
+        t.M[1][1] = v.y;
+        t.M[2][2] = v.z;
+        return t;
+    }
+
+    // Creates a matrix for scaling by vector
+    static Matrix4 Scaling(T x, T y, T z)
+    {
+        Matrix4 t;
+        t.M[0][0] = x;
+        t.M[1][1] = y;
+        t.M[2][2] = z;
+        return t;
+    }
+
+    // Creates a matrix for scaling by constant
+    static Matrix4 Scaling(T s)
+    {
+        Matrix4 t;
+        t.M[0][0] = s;
+        t.M[1][1] = s;
+        t.M[2][2] = s;
+        return t;
+    }
+
+    // Simple L1 distance over all 16 matrix entries (R^16)
+    T Distance(const Matrix4& m2) const           
+    { 
+        T d = fabs(M[0][0] - m2.M[0][0]) + fabs(M[0][1] - m2.M[0][1]);
+        d += fabs(M[0][2] - m2.M[0][2]) + fabs(M[0][3] - m2.M[0][3]);
+        d += fabs(M[1][0] - m2.M[1][0]) + fabs(M[1][1] - m2.M[1][1]);
+        d += fabs(M[1][2] - m2.M[1][2]) + fabs(M[1][3] - m2.M[1][3]);
+        d += fabs(M[2][0] - m2.M[2][0]) + fabs(M[2][1] - m2.M[2][1]);
+        d += fabs(M[2][2] - m2.M[2][2]) + fabs(M[2][3] - m2.M[2][3]);
+        d += fabs(M[3][0] - m2.M[3][0]) + fabs(M[3][1] - m2.M[3][1]);
+        d += fabs(M[3][2] - m2.M[3][2]) + fabs(M[3][3] - m2.M[3][3]);
+        return d; 
+    }
+
+    // Creates a rotation matrix rotating around the given axis by 'angle' radians.
+    // Just for quick testing.  Not for final API.  Need to remove case.
+    static Matrix4 RotationAxis(Axis A, T angle, RotateDirection d, HandedSystem s)
+    {
+        T sina = s * d *sin(angle);
+        T cosa = cos(angle);
+        
+        switch(A)
+        {
+        case Axis_X:
+            return Matrix4(1,  0,     0, 
+                           0,  cosa,  -sina,
+                           0,  sina,  cosa);
+        case Axis_Y:
+            return Matrix4(cosa,  0,   sina, 
+                           0,     1,   0,
+                           -sina, 0,   cosa);
+        case Axis_Z:
+            return Matrix4(cosa,  -sina,  0, 
+                           sina,  cosa,   0,
+                           0,     0,      1);
+        default:
+            return Matrix4();
+        }
+    }
+
+
+    // Creates a rotation matrix rotating around the X axis by 'angle' radians.
+    // Rotation direction depends on the coordinate system:
+    //  RHS (Oculus default): Positive angle values rotate Counter-clockwise (CCW),
+    //                        while looking in the negative axis direction. This is the
+    //                        same as looking down from positive axis values towards origin.
+    //  LHS: Positive angle values rotate clock-wise (CW), while looking in the
+    //       negative axis direction.
+    static Matrix4 RotationX(T angle)
+    {
+        T sina = sin(angle);
+        T cosa = cos(angle);
+        return Matrix4(1,  0,     0, 
+                       0,  cosa,  -sina,
+                       0,  sina,  cosa);
+    }
+
+    // Creates a rotation matrix rotating around the Y axis by 'angle' radians.
+    // Rotation direction depends on the coordinate system:
+    //  RHS (Oculus default): Positive angle values rotate Counter-clockwise (CCW),
+    //                        while looking in the negative axis direction. This is the
+    //                        same as looking down from positive axis values towards origin.
+    //  LHS: Positive angle values rotate clock-wise (CW), while looking in the
+    //       negative axis direction.
+    static Matrix4 RotationY(T angle)
+    {
+        T sina = (T)sin(angle);
+        T cosa = (T)cos(angle);
+        return Matrix4(cosa,  0,   sina, 
+                       0,     1,   0,
+                       -sina, 0,   cosa);
+    }
+
+    // Creates a rotation matrix rotating around the Z axis by 'angle' radians.
+    // Rotation direction depends on the coordinate system:
+    //  RHS (Oculus default): Positive angle values rotate Counter-clockwise (CCW),
+    //                        while looking in the negative axis direction. This is the
+    //                        same as looking down from positive axis values towards origin.
+    //  LHS: Positive angle values rotate clock-wise (CW), while looking in the
+    //       negative axis direction.
+    static Matrix4 RotationZ(T angle)
+    {
+        T sina = sin(angle);
+        T cosa = cos(angle);
+        return Matrix4(cosa,  -sina,  0, 
+                       sina,  cosa,   0,
+                       0,     0,      1);
+    }
+
+    // LookAtRH creates a View transformation matrix for right-handed coordinate system.
+    // The resulting matrix points camera from 'eye' towards 'at' direction, with 'up'
+    // specifying the up vector. The resulting matrix should be used with PerspectiveRH
+    // projection.
+    static Matrix4 LookAtRH(const Vector3<T>& eye, const Vector3<T>& at, const Vector3<T>& up)
+    {
+        Vector3<T> z = (eye - at).Normalized();  // Forward
+        Vector3<T> x = up.Cross(z).Normalized(); // Right
+        Vector3<T> y = z.Cross(x);
+
+        Matrix4 m(x.x,  x.y,  x.z,  -(x.Dot(eye)),
+                  y.x,  y.y,  y.z,  -(y.Dot(eye)),
+                  z.x,  z.y,  z.z,  -(z.Dot(eye)),
+                  0,    0,    0,    1 );
+        return m;
+    }
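+
+    // Sketch (not part of the SDK header); eye/target/up values are illustrative:
+    //     Matrix4f view = Matrix4f::LookAtRH(Vector3f(0.0f, 1.6f, 3.0f),   // eye
+    //                                        Vector3f(0.0f, 1.6f, 0.0f),   // at
+    //                                        Vector3f(0.0f, 1.0f, 0.0f));  // up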
+    
+    // LookAtLH creates a View transformation matrix for left-handed coordinate system.
+    // The resulting matrix points camera from 'eye' towards 'at' direction, with 'up'
+    // specifying the up vector. 
+    static Matrix4 LookAtLH(const Vector3<T>& eye, const Vector3<T>& at, const Vector3<T>& up)
+    {
+        Vector3<T> z = (at - eye).Normalized();  // Forward
+        Vector3<T> x = up.Cross(z).Normalized(); // Right
+        Vector3<T> y = z.Cross(x);
+
+        Matrix4 m(x.x,  x.y,  x.z,  -(x.Dot(eye)),
+                  y.x,  y.y,  y.z,  -(y.Dot(eye)),
+                  z.x,  z.y,  z.z,  -(z.Dot(eye)),
+                  0,    0,    0,    1 ); 
+        return m;
+    }
+    
+    // PerspectiveRH creates a right-handed perspective projection matrix that can be
+    // used with the Oculus sample renderer. 
+    //  yfov   - Specifies vertical field of view in radians.
+    //  aspect - Screen aspect ratio, which is usually width/height for square pixels.
+    //           Note that xfov = yfov * aspect.
+    //  znear  - Absolute value of the near Z clipping range.
+    //  zfar   - Absolute value of the far  Z clipping range (larger than near).
+    // Even though RHS usually looks in the direction of negative Z, positive values
+    // are expected for znear and zfar.
+    static Matrix4 PerspectiveRH(T yfov, T aspect, T znear, T zfar)
+    {
+        Matrix4 m;
+        T tanHalfFov = tan(yfov * T(0.5));
+
+        m.M[0][0] = T(1) / (aspect * tanHalfFov);
+        m.M[1][1] = T(1) / tanHalfFov;
+        m.M[2][2] = zfar / (znear - zfar);
+        m.M[3][2] = T(-1);
+        m.M[2][3] = (zfar * znear) / (znear - zfar);
+        m.M[3][3] = T(0);
+
+        // Note: Post-projection matrix result assumes Left-Handed coordinate system,
+        //       with Y up, X right and Z forward. This supports positive z-buffer values.
+        // This is the case even for RHS coordinate input.
+        return m;
+    }
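+
+    // Sketch (not part of the SDK header): a ~45-degree vertical FOV projection, paired with LookAtRH:
+    //     Matrix4f proj = Matrix4f::PerspectiveRH(0.7854f /* ~pi/4 rad */, 16.0f / 9.0f, 0.1f, 1000.0f);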
+    
+    // PerspectiveLH creates a left-handed perspective projection matrix that can be
+    // used with the Oculus sample renderer. 
+    //  yfov   - Specifies vertical field of view in radians.
+    //  aspect - Screen aspect ratio, which is usually width/height for square pixels.
+    //           Note that xfov = yfov * aspect.
+    //  znear  - Absolute value of the near Z clipping range.
+    //  zfar   - Absolute value of the far  Z clipping range (larger than near).
+    static Matrix4 PerspectiveLH(T yfov, T aspect, T znear, T zfar)
+    {
+        Matrix4 m;
+        T tanHalfFov = tan(yfov * T(0.5));
+
+        m.M[0][0] = T(1) / (aspect * tanHalfFov);
+        m.M[1][1] = T(1) / tanHalfFov;
+        //m.M[2][2] = zfar / (znear - zfar);
+         m.M[2][2] = zfar / (zfar - znear);
+        m.M[3][2] = T(-1);
+        m.M[2][3] = (zfar * znear) / (znear - zfar);
+        m.M[3][3] = T(0);
+
+        // Note: Post-projection matrix result assumes Left-Handed coordinate system,    
+        //       with Y up, X right and Z forward. This supports positive z-buffer values.
+        // This is the case even for RHS coordinate input. 
+        return m;
+    }
+
+    static Matrix4 Ortho2D(T w, T h)
+    {
+        Matrix4 m;
+        m.M[0][0] = T(2.0)/w;
+        m.M[1][1] = T(-2.0)/h;
+        m.M[0][3] = T(-1.0);
+        m.M[1][3] = T(1.0);
+        m.M[2][2] = T(0);
+        return m;
+    }
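+
+    // Sketch (not part of the SDK header): map pixel coordinates (origin at top-left) to clip space:
+    //     Matrix4f ui = Matrix4f::Ortho2D(1280.0f, 720.0f);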
+};
+
+typedef Matrix4<float>  Matrix4f;
+typedef Matrix4<double> Matrix4d;
+
+//-------------------------------------------------------------------------------------
+// ***** Matrix3
+//
+// Matrix3 is a 3x3 matrix used to represent a rotation.
+// The matrix is stored in row-major order in memory, meaning that values
+// of the first row are stored before those of the next one.
+//
+// The arrangement of the matrix assumes a right-handed coordinate system
+// and counterclockwise rotations when looking down the axis.
+//
+// Transformation Order:
+//   - Transformations are applied from right to left, so the expression
+//     M1 * M2 * M3 * V means that the vector V is transformed by M3 first,
+//     followed by M2 and M1. 
+//
+// Coordinate system: Right Handed
+//
+// Rotations: Counterclockwise when looking down the axis. All angles are in radians.
+
+template<class T>
+class Matrix3
+{
+public:
+    typedef T ElementType;
+    static const size_t Dimension = 3;
+
+    T M[3][3];
+
+    enum NoInitType { NoInit };
+
+    // Construct with no memory initialization.
+    Matrix3(NoInitType) { }
+
+    // By default, we construct identity matrix.
+    Matrix3()
+    {
+        M[0][0] = M[1][1] = M[2][2] = T(1);
+        M[0][1] = M[1][0] = M[2][0] = T(0);
+        M[0][2] = M[1][2] = M[2][1] = T(0);
+    }
+
+    Matrix3(T m11, T m12, T m13,
+            T m21, T m22, T m23,
+            T m31, T m32, T m33)
+    {
+        M[0][0] = m11; M[0][1] = m12; M[0][2] = m13;
+        M[1][0] = m21; M[1][1] = m22; M[1][2] = m23;
+        M[2][0] = m31; M[2][1] = m32; M[2][2] = m33;
+    }
+    
+    // Construction from X, Y, Z basis vectors
+    Matrix3(const Vector3<T>& xBasis, const Vector3<T>& yBasis, const Vector3<T>& zBasis)
+    {
+        M[0][0] = xBasis.x; M[0][1] = yBasis.x; M[0][2] = zBasis.x;
+        M[1][0] = xBasis.y; M[1][1] = yBasis.y; M[1][2] = zBasis.y;
+        M[2][0] = xBasis.z; M[2][1] = yBasis.z; M[2][2] = zBasis.z;
+    }
+
+    explicit Matrix3(const Quat<T>& q)
+    {
+        OVR_MATH_ASSERT(q.IsNormalized());
+        const T tx  = q.x+q.x,  ty  = q.y+q.y,  tz  = q.z+q.z;
+        const T twx = q.w*tx,   twy = q.w*ty,   twz = q.w*tz;
+        const T txx = q.x*tx,   txy = q.x*ty,   txz = q.x*tz;
+        const T tyy = q.y*ty,   tyz = q.y*tz,   tzz = q.z*tz;
+        M[0][0] = T(1) - (tyy + tzz);    M[0][1] = txy - twz;            M[0][2] = txz + twy;
+        M[1][0] = txy + twz;            M[1][1] = T(1) - (txx + tzz);    M[1][2] = tyz - twx;
+        M[2][0] = txz - twy;            M[2][1] = tyz + twx;            M[2][2] = T(1) - (txx + tyy);
+    }
+    
+    inline explicit Matrix3(T s)
+    {
+        M[0][0] = M[1][1] = M[2][2] = s;
+        M[0][1] = M[0][2] = M[1][0] = M[1][2] = M[2][0] = M[2][1] = T(0);
+    }
+
+    Matrix3(T m11, T m22, T m33)
+    {
+        M[0][0] = m11; M[0][1] = T(0); M[0][2] = T(0);
+        M[1][0] = T(0); M[1][1] = m22; M[1][2] = T(0);
+        M[2][0] = T(0); M[2][1] = T(0); M[2][2] = m33;
+    }
+
+    explicit Matrix3(const Matrix3<typename Math<T>::OtherFloatType> &src)
+    {
+        for (int i = 0; i < 3; i++)
+            for (int j = 0; j < 3; j++)
+                M[i][j] = (T)src.M[i][j];
+    }
+
+    // C-interop support.
+    Matrix3(const typename CompatibleTypes<Matrix3<T> >::Type& s) 
+    {
+        OVR_MATH_STATIC_ASSERT(sizeof(s) == sizeof(Matrix3), "sizeof(s) == sizeof(Matrix3)");
+        memcpy(M, s.M, sizeof(M));
+    }
+
+    operator const typename CompatibleTypes<Matrix3<T> >::Type () const
+    {
+        typename CompatibleTypes<Matrix3<T> >::Type result;
+        OVR_MATH_STATIC_ASSERT(sizeof(result) == sizeof(Matrix3), "sizeof(result) == sizeof(Matrix3)");
+        memcpy(result.M, M, sizeof(M));
+        return result;
+    }
+
+    T  operator()(int i, int j) const { return M[i][j]; }
+    T& operator()(int i, int j)       { return M[i][j]; }
+
+    void ToString(char* dest, size_t destsize) const
+    {
+        size_t pos = 0;
+        for (int r=0; r<3; r++)
+        {
+            for (int c=0; c<3; c++)
+                pos += OVRMath_sprintf(dest+pos, destsize-pos, "%g ", M[r][c]);
+        }
+    }
+
+    static Matrix3 FromString(const char* src)
+    {
+        Matrix3 result;
+        if (src)
+        {
+            for (int r=0; r<3; r++)
+            {
+                for (int c=0; c<3; c++)
+                {
+                    result.M[r][c] = (T)atof(src);
+                    while (*src && *src != ' ')
+                        src++;
+                    while (*src && *src == ' ')
+                        src++;
+                }
+            }
+        }
+        return result;
+    }
+
+    static Matrix3 Identity()  { return Matrix3(); }
+
+    void SetIdentity()
+    {
+        M[0][0] = M[1][1] = M[2][2] = T(1);
+        M[0][1] = M[1][0] = M[2][0] = T(0);
+        M[0][2] = M[1][2] = M[2][1] = T(0);
+    }
+
+    static Matrix3 Diagonal(T m00, T m11, T m22)
+    {
+        return Matrix3(m00, 0, 0,
+            0, m11, 0,
+            0, 0, m22);
+    }
+    static Matrix3 Diagonal(const Vector3<T>& v) { return Diagonal(v.x, v.y, v.z); }
+
+    T Trace() const { return M[0][0] + M[1][1] + M[2][2]; }
+    
+    bool operator== (const Matrix3& b) const
+    {
+        bool isEqual = true;
+        for (int i = 0; i < 3; i++)
+        {
+            for (int j = 0; j < 3; j++)
+                isEqual &= (M[i][j] == b.M[i][j]);
+        }
+
+        return isEqual;
+    }
+
+    Matrix3 operator+ (const Matrix3& b) const
+    {
+        Matrix3<T> result(*this);
+        result += b;
+        return result;
+    }
+
+    Matrix3& operator+= (const Matrix3& b)
+    {
+        for (int i = 0; i < 3; i++)
+            for (int j = 0; j < 3; j++)
+                M[i][j] += b.M[i][j];
+        return *this;
+    }
+
+    void operator= (const Matrix3& b)
+    {
+        for (int i = 0; i < 3; i++)
+            for (int j = 0; j < 3; j++)
+                M[i][j] = b.M[i][j];
+    }
+
+    Matrix3 operator- (const Matrix3& b) const
+    {
+        Matrix3 result(*this);
+        result -= b;
+        return result;
+    }
+
+    Matrix3& operator-= (const Matrix3& b)
+    {
+        for (int i = 0; i < 3; i++)
+        {
+            for (int j = 0; j < 3; j++)
+                M[i][j] -= b.M[i][j];
+        }
+
+        return *this;
+    }
+
+    // Multiplies two matrices into destination with minimum copying.
+    static Matrix3& Multiply(Matrix3* d, const Matrix3& a, const Matrix3& b)
+    {
+        OVR_MATH_ASSERT((d != &a) && (d != &b));
+        int i = 0;
+        do {
+            d->M[i][0] = a.M[i][0] * b.M[0][0] + a.M[i][1] * b.M[1][0] + a.M[i][2] * b.M[2][0];
+            d->M[i][1] = a.M[i][0] * b.M[0][1] + a.M[i][1] * b.M[1][1] + a.M[i][2] * b.M[2][1];
+            d->M[i][2] = a.M[i][0] * b.M[0][2] + a.M[i][1] * b.M[1][2] + a.M[i][2] * b.M[2][2];
+        } while((++i) < 3);
+
+        return *d;
+    }
+
+    Matrix3 operator* (const Matrix3& b) const
+    {
+        Matrix3 result(Matrix3::NoInit);
+        Multiply(&result, *this, b);
+        return result;
+    }
+
+    Matrix3& operator*= (const Matrix3& b)
+    {
+        return Multiply(this, Matrix3(*this), b);
+    }
+
+    Matrix3 operator* (T s) const
+    {
+        Matrix3 result(*this);
+        result *= s;
+        return result;
+    }
+
+    Matrix3& operator*= (T s)
+    {
+        for (int i = 0; i < 3; i++)
+        {
+            for (int j = 0; j < 3; j++)
+                M[i][j] *= s;
+        }
+
+        return *this;
+    }
+
+    Vector3<T> operator* (const Vector3<T> &b) const
+    {
+        Vector3<T> result;
+        result.x = M[0][0]*b.x + M[0][1]*b.y + M[0][2]*b.z;
+        result.y = M[1][0]*b.x + M[1][1]*b.y + M[1][2]*b.z;
+        result.z = M[2][0]*b.x + M[2][1]*b.y + M[2][2]*b.z;
+
+        return result;
+    }
+
+    Matrix3 operator/ (T s) const
+    {
+        Matrix3 result(*this);
+        result /= s;
+        return result;
+    }
+
+    Matrix3& operator/= (T s)
+    {
+        for (int i = 0; i < 3; i++)
+        {
+            for (int j = 0; j < 3; j++)
+                M[i][j] /= s;
+        }
+
+        return *this;
+    }
+
+    Vector2<T> Transform(const Vector2<T>& v) const
+    {
+        const T rcpZ = T(1) / (M[2][0] * v.x + M[2][1] * v.y + M[2][2]);
+        return Vector2<T>((M[0][0] * v.x + M[0][1] * v.y + M[0][2]) * rcpZ,
+                          (M[1][0] * v.x + M[1][1] * v.y + M[1][2]) * rcpZ);
+    }
+
+    Vector3<T> Transform(const Vector3<T>& v) const
+    {
+        return Vector3<T>(M[0][0] * v.x + M[0][1] * v.y + M[0][2] * v.z,
+                          M[1][0] * v.x + M[1][1] * v.y + M[1][2] * v.z,
+                          M[2][0] * v.x + M[2][1] * v.y + M[2][2] * v.z);
+    }
+
+    Matrix3 Transposed() const
+    {
+        return Matrix3(M[0][0], M[1][0], M[2][0],
+                       M[0][1], M[1][1], M[2][1],
+                       M[0][2], M[1][2], M[2][2]);
+    }
+
+    void     Transpose()
+    {
+        *this = Transposed();
+    }
+
+
+    T SubDet (const size_t* rows, const size_t* cols) const
+    {
+        return M[rows[0]][cols[0]] * (M[rows[1]][cols[1]] * M[rows[2]][cols[2]] - M[rows[1]][cols[2]] * M[rows[2]][cols[1]])
+             - M[rows[0]][cols[1]] * (M[rows[1]][cols[0]] * M[rows[2]][cols[2]] - M[rows[1]][cols[2]] * M[rows[2]][cols[0]])
+             + M[rows[0]][cols[2]] * (M[rows[1]][cols[0]] * M[rows[2]][cols[1]] - M[rows[1]][cols[1]] * M[rows[2]][cols[0]]);
+    }
+
+    
+    // M += a*b.t()
+    inline void Rank1Add(const Vector3<T> &a, const Vector3<T> &b)
+    {
+        M[0][0] += a.x*b.x;        M[0][1] += a.x*b.y;        M[0][2] += a.x*b.z;
+        M[1][0] += a.y*b.x;        M[1][1] += a.y*b.y;        M[1][2] += a.y*b.z;
+        M[2][0] += a.z*b.x;        M[2][1] += a.z*b.y;        M[2][2] += a.z*b.z;
+    }
+
+    // M -= a*b.t()
+    inline void Rank1Sub(const Vector3<T> &a, const Vector3<T> &b)
+    {
+        M[0][0] -= a.x*b.x;        M[0][1] -= a.x*b.y;        M[0][2] -= a.x*b.z;
+        M[1][0] -= a.y*b.x;        M[1][1] -= a.y*b.y;        M[1][2] -= a.y*b.z;
+        M[2][0] -= a.z*b.x;        M[2][1] -= a.z*b.y;        M[2][2] -= a.z*b.z;
+    }
+
+    inline Vector3<T> Col(int c) const
+    {
+        return Vector3<T>(M[0][c], M[1][c], M[2][c]);
+    }
+
+    inline Vector3<T> Row(int r) const
+    {
+        return Vector3<T>(M[r][0], M[r][1], M[r][2]);
+    }
+
+    inline Vector3<T> GetColumn(int c) const
+    {
+        return Vector3<T>(M[0][c], M[1][c], M[2][c]);
+    }
+
+    inline Vector3<T> GetRow(int r) const
+    {
+        return Vector3<T>(M[r][0], M[r][1], M[r][2]);
+    }
+
+    inline void SetColumn(int c, const Vector3<T>& v)
+    {
+        M[0][c] = v.x;
+        M[1][c] = v.y;
+        M[2][c] = v.z;
+    }
+
+    inline void SetRow(int r, const Vector3<T>& v)
+    {
+        M[r][0] = v.x;
+        M[r][1] = v.y;
+        M[r][2] = v.z;
+    }
+
+    inline T Determinant() const
+    {
+        const Matrix3<T>& m = *this;
+        T d; 
+
+        d  = m.M[0][0] * (m.M[1][1]*m.M[2][2] - m.M[1][2] * m.M[2][1]);
+        d -= m.M[0][1] * (m.M[1][0]*m.M[2][2] - m.M[1][2] * m.M[2][0]);
+        d += m.M[0][2] * (m.M[1][0]*m.M[2][1] - m.M[1][1] * m.M[2][0]);
+
+        return d;
+    }
+    
+    inline Matrix3<T> Inverse() const
+    {
+        Matrix3<T> a;
+        const  Matrix3<T>& m = *this;
+        T d = Determinant();
+
+        OVR_MATH_ASSERT(d != 0);
+        T s = T(1)/d;
+
+        a.M[0][0] = s * (m.M[1][1] * m.M[2][2] - m.M[1][2] * m.M[2][1]);   
+        a.M[1][0] = s * (m.M[1][2] * m.M[2][0] - m.M[1][0] * m.M[2][2]);   
+        a.M[2][0] = s * (m.M[1][0] * m.M[2][1] - m.M[1][1] * m.M[2][0]);   
+
+        a.M[0][1] = s * (m.M[0][2] * m.M[2][1] - m.M[0][1] * m.M[2][2]);   
+        a.M[1][1] = s * (m.M[0][0] * m.M[2][2] - m.M[0][2] * m.M[2][0]);   
+        a.M[2][1] = s * (m.M[0][1] * m.M[2][0] - m.M[0][0] * m.M[2][1]);   
+        
+        a.M[0][2] = s * (m.M[0][1] * m.M[1][2] - m.M[0][2] * m.M[1][1]);   
+        a.M[1][2] = s * (m.M[0][2] * m.M[1][0] - m.M[0][0] * m.M[1][2]);   
+        a.M[2][2] = s * (m.M[0][0] * m.M[1][1] - m.M[0][1] * m.M[1][0]);   
+        
+        return a;
+    }
+    
+    // Outer Product of two column vectors: a * b.Transpose()
+    static Matrix3 OuterProduct(const Vector3<T>& a, const Vector3<T>& b)
+    {
+        return Matrix3(a.x*b.x, a.x*b.y, a.x*b.z,
+                       a.y*b.x, a.y*b.y, a.y*b.z,
+                       a.z*b.x, a.z*b.y, a.z*b.z);
+    }
+
+    // Vector cross product as a premultiply matrix:
+    // L.Cross(R) = LeftCrossAsMatrix(L) * R
+    static Matrix3 LeftCrossAsMatrix(const Vector3<T>& L)
+    {
+        return Matrix3(
+            T(0), -L.z, +L.y,
+            +L.z, T(0), -L.x,
+            -L.y, +L.x, T(0));
+    }
+
+    // Vector cross product as a premultiply matrix:
+    // L.Cross(R) = RightCrossAsMatrix(R) * L
+    static Matrix3 RightCrossAsMatrix(const Vector3<T>& R)
+    {
+        return Matrix3(
+            T(0), +R.z, -R.y,
+            -R.z, T(0), +R.x,
+            +R.y, -R.x, T(0));
+    }
+
+    // Angle in radians of a rotation matrix
+    // Uses identity trace(a) = 2*cos(theta) + 1
+    T Angle() const
+    {
+        return Acos((Trace() - T(1)) * T(0.5));
+    }
+
+    // Angle in radians between two rotation matrices
+    T Angle(const Matrix3& b) const
+    {
+        // Compute trace of (this->Transposed() * b)
+        // This works out to sum of products of elements.
+        T trace = T(0);
+        for (int i = 0; i < 3; i++)
+        {
+            for (int j = 0; j < 3; j++)
+            {
+                trace += M[i][j] * b.M[i][j];
+            }
+        }
+        return Acos((trace - T(1)) * T(0.5));
+    }
+};
+
+typedef Matrix3<float>  Matrix3f;
+typedef Matrix3<double> Matrix3d;
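
A quick sketch of the right-to-left transformation order described in the Matrix3 notes above (illustrative only, not SDK code):

    Matrix3f scale(2.0f);                // uniform scale by 2 (scalar diagonal constructor)
    Matrix3f negX(-1.0f, 1.0f, 1.0f);    // diagonal matrix that negates X
    Vector3f v(1.0f, 0.0f, 0.0f);
    Vector3f r = (negX * scale) * v;     // scale is applied first, then negX: r == (-2, 0, 0)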
+
+//-------------------------------------------------------------------------------------
+// ***** Matrix2
+
+template<class T>
+class Matrix2
+{
+public:
+    typedef T ElementType;
+    static const size_t Dimension = 2;
+
+    T M[2][2];
+
+    enum NoInitType { NoInit };
+
+    // Construct with no memory initialization.
+    Matrix2(NoInitType) { }
+
+    // By default, we construct identity matrix.
+    Matrix2()
+    {
+        M[0][0] = M[1][1] = T(1);
+        M[0][1] = M[1][0] = T(0);
+    }
+
+    Matrix2(T m11, T m12,
+            T m21, T m22)
+    {
+        M[0][0] = m11; M[0][1] = m12;
+        M[1][0] = m21; M[1][1] = m22;
+    }
+
+    // Construction from X, Y basis vectors
+    Matrix2(const Vector2<T>& xBasis, const Vector2<T>& yBasis)
+    {
+        M[0][0] = xBasis.x; M[0][1] = yBasis.x;
+        M[1][0] = xBasis.y; M[1][1] = yBasis.y;
+    }
+
+    explicit Matrix2(T s)
+    {
+        M[0][0] = M[1][1] = s;
+        M[0][1] = M[1][0] = T(0);
+    }
+
+    Matrix2(T m11, T m22)
+    {
+        M[0][0] = m11; M[0][1] = T(0);
+        M[1][0] = T(0);   M[1][1] = m22;
+    }
+
+    explicit Matrix2(const Matrix2<typename Math<T>::OtherFloatType> &src)
+    {
+        M[0][0] = T(src.M[0][0]); M[0][1] = T(src.M[0][1]);
+        M[1][0] = T(src.M[1][0]); M[1][1] = T(src.M[1][1]);
+    }
+
+    // C-interop support
+    Matrix2(const typename CompatibleTypes<Matrix2<T> >::Type& s)
+    {
+        OVR_MATH_STATIC_ASSERT(sizeof(s) == sizeof(Matrix2), "sizeof(s) == sizeof(Matrix2)");
+        memcpy(M, s.M, sizeof(M));
+    }
+
+    operator const typename CompatibleTypes<Matrix2<T> >::Type() const
+    {
+        typename CompatibleTypes<Matrix2<T> >::Type result;
+        OVR_MATH_STATIC_ASSERT(sizeof(result) == sizeof(Matrix2), "sizeof(result) == sizeof(Matrix2)");
+        memcpy(result.M, M, sizeof(M));
+        return result;
+    }
+
+    T  operator()(int i, int j) const { return M[i][j]; }
+    T& operator()(int i, int j)       { return M[i][j]; }
+    const T*  operator[](int i) const { return M[i]; }
+    T*  operator[](int i)             { return M[i]; }
+
+    static Matrix2 Identity()  { return Matrix2(); }
+
+    void SetIdentity()
+    {
+        M[0][0] = M[1][1] = T(1);
+        M[0][1] = M[1][0] = T(0);
+    }
+
+    static Matrix2 Diagonal(T m00, T m11)
+    {
+        return Matrix2(m00, m11);
+    }
+    static Matrix2 Diagonal(const Vector2<T>& v) { return Matrix2(v.x, v.y); }
+
+    T Trace() const { return M[0][0] + M[1][1]; }
+
+    bool operator== (const Matrix2& b) const
+    {
+        return M[0][0] == b.M[0][0] && M[0][1] == b.M[0][1] &&
+               M[1][0] == b.M[1][0] && M[1][1] == b.M[1][1];
+    }
+
+    Matrix2 operator+ (const Matrix2& b) const
+    {
+        return Matrix2(M[0][0] + b.M[0][0], M[0][1] + b.M[0][1],
+                       M[1][0] + b.M[1][0], M[1][1] + b.M[1][1]);
+    }
+
+    Matrix2& operator+= (const Matrix2& b)
+    {
+        M[0][0] += b.M[0][0]; M[0][1] += b.M[0][1];
+        M[1][0] += b.M[1][0]; M[1][1] += b.M[1][1];
+        return *this;
+    }
+
+    void operator= (const Matrix2& b)
+    {
+        M[0][0] = b.M[0][0]; M[0][1] = b.M[0][1];
+        M[1][0] = b.M[1][0]; M[1][1] = b.M[1][1];
+    }
+
+    Matrix2 operator- (const Matrix2& b) const
+    {
+        return Matrix2(M[0][0] - b.M[0][0], M[0][1] - b.M[0][1],
+                       M[1][0] - b.M[1][0], M[1][1] - b.M[1][1]);
+    }
+
+    Matrix2& operator-= (const Matrix2& b)
+    {
+        M[0][0] -= b.M[0][0]; M[0][1] -= b.M[0][1];
+        M[1][0] -= b.M[1][0]; M[1][1] -= b.M[1][1];
+        return *this;
+    }
+
+    Matrix2 operator* (const Matrix2& b) const
+    {
+        return Matrix2(M[0][0] * b.M[0][0] + M[0][1] * b.M[1][0], M[0][0] * b.M[0][1] + M[0][1] * b.M[1][1],
+                       M[1][0] * b.M[0][0] + M[1][1] * b.M[1][0], M[1][0] * b.M[0][1] + M[1][1] * b.M[1][1]);
+    }
+
+    Matrix2& operator*= (const Matrix2& b)
+    {
+        *this = *this * b;
+        return *this;
+    }
+
+    Matrix2 operator* (T s) const
+    {
+        return Matrix2(M[0][0] * s, M[0][1] * s,
+                       M[1][0] * s, M[1][1] * s);
+    }
+
+    Matrix2& operator*= (T s)
+    {
+        M[0][0] *= s; M[0][1] *= s;
+        M[1][0] *= s; M[1][1] *= s;
+        return *this;
+    }
+
+    Matrix2 operator/ (T s) const
+    {
+        return *this * (T(1) / s);
+    }
+
+    Matrix2& operator/= (T s)
+    {
+        return *this *= (T(1) / s);
+    }
+
+    Vector2<T> operator* (const Vector2<T> &b) const
+    {
+        return Vector2<T>(M[0][0] * b.x + M[0][1] * b.y,
+                          M[1][0] * b.x + M[1][1] * b.y);
+    }
+
+    Vector2<T> Transform(const Vector2<T>& v) const
+    {
+        return Vector2<T>(M[0][0] * v.x + M[0][1] * v.y,
+                          M[1][0] * v.x + M[1][1] * v.y);
+    }
+
+    Matrix2 Transposed() const
+    {
+        return Matrix2(M[0][0], M[1][0],
+                       M[0][1], M[1][1]);
+    }
+
+    void Transpose()
+    {
+        OVRMath_Swap(M[1][0], M[0][1]);
+    }
+
+    Vector2<T> GetColumn(int c) const
+    {
+        return Vector2<T>(M[0][c], M[1][c]);
+    }
+
+    Vector2<T> GetRow(int r) const
+    {
+        return Vector2<T>(M[r][0], M[r][1]);
+    }
+
+    void SetColumn(int c, const Vector2<T>& v)
+    {
+        M[0][c] = v.x;
+        M[1][c] = v.y;
+    }
+
+    void SetRow(int r, const Vector2<T>& v)
+    {
+        M[r][0] = v.x;
+        M[r][1] = v.y;
+    }
+
+    T Determinant() const
+    {
+        return M[0][0] * M[1][1] - M[0][1] * M[1][0];
+    }
+
+    Matrix2 Inverse() const
+    {
+        T rcpDet = T(1) / Determinant();
+        return Matrix2( M[1][1] * rcpDet, -M[0][1] * rcpDet,
+                       -M[1][0] * rcpDet,  M[0][0] * rcpDet);
+    }
+
+    // Outer Product of two column vectors: a * b.Transpose()
+    static Matrix2 OuterProduct(const Vector2<T>& a, const Vector2<T>& b)
+    {
+        return Matrix2(a.x*b.x, a.x*b.y,
+                       a.y*b.x, a.y*b.y);
+    }
+
+    // Angle in radians between two rotation matrices
+    T Angle(const Matrix2& b) const
+    {
+        const Matrix2& a = *this;
+        return Acos(a(0, 0)*b(0, 0) + a(1, 0)*b(1, 0));
+    }
+};
+
+typedef Matrix2<float>  Matrix2f;
+typedef Matrix2<double> Matrix2d;
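
A small sketch of the 2x2 inverse defined above (illustrative values):

    Matrix2f m(4.0f, 7.0f,
               2.0f, 6.0f);                  // determinant = 4*6 - 7*2 = 10
    Matrix2f inv = m.Inverse();
    Vector2f x = inv * Vector2f(1.0f, 0.0f); // solves m * x = (1, 0)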
+
+//-------------------------------------------------------------------------------------
+
+template<class T>
+class SymMat3
+{
+private:
+    typedef SymMat3<T> this_type;
+
+public:
+    typedef T Value_t;
+    // Upper symmetric
+    T v[6]; // _00 _01 _02 _11 _12 _22
+
+    inline SymMat3() {}
+
+    inline explicit SymMat3(T s)
+    {
+        v[0] = v[3] = v[5] = s;
+        v[1] = v[2] = v[4] = T(0);
+    }
+
+    inline explicit SymMat3(T a00, T a01, T a02, T a11, T a12, T a22)
+    {
+        v[0] = a00; v[1] = a01; v[2] = a02;
+        v[3] = a11; v[4] = a12;
+        v[5] = a22;
+    }
+
+    // Cast to symmetric Matrix3
+    operator Matrix3<T>() const
+    {
+        return Matrix3<T>(v[0], v[1], v[2],
+                          v[1], v[3], v[4],
+                          v[2], v[4], v[5]);
+    }
+
+    static inline int Index(unsigned int i, unsigned int j)
+    {
+        return (i <= j) ? (3*i - i*(i+1)/2 + j) : (3*j - j*(j+1)/2 + i);
+    }
+
+    inline T operator()(int i, int j) const { return v[Index(i,j)]; }
+    
+    inline T &operator()(int i, int j) { return v[Index(i,j)]; }
+
+    inline this_type& operator+=(const this_type& b)
+    {
+        v[0]+=b.v[0];
+        v[1]+=b.v[1];
+        v[2]+=b.v[2];
+        v[3]+=b.v[3];
+        v[4]+=b.v[4];
+        v[5]+=b.v[5];
+        return *this;
+    }
+
+    inline this_type& operator-=(const this_type& b)
+    {
+        v[0]-=b.v[0];
+        v[1]-=b.v[1];
+        v[2]-=b.v[2];
+        v[3]-=b.v[3];
+        v[4]-=b.v[4];
+        v[5]-=b.v[5];
+
+        return *this;
+    }
+
+    inline this_type& operator*=(T s)
+    {
+        v[0]*=s;
+        v[1]*=s;
+        v[2]*=s;
+        v[3]*=s;
+        v[4]*=s;
+        v[5]*=s;
+
+        return *this;
+    }
+        
+    inline SymMat3 operator*(T s) const
+    {
+        SymMat3 d;
+        d.v[0] = v[0]*s; 
+        d.v[1] = v[1]*s; 
+        d.v[2] = v[2]*s; 
+        d.v[3] = v[3]*s; 
+        d.v[4] = v[4]*s; 
+        d.v[5] = v[5]*s; 
+                        
+        return d;
+    }
+
+    // Multiplies two matrices into destination with minimum copying.
+    static SymMat3& Multiply(SymMat3* d, const SymMat3& a, const SymMat3& b)
+    {        
+        // _00 _01 _02 _11 _12 _22
+
+        d->v[0] = a.v[0] * b.v[0];
+        d->v[1] = a.v[0] * b.v[1] + a.v[1] * b.v[3];
+        d->v[2] = a.v[0] * b.v[2] + a.v[1] * b.v[4];
+                    
+        d->v[3] = a.v[3] * b.v[3];
+        d->v[4] = a.v[3] * b.v[4] + a.v[4] * b.v[5];
+                
+        d->v[5] = a.v[5] * b.v[5];
+    
+        return *d;
+    }
+    
+    inline T Determinant() const
+    {
+        const this_type& m = *this;
+        T d; 
+
+        d  = m(0,0) * (m(1,1)*m(2,2) - m(1,2) * m(2,1));
+        d -= m(0,1) * (m(1,0)*m(2,2) - m(1,2) * m(2,0));
+        d += m(0,2) * (m(1,0)*m(2,1) - m(1,1) * m(2,0));
+
+        return d;
+    }
+
+    inline this_type Inverse() const
+    {
+        this_type a;
+        const this_type& m = *this;
+        T d = Determinant();
+
+        OVR_MATH_ASSERT(d != 0);
+        T s = T(1)/d;
+
+        a(0,0) = s * (m(1,1) * m(2,2) - m(1,2) * m(2,1));   
+
+        a(0,1) = s * (m(0,2) * m(2,1) - m(0,1) * m(2,2));   
+        a(1,1) = s * (m(0,0) * m(2,2) - m(0,2) * m(2,0));   
+
+        a(0,2) = s * (m(0,1) * m(1,2) - m(0,2) * m(1,1));   
+        a(1,2) = s * (m(0,2) * m(1,0) - m(0,0) * m(1,2));   
+        a(2,2) = s * (m(0,0) * m(1,1) - m(0,1) * m(1,0));   
+
+        return a;
+    }
+
+    inline T Trace() const { return v[0] + v[3] + v[5]; }
+
+    // M = a*a.t()
+    inline void Rank1(const Vector3<T> &a)
+    {
+        v[0] = a.x*a.x; v[1] = a.x*a.y; v[2] = a.x*a.z;
+        v[3] = a.y*a.y; v[4] = a.y*a.z;
+        v[5] = a.z*a.z;
+    }
+
+    // M += a*a.t()
+    inline void Rank1Add(const Vector3<T> &a)
+    {
+        v[0] += a.x*a.x; v[1] += a.x*a.y; v[2] += a.x*a.z;
+        v[3] += a.y*a.y; v[4] += a.y*a.z;
+        v[5] += a.z*a.z;
+    }
+
+    // M -= a*a.t()
+    inline void Rank1Sub(const Vector3<T> &a)
+    {
+        v[0] -= a.x*a.x; v[1] -= a.x*a.y; v[2] -= a.x*a.z;
+        v[3] -= a.y*a.y; v[4] -= a.y*a.z;
+        v[5] -= a.z*a.z;
+    }
+};
+
+typedef SymMat3<float>  SymMat3f;
+typedef SymMat3<double> SymMat3d;
+
+template<class T>
+inline Matrix3<T> operator*(const SymMat3<T>& a, const SymMat3<T>& b)
+{
+    #define AJB_ARBC(r,c) (a(r,0)*b(0,c)+a(r,1)*b(1,c)+a(r,2)*b(2,c))
+    return Matrix3<T>(
+        AJB_ARBC(0,0), AJB_ARBC(0,1), AJB_ARBC(0,2),
+        AJB_ARBC(1,0), AJB_ARBC(1,1), AJB_ARBC(1,2),
+        AJB_ARBC(2,0), AJB_ARBC(2,1), AJB_ARBC(2,2));
+    #undef AJB_ARBC
+}
+
+template<class T>
+inline Matrix3<T> operator*(const Matrix3<T>& a, const SymMat3<T>& b)
+{
+    #define AJB_ARBC(r,c) (a(r,0)*b(0,c)+a(r,1)*b(1,c)+a(r,2)*b(2,c))
+    return Matrix3<T>(
+        AJB_ARBC(0,0), AJB_ARBC(0,1), AJB_ARBC(0,2),
+        AJB_ARBC(1,0), AJB_ARBC(1,1), AJB_ARBC(1,2),
+        AJB_ARBC(2,0), AJB_ARBC(2,1), AJB_ARBC(2,2));
+    #undef AJB_ARBC
+}
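
SymMat3 packs the upper triangle into six values in the order noted above (_00 _01 _02 _11 _12 _22); a usage sketch (illustrative only):

    SymMat3f s(1.0f, 2.0f, 3.0f,
                     4.0f, 5.0f,
                           6.0f);
    float a = s(0, 1);    // 2.0f
    float b = s(1, 0);    // also 2.0f: Index() maps (i,j) and (j,i) to the same packed slot
    Matrix3f full = s;    // expands to the full symmetric 3x3 matrix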
+
+//-------------------------------------------------------------------------------------
+// ***** Angle
+
+// Cleanly representing the algebra of 2D rotations.
+// The operations maintain the angle between -Pi and Pi, the same range as atan2.
+
+template<class T>
+class Angle
+{
+public:
+    enum AngularUnits
+    {
+        Radians = 0,
+        Degrees = 1
+    };
+
+    Angle() : a(0) {}
+    
+    // Fix the range to be between -Pi and Pi
+    Angle(T a_, AngularUnits u = Radians) : a((u == Radians) ? a_ : a_*((T)MATH_DOUBLE_DEGREETORADFACTOR)) { FixRange(); }
+
+    T    Get(AngularUnits u = Radians) const       { return (u == Radians) ? a : a*((T)MATH_DOUBLE_RADTODEGREEFACTOR); }
+    void Set(const T& x, AngularUnits u = Radians) { a = (u == Radians) ? x : x*((T)MATH_DOUBLE_DEGREETORADFACTOR); FixRange(); }
+    int Sign() const                               { if (a == 0) return 0; else return (a > 0) ? 1 : -1; }
+    T   Abs() const                                { return (a >= 0) ? a : -a; }
+
+    bool operator== (const Angle& b) const    { return a == b.a; }
+    bool operator!= (const Angle& b) const    { return a != b.a; }
+//    bool operator<  (const Angle& b) const    { return a < a.b; } 
+//    bool operator>  (const Angle& b) const    { return a > a.b; } 
+//    bool operator<= (const Angle& b) const    { return a <= a.b; } 
+//    bool operator>= (const Angle& b) const    { return a >= a.b; } 
+//    bool operator= (const T& x)               { a = x; FixRange(); }
+
+    // These operations assume a is already between -Pi and Pi.
+    Angle& operator+= (const Angle& b)        { a = a + b.a; FastFixRange(); return *this; }
+    Angle& operator+= (const T& x)            { a = a + x; FixRange(); return *this; }
+    Angle  operator+  (const Angle& b) const  { Angle res = *this; res += b; return res; }
+    Angle  operator+  (const T& x) const      { Angle res = *this; res += x; return res; }
+    Angle& operator-= (const Angle& b)        { a = a - b.a; FastFixRange(); return *this; }
+    Angle& operator-= (const T& x)            { a = a - x; FixRange(); return *this; }
+    Angle  operator-  (const Angle& b) const  { Angle res = *this; res -= b; return res; }
+    Angle  operator-  (const T& x) const      { Angle res = *this; res -= x; return res; }
+    
+    T   Distance(const Angle& b)              { T c = fabs(a - b.a); return (c <= ((T)MATH_DOUBLE_PI)) ? c : ((T)MATH_DOUBLE_TWOPI) - c; }
+
+private:
+
+    // The stored angle, which should be maintained between -Pi and Pi
+    T a;
+
+    // Fixes the angle range to [-Pi,Pi], but assumes no more than 2Pi away on either side 
+    inline void FastFixRange()
+    {
+        if (a < -((T)MATH_DOUBLE_PI))
+            a += ((T)MATH_DOUBLE_TWOPI);
+        else if (a > ((T)MATH_DOUBLE_PI))
+            a -= ((T)MATH_DOUBLE_TWOPI);
+    }
+
+    // Fixes the angle range to [-Pi,Pi] for any input value, but is slower than the fast method
+    inline void FixRange()
+    {
+        // do nothing if the value is already in the correct range, since fmod call is expensive
+        if (a >= -((T)MATH_DOUBLE_PI) && a <= ((T)MATH_DOUBLE_PI))
+            return;
+        a = fmod(a,((T)MATH_DOUBLE_TWOPI));
+        if (a < -((T)MATH_DOUBLE_PI))
+            a += ((T)MATH_DOUBLE_TWOPI);
+        else if (a > ((T)MATH_DOUBLE_PI))
+            a -= ((T)MATH_DOUBLE_TWOPI);
+    }
+};
+
+
+typedef Angle<float>  Anglef;
+typedef Angle<double> Angled;
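
A sketch of the wrap-around behaviour described above (illustrative values):

    Anglef a(180.0f, Anglef::Degrees);
    a += Anglef(270.0f, Anglef::Degrees);
    float deg = a.Get(Anglef::Degrees);   // 90, since the sum is kept within [-Pi, Pi]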
+
+
+//-------------------------------------------------------------------------------------
+// ***** Plane
+
+// Consists of a normal vector and distance from the origin where the plane is located.
+
+template<class T>
+class Plane
+{
+public:
+    Vector3<T> N;
+    T          D;
+
+    Plane() : D(0) {}
+
+    // Normals must already be normalized
+    Plane(const Vector3<T>& n, T d) : N(n), D(d) {}
+    Plane(T x, T y, T z, T d) : N(x,y,z), D(d) {}
+
+    // construct from a point on the plane and the normal
+    Plane(const Vector3<T>& p, const Vector3<T>& n) : N(n), D(-(p * n)) {}
+
+    // Find the point to plane distance. The sign indicates what side of the plane the point is on (0 = point on plane).
+    T TestSide(const Vector3<T>& p) const
+    {
+        return (N.Dot(p)) + D;
+    }
+
+    Plane<T> Flipped() const
+    {
+        return Plane(-N, -D);
+    }
+
+    void Flip()
+    {
+        N = -N;
+        D = -D;
+    }
+
+    bool operator==(const Plane<T>& rhs) const
+    {
+        return (this->D == rhs.D && this->N == rhs.N);
+    }
+};
+
+typedef Plane<float> Planef;
+typedef Plane<double> Planed;
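
The signed point-to-plane test above can be exercised like this (sketch; the normal is already unit length, as the class requires):

    Planef ground(Vector3f(0.0f, 1.0f, 0.0f), 0.0f);         // the plane y = 0
    float d = ground.TestSide(Vector3f(0.0f, 2.5f, 0.0f));   // +2.5: point lies on the normal side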
+
+
+
+
+//-----------------------------------------------------------------------------------
+// ***** ScaleAndOffset2D
+
+struct ScaleAndOffset2D
+{
+    Vector2f Scale;
+    Vector2f Offset;
+
+    ScaleAndOffset2D(float sx = 0.0f, float sy = 0.0f, float ox = 0.0f, float oy = 0.0f)
+        : Scale(sx, sy), Offset(ox, oy)        
+    { }
+};
+
+
+//-----------------------------------------------------------------------------------
+// ***** FovPort
+
+// FovPort describes Field Of View (FOV) of a viewport.
+// This class has values for up, down, left and right, stored in 
+// tangent of the angle units to simplify calculations.
+//
+// As an example, for a standard 90 degree vertical FOV, we would 
+// have: { UpTan = tan(90 degrees / 2), DownTan = tan(90 degrees / 2) }.
+//
+// CreateFromRadians/Degrees helper functions can be used to
+// access FOV in different units.
+
+
+// ***** FovPort
+
+struct FovPort
+{
+    float UpTan;
+    float DownTan;
+    float LeftTan;
+    float RightTan;
+
+    FovPort ( float sideTan = 0.0f ) :
+        UpTan(sideTan), DownTan(sideTan), LeftTan(sideTan), RightTan(sideTan) { }
+    FovPort ( float u, float d, float l, float r ) :
+        UpTan(u), DownTan(d), LeftTan(l), RightTan(r) { }
+
+    // C-interop support: FovPort <-> ovrFovPort (implementation in OVR_CAPI.cpp).
+    FovPort(const ovrFovPort &src)
+        : UpTan(src.UpTan), DownTan(src.DownTan), LeftTan(src.LeftTan), RightTan(src.RightTan)
+    { }    
+
+    operator ovrFovPort () const
+    {
+        ovrFovPort result;
+        result.LeftTan  = LeftTan;
+        result.RightTan = RightTan;
+        result.UpTan    = UpTan;
+        result.DownTan  = DownTan;
+        return result;
+    }
+
+    static FovPort CreateFromRadians(float horizontalFov, float verticalFov)
+    {
+        FovPort result;
+        result.UpTan    = tanf (   verticalFov * 0.5f );
+        result.DownTan  = tanf (   verticalFov * 0.5f );
+        result.LeftTan  = tanf ( horizontalFov * 0.5f );
+        result.RightTan = tanf ( horizontalFov * 0.5f );
+        return result;
+    }
+
+    static FovPort CreateFromDegrees(float horizontalFovDegrees,
+                                     float verticalFovDegrees)
+    {
+        return CreateFromRadians(DegreeToRad(horizontalFovDegrees),
+                                 DegreeToRad(verticalFovDegrees));
+    }
+
+    //  Get Horizontal/Vertical components of Fov in radians.
+    float GetVerticalFovRadians() const     { return atanf(UpTan)    + atanf(DownTan); }
+    float GetHorizontalFovRadians() const   { return atanf(LeftTan)  + atanf(RightTan); }
+    //  Get Horizontal/Vertical components of Fov in degrees.
+    float GetVerticalFovDegrees() const     { return RadToDegree(GetVerticalFovRadians()); }
+    float GetHorizontalFovDegrees() const   { return RadToDegree(GetHorizontalFovRadians()); }
+
+    // Compute maximum tangent value among all four sides.
+    float GetMaxSideTan() const
+    {
+        return OVRMath_Max(OVRMath_Max(UpTan, DownTan), OVRMath_Max(LeftTan, RightTan));
+    }
+
+    static ScaleAndOffset2D CreateNDCScaleAndOffsetFromFov ( FovPort tanHalfFov )
+    {
+        float projXScale = 2.0f / ( tanHalfFov.LeftTan + tanHalfFov.RightTan );
+        float projXOffset = ( tanHalfFov.LeftTan - tanHalfFov.RightTan ) * projXScale * 0.5f;
+        float projYScale = 2.0f / ( tanHalfFov.UpTan + tanHalfFov.DownTan );
+        float projYOffset = ( tanHalfFov.UpTan - tanHalfFov.DownTan ) * projYScale * 0.5f;
+
+        ScaleAndOffset2D result;
+        result.Scale    = Vector2f(projXScale, projYScale);
+        result.Offset   = Vector2f(projXOffset, projYOffset);
+        // Hey - why is that Y.Offset negated?
+        // It's because a projection matrix transforms from world coords with Y=up,
+        // whereas this is from NDC which is Y=down.
+
+        return result;
+    }
+
+    // Converts Fov Tan angle units to [-1,1] render target NDC space
+    Vector2f TanAngleToRendertargetNDC(Vector2f const &tanEyeAngle)
+    {  
+        ScaleAndOffset2D eyeToSourceNDC = CreateNDCScaleAndOffsetFromFov(*this);
+        return tanEyeAngle * eyeToSourceNDC.Scale + eyeToSourceNDC.Offset;
+    }
+
+    // Compute per-channel minimum and maximum of Fov.
+    static FovPort Min(const FovPort& a, const FovPort& b)
+    {   
+        FovPort fov( OVRMath_Min( a.UpTan   , b.UpTan    ),   
+                     OVRMath_Min( a.DownTan , b.DownTan  ),
+                     OVRMath_Min( a.LeftTan , b.LeftTan  ),
+                     OVRMath_Min( a.RightTan, b.RightTan ) );
+        return fov;
+    }
+
+    static FovPort Max(const FovPort& a, const FovPort& b)
+    {   
+        FovPort fov( OVRMath_Max( a.UpTan   , b.UpTan    ),   
+                     OVRMath_Max( a.DownTan , b.DownTan  ),
+                     OVRMath_Max( a.LeftTan , b.LeftTan  ),
+                     OVRMath_Max( a.RightTan, b.RightTan ) );
+        return fov;
+    }
+};
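
A sketch of the FovPort helpers above (the 100x90 degree FOV is an assumed example value):

    FovPort fov = FovPort::CreateFromDegrees(100.0f, 90.0f);       // horizontal, vertical
    float vertRad = fov.GetVerticalFovRadians();                   // ~1.5708
    ScaleAndOffset2D ndc = FovPort::CreateNDCScaleAndOffsetFromFov(fov);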
+
+
+} // Namespace OVR
+
+
+#if defined(_MSC_VER)
+    #pragma warning(pop)
+#endif
+
+
+#endif

+ 70 - 0
src/external/OculusSDK/LibOVR/Include/Extras/OVR_StereoProjection.h

@@ -0,0 +1,70 @@
+/************************************************************************************
+
+Filename    :   OVR_StereoProjection.h
+Content     :   Stereo projection functions
+Created     :   November 30, 2013
+Authors     :   Tom Fosyth
+
+Copyright   :   Copyright 2014-2016 Oculus VR, LLC All Rights reserved.
+
+Licensed under the Oculus VR Rift SDK License Version 3.3 (the "License"); 
+you may not use the Oculus VR Rift SDK except in compliance with the License, 
+which is provided at the time of installation or download, or which 
+otherwise accompanies this software in either electronic or hard copy form.
+
+You may obtain a copy of the License at
+
+http://www.oculusvr.com/licenses/LICENSE-3.3 
+
+Unless required by applicable law or agreed to in writing, the Oculus VR SDK 
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+*************************************************************************************/
+
+#ifndef OVR_StereoProjection_h
+#define OVR_StereoProjection_h
+
+
+#include "Extras/OVR_Math.h"
+
+
+namespace OVR {
+
+
+//-----------------------------------------------------------------------------------
+// ***** Stereo Enumerations
+
+// StereoEye specifies which eye we are rendering for; it is used to
+// retrieve StereoEyeParams.
+enum StereoEye
+{
+    StereoEye_Left,
+    StereoEye_Right,
+    StereoEye_Center
+};
+
+
+
+//-----------------------------------------------------------------------------------
+// ***** Projection functions
+
+Matrix4f            CreateProjection ( bool rightHanded, bool isOpenGL, FovPort fov, StereoEye eye,
+                                       float zNear = 0.01f, float zFar = 10000.0f,
+                                       bool flipZ = false, bool farAtInfinity = false);
+
+Matrix4f            CreateOrthoSubProjection ( bool rightHanded, StereoEye eyeType,
+                                               float tanHalfFovX, float tanHalfFovY,
+                                               float unitsX, float unitsY, float distanceFromCamera,
+                                               float interpupillaryDistance, Matrix4f const &projection,
+                                               float zNear = 0.0f, float zFar = 0.0f,
+                                               bool flipZ = false, bool farAtInfinity = false);
+
+ScaleAndOffset2D    CreateNDCScaleAndOffsetFromFov ( FovPort fov );
+
+
+} //namespace OVR
+
+#endif // OVR_StereoProjection_h
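
A usage sketch for the declarations above (illustrative only; the eye, FOV and clip distances are assumed values):

    OVR::FovPort fov = OVR::FovPort::CreateFromDegrees(100.0f, 90.0f);
    OVR::Matrix4f proj = OVR::CreateProjection(true /* rightHanded */, true /* isOpenGL */,
                                               fov, OVR::StereoEye_Left, 0.1f, 1000.0f);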

+ 2116 - 0
src/external/OculusSDK/LibOVR/Include/OVR_CAPI.h

@@ -0,0 +1,2116 @@
+/********************************************************************************//**
+\file      OVR_CAPI.h
+\brief     C Interface to the Oculus PC SDK tracking and rendering library.
+\copyright Copyright 2014 Oculus VR, LLC All Rights reserved.
+************************************************************************************/
+
+#ifndef OVR_CAPI_h  //   We don't use version numbers within this name, as all versioned variations of this file are currently mutually exclusive.
+#define OVR_CAPI_h  ///< Header include guard
+
+
+#include "OVR_CAPI_Keys.h"
+#include "OVR_Version.h"
+#include "OVR_ErrorCode.h"
+
+
+#include <stdint.h>
+
+#if defined(_MSC_VER)
+    #pragma warning(push)
+    #pragma warning(disable: 4324) // structure was padded due to __declspec(align())
+    #pragma warning(disable: 4359) // The alignment specified for a type is less than the alignment of the type of one of its data members
+#endif
+
+
+
+//-----------------------------------------------------------------------------------
+// ***** OVR_OS
+//
+#if !defined(OVR_OS_WIN32) && defined(_WIN32)
+    #define OVR_OS_WIN32
+#endif
+
+#if !defined(OVR_OS_MAC) && defined(__APPLE__)
+    #define OVR_OS_MAC
+#endif
+
+#if !defined(OVR_OS_LINUX) && defined(__linux__)
+    #define OVR_OS_LINUX
+#endif
+
+
+
+//-----------------------------------------------------------------------------------
+// ***** OVR_CPP
+//
+#if !defined(OVR_CPP)
+    #if defined(__cplusplus)
+        #define OVR_CPP(x) x
+    #else
+        #define OVR_CPP(x) /* Not C++ */
+    #endif
+#endif
+
+
+
+//-----------------------------------------------------------------------------------
+// ***** OVR_CDECL
+//
+/// LibOVR calling convention for 32-bit Windows builds.
+//
+#if !defined(OVR_CDECL)
+    #if defined(_WIN32)
+        #define OVR_CDECL __cdecl
+    #else
+        #define OVR_CDECL
+    #endif
+#endif
+
+
+
+//-----------------------------------------------------------------------------------
+// ***** OVR_EXTERN_C
+//
+/// Defined as extern "C" when built from C++ code.
+//
+#if !defined(OVR_EXTERN_C)
+    #ifdef __cplusplus
+        #define OVR_EXTERN_C extern "C"
+    #else
+        #define OVR_EXTERN_C
+    #endif
+#endif
+
+
+
+//-----------------------------------------------------------------------------------
+// ***** OVR_PUBLIC_FUNCTION / OVR_PRIVATE_FUNCTION
+//
+// OVR_PUBLIC_FUNCTION  - Functions that are externally visible from a shared library. Corresponds to Microsoft __declspec(dllexport).
+// OVR_PUBLIC_CLASS     - C++ structs and classes that are externally visible from a shared library. Corresponds to Microsoft __declspec(dllexport).
+// OVR_PRIVATE_FUNCTION - Functions that are not visible outside of a shared library. They are private to the shared library.
+// OVR_PRIVATE_CLASS    - C++ structs and classes that are not visible outside of a shared library. They are private to the shared library.
+//
+// OVR_DLL_BUILD        - Used to indicate that the current compilation unit is of a shared library.
+// OVR_DLL_IMPORT       - Used to indicate that the current compilation unit is a user of the corresponding shared library.
+// OVR_STATIC_BUILD     - Used to indicate that the current compilation unit is not a shared library but rather statically linked code.
+//
+#if !defined(OVR_PUBLIC_FUNCTION)
+    #if defined(OVR_DLL_BUILD)
+        #if defined(_WIN32)
+            #define OVR_PUBLIC_FUNCTION(rval) OVR_EXTERN_C __declspec(dllexport) rval OVR_CDECL
+            #define OVR_PUBLIC_CLASS          __declspec(dllexport)
+            #define OVR_PRIVATE_FUNCTION(rval) rval OVR_CDECL
+            #define OVR_PRIVATE_CLASS
+        #else
+            #define OVR_PUBLIC_FUNCTION(rval) OVR_EXTERN_C __attribute__((visibility("default"))) rval OVR_CDECL /* Requires GCC 4.0+ */
+            #define OVR_PUBLIC_CLASS          __attribute__((visibility("default"))) /* Requires GCC 4.0+ */
+            #define OVR_PRIVATE_FUNCTION(rval) __attribute__((visibility("hidden"))) rval OVR_CDECL
+            #define OVR_PRIVATE_CLASS         __attribute__((visibility("hidden")))
+        #endif
+    #elif defined(OVR_DLL_IMPORT)
+        #if defined(_WIN32)
+            #define OVR_PUBLIC_FUNCTION(rval) OVR_EXTERN_C __declspec(dllimport) rval OVR_CDECL
+            #define OVR_PUBLIC_CLASS          __declspec(dllimport)
+        #else
+            #define OVR_PUBLIC_FUNCTION(rval) OVR_EXTERN_C rval OVR_CDECL
+            #define OVR_PUBLIC_CLASS
+        #endif
+        #define OVR_PRIVATE_FUNCTION(rval) rval OVR_CDECL
+        #define OVR_PRIVATE_CLASS
+    #else // OVR_STATIC_BUILD
+        #define OVR_PUBLIC_FUNCTION(rval)     OVR_EXTERN_C rval OVR_CDECL
+        #define OVR_PUBLIC_CLASS
+        #define OVR_PRIVATE_FUNCTION(rval) rval OVR_CDECL
+        #define OVR_PRIVATE_CLASS
+    #endif
+#endif
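
A sketch of how these macros wrap an exported entry point; my_ExampleEntryPoint is a hypothetical name, not part of the SDK:

    OVR_PUBLIC_FUNCTION(void) my_ExampleEntryPoint(int value);
    // In a C++ translation unit built with OVR_DLL_BUILD on Windows this expands to roughly:
    //     extern "C" __declspec(dllexport) void __cdecl my_ExampleEntryPoint(int value);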
+
+
+//-----------------------------------------------------------------------------------
+// ***** OVR_EXPORT
+//
+/// Provided for backward compatibility with older versions of this library.
+//
+#if !defined(OVR_EXPORT)
+    #ifdef OVR_OS_WIN32
+        #define OVR_EXPORT __declspec(dllexport)
+    #else
+        #define OVR_EXPORT
+    #endif
+#endif
+
+
+
+//-----------------------------------------------------------------------------------
+// ***** OVR_ALIGNAS
+//
+#if !defined(OVR_ALIGNAS)
+    #if defined(__GNUC__) || defined(__clang__)
+        #define OVR_ALIGNAS(n) __attribute__((aligned(n)))
+    #elif defined(_MSC_VER) || defined(__INTEL_COMPILER)
+        #define OVR_ALIGNAS(n) __declspec(align(n))
+    #elif defined(__CC_ARM)
+        #define OVR_ALIGNAS(n) __align(n)
+    #else
+        #error Need to define OVR_ALIGNAS
+    #endif
+#endif
+
+
+//-----------------------------------------------------------------------------------
+// ***** OVR_CC_HAS_FEATURE
+//
+// This is a portable way to use compile-time feature identification available
+// with some compilers in a clean way. Direct usage of __has_feature in preprocessing
+// statements of non-supporting compilers results in a preprocessing error.
+//
+// Example usage:
+//     #if OVR_CC_HAS_FEATURE(is_pod)
+//         if(__is_pod(T)) // If the type is plain data then we can safely memcpy it.
+//             memcpy(&destObject, &srcObject, sizeof(object));
+//     #endif
+//
+#if !defined(OVR_CC_HAS_FEATURE)
+    #if defined(__clang__) // http://clang.llvm.org/docs/LanguageExtensions.html#id2
+        #define OVR_CC_HAS_FEATURE(x) __has_feature(x)
+    #else
+        #define OVR_CC_HAS_FEATURE(x) 0
+    #endif
+#endif
+
+
+// ------------------------------------------------------------------------
+// ***** OVR_STATIC_ASSERT
+//
+// Portable support for C++11 static_assert().
+// Acts as if the following were declared:
+//     void OVR_STATIC_ASSERT(bool const_expression, const char* msg);
+//
+// Example usage:
+//     OVR_STATIC_ASSERT(sizeof(int32_t) == 4, "int32_t expected to be 4 bytes.");
+
+#if !defined(OVR_STATIC_ASSERT)
+    #if !(defined(__cplusplus) && (__cplusplus >= 201103L)) /* Other */ && \
+        !(defined(__GXX_EXPERIMENTAL_CXX0X__)) /* GCC */ && \
+        !(defined(__clang__) && defined(__cplusplus) && OVR_CC_HAS_FEATURE(cxx_static_assert)) /* clang */ && \
+        !(defined(_MSC_VER) && (_MSC_VER >= 1600) && defined(__cplusplus)) /* VS2010+  */
+
+        #if !defined(OVR_SA_UNUSED)
+        #if defined(OVR_CC_GNU) || defined(OVR_CC_CLANG)
+            #define OVR_SA_UNUSED __attribute__((unused))
+        #else
+            #define OVR_SA_UNUSED
+        #endif
+        #define OVR_SA_PASTE(a,b) a##b
+        #define OVR_SA_HELP(a,b)  OVR_SA_PASTE(a,b)
+        #endif
+
+        #if defined(__COUNTER__)
+            #define OVR_STATIC_ASSERT(expression, msg) typedef char OVR_SA_HELP(compileTimeAssert, __COUNTER__) [((expression) != 0) ? 1 : -1] OVR_SA_UNUSED
+        #else
+            #define OVR_STATIC_ASSERT(expression, msg) typedef char OVR_SA_HELP(compileTimeAssert, __LINE__) [((expression) != 0) ? 1 : -1] OVR_SA_UNUSED
+        #endif
+
+    #else
+        #define OVR_STATIC_ASSERT(expression, msg) static_assert(expression, msg)
+    #endif
+#endif
+
+
+//-----------------------------------------------------------------------------------
+// ***** Padding
+//
+/// Defines explicitly unused space for a struct.
+/// When used correctly, usage of this macro should not change the size of the struct.
+/// Compile-time and runtime behavior with and without this defined should be identical.
+///
+#if !defined(OVR_UNUSED_STRUCT_PAD)
+    #define OVR_UNUSED_STRUCT_PAD(padName, size) char padName[size];
+#endif
+
+
+//-----------------------------------------------------------------------------------
+// ***** Word Size
+//
+/// Specifies the size of a pointer on the given platform.
+///
+#if !defined(OVR_PTR_SIZE)
+    #if defined(__WORDSIZE)
+        #define OVR_PTR_SIZE ((__WORDSIZE) / 8)
+    #elif defined(_WIN64) || defined(__LP64__) || defined(_LP64) || defined(_M_IA64) || defined(__ia64__) || defined(__arch64__) || defined(__64BIT__) || defined(__Ptr_Is_64)
+        #define OVR_PTR_SIZE 8
+    #elif defined(__CC_ARM) && (__sizeof_ptr == 8)
+        #define OVR_PTR_SIZE 8
+    #else
+        #define OVR_PTR_SIZE 4
+    #endif
+#endif
+
+
+//-----------------------------------------------------------------------------------
+// ***** OVR_ON32 / OVR_ON64
+//
+#if OVR_PTR_SIZE == 8
+    #define OVR_ON32(x)
+    #define OVR_ON64(x) x
+#else
+    #define OVR_ON32(x) x
+    #define OVR_ON64(x)
+#endif
+
+
+//-----------------------------------------------------------------------------------
+// ***** ovrBool
+
+typedef char ovrBool;   ///< Boolean type
+#define ovrFalse 0      ///< ovrBool value of false.
+#define ovrTrue  1      ///< ovrBool value of true.
+
+
+//-----------------------------------------------------------------------------------
+// ***** Simple Math Structures
+
+/// A 2D vector with integer components.
+typedef struct OVR_ALIGNAS(4) ovrVector2i_
+{
+    int x, y;
+} ovrVector2i;
+
+/// A 2D size with integer components.
+typedef struct OVR_ALIGNAS(4) ovrSizei_
+{
+    int w, h;
+} ovrSizei;
+
+/// A 2D rectangle with a position and size.
+/// All components are integers.
+typedef struct OVR_ALIGNAS(4) ovrRecti_
+{
+    ovrVector2i Pos;
+    ovrSizei    Size;
+} ovrRecti;
+
+/// A quaternion rotation.
+typedef struct OVR_ALIGNAS(4) ovrQuatf_
+{
+    float x, y, z, w;
+} ovrQuatf;
+
+/// A 2D vector with float components.
+typedef struct OVR_ALIGNAS(4) ovrVector2f_
+{
+    float x, y;
+} ovrVector2f;
+
+/// A 3D vector with float components.
+typedef struct OVR_ALIGNAS(4) ovrVector3f_
+{
+    float x, y, z;
+} ovrVector3f;
+
+/// A 4x4 matrix with float elements.
+typedef struct OVR_ALIGNAS(4) ovrMatrix4f_
+{
+    float M[4][4];
+} ovrMatrix4f;
+
+
+/// Position and orientation together.
+typedef struct OVR_ALIGNAS(4) ovrPosef_
+{
+    ovrQuatf     Orientation;
+    ovrVector3f  Position;
+} ovrPosef;
+
+/// A full pose (rigid body) configuration with first and second derivatives.
+///
+/// Body refers to any object for which ovrPoseStatef is providing data.
+/// It can be the HMD, Touch controller, sensor or something else. The context 
+/// depends on the usage of the struct.
+typedef struct OVR_ALIGNAS(8) ovrPoseStatef_
+{
+    ovrPosef     ThePose;               ///< Position and orientation.
+    ovrVector3f  AngularVelocity;       ///< Angular velocity in radians per second.
+    ovrVector3f  LinearVelocity;        ///< Velocity in meters per second.
+    ovrVector3f  AngularAcceleration;   ///< Angular acceleration in radians per second per second.
+    ovrVector3f  LinearAcceleration;    ///< Acceleration in meters per second per second.
+    OVR_UNUSED_STRUCT_PAD(pad0, 4)      ///< \internal struct pad.
+    double       TimeInSeconds;         ///< Absolute time that this pose refers to. \see ovr_GetTimeInSeconds
+} ovrPoseStatef;
+
+/// Describes the up, down, left, and right angles of the field of view.
+///
+/// Field Of View (FOV) tangent of the angle units.
+/// \note For a standard 90 degree vertical FOV, we would
+/// have: { UpTan = tan(90 degrees / 2), DownTan = tan(90 degrees / 2) }.
+typedef struct OVR_ALIGNAS(4) ovrFovPort_
+{
+    float UpTan;    ///< The tangent of the angle between the viewing vector and the top edge of the field of view.
+    float DownTan;  ///< The tangent of the angle between the viewing vector and the bottom edge of the field of view.
+    float LeftTan;  ///< The tangent of the angle between the viewing vector and the left edge of the field of view.
+    float RightTan; ///< The tangent of the angle between the viewing vector and the right edge of the field of view.
+} ovrFovPort;
+
+
+//-----------------------------------------------------------------------------------
+// ***** HMD Types
+
+/// Enumerates all HMD types that we support.
+///
+/// The currently released developer kits are ovrHmd_DK1 and ovrHmd_DK2. The other enumerations are for internal use only.
+typedef enum ovrHmdType_
+{
+    ovrHmd_None      = 0,
+    ovrHmd_DK1       = 3,
+    ovrHmd_DKHD      = 4,
+    ovrHmd_DK2       = 6,
+    ovrHmd_CB        = 8,
+    ovrHmd_Other     = 9,
+    ovrHmd_E3_2015   = 10,
+    ovrHmd_ES06      = 11,
+    ovrHmd_ES09      = 12,
+    ovrHmd_ES11      = 13,
+    ovrHmd_CV1       = 14,
+
+    ovrHmd_EnumSize  = 0x7fffffff ///< \internal Force type int32_t.
+} ovrHmdType;
+
+
+/// HMD capability bits reported by device.
+///
+typedef enum ovrHmdCaps_
+{
+    // Read-only flags
+    ovrHmdCap_DebugDevice             = 0x0010,   ///< <B>(read only)</B> Specifies that the HMD is a virtual debug device.
+
+
+    ovrHmdCap_EnumSize            = 0x7fffffff ///< \internal Force type int32_t.
+} ovrHmdCaps;
+
+
+/// Tracking capability bits reported by the device.
+/// Used with ovr_GetTrackingCaps.
+typedef enum ovrTrackingCaps_
+{
+    ovrTrackingCap_Orientation      = 0x0010,    ///< Supports orientation tracking (IMU).
+    ovrTrackingCap_MagYawCorrection = 0x0020,    ///< Supports yaw drift correction via a magnetometer or other means.
+    ovrTrackingCap_Position         = 0x0040,    ///< Supports positional tracking.
+    ovrTrackingCap_EnumSize         = 0x7fffffff ///< \internal Force type int32_t.
+} ovrTrackingCaps;
+
+
+/// Specifies which eye is being used for rendering.
+/// This type explicitly does not include a third "NoStereo" monoscopic option, as such is
+/// not required for an HMD-centered API.
+typedef enum ovrEyeType_
+{
+    ovrEye_Left     = 0,         ///< The left eye, from the viewer's perspective.
+    ovrEye_Right    = 1,         ///< The right eye, from the viewer's perspective.
+    ovrEye_Count    = 2,         ///< \internal Count of enumerated elements.
+    ovrEye_EnumSize = 0x7fffffff ///< \internal Force type int32_t.
+} ovrEyeType;
+
+/// Specifies the coordinate system ovrTrackingState returns tracking poses in.
+/// Used with ovr_SetTrackingOriginType()
+typedef enum ovrTrackingOrigin_
+{
+    /// \brief Tracking system origin reported at eye (HMD) height
+    /// \details Prefer using this origin when your application requires
+    /// matching user's current physical head pose to a virtual head pose
+    /// without any regards to a the height of the floor. Cockpit-based,
+    /// or 3rd-person experiences are ideal candidates.
+    /// When used, all poses in ovrTrackingState are reported as an offset
+    /// transform from the profile calibrated or recentered HMD pose.
+    /// It is recommended that apps using this origin type call ovr_RecenterTrackingOrigin
+    /// prior to starting the VR experience, but notify the user before doing so
+    /// to make sure the user is in a comfortable pose, facing a comfortable
+    /// direction.
+    ovrTrackingOrigin_EyeLevel = 0,
+    /// \brief Tracking system origin reported at floor height
+    /// \details Prefer using this origin when your application requires the
+    /// physical floor height to match the virtual floor height, such as
+    /// standing experiences.
+    /// When used, all poses in ovrTrackingState are reported as an offset
+    /// transform from the profile calibrated floor pose. Calling ovr_RecenterTrackingOrigin
+    /// will recenter the X & Z axes as well as yaw, but the Y-axis (i.e. height) will continue
+    /// to be reported using the floor height as the origin for all poses.
+    ovrTrackingOrigin_FloorLevel = 1,
+    ovrTrackingOrigin_Count = 2,            ///< \internal Count of enumerated elements.
+    ovrTrackingOrigin_EnumSize = 0x7fffffff ///< \internal Force type int32_t.
+} ovrTrackingOrigin;
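
A sketch of selecting the floor-level origin described above, assuming an ovrSession created elsewhere and the ovr_SetTrackingOriginType() entry point mentioned in the comment:

    // 'session' is assumed to have been obtained from ovr_Create() beforehand
    ovr_SetTrackingOriginType(session, ovrTrackingOrigin_FloorLevel);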
+
+/// Identifies a graphics device in a platform-specific way.
+/// For Windows this is a LUID type.
+typedef struct OVR_ALIGNAS(OVR_PTR_SIZE) ovrGraphicsLuid_
+{
+    // Public definition reserves space for graphics API-specific implementation
+    char        Reserved[8];
+} ovrGraphicsLuid;
+
+
+/// This is a complete descriptor of the HMD.
+typedef struct OVR_ALIGNAS(OVR_PTR_SIZE) ovrHmdDesc_
+{
+    ovrHmdType   Type;                         ///< The type of HMD.
+    OVR_ON64(OVR_UNUSED_STRUCT_PAD(pad0, 4))   ///< \internal struct padding.
+    char         ProductName[64];              ///< UTF8-encoded product identification string (e.g. "Oculus Rift DK1").
+    char         Manufacturer[64];             ///< UTF8-encoded HMD manufacturer identification string.
+    short        VendorId;                     ///< HID (USB) vendor identifier of the device.
+    short        ProductId;                    ///< HID (USB) product identifier of the device.
+    char         SerialNumber[24];             ///< HMD serial number.
+    short        FirmwareMajor;                ///< HMD firmware major version.
+    short        FirmwareMinor;                ///< HMD firmware minor version.
+    unsigned int AvailableHmdCaps;             ///< Capability bits described by ovrHmdCaps which the HMD currently supports.
+    unsigned int DefaultHmdCaps;               ///< Capability bits described by ovrHmdCaps which are default for the current Hmd.
+    unsigned int AvailableTrackingCaps;        ///< Capability bits described by ovrTrackingCaps which the system currently supports.
+    unsigned int DefaultTrackingCaps;          ///< Capability bits described by ovrTrackingCaps which are default for the current system.
+    ovrFovPort   DefaultEyeFov[ovrEye_Count];  ///< Defines the recommended FOVs for the HMD.
+    ovrFovPort   MaxEyeFov[ovrEye_Count];      ///< Defines the maximum FOVs for the HMD.
+    ovrSizei     Resolution;                   ///< Resolution of the full HMD screen (both eyes) in pixels.
+    float        DisplayRefreshRate;           ///< Nominal refresh rate of the display in cycles per second at the time of HMD creation.
+    OVR_ON64(OVR_UNUSED_STRUCT_PAD(pad1, 4))   ///< \internal struct padding.
+} ovrHmdDesc;
+
+
+/// Used as an opaque pointer to an OVR session.
+typedef struct ovrHmdStruct* ovrSession;
+
+
+
+/// Bit flags describing the current status of sensor tracking.
+///  The values must be the same as in enum StatusBits
+///
+/// \see ovrTrackingState
+///
+typedef enum ovrStatusBits_
+{
+    ovrStatus_OrientationTracked    = 0x0001,    ///< Orientation is currently tracked (connected and in use).
+    ovrStatus_PositionTracked       = 0x0002,    ///< Position is currently tracked (false if out of range).
+    ovrStatus_EnumSize              = 0x7fffffff ///< \internal Force type int32_t.
+} ovrStatusBits;
+
+
+///  Specifies the description of a single sensor.
+///
+/// \see ovrGetTrackerDesc
+///
+typedef struct OVR_ALIGNAS(OVR_PTR_SIZE) ovrTrackerDesc_
+{
+    float FrustumHFovInRadians;      ///< Sensor frustum horizontal field-of-view (if present).
+    float FrustumVFovInRadians;      ///< Sensor frustum vertical field-of-view (if present).
+    float FrustumNearZInMeters;      ///< Sensor frustum near Z (if present).
+    float FrustumFarZInMeters;       ///< Sensor frustum far Z (if present).
+} ovrTrackerDesc;
+
+
+///  Specifies sensor flags.
+///
+///  \see ovrTrackerPose
+///
+typedef enum ovrTrackerFlags_
+{
+    ovrTracker_Connected   = 0x0020,      ///< The sensor is present, else the sensor is absent or offline.
+    ovrTracker_PoseTracked = 0x0004       ///< The sensor has a valid pose, else the pose is unavailable. This will only be set if ovrTracker_Connected is set.
+} ovrTrackerFlags;
+
+
+///  Specifies the pose for a single sensor.
+///
+typedef struct OVR_ALIGNAS(8) _ovrTrackerPose
+{
+    unsigned int TrackerFlags;      ///< ovrTrackerFlags.
+    ovrPosef     Pose;              ///< The sensor's pose. This pose includes sensor tilt (roll and pitch). For a leveled coordinate system use LeveledPose.
+    ovrPosef     LeveledPose;       ///< The sensor's leveled pose, aligned with gravity. This value includes position and yaw of the sensor, but not roll and pitch. It can be used as a reference point to render real-world objects in the correct location.
+    OVR_UNUSED_STRUCT_PAD(pad0, 4)  ///< \internal struct pad.
+} ovrTrackerPose;
+
+
+/// Tracking state at a given absolute time (describes predicted HMD pose, etc.).
+/// Returned by ovr_GetTrackingState.
+///
+/// \see ovr_GetTrackingState
+///
+typedef struct OVR_ALIGNAS(8) ovrTrackingState_
+{
+    /// Predicted head pose (and derivatives) at the requested absolute time.
+    ovrPoseStatef  HeadPose;
+
+    /// HeadPose tracking status described by ovrStatusBits.
+    unsigned int   StatusFlags;
+
+    /// The most recent calculated pose for each hand when hand controller tracking is present.
+    /// HandPoses[ovrHand_Left] refers to the left hand and HandPoses[ovrHand_Right] to the right hand.
+    /// These values can be combined with ovrInputState for complete hand controller information.
+    ovrPoseStatef  HandPoses[2];
+
+    /// HandPoses status flags described by ovrStatusBits.
+    /// Only ovrStatus_OrientationTracked and ovrStatus_PositionTracked are reported.
+    unsigned int   HandStatusFlags[2];
+
+    /// The pose of the origin captured during calibration.
+    /// Like all other poses here, this is expressed in the space set by ovr_RecenterTrackingOrigin,
+    /// and so will change every time that is called. This pose can be used to calculate
+    /// where the calibrated origin lands in the new recentered space.
+    /// If an application never calls ovr_RecenterTrackingOrigin, expect this value to be the identity
+    /// pose; as such, it corresponds to the origin of whichever ovrTrackingOrigin was requested when
+    /// calling ovr_GetTrackingState.
+    ovrPosef      CalibratedOrigin;
+
+} ovrTrackingState;
+
+
+/// Rendering information for each eye. Computed by ovr_GetRenderDesc() based on the
+/// specified FOV. Note that the rendering viewport is not included
+/// here as it can be specified separately and modified per frame by
+/// passing different Viewport values in the layer structure.
+///
+/// \see ovr_GetRenderDesc
+///
+typedef struct OVR_ALIGNAS(4) ovrEyeRenderDesc_
+{
+    ovrEyeType  Eye;                        ///< The eye index to which this instance corresponds.
+    ovrFovPort  Fov;                        ///< The field of view.
+    ovrRecti    DistortedViewport;          ///< Distortion viewport.
+    ovrVector2f PixelsPerTanAngleAtCenter;  ///< How many display pixels will fit in tan(angle) = 1.
+    ovrVector3f HmdToEyeOffset;             ///< Translation of each eye, in meters.
+} ovrEyeRenderDesc;
+
+
+/// Projection information for ovrLayerEyeFovDepth.
+///
+/// Use the utility function ovrTimewarpProjectionDesc_FromProjection to
+/// generate this structure from the application's projection matrix.
+///
+/// \see ovrLayerEyeFovDepth, ovrTimewarpProjectionDesc_FromProjection
+///
+typedef struct OVR_ALIGNAS(4) ovrTimewarpProjectionDesc_
+{
+    float Projection22;     ///< Projection matrix element [2][2].
+    float Projection23;     ///< Projection matrix element [2][3].
+    float Projection32;     ///< Projection matrix element [3][2].
+} ovrTimewarpProjectionDesc;
+
+
+/// Contains the data necessary to properly calculate position info for various layer types.
+/// - HmdToEyeOffset is the same value pair provided in ovrEyeRenderDesc.
+/// - HmdSpaceToWorldScaleInMeters is used to scale player motion into in-application units.
+///   In other words, it is how big an in-application unit is in the player's physical meters.
+///   For example, if the application uses inches as its units then HmdSpaceToWorldScaleInMeters would be 0.0254.
+///   Note that if you are scaling the player in size, this must also scale. So if your application
+///   units are inches, but you're shrinking the player to half their normal size, then
+///   HmdSpaceToWorldScaleInMeters would be 0.0254*2.0.
+///
+/// \see ovrEyeRenderDesc, ovr_SubmitFrame
+///
+typedef struct OVR_ALIGNAS(4) ovrViewScaleDesc_
+{
+    ovrVector3f HmdToEyeOffset[ovrEye_Count];   ///< Translation of each eye.
+    float       HmdSpaceToWorldScaleInMeters;   ///< Ratio of viewer units to meter units.
+} ovrViewScaleDesc;
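+
+/// <b>Example code</b> (illustrative sketch; assumes `eyeRenderDesc[2]` was filled in via ovr_GetRenderDesc
+/// and that the application's world units are inches)
+///     \code{.cpp}
+///         ovrViewScaleDesc viewScale;
+///         viewScale.HmdToEyeOffset[ovrEye_Left]  = eyeRenderDesc[ovrEye_Left].HmdToEyeOffset;
+///         viewScale.HmdToEyeOffset[ovrEye_Right] = eyeRenderDesc[ovrEye_Right].HmdToEyeOffset;
+///         viewScale.HmdSpaceToWorldScaleInMeters = 0.0254f;  // one application unit (inch) is 0.0254 meters
+///     \endcode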
+
+
+//-----------------------------------------------------------------------------------
+// ***** Platform-independent Rendering Configuration
+
+/// The type of texture resource.
+///
+/// \see ovrTextureSwapChainDesc
+///
+typedef enum ovrTextureType_
+{
+    ovrTexture_2D,              ///< 2D textures.
+    ovrTexture_2D_External,     ///< External 2D texture. Not used on PC
+    ovrTexture_Cube,            ///< Cube maps. Not currently supported on PC.
+    ovrTexture_Count,
+    ovrTexture_EnumSize = 0x7fffffff  ///< \internal Force type int32_t.
+} ovrTextureType;
+
+/// The bindings required for texture swap chain.
+///
+/// All texture swap chains are automatically bindable as shader
+/// input resources since the Oculus runtime needs this to read them.
+///
+/// \see ovrTextureSwapChainDesc
+///
+typedef enum ovrTextureBindFlags_
+{
+    ovrTextureBind_None,
+    ovrTextureBind_DX_RenderTarget = 0x0001,    ///< The application can write into the chain with pixel shader
+    ovrTextureBind_DX_UnorderedAccess = 0x0002, ///< The application can write to the chain with compute shader
+    ovrTextureBind_DX_DepthStencil = 0x0004,    ///< The chain buffers can be bound as depth and/or stencil buffers
+
+    ovrTextureBind_EnumSize = 0x7fffffff  ///< \internal Force type int32_t.
+} ovrTextureBindFlags;
+
+/// The format of a texture.
+///
+/// \see ovrTextureSwapChainDesc
+///
+typedef enum ovrTextureFormat_
+{
+    OVR_FORMAT_UNKNOWN,
+    OVR_FORMAT_B5G6R5_UNORM,    ///< Not currently supported on PC. Would require a DirectX 11.1 device.
+    OVR_FORMAT_B5G5R5A1_UNORM,  ///< Not currently supported on PC. Would require a DirectX 11.1 device.
+    OVR_FORMAT_B4G4R4A4_UNORM,  ///< Not currently supported on PC. Would require a DirectX 11.1 device.
+    OVR_FORMAT_R8G8B8A8_UNORM,
+    OVR_FORMAT_R8G8B8A8_UNORM_SRGB,
+    OVR_FORMAT_B8G8R8A8_UNORM,
+    OVR_FORMAT_B8G8R8A8_UNORM_SRGB, ///< Not supported for OpenGL applications
+    OVR_FORMAT_B8G8R8X8_UNORM,      ///< Not supported for OpenGL applications
+    OVR_FORMAT_B8G8R8X8_UNORM_SRGB, ///< Not supported for OpenGL applications
+    OVR_FORMAT_R16G16B16A16_FLOAT,
+    OVR_FORMAT_D16_UNORM,
+    OVR_FORMAT_D24_UNORM_S8_UINT,
+    OVR_FORMAT_D32_FLOAT,
+    OVR_FORMAT_D32_FLOAT_S8X24_UINT,
+
+    OVR_FORMAT_ENUMSIZE = 0x7fffffff  ///< \internal Force type int32_t.
+} ovrTextureFormat;
+
+/// Misc flags overriding particular
+///   behaviors of a texture swap chain
+///
+/// \see ovrTextureSwapChainDesc
+///
+typedef enum ovrTextureMiscFlags_
+{
+    ovrTextureMisc_None, 
+
+    /// DX only: The underlying texture is created with a TYPELESS equivalent of the
+    /// format specified in the texture desc. The SDK will still access the
+    /// texture using the format specified in the texture desc, but the app can
+    /// create views with different formats if this is specified.
+    ovrTextureMisc_DX_Typeless = 0x0001,
+
+    /// DX only: Allow generation of the mip chain on the GPU via the GenerateMips
+    /// call. This flag requires that RenderTarget binding also be specified.
+    ovrTextureMisc_AllowGenerateMips = 0x0002,
+
+    /// Texture swap chain contains protected content, and requires
+    /// HDCP connection in order to display to HMD. Also prevents
+    /// mirroring or other redirection of any frame containing this contents
+    ovrTextureMisc_ProtectedContent = 0x0004,
+
+    ovrTextureMisc_EnumSize = 0x7fffffff  ///< \internal Force type int32_t.
+} ovrTextureFlags;
+
+/// Description used to create a texture swap chain.
+///
+/// \see ovr_CreateTextureSwapChainDX
+/// \see ovr_CreateTextureSwapChainGL
+///
+typedef struct ovrTextureSwapChainDesc_
+{
+    ovrTextureType      Type;
+    ovrTextureFormat    Format;
+    int                 ArraySize;      ///< Only supported with ovrTexture_2D. Not supported on PC at this time.
+    int                 Width;
+    int                 Height;
+    int                 MipLevels;
+    int                 SampleCount;    ///< Currently only supported on depth textures.
+    ovrBool             StaticImage;    ///< Not buffered in a chain. For images that don't change.
+    unsigned int        MiscFlags;      ///< ovrTextureFlags
+    unsigned int        BindFlags;      ///< ovrTextureBindFlags. Not used for GL.
+} ovrTextureSwapChainDesc;
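+
+/// <b>Example code</b> (illustrative sketch for the OpenGL path; ovr_CreateTextureSwapChainGL is declared
+/// in OVR_CAPI_GL.h, and `textureSize` is assumed to come from ovr_GetFovTextureSize)
+///     \code{.cpp}
+///         ovrTextureSwapChainDesc desc = {};
+///         desc.Type        = ovrTexture_2D;
+///         desc.Format      = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
+///         desc.ArraySize   = 1;
+///         desc.Width       = textureSize.w;
+///         desc.Height      = textureSize.h;
+///         desc.MipLevels   = 1;
+///         desc.SampleCount = 1;
+///         desc.StaticImage = ovrFalse;
+///
+///         ovrTextureSwapChain chain;
+///         ovrResult result = ovr_CreateTextureSwapChainGL(session, &desc, &chain);
+///         if (OVR_FAILURE(result)) { /* handle error */ }
+///     \endcode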
+
+/// Description used to create a mirror texture.
+///
+/// \see ovr_CreateMirrorTextureDX
+/// \see ovr_CreateMirrorTextureGL
+///
+typedef struct ovrMirrorTextureDesc_
+{
+    ovrTextureFormat    Format;
+    int                 Width;
+    int                 Height;
+    unsigned int        MiscFlags;      ///< ovrTextureFlags
+} ovrMirrorTextureDesc;
+
+typedef struct ovrTextureSwapChainData* ovrTextureSwapChain;
+typedef struct ovrMirrorTextureData* ovrMirrorTexture;
+
+//-----------------------------------------------------------------------------------
+
+/// Describes button input types.
+/// Button inputs are combined; that is, they will be reported as pressed if they are
+/// pressed on either one of the two devices.
+/// The ovrButton_Up/Down/Left/Right map to both XBox D-Pad and directional buttons.
+/// The ovrButton_Enter and ovrButton_Back map to the Start and Back controller buttons, respectively.
+typedef enum ovrButton_
+{    
+    ovrButton_A         = 0x00000001,
+    ovrButton_B         = 0x00000002,
+    ovrButton_RThumb    = 0x00000004,
+    ovrButton_RShoulder = 0x00000008,
+
+    // Bit mask of all buttons on the right Touch controller
+    ovrButton_RMask     = ovrButton_A | ovrButton_B | ovrButton_RThumb | ovrButton_RShoulder,
+
+    ovrButton_X         = 0x00000100,
+    ovrButton_Y         = 0x00000200,
+    ovrButton_LThumb    = 0x00000400,  
+    ovrButton_LShoulder = 0x00000800,
+
+    // Bit mask of all buttons on the left Touch controller
+    ovrButton_LMask     = ovrButton_X | ovrButton_Y | ovrButton_LThumb | ovrButton_LShoulder,
+
+    // Navigation through DPad.
+    ovrButton_Up        = 0x00010000,
+    ovrButton_Down      = 0x00020000,
+    ovrButton_Left      = 0x00040000,
+    ovrButton_Right     = 0x00080000,
+    ovrButton_Enter     = 0x00100000, // Start on XBox controller.
+    ovrButton_Back      = 0x00200000, // Back on Xbox controller.
+    ovrButton_VolUp     = 0x00400000,  // only supported by Remote.
+    ovrButton_VolDown   = 0x00800000,  // only supported by Remote.
+    ovrButton_Home      = 0x01000000,  
+    ovrButton_Private   = ovrButton_VolUp | ovrButton_VolDown | ovrButton_Home,
+
+
+    ovrButton_EnumSize  = 0x7fffffff ///< \internal Force type int32_t.
+} ovrButton;
+
+/// Describes touch input types.
+/// These values map to capacitive touch values reported in ovrInputState::Touches.
+/// Some of these values are mapped to button bits for consistency.
+typedef enum ovrTouch_
+{
+    ovrTouch_A              = ovrButton_A,
+    ovrTouch_B              = ovrButton_B,
+    ovrTouch_RThumb         = ovrButton_RThumb,
+    ovrTouch_RIndexTrigger  = 0x00000010,
+
+    // Bit mask of all the button touches on the right controller
+    ovrTouch_RButtonMask    = ovrTouch_A | ovrTouch_B | ovrTouch_RThumb | ovrTouch_RIndexTrigger,
+
+    ovrTouch_X              = ovrButton_X,
+    ovrTouch_Y              = ovrButton_Y,
+    ovrTouch_LThumb         = ovrButton_LThumb,
+    ovrTouch_LIndexTrigger  = 0x00001000,
+
+    // Bit mask of all the button touches on the left controller
+    ovrTouch_LButtonMask    = ovrTouch_X | ovrTouch_Y | ovrTouch_LThumb | ovrTouch_LIndexTrigger,
+
+    // Finger pose state 
+    // Derived internally based on distance, proximity to sensors and filtering.
+    ovrTouch_RIndexPointing = 0x00000020,
+    ovrTouch_RThumbUp       = 0x00000040,
+
+    // Bit mask of all right controller poses
+    ovrTouch_RPoseMask      = ovrTouch_RIndexPointing | ovrTouch_RThumbUp,
+
+    ovrTouch_LIndexPointing = 0x00002000,
+    ovrTouch_LThumbUp       = 0x00004000,
+
+    // Bit mask of all left controller poses
+    ovrTouch_LPoseMask      = ovrTouch_LIndexPointing | ovrTouch_LThumbUp,
+
+    ovrTouch_EnumSize       = 0x7fffffff ///< \internal Force type int32_t.
+} ovrTouch;
+
+/// Specifies which controller is connected; multiple can be connected at once.
+typedef enum ovrControllerType_
+{
+    ovrControllerType_None      = 0x00,
+    ovrControllerType_LTouch    = 0x01,
+    ovrControllerType_RTouch    = 0x02,
+    ovrControllerType_Touch     = 0x03,
+    ovrControllerType_Remote    = 0x04,
+    ovrControllerType_XBox      = 0x10,
+
+    ovrControllerType_Active    = 0xff,      ///< Operate on or query whichever controller is active.
+
+    ovrControllerType_EnumSize  = 0x7fffffff ///< \internal Force type int32_t.
+} ovrControllerType;
+
+
+/// Provides names for the left and right hand array indexes.
+///
+/// \see ovrInputState, ovrTrackingState
+/// 
+typedef enum ovrHandType_
+{
+    ovrHand_Left  = 0,
+    ovrHand_Right = 1,
+    ovrHand_Count = 2,
+    ovrHand_EnumSize = 0x7fffffff ///< \internal Force type int32_t.
+} ovrHandType;
+
+
+
+/// ovrInputState describes the complete controller input state, including Oculus Touch,
+/// and XBox gamepad. If multiple inputs are connected and used at the same time,
+/// their inputs are combined.
+typedef struct ovrInputState_
+{
+    // System time when the controller state was last updated.
+    double              TimeInSeconds;
+
+    // Values for buttons described by ovrButton.
+    unsigned int        Buttons;
+
+    // Touch values for buttons and sensors as described by ovrTouch.
+    unsigned int        Touches;
+    
+    // Left and right finger trigger values (ovrHand_Left and ovrHand_Right), in the range 0.0 to 1.0f.
+    float               IndexTrigger[ovrHand_Count];
+    
+    // Left and right hand trigger values (ovrHand_Left and ovrHand_Right), in the range 0.0 to 1.0f.
+    float               HandTrigger[ovrHand_Count];
+
+    // Horizontal and vertical thumbstick axis values (ovrHand_Left and ovrHand_Right), in the range -1.0f to 1.0f.
+    ovrVector2f         Thumbstick[ovrHand_Count];
+
+    // The type of the controller this state is for.
+    ovrControllerType   ControllerType;
+    
+} ovrInputState;
+
+
+
+//-----------------------------------------------------------------------------------
+// ***** Initialize structures
+
+/// Initialization flags.
+///
+/// \see ovrInitParams, ovr_Initialize
+///
+typedef enum ovrInitFlags_
+{
+    /// When a debug library is requested, a slower debugging version of the library will
+    /// run which can be used to help solve problems in the library and debug application code.
+    ovrInit_Debug          = 0x00000001,
+
+    /// When a version is requested, the LibOVR runtime respects the RequestedMinorVersion
+    /// field and verifies that the RequestedMinorVersion is supported.
+    ovrInit_RequestVersion = 0x00000004,
+
+    // These bits are writable by user code.
+    ovrinit_WritableBits   = 0x00ffffff,
+
+    ovrInit_EnumSize       = 0x7fffffff ///< \internal Force type int32_t.
+} ovrInitFlags;
+
+
+/// Logging levels
+///
+/// \see ovrInitParams, ovrLogCallback
+///
+typedef enum ovrLogLevel_
+{
+    ovrLogLevel_Debug    = 0, ///< Debug-level log event.
+    ovrLogLevel_Info     = 1, ///< Info-level log event.
+    ovrLogLevel_Error    = 2, ///< Error-level log event.
+
+    ovrLogLevel_EnumSize = 0x7fffffff ///< \internal Force type int32_t.
+} ovrLogLevel;
+
+
+/// Signature of the logging callback function pointer type.
+///
+/// \param[in] userData is an arbitrary value specified by the user of ovrInitParams.
+/// \param[in] level is one of the ovrLogLevel constants.
+/// \param[in] message is a UTF8-encoded null-terminated string.
+/// \see ovrInitParams, ovrLogLevel, ovr_Initialize
+///
+typedef void (OVR_CDECL* ovrLogCallback)(uintptr_t userData, int level, const char* message);
+
+
+/// Parameters for ovr_Initialize.
+///
+/// \see ovr_Initialize
+///
+typedef struct OVR_ALIGNAS(8) ovrInitParams_
+{
+    /// Flags from ovrInitFlags to override default behavior.
+    /// Use 0 for the defaults.
+    uint32_t       Flags;
+
+    /// Requests a specific minimum minor version of the LibOVR runtime.
+    /// Flags must include ovrInit_RequestVersion or this will be ignored
+    /// and OVR_MINOR_VERSION will be used.
+    uint32_t       RequestedMinorVersion;
+
+    /// User-supplied log callback function, which may be called at any time
+    /// asynchronously from multiple threads until ovr_Shutdown completes.
+    /// Use NULL to specify no log callback.
+    ovrLogCallback LogCallback;
+
+    /// User-supplied data which is passed as-is to LogCallback. Typically this 
+    /// is used to store an application-specific pointer which is read in the 
+    /// callback function.
+    uintptr_t      UserData;
+
+    /// Relative number of milliseconds to wait for a connection to the server
+    /// before failing. Use 0 for the default timeout.
+    uint32_t       ConnectionTimeoutMS;
+
+    OVR_ON64(OVR_UNUSED_STRUCT_PAD(pad0, 4)) ///< \internal
+
+} ovrInitParams;
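+
+/// <b>Example code</b> (illustrative sketch; `MyLogCallback` is an assumed application-defined function
+/// matching the ovrLogCallback signature)
+///     \code{.cpp}
+///         ovrInitParams initParams = {};
+///         initParams.Flags                 = ovrInit_RequestVersion;
+///         initParams.RequestedMinorVersion = OVR_MINOR_VERSION;
+///         initParams.LogCallback           = MyLogCallback;
+///         initParams.ConnectionTimeoutMS   = 0;   // 0 selects the default timeout
+///
+///         ovrResult result = ovr_Initialize(&initParams);
+///         if (OVR_FAILURE(result)) { /* handle error */ }
+///     \endcode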
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+// -----------------------------------------------------------------------------------
+// ***** API Interfaces
+
+// Overview of the API
+//
+// Setup:
+//  - ovr_Initialize().
+//  - ovr_Create(&hmd, &graphicsId).
+//  - Use hmd members and ovr_GetFovTextureSize() to determine graphics configuration
+//    and ovr_GetRenderDesc() to get per-eye rendering parameters.
+//  - Allocate texture swap chains with ovr_CreateTextureSwapChainDX() or
+//    ovr_CreateTextureSwapChainGL(). Create any associated render target views or
+//    frame buffer objects.
+//
+// Application Loop:
+//  - Call ovr_GetPredictedDisplayTime() to get the current frame timing information.
+//  - Call ovr_GetTrackingState() and ovr_CalcEyePoses() to obtain the predicted
+//    rendering pose for each eye based on timing.
+//  - Render the scene content into the current buffer of the texture swapchains
+//    for each eye and layer you plan to update this frame. If you render into a
+//    texture swap chain, you must call ovr_CommitTextureSwapChain() on it to commit
+//    the changes before you reference the chain this frame (otherwise, your latest
+//    changes won't be picked up).
+//  - Call ovr_SubmitFrame() to render the distorted layers to and present them on the HMD.
+//    If ovr_SubmitFrame returns ovrSuccess_NotVisible, there is no need to render the scene
+//    for the next loop iteration. Instead, just call ovr_SubmitFrame again until it returns
+//    ovrSuccess. 
+//
+// Shutdown:
+//  - ovr_Destroy().
+//  - ovr_Shutdown().
+
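+/// A condensed sketch of this flow (OpenGL path; error handling omitted; `appRunning`, `chain`,
+/// `frameIndex` and `layerList` are placeholders; ovr_CreateTextureSwapChainGL is declared in OVR_CAPI_GL.h):
+///     \code{.cpp}
+///         ovr_Initialize(NULL);
+///
+///         ovrSession session; ovrGraphicsLuid luid;
+///         ovr_Create(&session, &luid);
+///         ovrHmdDesc hmdDesc = ovr_GetHmdDesc(session);
+///
+///         // Size the eye buffers with ovr_GetFovTextureSize(), create swap chains
+///         // (see the ovrTextureSwapChainDesc example above), and query per-eye
+///         // rendering parameters with ovr_GetRenderDesc().
+///
+///         while (appRunning)
+///         {
+///             // Predict poses (see the ovr_GetTrackingState example below) and render both
+///             // eye views into the current swap chain buffers, then:
+///             ovr_CommitTextureSwapChain(session, chain);
+///             // Fill an ovrLayerEyeFov describing what was rendered and submit it:
+///             //   ovr_SubmitFrame(session, frameIndex, NULL, layerList, 1);
+///         }
+///
+///         ovr_DestroyTextureSwapChain(session, chain);
+///         ovr_Destroy(session);
+///         ovr_Shutdown();
+///     \endcode
+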
+
+/// Initializes LibOVR
+///
+/// Initialize LibOVR for application usage. This includes finding and loading the LibOVRRT
+/// shared library. No LibOVR API functions, other than ovr_GetLastErrorInfo and ovr_Detect, can
+/// be called unless ovr_Initialize succeeds. A successful call to ovr_Initialize must be eventually
+/// followed by a call to ovr_Shutdown. ovr_Initialize calls are idempotent.
+/// Calling ovr_Initialize twice does not require two matching calls to ovr_Shutdown.
+/// If already initialized, the return value is ovrSuccess.
+/// 
+/// LibOVRRT shared library search order:
+///      -# Current working directory (often the same as the application directory).
+///      -# Module directory (usually the same as the application directory,
+///         but not if the module is a separate shared library).
+///      -# Application directory
+///      -# Development directory (only if OVR_ENABLE_DEVELOPER_SEARCH is enabled,
+///         which is off by default).
+///      -# Standard OS shared library search location(s) (OS-specific).
+///
+/// \param params Specifies custom initialization options. May be NULL to indicate default options.
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
+///         ovr_GetLastErrorInfo to get more information. Example failed results include:
+///     - ovrError_Initialize: Generic initialization error.
+///     - ovrError_LibLoad: Couldn't load LibOVRRT.
+///     - ovrError_LibVersion: LibOVRRT version incompatibility.
+///     - ovrError_ServiceConnection: Couldn't connect to the OVR Service.
+///     - ovrError_ServiceVersion: OVR Service version incompatibility.
+///     - ovrError_IncompatibleOS: The operating system version is incompatible.
+///     - ovrError_DisplayInit: Unable to initialize the HMD display.
+///     - ovrError_ServerStart:  Unable to start the server. Is it already running?
+///     - ovrError_Reinitialization: Attempted to re-initialize with a different version.
+///
+/// <b>Example code</b>
+///     \code{.cpp}
+///         ovrResult result = ovr_Initialize(NULL);
+///         if(OVR_FAILURE(result)) {
+///             ovrErrorInfo errorInfo;
+///             ovr_GetLastErrorInfo(&errorInfo);
+///             DebugLog("ovr_Initialize failed: %s", errorInfo.ErrorString);
+///             return false;
+///         }
+///         [...]
+///     \endcode
+///
+/// \see ovr_Shutdown
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_Initialize(const ovrInitParams* params);
+
+
+/// Shuts down LibOVR
+///
+/// A successful call to ovr_Initialize must be eventually matched by a call to ovr_Shutdown.
+/// After calling ovr_Shutdown, no LibOVR functions can be called except ovr_GetLastErrorInfo
+/// or another ovr_Initialize. ovr_Shutdown invalidates all pointers, references, and created objects
+/// previously returned by LibOVR functions. The LibOVRRT shared library can be unloaded by
+/// ovr_Shutdown.
+///
+/// \see ovr_Initialize
+///
+OVR_PUBLIC_FUNCTION(void) ovr_Shutdown();
+
+/// Returns information about the most recent failed return value by the
+/// current thread for this library.
+///
+/// This function itself can never generate an error.
+/// The last error is never cleared by LibOVR, but will be overwritten by new errors.
+/// Do not use this call to determine if there was an error in the last API
+/// call as successful API calls don't clear the last ovrErrorInfo.
+/// To avoid any inconsistency, ovr_GetLastErrorInfo should be called immediately
+/// after an API function that returned a failed ovrResult, with no other API
+/// functions called in the interim.
+///
+/// \param[out] errorInfo The last ovrErrorInfo for the current thread.
+///
+/// \see ovrErrorInfo
+///
+OVR_PUBLIC_FUNCTION(void) ovr_GetLastErrorInfo(ovrErrorInfo* errorInfo);
+
+
+/// Returns the version string representing the LibOVRRT version.
+///
+/// The returned string pointer is valid until the next call to ovr_Shutdown.
+///
+/// Note that the returned version string doesn't necessarily match the current
+/// OVR_MAJOR_VERSION, etc., as the returned string refers to the LibOVRRT shared
+/// library version and not the locally compiled interface version.
+///
+/// The format of this string is subject to change in future versions and its contents
+/// should not be interpreted.
+///
+/// \return Returns a UTF8-encoded null-terminated version string.
+///
+OVR_PUBLIC_FUNCTION(const char*) ovr_GetVersionString();
+
+
+/// Writes a message string to the LibOVR tracing mechanism (if enabled).
+///
+/// This message will be passed back to the application via the ovrLogCallback if
+/// it was registered.
+///
+/// \param[in] level One of the ovrLogLevel constants.
+/// \param[in] message A UTF8-encoded null-terminated string.
+/// \return returns the strlen of the message or a negative value if the message is too large.
+///
+/// \see ovrLogLevel, ovrLogCallback
+///
+OVR_PUBLIC_FUNCTION(int) ovr_TraceMessage(int level, const char* message);
+
+
+//-------------------------------------------------------------------------------------
+/// @name HMD Management
+///
+/// Handles the enumeration, creation, destruction, and properties of an HMD (head-mounted display).
+///@{
+
+
+/// Returns information about the current HMD.
+///
+/// ovr_Initialize must have first been called in order for this to succeed, otherwise ovrHmdDesc::Type
+/// will be reported as ovrHmd_None.
+/// 
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create, else NULL in which
+///                case this function detects whether an HMD is present and returns its info if so.
+///
+/// \return Returns an ovrHmdDesc. If the session is NULL and ovrHmdDesc::Type is ovrHmd_None then
+///         no HMD is present.
+///
+OVR_PUBLIC_FUNCTION(ovrHmdDesc) ovr_GetHmdDesc(ovrSession session);
+
+
+/// Returns the number of sensors. 
+///
+/// The number of sensors may change at any time, so this function should be called before use 
+/// as opposed to once on startup.
+/// 
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+///
+/// \return Returns unsigned int count.
+///
+OVR_PUBLIC_FUNCTION(unsigned int) ovr_GetTrackerCount(ovrSession session);
+
+
+/// Returns a given sensor description.
+///
+/// It's possible that sensor desc [0] may indicate an unconnected or non-pose-tracked sensor, but
+/// sensor desc [1] may be connected.
+///
+/// ovr_Initialize must have first been called in order for this to succeed, otherwise the returned
+/// trackerDescArray will be zero-initialized. The data returned by this function can change at runtime.
+/// 
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// 
+/// \param[in] trackerDescIndex Specifies a sensor index. The valid indexes are in the range of 0 to 
+///            the sensor count returned by ovr_GetTrackerCount.
+///
+/// \return Returns ovrTrackerDesc. An empty ovrTrackerDesc will be returned if trackerDescIndex is out of range.
+///
+/// \see ovrTrackerDesc, ovr_GetTrackerCount
+///
+OVR_PUBLIC_FUNCTION(ovrTrackerDesc) ovr_GetTrackerDesc(ovrSession session, unsigned int trackerDescIndex);
+
+
+/// Creates a handle to a VR session.
+///
+/// Upon success the returned ovrSession must be eventually freed with ovr_Destroy when it is no longer needed.
+/// A second call to ovr_Create will result in an error return value if the previous Hmd has not been destroyed.
+///
+/// \param[out] pSession Provides a pointer to an ovrSession which will be written to upon success.
+/// \param[out] luid Provides a system specific graphics adapter identifier that locates which
+/// graphics adapter has the HMD attached. This must match the adapter used by the application
+/// or no rendering output will be possible. This is important for stability on multi-adapter systems. An
+/// application that simply chooses the default adapter will not run reliably on multi-adapter systems.
+/// \return Returns an ovrResult indicating success or failure. Upon failure
+///         the returned pHmd will be NULL.
+///
+/// <b>Example code</b>
+///     \code{.cpp}
+///         ovrSession session;
+///         ovrGraphicsLuid luid;
+///         ovrResult result = ovr_Create(&session, &luid);
+///         if(OVR_FAILURE(result))
+///            ...
+///     \endcode
+///
+/// \see ovr_Destroy
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_Create(ovrSession* pSession, ovrGraphicsLuid* pLuid);
+
+
+/// Destroys the HMD.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \see ovr_Create
+///
+OVR_PUBLIC_FUNCTION(void) ovr_Destroy(ovrSession session);
+
+
+/// Specifies status information for the current session.
+///
+/// \see ovr_GetSessionStatus
+///
+typedef struct ovrSessionStatus_
+{
+    ovrBool IsVisible;    ///< True if the process has VR focus and thus is visible in the HMD.
+    ovrBool HmdPresent;   ///< True if an HMD is present.
+    ovrBool HmdMounted;   ///< True if the HMD is on the user's head.
+    ovrBool DisplayLost;  ///< True if the session is in a display-lost state. See ovr_SubmitFrame.
+    ovrBool ShouldQuit;   ///< True if the application should initiate shutdown.    
+    ovrBool ShouldRecenter;  ///< True if UX has requested re-centering. Must call ovr_ClearShouldRecenterFlag or ovr_RecenterTrackingOrigin.
+} ovrSessionStatus;
+
+
+/// Returns status information for the application.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[out] sessionStatus Provides an ovrSessionStatus that is filled in.
+///
+/// \return Returns an ovrResult indicating success or failure. In the case of
+///         failure, use ovr_GetLastErrorInfo to get more information.
+///         Return values include but aren't limited to:
+///     - ovrSuccess: Completed successfully.
+///     - ovrError_ServiceConnection: The service connection was lost and the application
+///       must destroy the session.
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetSessionStatus(ovrSession session, ovrSessionStatus* sessionStatus);
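+
+/// <b>Example code</b> (illustrative sketch of typical per-frame session status handling;
+/// `requestAppShutdown` is an assumed application-defined function)
+///     \code{.cpp}
+///         ovrSessionStatus sessionStatus;
+///         ovr_GetSessionStatus(session, &sessionStatus);
+///         if (sessionStatus.ShouldQuit)
+///             requestAppShutdown();                    // placeholder for app-specific shutdown
+///         if (sessionStatus.ShouldRecenter)
+///             ovr_RecenterTrackingOrigin(session);     // also clears the ShouldRecenter flag
+///     \endcode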
+
+
+//@}
+
+
+
+//-------------------------------------------------------------------------------------
+/// @name Tracking
+///
+/// Tracking functions handle the position, orientation, and movement of the HMD in space.
+///
+/// All tracking interface functions are thread-safe, allowing tracking state to be sampled
+/// from different threads.
+///
+///@{
+
+
+
+/// Sets the tracking origin type
+///
+/// When the tracking origin is changed, all of the calls that either provide
+/// or accept ovrPosef will use the new tracking origin provided.
+/// 
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] origin Specifies an ovrTrackingOrigin to be used for all ovrPosef
+///
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
+///         ovr_GetLastErrorInfo to get more information.
+///
+/// \see ovrTrackingOrigin, ovr_GetTrackingOriginType
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_SetTrackingOriginType(ovrSession session, ovrTrackingOrigin origin);
+
+
+/// Gets the tracking origin state
+/// 
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+///
+/// \return Returns the ovrTrackingOrigin that was either set by default or previously set by the application.
+///
+/// \see ovrTrackingOrigin, ovr_SetTrackingOriginType
+OVR_PUBLIC_FUNCTION(ovrTrackingOrigin) ovr_GetTrackingOriginType(ovrSession session);
+
+
+/// Re-centers the sensor position and orientation.
+///
+/// This resets the (x,y,z) positional components and the yaw orientation component.
+/// The roll and pitch orientation components are always determined by gravity and cannot
+/// be redefined. All future tracking will report values relative to this new reference position.
+/// If you are using ovrTrackerPoses then you will need to call ovr_GetTrackerPose after 
+/// this, because the sensor position(s) will change as a result of this.
+/// 
+/// The headset cannot be facing vertically upward or downward but rather must be roughly
+/// level; otherwise this function will fail with ovrError_InvalidHeadsetOrientation.
+///
+/// For more info, see the notes on each ovrTrackingOrigin enumeration to understand how
+/// recenter will vary slightly in its behavior based on the current ovrTrackingOrigin setting.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+///
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
+///         ovr_GetLastErrorInfo to get more information. Return values include but aren't limited to:
+///     - ovrSuccess: Completed successfully.
+///     - ovrError_InvalidHeadsetOrientation: The headset was facing an invalid direction when
+///       attempting recentering, such as facing vertically.
+///
+/// \see ovrTrackingOrigin, ovr_GetTrackerPose
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_RecenterTrackingOrigin(ovrSession session);
+
+
+/// Clears the ShouldRecenter status bit in ovrSessionStatus.
+///
+/// Clears the ShouldRecenter status bit in ovrSessionStatus, allowing further recenter 
+/// requests to be detected. Since this is automatically done by ovr_RecenterTrackingOrigin,
+/// this only needs to be called when the application is doing its own re-centering.
+OVR_PUBLIC_FUNCTION(void) ovr_ClearShouldRecenterFlag(ovrSession session);
+
+
+/// Returns tracking state reading based on the specified absolute system time.
+///
+/// Pass an absTime value of 0.0 to request the most recent sensor reading. In this case
+/// both PredictedPose and SamplePose will have the same value.
+///
+/// This may also be used for more refined timing of front buffer rendering logic, and so on.
+/// This may be called by multiple threads.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] absTime Specifies the absolute future time to predict the return
+///            ovrTrackingState value. Use 0 to request the most recent tracking state.
+/// \param[in] latencyMarker Specifies that this call is the point in time where
+///            the "App-to-Mid-Photon" latency timer starts from. If a given ovrLayer
+///            provides "SensorSampleTimestamp", that will override the value stored here.
+/// \return Returns the ovrTrackingState that is predicted for the given absTime.
+///
+/// \see ovrTrackingState, ovr_GetEyePoses, ovr_GetTimeInSeconds
+///
+OVR_PUBLIC_FUNCTION(ovrTrackingState) ovr_GetTrackingState(ovrSession session, double absTime, ovrBool latencyMarker);
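+
+/// <b>Example code</b> (illustrative sketch; ovr_CalcEyePoses is declared in OVR_CAPI_Util.h, and
+/// `eyeRenderDesc[2]` and `frameIndex` are assumed to have been set up elsewhere)
+///     \code{.cpp}
+///         double displayTime = ovr_GetPredictedDisplayTime(session, frameIndex);
+///         ovrTrackingState trackingState = ovr_GetTrackingState(session, displayTime, ovrTrue);
+///
+///         ovrVector3f hmdToEyeOffset[2] = { eyeRenderDesc[0].HmdToEyeOffset,
+///                                           eyeRenderDesc[1].HmdToEyeOffset };
+///         ovrPosef eyePoses[2];
+///         ovr_CalcEyePoses(trackingState.HeadPose.ThePose, hmdToEyeOffset, eyePoses);
+///     \endcode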
+
+
+
+/// Returns the ovrTrackerPose for the given sensor.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] trackerPoseIndex Index of the sensor being requested.
+///
+/// \return Returns the requested ovrTrackerPose. An empty ovrTrackerPose will be returned if trackerPoseIndex is out of range.
+///
+/// \see ovr_GetTrackerCount
+///
+OVR_PUBLIC_FUNCTION(ovrTrackerPose) ovr_GetTrackerPose(ovrSession session, unsigned int trackerPoseIndex);
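+
+/// <b>Example code</b> (illustrative sketch; enumerating the connected sensors)
+///     \code{.cpp}
+///         unsigned int trackerCount = ovr_GetTrackerCount(session);
+///         for (unsigned int i = 0; i < trackerCount; i++)
+///         {
+///             ovrTrackerDesc trackerDesc = ovr_GetTrackerDesc(session, i);
+///             ovrTrackerPose trackerPose = ovr_GetTrackerPose(session, i);
+///             if (trackerPose.TrackerFlags & ovrTracker_Connected)
+///             {
+///                 // Use trackerDesc.FrustumHFovInRadians, trackerPose.LeveledPose, etc.
+///             }
+///         }
+///     \endcode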
+
+
+
+/// Returns the most recent input state for controllers, without positional tracking info.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] controllerType Specifies which controller the input will be returned for.
+/// \param[out] inputState Input state that will be filled in.
+/// \return Returns ovrSuccess if the new state was successfully obtained.
+///
+/// \see ovrControllerType
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetInputState(ovrSession session, ovrControllerType controllerType, ovrInputState* inputState);
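+
+/// <b>Example code</b> (illustrative sketch; polling the Touch controllers each frame)
+///     \code{.cpp}
+///         ovrInputState inputState;
+///         if (OVR_SUCCESS(ovr_GetInputState(session, ovrControllerType_Touch, &inputState)))
+///         {
+///             bool aPressed         = (inputState.Buttons & ovrButton_A) != 0;
+///             bool indexPointing    = (inputState.Touches & ovrTouch_RIndexPointing) != 0;
+///             float rightTrigger    = inputState.IndexTrigger[ovrHand_Right];
+///             ovrVector2f leftStick = inputState.Thumbstick[ovrHand_Left];
+///         }
+///     \endcode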
+
+
+/// Returns controller types connected to the system OR'ed together.
+///
+/// \return A bitmask of ovrControllerTypes connected to the system.
+///
+/// \see ovrControllerType
+///
+OVR_PUBLIC_FUNCTION(unsigned int) ovr_GetConnectedControllerTypes(ovrSession session);
+
+
+/// Turns on vibration of the given controller.
+///
+/// To disable vibration, call ovr_SetControllerVibration with an amplitude of 0.
+/// Vibration automatically stops after a nominal amount of time, so if you want vibration 
+/// to be continuous over multiple seconds then you need to call this function periodically.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] controllerType Specifies the controller to apply the vibration to.
+/// \param[in] frequency Specifies a vibration frequency in the range of 0.0 to 1.0. 
+///            Currently the only valid values are 0.0, 0.5, and 1.0 and other values will
+///            be clamped to one of these.
+/// \param[in] amplitude Specifies a vibration amplitude in the range of 0.0 to 1.0.
+///
+/// \return Returns ovrSuccess upon success.
+///
+/// \see ovrControllerType
+/// 
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_SetControllerVibration(ovrSession session, ovrControllerType controllerType,
+                                                            float frequency, float amplitude);
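+
+/// <b>Example code</b> (illustrative sketch; pulse the right Touch controller while its index trigger is held)
+///     \code{.cpp}
+///         ovrInputState inputState;
+///         ovr_GetInputState(session, ovrControllerType_RTouch, &inputState);
+///         float amplitude = (inputState.IndexTrigger[ovrHand_Right] > 0.5f) ? 1.0f : 0.0f;
+///         ovr_SetControllerVibration(session, ovrControllerType_RTouch, 0.5f, amplitude);
+///     \endcode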
+
+///@}
+
+
+//-------------------------------------------------------------------------------------
+// @name Layers
+//
+///@{
+
+
+///  Specifies the maximum number of layers supported by ovr_SubmitFrame.
+///
+///  \see ovr_SubmitFrame
+///
+enum {
+    ovrMaxLayerCount = 16
+};
+
+/// Describes layer types that can be passed to ovr_SubmitFrame.
+/// Each layer type has an associated struct, such as ovrLayerEyeFov.
+///
+/// \see ovrLayerHeader
+///
+typedef enum ovrLayerType_
+{
+    ovrLayerType_Disabled    = 0,         ///< Layer is disabled.
+    ovrLayerType_EyeFov      = 1,         ///< Described by ovrLayerEyeFov.
+    ovrLayerType_Quad        = 3,         ///< Described by ovrLayerQuad. Previously called ovrLayerType_QuadInWorld.
+    /// enum 4 used to be ovrLayerType_QuadHeadLocked. Instead, use ovrLayerType_Quad with ovrLayerFlag_HeadLocked.
+    ovrLayerType_EyeMatrix   = 5,         ///< Described by ovrLayerEyeMatrix.
+    ovrLayerType_EnumSize    = 0x7fffffff ///< Force type int32_t.
+} ovrLayerType;
+
+
+/// Identifies flags used by ovrLayerHeader and which are passed to ovr_SubmitFrame.
+///
+/// \see ovrLayerHeader
+///
+typedef enum ovrLayerFlags_
+{
+    /// ovrLayerFlag_HighQuality enables 4x anisotropic sampling during the composition of the layer.
+    /// The benefits are mostly visible at the periphery for high-frequency & high-contrast visuals.
+    /// For best results, consider combining this flag with an ovrTextureSwapChain that has mipmaps and,
+    /// instead of using arbitrarily sized textures, prefer texture sizes that are powers of two.
+    /// The actual rendered viewport doesn't necessarily have to fill the whole texture.
+    ovrLayerFlag_HighQuality               = 0x01,
+
+    /// ovrLayerFlag_TextureOriginAtBottomLeft: the opposite is TopLeft.
+    /// Generally this is false for D3D, true for OpenGL.
+    ovrLayerFlag_TextureOriginAtBottomLeft = 0x02,
+
+    /// Mark this surface as "headlocked", which means it is specified
+    /// relative to the HMD and moves with it, rather than being specified
+    /// relative to sensor/torso space and remaining still while the head moves.
+    /// What used to be ovrLayerType_QuadHeadLocked is now ovrLayerType_Quad plus this flag.
+    /// However the flag can be applied to any layer type to achieve a similar effect.
+    ovrLayerFlag_HeadLocked                = 0x04
+
+} ovrLayerFlags;
+
+
+/// Defines properties shared by all ovrLayer structs, such as ovrLayerEyeFov.
+///
+/// ovrLayerHeader is used as a base member in these larger structs.
+/// This struct cannot be used by itself except for the case that Type is ovrLayerType_Disabled.
+///
+/// \see ovrLayerType, ovrLayerFlags
+///
+typedef struct OVR_ALIGNAS(OVR_PTR_SIZE) ovrLayerHeader_
+{
+    ovrLayerType    Type;   ///< Described by ovrLayerType.
+    unsigned        Flags;  ///< Described by ovrLayerFlags.
+} ovrLayerHeader;
+
+
+/// Describes a layer that specifies a monoscopic or stereoscopic view.
+/// This is the kind of layer that's typically used as layer 0 to ovr_SubmitFrame,
+/// as it is the kind of layer used to render a 3D stereoscopic view.
+///
+/// Three options exist with respect to mono/stereo texture usage:
+///    - ColorTexture[0] and ColorTexture[1] contain the left and right stereo renderings, respectively.
+///      Viewport[0] and Viewport[1] refer to ColorTexture[0] and ColorTexture[1], respectively.
+///    - ColorTexture[0] contains both the left and right renderings, ColorTexture[1] is NULL,
+///      and Viewport[0] and Viewport[1] refer to sub-rects within ColorTexture[0].
+///    - ColorTexture[0] contains a single monoscopic rendering, and Viewport[0] and
+///      Viewport[1] both refer to that rendering.
+///
+/// \see ovrTextureSwapChain, ovr_SubmitFrame
+///
+typedef struct OVR_ALIGNAS(OVR_PTR_SIZE) ovrLayerEyeFov_
+{
+    /// Header.Type must be ovrLayerType_EyeFov.
+    ovrLayerHeader      Header;
+
+    /// ovrTextureSwapChains for the left and right eye respectively.
+    /// The second one of which can be NULL for cases described above.
+    ovrTextureSwapChain  ColorTexture[ovrEye_Count];
+
+    /// Specifies the ColorTexture sub-rect UV coordinates.
+    /// Both Viewport[0] and Viewport[1] must be valid.
+    ovrRecti            Viewport[ovrEye_Count];
+
+    /// The viewport field of view.
+    ovrFovPort          Fov[ovrEye_Count];
+
+    /// Specifies the position and orientation of each eye view, with the position specified in meters.
+    /// RenderPose will typically be the value returned from ovr_CalcEyePoses,
+    /// but can be different in special cases if a different head pose is used for rendering.
+    ovrPosef            RenderPose[ovrEye_Count];
+
+    /// Specifies the timestamp when the source ovrPosef (used in calculating RenderPose)
+    /// was sampled from the SDK. Typically retrieved by calling ovr_GetTimeInSeconds
+    /// around the instant the application calls ovr_GetTrackingState.
+    /// The main purpose for this is to accurately track app tracking latency.
+    double              SensorSampleTime;
+
+} ovrLayerEyeFov;
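+
+/// <b>Example code</b> (illustrative sketch; `chain[2]` (one texture swap chain per eye), `eyeViewport`,
+/// `eyePoses`, `hmdDesc` and `sensorSampleTime` are assumed to have been produced earlier in the frame)
+///     \code{.cpp}
+///         ovrLayerEyeFov layer;
+///         layer.Header.Type  = ovrLayerType_EyeFov;
+///         layer.Header.Flags = ovrLayerFlag_TextureOriginAtBottomLeft;   // OpenGL texture origin
+///         for (int eye = 0; eye < ovrEye_Count; eye++)
+///         {
+///             layer.ColorTexture[eye] = chain[eye];
+///             layer.Viewport[eye]     = eyeViewport[eye];
+///             layer.Fov[eye]          = hmdDesc.DefaultEyeFov[eye];
+///             layer.RenderPose[eye]   = eyePoses[eye];
+///         }
+///         layer.SensorSampleTime = sensorSampleTime;
+///     \endcode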
+
+
+
+
+/// Describes a layer that specifies a monoscopic or stereoscopic view.
+/// This uses a direct 3x4 matrix to map from view space to the UV coordinates.
+/// It is essentially the same thing as ovrLayerEyeFov but using a much
+/// lower level. This is mainly to provide compatibility with specific apps.
+/// Unless the application really requires this flexibility, it is usually better
+/// to use ovrLayerEyeFov.
+///
+/// Three options exist with respect to mono/stereo texture usage:
+///    - ColorTexture[0] and ColorTexture[1] contain the left and right stereo renderings, respectively.
+///      Viewport[0] and Viewport[1] refer to ColorTexture[0] and ColorTexture[1], respectively.
+///    - ColorTexture[0] contains both the left and right renderings, ColorTexture[1] is NULL,
+///      and Viewport[0] and Viewport[1] refer to sub-rects within ColorTexture[0].
+///    - ColorTexture[0] contains a single monoscopic rendering, and Viewport[0] and
+///      Viewport[1] both refer to that rendering.
+///
+/// \see ovrTextureSwapChain, ovr_SubmitFrame
+///
+typedef struct OVR_ALIGNAS(OVR_PTR_SIZE) ovrLayerEyeMatrix_
+{
+    /// Header.Type must be ovrLayerType_EyeMatrix.
+    ovrLayerHeader      Header;
+
+    /// ovrTextureSwapChains for the left and right eye respectively.
+    /// The second one of which can be NULL for cases described above.
+    ovrTextureSwapChain  ColorTexture[ovrEye_Count];
+
+    /// Specifies the ColorTexture sub-rect UV coordinates.
+    /// Both Viewport[0] and Viewport[1] must be valid.
+    ovrRecti            Viewport[ovrEye_Count];
+
+    /// Specifies the position and orientation of each eye view, with the position specified in meters.
+    /// RenderPose will typically be the value returned from ovr_CalcEyePoses,
+    /// but can be different in special cases if a different head pose is used for rendering.
+    ovrPosef            RenderPose[ovrEye_Count];
+
+    /// Specifies the mapping from a view-space vector
+    /// to a UV coordinate on the textures given above.
+    /// P = (x,y,z,1)*Matrix
+    /// TexU  = P.x/P.z
+    /// TexV  = P.y/P.z
+    ovrMatrix4f         Matrix[ovrEye_Count];
+
+    /// Specifies the timestamp when the source ovrPosef (used in calculating RenderPose)
+    /// was sampled from the SDK. Typically retrieved by calling ovr_GetTimeInSeconds
+    /// around the instant the application calls ovr_GetTrackingState.
+    /// The main purpose for this is to accurately track app tracking latency.
+    double              SensorSampleTime;
+
+} ovrLayerEyeMatrix;
+
+
+
+
+
+/// Describes a layer of Quad type, which is a single quad in world or viewer space.
+/// It is used for ovrLayerType_Quad. This type of layer represents a single
+/// object placed in the world and not a stereo view of the world itself.
+///
+/// A typical use of ovrLayerType_Quad is to draw a television screen in a room
+/// that for some reason is more convenient to draw as a layer than as part of the main
+/// view in layer 0. For example, it could implement a 3D popup GUI that is drawn at a
+/// higher resolution than layer 0 to improve fidelity of the GUI.
+///
+/// Quad layers are visible from both sides; they are not back-face culled.
+///
+/// \see ovrTextureSwapChain, ovr_SubmitFrame
+///
+typedef struct OVR_ALIGNAS(OVR_PTR_SIZE) ovrLayerQuad_
+{
+    /// Header.Type must be ovrLayerType_Quad.
+    ovrLayerHeader      Header;
+
+    /// Contains a single image, never with any stereo view.
+    ovrTextureSwapChain  ColorTexture;
+
+    /// Specifies the ColorTexture sub-rect UV coordinates.
+    ovrRecti            Viewport;
+
+    /// Specifies the orientation and position of the center point of a Quad layer type.
+    /// The supplied direction is the vector perpendicular to the quad.
+    /// The position is in real-world meters (not the application's virtual world,
+    /// the physical world the user is in) and is relative to the "zero" position
+    /// set by ovr_RecenterTrackingOrigin unless the ovrLayerFlag_HeadLocked flag is used.
+    ovrPosef            QuadPoseCenter;
+
+    /// Width and height (respectively) of the quad in meters.
+    ovrVector2f         QuadSize;
+
+} ovrLayerQuad;
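+
+/// <b>Example code</b> (illustrative sketch; a head-locked 0.6 x 0.4 meter panel two meters in front of
+/// the viewer, using an assumed `hudChain` texture swap chain)
+///     \code{.cpp}
+///         ovrLayerQuad hudLayer;
+///         hudLayer.Header.Type  = ovrLayerType_Quad;
+///         hudLayer.Header.Flags = ovrLayerFlag_HeadLocked;
+///         hudLayer.ColorTexture = hudChain;
+///         hudLayer.Viewport.Pos.x  = 0;    hudLayer.Viewport.Pos.y  = 0;
+///         hudLayer.Viewport.Size.w = 512;  hudLayer.Viewport.Size.h = 384;
+///         hudLayer.QuadPoseCenter.Orientation.x = 0.0f;   // identity orientation
+///         hudLayer.QuadPoseCenter.Orientation.y = 0.0f;
+///         hudLayer.QuadPoseCenter.Orientation.z = 0.0f;
+///         hudLayer.QuadPoseCenter.Orientation.w = 1.0f;
+///         hudLayer.QuadPoseCenter.Position.x = 0.0f;
+///         hudLayer.QuadPoseCenter.Position.y = 0.0f;
+///         hudLayer.QuadPoseCenter.Position.z = -2.0f;     // two meters in front of the viewer
+///         hudLayer.QuadSize.x = 0.6f;                     // quad width in meters
+///         hudLayer.QuadSize.y = 0.4f;                     // quad height in meters
+///     \endcode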
+
+
+
+
+/// Union that combines ovrLayer types in a way that allows them
+/// to be used in a polymorphic way.
+typedef union ovrLayer_Union_
+{
+    ovrLayerHeader      Header;
+    ovrLayerEyeFov      EyeFov;
+    ovrLayerQuad        Quad;
+} ovrLayer_Union;
+
+
+//@}
+
+
+
+/// @name SDK Distortion Rendering
+///
+/// None of the rendering functions, including the configure and frame functions,
+/// are thread safe. It is OK to use ConfigureRendering on one thread and handle
+/// frames on another thread, but explicit synchronization must be done since
+/// functions that depend on configured state are not reentrant.
+///
+/// These functions support rendering of distortion by the SDK.
+///
+//@{
+
+/// TextureSwapChain creation is rendering API-specific.
+/// ovr_CreateTextureSwapChainDX and ovr_CreateTextureSwapChainGL can be found in the
+/// rendering API-specific headers, such as OVR_CAPI_D3D.h and OVR_CAPI_GL.h
+
+/// Gets the number of buffers in an ovrTextureSwapChain.
+///
+/// \param[in]  session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in]  chain Specifies the ovrTextureSwapChain for which the length should be retrieved.
+/// \param[out] out_Length Returns the number of buffers in the specified chain.
+///
+/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error. 
+///
+/// \see ovr_CreateTextureSwapChainDX, ovr_CreateTextureSwapChainGL
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetTextureSwapChainLength(ovrSession session, ovrTextureSwapChain chain, int* out_Length);
+
+/// Gets the current index in an ovrTextureSwapChain.
+///
+/// \param[in]  session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in]  chain Specifies the ovrTextureSwapChain for which the index should be retrieved.
+/// \param[out] out_Index Returns the current (free) index in specified chain.
+///
+/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error. 
+///
+/// \see ovr_CreateTextureSwapChainDX, ovr_CreateTextureSwapChainGL
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetTextureSwapChainCurrentIndex(ovrSession session, ovrTextureSwapChain chain, int* out_Index);
+
+/// Gets the description of the buffers in an ovrTextureSwapChain
+///
+/// \param[in]  session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in]  chain Specifies the ovrTextureSwapChain for which the description should be retrieved.
+/// \param[out] out_Desc Returns the description of the specified chain.
+///
+/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error. 
+///
+/// \see ovr_CreateTextureSwapChainDX, ovr_CreateTextureSwapChainGL
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetTextureSwapChainDesc(ovrSession session, ovrTextureSwapChain chain, ovrTextureSwapChainDesc* out_Desc);
+
+/// Commits any pending changes to an ovrTextureSwapChain, and advances its current index
+///
+/// \param[in]  session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in]  chain Specifies the ovrTextureSwapChain to commit.
+///
+/// \note When Commit is called, the texture at the current index is considered ready for use by the
+/// runtime, and further writes to it should be avoided. The swap chain's current index is advanced,
+/// providing there's room in the chain. The next time the SDK dereferences this texture swap chain,
+/// it will synchronize with the app's graphics context and pick up the submitted index, opening up
+/// room in the swap chain for further commits.
+///
+/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error. 
+///         Failures include but aren't limited to:
+///     - ovrError_TextureSwapChainFull: ovr_CommitTextureSwapChain was called too many times on a texture swapchain without calling submit to use the chain.
+///
+/// \see ovr_CreateTextureSwapChainDX, ovr_CreateTextureSwapChainGL
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_CommitTextureSwapChain(ovrSession session, ovrTextureSwapChain chain);
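+
+/// <b>Example code</b> (illustrative sketch for the OpenGL path; ovr_GetTextureSwapChainBufferGL is
+/// declared in OVR_CAPI_GL.h)
+///     \code{.cpp}
+///         int currentIndex = 0;
+///         ovr_GetTextureSwapChainCurrentIndex(session, chain, &currentIndex);
+///
+///         unsigned int texId = 0;
+///         ovr_GetTextureSwapChainBufferGL(session, chain, currentIndex, &texId);
+///         // ... attach texId to a framebuffer object and render the eye view into it ...
+///
+///         ovr_CommitTextureSwapChain(session, chain);   // make the rendered buffer available to the compositor
+///     \endcode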
+
+/// Destroys an ovrTextureSwapChain and frees all the resources associated with it.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] chain Specifies the ovrTextureSwapChain to destroy. If it is NULL then this function has no effect.
+///
+/// \see ovr_CreateTextureSwapChainDX, ovr_CreateTextureSwapChainGL
+///
+OVR_PUBLIC_FUNCTION(void) ovr_DestroyTextureSwapChain(ovrSession session, ovrTextureSwapChain chain);
+
+
+/// MirrorTexture creation is rendering API-specific.
+/// ovr_CreateMirrorTextureDX and ovr_CreateMirrorTextureGL can be found in the
+/// rendering API-specific headers, such as OVR_CAPI_D3D.h and OVR_CAPI_GL.h
+
+/// Destroys a mirror texture previously created by one of the mirror texture creation functions.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] mirrorTexture Specifies the ovrTexture to destroy. If it is NULL then this function has no effect.
+///
+/// \see ovr_CreateMirrorTextureDX, ovr_CreateMirrorTextureGL
+///
+OVR_PUBLIC_FUNCTION(void) ovr_DestroyMirrorTexture(ovrSession session, ovrMirrorTexture mirrorTexture);
+
+
+/// Calculates the recommended viewport size for rendering a given eye within the HMD
+/// with a given FOV cone.
+///
+/// Higher FOV will generally require larger textures to maintain quality.
+/// Apps packing multiple eye views together on the same texture should ensure there are
+/// at least 8 pixels of padding between them to prevent texture filtering and chromatic
+/// aberration causing images to leak between the two eye views.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] eye Specifies which eye (left or right) to calculate for.
+/// \param[in] fov Specifies the ovrFovPort to use.
+/// \param[in] pixelsPerDisplayPixel Specifies the ratio of the number of render target pixels
+///            to display pixels at the center of distortion. 1.0 is the default value. Lower
+///            values can improve performance, higher values give improved quality.
+///
+/// <b>Example code</b>
+///     \code{.cpp}
+///         ovrHmdDesc hmdDesc = ovr_GetHmdDesc(session);
+///         ovrSizei eyeSizeLeft  = ovr_GetFovTextureSize(session, ovrEye_Left,  hmdDesc.DefaultEyeFov[ovrEye_Left],  1.0f);
+///         ovrSizei eyeSizeRight = ovr_GetFovTextureSize(session, ovrEye_Right, hmdDesc.DefaultEyeFov[ovrEye_Right], 1.0f);
+///     \endcode
+///
+/// \return Returns the texture width and height size.
+///
+OVR_PUBLIC_FUNCTION(ovrSizei) ovr_GetFovTextureSize(ovrSession session, ovrEyeType eye, ovrFovPort fov,
+                                                       float pixelsPerDisplayPixel);
+
+/// Computes the distortion viewport, view adjust, and other rendering parameters for
+/// the specified eye.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] eyeType Specifies which eye (left or right) for which to perform calculations.
+/// \param[in] fov Specifies the ovrFovPort to use.
+///
+/// \return Returns the computed ovrEyeRenderDesc for the given eyeType and field of view.
+///
+/// \see ovrEyeRenderDesc
+///
+OVR_PUBLIC_FUNCTION(ovrEyeRenderDesc) ovr_GetRenderDesc(ovrSession session,
+                                                           ovrEyeType eyeType, ovrFovPort fov);
+
+/// Submits layers for distortion and display.
+///
+/// ovr_SubmitFrame triggers distortion and processing which might happen asynchronously.
+/// The function will return when there is room in the submission queue and surfaces
+/// are available. Distortion might or might not have completed.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+///
+/// \param[in] frameIndex Specifies the targeted application frame index, or 0 to refer to one frame
+///        after the last time ovr_SubmitFrame was called.
+///
+/// \param[in] viewScaleDesc Provides additional information needed only if layerPtrList contains
+///        an ovrLayerType_Quad. If NULL, a default version is used based on the current configuration and a 1.0 world scale.
+///
+/// \param[in] layerPtrList Specifies a list of ovrLayer pointers, which can include NULL entries to
+///        indicate that any previously shown layer at that index is to not be displayed.
+///        Each layer header must be a part of a layer structure such as ovrLayerEyeFov or ovrLayerQuad,
+///        with Header.Type identifying its type. A NULL layerPtrList entry in the array indicates the
+///        absence of the given layer.
+///
+/// \param[in] layerCount Indicates the number of valid elements in layerPtrList. The maximum
+///        supported layerCount is not currently specified, but may be specified in a future version.
+///
+/// - Layers are drawn in the order they are specified in the array, regardless of the layer type.
+///
+/// - Layers are not remembered between successive calls to ovr_SubmitFrame. A layer must be
+///   specified in every call to ovr_SubmitFrame or it won't be displayed.
+///
+/// - If a layerPtrList entry that was specified in a previous call to ovr_SubmitFrame is
+///   passed as NULL or is of type ovrLayerType_Disabled, that layer is no longer displayed.
+///
+/// - A layerPtrList entry can be of any layer type and multiple entries of the same layer type
+///   are allowed. No layerPtrList entry may be duplicated (i.e. the same pointer as an earlier entry).
+///
+/// <b>Example code</b>
+///     \code{.cpp}
+///         ovrLayerEyeFov  layer0;
+///         ovrLayerQuad    layer1;
+///           ...
+///         ovrLayerHeader* layers[2] = { &layer0.Header, &layer1.Header };
+///         ovrResult result = ovr_SubmitFrame(hmd, frameIndex, nullptr, layers, 2);
+///     \endcode
+///
+/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error and true
+///         upon success. Return values include but aren't limited to:
+///     - ovrSuccess: rendering completed successfully.
+///     - ovrSuccess_NotVisible: rendering completed successfully but was not displayed on the HMD,
+///       usually because another application currently has ownership of the HMD. Applications receiving
+///       this result should stop rendering new content, but continue to call ovr_SubmitFrame periodically
+///       until it returns a value other than ovrSuccess_NotVisible.
+///     - ovrError_DisplayLost: The session has become invalid (such as due to a device removal)
+///       and the shared resources need to be released (ovr_DestroyTextureSwapChain), the session needs to
+///       be destroyed (ovr_Destroy) and recreated (ovr_Create), and new resources need to be created
+///       (ovr_CreateTextureSwapChainXXX). The application's existing private graphics resources do not
+///       need to be recreated unless the new ovr_Create call returns a different GraphicsLuid.
+///     - ovrError_TextureSwapChainInvalid: The ovrTextureSwapChain is in an incomplete or inconsistent state. 
+///       Ensure ovr_CommitTextureSwapChain was called at least once first.
+///
+/// \see ovr_GetPredictedDisplayTime, ovrViewScaleDesc, ovrLayerHeader
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_SubmitFrame(ovrSession session, long long frameIndex,
+                                                  const ovrViewScaleDesc* viewScaleDesc,
+                                                  ovrLayerHeader const * const * layerPtrList, unsigned int layerCount);
+///@}
+
+
+
+//-------------------------------------------------------------------------------------
+/// @name Frame Timing
+///
+//@{
+
+
+/// Gets the time of the specified frame midpoint.
+///
+/// Predicts the time at which the given frame will be displayed. The predicted time 
+/// is the middle of the time period during which the corresponding eye images will 
+/// be displayed. 
+///
+/// The application should increment frameIndex for each successively targeted frame,
+/// and pass that index to any relevant OVR functions that need to apply to the frame
+/// identified by that index.
+///
+/// This function is thread-safe and allows for multiple application threads to target
+/// their processing to the same displayed frame.
+/// 
+/// In the event that prediction fails for various reasons (e.g. the display being off
+/// or the app has yet to present any frames), the return value will be the current CPU time.
+/// 
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] frameIndex Identifies the frame the caller wishes to target.
+///            A value of zero returns the next frame index.
+/// \return Returns the absolute frame midpoint time for the given frameIndex. 
+/// \see ovr_GetTimeInSeconds
+///
+OVR_PUBLIC_FUNCTION(double) ovr_GetPredictedDisplayTime(ovrSession session, long long frameIndex);
+
+
+/// Returns global, absolute high-resolution time in seconds.
+///
+/// The time frame of reference for this function is not specified and should not be
+/// depended upon.
+///
+/// \return Returns seconds as a floating point value.
+/// \see ovrPoseStatef, ovrFrameTiming
+///
+OVR_PUBLIC_FUNCTION(double) ovr_GetTimeInSeconds();
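
A minimal sketch of how the two timing calls above are typically combined each frame. It assumes ovr_GetTrackingState, ovrTrackingState and ovrTrue as declared earlier in this header; the loop structure and the appRunning flag are illustrative:

    long long frameIndex = 0;

    while (appRunning)      // hypothetical application loop flag
    {
        // Predict when this frame will reach the display and sample tracking for that time.
        double displayMidpoint = ovr_GetPredictedDisplayTime(session, frameIndex);
        ovrTrackingState ts = ovr_GetTrackingState(session, displayMidpoint, ovrTrue);

        // Optionally measure how far ahead the prediction is from "now".
        double lookAhead = displayMidpoint - ovr_GetTimeInSeconds();
        (void)lookAhead;

        // ... render both eyes using ts.HeadPose and submit with the same frameIndex ...

        frameIndex++;
    }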
+
+
+/// The performance HUD enables the HMD user to see information critical to
+/// the real-time operation of the VR application, such as latency timing
+/// and CPU & GPU performance metrics.
+///
+///     The app can toggle performance HUD modes as follows:
+///     \code{.cpp}
+///         ovrPerfHudMode PerfHudMode = ovrPerfHud_LatencyTiming;
+///         ovr_SetInt(Hmd, OVR_PERF_HUD_MODE, (int)PerfHudMode);
+///     \endcode
+///
+typedef enum ovrPerfHudMode_
+{
+    ovrPerfHud_Off                = 0,  ///< Turns off the performance HUD
+    ovrPerfHud_PerfSummary        = 1,  ///< Shows performance summary and headroom
+    ovrPerfHud_LatencyTiming      = 2,  ///< Shows latency related timing info
+    ovrPerfHud_AppRenderTiming    = 3,  ///< Shows render timing info for application
+    ovrPerfHud_CompRenderTiming   = 4,  ///< Shows render timing info for OVR compositor
+    ovrPerfHud_VersionInfo        = 5,  ///< Shows SDK & HMD version Info
+    ovrPerfHud_Count              = 6,  ///< \internal Count of enumerated elements.
+    ovrPerfHud_EnumSize = 0x7fffffff    ///< \internal Force type int32_t.
+} ovrPerfHudMode;
+
+/// Layer HUD enables the HMD user to see information about a layer
+///
+///     The app can toggle layer HUD modes as follows:
+///     \code{.cpp}
+///         ovrLayerHudMode LayerHudMode = ovrLayerHud_Info;
+///         ovr_SetInt(Hmd, OVR_LAYER_HUD_MODE, (int)LayerHudMode);
+///     \endcode
+///
+typedef enum ovrLayerHudMode_
+{
+    ovrLayerHud_Off = 0, ///< Turns off the layer HUD
+    ovrLayerHud_Info = 1, ///< Shows info about a specific layer
+    ovrLayerHud_EnumSize = 0x7fffffff   ///< \internal Force type int32_t.
+} ovrLayerHudMode;
+
+///@}
+
+/// The debug HUD is provided to help developers gauge and debug the fidelity of their app's
+/// stereo rendering characteristics. Using the provided quad and crosshair guides,
+/// the developer can verify various aspects such as VR tracking units (e.g. meters),
+/// stereo camera-parallax properties (e.g. making sure objects at infinity are rendered
+/// with the proper separation), and measure VR geometry sizes and distances.
+///
+///     The app can toggle the debug HUD modes as follows:
+///     \code{.cpp}
+///         ovrDebugHudStereoMode DebugHudMode = ovrDebugHudStereo_QuadWithCrosshair;
+///         ovr_SetInt(Hmd, OVR_DEBUG_HUD_STEREO_MODE, (int)DebugHudMode);
+///     \endcode
+///
+/// The app can modify the visual properties of the stereo guide (i.e. quad, crosshair)
+/// using the ovr_SetFloatArray function. For a list of tweakable properties,
+/// see the OVR_DEBUG_HUD_STEREO_GUIDE_* keys in the OVR_CAPI_Keys.h header file.
+typedef enum ovrDebugHudStereoMode_
+{
+    ovrDebugHudStereo_Off                 = 0,  ///< Turns off the Stereo Debug HUD
+    ovrDebugHudStereo_Quad                = 1,  ///< Renders Quad in world for Stereo Debugging
+    ovrDebugHudStereo_QuadWithCrosshair   = 2,  ///< Renders Quad+crosshair in world for Stereo Debugging
+    ovrDebugHudStereo_CrosshairAtInfinity = 3,  ///< Renders screen-space crosshair at infinity for Stereo Debugging
+    ovrDebugHudStereo_Count,                    ///< \internal Count of enumerated elements
+
+    ovrDebugHudStereo_EnumSize = 0x7fffffff     ///< \internal Force type int32_t
+} ovrDebugHudStereoMode;
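
A short sketch of tweaking the stereo guide mentioned above, using ovr_SetInt and ovr_SetFloatArray (declared below) with the OVR_DEBUG_HUD_STEREO_* keys from OVR_CAPI_Keys.h (also added by this commit); the specific values are illustrative:

    // Show the quad-with-crosshair guide and tint it green (illustrative values).
    ovr_SetInt(session, OVR_DEBUG_HUD_STEREO_MODE, (int)ovrDebugHudStereo_QuadWithCrosshair);

    float guideColor[4] = { 0.0f, 1.0f, 0.0f, 0.8f };   // RGBA, per OVR_DEBUG_HUD_STEREO_GUIDE_COLOR (float[4])
    ovr_SetFloatArray(session, OVR_DEBUG_HUD_STEREO_GUIDE_COLOR, guideColor, 4);

    float guideSize[2] = { 0.4f, 0.3f };                // guide quad size, per OVR_DEBUG_HUD_STEREO_GUIDE_SIZE (float[2])
    ovr_SetFloatArray(session, OVR_DEBUG_HUD_STEREO_GUIDE_SIZE, guideSize, 2);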
+
+
+
+
+// -----------------------------------------------------------------------------------
+/// @name Property Access
+///
+/// These functions read and write OVR properties. Supported properties
+/// are defined in OVR_CAPI_Keys.h
+///
+//@{
+
+/// Reads a boolean property.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] propertyName The name of the property, which needs to be valid only for the call.
+/// \param[in] defaultVal Specifies the value to return if the property couldn't be read.
+/// \return Returns the property interpreted as a boolean value. Returns defaultVal if
+///         the property doesn't exist.
+OVR_PUBLIC_FUNCTION(ovrBool) ovr_GetBool(ovrSession session, const char* propertyName, ovrBool defaultVal);
+
+/// Writes or creates a boolean property.
+/// If the property wasn't previously a boolean property, it is changed to a boolean property.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] propertyName The name of the property, which needs to be valid only for the call.
+/// \param[in] value The value to write.
+/// \return Returns true if successful, otherwise false. A false result should only occur if the property
+///         name is empty or if the property is read-only.
+OVR_PUBLIC_FUNCTION(ovrBool) ovr_SetBool(ovrSession session, const char* propertyName, ovrBool value);
+
+
+/// Reads an integer property.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] propertyName The name of the property, which needs to be valid only for the call.
+/// \param[in] defaultVal Specifies the value to return if the property couldn't be read.
+/// \return Returns the property interpreted as an integer value. Returns defaultVal if
+///         the property doesn't exist.
+OVR_PUBLIC_FUNCTION(int) ovr_GetInt(ovrSession session, const char* propertyName, int defaultVal);
+
+/// Writes or creates an integer property.
+///
+/// If the property wasn't previously an integer property, it is changed to an integer property.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] propertyName The name of the property, which needs to be valid only for the call.
+/// \param[in] value The value to write.
+/// \return Returns true if successful, otherwise false. A false result should only occur if the property
+///         name is empty or if the property is read-only.
+OVR_PUBLIC_FUNCTION(ovrBool) ovr_SetInt(ovrSession session, const char* propertyName, int value);
+
+
+/// Reads a float property.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] propertyName The name of the property, which needs to be valid only for the call.
+/// \param[in] defaultVal Specifies the value to return if the property couldn't be read.
+/// \return Returns the property interpreted as a float value. Returns defaultVal if
+///         the property doesn't exist.
+OVR_PUBLIC_FUNCTION(float) ovr_GetFloat(ovrSession session, const char* propertyName, float defaultVal);
+
+/// Writes or creates a float property.
+/// If the property wasn't previously a float property, it's changed to a float property.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] propertyName The name of the property, which needs to be valid only for the call.
+/// \param[in] value The value to write.
+/// \return Returns true if successful, otherwise false. A false result should only occur if the property
+///         name is empty or if the property is read-only.
+OVR_PUBLIC_FUNCTION(ovrBool) ovr_SetFloat(ovrSession session, const char* propertyName, float value);
+
+
+/// Reads a float array property.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] propertyName The name of the property, which needs to be valid only for the call.
+/// \param[in] values An array of floats to write to.
+/// \param[in] valuesCapacity Specifies the maximum number of elements to write to the values array.
+/// \return Returns the number of elements read, or 0 if property doesn't exist or is empty.
+OVR_PUBLIC_FUNCTION(unsigned int) ovr_GetFloatArray(ovrSession session, const char* propertyName,
+                                                       float values[], unsigned int valuesCapacity);
+
+/// Writes or creates a float array property.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] propertyName The name of the property, which needs to be valid only for the call.
+/// \param[in] values An array of floats to write from.
+/// \param[in] valuesSize Specifies the number of elements to write.
+/// \return Returns true if successful, otherwise false. A false result should only occur if the property
+///         name is empty or if the property is read-only.
+OVR_PUBLIC_FUNCTION(ovrBool) ovr_SetFloatArray(ovrSession session, const char* propertyName,
+                                                  const float values[], unsigned int valuesSize);
+
+
+/// Reads a string property.
+/// Strings are UTF8-encoded and null-terminated.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] propertyName The name of the property, which needs to be valid only for the call.
+/// \param[in] defaultVal Specifies the value to return if the property couldn't be read.
+/// \return Returns the string property if it exists. Otherwise returns defaultVal, which can be specified as NULL.
+///         The return memory is guaranteed to be valid until the next call to ovr_GetString or
+///         until the HMD is destroyed, whichever occurs first.
+OVR_PUBLIC_FUNCTION(const char*) ovr_GetString(ovrSession session, const char* propertyName,
+                                                  const char* defaultVal);
+
+/// Writes or creates a string property.
+/// Strings are UTF8-encoded and null-terminated.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] propertyName The name of the property, which needs to be valid only for the call.
+/// \param[in] value The string property, which only needs to be valid for the duration of the call.
+/// \return Returns true if successful, otherwise false. A false result should only occur if the property
+///         name is empty or if the property is read-only.
+OVR_PUBLIC_FUNCTION(ovrBool) ovr_SetString(ovrSession session, const char* propertyName,
+                                              const char* value);
+
+///@}
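
A brief sketch of the property getters and setters above, using keys and defaults from OVR_CAPI_Keys.h (included later in this commit); the values written here are illustrative:

    // Read a two-element float array property, keeping the documented defaults as fallback.
    float neckToEye[2] = { OVR_DEFAULT_NECK_TO_EYE_HORIZONTAL, OVR_DEFAULT_NECK_TO_EYE_VERTICAL };
    unsigned int count = ovr_GetFloatArray(session, OVR_KEY_NECK_TO_EYE_DISTANCE, neckToEye, 2);
    if (count == 0) { /* property missing or empty: keep the defaults */ }

    // Read a string property; the returned pointer is only valid until the next ovr_GetString call.
    const char *userName = ovr_GetString(session, OVR_KEY_NAME, "Unknown");
    (void)userName;

    // Write an integer property (here: enable the latency-timing performance HUD).
    ovr_SetInt(session, OVR_PERF_HUD_MODE, (int)ovrPerfHud_LatencyTiming);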
+
+
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+
+#if defined(_MSC_VER)
+    #pragma warning(pop)
+#endif
+
+/// @cond DoxygenIgnore
+//-----------------------------------------------------------------------------
+// ***** Compiler packing validation
+//
+// These checks ensure that the compiler settings being used will be compatible
+// with the pre-built dynamic library provided with the runtime.
+
+OVR_STATIC_ASSERT(sizeof(ovrBool) == 1,         "ovrBool size mismatch");
+OVR_STATIC_ASSERT(sizeof(ovrVector2i) == 4 * 2, "ovrVector2i size mismatch");
+OVR_STATIC_ASSERT(sizeof(ovrSizei) == 4 * 2,    "ovrSizei size mismatch");
+OVR_STATIC_ASSERT(sizeof(ovrRecti) == sizeof(ovrVector2i) + sizeof(ovrSizei), "ovrRecti size mismatch");
+OVR_STATIC_ASSERT(sizeof(ovrQuatf) == 4 * 4,    "ovrQuatf size mismatch");
+OVR_STATIC_ASSERT(sizeof(ovrVector2f) == 4 * 2, "ovrVector2f size mismatch");
+OVR_STATIC_ASSERT(sizeof(ovrVector3f) == 4 * 3, "ovrVector3f size mismatch");
+OVR_STATIC_ASSERT(sizeof(ovrMatrix4f) == 4 * 16, "ovrMatrix4f size mismatch");
+
+OVR_STATIC_ASSERT(sizeof(ovrPosef) == (7 * 4),       "ovrPosef size mismatch");
+OVR_STATIC_ASSERT(sizeof(ovrPoseStatef) == (22 * 4), "ovrPoseStatef size mismatch");
+OVR_STATIC_ASSERT(sizeof(ovrFovPort) == (4 * 4),     "ovrFovPort size mismatch");
+
+OVR_STATIC_ASSERT(sizeof(ovrHmdCaps) == 4,      "ovrHmdCaps size mismatch");
+OVR_STATIC_ASSERT(sizeof(ovrTrackingCaps) == 4, "ovrTrackingCaps size mismatch");
+OVR_STATIC_ASSERT(sizeof(ovrEyeType) == 4,      "ovrEyeType size mismatch");
+OVR_STATIC_ASSERT(sizeof(ovrHmdType) == 4,      "ovrHmdType size mismatch");
+
+OVR_STATIC_ASSERT(sizeof(ovrTrackerDesc) == 4 + 4 + 4 + 4, "ovrTrackerDesc size mismatch");
+OVR_STATIC_ASSERT(sizeof(ovrTrackerPose) == 4 + 4 + sizeof(ovrPosef) + sizeof(ovrPosef), "ovrTrackerPose size mismatch");
+OVR_STATIC_ASSERT(sizeof(ovrTrackingState) == sizeof(ovrPoseStatef) + 4 + 4 + (sizeof(ovrPoseStatef) * 2) + (sizeof(unsigned int) * 2) + sizeof(ovrPosef) + 4, "ovrTrackingState size mismatch");
+
+
+//OVR_STATIC_ASSERT(sizeof(ovrTextureHeader) == sizeof(ovrRenderAPIType) + sizeof(ovrSizei),
+//                      "ovrTextureHeader size mismatch");
+//OVR_STATIC_ASSERT(sizeof(ovrTexture) == sizeof(ovrTextureHeader) OVR_ON64(+4) + sizeof(uintptr_t) * 8,
+//                      "ovrTexture size mismatch");
+//
+OVR_STATIC_ASSERT(sizeof(ovrStatusBits) == 4, "ovrStatusBits size mismatch");
+
+OVR_STATIC_ASSERT(sizeof(ovrSessionStatus) == 6, "ovrSessionStatus size mismatch");
+
+OVR_STATIC_ASSERT(sizeof(ovrEyeRenderDesc) == sizeof(ovrEyeType) + sizeof(ovrFovPort) + sizeof(ovrRecti) +
+                                                  sizeof(ovrVector2f) + sizeof(ovrVector3f),
+                      "ovrEyeRenderDesc size mismatch");
+OVR_STATIC_ASSERT(sizeof(ovrTimewarpProjectionDesc) == 4 * 3, "ovrTimewarpProjectionDesc size mismatch");
+
+OVR_STATIC_ASSERT(sizeof(ovrInitFlags) == 4, "ovrInitFlags size mismatch");
+OVR_STATIC_ASSERT(sizeof(ovrLogLevel) == 4, "ovrLogLevel size mismatch");
+
+OVR_STATIC_ASSERT(sizeof(ovrInitParams) == 4 + 4 + sizeof(ovrLogCallback) + sizeof(uintptr_t) + 4 + 4,
+                      "ovrInitParams size mismatch");
+
+OVR_STATIC_ASSERT(sizeof(ovrHmdDesc) == 
+    + sizeof(ovrHmdType)                // Type
+    OVR_ON64(+ 4)                       // pad0
+    + 64                                // ProductName 
+    + 64                                // Manufacturer
+    + 2                                 // VendorId
+    + 2                                 // ProductId
+    + 24                                // SerialNumber
+    + 2                                 // FirmwareMajor
+    + 2                                 // FirmwareMinor
+    + 4 * 4                             // AvailableHmdCaps - DefaultTrackingCaps
+    + sizeof(ovrFovPort) * 2            // DefaultEyeFov
+    + sizeof(ovrFovPort) * 2            // MaxEyeFov
+    + sizeof(ovrSizei)                  // Resolution
+    + 4                                 // DisplayRefreshRate
+    OVR_ON64(+ 4)                       // pad1
+    , "ovrHmdDesc size mismatch");
+
+
+// -----------------------------------------------------------------------------------
+// ***** Backward compatibility #includes
+//
+// This is at the bottom of this file because the following is dependent on the
+// declarations above.
+
+#if !defined(OVR_CAPI_NO_UTILS)
+    #include "Extras/OVR_CAPI_Util.h"
+#endif
+
+/// @endcond
+
+#endif // OVR_CAPI_h

+ 76 - 0
src/external/OculusSDK/LibOVR/Include/OVR_CAPI_Audio.h

@@ -0,0 +1,76 @@
+/********************************************************************************//**
+\file      OVR_CAPI_Audio.h
+\brief     CAPI audio functions.
+\copyright Copyright 2015 Oculus VR, LLC. All Rights reserved.
+************************************************************************************/
+
+
+#ifndef OVR_CAPI_Audio_h
+#define OVR_CAPI_Audio_h
+
+#ifdef _WIN32
+#include <windows.h>
+#include "OVR_CAPI.h"
+#define OVR_AUDIO_MAX_DEVICE_STR_SIZE 128
+
+/// Gets the ID of the preferred VR audio output device.
+///
+/// \param[out] deviceOutId The ID of the user's preferred VR audio device to use, which will be valid upon a successful return value, else it will be WAVE_MAPPER.
+///
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
+///         ovr_GetLastErrorInfo to get more information.
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetAudioDeviceOutWaveId(UINT* deviceOutId);
+
+/// Gets the ID of the preferred VR audio input device.
+///
+/// \param[out] deviceInId The ID of the user's preferred VR audio device to use, which will be valid upon a successful return value, else it will be WAVE_MAPPER.
+///
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
+///         ovr_GetLastErrorInfo to get more information.
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetAudioDeviceInWaveId(UINT* deviceInId);
+
+
+/// Gets the GUID of the preferred VR audio device as a string.
+///
+/// \param[out] deviceOutStrBuffer A buffer where the GUID string for the device will be copied to.
+///
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
+///         ovr_GetLastErrorInfo to get more information.
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetAudioDeviceOutGuidStr(WCHAR deviceOutStrBuffer[OVR_AUDIO_MAX_DEVICE_STR_SIZE]);
+
+
+/// Gets the GUID of the preferred VR audio device.
+///
+/// \param[out] deviceOutGuid The GUID of the user's preferred VR audio device to use, which will be valid upon a successful return value, else it will be NULL.
+///
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
+///         ovr_GetLastErrorInfo to get more information.
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetAudioDeviceOutGuid(GUID* deviceOutGuid);
+
+
+/// Gets the GUID of the preferred VR microphone device as a string.
+///
+/// \param[out] deviceInStrBuffer A buffer where the GUID string for the device will be copied to.
+///
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
+///         ovr_GetLastErrorInfo to get more information.
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetAudioDeviceInGuidStr(WCHAR deviceInStrBuffer[OVR_AUDIO_MAX_DEVICE_STR_SIZE]);
+
+
+/// Gets the GUID of the preferred VR microphone device.
+///
+/// \param[out] deviceInGuid The GUID of the user's preferred VR audio device to use, which will be valid upon a successful return value, else it will be NULL.
+///
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
+///         ovr_GetLastErrorInfo to get more information.
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetAudioDeviceInGuid(GUID* deviceInGuid);
+
+#endif // _WIN32
+
+#endif    // OVR_CAPI_Audio_h
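
A minimal sketch of querying the preferred playback device with the functions above (Windows only, per the _WIN32 guard). WAVE_MAPPER comes from windows.h and OVR_FAILURE from OVR_ErrorCode.h; the fallback behaviour shown is illustrative:

    UINT waveOutId = WAVE_MAPPER;
    if (OVR_FAILURE(ovr_GetAudioDeviceOutWaveId(&waveOutId)))
    {
        // Fall back to the system default device; see ovr_GetLastErrorInfo for details.
        waveOutId = WAVE_MAPPER;
    }

    // The GUID-string variant is useful for APIs that select devices by identifier string.
    WCHAR deviceGuidStr[OVR_AUDIO_MAX_DEVICE_STR_SIZE];
    ovr_GetAudioDeviceOutGuidStr(deviceGuidStr);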

+ 155 - 0
src/external/OculusSDK/LibOVR/Include/OVR_CAPI_D3D.h

@@ -0,0 +1,155 @@
+/********************************************************************************//**
+\file      OVR_CAPI_D3D.h
+\brief     D3D specific structures used by the CAPI interface.
+\copyright Copyright 2014-2016 Oculus VR, LLC All Rights reserved.
+************************************************************************************/
+
+#ifndef OVR_CAPI_D3D_h
+#define OVR_CAPI_D3D_h
+
+#include "OVR_CAPI.h"
+#include "OVR_Version.h"
+
+
+#if defined (_WIN32)
+#include <Unknwn.h>
+
+//-----------------------------------------------------------------------------------
+// ***** Direct3D Specific
+
+/// Create Texture Swap Chain suitable for use with Direct3D 11 and 12.
+///
+/// \param[in]  session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in]  d3dPtr Specifies the application's D3D11Device to create resources with or the D3D12CommandQueue
+///             which must be the same one the application renders to the eye textures with.
+/// \param[in]  desc Specifies requested texture properties, including the ovrTextureBindFlags the
+///             application requires for this texture chain. See notes for more info about texture format.
+/// \param[out] out_TextureSwapChain Returns the created ovrTextureSwapChain, which will be valid upon a successful return value, else it will be NULL.
+///             This texture chain must be eventually destroyed via ovr_DestroyTextureSwapChain before destroying the HMD with ovr_Destroy.
+///
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use 
+///         ovr_GetLastErrorInfo to get more information.
+///
+/// \note The texture format provided in \a desc should be thought of as the format the distortion-compositor will use for the
+/// ShaderResourceView when reading the contents of the texture. To that end, it is highly recommended that the application
+/// requests texture swapchain formats that are in sRGB-space (e.g. OVR_FORMAT_R8G8B8A8_UNORM_SRGB) as the compositor
+/// does sRGB-correct rendering. As such, the compositor relies on the GPU's hardware sampler to do the sRGB-to-linear
+/// conversion. If the application still prefers to render to a linear format (e.g. OVR_FORMAT_R8G8B8A8_UNORM) while handling the
+/// linear-to-gamma conversion via HLSL code, then the application must still request the corresponding sRGB format and also use
+/// the \a ovrTextureMisc_DX_Typeless flag in the ovrTextureSwapChainDesc's Flag field. This will allow the application to create
+/// a RenderTargetView that is the desired linear format while the compositor continues to treat it as sRGB. Failure to do so
+/// will cause the compositor to apply unexpected gamma conversions leading to gamma-curve artifacts. The \a ovrTextureMisc_DX_Typeless
+/// flag for depth buffer formats (e.g. OVR_FORMAT_D32_FLOAT) is ignored as they are always converted to be typeless.
+///
+/// \see ovr_GetTextureSwapChainLength
+/// \see ovr_GetTextureSwapChainCurrentIndex
+/// \see ovr_GetTextureSwapChainDesc
+/// \see ovr_GetTextureSwapChainBufferDX
+/// \see ovr_DestroyTextureSwapChain
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_CreateTextureSwapChainDX(ovrSession session,
+                                                            IUnknown* d3dPtr,
+                                                            const ovrTextureSwapChainDesc* desc,
+                                                            ovrTextureSwapChain* out_TextureSwapChain);
+
+
+/// Get a specific buffer within the chain as any compatible COM interface (similar to QueryInterface)
+///
+/// \param[in]  session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in]  chain Specifies an ovrTextureSwapChain previously returned by ovr_CreateTextureSwapChainDX
+/// \param[in]  index Specifies the index within the chain to retrieve. Must be between 0 and length (see ovr_GetTextureSwapChainLength),
+///             or may pass -1 to get the buffer at the CurrentIndex location. (Saving a call to GetTextureSwapChainCurrentIndex)
+/// \param[in]  iid Specifies the interface ID of the interface pointer to query the buffer for.
+/// \param[out] out_Buffer Returns the COM interface pointer retrieved.
+///
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use 
+///         ovr_GetLastErrorInfo to get more information.
+///
+/// <b>Example code</b>
+///     \code{.cpp}
+///         ovr_GetTextureSwapChainBufferDX(session, chain, 0, IID_ID3D11Texture2D, &d3d11Texture);
+///         ovr_GetTextureSwapChainBufferDX(session, chain, 1, IID_PPV_ARGS(&dxgiResource));
+///     \endcode
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetTextureSwapChainBufferDX(ovrSession session,
+                                                               ovrTextureSwapChain chain,
+                                                               int index,
+                                                               IID iid,
+                                                               void** out_Buffer);
+
+
+/// Create Mirror Texture which is auto-refreshed to mirror Rift contents produced by this application.
+///
+/// A second call to ovr_CreateMirrorTextureDX for a given ovrSession before destroying the first one
+/// is not supported and will result in an error return.
+///
+/// \param[in]  session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in]  d3dPtr Specifies the application's D3D11Device to create resources with or the D3D12CommandQueue
+///             which must be the same one the application renders to the textures with.
+/// \param[in]  desc Specifies requested texture properties. See notes for more info about texture format.
+/// \param[out] out_MirrorTexture Returns the created ovrMirrorTexture, which will be valid upon a successful return value, else it will be NULL.
+///             This texture must be eventually destroyed via ovr_DestroyMirrorTexture before destroying the HMD with ovr_Destroy.
+///
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use 
+///         ovr_GetLastErrorInfo to get more information.
+///
+/// \note The texture format provided in \a desc should be thought of as the format the compositor will use for the RenderTargetView when
+/// writing into mirror texture. To that end, it is highly recommended that the application requests a mirror texture format that is
+/// in sRGB-space (e.g. OVR_FORMAT_R8G8B8A8_UNORM_SRGB) as the compositor does sRGB-correct rendering. If however the application wants
+/// to still read the mirror texture as a linear format (e.g. OVR_FORMAT_R8G8B8A8_UNORM) and handle the sRGB-to-linear conversion in
+/// HLSL code, then it is recommended the application still requests an sRGB format and also use the \a ovrTextureMisc_DX_Typeless flag in the
+/// ovrMirrorTextureDesc's Flags field. This will allow the application to bind a ShaderResourceView that is a linear format while the
+/// compositor continues to treat it as sRGB. Failure to do so will cause the compositor to apply unexpected gamma conversions leading to 
+/// gamma-curve artifacts.
+///
+///
+/// <b>Example code</b>
+///     \code{.cpp}
+///         ovrMirrorTexture     mirrorTexture = nullptr;
+///         ovrMirrorTextureDesc mirrorDesc = {};
+///         mirrorDesc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
+///         mirrorDesc.Width  = mirrorWindowWidth;
+///         mirrorDesc.Height = mirrorWindowHeight;
+///         ovrResult result = ovr_CreateMirrorTextureDX(session, d3d11Device, &mirrorDesc, &mirrorTexture);
+///         [...]
+///         // Destroy the texture when done with it.
+///         ovr_DestroyMirrorTexture(session, mirrorTexture);
+///         mirrorTexture = nullptr;
+///     \endcode
+///
+/// \see ovr_GetMirrorTextureBufferDX
+/// \see ovr_DestroyMirrorTexture
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_CreateMirrorTextureDX(ovrSession session,
+                                                         IUnknown* d3dPtr,
+                                                         const ovrMirrorTextureDesc* desc,
+                                                         ovrMirrorTexture* out_MirrorTexture);
+
+/// Get the underlying buffer as any compatible COM interface (similar to QueryInterface)
+///
+/// \param[in]  session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in]  mirrorTexture Specifies an ovrMirrorTexture previously returned by ovr_CreateMirrorTextureDX
+/// \param[in]  iid Specifies the interface ID of the interface pointer to query the buffer for.
+/// \param[out] out_Buffer Returns the COM interface pointer retrieved.
+///
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use 
+///         ovr_GetLastErrorInfo to get more information.
+///
+/// <b>Example code</b>
+///     \code{.cpp}
+///         ID3D11Texture2D* d3d11Texture = nullptr;
+///         ovr_GetMirrorTextureBufferDX(session, mirrorTexture, IID_PPV_ARGS(&d3d11Texture));
+///         d3d11DeviceContext->CopyResource(d3d11TextureBackBuffer, d3d11Texture);
+///         d3d11Texture->Release();
+///         dxgiSwapChain->Present(0, 0);
+///     \endcode
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetMirrorTextureBufferDX(ovrSession session,
+                                                            ovrMirrorTexture mirrorTexture,
+                                                            IID iid,
+                                                            void** out_Buffer);
+
+
+#endif // _WIN32
+
+#endif    // OVR_CAPI_D3D_h

+ 99 - 0
src/external/OculusSDK/LibOVR/Include/OVR_CAPI_GL.h

@@ -0,0 +1,99 @@
+/********************************************************************************//**
+\file      OVR_CAPI_GL.h
+\brief     OpenGL-specific structures used by the CAPI interface.
+\copyright Copyright 2015 Oculus VR, LLC. All Rights reserved.
+************************************************************************************/
+
+#ifndef OVR_CAPI_GL_h
+#define OVR_CAPI_GL_h
+
+#include "OVR_CAPI.h"
+
+/// Creates a TextureSwapChain suitable for use with OpenGL.
+///
+/// \param[in]  session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in]  desc Specifies the requested texture properties. See notes for more info about texture format.
+/// \param[out] out_TextureSwapChain Returns the created ovrTextureSwapChain, which will be valid upon
+///             a successful return value, else it will be NULL. This texture swap chain must be eventually
+///             destroyed via ovr_DestroyTextureSwapChain before destroying the HMD with ovr_Destroy.
+///
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use 
+///         ovr_GetLastErrorInfo to get more information.
+///
+/// \note The \a format provided should be thought of as the format the distortion compositor will use when reading
+/// the contents of the texture. To that end, it is highly recommended that the application requests texture swap chain
+/// formats that are in sRGB-space (e.g. OVR_FORMAT_R8G8B8A8_UNORM_SRGB) as the distortion compositor does sRGB-correct
+/// rendering. Furthermore, the app should then make sure "glEnable(GL_FRAMEBUFFER_SRGB);" is called before rendering
+/// into these textures. Even though it is not recommended, if the application would like to treat the texture as a linear
+/// format and do linear-to-gamma conversion in GLSL, then the application can avoid calling "glEnable(GL_FRAMEBUFFER_SRGB);",
+/// but should still pass in an sRGB variant for the \a format. Failure to do so will cause the distortion compositor
+/// to apply incorrect gamma conversions leading to gamma-curve artifacts.
+///
+/// \see ovr_GetTextureSwapChainLength
+/// \see ovr_GetTextureSwapChainCurrentIndex
+/// \see ovr_GetTextureSwapChainDesc
+/// \see ovr_GetTextureSwapChainBufferGL
+/// \see ovr_DestroyTextureSwapChain
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_CreateTextureSwapChainGL(ovrSession session,
+                                                            const ovrTextureSwapChainDesc* desc,
+                                                            ovrTextureSwapChain* out_TextureSwapChain);
+
+/// Get a specific buffer within the chain as a GL texture name
+///
+/// \param[in]  session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in]  chain Specifies an ovrTextureSwapChain previously returned by ovr_CreateTextureSwapChainGL
+/// \param[in]  index Specifies the index within the chain to retrieve. Must be between 0 and length (see ovr_GetTextureSwapChainLength)
+///             or may pass -1 to get the buffer at the CurrentIndex location. (Saving a call to GetTextureSwapChainCurrentIndex)
+/// \param[out] out_TexId Returns the GL texture object name associated with the specific index requested
+///
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use 
+///         ovr_GetLastErrorInfo to get more information.
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetTextureSwapChainBufferGL(ovrSession session,
+                                                               ovrTextureSwapChain chain,
+                                                               int index,
+                                                               unsigned int* out_TexId);
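
A condensed sketch of creating and using a per-eye swap chain with the two functions above. It assumes the ovrTextureSwapChainDesc fields, ovrTexture_2D, and ovr_CommitTextureSwapChain declared in OVR_CAPI.h; eyeWidth/eyeHeight are placeholders and framebuffer setup is elided:

    // Request an sRGB format so the compositor's sRGB-correct path works as described in the note above.
    ovrTextureSwapChainDesc desc = {0};
    desc.Type        = ovrTexture_2D;
    desc.Format      = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
    desc.ArraySize   = 1;
    desc.Width       = eyeWidth;        // per-eye render target size (computed elsewhere)
    desc.Height      = eyeHeight;
    desc.MipLevels   = 1;
    desc.SampleCount = 1;

    ovrTextureSwapChain chain = NULL;
    if (OVR_SUCCESS(ovr_CreateTextureSwapChainGL(session, &desc, &chain)))
    {
        glEnable(GL_FRAMEBUFFER_SRGB);                                 // see the note above

        // Each frame: render into the current buffer, then commit it before ovr_SubmitFrame.
        unsigned int texId = 0;
        ovr_GetTextureSwapChainBufferGL(session, chain, -1, &texId);   // -1 -> buffer at CurrentIndex
        // ... attach texId to an FBO and render the eye view ...
        ovr_CommitTextureSwapChain(session, chain);
    }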
+
+
+/// Creates a Mirror Texture which is auto-refreshed to mirror Rift contents produced by this application.
+///
+/// A second call to ovr_CreateMirrorTextureGL for a given ovrSession before destroying the first one
+/// is not supported and will result in an error return.
+///
+/// \param[in]  session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in]  desc Specifies the requested mirror texture description.
+/// \param[out] out_MirrorTexture Specifies the created ovrMirrorTexture, which will be valid upon a successful return value, else it will be NULL.
+///             This texture must be eventually destroyed via ovr_DestroyMirrorTexture before destroying the HMD with ovr_Destroy.
+///
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use 
+///         ovr_GetLastErrorInfo to get more information.
+///
+/// \note The \a format provided should be thought of as the format the distortion compositor will use when writing into the mirror
+/// texture. It is highly recommended that mirror textures are requested as sRGB formats because the distortion compositor
+/// does sRGB-correct rendering. If the application requests a non-sRGB format (e.g. R8G8B8A8_UNORM) as the mirror texture,
+/// then the application might have to apply a manual linear-to-gamma conversion when reading from the mirror texture.
+/// Failure to do so can result in incorrect gamma conversions leading to gamma-curve artifacts and color banding.
+///
+/// \see ovr_GetMirrorTextureBufferGL
+/// \see ovr_DestroyMirrorTexture
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_CreateMirrorTextureGL(ovrSession session,
+                                                         const ovrMirrorTextureDesc* desc,
+                                                         ovrMirrorTexture* out_MirrorTexture);
+
+/// Get the underlying buffer as a GL texture name
+///
+/// \param[in]  session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in]  mirrorTexture Specifies an ovrMirrorTexture previously returned by ovr_CreateMirrorTextureGL
+/// \param[out] out_TexId Specifies the GL texture object name associated with the mirror texture
+///
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use 
+///         ovr_GetLastErrorInfo to get more information.
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetMirrorTextureBufferGL(ovrSession session,
+                                                            ovrMirrorTexture mirrorTexture,
+                                                            unsigned int* out_TexId);
+
+
+#endif    // OVR_CAPI_GL_h
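
For completeness, a sketch of mirroring to the window with the two mirror-texture functions above. The framebuffer object (mirrorFBO) and the window size values are placeholders, and the blit uses standard OpenGL calls:

    // One-time setup: create the mirror texture and attach it to a read framebuffer.
    ovrMirrorTextureDesc mirrorDesc = {0};
    mirrorDesc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
    mirrorDesc.Width  = windowWidth;
    mirrorDesc.Height = windowHeight;

    ovrMirrorTexture mirrorTexture = NULL;
    ovr_CreateMirrorTextureGL(session, &mirrorDesc, &mirrorTexture);

    unsigned int mirrorTexId = 0;
    ovr_GetMirrorTextureBufferGL(session, mirrorTexture, &mirrorTexId);
    glBindFramebuffer(GL_READ_FRAMEBUFFER, mirrorFBO);                 // previously generated FBO (placeholder)
    glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, mirrorTexId, 0);
    glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);

    // Each frame, after ovr_SubmitFrame: blit the mirror into the default framebuffer (flipped vertically).
    glBindFramebuffer(GL_READ_FRAMEBUFFER, mirrorFBO);
    glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
    glBlitFramebuffer(0, windowHeight, windowWidth, 0, 0, 0, windowWidth, windowHeight,
                      GL_COLOR_BUFFER_BIT, GL_NEAREST);
    glBindFramebuffer(GL_FRAMEBUFFER, 0);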

+ 53 - 0
src/external/OculusSDK/LibOVR/Include/OVR_CAPI_Keys.h

@@ -0,0 +1,53 @@
+/********************************************************************************//**
+\file      OVR_CAPI_Keys.h
+\brief     Keys for CAPI property function calls
+\copyright Copyright 2015 Oculus VR, LLC All Rights reserved.
+************************************************************************************/
+
+#ifndef OVR_CAPI_Keys_h
+#define OVR_CAPI_Keys_h
+
+#include "OVR_Version.h"
+
+
+
+#define OVR_KEY_USER                        "User"                // string
+
+#define OVR_KEY_NAME                        "Name"                // string
+
+#define OVR_KEY_GENDER                      "Gender"              // string "Male", "Female", or "Unknown"
+#define OVR_DEFAULT_GENDER                  "Unknown"
+
+#define OVR_KEY_PLAYER_HEIGHT               "PlayerHeight"        // float meters
+#define OVR_DEFAULT_PLAYER_HEIGHT           1.778f
+
+#define OVR_KEY_EYE_HEIGHT                  "EyeHeight"           // float meters
+#define OVR_DEFAULT_EYE_HEIGHT              1.675f
+
+#define OVR_KEY_NECK_TO_EYE_DISTANCE        "NeckEyeDistance"     // float[2] meters
+#define OVR_DEFAULT_NECK_TO_EYE_HORIZONTAL  0.0805f
+#define OVR_DEFAULT_NECK_TO_EYE_VERTICAL    0.075f
+
+
+#define OVR_KEY_EYE_TO_NOSE_DISTANCE        "EyeToNoseDist"       // float[2] meters
+
+
+
+
+
+#define OVR_PERF_HUD_MODE                       "PerfHudMode"                       // int, allowed values are defined in enum ovrPerfHudMode
+
+#define OVR_LAYER_HUD_MODE                      "LayerHudMode"                      // int, allowed values are defined in enum ovrLayerHudMode
+#define OVR_LAYER_HUD_CURRENT_LAYER             "LayerHudCurrentLayer"              // int, The layer to show 
+#define OVR_LAYER_HUD_SHOW_ALL_LAYERS           "LayerHudShowAll"                   // bool, Hide other layers when the hud is enabled
+
+#define OVR_DEBUG_HUD_STEREO_MODE               "DebugHudStereoMode"                // int, allowed values are defined in enum ovrDebugHudStereoMode
+#define OVR_DEBUG_HUD_STEREO_GUIDE_INFO_ENABLE  "DebugHudStereoGuideInfoEnable"     // bool
+#define OVR_DEBUG_HUD_STEREO_GUIDE_SIZE         "DebugHudStereoGuideSize2f"         // float[2]
+#define OVR_DEBUG_HUD_STEREO_GUIDE_POSITION     "DebugHudStereoGuidePosition3f"     // float[3]
+#define OVR_DEBUG_HUD_STEREO_GUIDE_YAWPITCHROLL "DebugHudStereoGuideYawPitchRoll3f" // float[3]
+#define OVR_DEBUG_HUD_STEREO_GUIDE_COLOR        "DebugHudStereoGuideColor4f"        // float[4]
+
+
+
+#endif // OVR_CAPI_Keys_h
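
A small sketch of how these keys pair with the property functions declared in OVR_CAPI.h, using the documented defaults as fallbacks:

    // Player and eye height fall back to the OVR_DEFAULT_* values when the user profile has no data.
    float playerHeight = ovr_GetFloat(session, OVR_KEY_PLAYER_HEIGHT, OVR_DEFAULT_PLAYER_HEIGHT);
    float eyeHeight    = ovr_GetFloat(session, OVR_KEY_EYE_HEIGHT,    OVR_DEFAULT_EYE_HEIGHT);

    // Gender is a string key with a documented default of "Unknown".
    const char *gender = ovr_GetString(session, OVR_KEY_GENDER, OVR_DEFAULT_GENDER);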

+ 209 - 0
src/external/OculusSDK/LibOVR/Include/OVR_ErrorCode.h

@@ -0,0 +1,209 @@
+/********************************************************************************//**
+\file  OVR_ErrorCode.h
+\brief     This header provides LibOVR error code declarations.
+\copyright Copyright 2015-2016 Oculus VR, LLC All Rights reserved.
+*************************************************************************************/
+
+#ifndef OVR_ErrorCode_h
+#define OVR_ErrorCode_h
+
+
+#include "OVR_Version.h"
+#include <stdint.h>
+
+
+
+
+
+
+
+#ifndef OVR_RESULT_DEFINED
+#define OVR_RESULT_DEFINED ///< Allows ovrResult to be independently defined.
+/// API call results are represented at the highest level by a single ovrResult.
+typedef int32_t ovrResult;
+#endif
+
+
+/// \brief Indicates if an ovrResult indicates success.
+///
+/// Some functions return additional successful values other than ovrSuccess and
+/// require usage of this macro to indicate success.
+///
+#if !defined(OVR_SUCCESS)
+    #define OVR_SUCCESS(result) (result >= 0)
+#endif
+
+
+/// \brief Indicates if an ovrResult indicates an unqualified success.
+///
+/// This is useful for indicating that the code intentionally wants to
+/// check for result == ovrSuccess as opposed to OVR_SUCCESS(), which
+/// checks for result >= ovrSuccess.
+///
+#if !defined(OVR_UNQUALIFIED_SUCCESS)
+    #define OVR_UNQUALIFIED_SUCCESS(result) (result == ovrSuccess)
+#endif
+
+
+/// \brief Indicates if an ovrResult indicates failure.
+///
+#if !defined(OVR_FAILURE)
+    #define OVR_FAILURE(result) (!OVR_SUCCESS(result))
+#endif
+
+
+// Success is a value greater or equal to 0, while all error types are negative values.
+#ifndef OVR_SUCCESS_DEFINED
+#define OVR_SUCCESS_DEFINED ///< Allows ovrResult to be independently defined.
+typedef enum ovrSuccessType_
+{
+    /// This is a general success result. Use OVR_SUCCESS to test for success.
+    ovrSuccess = 0,
+
+    /// Returned from a call to SubmitFrame. The call succeeded, but what the app
+    /// rendered will not be visible on the HMD. Ideally the app should continue
+    /// calling SubmitFrame, but not do any rendering. When the result becomes
+    /// ovrSuccess, rendering should continue as usual.
+    ovrSuccess_NotVisible                 = 1000,
+
+    ovrSuccess_HMDFirmwareMismatch        = 4100,   ///< The HMD Firmware is out of date but is acceptable.
+    ovrSuccess_TrackerFirmwareMismatch    = 4101,   ///< The Tracker Firmware is out of date but is acceptable.
+    ovrSuccess_ControllerFirmwareMismatch = 4104,   ///< The controller firmware is out of date but is acceptable.
+    ovrSuccess_TrackerDriverNotFound      = 4105,   ///< The tracker driver interface was not found. Can be a temporary error
+
+} ovrSuccessType;
+#endif
+
+
+typedef enum ovrErrorType_
+{
+    /* General errors */
+    ovrError_MemoryAllocationFailure    = -1000,   ///< Failure to allocate memory.
+    ovrError_SocketCreationFailure      = -1001,   ///< Failure to create a socket.
+    ovrError_InvalidSession             = -1002,   ///< Invalid ovrSession parameter provided.
+    ovrError_Timeout                    = -1003,   ///< The operation timed out.
+    ovrError_NotInitialized             = -1004,   ///< The system or component has not been initialized.
+    ovrError_InvalidParameter           = -1005,   ///< Invalid parameter provided. See error info or log for details.
+    ovrError_ServiceError               = -1006,   ///< Generic service error. See error info or log for details.
+    ovrError_NoHmd                      = -1007,   ///< The given HMD doesn't exist.
+    ovrError_Unsupported                = -1009,   ///< Function call is not supported on this hardware/software
+    ovrError_DeviceUnavailable          = -1010,   ///< Specified device type isn't available.
+    ovrError_InvalidHeadsetOrientation  = -1011,   ///< The headset was in an invalid orientation for the requested operation (e.g. vertically oriented during ovr_RecenterPose).
+    ovrError_ClientSkippedDestroy       = -1012,   ///< The client failed to call ovr_Destroy on an active session before calling ovr_Shutdown. Or the client crashed.
+    ovrError_ClientSkippedShutdown      = -1013,   ///< The client failed to call ovr_Shutdown or the client crashed.
+    ovrError_ServiceDeadlockDetected    = -1014,   ///< The service watchdog discovered a deadlock.
+
+    /* Audio error range, reserved for Audio errors. */
+    ovrError_AudioReservedBegin         = -2000,   ///< First Audio error.
+    ovrError_AudioDeviceNotFound        = -2001,   ///< Failure to find the specified audio device.
+    ovrError_AudioComError              = -2002,   ///< Generic COM error.
+    ovrError_AudioReservedEnd           = -2999,   ///< Last Audio error.
+
+    /* Initialization errors. */
+    ovrError_Initialize                 = -3000,   ///< Generic initialization error.
+    ovrError_LibLoad                    = -3001,   ///< Couldn't load LibOVRRT.
+    ovrError_LibVersion                 = -3002,   ///< LibOVRRT version incompatibility.
+    ovrError_ServiceConnection          = -3003,   ///< Couldn't connect to the OVR Service.
+    ovrError_ServiceVersion             = -3004,   ///< OVR Service version incompatibility.
+    ovrError_IncompatibleOS             = -3005,   ///< The operating system version is incompatible.
+    ovrError_DisplayInit                = -3006,   ///< Unable to initialize the HMD display.
+    ovrError_ServerStart                = -3007,   ///< Unable to start the server. Is it already running?
+    ovrError_Reinitialization           = -3008,   ///< Attempting to re-initialize with a different version.
+    ovrError_MismatchedAdapters         = -3009,   ///< Chosen rendering adapters between client and service do not match
+    ovrError_LeakingResources           = -3010,   ///< Calling application has leaked resources
+    ovrError_ClientVersion              = -3011,   ///< Client version too old to connect to service
+    ovrError_OutOfDateOS                = -3012,   ///< The operating system is out of date.
+    ovrError_OutOfDateGfxDriver         = -3013,   ///< The graphics driver is out of date.
+    ovrError_IncompatibleGPU            = -3014,   ///< The graphics hardware is not supported
+    ovrError_NoValidVRDisplaySystem     = -3015,   ///< No valid VR display system found.
+    ovrError_Obsolete                   = -3016,   ///< Feature or API is obsolete and no longer supported.
+    ovrError_DisabledOrDefaultAdapter   = -3017,   ///< No supported VR display system found, but disabled or driverless adapter found.
+    ovrError_HybridGraphicsNotSupported = -3018,   ///< The system is using hybrid graphics (Optimus, etc...), which is not supported.
+    ovrError_DisplayManagerInit         = -3019,   ///< Initialization of the DisplayManager failed.
+    ovrError_TrackerDriverInit          = -3020,   ///< Failed to get the interface for an attached tracker
+
+    /* Hardware errors */
+    ovrError_InvalidBundleAdjustment    = -4000,   ///< Headset has no bundle adjustment data.
+    ovrError_USBBandwidth               = -4001,   ///< The USB hub cannot handle the camera frame bandwidth.
+    ovrError_USBEnumeratedSpeed         = -4002,   ///< The USB camera is not enumerating at the correct device speed.
+    ovrError_ImageSensorCommError       = -4003,   ///< Unable to communicate with the image sensor.
+    ovrError_GeneralTrackerFailure      = -4004,   ///< We use this to report various sensor issues that don't fit in an easily classifiable bucket.
+    ovrError_ExcessiveFrameTruncation   = -4005,   ///< A more than acceptable number of frames are coming back truncated.
+    ovrError_ExcessiveFrameSkipping     = -4006,   ///< A more than acceptable number of frames have been skipped.
+    ovrError_SyncDisconnected           = -4007,   ///< The sensor is not receiving the sync signal (cable disconnected?).
+    ovrError_TrackerMemoryReadFailure   = -4008,   ///< Failed to read memory from the sensor.
+    ovrError_TrackerMemoryWriteFailure  = -4009,   ///< Failed to write memory from the sensor.
+    ovrError_TrackerFrameTimeout        = -4010,   ///< Timed out waiting for a camera frame.
+    ovrError_TrackerTruncatedFrame      = -4011,   ///< Truncated frame returned from sensor.
+    ovrError_TrackerDriverFailure       = -4012,   ///< The sensor driver has encountered a problem.
+    ovrError_TrackerNRFFailure          = -4013,   ///< The sensor wireless subsystem has encountered a problem.
+    ovrError_HardwareGone               = -4014,   ///< The hardware has been unplugged
+    ovrError_NordicEnabledNoSync        = -4015,   ///< The nordic indicates that sync is enabled but it is not sending sync pulses
+    ovrError_NordicSyncNoFrames         = -4016,   ///< It looks like we're getting a sync signal, but no camera frames have been received
+    ovrError_CatastrophicFailure        = -4017,   ///< A catastrophic failure has occurred.  We will attempt to recover by resetting the device
+    ovrError_CatastrophicTimeout        = -4018,   ///< The catastrophic recovery has timed out.
+    ovrError_RepeatCatastrophicFail     = -4019,   ///< Catastrophic failure has repeated too many times.
+    ovrError_USBOpenDeviceFailure       = -4020,   ///< Could not open handle for Rift device (likely already in use by another process).
+    ovrError_HMDGeneralFailure          = -4021,   ///< Unexpected HMD issues that don't fit a specific bucket.
+
+    ovrError_HMDFirmwareMismatch        = -4100,   ///< The HMD Firmware is out of date and is unacceptable.
+    ovrError_TrackerFirmwareMismatch    = -4101,   ///< The sensor Firmware is out of date and is unacceptable.
+    ovrError_BootloaderDeviceDetected   = -4102,   ///< A bootloader HMD is detected by the service.
+    ovrError_TrackerCalibrationError    = -4103,   ///< The sensor calibration is missing or incorrect.
+    ovrError_ControllerFirmwareMismatch = -4104,   ///< The controller firmware is out of date and is unacceptable.
+    ovrError_DevManDeviceDetected       = -4105,   ///< A DeviceManagement mode HMD is detected by the service.
+    ovrError_RebootedBootloaderDevice   = -4106,   ///< Had to reboot bootloader device, which succeeded.
+    ovrError_FailedRebootBootloaderDev  = -4107,   ///< Had to reboot bootloader device, which failed.  Device is stuck in bootloader mode.
+
+    ovrError_IMUTooManyLostSamples      = -4200,   ///< Too many lost IMU samples.
+    ovrError_IMURateError               = -4201,   ///< IMU rate is outside of the expected range.
+    ovrError_FeatureReportFailure       = -4202,   ///< A feature report has failed.
+    ovrError_HMDWirelessTimeout         = -4203,   ///< HMD wireless interface never returned from busy state.
+
+    ovrError_BootloaderAssertLog        = -4300,   ///< HMD Bootloader Assert Log was not empty.
+    ovrError_AppAssertLog               = -4301,   ///< HMD App Assert Log was not empty.
+
+    /* Synchronization errors */
+    ovrError_Incomplete                 = -5000,   ///< Requested async work not yet complete.
+    ovrError_Abandoned                  = -5001,   ///< Requested async work was abandoned and result is incomplete.
+
+    /* Rendering errors */
+    ovrError_DisplayLost                = -6000,   ///< In the event of a system-wide graphics reset or cable unplug this is returned to the app.
+    ovrError_TextureSwapChainFull       = -6001,   ///< ovr_CommitTextureSwapChain was called too many times on a texture swapchain without calling submit to use the chain.
+    ovrError_TextureSwapChainInvalid    = -6002,   ///< The ovrTextureSwapChain is in an incomplete or inconsistent state. Ensure ovr_CommitTextureSwapChain was called at least once first.
+    ovrError_GraphicsDeviceReset        = -6003,   ///< Graphics device has been reset (TDR, etc...)
+    ovrError_DisplayRemoved             = -6004,   ///< HMD removed from the display adapter
+    ovrError_ContentProtectionNotAvailable = -6005, ///< Content protection is not available for the display.
+    ovrError_ApplicationInvisible       = -6006,   ///< Application declared itself as an invisible type and is not allowed to submit frames.
+    ovrError_Disallowed                 = -6007,   ///< The given request is disallowed under the current conditions.
+    ovrError_DisplayPluggedIncorrectly  = -6008,   ///< Display portion of HMD is plugged into an incompatible port (ex: IGP)
+
+    /* Fatal errors */
+    ovrError_RuntimeException           = -7000,   ///< A runtime exception occurred. The application is required to shutdown LibOVR and re-initialize it before this error state will be cleared.
+
+
+    ovrError_MetricsUnknownApp            = -90000,
+    ovrError_MetricsDuplicateApp          = -90001,
+    ovrError_MetricsNoEvents              = -90002,
+    ovrError_MetricsRuntime               = -90003,
+    ovrError_MetricsFile                  = -90004,
+    ovrError_MetricsNoClientInfo          = -90005,
+    ovrError_MetricsNoAppMetaData         = -90006,
+    ovrError_MetricsNoApp                 = -90007,
+    ovrError_MetricsOafFailure            = -90008,
+    ovrError_MetricsSessionAlreadyActive  = -90009,
+    ovrError_MetricsSessionNotActive      = -90010,
+
+} ovrErrorType;
+
+
+
+/// Provides information about the last error.
+/// \see ovr_GetLastErrorInfo
+typedef struct ovrErrorInfo_
+{
+    ovrResult Result;               ///< The result from the last API call that generated an error ovrResult.
+    char      ErrorString[512];     ///< A UTF8-encoded null-terminated English string describing the problem. The format of this string is subject to change in future versions.
+} ovrErrorInfo;
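
A typical use of these codes together with ovr_GetLastErrorInfo (declared in OVR_CAPI.h); the submit call and the handling shown are illustrative:

    ovrResult result = ovr_SubmitFrame(session, frameIndex, NULL, &layers, 1);

    if (OVR_FAILURE(result))
    {
        ovrErrorInfo errorInfo;
        ovr_GetLastErrorInfo(&errorInfo);          // human-readable description of the failure

        if (result == ovrError_DisplayLost)
        {
            // Release swap chains and the session, then recreate them (see the ovr_SubmitFrame docs).
        }
        else
        {
            // Log errorInfo.ErrorString and decide whether to retry or shut down.
        }
    }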
+
+#endif /* OVR_ErrorCode_h */

+ 60 - 0
src/external/OculusSDK/LibOVR/Include/OVR_Version.h

@@ -0,0 +1,60 @@
+/********************************************************************************//**
+\file      OVR_Version.h
+\brief     This header provides LibOVR version identification.
+\copyright Copyright 2014-2016 Oculus VR, LLC All Rights reserved.
+*************************************************************************************/
+
+#ifndef OVR_Version_h
+#define OVR_Version_h
+
+
+
+/// Conventional string-ification macro.
+#if !defined(OVR_STRINGIZE)
+    #define OVR_STRINGIZEIMPL(x) #x
+    #define OVR_STRINGIZE(x)     OVR_STRINGIZEIMPL(x)
+#endif
+
+
+// Master version numbers
+#define OVR_PRODUCT_VERSION 1  // Product version doesn't participate in semantic versioning.
+#define OVR_MAJOR_VERSION   1  // If you change these values then you need to also make sure to change LibOVR/Projects/Windows/LibOVR.props in parallel.
+#define OVR_MINOR_VERSION   4  // 
+#define OVR_PATCH_VERSION   0
+#define OVR_BUILD_NUMBER    0
+
+// This is the ((product * 100) + major) version of the service that the DLL is compatible with.
+// When we backport changes to old versions of the DLL we update the old DLLs
+// to move this version number up to the latest version.
+// The DLL is responsible for checking that the service is the version it supports
+// and returning an appropriate error message if it has not been made compatible.
+#define OVR_DLL_COMPATIBLE_VERSION 101
+
+#define OVR_FEATURE_VERSION 0
+
+
+/// "Major.Minor.Patch"
+#if !defined(OVR_VERSION_STRING)
+    #define OVR_VERSION_STRING  OVR_STRINGIZE(OVR_MAJOR_VERSION.OVR_MINOR_VERSION.OVR_PATCH_VERSION)
+#endif
+
+
+/// "Major.Minor.Patch.Build"
+#if !defined(OVR_DETAILED_VERSION_STRING)
+    #define OVR_DETAILED_VERSION_STRING OVR_STRINGIZE(OVR_MAJOR_VERSION.OVR_MINOR_VERSION.OVR_PATCH_VERSION.OVR_BUILD_NUMBER)
+#endif
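
A tiny sketch of how the stringizing macros above are consumed; ovr_GetVersionString (declared in OVR_CAPI.h) is assumed for the runtime comparison:

    #include <stdio.h>
    #include "OVR_CAPI.h"   // brings in OVR_Version.h and ovr_GetVersionString()

    static void PrintLibOVRVersions(void)
    {
        // Compile-time version (from the macros above) vs. the runtime DLL's own version string.
        printf("Built against LibOVR %s\n", OVR_DETAILED_VERSION_STRING);
        printf("Runtime reports      %s\n", ovr_GetVersionString());
    }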
+
+
+/// \brief file description for version info
+/// This appears in the user-visible file properties. It is intended to convey publicly
+/// available additional information such as feature builds.
+#if !defined(OVR_FILE_DESCRIPTION_STRING)
+    #if defined(_DEBUG)
+        #define OVR_FILE_DESCRIPTION_STRING "dev build debug"
+    #else
+        #define OVR_FILE_DESCRIPTION_STRING "dev build"
+    #endif
+#endif
+
+
+#endif // OVR_Version_h

BIN
src/external/OculusSDK/LibOVR/LibOVRRT32_1.dll


+ 17 - 17
src/gestures.c

@@ -111,6 +111,19 @@ static double GetCurrentTime(void);
 // Module Functions Definition
 //----------------------------------------------------------------------------------
 
+// Enable only desired gestures to be detected
+void SetGesturesEnabled(unsigned int gestureFlags)
+{
+    enabledGestures = gestureFlags;
+}
+
+// Check if a gesture has been detected
+bool IsGestureDetected(int gesture)
+{
+    if ((enabledGestures & currentGesture) == gesture) return true;
+    else return false;
+}
+
 // Process gesture event and translate it into gestures
 void ProcessGestureEvent(GestureEvent event)
 {
@@ -291,20 +304,6 @@ void UpdateGestures(void)
     }
 }
 
-// Check if a gesture have been detected
-bool IsGestureDetected(int gesture)
-{
-    if ((enabledGestures & currentGesture) == gesture) return true;
-    else return false;
-}
-
-// Check gesture type
-int GetGestureDetected(void)
-{
-    // Get current gesture only if enabled
-    return (enabledGestures & currentGesture);
-}
-
 // Get number of touch points
 int GetTouchPointsCount(void)
 {
@@ -313,10 +312,11 @@ int GetTouchPointsCount(void)
     return pointCount;
 }
 
-// Enable only desired getures to be detected
-void SetGesturesEnabled(unsigned int gestureFlags)
+// Get latest detected gesture
+int GetGestureDetected(void)
 {
-    enabledGestures = gestureFlags;
+    // Get current gesture only if enabled
+    return (enabledGestures & currentGesture);
 }
 
 // Hold time measured in ms

+ 4 - 4
src/gestures.h

@@ -90,13 +90,13 @@ extern "C" {            // Prevents name mangling of functions
 //----------------------------------------------------------------------------------
 // Module Functions Declaration
 //----------------------------------------------------------------------------------
+void SetGesturesEnabled(unsigned int gestureFlags);     // Enable a set of gestures using flags
+bool IsGestureDetected(int gesture);                    // Check if a gesture has been detected
 void ProcessGestureEvent(GestureEvent event);           // Process gesture event and translate it into gestures
 void UpdateGestures(void);                              // Update gestures detected (must be called every frame)
-bool IsGestureDetected(int gesture);                    // Check if a gesture have been detected
-int GetGestureDetected(void);                           // Get latest detected gesture
-void SetGesturesEnabled(unsigned int gestureFlags);     // Enable a set of gestures using flags
-int GetTouchPointsCount(void);                          // Get touch points count
 
+int GetTouchPointsCount(void);                          // Get touch points count
+int GetGestureDetected(void);                           // Get latest detected gesture
 float GetGestureHoldDuration(void);                     // Get gesture hold time in milliseconds
 Vector2 GetGestureDragVector(void);                     // Get gesture drag vector
 float GetGestureDragAngle(void);                        // Get gesture drag angle

+ 107 - 101
src/raygui.h

@@ -242,106 +242,7 @@ typedef enum GuiProperty {
 //----------------------------------------------------------------------------------
 // Global Variables Definition
 //----------------------------------------------------------------------------------
-static const char *guiPropertyName[] = {
-    "GLOBAL_BASE_COLOR",
-    "GLOBAL_BORDER_COLOR",
-    "GLOBAL_TEXT_COLOR",
-    "GLOBAL_TEXT_FONTSIZE",
-    "GLOBAL_BORDER_WIDTH",
-    "BACKGROUND_COLOR",
-    "LABEL_BORDER_WIDTH",
-    "LABEL_TEXT_COLOR",
-    "LABEL_TEXT_PADDING",
-    "BUTTON_BORDER_WIDTH",
-    "BUTTON_TEXT_PADDING",
-    "BUTTON_DEFAULT_BORDER_COLOR",
-    "BUTTON_DEFAULT_INSIDE_COLOR",
-    "BUTTON_DEFAULT_TEXT_COLOR",
-    "BUTTON_HOVER_BORDER_COLOR",
-    "BUTTON_HOVER_INSIDE_COLOR",
-    "BUTTON_HOVER_TEXT_COLOR",
-    "BUTTON_PRESSED_BORDER_COLOR",
-    "BUTTON_PRESSED_INSIDE_COLOR",
-    "BUTTON_PRESSED_TEXT_COLOR",
-    "TOGGLE_TEXT_PADDING",
-    "TOGGLE_BORDER_WIDTH",
-    "TOGGLE_DEFAULT_BORDER_COLOR",
-    "TOGGLE_DEFAULT_INSIDE_COLOR",
-    "TOGGLE_DEFAULT_TEXT_COLOR",
-    "TOGGLE_HOVER_BORDER_COLOR",
-    "TOGGLE_HOVER_INSIDE_COLOR",
-    "TOGGLE_HOVER_TEXT_COLOR",
-    "TOGGLE_PRESSED_BORDER_COLOR",
-    "TOGGLE_PRESSED_INSIDE_COLOR",
-    "TOGGLE_PRESSED_TEXT_COLOR",
-    "TOGGLE_ACTIVE_BORDER_COLOR",
-    "TOGGLE_ACTIVE_INSIDE_COLOR",
-    "TOGGLE_ACTIVE_TEXT_COLOR",
-    "TOGGLEGROUP_PADDING",
-    "SLIDER_BORDER_WIDTH",
-    "SLIDER_BUTTON_BORDER_WIDTH",
-    "SLIDER_BORDER_COLOR",
-    "SLIDER_INSIDE_COLOR",
-    "SLIDER_DEFAULT_COLOR",
-    "SLIDER_HOVER_COLOR",
-    "SLIDER_ACTIVE_COLOR",
-    "SLIDERBAR_BORDER_COLOR",
-    "SLIDERBAR_INSIDE_COLOR",
-    "SLIDERBAR_DEFAULT_COLOR",
-    "SLIDERBAR_HOVER_COLOR",
-    "SLIDERBAR_ACTIVE_COLOR",
-    "SLIDERBAR_ZERO_LINE_COLOR",
-    "PROGRESSBAR_BORDER_COLOR",
-    "PROGRESSBAR_INSIDE_COLOR",
-    "PROGRESSBAR_PROGRESS_COLOR",
-    "PROGRESSBAR_BORDER_WIDTH",
-    "SPINNER_LABEL_BORDER_COLOR",
-    "SPINNER_LABEL_INSIDE_COLOR",
-    "SPINNER_DEFAULT_BUTTON_BORDER_COLOR",
-    "SPINNER_DEFAULT_BUTTON_INSIDE_COLOR",
-    "SPINNER_DEFAULT_SYMBOL_COLOR",
-    "SPINNER_DEFAULT_TEXT_COLOR",
-    "SPINNER_HOVER_BUTTON_BORDER_COLOR",
-    "SPINNER_HOVER_BUTTON_INSIDE_COLOR",
-    "SPINNER_HOVER_SYMBOL_COLOR",
-    "SPINNER_HOVER_TEXT_COLOR",
-    "SPINNER_PRESSED_BUTTON_BORDER_COLOR",
-    "SPINNER_PRESSED_BUTTON_INSIDE_COLOR",
-    "SPINNER_PRESSED_SYMBOL_COLOR",
-    "SPINNER_PRESSED_TEXT_COLOR",
-    "COMBOBOX_PADDING",
-    "COMBOBOX_BUTTON_WIDTH",
-    "COMBOBOX_BUTTON_HEIGHT",
-    "COMBOBOX_BORDER_WIDTH",
-    "COMBOBOX_DEFAULT_BORDER_COLOR",
-    "COMBOBOX_DEFAULT_INSIDE_COLOR",
-    "COMBOBOX_DEFAULT_TEXT_COLOR",
-    "COMBOBOX_DEFAULT_LIST_TEXT_COLOR",
-    "COMBOBOX_HOVER_BORDER_COLOR",
-    "COMBOBOX_HOVER_INSIDE_COLOR",
-    "COMBOBOX_HOVER_TEXT_COLOR",
-    "COMBOBOX_HOVER_LIST_TEXT_COLOR",
-    "COMBOBOX_PRESSED_BORDER_COLOR",
-    "COMBOBOX_PRESSED_INSIDE_COLOR",
-    "COMBOBOX_PRESSED_TEXT_COLOR",
-    "COMBOBOX_PRESSED_LIST_BORDER_COLOR",
-    "COMBOBOX_PRESSED_LIST_INSIDE_COLOR",
-    "COMBOBOX_PRESSED_LIST_TEXT_COLOR",
-    "CHECKBOX_DEFAULT_BORDER_COLOR",
-    "CHECKBOX_DEFAULT_INSIDE_COLOR",
-    "CHECKBOX_HOVER_BORDER_COLOR",
-    "CHECKBOX_HOVER_INSIDE_COLOR",
-    "CHECKBOX_CLICK_BORDER_COLOR",
-    "CHECKBOX_CLICK_INSIDE_COLOR",
-    "CHECKBOX_STATUS_ACTIVE_COLOR",
-    "CHECKBOX_INSIDE_WIDTH",
-    "TEXTBOX_BORDER_WIDTH",
-    "TEXTBOX_BORDER_COLOR",
-    "TEXTBOX_INSIDE_COLOR",
-    "TEXTBOX_TEXT_COLOR",
-    "TEXTBOX_LINE_COLOR",
-    "TEXTBOX_TEXT_FONTSIZE"
-};
+// ...
 
 //----------------------------------------------------------------------------------
 // Module Functions Declaration
@@ -517,6 +418,108 @@ static int style[NUM_PROPERTIES] = {
     10                  // TEXTBOX_TEXT_FONTSIZE
 };
 
+// GUI property names (to read/write style text files)
+static const char *guiPropertyName[] = {
+    "GLOBAL_BASE_COLOR",
+    "GLOBAL_BORDER_COLOR",
+    "GLOBAL_TEXT_COLOR",
+    "GLOBAL_TEXT_FONTSIZE",
+    "GLOBAL_BORDER_WIDTH",
+    "BACKGROUND_COLOR",
+    "LABEL_BORDER_WIDTH",
+    "LABEL_TEXT_COLOR",
+    "LABEL_TEXT_PADDING",
+    "BUTTON_BORDER_WIDTH",
+    "BUTTON_TEXT_PADDING",
+    "BUTTON_DEFAULT_BORDER_COLOR",
+    "BUTTON_DEFAULT_INSIDE_COLOR",
+    "BUTTON_DEFAULT_TEXT_COLOR",
+    "BUTTON_HOVER_BORDER_COLOR",
+    "BUTTON_HOVER_INSIDE_COLOR",
+    "BUTTON_HOVER_TEXT_COLOR",
+    "BUTTON_PRESSED_BORDER_COLOR",
+    "BUTTON_PRESSED_INSIDE_COLOR",
+    "BUTTON_PRESSED_TEXT_COLOR",
+    "TOGGLE_TEXT_PADDING",
+    "TOGGLE_BORDER_WIDTH",
+    "TOGGLE_DEFAULT_BORDER_COLOR",
+    "TOGGLE_DEFAULT_INSIDE_COLOR",
+    "TOGGLE_DEFAULT_TEXT_COLOR",
+    "TOGGLE_HOVER_BORDER_COLOR",
+    "TOGGLE_HOVER_INSIDE_COLOR",
+    "TOGGLE_HOVER_TEXT_COLOR",
+    "TOGGLE_PRESSED_BORDER_COLOR",
+    "TOGGLE_PRESSED_INSIDE_COLOR",
+    "TOGGLE_PRESSED_TEXT_COLOR",
+    "TOGGLE_ACTIVE_BORDER_COLOR",
+    "TOGGLE_ACTIVE_INSIDE_COLOR",
+    "TOGGLE_ACTIVE_TEXT_COLOR",
+    "TOGGLEGROUP_PADDING",
+    "SLIDER_BORDER_WIDTH",
+    "SLIDER_BUTTON_BORDER_WIDTH",
+    "SLIDER_BORDER_COLOR",
+    "SLIDER_INSIDE_COLOR",
+    "SLIDER_DEFAULT_COLOR",
+    "SLIDER_HOVER_COLOR",
+    "SLIDER_ACTIVE_COLOR",
+    "SLIDERBAR_BORDER_COLOR",
+    "SLIDERBAR_INSIDE_COLOR",
+    "SLIDERBAR_DEFAULT_COLOR",
+    "SLIDERBAR_HOVER_COLOR",
+    "SLIDERBAR_ACTIVE_COLOR",
+    "SLIDERBAR_ZERO_LINE_COLOR",
+    "PROGRESSBAR_BORDER_COLOR",
+    "PROGRESSBAR_INSIDE_COLOR",
+    "PROGRESSBAR_PROGRESS_COLOR",
+    "PROGRESSBAR_BORDER_WIDTH",
+    "SPINNER_LABEL_BORDER_COLOR",
+    "SPINNER_LABEL_INSIDE_COLOR",
+    "SPINNER_DEFAULT_BUTTON_BORDER_COLOR",
+    "SPINNER_DEFAULT_BUTTON_INSIDE_COLOR",
+    "SPINNER_DEFAULT_SYMBOL_COLOR",
+    "SPINNER_DEFAULT_TEXT_COLOR",
+    "SPINNER_HOVER_BUTTON_BORDER_COLOR",
+    "SPINNER_HOVER_BUTTON_INSIDE_COLOR",
+    "SPINNER_HOVER_SYMBOL_COLOR",
+    "SPINNER_HOVER_TEXT_COLOR",
+    "SPINNER_PRESSED_BUTTON_BORDER_COLOR",
+    "SPINNER_PRESSED_BUTTON_INSIDE_COLOR",
+    "SPINNER_PRESSED_SYMBOL_COLOR",
+    "SPINNER_PRESSED_TEXT_COLOR",
+    "COMBOBOX_PADDING",
+    "COMBOBOX_BUTTON_WIDTH",
+    "COMBOBOX_BUTTON_HEIGHT",
+    "COMBOBOX_BORDER_WIDTH",
+    "COMBOBOX_DEFAULT_BORDER_COLOR",
+    "COMBOBOX_DEFAULT_INSIDE_COLOR",
+    "COMBOBOX_DEFAULT_TEXT_COLOR",
+    "COMBOBOX_DEFAULT_LIST_TEXT_COLOR",
+    "COMBOBOX_HOVER_BORDER_COLOR",
+    "COMBOBOX_HOVER_INSIDE_COLOR",
+    "COMBOBOX_HOVER_TEXT_COLOR",
+    "COMBOBOX_HOVER_LIST_TEXT_COLOR",
+    "COMBOBOX_PRESSED_BORDER_COLOR",
+    "COMBOBOX_PRESSED_INSIDE_COLOR",
+    "COMBOBOX_PRESSED_TEXT_COLOR",
+    "COMBOBOX_PRESSED_LIST_BORDER_COLOR",
+    "COMBOBOX_PRESSED_LIST_INSIDE_COLOR",
+    "COMBOBOX_PRESSED_LIST_TEXT_COLOR",
+    "CHECKBOX_DEFAULT_BORDER_COLOR",
+    "CHECKBOX_DEFAULT_INSIDE_COLOR",
+    "CHECKBOX_HOVER_BORDER_COLOR",
+    "CHECKBOX_HOVER_INSIDE_COLOR",
+    "CHECKBOX_CLICK_BORDER_COLOR",
+    "CHECKBOX_CLICK_INSIDE_COLOR",
+    "CHECKBOX_STATUS_ACTIVE_COLOR",
+    "CHECKBOX_INSIDE_WIDTH",
+    "TEXTBOX_BORDER_WIDTH",
+    "TEXTBOX_BORDER_COLOR",
+    "TEXTBOX_INSIDE_COLOR",
+    "TEXTBOX_TEXT_COLOR",
+    "TEXTBOX_LINE_COLOR",
+    "TEXTBOX_TEXT_FONTSIZE"
+};
+
 //----------------------------------------------------------------------------------
 // Module specific Functions Declaration
 //----------------------------------------------------------------------------------
@@ -529,7 +532,9 @@ static bool CheckCollisionPointRec(Vector2 point, Rectangle rec);  // Check if p
 static const char *FormatText(const char *text, ...);   // Formatting of text with variables to 'embed'
 
 // NOTE: raygui depend on some raylib input and drawing functions
-// TODO: Replace by your own functions
+// TODO: To use raygui as a standalone library, these functions must be overwritten by custom ones
+
+// Input management functions
 static Vector2 GetMousePosition() { return (Vector2){ 0.0f, 0.0f }; }
 static int IsMouseButtonDown(int button) { return 0; }
 static int IsMouseButtonPressed(int button) { return 0; }
@@ -539,6 +544,7 @@ static int IsMouseButtonUp(int button) { return 0; }
 static int GetKeyPressed(void) { return 0; }    // NOTE: Only used by GuiTextBox()
 static int IsKeyDown(int key) { return 0; }     // NOTE: Only used by GuiSpinner()
 
+// Drawing related functions
 static int MeasureText(const char *text, int fontSize) { return 0; }
 static void DrawText(const char *text, int posX, int posY, int fontSize, Color color) { }
 static void DrawRectangleRec(Rectangle rec, Color color) { }
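
As a rough sketch of what that TODO implies: in a standalone build, the empty stubs above get replaced by bindings to the host application's input and drawing layer. A GLFW-based host, for instance, could wire the input side as below; the global window pointer is an assumption of this sketch, not something raygui provides:

    #include <GLFW/glfw3.h>

    static GLFWwindow *window = NULL;       // Assumed: set by the host application at startup

    static Vector2 GetMousePosition(void)
    {
        double x = 0.0, y = 0.0;
        glfwGetCursorPos(window, &x, &y);                   // Cursor position in screen coordinates
        return (Vector2){ (float)x, (float)y };
    }

    static int IsMouseButtonDown(int button)
    {
        return (glfwGetMouseButton(window, button) == GLFW_PRESS);
    }

The drawing stubs (MeasureText(), DrawText(), DrawRectangleRec(), ...) would be mapped to the host renderer in the same way.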

+ 16 - 11
src/raylib.h

@@ -64,7 +64,7 @@
 //#define PLATFORM_ANDROID      // Android device
 //#define PLATFORM_RPI          // Raspberry Pi
 //#define PLATFORM_WEB          // HTML5 (emscripten, asm.js)
-//#define PLATFORM_OCULUS       // Oculus Rift CV1
+//#define RLGL_OCULUS_SUPPORT   // Oculus Rift CV1 (complementary to PLATFORM_DESKTOP)
 
 // Security check in case no PLATFORM_* defined
 #if !defined(PLATFORM_DESKTOP) && !defined(PLATFORM_ANDROID) && !defined(PLATFORM_RPI) && !defined(PLATFORM_WEB)
@@ -545,12 +545,6 @@ void InitWindow(int width, int height, struct android_app *state);  // Init Andr
 void InitWindow(int width, int height, const char *title);  // Initialize Window and OpenGL Graphics
 #endif
 
-#if defined(PLATFORM_OCULUS)
-void InitOculusDevice(void);                                // Init Oculus Rift device
-void CloseOculusDevice(void);                               // Close Oculus Rift device
-void UpdateOculusTracking(void);                            // Update Oculus Rift tracking (position and orientation)
-#endif
-
 void CloseWindow(void);                                     // Close Window and Terminate Context
 bool WindowShouldClose(void);                               // Detect if KEY_ESCAPE pressed or Close icon pressed
 bool IsWindowMinimized(void);                               // Detect if window has been minimized (or lost focus)
@@ -644,13 +638,13 @@ bool IsButtonReleased(int button);                      // Detect if an android
 //------------------------------------------------------------------------------------
 // Gestures and Touch Handling Functions (Module: gestures)
 //------------------------------------------------------------------------------------
+void SetGesturesEnabled(unsigned int gestureFlags);     // Enable a set of gestures using flags
+bool IsGestureDetected(int gesture);                    // Check if a gesture has been detected
 void ProcessGestureEvent(GestureEvent event);           // Process gesture event and translate it into gestures
 void UpdateGestures(void);                              // Update gestures detected (called automatically in PollInputEvents())
-bool IsGestureDetected(int gesture);                    // Check if a gesture have been detected
-int GetGestureDetected(void);                           // Get latest detected gesture
-void SetGesturesEnabled(unsigned int gestureFlags);     // Enable a set of gestures using flags
-int GetTouchPointsCount(void);                          // Get touch points count
 
+int GetTouchPointsCount(void);                          // Get touch points count
+int GetGestureDetected(void);                           // Get latest detected gesture
 float GetGestureHoldDuration(void);                     // Get gesture hold time in milliseconds
 Vector2 GetGestureDragVector(void);                     // Get gesture drag vector
 float GetGestureDragAngle(void);                        // Get gesture drag angle
@@ -852,6 +846,17 @@ void EndBlendMode(void);                                            // End blend
 Light CreateLight(int type, Vector3 position, Color diffuse);       // Create a new light, initialize it and add to pool
 void DestroyLight(Light light);                                     // Destroy a light and take it out of the list
 
+//------------------------------------------------------------------------------------
+// Oculus Rift CV1 Functions (Module: rlgl)
+// NOTE: These functions are useless when using OpenGL 1.1
+//------------------------------------------------------------------------------------
+void InitOculusDevice(void);                // Init Oculus Rift device
+void CloseOculusDevice(void);               // Close Oculus Rift device
+void UpdateOculusTracking(void);            // Update Oculus Rift tracking (position and orientation)
+void SetOculusMatrix(int eye);              // Set internal projection and modelview matrix depending on eyes tracking data
+void BeginOculusDrawing(void);              // Begin Oculus drawing configuration
+void EndOculusDrawing(void);                // End Oculus drawing process (and desktop mirror)
+
 //------------------------------------------------------------------------------------
 // Audio Loading and Playing Functions (Module: audio)
 //------------------------------------------------------------------------------------
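
Regarding the Oculus declarations added to raylib.h above: since the old PLATFORM_OCULUS define is gone, portable application code can guard its VR path on the new flag instead. A minimal sketch, assuming RLGL_OCULUS_SUPPORT is defined at compile time for both rlgl and the application:

    // Sketch: only take the VR path when Oculus support was compiled in
    #if defined(RLGL_OCULUS_SUPPORT)
        InitOculusDevice();                 // Init HMD session, buffers and mirror
    #endif

    // ... regular initialization continues either way ...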

+ 378 - 1
src/rlgl.c

@@ -72,6 +72,10 @@
     #include "standard_shader.h"    // Standard shader to embed
 #endif
 
+#if defined(RLGL_OCULUS_SUPPORT)
+    #include "external/OculusSDK/LibOVR/Include/OVR_CAPI_GL.h"    // Oculus SDK for OpenGL
+#endif
+
 //----------------------------------------------------------------------------------
 // Defines and Macros
 //----------------------------------------------------------------------------------
@@ -172,6 +176,32 @@ typedef struct {
     //Guint fboId;
 } DrawCall;
 
+#if defined(RLGL_OCULUS_SUPPORT)
+typedef struct OculusBuffer {
+    ovrTextureSwapChain textureChain;
+    GLuint depthId;
+    GLuint fboId;
+    int width;
+    int height;
+} OculusBuffer;
+
+typedef struct OculusMirror {
+    ovrMirrorTexture texture;
+    GLuint fboId;
+    int width;
+    int height;
+} OculusMirror;
+
+typedef struct OculusLayer {
+    ovrViewScaleDesc viewScaleDesc;
+    ovrLayerEyeFov eyeLayer;      // layer 0
+    //ovrLayerQuad quadLayer;     // TODO: layer 1: '2D' quad for GUI
+    Matrix eyeProjections[2];
+    int width;
+    int height;
+} OculusLayer;
+#endif
+
 //----------------------------------------------------------------------------------
 // Global Variables Definition
 //----------------------------------------------------------------------------------
@@ -221,6 +251,17 @@ static Light lights[MAX_LIGHTS];            // Lights pool
 static int lightsCount;                     // Counts current enabled physic objects
 #endif
 
+#if defined(RLGL_OCULUS_SUPPORT)
+// OVR device variables
+static ovrSession session;              // Oculus session (pointer to ovrHmdStruct)
+static ovrHmdDesc hmdDesc;              // Oculus device descriptor parameters
+static ovrGraphicsLuid luid;            // Oculus locally unique identifier for the program (64 bit)
+static OculusLayer layer;               // Oculus drawing layer (similar to a Photoshop layer)
+static OculusBuffer buffer;             // Oculus internal buffers (texture chain and fbo)
+static OculusMirror mirror;             // Oculus mirror texture and fbo
+static unsigned int frameIndex = 0;     // Oculus frames counter, used to discard frames from chain
+#endif
+
 // Compressed textures support flags
 static bool texCompDXTSupported = false;    // DDS texture compression support
 static bool npotSupported = false;          // NPOT textures full support
@@ -261,6 +302,16 @@ static void SetShaderLights(Shader shader); // Sets shader uniform values for li
 static char *ReadTextFile(const char *fileName);
 #endif
 
+#if defined(RLGL_OCULUS_SUPPORT)            // Oculus Rift functions
+static OculusBuffer LoadOculusBuffer(ovrSession session, int width, int height);    // Load Oculus required buffers
+static void UnloadOculusBuffer(ovrSession session, OculusBuffer buffer);            // Unload Oculus texture buffers
+static OculusMirror LoadOculusMirror(ovrSession session, int width, int height);    // Load Oculus mirror buffers
+static void UnloadOculusMirror(ovrSession session, OculusMirror mirror);            // Unload Oculus mirror buffers
+static void BlitOculusMirror(ovrSession session, OculusMirror mirror);              // Copy Oculus screen buffer to mirror texture
+static OculusLayer InitOculusLayer(ovrSession session);                             // Init Oculus layer (similar to a Photoshop layer)
+static Matrix FromOvrMatrix(ovrMatrix4f ovrM);  // Convert from Oculus ovrMatrix4f struct to raymath Matrix struct
+#endif
+
 #if defined(GRAPHICS_API_OPENGL_11)
 static int GenerateMipmaps(unsigned char *data, int baseWidth, int baseHeight);
 static Color *GenNextMipmap(Color *srcData, int srcWidth, int srcHeight);
@@ -1153,6 +1204,23 @@ void rlglInitGraphics(int offsetX, int offsetY, int width, int height)
     TraceLog(INFO, "OpenGL graphic device initialized successfully");
 }
 
+// Load OpenGL extensions
+// NOTE: External loader function could be passed as a pointer
+void rlglLoadExtensions(void *loader)
+{
+#if defined(GRAPHICS_API_OPENGL_33)
+    // NOTE: glad is generated and contains only required OpenGL 3.3 Core extensions
+    if (!gladLoadGLLoader((GLADloadproc)loader)) TraceLog(WARNING, "GLAD: Cannot load OpenGL extensions");
+    else TraceLog(INFO, "GLAD: OpenGL extensions loaded successfully");
+
+    if (GLAD_GL_VERSION_3_3) TraceLog(INFO, "OpenGL 3.3 Core profile supported");
+    else TraceLog(ERROR, "OpenGL 3.3 Core profile not supported");
+
+    // With GLAD, we can check if an extension is supported using the GLAD_GL_xxx booleans
+    //if (GLAD_GL_ARB_vertex_array_object) // Use GL_ARB_vertex_array_object
+#endif
+}
+
 // Get world coordinates from screen coordinates
 Vector3 rlglUnproject(Vector3 source, Matrix proj, Matrix view)
 {
@@ -1184,11 +1252,13 @@ unsigned int rlglLoadTexture(void *data, int width, int height, int textureForma
     GLuint id = 0;
     
     // Check texture format support by OpenGL 1.1 (compressed textures not supported)
-    if ((rlGetVersion() == OPENGL_11) && (textureFormat >= 8))
+#if defined(GRAPHICS_API_OPENGL_11) 
+    if (textureFormat >= 8)
     {
         TraceLog(WARNING, "OpenGL 1.1 does not support GPU compressed texture formats");
         return id;
     }
+#endif
     
     if ((!texCompDXTSupported) && ((textureFormat == COMPRESSED_DXT1_RGB) || (textureFormat == COMPRESSED_DXT1_RGBA) ||
         (textureFormat == COMPRESSED_DXT3_RGBA) || (textureFormat == COMPRESSED_DXT5_RGBA)))
@@ -2391,6 +2461,132 @@ void DestroyLight(Light light)
 #endif
 }
 
+#if defined(RLGL_OCULUS_SUPPORT)
+// Init Oculus Rift device
+// NOTE: Device initialization should be done before window creation?
+void InitOculusDevice(void)
+{
+    // Initialize Oculus device
+    ovrResult result = ovr_Initialize(NULL);
+    if (OVR_FAILURE(result)) TraceLog(WARNING, "OVR: Could not initialize Oculus device");
+
+    result = ovr_Create(&session, &luid);
+    if (OVR_FAILURE(result))
+    {
+        TraceLog(WARNING, "OVR: Could not create Oculus session");
+        ovr_Shutdown();
+    }
+
+    hmdDesc = ovr_GetHmdDesc(session);
+    
+    TraceLog(INFO, "OVR: Product Name: %s", hmdDesc.ProductName);
+    TraceLog(INFO, "OVR: Manufacturer: %s", hmdDesc.Manufacturer);
+    TraceLog(INFO, "OVR: Product ID: %i", hmdDesc.ProductId);
+    TraceLog(INFO, "OVR: Product Type: %i", hmdDesc.Type);
+    //TraceLog(INFO, "OVR: Serial Number: %s", hmdDesc.SerialNumber);
+    TraceLog(INFO, "OVR: Resolution: %ix%i", hmdDesc.Resolution.w, hmdDesc.Resolution.h);
+    
+    // NOTE: Oculus mirror is set to defined screenWidth and screenHeight...
+    // ...ideally, it should be (hmdDesc.Resolution.w/2, hmdDesc.Resolution.h/2)
+    
+    // Initialize Oculus Buffers
+    layer = InitOculusLayer(session);   
+    buffer = LoadOculusBuffer(session, layer.width, layer.height);
+    mirror = LoadOculusMirror(session, hmdDesc.Resolution.w/2, hmdDesc.Resolution.h/2);     // NOTE: hardcoded...
+    layer.eyeLayer.ColorTexture[0] = buffer.textureChain;     //SetOculusLayerTexture(eyeLayer, buffer.textureChain);
+    
+    // Recenter OVR tracking origin
+    ovr_RecenterTrackingOrigin(session);
+}
+
+// Close Oculus Rift device
+void CloseOculusDevice(void)
+{
+    UnloadOculusMirror(session, mirror);    // Unload Oculus mirror buffer
+    UnloadOculusBuffer(session, buffer);    // Unload Oculus texture buffers
+
+    ovr_Destroy(session);   // Free Oculus session data
+    ovr_Shutdown();         // Close Oculus device connection
+}
+
+// Update Oculus Rift tracking (position and orientation)
+void UpdateOculusTracking(void)
+{
+    frameIndex++;
+
+    ovrPosef eyePoses[2];
+    ovr_GetEyePoses(session, frameIndex, ovrTrue, layer.viewScaleDesc.HmdToEyeOffset, eyePoses, &layer.eyeLayer.SensorSampleTime);
+    
+    layer.eyeLayer.RenderPose[0] = eyePoses[0];
+    layer.eyeLayer.RenderPose[1] = eyePoses[1];
+}
+
+void SetOculusMatrix(int eye)
+{
+    rlViewport(layer.eyeLayer.Viewport[eye].Pos.x, layer.eyeLayer.Viewport[eye].Pos.y, layer.eyeLayer.Viewport[eye].Size.w, layer.eyeLayer.Viewport[eye].Size.h);
+
+    Quaternion eyeRPose = (Quaternion){ layer.eyeLayer.RenderPose[eye].Orientation.x, 
+                                        layer.eyeLayer.RenderPose[eye].Orientation.y, 
+                                        layer.eyeLayer.RenderPose[eye].Orientation.z, 
+                                        layer.eyeLayer.RenderPose[eye].Orientation.w };
+    QuaternionInvert(&eyeRPose);
+    Matrix eyeOrientation = QuaternionToMatrix(eyeRPose);
+    Matrix eyeTranslation = MatrixTranslate(-layer.eyeLayer.RenderPose[eye].Position.x, 
+                                            -layer.eyeLayer.RenderPose[eye].Position.y, 
+                                            -layer.eyeLayer.RenderPose[eye].Position.z);
+
+    Matrix eyeView = MatrixMultiply(eyeTranslation, eyeOrientation);
+    Matrix modelEyeView = MatrixMultiply(modelview, eyeView);  // Using internal camera modelview matrix
+
+    SetMatrixModelview(modelEyeView);
+    SetMatrixProjection(layer.eyeProjections[eye]);
+}
+
+void BeginOculusDrawing(void)
+{
+    GLuint currentTexId;
+    int currentIndex;
+    
+    ovr_GetTextureSwapChainCurrentIndex(session, buffer.textureChain, &currentIndex);
+    ovr_GetTextureSwapChainBufferGL(session, buffer.textureChain, currentIndex, &currentTexId);
+
+    glBindFramebuffer(GL_DRAW_FRAMEBUFFER, buffer.fboId);
+    glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, currentTexId, 0);
+    //glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, buffer.depthId, 0);    // Already bound
+
+    //glViewport(0, 0, buffer.width, buffer.height);        // Useful if rendering to separate framebuffers (every eye)
+    //glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);   // Same as rlClearScreenBuffers()
+    
+    // NOTE: If your application is configured to treat the texture as a linear format (e.g. GL_RGBA) 
+    // and performs linear-to-gamma conversion in GLSL or does not care about gamma-correction, then:
+    //     - Require OculusBuffer format to be OVR_FORMAT_R8G8B8A8_UNORM_SRGB
+    //     - Do NOT enable GL_FRAMEBUFFER_SRGB
+    //glEnable(GL_FRAMEBUFFER_SRGB);
+    
+    rlClearScreenBuffers();             // Clear current framebuffer(s)
+}
+
+void EndOculusDrawing(void)
+{
+    glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0);
+    glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
+    
+    ovr_CommitTextureSwapChain(session, buffer.textureChain);
+    
+    ovrLayerHeader *layers = &layer.eyeLayer.Header;
+    ovr_SubmitFrame(session, frameIndex, &layer.viewScaleDesc, &layers, 1);
+
+    // Blit mirror texture to back buffer
+    BlitOculusMirror(session, mirror);
+
+    // Get session status information
+    ovrSessionStatus sessionStatus;
+    ovr_GetSessionStatus(session, &sessionStatus);
+    if (sessionStatus.ShouldQuit) TraceLog(WARNING, "OVR: Session should quit...");
+    if (sessionStatus.ShouldRecenter) ovr_RecenterTrackingOrigin(session);
+}
+#endif
+
 //----------------------------------------------------------------------------------
 // Module specific Functions Definition
 //----------------------------------------------------------------------------------
@@ -3371,6 +3567,187 @@ static Color *GenNextMipmap(Color *srcData, int srcWidth, int srcHeight)
 }
 #endif
 
+#if defined(RLGL_OCULUS_SUPPORT)
+// Load Oculus required buffers: texture swap chain, fbo and depth buffer
+static OculusBuffer LoadOculusBuffer(ovrSession session, int width, int height)
+{
+    OculusBuffer buffer;
+    buffer.width = width;
+    buffer.height = height;
+    
+    // Create OVR texture chain
+    ovrTextureSwapChainDesc desc = {};
+    desc.Type = ovrTexture_2D;
+    desc.ArraySize = 1;
+    desc.Width = width;
+    desc.Height = height;
+    desc.MipLevels = 1;
+    desc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;   // Requires glEnable(GL_FRAMEBUFFER_SRGB);
+    desc.SampleCount = 1;
+    desc.StaticImage = ovrFalse;
+
+    ovrResult result = ovr_CreateTextureSwapChainGL(session, &desc, &buffer.textureChain);
+    
+    if (!OVR_SUCCESS(result)) TraceLog(WARNING, "OVR: Failed to create swap textures buffer");
+
+    int textureCount = 0;
+    ovr_GetTextureSwapChainLength(session, buffer.textureChain, &textureCount);
+    
+    if (!OVR_SUCCESS(result) || !textureCount) TraceLog(WARNING, "OVR: Unable to count swap chain textures");
+
+    for (int i = 0; i < textureCount; ++i)
+    {
+        GLuint chainTexId;
+        ovr_GetTextureSwapChainBufferGL(session, buffer.textureChain, i, &chainTexId);
+        glBindTexture(GL_TEXTURE_2D, chainTexId);
+        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+    }
+    
+    glBindTexture(GL_TEXTURE_2D, 0);
+    
+    /*
+    // Setup framebuffer object (using depth texture)
+    glGenFramebuffers(1, &buffer.fboId);
+    glGenTextures(1, &buffer.depthId);
+    glBindTexture(GL_TEXTURE_2D, buffer.depthId);
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+    glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT16, buffer.width, buffer.height, 0, GL_DEPTH_COMPONENT, GL_UNSIGNED_INT, NULL);
+    */
+    
+    // Setup framebuffer object (using depth renderbuffer)
+    glGenFramebuffers(1, &buffer.fboId);
+    glGenRenderbuffers(1, &buffer.depthId);
+    glBindFramebuffer(GL_DRAW_FRAMEBUFFER, buffer.fboId);
+    glBindRenderbuffer(GL_RENDERBUFFER, buffer.depthId);
+    glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT16, buffer.width, buffer.height);
+    glBindRenderbuffer(GL_RENDERBUFFER, 0);
+    glFramebufferRenderbuffer(GL_DRAW_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, buffer.depthId);
+    glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
+
+    return buffer;
+}
+
+// Unload Oculus texture buffers
+static void UnloadOculusBuffer(ovrSession session, OculusBuffer buffer)
+{
+    if (buffer.textureChain)
+    {
+        ovr_DestroyTextureSwapChain(session, buffer.textureChain);
+        buffer.textureChain = NULL;
+    }
+
+    if (buffer.depthId != 0) glDeleteTextures(1, &buffer.depthId);
+    if (buffer.fboId != 0) glDeleteFramebuffers(1, &buffer.fboId);
+}
+
+// Load Oculus mirror buffers
+static OculusMirror LoadOculusMirror(ovrSession session, int width, int height)
+{
+    OculusMirror mirror;
+    mirror.width = width;
+    mirror.height = height;
+    
+    ovrMirrorTextureDesc mirrorDesc;
+    memset(&mirrorDesc, 0, sizeof(mirrorDesc));
+    mirrorDesc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
+    mirrorDesc.Width = mirror.width;
+    mirrorDesc.Height = mirror.height;
+    
+    if (!OVR_SUCCESS(ovr_CreateMirrorTextureGL(session, &mirrorDesc, &mirror.texture))) TraceLog(WARNING, "Could not create mirror texture");
+
+    glGenFramebuffers(1, &mirror.fboId);
+
+    return mirror;
+}
+
+// Unload Oculus mirror buffers
+static void UnloadOculusMirror(ovrSession session, OculusMirror mirror)
+{
+    if (mirror.fboId != 0) glDeleteFramebuffers(1, &mirror.fboId);
+    if (mirror.texture) ovr_DestroyMirrorTexture(session, mirror.texture);
+}
+
+// Copy Oculus screen buffer to mirror texture
+static void BlitOculusMirror(ovrSession session, OculusMirror mirror)
+{
+    GLuint mirrorTextureId;
+    
+    ovr_GetMirrorTextureBufferGL(session, mirror.texture, &mirrorTextureId);
+    
+    glBindFramebuffer(GL_READ_FRAMEBUFFER, mirror.fboId);
+    glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, mirrorTextureId, 0);
+    glBlitFramebuffer(0, 0, mirror.width, mirror.height, 0, mirror.height, mirror.width, 0, GL_COLOR_BUFFER_BIT, GL_NEAREST);
+    glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
+}
+
+// Init Oculus layer (similar to a Photoshop layer)
+static OculusLayer InitOculusLayer(ovrSession session)
+{
+    OculusLayer layer = { 0 };
+    
+    layer.viewScaleDesc.HmdSpaceToWorldScaleInMeters = 1.0f;
+
+    memset(&layer.eyeLayer, 0, sizeof(ovrLayerEyeFov));
+    layer.eyeLayer.Header.Type = ovrLayerType_EyeFov;
+    layer.eyeLayer.Header.Flags = ovrLayerFlag_TextureOriginAtBottomLeft;
+
+    ovrEyeRenderDesc eyeRenderDescs[2];
+    
+    for (int eye = 0; eye < 2; eye++)
+    {
+        eyeRenderDescs[eye] = ovr_GetRenderDesc(session, eye, hmdDesc.DefaultEyeFov[eye]);
+        ovrMatrix4f ovrPerspectiveProjection = ovrMatrix4f_Projection(eyeRenderDescs[eye].Fov, 0.01f, 10000.0f, ovrProjection_None); //ovrProjection_ClipRangeOpenGL);
+        layer.eyeProjections[eye] = FromOvrMatrix(ovrPerspectiveProjection);      // NOTE: struct ovrMatrix4f { float M[4][4] } --> struct Matrix
+
+        layer.viewScaleDesc.HmdToEyeOffset[eye] = eyeRenderDescs[eye].HmdToEyeOffset;
+        layer.eyeLayer.Fov[eye] = eyeRenderDescs[eye].Fov;
+        
+        ovrSizei eyeSize = ovr_GetFovTextureSize(session, eye, layer.eyeLayer.Fov[eye], 1.0f);
+        layer.eyeLayer.Viewport[eye].Size = eyeSize;
+        layer.eyeLayer.Viewport[eye].Pos.x = layer.width;
+        layer.eyeLayer.Viewport[eye].Pos.y = 0;
+
+        layer.height = eyeSize.h;     //std::max(renderTargetSize.y, (uint32_t)eyeSize.h);
+        layer.width += eyeSize.w;
+    }
+    
+    return layer;
+}
+
+// Convert from Oculus ovrMatrix4f struct to raymath Matrix struct
+static Matrix FromOvrMatrix(ovrMatrix4f ovrmat)
+{
+    Matrix rmat;
+    
+    rmat.m0 = ovrmat.M[0][0];
+    rmat.m1 = ovrmat.M[1][0];
+    rmat.m2 = ovrmat.M[2][0];
+    rmat.m3 = ovrmat.M[3][0];
+    rmat.m4 = ovrmat.M[0][1];
+    rmat.m5 = ovrmat.M[1][1];
+    rmat.m6 = ovrmat.M[2][1];
+    rmat.m7 = ovrmat.M[3][1];
+    rmat.m8 = ovrmat.M[0][2];
+    rmat.m9 = ovrmat.M[1][2];
+    rmat.m10 = ovrmat.M[2][2];
+    rmat.m11 = ovrmat.M[3][2];
+    rmat.m12 = ovrmat.M[0][3];
+    rmat.m13 = ovrmat.M[1][3];
+    rmat.m14 = ovrmat.M[2][3];
+    rmat.m15 = ovrmat.M[3][3];
+    
+    MatrixTranspose(&rmat);
+    
+    return rmat;
+}
+#endif
+
 #if defined(RLGL_STANDALONE)
 // Output a trace log message
 // NOTE: Expected msgType: (0)Info, (1)Error, (2)Warning
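
One note on the new rlglLoadExtensions() entry point added above: it expects a pointer to the windowing library's GL loader function and has to be called once an OpenGL context is current (typically before rlglInit()). With a GLFW window, as in the oculus_glfw_sample, the call would look roughly like this:

    #include <GLFW/glfw3.h>

    // ... after glfwMakeContextCurrent(window) ...
    rlglLoadExtensions((void *)glfwGetProcAddress);     // Let glad resolve the OpenGL 3.3 Core functions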

+ 11 - 1
src/rlgl.h

@@ -48,7 +48,7 @@
 
 // Choose opengl version here or just define it at compile time: -DGRAPHICS_API_OPENGL_33
 //#define GRAPHICS_API_OPENGL_11     // Only available on PLATFORM_DESKTOP
-//#define GRAPHICS_API_OPENGL_33     // Only available on PLATFORM_DESKTOP
+//#define GRAPHICS_API_OPENGL_33     // Only available on PLATFORM_DESKTOP or PLATFORM_OCULUS
 //#define GRAPHICS_API_OPENGL_ES2    // Only available on PLATFORM_ANDROID or PLATFORM_RPI or PLATFORM_WEB
 
 // Security check in case no GRAPHICS_API_OPENGL_* defined
@@ -296,6 +296,7 @@ void rlglInit(void);                            // Initialize rlgl (shaders, VAO
 void rlglClose(void);                           // De-init rlgl
 void rlglDraw(void);                            // Draw VAO/VBO
 void rlglInitGraphics(int offsetX, int offsetY, int width, int height);  // Initialize Graphics (OpenGL stuff)
+void rlglLoadExtensions(void *loader);          // Load OpenGL extensions
 
 unsigned int rlglLoadTexture(void *data, int width, int height, int textureFormat, int mipmapCount);    // Load texture in GPU
 RenderTexture2D rlglLoadRenderTexture(int width, int height);   // Load a texture to be used for rendering (fbo with color and depth attachments)
@@ -346,6 +347,15 @@ void DestroyLight(Light light);                                     // Destroy a
 void TraceLog(int msgType, const char *text, ...);
 #endif
 
+#if defined(RLGL_OCULUS_SUPPORT)
+void InitOculusDevice(void);                // Init Oculus Rift device
+void CloseOculusDevice(void);               // Close Oculus Rift device
+void UpdateOculusTracking(void);            // Update Oculus Rift tracking (position and orientation)
+void SetOculusMatrix(int eye);              // Set internal projection and modelview matrix depending on eyes tracking data
+void BeginOculusDrawing(void);              // Begin Oculus drawing configuration
+void EndOculusDrawing(void);                // End Oculus drawing process (and desktop mirror)
+#endif
+
 #ifdef __cplusplus
 }
 #endif