
Oculus SDK 1.3 + GLFW3 sample -NOT WORKING-

raysan5 9 years ago
parent
commit
173529e048

+ 4 - 1
.gitignore

@@ -61,4 +61,7 @@ xcschememanagement.plist
 xcuserdata/
 DerivedData/
 *.dll
-src/libraylib.a
+src/libraylib.a
+
+# oculus example
+!examples/oculus_glfw_sample/LibOVRRT32_1.dll

Binary
examples/oculus_glfw_sample/LibOVRRT32_1.dll


+ 196 - 0
examples/oculus_glfw_sample/OculusSDK/LibOVR/Include/Extras/OVR_CAPI_Util.h

@@ -0,0 +1,196 @@
+/********************************************************************************//**
+\file      OVR_CAPI_Util.h
+\brief     This header provides LibOVR utility function declarations
+\copyright Copyright 2015-2016 Oculus VR, LLC All Rights reserved.
+*************************************************************************************/
+
+#ifndef OVR_CAPI_Util_h
+#define OVR_CAPI_Util_h
+
+
+#include "../OVR_CAPI.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/// Enumerates modifications to the projection matrix based on the application's needs.
+///
+/// \see ovrMatrix4f_Projection
+///
+typedef enum ovrProjectionModifier_
+{
+    /// Use for generating a default projection matrix that is:
+    /// * Right-handed.
+    /// * Near depth values stored in the depth buffer are smaller than far depth values.
+    /// * Both near and far are explicitly defined.
+    /// * With a clipping range that is (0 to w).
+    ovrProjection_None = 0x00,
+
+    /// Enable if using left-handed transformations in your application.
+    ovrProjection_LeftHanded = 0x01,
+
+    /// After the projection transform is applied, far values stored in the depth buffer will be less than closer depth values.
+    /// NOTE: Enable only if the application is using a floating-point depth buffer for proper precision.
+    ovrProjection_FarLessThanNear = 0x02,
+
+    /// When this flag is used, the zfar value pushed into ovrMatrix4f_Projection() will be ignored
+    /// NOTE: Enable only if ovrProjection_FarLessThanNear is also enabled where the far clipping plane will be pushed to infinity.
+    ovrProjection_FarClipAtInfinity = 0x04,
+
+    /// Enable if the application is rendering with OpenGL and expects a projection matrix with a clipping range of (-w to w).
+    /// Ignore this flag if your application already handles the conversion from D3D range (0 to w) to OpenGL.
+    ovrProjection_ClipRangeOpenGL = 0x08,
+} ovrProjectionModifier;
+
+
+/// Return values for ovr_Detect.
+///
+/// \see ovr_Detect
+///
+typedef struct OVR_ALIGNAS(8) ovrDetectResult_
+{
+    /// Is ovrFalse when the Oculus Service is not running.
+    ///   This means that the Oculus Service is either uninstalled or stopped.
+    ///   IsOculusHMDConnected will be ovrFalse in this case.
+    /// Is ovrTrue when the Oculus Service is running.
+    ///   This means that the Oculus Service is installed and running.
+    ///   IsOculusHMDConnected will reflect the state of the HMD.
+    ovrBool IsOculusServiceRunning;
+
+    /// Is ovrFalse when an Oculus HMD is not detected.
+    ///   If the Oculus Service is not running, this will be ovrFalse.
+    /// Is ovrTrue when an Oculus HMD is detected.
+    ///   This implies that the Oculus Service is also installed and running.
+    ovrBool IsOculusHMDConnected;
+
+    OVR_UNUSED_STRUCT_PAD(pad0, 6) ///< \internal struct padding
+
+} ovrDetectResult;
+
+OVR_STATIC_ASSERT(sizeof(ovrDetectResult) == 8, "ovrDetectResult size mismatch");
+
+
+/// Detects Oculus Runtime and Device Status
+///
+/// Checks for Oculus Runtime and Oculus HMD device status without loading the LibOVRRT
+/// shared library.  This may be called before ovr_Initialize() to help decide whether or
+/// not to initialize LibOVR.
+///
+/// \param[in] timeoutMilliseconds Specifies a timeout to wait for HMD to be attached or 0 to poll.
+///
+/// \return Returns an ovrDetectResult object indicating the result of detection.
+///
+/// \see ovrDetectResult
+///
+OVR_PUBLIC_FUNCTION(ovrDetectResult) ovr_Detect(int timeoutMilliseconds);
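A minimal usage sketch (not from the SDK headers, assuming this header is included): probe for the runtime and an attached HMD before calling ovr_Initialize(), as the doc comment above suggests.

    // Hypothetical helper for illustration only.
    static bool IsOculusAvailable(void)
    {
        ovrDetectResult detect = ovr_Detect(0);   // 0 = poll, do not block
        return detect.IsOculusServiceRunning && detect.IsOculusHMDConnected;
    }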
+
+// On the Windows platform,
+#ifdef _WIN32
+    /// This is the Windows Named Event name that is used to check for HMD connected state.
+    #define OVR_HMD_CONNECTED_EVENT_NAME L"OculusHMDConnected"
+#endif // _WIN32
+
+
+/// Used to generate projection from ovrEyeDesc::Fov.
+///
+/// \param[in] fov Specifies the ovrFovPort to use.
+/// \param[in] znear Distance to near Z limit.
+/// \param[in] zfar Distance to far Z limit.
+/// \param[in] projectionModFlags A combination of the ovrProjectionModifier flags.
+///
+/// \return Returns the calculated projection matrix.
+/// 
+/// \see ovrProjectionModifier
+///
+OVR_PUBLIC_FUNCTION(ovrMatrix4f) ovrMatrix4f_Projection(ovrFovPort fov, float znear, float zfar, unsigned int projectionModFlags);
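A short sketch of assumed typical usage (not from the sample): building an OpenGL-friendly projection matrix for the left eye, where 'hmdDesc' is assumed to come from ovr_GetHmdDesc(session).

    ovrMatrix4f proj = ovrMatrix4f_Projection(hmdDesc.DefaultEyeFov[ovrEye_Left],
                                              0.1f, 1000.0f,
                                              ovrProjection_ClipRangeOpenGL);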
+
+
+/// Extracts the required data from the result of ovrMatrix4f_Projection.
+///
+/// \param[in] projection Specifies the projection matrix from which to extract ovrTimewarpProjectionDesc.
+/// \param[in] projectionModFlags A combination of the ovrProjectionModifier flags.
+/// \return Returns the extracted ovrTimewarpProjectionDesc.
+/// \see ovrTimewarpProjectionDesc
+///
+OVR_PUBLIC_FUNCTION(ovrTimewarpProjectionDesc) ovrTimewarpProjectionDesc_FromProjection(ovrMatrix4f projection, unsigned int projectionModFlags);
+
+
+/// Generates an orthographic sub-projection.
+///
+/// Used for 2D rendering, Y is down.
+///
+/// \param[in] projection The perspective matrix that the orthographic matrix is derived from.
+/// \param[in] orthoScale Equal to 1.0f / pixelsPerTanAngleAtCenter.
+/// \param[in] orthoDistance Equal to the distance from the camera in meters, such as 0.8m.
+/// \param[in] HmdToEyeOffsetX Specifies the offset of the eye from the center.
+///
+/// \return Returns the calculated projection matrix.
+///
+OVR_PUBLIC_FUNCTION(ovrMatrix4f) ovrMatrix4f_OrthoSubProjection(ovrMatrix4f projection, ovrVector2f orthoScale,
+                                                                float orthoDistance, float HmdToEyeOffsetX);
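A small sketch (assumed usage): deriving a 2D overlay matrix from the perspective matrix above. 'proj' and the per-eye 'pixelsPerTanAngle' values are assumptions here (e.g. taken from the eye render description).

    ovrVector2f orthoScale = { 1.0f / pixelsPerTanAngle.x, 1.0f / pixelsPerTanAngle.y };
    ovrMatrix4f ortho = ovrMatrix4f_OrthoSubProjection(proj, orthoScale,
                                                       0.8f,   /* orthoDistance in meters */
                                                       0.0f);  /* HmdToEyeOffsetX */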
+
+
+
+/// Computes offset eye poses based on headPose returned by ovrTrackingState.
+///
+/// \param[in] headPose Indicates the HMD position and orientation to use for the calculation.
+/// \param[in] HmdToEyeOffset Can be ovrEyeRenderDesc.HmdToEyeOffset returned from 
+///            ovr_GetRenderDesc. For monoscopic rendering, use a vector that is the average 
+///            of the two vectors for both eyes.
+/// \param[out] outEyePoses If outEyePoses are used for rendering, they should be passed to 
+///             ovr_SubmitFrame in ovrLayerEyeFov::RenderPose or ovrLayerEyeFovDepth::RenderPose.
+///
+OVR_PUBLIC_FUNCTION(void) ovr_CalcEyePoses(ovrPosef headPose,
+                                           const ovrVector3f HmdToEyeOffset[2],
+                                           ovrPosef outEyePoses[2]);
+
+
+/// Returns the predicted head pose in outHmdTrackingState and offset eye poses in outEyePoses.
+///
+/// This is a thread-safe function where the caller should increment frameIndex with every frame
+/// and pass that index where applicable to functions called on the rendering thread.
+/// Assuming outEyePoses are used for rendering, they should be passed as part of ovrLayerEyeFov.
+/// The caller does not need to worry about applying HmdToEyeOffset to the returned outEyePoses variables.
+///
+/// \param[in]  hmd Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in]  frameIndex Specifies the targeted frame index, or 0 to refer to one frame after 
+///             the last time ovr_SubmitFrame was called.
+/// \param[in]  HmdToEyeOffset Can be ovrEyeRenderDesc.HmdToEyeOffset returned from 
+///             ovr_GetRenderDesc. For monoscopic rendering, use a vector that is the average 
+///             of the two vectors for both eyes.
+/// \param[in]  latencyMarker Specifies that this call is the point in time where
+///             the "App-to-Mid-Photon" latency timer starts from. If a given ovrLayer
+///             provides "SensorSampleTimestamp", that will override the value stored here.
+/// \param[out] outEyePoses The predicted eye poses.
+/// \param[out] outSensorSampleTime The time when this function was called. May be NULL, in which case it is ignored.
+///
+OVR_PUBLIC_FUNCTION(void) ovr_GetEyePoses(ovrSession session, long long frameIndex, ovrBool latencyMarker,
+                                             const ovrVector3f HmdToEyeOffset[2],
+                                             ovrPosef outEyePoses[2],
+                                             double* outSensorSampleTime);
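A per-frame sketch (assumed usage, not from the sample): querying predicted eye poses with the offsets from ovr_GetRenderDesc(). 'session', 'frameIndex' and 'eyeRenderDesc[2]' are assumed to exist.

    ovrVector3f hmdToEyeOffset[2] = { eyeRenderDesc[0].HmdToEyeOffset,
                                      eyeRenderDesc[1].HmdToEyeOffset };
    ovrPosef    eyePoses[2];
    double      sensorSampleTime;
    ovr_GetEyePoses(session, frameIndex, ovrTrue, hmdToEyeOffset, eyePoses, &sensorSampleTime);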
+
+
+
+/// Tracking poses provided by the SDK come in a right-handed coordinate system. If an application
+/// is passing in ovrProjection_LeftHanded into ovrMatrix4f_Projection, then it should also use
+/// this function to flip the HMD tracking poses to be left-handed.
+///
+/// While this utility function is intended to convert a right-handed ovrPosef into a left-handed
+/// coordinate system, it will also work for converting left-handed to right-handed since the
+/// flip operation is the same for both cases.
+///
+/// \param[in]  inPose The pose to convert, given in a right-handed coordinate system.
+/// \param[out] outPose The resulting left-handed pose (may be the same pointer as inPose).
+///
+OVR_PUBLIC_FUNCTION(void) ovrPosef_FlipHandedness(const ovrPosef* inPose, ovrPosef* outPose);
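A brief sketch (assumed usage): the note above allows in-place conversion, so a pose returned by the SDK (such as 'eyePoses' from the previous sketch) can be flipped for a left-handed application like this.

    ovrPosef pose = eyePoses[ovrEye_Left];
    ovrPosef_FlipHandedness(&pose, &pose);   // in-place flip is permitted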
+
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+
+#endif // Header include guard

+ 3785 - 0
examples/oculus_glfw_sample/OculusSDK/LibOVR/Include/Extras/OVR_Math.h

@@ -0,0 +1,3785 @@
+/********************************************************************************//**
+\file      OVR_Math.h
+\brief     Implementation of 3D primitives such as vectors, matrices.
+\copyright Copyright 2015 Oculus VR, LLC All Rights reserved.
+*************************************************************************************/
+
+#ifndef OVR_Math_h
+#define OVR_Math_h
+
+
+// This file is intended to be independent of the rest of LibOVR and LibOVRKernel and thus 
+// has no #include dependencies on either.
+
+#include <math.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <float.h>
+#include "../OVR_CAPI.h" // Currently required due to a dependence on the ovrFovPort_ declaration.
+
+#if defined(_MSC_VER)
+    #pragma warning(push)
+    #pragma warning(disable: 4127) // conditional expression is constant
+#endif
+
+
+#if defined(_MSC_VER)
+    #define OVRMath_sprintf sprintf_s
+#else
+    #define OVRMath_sprintf snprintf
+#endif
+
+
+//-------------------------------------------------------------------------------------
+// ***** OVR_MATH_DEBUG_BREAK
+//
+// Independent debug break implementation for OVR_Math.h.
+
+#if !defined(OVR_MATH_DEBUG_BREAK)
+    #if defined(_DEBUG)
+        #if defined(_MSC_VER)
+            #define OVR_MATH_DEBUG_BREAK __debugbreak()
+        #else
+            #define OVR_MATH_DEBUG_BREAK __builtin_trap()
+        #endif
+    #else
+        #define OVR_MATH_DEBUG_BREAK ((void)0)
+    #endif
+#endif
+
+
+//-------------------------------------------------------------------------------------
+// ***** OVR_MATH_ASSERT
+//
+// Independent OVR_MATH_ASSERT implementation for OVR_Math.h.
+
+#if !defined(OVR_MATH_ASSERT)
+    #if defined(_DEBUG)
+        #define OVR_MATH_ASSERT(p) if (!(p)) { OVR_MATH_DEBUG_BREAK; }
+    #else
+        #define OVR_MATH_ASSERT(p) ((void)0)
+    #endif
+#endif
+
+
+//-------------------------------------------------------------------------------------
+// ***** OVR_MATH_STATIC_ASSERT
+//
+// Independent OVR_MATH_STATIC_ASSERT implementation for OVR_Math.h.
+
+#if !defined(OVR_MATH_STATIC_ASSERT)
+    #if defined(__cplusplus) && ((defined(_MSC_VER) && (_MSC_VER >= 1600)) || defined(__GXX_EXPERIMENTAL_CXX0X__) || (__cplusplus >= 201103L))
+        #define OVR_MATH_STATIC_ASSERT static_assert
+    #else
+        #if !defined(OVR_SA_UNUSED)
+            #if defined(__GNUC__) || defined(__clang__)
+                #define OVR_SA_UNUSED __attribute__((unused))
+            #else
+                #define OVR_SA_UNUSED
+            #endif
+            #define OVR_SA_PASTE(a,b) a##b
+            #define OVR_SA_HELP(a,b)  OVR_SA_PASTE(a,b)
+        #endif
+
+        #define OVR_MATH_STATIC_ASSERT(expression, msg) typedef char OVR_SA_HELP(compileTimeAssert, __LINE__) [((expression) != 0) ? 1 : -1] OVR_SA_UNUSED
+    #endif
+#endif
+
+
+
+namespace OVR {
+
+template<class T>
+const T OVRMath_Min(const T a, const T b)
+{ return (a < b) ? a : b; }
+
+template<class T>
+const T OVRMath_Max(const T a, const T b)
+{ return (b < a) ? a : b; }
+
+template<class T>
+void OVRMath_Swap(T& a, T& b) 
+{  T temp(a); a = b; b = temp; }
+
+
+//-------------------------------------------------------------------------------------
+// ***** Constants for 3D world/axis definitions.
+
+// Definitions of axes for coordinate and rotation conversions.
+enum Axis
+{
+    Axis_X = 0, Axis_Y = 1, Axis_Z = 2
+};
+
+// RotateDirection describes the rotation direction around an axis, interpreted as follows:
+//  CW  - Clockwise while looking "down" from positive axis towards the origin.
+//  CCW - Counter-clockwise while looking from the positive axis towards the origin,
+//        which is in the negative axis direction.
+//  CCW is the default for the RHS coordinate system. Oculus standard RHS coordinate
+//  system defines Y up, X right, and Z back (pointing out from the screen). In this
+//  system, Rotate_CCW around Z specifies counter-clockwise rotation in the XY plane.
+enum RotateDirection
+{
+    Rotate_CCW = 1,
+    Rotate_CW  = -1 
+};
+
+// Constants for right handed and left handed coordinate systems
+enum HandedSystem
+{
+    Handed_R = 1, Handed_L = -1
+};
+
+// AxisDirection describes which way the coordinate axis points. Used by WorldAxes.
+enum AxisDirection
+{
+    Axis_Up    =  2,
+    Axis_Down  = -2,
+    Axis_Right =  1,
+    Axis_Left  = -1,
+    Axis_In    =  3,
+    Axis_Out   = -3
+};
+
+struct WorldAxes
+{
+    AxisDirection XAxis, YAxis, ZAxis;
+
+    WorldAxes(AxisDirection x, AxisDirection y, AxisDirection z)
+        : XAxis(x), YAxis(y), ZAxis(z) 
+    { OVR_MATH_ASSERT(abs(x) != abs(y) && abs(y) != abs(z) && abs(z) != abs(x));}
+};
+
+} // namespace OVR
+
+
+//------------------------------------------------------------------------------------//
+// ***** C Compatibility Types
+
+// These declarations are used to support conversion between C types used in
+// LibOVR C interfaces and their C++ versions. As an example, they allow passing
+// Vector3f into a function that expects ovrVector3f.
+
+typedef struct ovrQuatf_ ovrQuatf;
+typedef struct ovrQuatd_ ovrQuatd;
+typedef struct ovrSizei_ ovrSizei;
+typedef struct ovrSizef_ ovrSizef;
+typedef struct ovrSized_ ovrSized;
+typedef struct ovrRecti_ ovrRecti;
+typedef struct ovrVector2i_ ovrVector2i;
+typedef struct ovrVector2f_ ovrVector2f;
+typedef struct ovrVector2d_ ovrVector2d;
+typedef struct ovrVector3f_ ovrVector3f;
+typedef struct ovrVector3d_ ovrVector3d;
+typedef struct ovrVector4f_ ovrVector4f;
+typedef struct ovrVector4d_ ovrVector4d;
+typedef struct ovrMatrix2f_ ovrMatrix2f;
+typedef struct ovrMatrix2d_ ovrMatrix2d;
+typedef struct ovrMatrix3f_ ovrMatrix3f;
+typedef struct ovrMatrix3d_ ovrMatrix3d;
+typedef struct ovrMatrix4f_ ovrMatrix4f;
+typedef struct ovrMatrix4d_ ovrMatrix4d;
+typedef struct ovrPosef_ ovrPosef;
+typedef struct ovrPosed_ ovrPosed;
+typedef struct ovrPoseStatef_ ovrPoseStatef;
+typedef struct ovrPoseStated_ ovrPoseStated;
+
+namespace OVR {
+
+// Forward-declare our templates.
+template<class T> class Quat;
+template<class T> class Size;
+template<class T> class Rect;
+template<class T> class Vector2;
+template<class T> class Vector3;
+template<class T> class Vector4;
+template<class T> class Matrix2;
+template<class T> class Matrix3;
+template<class T> class Matrix4;
+template<class T> class Pose;
+template<class T> class PoseState;
+
+// CompatibleTypes::Type is used to lookup a compatible C-version of a C++ class.
+template<class C>
+struct CompatibleTypes
+{    
+    // Declaration here seems necessary for MSVC; specializations are
+    // used instead.
+    typedef struct {} Type;
+};
+
+// Specializations providing CompatibleTypes::Type value.
+template<> struct CompatibleTypes<Quat<float> >     { typedef ovrQuatf Type; };
+template<> struct CompatibleTypes<Quat<double> >    { typedef ovrQuatd Type; };
+template<> struct CompatibleTypes<Matrix2<float> >  { typedef ovrMatrix2f Type; };
+template<> struct CompatibleTypes<Matrix2<double> > { typedef ovrMatrix2d Type; };
+template<> struct CompatibleTypes<Matrix3<float> >  { typedef ovrMatrix3f Type; };
+template<> struct CompatibleTypes<Matrix3<double> > { typedef ovrMatrix3d Type; };
+template<> struct CompatibleTypes<Matrix4<float> >  { typedef ovrMatrix4f Type; };
+template<> struct CompatibleTypes<Matrix4<double> > { typedef ovrMatrix4d Type; };
+template<> struct CompatibleTypes<Size<int> >       { typedef ovrSizei Type; };
+template<> struct CompatibleTypes<Size<float> >     { typedef ovrSizef Type; };
+template<> struct CompatibleTypes<Size<double> >    { typedef ovrSized Type; };
+template<> struct CompatibleTypes<Rect<int> >       { typedef ovrRecti Type; };
+template<> struct CompatibleTypes<Vector2<int> >    { typedef ovrVector2i Type; };
+template<> struct CompatibleTypes<Vector2<float> >  { typedef ovrVector2f Type; };
+template<> struct CompatibleTypes<Vector2<double> > { typedef ovrVector2d Type; };
+template<> struct CompatibleTypes<Vector3<float> >  { typedef ovrVector3f Type; };
+template<> struct CompatibleTypes<Vector3<double> > { typedef ovrVector3d Type; };
+template<> struct CompatibleTypes<Vector4<float> >  { typedef ovrVector4f Type; };
+template<> struct CompatibleTypes<Vector4<double> > { typedef ovrVector4d Type; };
+template<> struct CompatibleTypes<Pose<float> >     { typedef ovrPosef Type; };
+template<> struct CompatibleTypes<Pose<double> >    { typedef ovrPosed Type; };
+
+//------------------------------------------------------------------------------------//
+// ***** Math
+//
+// Math class contains constants and functions. This class is a template specialized
+// per type, with Math<float> and Math<double> being distinct.
+template<class T>
+class Math
+{  
+public:
+    // By default, support explicit conversion to float. This allows Vector2<int> to
+    // compile, for example.
+    typedef float OtherFloatType;
+
+    static int Tolerance() { return 0; }  // Default value so integer types compile
+};
+
+
+//------------------------------------------------------------------------------------//
+// ***** double constants
+#define MATH_DOUBLE_PI              3.14159265358979323846
+#define MATH_DOUBLE_TWOPI           (2*MATH_DOUBLE_PI)
+#define MATH_DOUBLE_PIOVER2         (0.5*MATH_DOUBLE_PI)
+#define MATH_DOUBLE_PIOVER4         (0.25*MATH_DOUBLE_PI)
+#define MATH_FLOAT_MAXVALUE             (FLT_MAX) 
+
+#define MATH_DOUBLE_RADTODEGREEFACTOR (360.0 / MATH_DOUBLE_TWOPI)
+#define MATH_DOUBLE_DEGREETORADFACTOR (MATH_DOUBLE_TWOPI / 360.0)
+
+#define MATH_DOUBLE_E               2.71828182845904523536
+#define MATH_DOUBLE_LOG2E           1.44269504088896340736
+#define MATH_DOUBLE_LOG10E          0.434294481903251827651
+#define MATH_DOUBLE_LN2             0.693147180559945309417
+#define MATH_DOUBLE_LN10            2.30258509299404568402
+
+#define MATH_DOUBLE_SQRT2           1.41421356237309504880
+#define MATH_DOUBLE_SQRT1_2         0.707106781186547524401
+
+#define MATH_DOUBLE_TOLERANCE       1e-12   // a default number for value equality tolerance: about 4500*Epsilon;
+#define MATH_DOUBLE_SINGULARITYRADIUS 1e-12 // about 1-cos(.0001 degree), for gimbal lock numerical problems    
+
+//------------------------------------------------------------------------------------//
+// ***** float constants
+#define MATH_FLOAT_PI               float(MATH_DOUBLE_PI)
+#define MATH_FLOAT_TWOPI            float(MATH_DOUBLE_TWOPI)
+#define MATH_FLOAT_PIOVER2          float(MATH_DOUBLE_PIOVER2)
+#define MATH_FLOAT_PIOVER4          float(MATH_DOUBLE_PIOVER4)
+
+#define MATH_FLOAT_RADTODEGREEFACTOR float(MATH_DOUBLE_RADTODEGREEFACTOR)
+#define MATH_FLOAT_DEGREETORADFACTOR float(MATH_DOUBLE_DEGREETORADFACTOR)
+
+#define MATH_FLOAT_E                float(MATH_DOUBLE_E)
+#define MATH_FLOAT_LOG2E            float(MATH_DOUBLE_LOG2E)
+#define MATH_FLOAT_LOG10E           float(MATH_DOUBLE_LOG10E)
+#define MATH_FLOAT_LN2              float(MATH_DOUBLE_LN2)
+#define MATH_FLOAT_LN10             float(MATH_DOUBLE_LN10)
+
+#define MATH_FLOAT_SQRT2            float(MATH_DOUBLE_SQRT2)
+#define MATH_FLOAT_SQRT1_2          float(MATH_DOUBLE_SQRT1_2)
+
+#define MATH_FLOAT_TOLERANCE        1e-5f   // a default number for value equality tolerance: 1e-5, about 84*EPSILON;
+#define MATH_FLOAT_SINGULARITYRADIUS 1e-7f  // about 1-cos(.025 degree), for gimbal lock numerical problems   
+
+
+
+// Single-precision Math constants class.
+template<>
+class Math<float>
+{
+public:
+     typedef double OtherFloatType;
+
+    static inline float Tolerance()         { return MATH_FLOAT_TOLERANCE; }; // a default number for value equality tolerance
+    static inline float SingularityRadius() { return MATH_FLOAT_SINGULARITYRADIUS; };    // for gimbal lock numerical problems    
+};
+
+// Double-precision Math constants class
+template<>
+class Math<double>
+{
+public:
+    typedef float OtherFloatType;
+
+    static inline double Tolerance()         { return MATH_DOUBLE_TOLERANCE; }; // a default number for value equality tolerance
+    static inline double SingularityRadius() { return MATH_DOUBLE_SINGULARITYRADIUS; };    // for gimbal lock numerical problems    
+};
+
+typedef Math<float>  Mathf;
+typedef Math<double> Mathd;
+
+// Conversion functions between degrees and radians
+// (non-templated to ensure passing int arguments causes warning)
+inline float  RadToDegree(float rad)         { return rad * MATH_FLOAT_RADTODEGREEFACTOR; }
+inline double RadToDegree(double rad)        { return rad * MATH_DOUBLE_RADTODEGREEFACTOR; }
+
+inline float  DegreeToRad(float deg)         { return deg * MATH_FLOAT_DEGREETORADFACTOR; }
+inline double DegreeToRad(double deg)        { return deg * MATH_DOUBLE_DEGREETORADFACTOR; }
+
+// Square function
+template<class T>
+inline T Sqr(T x) { return x*x; }
+
+// Sign: returns 0 if x == 0, -1 if x < 0, and 1 if x > 0
+template<class T>
+inline T Sign(T x) { return (x != T(0)) ? (x < T(0) ? T(-1) : T(1)) : T(0); }
+
+// Numerically stable acos function
+inline float Acos(float x)   { return (x > 1.0f) ? 0.0f : (x < -1.0f) ? MATH_FLOAT_PI : acosf(x); }
+inline double Acos(double x) { return (x > 1.0) ? 0.0 : (x < -1.0) ? MATH_DOUBLE_PI : acos(x); }
+
+// Numerically stable asin function
+inline float Asin(float x)   { return (x > 1.0f) ? MATH_FLOAT_PIOVER2 : (x < -1.0f) ? -MATH_FLOAT_PIOVER2 : asinf(x); }
+inline double Asin(double x) { return (x > 1.0) ? MATH_DOUBLE_PIOVER2 : (x < -1.0) ? -MATH_DOUBLE_PIOVER2 : asin(x); }
+
+#if defined(_MSC_VER)
+    inline int isnan(double x) { return ::_isnan(x); }
+#elif !defined(isnan) // Some libraries #define isnan.
+    inline int isnan(double x) { return ::isnan(x); }
+#endif
+
+template<class T>
+class Quat;
+
+
+//-------------------------------------------------------------------------------------
+// ***** Vector2<>
+
+// Vector2f (Vector2d) represents a 2-dimensional vector or point in space,
+// consisting of coordinates x and y
+
+template<class T>
+class Vector2
+{
+public:
+    typedef T ElementType;
+    static const size_t ElementCount = 2;
+
+    T x, y;
+
+    Vector2() : x(0), y(0) { }
+    Vector2(T x_, T y_) : x(x_), y(y_) { }
+    explicit Vector2(T s) : x(s), y(s) { }
+    explicit Vector2(const Vector2<typename Math<T>::OtherFloatType> &src)
+        : x((T)src.x), y((T)src.y) { }
+
+    static Vector2 Zero() { return Vector2(0, 0); }
+
+    // C-interop support.
+    typedef  typename CompatibleTypes<Vector2<T> >::Type CompatibleType;
+
+    Vector2(const CompatibleType& s) : x(s.x), y(s.y) {  }
+
+    operator const CompatibleType& () const
+    {
+        OVR_MATH_STATIC_ASSERT(sizeof(Vector2<T>) == sizeof(CompatibleType), "sizeof(Vector2<T>) failure");
+        return reinterpret_cast<const CompatibleType&>(*this);
+    }
+
+        
+    bool     operator== (const Vector2& b) const  { return x == b.x && y == b.y; }
+    bool     operator!= (const Vector2& b) const  { return x != b.x || y != b.y; }
+             
+    Vector2  operator+  (const Vector2& b) const  { return Vector2(x + b.x, y + b.y); }
+    Vector2& operator+= (const Vector2& b)        { x += b.x; y += b.y; return *this; }
+    Vector2  operator-  (const Vector2& b) const  { return Vector2(x - b.x, y - b.y); }
+    Vector2& operator-= (const Vector2& b)        { x -= b.x; y -= b.y; return *this; }
+    Vector2  operator- () const                   { return Vector2(-x, -y); }
+
+    // Scalar multiplication/division scales vector.
+    Vector2  operator*  (T s) const               { return Vector2(x*s, y*s); }
+    Vector2& operator*= (T s)                     { x *= s; y *= s; return *this; }
+
+    Vector2  operator/  (T s) const               { T rcp = T(1)/s;
+                                                    return Vector2(x*rcp, y*rcp); }
+    Vector2& operator/= (T s)                     { T rcp = T(1)/s;
+                                                    x *= rcp; y *= rcp;
+                                                    return *this; }
+
+    static Vector2  Min(const Vector2& a, const Vector2& b) { return Vector2((a.x < b.x) ? a.x : b.x,
+                                                                             (a.y < b.y) ? a.y : b.y); }
+    static Vector2  Max(const Vector2& a, const Vector2& b) { return Vector2((a.x > b.x) ? a.x : b.x,
+                                                                             (a.y > b.y) ? a.y : b.y); }
+
+    Vector2 Clamped(T maxMag) const
+    {
+        T magSquared = LengthSq();
+        if (magSquared <= Sqr(maxMag))
+            return *this;
+        else
+            return *this * (maxMag / sqrt(magSquared));
+    }
+
+    // Compare two vectors for equality with tolerance. Returns true if vectors match within tolerance.
+    bool IsEqual(const Vector2& b, T tolerance =Math<T>::Tolerance()) const
+    {
+        return (fabs(b.x-x) <= tolerance) &&
+               (fabs(b.y-y) <= tolerance);
+    }
+    bool Compare(const Vector2& b, T tolerance = Math<T>::Tolerance()) const 
+    {
+        return IsEqual(b, tolerance);
+    }
+
+    // Access element by index
+    T& operator[] (int idx)
+    {
+        OVR_MATH_ASSERT(0 <= idx && idx < 2);
+        return *(&x + idx);
+    }
+    const T& operator[] (int idx) const
+    {
+        OVR_MATH_ASSERT(0 <= idx && idx < 2);
+        return *(&x + idx);
+    }
+
+    // Entry-wise product of two vectors
+    Vector2    EntrywiseMultiply(const Vector2& b) const    { return Vector2(x * b.x, y * b.y);}
+
+
+    // Multiply and divide operators do entry-wise math. Use Dot() for the dot product.
+    Vector2  operator*  (const Vector2& b) const        { return Vector2(x * b.x,  y * b.y); }
+    Vector2  operator/  (const Vector2& b) const        { return Vector2(x / b.x,  y / b.y); }
+
+    // Dot product
+    // Used to calculate angle q between two vectors among other things,
+    // as (A dot B) = |a||b|cos(q).
+    T        Dot(const Vector2& b) const                 { return x*b.x + y*b.y; }
+
+    // Returns the angle from this vector to b, in radians.
+    T       Angle(const Vector2& b) const        
+    { 
+        T div = LengthSq()*b.LengthSq();
+        OVR_MATH_ASSERT(div != T(0));
+        T result = Acos((this->Dot(b))/sqrt(div));
+        return result;
+    }
+
+    // Return Length of the vector squared.
+    T       LengthSq() const                     { return (x * x + y * y); }
+
+    // Return vector length.
+    T       Length() const                       { return sqrt(LengthSq()); }
+
+    // Returns squared distance between two points represented by vectors.
+    T       DistanceSq(const Vector2& b) const   { return (*this - b).LengthSq(); }
+
+    // Returns distance between two points represented by vectors.
+    T       Distance(const Vector2& b) const     { return (*this - b).Length(); }
+
+    // Determine if this is a unit vector.
+    bool    IsNormalized() const                 { return fabs(LengthSq() - T(1)) < Math<T>::Tolerance(); }
+
+    // Normalize, converting vector length to 1.
+    void    Normalize()                          
+    {
+        T s = Length();
+        if (s != T(0))
+            s = T(1) / s;
+        *this *= s;
+    }
+
+    // Returns normalized (unit) version of the vector without modifying itself.
+    Vector2 Normalized() const                   
+    { 
+        T s = Length();
+        if (s != T(0))
+            s = T(1) / s;
+        return *this * s; 
+    }
+
+    // Linearly interpolates from this vector to another.
+    // Factor should be between 0.0 and 1.0, with 0 giving full value to this.
+    Vector2 Lerp(const Vector2& b, T f) const    { return *this*(T(1) - f) + b*f; }
+
+    // Projects this vector onto the argument; in other words,
+    // A.Project(B) returns projection of vector A onto B.
+    Vector2 ProjectTo(const Vector2& b) const    
+    { 
+        T l2 = b.LengthSq();
+        OVR_MATH_ASSERT(l2 != T(0));
+        return b * ( Dot(b) / l2 ); 
+    }
+    
+    // returns true if vector b is clockwise from this vector
+    bool IsClockwise(const Vector2& b) const
+    {
+        return (x * b.y - y * b.x) < 0;
+    }
+};
+
+
+typedef Vector2<float>  Vector2f;
+typedef Vector2<double> Vector2d;
+typedef Vector2<int>    Vector2i;
+
+typedef Vector2<float>  Point2f;
+typedef Vector2<double> Point2d;
+typedef Vector2<int>    Point2i;
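A minimal illustrative sketch (not part of the header) exercising the Vector2 API defined above.

    OVR::Vector2f a(3.0f, 4.0f), b(1.0f, 0.0f);
    float         len = a.Length();           // 5
    float         dot = a.Dot(b);             // 3
    OVR::Vector2f mid = a.Lerp(b, 0.5f);      // (2, 2)
    OVR::Vector2f dir = a.Normalized();       // (0.6, 0.8)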
+
+//-------------------------------------------------------------------------------------
+// ***** Vector3<> - 3D vector of {x, y, z}
+
+//
+// Vector3f (Vector3d) represents a 3-dimensional vector or point in space,
+// consisting of coordinates x, y and z.
+
+template<class T>
+class Vector3
+{
+public:
+    typedef T ElementType;
+    static const size_t ElementCount = 3;
+
+    T x, y, z;
+
+    // FIXME: default initialization of a vector class can be very expensive in a full-blown
+    // application.  A few hundred thousand vector constructions is not unlikely and can add
+    // up to milliseconds of time on processors like the PS3 PPU.
+    Vector3() : x(0), y(0), z(0) { }
+    Vector3(T x_, T y_, T z_ = 0) : x(x_), y(y_), z(z_) { }
+    explicit Vector3(T s) : x(s), y(s), z(s) { }
+    explicit Vector3(const Vector3<typename Math<T>::OtherFloatType> &src)
+        : x((T)src.x), y((T)src.y), z((T)src.z) { }
+
+    static Vector3 Zero() { return Vector3(0, 0, 0); }
+
+    // C-interop support.
+    typedef  typename CompatibleTypes<Vector3<T> >::Type CompatibleType;
+
+    Vector3(const CompatibleType& s) : x(s.x), y(s.y), z(s.z) {  }
+
+    operator const CompatibleType& () const
+    {
+        OVR_MATH_STATIC_ASSERT(sizeof(Vector3<T>) == sizeof(CompatibleType), "sizeof(Vector3<T>) failure");
+        return reinterpret_cast<const CompatibleType&>(*this);
+    }
+
+    bool     operator== (const Vector3& b) const  { return x == b.x && y == b.y && z == b.z; }
+    bool     operator!= (const Vector3& b) const  { return x != b.x || y != b.y || z != b.z; }
+             
+    Vector3  operator+  (const Vector3& b) const  { return Vector3(x + b.x, y + b.y, z + b.z); }
+    Vector3& operator+= (const Vector3& b)        { x += b.x; y += b.y; z += b.z; return *this; }
+    Vector3  operator-  (const Vector3& b) const  { return Vector3(x - b.x, y - b.y, z - b.z); }
+    Vector3& operator-= (const Vector3& b)        { x -= b.x; y -= b.y; z -= b.z; return *this; }
+    Vector3  operator- () const                   { return Vector3(-x, -y, -z); }
+
+    // Scalar multiplication/division scales vector.
+    Vector3  operator*  (T s) const               { return Vector3(x*s, y*s, z*s); }
+    Vector3& operator*= (T s)                     { x *= s; y *= s; z *= s; return *this; }
+
+    Vector3  operator/  (T s) const               { T rcp = T(1)/s;
+                                                    return Vector3(x*rcp, y*rcp, z*rcp); }
+    Vector3& operator/= (T s)                     { T rcp = T(1)/s;
+                                                    x *= rcp; y *= rcp; z *= rcp;
+                                                    return *this; }
+
+    static Vector3  Min(const Vector3& a, const Vector3& b)
+    {
+        return Vector3((a.x < b.x) ? a.x : b.x,
+                       (a.y < b.y) ? a.y : b.y,
+                       (a.z < b.z) ? a.z : b.z);
+    }
+    static Vector3  Max(const Vector3& a, const Vector3& b)
+    { 
+        return Vector3((a.x > b.x) ? a.x : b.x,
+                       (a.y > b.y) ? a.y : b.y,
+                       (a.z > b.z) ? a.z : b.z);
+    }        
+
+    Vector3 Clamped(T maxMag) const
+    {
+        T magSquared = LengthSq();
+        if (magSquared <= Sqr(maxMag))
+            return *this;
+        else
+            return *this * (maxMag / sqrt(magSquared));
+    }
+
+    // Compare two vectors for equality with tolerance. Returns true if vectors match within tolerance.
+    bool IsEqual(const Vector3& b, T tolerance = Math<T>::Tolerance()) const
+    {
+        return (fabs(b.x-x) <= tolerance) && 
+               (fabs(b.y-y) <= tolerance) && 
+               (fabs(b.z-z) <= tolerance);
+    }
+    bool Compare(const Vector3& b, T tolerance = Math<T>::Tolerance()) const
+    {
+        return IsEqual(b, tolerance);
+    }
+
+    T& operator[] (int idx)
+    {
+        OVR_MATH_ASSERT(0 <= idx && idx < 3);
+        return *(&x + idx);
+    }
+
+    const T& operator[] (int idx) const
+    {
+        OVR_MATH_ASSERT(0 <= idx && idx < 3);
+        return *(&x + idx);
+    }
+
+    // Entrywise product of two vectors
+    Vector3    EntrywiseMultiply(const Vector3& b) const    { return Vector3(x * b.x, 
+                                                                         y * b.y, 
+                                                                         z * b.z);}
+
+    // Multiply and divide operators do entry-wise math
+    Vector3  operator*  (const Vector3& b) const        { return Vector3(x * b.x, 
+                                                                         y * b.y, 
+                                                                         z * b.z); }
+
+    Vector3  operator/  (const Vector3& b) const        { return Vector3(x / b.x, 
+                                                                         y / b.y, 
+                                                                         z / b.z); }
+
+
+    // Dot product
+    // Used to calculate angle q between two vectors among other things,
+    // as (A dot B) = |a||b|cos(q).
+     T      Dot(const Vector3& b) const          { return x*b.x + y*b.y + z*b.z; }
+
+    // Compute cross product, which generates a normal vector.
+    // Direction vector can be determined by the right-hand rule: pointing the index finger in
+    // direction a and the middle finger in direction b, the thumb will point in a.Cross(b).
+    Vector3 Cross(const Vector3& b) const        { return Vector3(y*b.z - z*b.y,
+                                                                  z*b.x - x*b.z,
+                                                                  x*b.y - y*b.x); }
+
+    // Returns the angle from this vector to b, in radians.
+    T       Angle(const Vector3& b) const 
+    {
+        T div = LengthSq()*b.LengthSq();
+        OVR_MATH_ASSERT(div != T(0));
+        T result = Acos((this->Dot(b))/sqrt(div));
+        return result;
+    }
+
+    // Return Length of the vector squared.
+    T       LengthSq() const                     { return (x * x + y * y + z * z); }
+
+    // Return vector length.
+    T       Length() const                       { return (T)sqrt(LengthSq()); }
+
+    // Returns squared distance between two points represented by vectors.
+    T       DistanceSq(Vector3 const& b) const         { return (*this - b).LengthSq(); }
+
+    // Returns distance between two points represented by vectors.
+    T       Distance(Vector3 const& b) const     { return (*this - b).Length(); }
+    
+    bool    IsNormalized() const                 { return fabs(LengthSq() - T(1)) < Math<T>::Tolerance(); }
+
+    // Normalize, converting vector length to 1.
+    void    Normalize()                          
+    {
+        T s = Length();
+        if (s != T(0))
+            s = T(1) / s;
+        *this *= s;
+    }
+
+    // Returns normalized (unit) version of the vector without modifying itself.
+    Vector3 Normalized() const                   
+    { 
+        T s = Length();
+        if (s != T(0))
+            s = T(1) / s;
+        return *this * s;
+    }
+
+    // Linearly interpolates from this vector to another.
+    // Factor should be between 0.0 and 1.0, with 0 giving full value to this.
+    Vector3 Lerp(const Vector3& b, T f) const    { return *this*(T(1) - f) + b*f; }
+
+    // Projects this vector onto the argument; in other words,
+    // A.Project(B) returns projection of vector A onto B.
+    Vector3 ProjectTo(const Vector3& b) const    
+    { 
+        T l2 = b.LengthSq();
+        OVR_MATH_ASSERT(l2 != T(0));
+        return b * ( Dot(b) / l2 ); 
+    }
+
+    // Projects this vector onto a plane defined by a normal vector
+    Vector3 ProjectToPlane(const Vector3& normal) const { return *this - this->ProjectTo(normal); }
+};
+
+typedef Vector3<float>  Vector3f;
+typedef Vector3<double> Vector3d;
+typedef Vector3<int32_t>  Vector3i;
+    
+OVR_MATH_STATIC_ASSERT((sizeof(Vector3f) == 3*sizeof(float)), "sizeof(Vector3f) failure");
+OVR_MATH_STATIC_ASSERT((sizeof(Vector3d) == 3*sizeof(double)), "sizeof(Vector3d) failure");
+OVR_MATH_STATIC_ASSERT((sizeof(Vector3i) == 3*sizeof(int32_t)), "sizeof(Vector3i) failure");
+
+typedef Vector3<float>   Point3f;
+typedef Vector3<double>  Point3d;
+typedef Vector3<int32_t>  Point3i;
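Another short illustrative sketch (not part of the header): cross and dot products with the Vector3 class above.

    OVR::Vector3f xAxis(1.0f, 0.0f, 0.0f), yAxis(0.0f, 1.0f, 0.0f);
    OVR::Vector3f zAxis = xAxis.Cross(yAxis);   // (0, 0, 1), right-hand rule
    float         cosA  = xAxis.Dot(yAxis);     // 0: the axes are orthogonal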
+
+
+//-------------------------------------------------------------------------------------
+// ***** Vector4<> - 4D vector of {x, y, z, w}
+
+//
+// Vector4f (Vector4d) represents a 4-dimensional vector or point in space,
+// consisting of coordinates x, y, z and w.
+
+template<class T>
+class Vector4
+{
+public:
+    typedef T ElementType;
+    static const size_t ElementCount = 4;
+
+    T x, y, z, w;
+
+    // FIXME: default initialization of a vector class can be very expensive in a full-blown
+    // application.  A few hundred thousand vector constructions is not unlikely and can add
+    // up to milliseconds of time on processors like the PS3 PPU.
+    Vector4() : x(0), y(0), z(0), w(0) { }
+    Vector4(T x_, T y_, T z_, T w_) : x(x_), y(y_), z(z_), w(w_) { }
+    explicit Vector4(T s) : x(s), y(s), z(s), w(s) { }
+    explicit Vector4(const Vector3<T>& v, const T w_=T(1)) : x(v.x), y(v.y), z(v.z), w(w_) { }
+    explicit Vector4(const Vector4<typename Math<T>::OtherFloatType> &src)
+        : x((T)src.x), y((T)src.y), z((T)src.z), w((T)src.w) { }
+
+    static Vector4 Zero() { return Vector4(0, 0, 0, 0); }
+
+    // C-interop support.
+    typedef  typename CompatibleTypes< Vector4<T> >::Type CompatibleType;
+
+    Vector4(const CompatibleType& s) : x(s.x), y(s.y), z(s.z), w(s.w) {  }
+
+    operator const CompatibleType& () const
+    {
+        OVR_MATH_STATIC_ASSERT(sizeof(Vector4<T>) == sizeof(CompatibleType), "sizeof(Vector4<T>) failure");
+        return reinterpret_cast<const CompatibleType&>(*this);
+    }
+
+    Vector4& operator= (const Vector3<T>& other)  { x=other.x; y=other.y; z=other.z; w=1; return *this; }
+    bool     operator== (const Vector4& b) const  { return x == b.x && y == b.y && z == b.z && w == b.w; }
+    bool     operator!= (const Vector4& b) const  { return x != b.x || y != b.y || z != b.z || w != b.w; }
+             
+    Vector4  operator+  (const Vector4& b) const  { return Vector4(x + b.x, y + b.y, z + b.z, w + b.w); }
+    Vector4& operator+= (const Vector4& b)        { x += b.x; y += b.y; z += b.z; w += b.w; return *this; }
+    Vector4  operator-  (const Vector4& b) const  { return Vector4(x - b.x, y - b.y, z - b.z, w - b.w); }
+    Vector4& operator-= (const Vector4& b)        { x -= b.x; y -= b.y; z -= b.z; w -= b.w; return *this; }
+    Vector4  operator- () const                   { return Vector4(-x, -y, -z, -w); }
+
+    // Scalar multiplication/division scales vector.
+    Vector4  operator*  (T s) const               { return Vector4(x*s, y*s, z*s, w*s); }
+    Vector4& operator*= (T s)                     { x *= s; y *= s; z *= s; w *= s;return *this; }
+
+    Vector4  operator/  (T s) const               { T rcp = T(1)/s;
+                                                    return Vector4(x*rcp, y*rcp, z*rcp, w*rcp); }
+    Vector4& operator/= (T s)                     { T rcp = T(1)/s;
+                                                    x *= rcp; y *= rcp; z *= rcp; w *= rcp;
+                                                    return *this; }
+
+    static Vector4  Min(const Vector4& a, const Vector4& b)
+    {
+        return Vector4((a.x < b.x) ? a.x : b.x,
+                       (a.y < b.y) ? a.y : b.y,
+                       (a.z < b.z) ? a.z : b.z,
+                       (a.w < b.w) ? a.w : b.w);
+    }
+    static Vector4  Max(const Vector4& a, const Vector4& b)
+    { 
+        return Vector4((a.x > b.x) ? a.x : b.x,
+                       (a.y > b.y) ? a.y : b.y,
+                       (a.z > b.z) ? a.z : b.z,
+                       (a.w > b.w) ? a.w : b.w);
+    }        
+
+    Vector4 Clamped(T maxMag) const
+    {
+        T magSquared = LengthSq();
+        if (magSquared <= Sqr(maxMag))
+            return *this;
+        else
+            return *this * (maxMag / sqrt(magSquared));
+    }
+
+    // Compare two vectors for equality with tolerance. Returns true if vectors match within tolerance.
+    bool IsEqual(const Vector4& b, T tolerance = Math<T>::Tolerance()) const
+    {
+        return (fabs(b.x-x) <= tolerance) && 
+               (fabs(b.y-y) <= tolerance) && 
+               (fabs(b.z-z) <= tolerance) &&
+               (fabs(b.w-w) <= tolerance);
+    }
+    bool Compare(const Vector4& b, T tolerance = Math<T>::Tolerance()) const
+    {
+        return IsEqual(b, tolerance);
+    }
+    
+    T& operator[] (int idx)
+    {
+        OVR_MATH_ASSERT(0 <= idx && idx < 4);
+        return *(&x + idx);
+    }
+
+    const T& operator[] (int idx) const
+    {
+        OVR_MATH_ASSERT(0 <= idx && idx < 4);
+        return *(&x + idx);
+    }
+
+    // Entry wise product of two vectors
+    Vector4    EntrywiseMultiply(const Vector4& b) const    { return Vector4(x * b.x, 
+                                                                         y * b.y, 
+                                                                         z * b.z,
+                                                                         w * b.w);}
+
+    // Multiply and divide operators do entry-wise math
+    Vector4  operator*  (const Vector4& b) const        { return Vector4(x * b.x, 
+                                                                         y * b.y, 
+                                                                         z * b.z,
+                                                                         w * b.w); }
+
+    Vector4  operator/  (const Vector4& b) const        { return Vector4(x / b.x, 
+                                                                         y / b.y, 
+                                                                         z / b.z,
+                                                                         w / b.w); }
+
+
+    // Dot product
+    T       Dot(const Vector4& b) const          { return x*b.x + y*b.y + z*b.z + w*b.w; }
+
+    // Return Length of the vector squared.
+    T       LengthSq() const                     { return (x * x + y * y + z * z + w * w); }
+
+    // Return vector length.
+    T       Length() const                       { return sqrt(LengthSq()); }
+    
+    bool    IsNormalized() const                 { return fabs(LengthSq() - T(1)) < Math<T>::Tolerance(); }
+
+    // Normalize, converting vector length to 1.
+    void    Normalize()                          
+    {
+        T s = Length();
+        if (s != T(0))
+            s = T(1) / s;
+        *this *= s;
+    }
+
+    // Returns normalized (unit) version of the vector without modifying itself.
+    Vector4 Normalized() const                   
+    { 
+        T s = Length();
+        if (s != T(0))
+            s = T(1) / s;
+        return *this * s;
+    }
+
+    // Linearly interpolates from this vector to another.
+    // Factor should be between 0.0 and 1.0, with 0 giving full value to this.
+    Vector4 Lerp(const Vector4& b, T f) const    { return *this*(T(1) - f) + b*f; }
+};
+
+typedef Vector4<float>  Vector4f;
+typedef Vector4<double> Vector4d;
+typedef Vector4<int>    Vector4i;
+
+
+//-------------------------------------------------------------------------------------
+// ***** Bounds3
+
+// Bounds class used to describe a 3D axis aligned bounding box.
+
+template<class T>
+class Bounds3
+{
+public:
+    Vector3<T>    b[2];
+
+    Bounds3()
+    {
+    }
+
+    Bounds3( const Vector3<T> & mins, const Vector3<T> & maxs )
+    {
+        b[0] = mins;
+        b[1] = maxs;
+    }
+
+    void Clear()
+    {
+        b[0].x = b[0].y = b[0].z = Math<T>::MaxValue;
+        b[1].x = b[1].y = b[1].z = -Math<T>::MaxValue;
+    }
+
+    void AddPoint( const Vector3<T> & v )
+    {
+        b[0].x = (b[0].x < v.x ? b[0].x : v.x);
+        b[0].y = (b[0].y < v.y ? b[0].y : v.y);
+        b[0].z = (b[0].z < v.z ? b[0].z : v.z);
+        b[1].x = (v.x < b[1].x ? b[1].x : v.x);
+        b[1].y = (v.y < b[1].y ? b[1].y : v.y);
+        b[1].z = (v.z < b[1].z ? b[1].z : v.z);
+    }
+
+    const Vector3<T> & GetMins() const { return b[0]; }
+    const Vector3<T> & GetMaxs() const { return b[1]; }
+
+    Vector3<T> & GetMins() { return b[0]; }
+    Vector3<T> & GetMaxs() { return b[1]; }
+};
+
+typedef Bounds3<float>    Bounds3f;
+typedef Bounds3<double>    Bounds3d;
+
+
+//-------------------------------------------------------------------------------------
+// ***** Size
+
+// Size class represents 2D size with Width, Height components.
+// Used to describe dimensions of render targets, etc.
+
+template<class T>
+class Size
+{
+public:
+    T   w, h;
+
+    Size()              : w(0), h(0)   { }
+    Size(T w_, T h_)    : w(w_), h(h_) { }
+    explicit Size(T s)  : w(s), h(s)   { }
+    explicit Size(const Size<typename Math<T>::OtherFloatType> &src)
+        : w((T)src.w), h((T)src.h) { }
+
+    // C-interop support.
+    typedef  typename CompatibleTypes<Size<T> >::Type CompatibleType;
+
+    Size(const CompatibleType& s) : w(s.w), h(s.h) {  }
+
+    operator const CompatibleType& () const
+    {
+        OVR_MATH_STATIC_ASSERT(sizeof(Size<T>) == sizeof(CompatibleType), "sizeof(Size<T>) failure");
+        return reinterpret_cast<const CompatibleType&>(*this);
+    }
+
+    bool     operator== (const Size& b) const  { return w == b.w && h == b.h; }
+    bool     operator!= (const Size& b) const  { return w != b.w || h != b.h; }
+             
+    Size  operator+  (const Size& b) const  { return Size(w + b.w, h + b.h); }
+    Size& operator+= (const Size& b)        { w += b.w; h += b.h; return *this; }
+    Size  operator-  (const Size& b) const  { return Size(w - b.w, h - b.h); }
+    Size& operator-= (const Size& b)        { w -= b.w; h -= b.h; return *this; }
+    Size  operator- () const                { return Size(-w, -h); }
+    Size  operator*  (const Size& b) const  { return Size(w * b.w, h * b.h); }
+    Size& operator*= (const Size& b)        { w *= b.w; h *= b.h; return *this; }
+    Size  operator/  (const Size& b) const  { return Size(w / b.w, h / b.h); }
+    Size& operator/= (const Size& b)        { w /= b.w; h /= b.h; return *this; }
+
+    // Scalar multiplication/division scales both components.
+    Size  operator*  (T s) const            { return Size(w*s, h*s); }
+    Size& operator*= (T s)                  { w *= s; h *= s; return *this; }    
+    Size  operator/  (T s) const            { return Size(w/s, h/s); }
+    Size& operator/= (T s)                  { w /= s; h /= s; return *this; }
+
+    static Size Min(const Size& a, const Size& b)  { return Size((a.w  < b.w)  ? a.w  : b.w,
+                                                                 (a.h < b.h) ? a.h : b.h); }
+    static Size Max(const Size& a, const Size& b)  { return Size((a.w  > b.w)  ? a.w  : b.w,
+                                                                 (a.h > b.h) ? a.h : b.h); }
+    
+    T       Area() const                    { return w * h; }
+
+    inline  Vector2<T> ToVector() const     { return Vector2<T>(w, h); }
+};
+
+
+typedef Size<int>       Sizei;
+typedef Size<unsigned>  Sizeu;
+typedef Size<float>     Sizef;
+typedef Size<double>    Sized;
+
+
+
+//-----------------------------------------------------------------------------------
+// ***** Rect
+
+// Rect describes a rectangular area for rendering that includes position and size.
+template<class T>
+class Rect
+{
+public:
+    T x, y;
+    T w, h;
+
+    Rect() { }
+    Rect(T x1, T y1, T w1, T h1)                   : x(x1), y(y1), w(w1), h(h1) { }    
+    Rect(const Vector2<T>& pos, const Size<T>& sz) : x(pos.x), y(pos.y), w(sz.w), h(sz.h) { }
+    Rect(const Size<T>& sz)                        : x(0), y(0), w(sz.w), h(sz.h) { }
+    
+    // C-interop support.
+    typedef  typename CompatibleTypes<Rect<T> >::Type CompatibleType;
+
+    Rect(const CompatibleType& s) : x(s.Pos.x), y(s.Pos.y), w(s.Size.w), h(s.Size.h) {  }
+
+    operator const CompatibleType& () const
+    {
+        OVR_MATH_STATIC_ASSERT(sizeof(Rect<T>) == sizeof(CompatibleType), "sizeof(Rect<T>) failure");
+        return reinterpret_cast<const CompatibleType&>(*this);
+    }
+
+    Vector2<T> GetPos() const                { return Vector2<T>(x, y); }
+    Size<T>    GetSize() const               { return Size<T>(w, h); }
+    void       SetPos(const Vector2<T>& pos) { x = pos.x; y = pos.y; }
+    void       SetSize(const Size<T>& sz)    { w = sz.w; h = sz.h; }
+
+    bool operator == (const Rect& vp) const
+    { return (x == vp.x) && (y == vp.y) && (w == vp.w) && (h == vp.h); }
+    bool operator != (const Rect& vp) const
+    { return !operator == (vp); }
+};
+
+typedef Rect<int> Recti;
+
+
+//-------------------------------------------------------------------------------------//
+// ***** Quat
+//
+// Quatf represents a quaternion class used for rotations.
+// 
+// Quaternion multiplications are done in right-to-left order, to match the
+// behavior of matrices.
+
+
+template<class T>
+class Quat
+{
+public:
+    typedef T ElementType;
+    static const size_t ElementCount = 4;
+
+    // x,y,z = axis*sin(angle), w = cos(angle)
+    T x, y, z, w;    
+
+    Quat() : x(0), y(0), z(0), w(1) { }
+    Quat(T x_, T y_, T z_, T w_) : x(x_), y(y_), z(z_), w(w_) { }
+    explicit Quat(const Quat<typename Math<T>::OtherFloatType> &src)
+        : x((T)src.x), y((T)src.y), z((T)src.z), w((T)src.w)
+    {
+        // NOTE: Converting a normalized Quat<float> to Quat<double>
+        // will generally result in an un-normalized quaternion.
+        // But we don't normalize here in case the quaternion
+        // being converted is not a normalized rotation quaternion.
+    }
+
+    typedef  typename CompatibleTypes<Quat<T> >::Type CompatibleType;
+
+    // C-interop support.
+    Quat(const CompatibleType& s) : x(s.x), y(s.y), z(s.z), w(s.w) { }
+
+    operator CompatibleType () const
+    {
+        CompatibleType result;
+        result.x = x;
+        result.y = y;
+        result.z = z;
+        result.w = w;
+        return result;
+    }
+
+    // Constructs quaternion for rotation around the axis by an angle.
+    Quat(const Vector3<T>& axis, T angle)
+    {
+        // Make sure we don't divide by zero. 
+        if (axis.LengthSq() == T(0))
+        {
+            // Assert if the axis is zero, but the angle isn't
+            OVR_MATH_ASSERT(angle == T(0));
+            x = y = z = T(0); w = T(1);
+            return;
+        }
+
+        Vector3<T> unitAxis = axis.Normalized();
+        T          sinHalfAngle = sin(angle * T(0.5));
+
+        w = cos(angle * T(0.5));
+        x = unitAxis.x * sinHalfAngle;
+        y = unitAxis.y * sinHalfAngle;
+        z = unitAxis.z * sinHalfAngle;
+    }
+
+    // Constructs quaternion for rotation around one of the coordinate axis by an angle.
+    Quat(Axis A, T angle, RotateDirection d = Rotate_CCW, HandedSystem s = Handed_R)
+    {
+        T sinHalfAngle = s * d *sin(angle * T(0.5));
+        T v[3];
+        v[0] = v[1] = v[2] = T(0);
+        v[A] = sinHalfAngle;
+
+        w = cos(angle * T(0.5));
+        x = v[0];
+        y = v[1];
+        z = v[2];
+    }
+
+    Quat operator-() { return Quat(-x, -y, -z, -w); }   // unary minus
+
+    static Quat Identity() { return Quat(0, 0, 0, 1); }
+
+    // Compute axis and angle from quaternion
+    void GetAxisAngle(Vector3<T>* axis, T* angle) const
+    {
+        if ( x*x + y*y + z*z > Math<T>::Tolerance() * Math<T>::Tolerance() ) {
+            *axis  = Vector3<T>(x, y, z).Normalized();
+            *angle = 2 * Acos(w);
+            if (*angle > ((T)MATH_DOUBLE_PI)) // Reduce the magnitude of the angle, if necessary
+            {
+                *angle = ((T)MATH_DOUBLE_TWOPI) - *angle;
+                *axis = *axis * (-1);
+            }
+        }
+        else 
+        {
+            *axis = Vector3<T>(1, 0, 0);
+            *angle= T(0);
+        }
+    }
+
+    // Convert a quaternion to a rotation vector, also known as
+    // Rodrigues vector, AxisAngle vector, SORA vector, exponential map.
+    // A rotation vector describes a rotation about an axis:
+    // the axis of rotation is the vector normalized,
+    // the angle of rotation is the magnitude of the vector.
+    Vector3<T> ToRotationVector() const
+    {
+        OVR_MATH_ASSERT(IsNormalized() || LengthSq() == 0);
+        T s = T(0);
+        T sinHalfAngle = sqrt(x*x + y*y + z*z);
+        if (sinHalfAngle > T(0))
+        {
+            T cosHalfAngle = w;
+            T halfAngle = atan2(sinHalfAngle, cosHalfAngle);
+
+            // Ensure minimum rotation magnitude
+            if (cosHalfAngle < 0)
+                halfAngle -= T(MATH_DOUBLE_PI);
+
+            s = T(2) * halfAngle / sinHalfAngle;
+        }
+        return Vector3<T>(x*s, y*s, z*s);
+    }
+
+    // Faster version of the above, optimized for use with small rotations, where rotation angle ~= sin(angle)
+    inline OVR::Vector3<T> FastToRotationVector() const
+    {
+        OVR_MATH_ASSERT(IsNormalized());
+        T s;
+        T sinHalfSquared = x*x + y*y + z*z;
+        if (sinHalfSquared < T(.0037)) // =~ sin(7/2 degrees)^2
+        {
+            // Max rotation magnitude error is about .062% at 7 degrees rotation, or about .0043 degrees
+            s = T(2) * Sign(w);
+        }
+        else
+        {
+            T sinHalfAngle = sqrt(sinHalfSquared);
+            T cosHalfAngle = w;
+            T halfAngle = atan2(sinHalfAngle, cosHalfAngle);
+
+            // Ensure minimum rotation magnitude
+            if (cosHalfAngle < 0)
+                halfAngle -= T(MATH_DOUBLE_PI);
+
+            s = T(2) * halfAngle / sinHalfAngle;
+        }
+        return Vector3<T>(x*s, y*s, z*s);
+    }
+
+    // Given a rotation vector of form unitRotationAxis * angle,
+    // returns the equivalent quaternion (unitRotationAxis * sin(angle), cos(Angle)).
+    static Quat FromRotationVector(const Vector3<T>& v)
+    {
+        T angleSquared = v.LengthSq();
+        T s = T(0);
+        T c = T(1);
+        if (angleSquared > T(0))
+        {
+            T angle = sqrt(angleSquared);
+            s = sin(angle * T(0.5)) / angle;    // normalize
+            c = cos(angle * T(0.5));
+        }
+        return Quat(s*v.x, s*v.y, s*v.z, c);
+    }
+
+    // Faster version of above, optimized for use with small rotation magnitudes, where rotation angle =~ sin(angle).
+    // If normalize is false, small-angle quaternions are returned un-normalized.
+    inline static Quat FastFromRotationVector(const OVR::Vector3<T>& v, bool normalize = true)
+    {
+        T s, c;
+        T angleSquared = v.LengthSq();
+        if (angleSquared < T(0.0076))   // =~ (5 degrees*pi/180)^2
+        {
+            s = T(0.5);
+            c = T(1.0);
+            // Max rotation magnitude error (after normalization) is about .064% at 5 degrees rotation, or .0032 degrees
+            if (normalize && angleSquared > 0)
+            {
+                // sin(angle/2)^2 ~= (angle/2)^2 and cos(angle/2)^2 ~= 1
+                T invLen = T(1) / sqrt(angleSquared * T(0.25) + T(1)); // normalize
+                s = s * invLen;
+                c = c * invLen;
+            }
+        }
+        else
+        {
+            T angle = sqrt(angleSquared);
+            s = sin(angle * T(0.5)) / angle;
+            c = cos(angle * T(0.5));
+        }
+        return Quat(s*v.x, s*v.y, s*v.z, c);
+    }
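+
+    // Usage sketch (editorial note, not part of the original SDK source): a
+    // rotation-vector round trip using only the members defined above.
+    //
+    //     Vector3f axis(0.0f, 1.0f, 0.0f);                    // unit rotation axis
+    //     Vector3f rotVec = axis * 0.25f;                     // 0.25 rad about +Y
+    //     Quatf q = Quatf::FromRotationVector(rotVec);        // exp(rotVec * 0.5)
+    //     Vector3f back = q.ToRotationVector();               // recovers ~rotVec
+    //     Quatf qFast = Quatf::FastFromRotationVector(rotVec); // small-angle variant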
+
+    // Constructs the quaternion from a rotation matrix
+    explicit Quat(const Matrix4<T>& m)
+    {
+        T trace = m.M[0][0] + m.M[1][1] + m.M[2][2];
+
+        // In almost all cases, the first part is executed.
+        // However, if the trace is not positive, the other
+        // cases arise.
+        if (trace > T(0)) 
+        {
+            T s = sqrt(trace + T(1)) * T(2); // s=4*qw
+            w = T(0.25) * s;
+            x = (m.M[2][1] - m.M[1][2]) / s;
+            y = (m.M[0][2] - m.M[2][0]) / s;
+            z = (m.M[1][0] - m.M[0][1]) / s; 
+        } 
+        else if ((m.M[0][0] > m.M[1][1])&&(m.M[0][0] > m.M[2][2])) 
+        {
+            T s = sqrt(T(1) + m.M[0][0] - m.M[1][1] - m.M[2][2]) * T(2);
+            w = (m.M[2][1] - m.M[1][2]) / s;
+            x = T(0.25) * s;
+            y = (m.M[0][1] + m.M[1][0]) / s;
+            z = (m.M[2][0] + m.M[0][2]) / s;
+        } 
+        else if (m.M[1][1] > m.M[2][2]) 
+        {
+            T s = sqrt(T(1) + m.M[1][1] - m.M[0][0] - m.M[2][2]) * T(2); // S=4*qy
+            w = (m.M[0][2] - m.M[2][0]) / s;
+            x = (m.M[0][1] + m.M[1][0]) / s;
+            y = T(0.25) * s;
+            z = (m.M[1][2] + m.M[2][1]) / s;
+        } 
+        else 
+        {
+            T s = sqrt(T(1) + m.M[2][2] - m.M[0][0] - m.M[1][1]) * T(2); // S=4*qz
+            w = (m.M[1][0] - m.M[0][1]) / s;
+            x = (m.M[0][2] + m.M[2][0]) / s;
+            y = (m.M[1][2] + m.M[2][1]) / s;
+            z = T(0.25) * s;
+        }
+        OVR_MATH_ASSERT(IsNormalized());    // Ensure input matrix is orthogonal
+    }
+
+    // Constructs the quaternion from a rotation matrix
+    explicit Quat(const Matrix3<T>& m)
+    {
+        T trace = m.M[0][0] + m.M[1][1] + m.M[2][2];
+
+        // In almost all cases, the first part is executed.
+        // However, if the trace is not positive, the other
+        // cases arise.
+        if (trace > T(0)) 
+        {
+            T s = sqrt(trace + T(1)) * T(2); // s=4*qw
+            w = T(0.25) * s;
+            x = (m.M[2][1] - m.M[1][2]) / s;
+            y = (m.M[0][2] - m.M[2][0]) / s;
+            z = (m.M[1][0] - m.M[0][1]) / s; 
+        } 
+        else if ((m.M[0][0] > m.M[1][1])&&(m.M[0][0] > m.M[2][2])) 
+        {
+            T s = sqrt(T(1) + m.M[0][0] - m.M[1][1] - m.M[2][2]) * T(2);
+            w = (m.M[2][1] - m.M[1][2]) / s;
+            x = T(0.25) * s;
+            y = (m.M[0][1] + m.M[1][0]) / s;
+            z = (m.M[2][0] + m.M[0][2]) / s;
+        } 
+        else if (m.M[1][1] > m.M[2][2]) 
+        {
+            T s = sqrt(T(1) + m.M[1][1] - m.M[0][0] - m.M[2][2]) * T(2); // S=4*qy
+            w = (m.M[0][2] - m.M[2][0]) / s;
+            x = (m.M[0][1] + m.M[1][0]) / s;
+            y = T(0.25) * s;
+            z = (m.M[1][2] + m.M[2][1]) / s;
+        } 
+        else 
+        {
+            T s = sqrt(T(1) + m.M[2][2] - m.M[0][0] - m.M[1][1]) * T(2); // S=4*qz
+            w = (m.M[1][0] - m.M[0][1]) / s;
+            x = (m.M[0][2] + m.M[2][0]) / s;
+            y = (m.M[1][2] + m.M[2][1]) / s;
+            z = T(0.25) * s;
+        }
+        OVR_MATH_ASSERT(IsNormalized());    // Ensure input matrix is orthogonal
+    }
+
+    bool operator== (const Quat& b) const   { return x == b.x && y == b.y && z == b.z && w == b.w; }
+    bool operator!= (const Quat& b) const   { return x != b.x || y != b.y || z != b.z || w != b.w; }
+
+    Quat  operator+  (const Quat& b) const  { return Quat(x + b.x, y + b.y, z + b.z, w + b.w); }
+    Quat& operator+= (const Quat& b)        { w += b.w; x += b.x; y += b.y; z += b.z; return *this; }
+    Quat  operator-  (const Quat& b) const  { return Quat(x - b.x, y - b.y, z - b.z, w - b.w); }
+    Quat& operator-= (const Quat& b)        { w -= b.w; x -= b.x; y -= b.y; z -= b.z; return *this; }
+
+    Quat  operator*  (T s) const            { return Quat(x * s, y * s, z * s, w * s); }
+    Quat& operator*= (T s)                  { w *= s; x *= s; y *= s; z *= s; return *this; }
+    Quat  operator/  (T s) const            { T rcp = T(1)/s; return Quat(x * rcp, y * rcp, z * rcp, w *rcp); }
+    Quat& operator/= (T s)                  { T rcp = T(1)/s; w *= rcp; x *= rcp; y *= rcp; z *= rcp; return *this; }
+
+    // Compare two quats for equality within tolerance. Returns true if the quats match within tolerance.
+    bool IsEqual(const Quat& b, T tolerance = Math<T>::Tolerance()) const
+    {
+        return Abs(Dot(b)) >= T(1) - tolerance;
+    }
+
+    static T Abs(const T v)                 { return (v >= 0) ? v : -v; }
+
+    // Get Imaginary part vector
+    Vector3<T> Imag() const                 { return Vector3<T>(x,y,z); }
+
+    // Get quaternion length.
+    T       Length() const                  { return sqrt(LengthSq()); }
+
+    // Get quaternion length squared.
+    T       LengthSq() const                { return (x * x + y * y + z * z + w * w); }
+
+    // Simple Euclidean distance in R^4 (not SLERP distance, but at least respects Haar measure)
+    T       Distance(const Quat& q) const    
+    { 
+        T d1 = (*this - q).Length();
+        T d2 = (*this + q).Length(); // Antipodal point check
+        return (d1 < d2) ? d1 : d2;
+    }
+
+    T       DistanceSq(const Quat& q) const
+    {
+        T d1 = (*this - q).LengthSq();
+        T d2 = (*this + q).LengthSq(); // Antipodal point check
+        return (d1 < d2) ? d1 : d2;
+    }
+
+    T       Dot(const Quat& q) const
+    {
+        return x * q.x + y * q.y + z * q.z + w * q.w;
+    }
+
+    // Angle between two quaternions in radians
+    T Angle(const Quat& q) const
+    {
+        return T(2) * Acos(Abs(Dot(q)));
+    }
+
+    // Angle of quaternion
+    T Angle() const
+    {
+        return T(2) * Acos(Abs(w));
+    }
+
+    // Normalize
+    bool    IsNormalized() const            { return fabs(LengthSq() - T(1)) < Math<T>::Tolerance(); }
+
+    void    Normalize()
+    {
+        T s = Length();
+        if (s != T(0))
+            s = T(1) / s;
+        *this *= s;
+    }
+
+    Quat    Normalized() const
+    { 
+        T s = Length();
+        if (s != T(0))
+            s = T(1) / s;
+        return *this * s;
+    }
+
+    inline void EnsureSameHemisphere(const Quat& o)
+    {
+        if (Dot(o) < T(0))
+        {
+            x = -x;
+            y = -y;
+            z = -z;
+            w = -w;
+        }
+    }
+
+    // Returns conjugate of the quaternion. Produces inverse rotation if quaternion is normalized.
+    Quat    Conj() const                    { return Quat(-x, -y, -z, w); }
+
+    // Quaternion multiplication. Combines quaternion rotations, performing the one on the 
+    // right hand side first.
+    Quat  operator* (const Quat& b) const   { return Quat(w * b.x + x * b.w + y * b.z - z * b.y,
+                                                          w * b.y - x * b.z + y * b.w + z * b.x,
+                                                          w * b.z + x * b.y - y * b.x + z * b.w,
+                                                          w * b.w - x * b.x - y * b.y - z * b.z); }
+    const Quat& operator*= (const Quat& b)  { *this = *this * b;  return *this; }
+
+    // 
+    // this^p normalized; same as rotating by this p times.
+    Quat PowNormalized(T p) const
+    {
+        Vector3<T> v;
+        T          a;
+        GetAxisAngle(&v, &a);
+        return Quat(v, a * p);
+    }
+
+    // Compute quaternion that rotates v into alignTo: alignTo = Quat::Align(alignTo, v).Rotate(v).
+    // NOTE: alignTo and v must be normalized.
+    static Quat Align(const Vector3<T>& alignTo, const Vector3<T>& v)
+    {
+        OVR_MATH_ASSERT(alignTo.IsNormalized() && v.IsNormalized());
+        Vector3<T> bisector = (v + alignTo);
+        bisector.Normalize();
+        T cosHalfAngle = v.Dot(bisector); // 0..1
+        if (cosHalfAngle > T(0))
+        {
+            Vector3<T> imag = v.Cross(bisector);
+            return Quat(imag.x, imag.y, imag.z, cosHalfAngle);
+        }
+        else
+        {
+            // cosHalfAngle == 0: a 180 degree rotation.
+            // sinHalfAngle == 1, rotation axis is any axis perpendicular
+            // to alignTo.  Choose axis to include largest magnitude components
+            if (fabs(v.x) > fabs(v.y))
+            {
+                // x or z is max magnitude component
+                // = Cross(v, (0,1,0)).Normalized();
+                T invLen = sqrt(v.x*v.x + v.z*v.z);
+                if (invLen > T(0))
+                    invLen = T(1) / invLen;
+                return Quat(-v.z*invLen, 0, v.x*invLen, 0);
+            }
+            else
+            {
+                // y or z is max magnitude component
+                // = Cross(v, (1,0,0)).Normalized();
+                T invLen = sqrt(v.y*v.y + v.z*v.z);
+                if (invLen > T(0))
+                    invLen = T(1) / invLen;
+                return Quat(0, v.z*invLen, -v.y*invLen, 0);
+            }
+        }
+    }
+
+    // Normalized linear interpolation of quaternions
+    // NOTE: This function is a bad approximation of Slerp()
+    // when the angle between *this and b is large.
+    // Use FastSlerp() or Slerp() instead.
+    Quat Lerp(const Quat& b, T s) const
+    {
+        return (*this * (T(1) - s) + b * (Dot(b) < 0 ? -s : s)).Normalized();
+    }
+
+    // Spherical linear interpolation between rotations
+    Quat Slerp(const Quat& b, T s) const
+    {
+        Vector3<T> delta = (b * this->Inverted()).ToRotationVector();
+        return FromRotationVector(delta * s) * *this;
+    }
+
+    // Spherical linear interpolation: much faster for small rotations, accurate for large rotations. See FastTo/FromRotationVector
+    Quat FastSlerp(const Quat& b, T s) const
+    {
+        Vector3<T> delta = (b * this->Inverted()).FastToRotationVector();
+        return (FastFromRotationVector(delta * s, false) * *this).Normalized();
+    }
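+
+    // Usage sketch (editorial note, not part of the original SDK source):
+    // interpolating between two normalized orientations.
+    //
+    //     Quatf qStart = Quatf::Identity();
+    //     Quatf qEnd(Vector3f(0.0f, 1.0f, 0.0f), 1.0f);    // 1 rad about +Y
+    //     Quatf qHalf = qStart.Slerp(qEnd, 0.5f);          // halfway rotation
+    //     Quatf qFast = qStart.FastSlerp(qEnd, 0.5f);      // cheaper, same result here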
+
+    // Rotate transforms vector in a manner that matches Matrix rotations (counter-clockwise,
+    // assuming negative direction of the axis). Standard formula: q(t) * V * q(t)^-1. 
+    Vector3<T> Rotate(const Vector3<T>& v) const
+    {
+        OVR_MATH_ASSERT(isnan(w) || IsNormalized());
+
+        // rv = q * (v,0) * q'
+        // Same as rv = v + real * cross(imag,v)*2 + cross(imag, cross(imag,v)*2);
+
+        // uv = 2 * Imag().Cross(v);
+        T uvx = T(2) * (y*v.z - z*v.y);
+        T uvy = T(2) * (z*v.x - x*v.z);
+        T uvz = T(2) * (x*v.y - y*v.x);
+
+        // return v + Real()*uv + Imag().Cross(uv);
+        return Vector3<T>(v.x + w*uvx + y*uvz - z*uvy,
+                          v.y + w*uvy + z*uvx - x*uvz,
+                          v.z + w*uvz + x*uvy - y*uvx);
+    }
+
+    // Rotation by inverse of *this
+    Vector3<T> InverseRotate(const Vector3<T>& v) const
+    {
+        OVR_MATH_ASSERT(IsNormalized());
+
+        // rv = q' * (v,0) * q
+        // Same as rv = v + real * cross(-imag,v)*2 + cross(-imag, cross(-imag,v)*2);
+        //      or rv = v - real * cross(imag,v)*2 + cross(imag, cross(imag,v)*2);
+
+        // uv = 2 * Imag().Cross(v);
+        T uvx = T(2) * (y*v.z - z*v.y);
+        T uvy = T(2) * (z*v.x - x*v.z);
+        T uvz = T(2) * (x*v.y - y*v.x);
+
+        // return v - Real()*uv + Imag().Cross(uv);
+        return Vector3<T>(v.x - w*uvx + y*uvz - z*uvy,
+                          v.y - w*uvy + z*uvx - x*uvz,
+                          v.z - w*uvz + x*uvy - y*uvx);
+    }
+    
+    // The inverted quaternion rotates in the opposite direction.
+    Quat        Inverted() const
+    {
+        return Quat(-x, -y, -z, w);
+    }
+
+    Quat        Inverse() const
+    {
+        return Quat(-x, -y, -z, w);
+    }
+
+    // Sets this quaternion to the one that rotates in the opposite direction.
+    void        Invert()
+    {
+        *this = Quat(-x, -y, -z, w);
+    }
+    
+    // Time integration of constant angular velocity over dt
+    Quat TimeIntegrate(Vector3<T> angularVelocity, T dt) const
+    {
+        // solution is: this * exp( omega*dt/2 ); FromRotationVector(v) gives exp(v*.5).
+        return (*this * FastFromRotationVector(angularVelocity * dt, false)).Normalized();
+    }
+
+    // Time integration of constant angular acceleration and velocity over dt
+    // These are the first two terms of the "Magnus expansion" of the solution
+    //
+    //   o = o * exp( W=(W1 + W2 + W3+...) * 0.5 );
+    //
+    //  omega1 = (omega + omegaDot*dt)
+    //  W1 = (omega + omega1)*dt/2              
+    //  W2 = cross(omega, omega1)/12*dt^2 % (= -cross(omega_dot, omega)/12*dt^3)
+    // Terms 3 and beyond are vanishingly small:
+    //  W3 = cross(omega_dot, cross(omega_dot, omega))/240*dt^5 
+    //
+    Quat TimeIntegrate(Vector3<T> angularVelocity, Vector3<T> angularAcceleration, T dt) const
+    {
+        const Vector3<T>& omega = angularVelocity;
+        const Vector3<T>& omegaDot = angularAcceleration;
+
+        Vector3<T> omega1 = (omega + omegaDot * dt);
+        Vector3<T> W = ( (omega + omega1) + omega.Cross(omega1) * (dt/T(6)) ) * (dt/T(2));
+
+        // FromRotationVector(v) is exp(v*.5)
+        return (*this * FastFromRotationVector(W, false)).Normalized();
+    }
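+
+    // Usage sketch (editorial note, not part of the original SDK source):
+    // integrating a gyro sample; angular velocity is in rad/s, dt in seconds.
+    //
+    //     Quatf orientation = Quatf::Identity();
+    //     Vector3f omega(0.0f, 2.0f, 0.0f);            // 2 rad/s about +Y
+    //     float dt = 0.001f;                           // 1 ms step
+    //     orientation = orientation.TimeIntegrate(omega, dt);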
+
+    // Decompose rotation into three rotations:
+    // roll radians about Z axis, then pitch radians about X axis, then yaw radians about Y axis.
+    // Call with nullptr if a return value is not needed.
+    void GetYawPitchRoll(T* yaw, T* pitch, T* roll) const
+    {
+        return GetEulerAngles<Axis_Y, Axis_X, Axis_Z, Rotate_CCW, Handed_R>(yaw, pitch, roll);
+    }
+
+    // GetEulerAngles extracts Euler angles from the quaternion, in the specified order of
+    // axis rotations and the specified coordinate system. Right-handed coordinate system
+    // is the default, with CCW rotations while looking in the negative axis direction.
+    // Here a,b,c, are the Yaw/Pitch/Roll angles to be returned.
+    // Rotation order is c, b, a:
+    // rotation c around axis A3
+    // is followed by rotation b around axis A2
+    // is followed by rotation a around axis A1
+    // rotations are CCW or CW (D) in LH or RH coordinate system (S)
+    // 
+    template <Axis A1, Axis A2, Axis A3, RotateDirection D, HandedSystem S>
+    void GetEulerAngles(T *a, T *b, T *c) const 
+    {
+        OVR_MATH_ASSERT(IsNormalized());
+        OVR_MATH_STATIC_ASSERT((A1 != A2) && (A2 != A3) && (A1 != A3), "(A1 != A2) && (A2 != A3) && (A1 != A3)");
+
+        T Q[3] = { x, y, z };  //Quaternion components x,y,z
+
+        T ww  = w*w;
+        T Q11 = Q[A1]*Q[A1];
+        T Q22 = Q[A2]*Q[A2];
+        T Q33 = Q[A3]*Q[A3];
+
+        T psign = T(-1);
+        // Determine whether even permutation
+        if (((A1 + 1) % 3 == A2) && ((A2 + 1) % 3 == A3))
+            psign = T(1);
+        
+        T s2 = psign * T(2) * (psign*w*Q[A2] + Q[A1]*Q[A3]);
+
+        T singularityRadius = Math<T>::SingularityRadius();
+        if (s2 < T(-1) + singularityRadius)
+        { // South pole singularity
+            if (a) *a = T(0);
+            if (b) *b = -S*D*((T)MATH_DOUBLE_PIOVER2);
+            if (c) *c = S*D*atan2(T(2)*(psign*Q[A1] * Q[A2] + w*Q[A3]), ww + Q22 - Q11 - Q33 );
+        }
+        else if (s2 > T(1) - singularityRadius)
+        {  // North pole singularity
+            if (a) *a = T(0);
+            if (b) *b = S*D*((T)MATH_DOUBLE_PIOVER2);
+            if (c) *c = S*D*atan2(T(2)*(psign*Q[A1] * Q[A2] + w*Q[A3]), ww + Q22 - Q11 - Q33);
+        }
+        else
+        {
+            if (a) *a = -S*D*atan2(T(-2)*(w*Q[A1] - psign*Q[A2] * Q[A3]), ww + Q33 - Q11 - Q22);
+            if (b) *b = S*D*asin(s2);
+            if (c) *c = S*D*atan2(T(2)*(w*Q[A3] - psign*Q[A1] * Q[A2]), ww + Q11 - Q22 - Q33);
+        }      
+    }
+
+    template <Axis A1, Axis A2, Axis A3, RotateDirection D>
+    void GetEulerAngles(T *a, T *b, T *c) const
+    { GetEulerAngles<A1, A2, A3, D, Handed_R>(a, b, c); }
+
+    template <Axis A1, Axis A2, Axis A3>
+    void GetEulerAngles(T *a, T *b, T *c) const
+    { GetEulerAngles<A1, A2, A3, Rotate_CCW, Handed_R>(a, b, c); }
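+
+    // Usage sketch (editorial note, not part of the original SDK source):
+    // extracting yaw/pitch/roll; pass nullptr for any angle that is not needed.
+    //
+    //     Quatf q(Vector3f(0.0f, 1.0f, 0.0f), 0.3f);     // any normalized orientation
+    //     float yaw, pitch, roll;
+    //     q.GetYawPitchRoll(&yaw, &pitch, &roll);
+    //     q.GetEulerAngles<Axis_Y, Axis_X, Axis_Z>(&yaw, &pitch, &roll);  // same result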
+
+    // GetEulerAnglesABA extracts Euler angles from the quaternion, in the specified order of
+    // axis rotations and the specified coordinate system. Right-handed coordinate system
+    // is the default, with CCW rotations while looking in the negative axis direction.
+    // Here a,b,c, are the Yaw/Pitch/Roll angles to be returned.
+    // rotation a around axis A1
+    // is followed by rotation b around axis A2
+    // is followed by rotation c around axis A1
+    // Rotations are CCW or CW (D) in LH or RH coordinate system (S)
+    template <Axis A1, Axis A2, RotateDirection D, HandedSystem S>
+    void GetEulerAnglesABA(T *a, T *b, T *c) const
+    {
+        OVR_MATH_ASSERT(IsNormalized());
+        OVR_MATH_STATIC_ASSERT(A1 != A2, "A1 != A2");
+
+        T Q[3] = {x, y, z}; // Quaternion components
+
+        // Determine the missing axis that was not supplied
+        int m = 3 - A1 - A2;
+
+        T ww = w*w;
+        T Q11 = Q[A1]*Q[A1];
+        T Q22 = Q[A2]*Q[A2];
+        T Qmm = Q[m]*Q[m];
+
+        T psign = T(-1);
+        if ((A1 + 1) % 3 == A2) // Determine whether even permutation
+        {
+            psign = T(1);
+        }
+
+        T c2 = ww + Q11 - Q22 - Qmm;
+        T singularityRadius = Math<T>::SingularityRadius();
+        if (c2 < T(-1) + singularityRadius)
+        { // South pole singularity
+            if (a) *a = T(0);
+            if (b) *b = S*D*((T)MATH_DOUBLE_PI);
+            if (c) *c = S*D*atan2(T(2)*(w*Q[A1] - psign*Q[A2] * Q[m]),
+                            ww + Q22 - Q11 - Qmm);
+        }
+        else if (c2 > T(1) - singularityRadius)
+        {  // North pole singularity
+            if (a) *a = T(0);
+            if (b) *b = T(0);
+            if (c) *c = S*D*atan2(T(2)*(w*Q[A1] - psign*Q[A2] * Q[m]),
+                           ww + Q22 - Q11 - Qmm);
+        }
+        else
+        {
+            if (a) *a = S*D*atan2(psign*w*Q[m] + Q[A1] * Q[A2],
+                           w*Q[A2] -psign*Q[A1]*Q[m]);
+            if (b) *b = S*D*acos(c2);
+            if (c) *c = S*D*atan2(-psign*w*Q[m] + Q[A1] * Q[A2],
+                           w*Q[A2] + psign*Q[A1]*Q[m]);
+        }
+    }
+};
+
+typedef Quat<float>  Quatf;
+typedef Quat<double> Quatd;
+
+OVR_MATH_STATIC_ASSERT((sizeof(Quatf) == 4*sizeof(float)), "sizeof(Quatf) failure");
+OVR_MATH_STATIC_ASSERT((sizeof(Quatd) == 4*sizeof(double)), "sizeof(Quatd) failure");
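+
+// Usage sketch (editorial note, not part of the original SDK source): building a
+// rotation from an axis and angle and applying it to a vector.
+//
+//     Quatf q(Vector3f(0.0f, 1.0f, 0.0f), float(MATH_DOUBLE_PIOVER2)); // 90 deg about +Y
+//     Vector3f v = q.Rotate(Vector3f(0.0f, 0.0f, -1.0f));   // forward -> approx (-1, 0, 0)
+//     Vector3f u = q.InverseRotate(v);                      // back to approx (0, 0, -1)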
+
+//-------------------------------------------------------------------------------------
+// ***** Pose
+//
+// Position and orientation combined.
+//
+// This structure needs to be the same size and layout on 32-bit and 64-bit arch.
+// Update OVR_PadCheck.cpp when updating this object.
+template<class T>
+class Pose
+{
+public:
+    typedef typename CompatibleTypes<Pose<T> >::Type CompatibleType;
+
+    Pose() { }
+    Pose(const Quat<T>& orientation, const Vector3<T>& pos)
+        : Rotation(orientation), Translation(pos) {  }
+    Pose(const Pose& s)
+        : Rotation(s.Rotation), Translation(s.Translation) {  }
+    Pose(const Matrix3<T>& R, const Vector3<T>& t)
+        : Rotation((Quat<T>)R), Translation(t) {  }
+    Pose(const CompatibleType& s)
+        : Rotation(s.Orientation), Translation(s.Position) {  }
+
+    explicit Pose(const Pose<typename Math<T>::OtherFloatType> &s)
+        : Rotation(s.Rotation), Translation(s.Translation)
+    {
+        // Ensure normalized rotation if converting from float to double
+        if (sizeof(T) > sizeof(Math<T>::OtherFloatType))
+            Rotation.Normalize();
+    }
+
+    static Pose Identity() { return Pose(Quat<T>(0, 0, 0, 1), Vector3<T>(0, 0, 0)); }
+
+    void SetIdentity() { Rotation = Quat<T>(0, 0, 0, 1); Translation = Vector3<T>(0, 0, 0); }
+
+    // used to make things obviously broken if someone tries to use the value
+    void SetInvalid() { Rotation = Quat<T>(NAN, NAN, NAN, NAN); Translation = Vector3<T>(NAN, NAN, NAN); }
+
+    bool IsEqual(const Pose&b, T tolerance = Math<T>::Tolerance()) const
+    {
+        return Translation.IsEqual(b.Translation, tolerance) && Rotation.IsEqual(b.Rotation, tolerance);
+    }
+
+    operator typename CompatibleTypes<Pose<T> >::Type () const
+    {
+        typename CompatibleTypes<Pose<T> >::Type result;
+        result.Orientation = Rotation;
+        result.Position = Translation;
+        return result;
+    }
+
+    Quat<T>    Rotation;
+    Vector3<T> Translation;
+    
+    OVR_MATH_STATIC_ASSERT((sizeof(T) == sizeof(double) || sizeof(T) == sizeof(float)), "(sizeof(T) == sizeof(double) || sizeof(T) == sizeof(float))");
+
+    void ToArray(T* arr) const
+    {
+        T temp[7] =  { Rotation.x, Rotation.y, Rotation.z, Rotation.w, Translation.x, Translation.y, Translation.z };
+        for (int i = 0; i < 7; i++) arr[i] = temp[i];
+    }
+
+    static Pose<T> FromArray(const T* v)
+    {
+        Quat<T> rotation(v[0], v[1], v[2], v[3]);
+        Vector3<T> translation(v[4], v[5], v[6]);
+        // Ensure rotation is normalized, in case it was originally a float, stored in a .json file, etc.
+        return Pose<T>(rotation.Normalized(), translation);
+    }
+
+    Vector3<T> Rotate(const Vector3<T>& v) const
+    {
+        return Rotation.Rotate(v);
+    }
+
+    Vector3<T> InverseRotate(const Vector3<T>& v) const
+    {
+        return Rotation.InverseRotate(v);
+    }
+
+    Vector3<T> Translate(const Vector3<T>& v) const
+    {
+        return v + Translation;
+    }
+
+    Vector3<T> Transform(const Vector3<T>& v) const
+    {
+        return Rotate(v) + Translation;
+    }
+
+    Vector3<T> InverseTransform(const Vector3<T>& v) const
+    {
+        return InverseRotate(v - Translation);
+    }
+
+
+    Vector3<T> Apply(const Vector3<T>& v) const
+    {
+        return Transform(v);
+    }
+
+    Pose operator*(const Pose& other) const   
+    {
+        return Pose(Rotation * other.Rotation, Apply(other.Translation));
+    }
+
+    Pose Inverted() const   
+    {
+        Quat<T> inv = Rotation.Inverted();
+        return Pose(inv, inv.Rotate(-Translation));
+    }
+
+    // Interpolation between two poses: translation is interpolated with Lerp(),
+    // and rotations are interpolated with Slerp().
+    Pose Lerp(const Pose& b, T s)
+    {
+        return Pose(Rotation.Slerp(b.Rotation, s), Translation.Lerp(b.Translation, s));
+    }
+
+    // Similar to Lerp above, except faster in case of small rotation differences.  See Quat<T>::FastSlerp.
+    Pose FastLerp(const Pose& b, T s)
+    {
+        return Pose(Rotation.FastSlerp(b.Rotation, s), Translation.Lerp(b.Translation, s));
+    }
+
+    Pose TimeIntegrate(const Vector3<T>& linearVelocity, const Vector3<T>& angularVelocity, T dt) const
+    {
+        return Pose(
+                (Rotation * Quat<T>::FastFromRotationVector(angularVelocity * dt, false)).Normalized(),
+                Translation + linearVelocity * dt);
+    }
+
+    Pose TimeIntegrate(const Vector3<T>& linearVelocity, const Vector3<T>& linearAcceleration,
+                       const Vector3<T>& angularVelocity, const Vector3<T>& angularAcceleration,
+                       T dt) const
+    {
+        return Pose(Rotation.TimeIntegrate(angularVelocity, angularAcceleration, dt),
+                    Translation + linearVelocity*dt + linearAcceleration*dt*dt * T(0.5));
+    }
+};
+
+typedef Pose<float>  Posef;
+typedef Pose<double> Posed;
+
+OVR_MATH_STATIC_ASSERT((sizeof(Posed) == sizeof(Quatd) + sizeof(Vector3d)), "sizeof(Posed) failure");
+OVR_MATH_STATIC_ASSERT((sizeof(Posef) == sizeof(Quatf) + sizeof(Vector3f)), "sizeof(Posef) failure");
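+
+// Usage sketch (editorial note, not part of the original SDK source): composing
+// poses and transforming points; worldFromHead/headFromHand are example names.
+//
+//     Posef worldFromHead(Quatf(Vector3f(0.0f, 1.0f, 0.0f), 0.5f), Vector3f(0.0f, 1.7f, 0.0f));
+//     Posef headFromHand(Quatf::Identity(), Vector3f(0.2f, -0.3f, -0.4f));
+//     Posef worldFromHand = worldFromHead * headFromHand;          // right-to-left composition
+//     Vector3f pWorld = worldFromHand.Transform(Vector3f(0.0f, 0.0f, -0.1f));
+//     Vector3f pHand  = worldFromHand.InverseTransform(pWorld);    // round trip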
+    
+
+//-------------------------------------------------------------------------------------
+// ***** Matrix4
+//
+// Matrix4 is a 4x4 matrix used for 3d transformations and projections.
+// Translation stored in the last column.
+// The matrix is stored in row-major order in memory, meaning that values
+// of the first row are stored before the next one.
+//
+// The arrangement of the matrix is chosen to be in Right-Handed 
+// coordinate system and counterclockwise rotations when looking down
+// the axis
+//
+// Transformation Order:
+//   - Transformations are applied from right to left, so the expression
+//     M1 * M2 * M3 * V means that the vector V is transformed by M3 first,
+//     followed by M2 and M1. 
+//
+// Coordinate system: Right Handed
+//
+// Rotations: Counterclockwise when looking down the axis. All angles are in radians.
+//    
+//  | sx   01   02   tx |    // First column  (sx, 10, 20): Axis X basis vector.
+//  | 10   sy   12   ty |    // Second column (01, sy, 21): Axis Y basis vector.
+//  | 20   21   sz   tz |    // Third column  (02, 12, sz): Axis Z basis vector.
+//  | 30   31   32   33 |
+//
+//  The basis vectors are the first three columns.
+
+template<class T>
+class Matrix4
+{
+public:
+    typedef T ElementType;
+    static const size_t Dimension = 4;
+
+    T M[4][4];
+
+    enum NoInitType { NoInit };
+
+    // Construct with no memory initialization.
+    Matrix4(NoInitType) { }
+
+    // By default, we construct identity matrix.
+    Matrix4()
+    {
+        M[0][0] = M[1][1] = M[2][2] = M[3][3] = T(1);
+        M[0][1] = M[1][0] = M[2][3] = M[3][1] = T(0);
+        M[0][2] = M[1][2] = M[2][0] = M[3][2] = T(0);
+        M[0][3] = M[1][3] = M[2][1] = M[3][0] = T(0);
+    }
+
+    Matrix4(T m11, T m12, T m13, T m14,
+            T m21, T m22, T m23, T m24,
+            T m31, T m32, T m33, T m34,
+            T m41, T m42, T m43, T m44)
+    {
+        M[0][0] = m11; M[0][1] = m12; M[0][2] = m13; M[0][3] = m14;
+        M[1][0] = m21; M[1][1] = m22; M[1][2] = m23; M[1][3] = m24;
+        M[2][0] = m31; M[2][1] = m32; M[2][2] = m33; M[2][3] = m34;
+        M[3][0] = m41; M[3][1] = m42; M[3][2] = m43; M[3][3] = m44;
+    }
+
+    Matrix4(T m11, T m12, T m13,
+            T m21, T m22, T m23,
+            T m31, T m32, T m33)
+    {
+        M[0][0] = m11; M[0][1] = m12; M[0][2] = m13; M[0][3] = T(0);
+        M[1][0] = m21; M[1][1] = m22; M[1][2] = m23; M[1][3] = T(0);
+        M[2][0] = m31; M[2][1] = m32; M[2][2] = m33; M[2][3] = T(0);
+        M[3][0] = T(0);   M[3][1] = T(0);   M[3][2] = T(0);   M[3][3] = T(1);
+    }
+
+    explicit Matrix4(const Matrix3<T>& m)
+    {
+        M[0][0] = m.M[0][0]; M[0][1] = m.M[0][1]; M[0][2] = m.M[0][2]; M[0][3] = T(0);
+        M[1][0] = m.M[1][0]; M[1][1] = m.M[1][1]; M[1][2] = m.M[1][2]; M[1][3] = T(0);
+        M[2][0] = m.M[2][0]; M[2][1] = m.M[2][1]; M[2][2] = m.M[2][2]; M[2][3] = T(0);
+        M[3][0] = T(0);         M[3][1] = T(0);         M[3][2] = T(0);         M[3][3] = T(1);
+    }
+
+    explicit Matrix4(const Quat<T>& q)
+    {
+        OVR_MATH_ASSERT(q.IsNormalized());
+        T ww = q.w*q.w;
+        T xx = q.x*q.x;
+        T yy = q.y*q.y;
+        T zz = q.z*q.z;
+
+        M[0][0] = ww + xx - yy - zz;       M[0][1] = 2 * (q.x*q.y - q.w*q.z); M[0][2] = 2 * (q.x*q.z + q.w*q.y); M[0][3] = T(0);
+        M[1][0] = 2 * (q.x*q.y + q.w*q.z); M[1][1] = ww - xx + yy - zz;       M[1][2] = 2 * (q.y*q.z - q.w*q.x); M[1][3] = T(0);
+        M[2][0] = 2 * (q.x*q.z - q.w*q.y); M[2][1] = 2 * (q.y*q.z + q.w*q.x); M[2][2] = ww - xx - yy + zz;       M[2][3] = T(0);
+        M[3][0] = T(0);                       M[3][1] = T(0);                       M[3][2] = T(0);                       M[3][3] = T(1);
+    }
+
+    explicit Matrix4(const Pose<T>& p)
+    {
+        Matrix4 result(p.Rotation);
+        result.SetTranslation(p.Translation);
+        *this = result;
+    }
+
+
+    // C-interop support
+    explicit Matrix4(const Matrix4<typename Math<T>::OtherFloatType> &src)
+    {
+        for (int i = 0; i < 4; i++)
+            for (int j = 0; j < 4; j++)
+                M[i][j] = (T)src.M[i][j];
+    }
+
+    // C-interop support.
+    Matrix4(const typename CompatibleTypes<Matrix4<T> >::Type& s) 
+    {
+        OVR_MATH_STATIC_ASSERT(sizeof(s) == sizeof(Matrix4), "sizeof(s) == sizeof(Matrix4)");
+        memcpy(M, s.M, sizeof(M));
+    }
+
+    operator typename CompatibleTypes<Matrix4<T> >::Type () const
+    {
+        typename CompatibleTypes<Matrix4<T> >::Type result;
+        OVR_MATH_STATIC_ASSERT(sizeof(result) == sizeof(Matrix4), "sizeof(result) == sizeof(Matrix4)");
+        memcpy(result.M, M, sizeof(M));
+        return result;
+    }
+
+    void ToString(char* dest, size_t destsize) const
+    {
+        size_t pos = 0;
+        for (int r=0; r<4; r++)
+        {
+            for (int c=0; c<4; c++)
+            {
+                pos += OVRMath_sprintf(dest+pos, destsize-pos, "%g ", M[r][c]);
+            }
+        }
+    }
+
+    static Matrix4 FromString(const char* src)
+    {
+        Matrix4 result;
+        if (src)
+        {
+            for (int r = 0; r < 4; r++)
+            {
+                for (int c = 0; c < 4; c++)
+                {
+                    result.M[r][c] = (T)atof(src);
+                    while (*src && *src != ' ')
+                    {
+                        src++;
+                    }
+                    while (*src && *src == ' ')
+                    {
+                        src++;
+                    }
+                }
+            }
+        }
+        return result;
+    }
+
+    static Matrix4 Identity()  { return Matrix4(); }
+
+    void SetIdentity()
+    {
+        M[0][0] = M[1][1] = M[2][2] = M[3][3] = T(1);
+        M[0][1] = M[1][0] = M[2][3] = M[3][1] = T(0);
+        M[0][2] = M[1][2] = M[2][0] = M[3][2] = T(0);
+        M[0][3] = M[1][3] = M[2][1] = M[3][0] = T(0);
+    }
+
+    void SetXBasis(const Vector3<T>& v)
+    {
+        M[0][0] = v.x;
+        M[1][0] = v.y;
+        M[2][0] = v.z;
+    }
+    Vector3<T> GetXBasis() const
+    {
+        return Vector3<T>(M[0][0], M[1][0], M[2][0]);
+    }
+
+    void SetYBasis(const Vector3<T> & v)
+    {
+        M[0][1] = v.x;
+        M[1][1] = v.y;
+        M[2][1] = v.z;
+    }
+    Vector3<T> GetYBasis() const
+    {
+        return Vector3<T>(M[0][1], M[1][1], M[2][1]);
+    }
+
+    void SetZBasis(const Vector3<T> & v)
+    {
+        M[0][2] = v.x;
+        M[1][2] = v.y;
+        M[2][2] = v.z;
+    }
+    Vector3<T> GetZBasis() const
+    {
+        return Vector3<T>(M[0][2], M[1][2], M[2][2]);
+    }
+
+    bool operator== (const Matrix4& b) const
+    {
+        bool isEqual = true;
+        for (int i = 0; i < 4; i++)
+            for (int j = 0; j < 4; j++)
+                isEqual &= (M[i][j] == b.M[i][j]);
+
+        return isEqual;
+    }
+
+    Matrix4 operator+ (const Matrix4& b) const
+    {
+        Matrix4 result(*this);
+        result += b;
+        return result;
+    }
+
+    Matrix4& operator+= (const Matrix4& b)
+    {
+        for (int i = 0; i < 4; i++)
+            for (int j = 0; j < 4; j++)
+                M[i][j] += b.M[i][j];
+        return *this;
+    }
+
+    Matrix4 operator- (const Matrix4& b) const
+    {
+        Matrix4 result(*this);
+        result -= b;
+        return result;
+    }
+
+    Matrix4& operator-= (const Matrix4& b)
+    {
+        for (int i = 0; i < 4; i++)
+            for (int j = 0; j < 4; j++)
+                M[i][j] -= b.M[i][j];
+        return *this;
+    }
+
+    // Multiplies two matrices into destination with minimum copying.
+    static Matrix4& Multiply(Matrix4* d, const Matrix4& a, const Matrix4& b)
+    {
+        OVR_MATH_ASSERT((d != &a) && (d != &b));
+        int i = 0;
+        do {
+            d->M[i][0] = a.M[i][0] * b.M[0][0] + a.M[i][1] * b.M[1][0] + a.M[i][2] * b.M[2][0] + a.M[i][3] * b.M[3][0];
+            d->M[i][1] = a.M[i][0] * b.M[0][1] + a.M[i][1] * b.M[1][1] + a.M[i][2] * b.M[2][1] + a.M[i][3] * b.M[3][1];
+            d->M[i][2] = a.M[i][0] * b.M[0][2] + a.M[i][1] * b.M[1][2] + a.M[i][2] * b.M[2][2] + a.M[i][3] * b.M[3][2];
+            d->M[i][3] = a.M[i][0] * b.M[0][3] + a.M[i][1] * b.M[1][3] + a.M[i][2] * b.M[2][3] + a.M[i][3] * b.M[3][3];
+        } while((++i) < 4);
+
+        return *d;
+    }
+
+    Matrix4 operator* (const Matrix4& b) const
+    {
+        Matrix4 result(Matrix4::NoInit);
+        Multiply(&result, *this, b);
+        return result;
+    }
+
+    Matrix4& operator*= (const Matrix4& b)
+    {
+        return Multiply(this, Matrix4(*this), b);
+    }
+
+    Matrix4 operator* (T s) const
+    {
+        Matrix4 result(*this);
+        result *= s;
+        return result;
+    }
+
+    Matrix4& operator*= (T s)
+    {
+        for (int i = 0; i < 4; i++)
+            for (int j = 0; j < 4; j++)
+                M[i][j] *= s;
+        return *this;
+    }
+
+
+    Matrix4 operator/ (T s) const
+    {
+        Matrix4 result(*this);
+        result /= s;
+        return result;
+    }
+
+    Matrix4& operator/= (T s)
+    {
+        for (int i = 0; i < 4; i++)
+            for (int j = 0; j < 4; j++)
+                M[i][j] /= s;
+        return *this;
+    }
+
+    Vector3<T> Transform(const Vector3<T>& v) const
+    {
+        const T rcpW = T(1) / (M[3][0] * v.x + M[3][1] * v.y + M[3][2] * v.z + M[3][3]);
+        return Vector3<T>((M[0][0] * v.x + M[0][1] * v.y + M[0][2] * v.z + M[0][3]) * rcpW,
+                          (M[1][0] * v.x + M[1][1] * v.y + M[1][2] * v.z + M[1][3]) * rcpW,
+                          (M[2][0] * v.x + M[2][1] * v.y + M[2][2] * v.z + M[2][3]) * rcpW);
+    }
+
+    Vector4<T> Transform(const Vector4<T>& v) const
+    {
+        return Vector4<T>(M[0][0] * v.x + M[0][1] * v.y + M[0][2] * v.z + M[0][3] * v.w,
+                          M[1][0] * v.x + M[1][1] * v.y + M[1][2] * v.z + M[1][3] * v.w,
+                          M[2][0] * v.x + M[2][1] * v.y + M[2][2] * v.z + M[2][3] * v.w,
+                          M[3][0] * v.x + M[3][1] * v.y + M[3][2] * v.z + M[3][3] * v.w);
+    }
+
+    Matrix4 Transposed() const
+    {
+        return Matrix4(M[0][0], M[1][0], M[2][0], M[3][0],
+                        M[0][1], M[1][1], M[2][1], M[3][1],
+                        M[0][2], M[1][2], M[2][2], M[3][2],
+                        M[0][3], M[1][3], M[2][3], M[3][3]);
+    }
+
+    void     Transpose()
+    {
+        *this = Transposed();
+    }
+
+
+    T SubDet (const size_t* rows, const size_t* cols) const
+    {
+        return M[rows[0]][cols[0]] * (M[rows[1]][cols[1]] * M[rows[2]][cols[2]] - M[rows[1]][cols[2]] * M[rows[2]][cols[1]])
+             - M[rows[0]][cols[1]] * (M[rows[1]][cols[0]] * M[rows[2]][cols[2]] - M[rows[1]][cols[2]] * M[rows[2]][cols[0]])
+             + M[rows[0]][cols[2]] * (M[rows[1]][cols[0]] * M[rows[2]][cols[1]] - M[rows[1]][cols[1]] * M[rows[2]][cols[0]]);
+    }
+
+    T Cofactor(size_t I, size_t J) const
+    {
+        const size_t indices[4][3] = {{1,2,3},{0,2,3},{0,1,3},{0,1,2}};
+        return ((I+J)&1) ? -SubDet(indices[I],indices[J]) : SubDet(indices[I],indices[J]);
+    }
+
+    T    Determinant() const
+    {
+        return M[0][0] * Cofactor(0,0) + M[0][1] * Cofactor(0,1) + M[0][2] * Cofactor(0,2) + M[0][3] * Cofactor(0,3);
+    }
+
+    Matrix4 Adjugated() const
+    {
+        return Matrix4(Cofactor(0,0), Cofactor(1,0), Cofactor(2,0), Cofactor(3,0), 
+                        Cofactor(0,1), Cofactor(1,1), Cofactor(2,1), Cofactor(3,1), 
+                        Cofactor(0,2), Cofactor(1,2), Cofactor(2,2), Cofactor(3,2),
+                        Cofactor(0,3), Cofactor(1,3), Cofactor(2,3), Cofactor(3,3));
+    }
+
+    Matrix4 Inverted() const
+    {
+        T det = Determinant();
+        OVR_MATH_ASSERT(det != 0);
+        return Adjugated() * (T(1)/det);
+    }
+
+    void Invert()
+    {
+        *this = Inverted();
+    }
+
+    // This is more efficient than general inverse, but ONLY works
+    // correctly if it is a homogeneous transform matrix (rot + trans)
+    Matrix4 InvertedHomogeneousTransform() const
+    {
+        // Make the inverse rotation matrix
+        Matrix4 rinv = this->Transposed();
+        rinv.M[3][0] = rinv.M[3][1] = rinv.M[3][2] = T(0);
+        // Make the inverse translation matrix
+        Vector3<T> tvinv(-M[0][3],-M[1][3],-M[2][3]);
+        Matrix4 tinv = Matrix4::Translation(tvinv);
+        return rinv * tinv;  // "untranslate", then "unrotate"
+    }
+
+    // This is more efficient than general inverse, but ONLY works
+    // correctly if it is a homogeneous transform matrix (rot + trans)
+    void InvertHomogeneousTransform()
+    {
+        *this = InvertedHomogeneousTransform();
+    }
+
+    // Matrix to Euler Angles conversion
+    // a,b,c, are the YawPitchRoll angles to be returned
+    // rotation a around axis A1
+    // is followed by rotation b around axis A2
+    // is followed by rotation c around axis A3
+    // rotations are CCW or CW (D) in LH or RH coordinate system (S)
+    template <Axis A1, Axis A2, Axis A3, RotateDirection D, HandedSystem S>
+    void ToEulerAngles(T *a, T *b, T *c) const
+    {
+        OVR_MATH_STATIC_ASSERT((A1 != A2) && (A2 != A3) && (A1 != A3), "(A1 != A2) && (A2 != A3) && (A1 != A3)");
+
+        T psign = T(-1);
+        if (((A1 + 1) % 3 == A2) && ((A2 + 1) % 3 == A3)) // Determine whether even permutation
+            psign = T(1);
+        
+        T pm = psign*M[A1][A3];
+        T singularityRadius = Math<T>::SingularityRadius();
+        if (pm < T(-1) + singularityRadius)
+        { // South pole singularity
+            *a = T(0);
+            *b = -S*D*((T)MATH_DOUBLE_PIOVER2);
+            *c = S*D*atan2( psign*M[A2][A1], M[A2][A2] );
+        }
+        else if (pm > T(1) - singularityRadius)
+        { // North pole singularity
+            *a = T(0);
+            *b = S*D*((T)MATH_DOUBLE_PIOVER2);
+            *c = S*D*atan2( psign*M[A2][A1], M[A2][A2] );
+        }
+        else
+        { // Normal case (nonsingular)
+            *a = S*D*atan2( -psign*M[A2][A3], M[A3][A3] );
+            *b = S*D*asin(pm);
+            *c = S*D*atan2( -psign*M[A1][A2], M[A1][A1] );
+        }
+    }
+
+    // Matrix to Euler Angles conversion
+    // a,b,c, are the YawPitchRoll angles to be returned
+    // rotation a around axis A1
+    // is followed by rotation b around axis A2
+    // is followed by rotation c around axis A1
+    // rotations are CCW or CW (D) in LH or RH coordinate system (S)
+    template <Axis A1, Axis A2, RotateDirection D, HandedSystem S>
+    void ToEulerAnglesABA(T *a, T *b, T *c) const
+    {        
+         OVR_MATH_STATIC_ASSERT(A1 != A2, "A1 != A2");
+  
+        // Determine the axis that was not supplied
+        int m = 3 - A1 - A2;
+
+        T psign = T(-1);
+        if ((A1 + 1) % 3 == A2) // Determine whether even permutation
+            psign = T(1);
+
+        T c2 = M[A1][A1];
+        T singularityRadius = Math<T>::SingularityRadius();
+        if (c2 < T(-1) + singularityRadius)
+        { // South pole singularity
+            *a = T(0);
+            *b = S*D*((T)MATH_DOUBLE_PI);
+            *c = S*D*atan2( -psign*M[A2][m],M[A2][A2]);
+        }
+        else if (c2 > T(1) - singularityRadius)
+        { // North pole singularity
+            *a = T(0);
+            *b = T(0);
+            *c = S*D*atan2( -psign*M[A2][m],M[A2][A2]);
+        }
+        else
+        { // Normal case (nonsingular)
+            *a = S*D*atan2( M[A2][A1],-psign*M[m][A1]);
+            *b = S*D*acos(c2);
+            *c = S*D*atan2( M[A1][A2],psign*M[A1][m]);
+        }
+    }
+  
+    // Creates a matrix that converts the vertices from one coordinate system
+    // to another.
+    static Matrix4 AxisConversion(const WorldAxes& to, const WorldAxes& from)
+    {        
+        // Holds axis values from the 'to' structure
+        int toArray[3] = { to.XAxis, to.YAxis, to.ZAxis };
+
+        // The inverse of the toArray
+        int inv[4]; 
+        inv[0] = inv[abs(to.XAxis)] = 0;
+        inv[abs(to.YAxis)] = 1;
+        inv[abs(to.ZAxis)] = 2;
+
+        Matrix4 m(0,  0,  0, 
+                  0,  0,  0,
+                  0,  0,  0);
+
+        // Only three values in the matrix need to be changed to 1 or -1.
+        m.M[inv[abs(from.XAxis)]][0] = T(from.XAxis/toArray[inv[abs(from.XAxis)]]);
+        m.M[inv[abs(from.YAxis)]][1] = T(from.YAxis/toArray[inv[abs(from.YAxis)]]);
+        m.M[inv[abs(from.ZAxis)]][2] = T(from.ZAxis/toArray[inv[abs(from.ZAxis)]]);
+        return m;
+    } 
+
+
+    // Creates a matrix for translation by vector
+    static Matrix4 Translation(const Vector3<T>& v)
+    {
+        Matrix4 t;
+        t.M[0][3] = v.x;
+        t.M[1][3] = v.y;
+        t.M[2][3] = v.z;
+        return t;
+    }
+
+    // Creates a matrix for translation by vector
+    static Matrix4 Translation(T x, T y, T z = T(0))
+    {
+        Matrix4 t;
+        t.M[0][3] = x;
+        t.M[1][3] = y;
+        t.M[2][3] = z;
+        return t;
+    }
+
+    // Sets the translation part
+    void SetTranslation(const Vector3<T>& v)
+    {
+        M[0][3] = v.x;
+        M[1][3] = v.y;
+        M[2][3] = v.z;
+    }
+
+    Vector3<T> GetTranslation() const
+    {
+        return Vector3<T>( M[0][3], M[1][3], M[2][3] );
+    }
+
+    // Creates a matrix for scaling by vector
+    static Matrix4 Scaling(const Vector3<T>& v)
+    {
+        Matrix4 t;
+        t.M[0][0] = v.x;
+        t.M[1][1] = v.y;
+        t.M[2][2] = v.z;
+        return t;
+    }
+
+    // Creates a matrix for scaling by vector
+    static Matrix4 Scaling(T x, T y, T z)
+    {
+        Matrix4 t;
+        t.M[0][0] = x;
+        t.M[1][1] = y;
+        t.M[2][2] = z;
+        return t;
+    }
+
+    // Creates a matrix for scaling by constant
+    static Matrix4 Scaling(T s)
+    {
+        Matrix4 t;
+        t.M[0][0] = s;
+        t.M[1][1] = s;
+        t.M[2][2] = s;
+        return t;
+    }
+
+    // Simple L1 distance in R^16
+    T Distance(const Matrix4& m2) const           
+    { 
+        T d = fabs(M[0][0] - m2.M[0][0]) + fabs(M[0][1] - m2.M[0][1]);
+        d += fabs(M[0][2] - m2.M[0][2]) + fabs(M[0][3] - m2.M[0][3]);
+        d += fabs(M[1][0] - m2.M[1][0]) + fabs(M[1][1] - m2.M[1][1]);
+        d += fabs(M[1][2] - m2.M[1][2]) + fabs(M[1][3] - m2.M[1][3]);
+        d += fabs(M[2][0] - m2.M[2][0]) + fabs(M[2][1] - m2.M[2][1]);
+        d += fabs(M[2][2] - m2.M[2][2]) + fabs(M[2][3] - m2.M[2][3]);
+        d += fabs(M[3][0] - m2.M[3][0]) + fabs(M[3][1] - m2.M[3][1]);
+        d += fabs(M[3][2] - m2.M[3][2]) + fabs(M[3][3] - m2.M[3][3]);
+        return d; 
+    }
+
+    // Creates a rotation matrix rotating around the given axis by 'angle' radians.
+    // Just for quick testing.  Not for final API.  Need to remove case.
+    static Matrix4 RotationAxis(Axis A, T angle, RotateDirection d, HandedSystem s)
+    {
+        T sina = s * d *sin(angle);
+        T cosa = cos(angle);
+        
+        switch(A)
+        {
+        case Axis_X:
+            return Matrix4(1,  0,     0, 
+                           0,  cosa,  -sina,
+                           0,  sina,  cosa);
+        case Axis_Y:
+            return Matrix4(cosa,  0,   sina, 
+                           0,     1,   0,
+                           -sina, 0,   cosa);
+        case Axis_Z:
+            return Matrix4(cosa,  -sina,  0, 
+                           sina,  cosa,   0,
+                           0,     0,      1);
+        default:
+            return Matrix4();
+        }
+    }
+
+
+    // Creates a rotation matrix rotating around the X axis by 'angle' radians.
+    // Rotation direction depends on the coordinate system:
+    // RHS (Oculus default): Positive angle values rotate Counter-clockwise (CCW),
+    //                        while looking in the negative axis direction. This is the
+    //                        same as looking down from positive axis values towards origin.
+    // LHS: Positive angle values rotate clock-wise (CW), while looking in the
+    //       negative axis direction.
+    static Matrix4 RotationX(T angle)
+    {
+        T sina = sin(angle);
+        T cosa = cos(angle);
+        return Matrix4(1,  0,     0, 
+                       0,  cosa,  -sina,
+                       0,  sina,  cosa);
+    }
+
+    // Creates a rotation matrix rotating around the Y axis by 'angle' radians.
+    // Rotation direction depends on the coordinate system:
+    //  RHS (Oculus default): Positive angle values rotate Counter-clockwise (CCW),
+    //                        while looking in the negative axis direction. This is the
+    //                        same as looking down from positive axis values towards origin.
+    //  LHS: Positive angle values rotate clock-wise (CW), while looking in the
+    //       negative axis direction.
+    static Matrix4 RotationY(T angle)
+    {
+        T sina = (T)sin(angle);
+        T cosa = (T)cos(angle);
+        return Matrix4(cosa,  0,   sina, 
+                       0,     1,   0,
+                       -sina, 0,   cosa);
+    }
+
+    // Creates a rotation matrix rotating around the Z axis by 'angle' radians.
+    // Rotation direction depends on the coordinate system:
+    //  RHS (Oculus default): Positive angle values rotate Counter-clockwise (CCW),
+    //                        while looking in the negative axis direction. This is the
+    //                        same as looking down from positive axis values towards origin.
+    //  LHS: Positive angle values rotate clock-wise (CW), while looking in the
+    //       negative axis direction.
+    static Matrix4 RotationZ(T angle)
+    {
+        T sina = sin(angle);
+        T cosa = cos(angle);
+        return Matrix4(cosa,  -sina,  0, 
+                       sina,  cosa,   0,
+                       0,     0,      1);
+    }
+
+    // LookAtRH creates a View transformation matrix for right-handed coordinate system.
+    // The resulting matrix points camera from 'eye' towards 'at' direction, with 'up'
+    // specifying the up vector. The resulting matrix should be used with PerspectiveRH
+    // projection.
+    static Matrix4 LookAtRH(const Vector3<T>& eye, const Vector3<T>& at, const Vector3<T>& up)
+    {
+        Vector3<T> z = (eye - at).Normalized();  // Forward
+        Vector3<T> x = up.Cross(z).Normalized(); // Right
+        Vector3<T> y = z.Cross(x);
+
+        Matrix4 m(x.x,  x.y,  x.z,  -(x.Dot(eye)),
+                  y.x,  y.y,  y.z,  -(y.Dot(eye)),
+                  z.x,  z.y,  z.z,  -(z.Dot(eye)),
+                  0,    0,    0,    1 );
+        return m;
+    }
+    
+    // LookAtLH creates a View transformation matrix for left-handed coordinate system.
+    // The resulting matrix points camera from 'eye' towards 'at' direction, with 'up'
+    // specifying the up vector. 
+    static Matrix4 LookAtLH(const Vector3<T>& eye, const Vector3<T>& at, const Vector3<T>& up)
+    {
+        Vector3<T> z = (at - eye).Normalized();  // Forward
+        Vector3<T> x = up.Cross(z).Normalized(); // Right
+        Vector3<T> y = z.Cross(x);
+
+        Matrix4 m(x.x,  x.y,  x.z,  -(x.Dot(eye)),
+                  y.x,  y.y,  y.z,  -(y.Dot(eye)),
+                  z.x,  z.y,  z.z,  -(z.Dot(eye)),
+                  0,    0,    0,    1 ); 
+        return m;
+    }
+    
+    // PerspectiveRH creates a right-handed perspective projection matrix that can be
+    // used with the Oculus sample renderer. 
+    //  yfov   - Specifies vertical field of view in radians.
+    //  aspect - Screen aspect ratio, which is usually width/height for square pixels.
+    //           Note that xfov = yfov * aspect.
+    //  znear  - Absolute value of near Z clipping range.
+    //  zfar   - Absolute value of far  Z clipping range (larger than near).
+    // Even though RHS usually looks in the direction of negative Z, positive values
+    // are expected for znear and zfar.
+    static Matrix4 PerspectiveRH(T yfov, T aspect, T znear, T zfar)
+    {
+        Matrix4 m;
+        T tanHalfFov = tan(yfov * T(0.5));
+
+        m.M[0][0] = T(1) / (aspect * tanHalfFov);
+        m.M[1][1] = T(1) / tanHalfFov;
+        m.M[2][2] = zfar / (znear - zfar);
+        m.M[3][2] = T(-1);
+        m.M[2][3] = (zfar * znear) / (znear - zfar);
+        m.M[3][3] = T(0);
+
+        // Note: Post-projection matrix result assumes Left-Handed coordinate system,
+        //       with Y up, X right and Z forward. This supports positive z-buffer values.
+        // This is the case even for RHS coordinate input.
+        return m;
+    }
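+
+    // Usage sketch (editorial note, not part of the original SDK source): a
+    // right-handed view/projection pair, combined right to left (projection last).
+    //
+    //     Matrix4f view = Matrix4f::LookAtRH(Vector3f(0.0f, 1.7f, 2.0f),   // eye
+    //                                        Vector3f(0.0f, 1.7f, 0.0f),   // at
+    //                                        Vector3f(0.0f, 1.0f, 0.0f));  // up
+    //     Matrix4f proj = Matrix4f::PerspectiveRH(1.0f, 16.0f / 9.0f, 0.1f, 100.0f);
+    //     Matrix4f viewProj = proj * view;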
+    
+    // PerspectiveLH creates a left-handed perspective projection matrix that can be
+    // used with the Oculus sample renderer. 
+    //  yfov   - Specifies vertical field of view in radians.
+    //  aspect - Screen aspect ratio, which is usually width/height for square pixels.
+    //           Note that xfov = yfov * aspect.
+    //  znear  - Absolute value of near Z clipping range.
+    //  zfar   - Absolute value of far  Z clipping range (larger than near).
+    static Matrix4 PerspectiveLH(T yfov, T aspect, T znear, T zfar)
+    {
+        Matrix4 m;
+        T tanHalfFov = tan(yfov * T(0.5));
+
+        m.M[0][0] = T(1) / (aspect * tanHalfFov);
+        m.M[1][1] = T(1) / tanHalfFov;
+        //m.M[2][2] = zfar / (znear - zfar);
+        m.M[2][2] = zfar / (zfar - znear);
+        m.M[3][2] = T(-1);
+        m.M[2][3] = (zfar * znear) / (znear - zfar);
+        m.M[3][3] = T(0);
+
+        // Note: Post-projection matrix result assumes Left-Handed coordinate system,    
+        //       with Y up, X right and Z forward. This supports positive z-buffer values.
+        // This is the case even for RHS coordinate input. 
+        return m;
+    }
+
+    static Matrix4 Ortho2D(T w, T h)
+    {
+        Matrix4 m;
+        m.M[0][0] = T(2.0)/w;
+        m.M[1][1] = T(-2.0)/h;
+        m.M[0][3] = T(-1.0);
+        m.M[1][3] = T(1.0);
+        m.M[2][2] = T(0);
+        return m;
+    }
+};
+
+typedef Matrix4<float>  Matrix4f;
+typedef Matrix4<double> Matrix4d;
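+
+// Usage sketch (editorial note, not part of the original SDK source): transform
+// composition order; the rightmost matrix is applied to the vector first.
+//
+//     Matrix4f model = Matrix4f::Translation(1.0f, 0.0f, 0.0f)              // applied last
+//                    * Matrix4f::RotationY(float(MATH_DOUBLE_PIOVER2));     // applied first
+//     Vector3f p = model.Transform(Vector3f(0.0f, 0.0f, -1.0f));
+//     // forward (-Z) rotates to -X, then translates by +X: p is approximately (0, 0, 0)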
+
+//-------------------------------------------------------------------------------------
+// ***** Matrix3
+//
+// Matrix3 is a 3x3 matrix used for representing a rotation.
+// The matrix is stored in row-major order in memory, meaning that values
+// of the first row are stored before the next one.
+//
+// The arrangement of the matrix is chosen to be in Right-Handed 
+// coordinate system and counterclockwise rotations when looking down
+// the axis
+//
+// Transformation Order:
+//   - Transformations are applied from right to left, so the expression
+//     M1 * M2 * M3 * V means that the vector V is transformed by M3 first,
+//     followed by M2 and M1. 
+//
+// Coordinate system: Right Handed
+//
+// Rotations: Counterclockwise when looking down the axis. All angles are in radians.
+
+template<class T>
+class Matrix3
+{
+public:
+    typedef T ElementType;
+    static const size_t Dimension = 3;
+
+    T M[3][3];
+
+    enum NoInitType { NoInit };
+
+    // Construct with no memory initialization.
+    Matrix3(NoInitType) { }
+
+    // By default, we construct identity matrix.
+    Matrix3()
+    {
+        M[0][0] = M[1][1] = M[2][2] = T(1);
+        M[0][1] = M[1][0] = M[2][0] = T(0);
+        M[0][2] = M[1][2] = M[2][1] = T(0);
+    }
+
+    Matrix3(T m11, T m12, T m13,
+            T m21, T m22, T m23,
+            T m31, T m32, T m33)
+    {
+        M[0][0] = m11; M[0][1] = m12; M[0][2] = m13;
+        M[1][0] = m21; M[1][1] = m22; M[1][2] = m23;
+        M[2][0] = m31; M[2][1] = m32; M[2][2] = m33;
+    }
+    
+    // Construction from X, Y, Z basis vectors
+    Matrix3(const Vector3<T>& xBasis, const Vector3<T>& yBasis, const Vector3<T>& zBasis)
+    {
+        M[0][0] = xBasis.x; M[0][1] = yBasis.x; M[0][2] = zBasis.x;
+        M[1][0] = xBasis.y; M[1][1] = yBasis.y; M[1][2] = zBasis.y;
+        M[2][0] = xBasis.z; M[2][1] = yBasis.z; M[2][2] = zBasis.z;
+    }
+
+    explicit Matrix3(const Quat<T>& q)
+    {
+        OVR_MATH_ASSERT(q.IsNormalized());
+        const T tx  = q.x+q.x,  ty  = q.y+q.y,  tz  = q.z+q.z;
+        const T twx = q.w*tx,   twy = q.w*ty,   twz = q.w*tz;
+        const T txx = q.x*tx,   txy = q.x*ty,   txz = q.x*tz;
+        const T tyy = q.y*ty,   tyz = q.y*tz,   tzz = q.z*tz;
+        M[0][0] = T(1) - (tyy + tzz);    M[0][1] = txy - twz;            M[0][2] = txz + twy;
+        M[1][0] = txy + twz;            M[1][1] = T(1) - (txx + tzz);    M[1][2] = tyz - twx;
+        M[2][0] = txz - twy;            M[2][1] = tyz + twx;            M[2][2] = T(1) - (txx + tyy);
+    }
+    
+    inline explicit Matrix3(T s)
+    {
+        M[0][0] = M[1][1] = M[2][2] = s;
+        M[0][1] = M[0][2] = M[1][0] = M[1][2] = M[2][0] = M[2][1] = T(0);
+    }
+
+    Matrix3(T m11, T m22, T m33)
+    {
+        M[0][0] = m11; M[0][1] = T(0); M[0][2] = T(0);
+        M[1][0] = T(0); M[1][1] = m22; M[1][2] = T(0);
+        M[2][0] = T(0); M[2][1] = T(0); M[2][2] = m33;
+    }
+
+    explicit Matrix3(const Matrix3<typename Math<T>::OtherFloatType> &src)
+    {
+        for (int i = 0; i < 3; i++)
+            for (int j = 0; j < 3; j++)
+                M[i][j] = (T)src.M[i][j];
+    }
+
+    // C-interop support.
+    Matrix3(const typename CompatibleTypes<Matrix3<T> >::Type& s) 
+    {
+        OVR_MATH_STATIC_ASSERT(sizeof(s) == sizeof(Matrix3), "sizeof(s) == sizeof(Matrix3)");
+        memcpy(M, s.M, sizeof(M));
+    }
+
+    operator const typename CompatibleTypes<Matrix3<T> >::Type () const
+    {
+        typename CompatibleTypes<Matrix3<T> >::Type result;
+        OVR_MATH_STATIC_ASSERT(sizeof(result) == sizeof(Matrix3), "sizeof(result) == sizeof(Matrix3)");
+        memcpy(result.M, M, sizeof(M));
+        return result;
+    }
+
+    T  operator()(int i, int j) const { return M[i][j]; }
+    T& operator()(int i, int j)       { return M[i][j]; }
+
+    void ToString(char* dest, size_t destsize) const
+    {
+        size_t pos = 0;
+        for (int r=0; r<3; r++)
+        {
+            for (int c=0; c<3; c++)
+                pos += OVRMath_sprintf(dest+pos, destsize-pos, "%g ", M[r][c]);
+        }
+    }
+
+    static Matrix3 FromString(const char* src)
+    {
+        Matrix3 result;
+        if (src)
+        {
+            for (int r=0; r<3; r++)
+            {
+                for (int c=0; c<3; c++)
+                {
+                    result.M[r][c] = (T)atof(src);
+                    while (*src && *src != ' ')
+                        src++;
+                    while (*src && *src == ' ')
+                        src++;
+                }
+            }
+        }
+        return result;
+    }
+
+    static Matrix3 Identity()  { return Matrix3(); }
+
+    void SetIdentity()
+    {
+        M[0][0] = M[1][1] = M[2][2] = T(1);
+        M[0][1] = M[1][0] = M[2][0] = T(0);
+        M[0][2] = M[1][2] = M[2][1] = T(0);
+    }
+
+    static Matrix3 Diagonal(T m00, T m11, T m22)
+    {
+        return Matrix3(m00, 0, 0,
+            0, m11, 0,
+            0, 0, m22);
+    }
+    static Matrix3 Diagonal(const Vector3<T>& v) { return Diagonal(v.x, v.y, v.z); }
+
+    T Trace() const { return M[0][0] + M[1][1] + M[2][2]; }
+    
+    bool operator== (const Matrix3& b) const
+    {
+        bool isEqual = true;
+        for (int i = 0; i < 3; i++)
+        {
+            for (int j = 0; j < 3; j++)
+                isEqual &= (M[i][j] == b.M[i][j]);
+        }
+
+        return isEqual;
+    }
+
+    Matrix3 operator+ (const Matrix3& b) const
+    {
+        Matrix3<T> result(*this);
+        result += b;
+        return result;
+    }
+
+    Matrix3& operator+= (const Matrix3& b)
+    {
+        for (int i = 0; i < 3; i++)
+            for (int j = 0; j < 3; j++)
+                M[i][j] += b.M[i][j];
+        return *this;
+    }
+
+    void operator= (const Matrix3& b)
+    {
+        for (int i = 0; i < 3; i++)
+            for (int j = 0; j < 3; j++)
+                M[i][j] = b.M[i][j];
+    }
+
+    Matrix3 operator- (const Matrix3& b) const
+    {
+        Matrix3 result(*this);
+        result -= b;
+        return result;
+    }
+
+    Matrix3& operator-= (const Matrix3& b)
+    {
+        for (int i = 0; i < 3; i++)
+        {
+            for (int j = 0; j < 3; j++)
+                M[i][j] -= b.M[i][j];
+        }
+
+        return *this;
+    }
+
+    // Multiplies two matrices into destination with minimum copying.
+    static Matrix3& Multiply(Matrix3* d, const Matrix3& a, const Matrix3& b)
+    {
+        OVR_MATH_ASSERT((d != &a) && (d != &b));
+        int i = 0;
+        do {
+            d->M[i][0] = a.M[i][0] * b.M[0][0] + a.M[i][1] * b.M[1][0] + a.M[i][2] * b.M[2][0];
+            d->M[i][1] = a.M[i][0] * b.M[0][1] + a.M[i][1] * b.M[1][1] + a.M[i][2] * b.M[2][1];
+            d->M[i][2] = a.M[i][0] * b.M[0][2] + a.M[i][1] * b.M[1][2] + a.M[i][2] * b.M[2][2];
+        } while((++i) < 3);
+
+        return *d;
+    }
+
+    Matrix3 operator* (const Matrix3& b) const
+    {
+        Matrix3 result(Matrix3::NoInit);
+        Multiply(&result, *this, b);
+        return result;
+    }
+
+    Matrix3& operator*= (const Matrix3& b)
+    {
+        return Multiply(this, Matrix3(*this), b);
+    }
+
+    Matrix3 operator* (T s) const
+    {
+        Matrix3 result(*this);
+        result *= s;
+        return result;
+    }
+
+    Matrix3& operator*= (T s)
+    {
+        for (int i = 0; i < 3; i++)
+        {
+            for (int j = 0; j < 3; j++)
+                M[i][j] *= s;
+        }
+
+        return *this;
+    }
+
+    Vector3<T> operator* (const Vector3<T> &b) const
+    {
+        Vector3<T> result;
+        result.x = M[0][0]*b.x + M[0][1]*b.y + M[0][2]*b.z;
+        result.y = M[1][0]*b.x + M[1][1]*b.y + M[1][2]*b.z;
+        result.z = M[2][0]*b.x + M[2][1]*b.y + M[2][2]*b.z;
+
+        return result;
+    }
+
+    Matrix3 operator/ (T s) const
+    {
+        Matrix3 result(*this);
+        result /= s;
+        return result;
+    }
+
+    Matrix3& operator/= (T s)
+    {
+        for (int i = 0; i < 3; i++)
+        {
+            for (int j = 0; j < 3; j++)
+                M[i][j] /= s;
+        }
+
+        return *this;
+    }
+
+    Vector2<T> Transform(const Vector2<T>& v) const
+    {
+        const T rcpZ = T(1) / (M[2][0] * v.x + M[2][1] * v.y + M[2][2]);
+        return Vector2<T>((M[0][0] * v.x + M[0][1] * v.y + M[0][2]) * rcpZ,
+                          (M[1][0] * v.x + M[1][1] * v.y + M[1][2]) * rcpZ);
+    }
+
+    Vector3<T> Transform(const Vector3<T>& v) const
+    {
+        return Vector3<T>(M[0][0] * v.x + M[0][1] * v.y + M[0][2] * v.z,
+                          M[1][0] * v.x + M[1][1] * v.y + M[1][2] * v.z,
+                          M[2][0] * v.x + M[2][1] * v.y + M[2][2] * v.z);
+    }
+
+    Matrix3 Transposed() const
+    {
+        return Matrix3(M[0][0], M[1][0], M[2][0],
+                       M[0][1], M[1][1], M[2][1],
+                       M[0][2], M[1][2], M[2][2]);
+    }
+
+    void     Transpose()
+    {
+        *this = Transposed();
+    }
+
+
+    T SubDet (const size_t* rows, const size_t* cols) const
+    {
+        return M[rows[0]][cols[0]] * (M[rows[1]][cols[1]] * M[rows[2]][cols[2]] - M[rows[1]][cols[2]] * M[rows[2]][cols[1]])
+             - M[rows[0]][cols[1]] * (M[rows[1]][cols[0]] * M[rows[2]][cols[2]] - M[rows[1]][cols[2]] * M[rows[2]][cols[0]])
+             + M[rows[0]][cols[2]] * (M[rows[1]][cols[0]] * M[rows[2]][cols[1]] - M[rows[1]][cols[1]] * M[rows[2]][cols[0]]);
+    }
+
+    
+    // M += a*b.t()
+    inline void Rank1Add(const Vector3<T> &a, const Vector3<T> &b)
+    {
+        M[0][0] += a.x*b.x;        M[0][1] += a.x*b.y;        M[0][2] += a.x*b.z;
+        M[1][0] += a.y*b.x;        M[1][1] += a.y*b.y;        M[1][2] += a.y*b.z;
+        M[2][0] += a.z*b.x;        M[2][1] += a.z*b.y;        M[2][2] += a.z*b.z;
+    }
+
+    // M -= a*b.t()
+    inline void Rank1Sub(const Vector3<T> &a, const Vector3<T> &b)
+    {
+        M[0][0] -= a.x*b.x;        M[0][1] -= a.x*b.y;        M[0][2] -= a.x*b.z;
+        M[1][0] -= a.y*b.x;        M[1][1] -= a.y*b.y;        M[1][2] -= a.y*b.z;
+        M[2][0] -= a.z*b.x;        M[2][1] -= a.z*b.y;        M[2][2] -= a.z*b.z;
+    }
+
+    inline Vector3<T> Col(int c) const
+    {
+        return Vector3<T>(M[0][c], M[1][c], M[2][c]);
+    }
+
+    inline Vector3<T> Row(int r) const
+    {
+        return Vector3<T>(M[r][0], M[r][1], M[r][2]);
+    }
+
+    inline Vector3<T> GetColumn(int c) const
+    {
+        return Vector3<T>(M[0][c], M[1][c], M[2][c]);
+    }
+
+    inline Vector3<T> GetRow(int r) const
+    {
+        return Vector3<T>(M[r][0], M[r][1], M[r][2]);
+    }
+
+    inline void SetColumn(int c, const Vector3<T>& v)
+    {
+        M[0][c] = v.x;
+        M[1][c] = v.y;
+        M[2][c] = v.z;
+    }
+
+    inline void SetRow(int r, const Vector3<T>& v)
+    {
+        M[r][0] = v.x;
+        M[r][1] = v.y;
+        M[r][2] = v.z;
+    }
+
+    inline T Determinant() const
+    {
+        const Matrix3<T>& m = *this;
+        T d; 
+
+        d  = m.M[0][0] * (m.M[1][1]*m.M[2][2] - m.M[1][2] * m.M[2][1]);
+        d -= m.M[0][1] * (m.M[1][0]*m.M[2][2] - m.M[1][2] * m.M[2][0]);
+        d += m.M[0][2] * (m.M[1][0]*m.M[2][1] - m.M[1][1] * m.M[2][0]);
+
+        return d;
+    }
+    
+    inline Matrix3<T> Inverse() const
+    {
+        Matrix3<T> a;
+        const  Matrix3<T>& m = *this;
+        T d = Determinant();
+
+        OVR_MATH_ASSERT(d != 0);
+        T s = T(1)/d;
+
+        a.M[0][0] = s * (m.M[1][1] * m.M[2][2] - m.M[1][2] * m.M[2][1]);   
+        a.M[1][0] = s * (m.M[1][2] * m.M[2][0] - m.M[1][0] * m.M[2][2]);   
+        a.M[2][0] = s * (m.M[1][0] * m.M[2][1] - m.M[1][1] * m.M[2][0]);   
+
+        a.M[0][1] = s * (m.M[0][2] * m.M[2][1] - m.M[0][1] * m.M[2][2]);   
+        a.M[1][1] = s * (m.M[0][0] * m.M[2][2] - m.M[0][2] * m.M[2][0]);   
+        a.M[2][1] = s * (m.M[0][1] * m.M[2][0] - m.M[0][0] * m.M[2][1]);   
+        
+        a.M[0][2] = s * (m.M[0][1] * m.M[1][2] - m.M[0][2] * m.M[1][1]);   
+        a.M[1][2] = s * (m.M[0][2] * m.M[1][0] - m.M[0][0] * m.M[1][2]);   
+        a.M[2][2] = s * (m.M[0][0] * m.M[1][1] - m.M[0][1] * m.M[1][0]);   
+        
+        return a;
+    }
+    
+    // Outer Product of two column vectors: a * b.Transpose()
+    static Matrix3 OuterProduct(const Vector3<T>& a, const Vector3<T>& b)
+    {
+        return Matrix3(a.x*b.x, a.x*b.y, a.x*b.z,
+                       a.y*b.x, a.y*b.y, a.y*b.z,
+                       a.z*b.x, a.z*b.y, a.z*b.z);
+    }
+
+    // Vector cross product as a premultiply matrix:
+    // L.Cross(R) = LeftCrossAsMatrix(L) * R
+    static Matrix3 LeftCrossAsMatrix(const Vector3<T>& L)
+    {
+        return Matrix3(
+            T(0), -L.z, +L.y,
+            +L.z, T(0), -L.x,
+            -L.y, +L.x, T(0));
+    }
+
+    // Vector cross product as a premultiply matrix:
+    // L.Cross(R) = RightCrossAsMatrix(R) * L
+    static Matrix3 RightCrossAsMatrix(const Vector3<T>& R)
+    {
+        return Matrix3(
+            T(0), +R.z, -R.y,
+            -R.z, T(0), +R.x,
+            +R.y, -R.x, T(0));
+    }
+
+    // Angle in radians of a rotation matrix
+    // Uses identity trace(a) = 2*cos(theta) + 1
+    T Angle() const
+    {
+        return Acos((Trace() - T(1)) * T(0.5));
+    }
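+    // For example, the identity rotation has Trace() == 3, so Angle() == Acos(1) == 0;
+    // a 180 degree rotation about any axis has Trace() == -1, so Angle() == Acos(-1) == Pi.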
+
+    // Angle in radians between two rotation matrices
+    T Angle(const Matrix3& b) const
+    {
+        // Compute trace of (this->Transposed() * b)
+        // This works out to sum of products of elements.
+        T trace = T(0);
+        for (int i = 0; i < 3; i++)
+        {
+            for (int j = 0; j < 3; j++)
+            {
+                trace += M[i][j] * b.M[i][j];
+            }
+        }
+        return Acos((trace - T(1)) * T(0.5));
+    }
+};
+
+typedef Matrix3<float>  Matrix3f;
+typedef Matrix3<double> Matrix3d;
+
+//-------------------------------------------------------------------------------------
+// ***** Matrix2
+
+template<class T>
+class Matrix2
+{
+public:
+    typedef T ElementType;
+    static const size_t Dimension = 2;
+
+    T M[2][2];
+
+    enum NoInitType { NoInit };
+
+    // Construct with no memory initialization.
+    Matrix2(NoInitType) { }
+
+    // By default, we construct identity matrix.
+    Matrix2()
+    {
+        M[0][0] = M[1][1] = T(1);
+        M[0][1] = M[1][0] = T(0);
+    }
+
+    Matrix2(T m11, T m12,
+            T m21, T m22)
+    {
+        M[0][0] = m11; M[0][1] = m12;
+        M[1][0] = m21; M[1][1] = m22;
+    }
+
+    // Construction from X, Y basis vectors
+    Matrix2(const Vector2<T>& xBasis, const Vector2<T>& yBasis)
+    {
+        M[0][0] = xBasis.x; M[0][1] = yBasis.x;
+        M[1][0] = xBasis.y; M[1][1] = yBasis.y;
+    }
+
+    explicit Matrix2(T s)
+    {
+        M[0][0] = M[1][1] = s;
+        M[0][1] = M[1][0] = T(0);
+    }
+
+    Matrix2(T m11, T m22)
+    {
+        M[0][0] = m11; M[0][1] = T(0);
+        M[1][0] = T(0);   M[1][1] = m22;
+    }
+
+    explicit Matrix2(const Matrix2<typename Math<T>::OtherFloatType> &src)
+    {
+        M[0][0] = T(src.M[0][0]); M[0][1] = T(src.M[0][1]);
+        M[1][0] = T(src.M[1][0]); M[1][1] = T(src.M[1][1]);
+    }
+
+    // C-interop support
+    Matrix2(const typename CompatibleTypes<Matrix2<T> >::Type& s)
+    {
+        OVR_MATH_STATIC_ASSERT(sizeof(s) == sizeof(Matrix2), "sizeof(s) == sizeof(Matrix2)");
+        memcpy(M, s.M, sizeof(M));
+    }
+
+    operator const typename CompatibleTypes<Matrix2<T> >::Type() const
+    {
+        typename CompatibleTypes<Matrix2<T> >::Type result;
+        OVR_MATH_STATIC_ASSERT(sizeof(result) == sizeof(Matrix2), "sizeof(result) == sizeof(Matrix2)");
+        memcpy(result.M, M, sizeof(M));
+        return result;
+    }
+
+    T  operator()(int i, int j) const { return M[i][j]; }
+    T& operator()(int i, int j)       { return M[i][j]; }
+    const T*  operator[](int i) const { return M[i]; }
+    T*  operator[](int i)             { return M[i]; }
+
+    static Matrix2 Identity()  { return Matrix2(); }
+
+    void SetIdentity()
+    {
+        M[0][0] = M[1][1] = T(1);
+        M[0][1] = M[1][0] = T(0);
+    }
+
+    static Matrix2 Diagonal(T m00, T m11)
+    {
+        return Matrix2(m00, m11);
+    }
+    static Matrix2 Diagonal(const Vector2<T>& v) { return Matrix2(v.x, v.y); }
+
+    T Trace() const { return M[0][0] + M[1][1]; }
+
+    bool operator== (const Matrix2& b) const
+    {
+        return M[0][0] == b.M[0][0] && M[0][1] == b.M[0][1] &&
+               M[1][0] == b.M[1][0] && M[1][1] == b.M[1][1];
+    }
+
+    Matrix2 operator+ (const Matrix2& b) const
+    {
+        return Matrix2(M[0][0] + b.M[0][0], M[0][1] + b.M[0][1],
+                       M[1][0] + b.M[1][0], M[1][1] + b.M[1][1]);
+    }
+
+    Matrix2& operator+= (const Matrix2& b)
+    {
+        M[0][0] += b.M[0][0]; M[0][1] += b.M[0][1];
+        M[1][0] += b.M[1][0]; M[1][1] += b.M[1][1];
+        return *this;
+    }
+
+    void operator= (const Matrix2& b)
+    {
+        M[0][0] = b.M[0][0]; M[0][1] = b.M[0][1];
+        M[1][0] = b.M[1][0]; M[1][1] = b.M[1][1];
+    }
+
+    Matrix2 operator- (const Matrix2& b) const
+    {
+        return Matrix2(M[0][0] - b.M[0][0], M[0][1] - b.M[0][1],
+                       M[1][0] - b.M[1][0], M[1][1] - b.M[1][1]);
+    }
+
+    Matrix2& operator-= (const Matrix2& b)
+    {
+        M[0][0] -= b.M[0][0]; M[0][1] -= b.M[0][1];
+        M[1][0] -= b.M[1][0]; M[1][1] -= b.M[1][1];
+        return *this;
+    }
+
+    Matrix2 operator* (const Matrix2& b) const
+    {
+        return Matrix2(M[0][0] * b.M[0][0] + M[0][1] * b.M[1][0], M[0][0] * b.M[0][1] + M[0][1] * b.M[1][1],
+                       M[1][0] * b.M[0][0] + M[1][1] * b.M[1][0], M[1][0] * b.M[0][1] + M[1][1] * b.M[1][1]);
+    }
+
+    Matrix2& operator*= (const Matrix2& b)
+    {
+        *this = *this * b;
+        return *this;
+    }
+
+    Matrix2 operator* (T s) const
+    {
+        return Matrix2(M[0][0] * s, M[0][1] * s,
+                       M[1][0] * s, M[1][1] * s);
+    }
+
+    Matrix2& operator*= (T s)
+    {
+        M[0][0] *= s; M[0][1] *= s;
+        M[1][0] *= s; M[1][1] *= s;
+        return *this;
+    }
+
+    Matrix2 operator/ (T s) const
+    {
+        return *this * (T(1) / s);
+    }
+
+    Matrix2& operator/= (T s)
+    {
+        return *this *= (T(1) / s);
+    }
+
+    Vector2<T> operator* (const Vector2<T> &b) const
+    {
+        return Vector2<T>(M[0][0] * b.x + M[0][1] * b.y,
+                          M[1][0] * b.x + M[1][1] * b.y);
+    }
+
+    Vector2<T> Transform(const Vector2<T>& v) const
+    {
+        return Vector2<T>(M[0][0] * v.x + M[0][1] * v.y,
+                          M[1][0] * v.x + M[1][1] * v.y);
+    }
+
+    Matrix2 Transposed() const
+    {
+        return Matrix2(M[0][0], M[1][0],
+                       M[0][1], M[1][1]);
+    }
+
+    void Transpose()
+    {
+        OVRMath_Swap(M[1][0], M[0][1]);
+    }
+
+    Vector2<T> GetColumn(int c) const
+    {
+        return Vector2<T>(M[0][c], M[1][c]);
+    }
+
+    Vector2<T> GetRow(int r) const
+    {
+        return Vector2<T>(M[r][0], M[r][1]);
+    }
+
+    void SetColumn(int c, const Vector2<T>& v)
+    {
+        M[0][c] = v.x;
+        M[1][c] = v.y;
+    }
+
+    void SetRow(int r, const Vector2<T>& v)
+    {
+        M[r][0] = v.x;
+        M[r][1] = v.y;
+    }
+
+    T Determinant() const
+    {
+        return M[0][0] * M[1][1] - M[0][1] * M[1][0];
+    }
+
+    Matrix2 Inverse() const
+    {
+        T rcpDet = T(1) / Determinant();
+        return Matrix2( M[1][1] * rcpDet, -M[0][1] * rcpDet,
+                       -M[1][0] * rcpDet,  M[0][0] * rcpDet);
+    }
+
+    // Outer Product of two column vectors: a * b.Transpose()
+    static Matrix2 OuterProduct(const Vector2<T>& a, const Vector2<T>& b)
+    {
+        return Matrix2(a.x*b.x, a.x*b.y,
+                       a.y*b.x, a.y*b.y);
+    }
+
+    // Angle in radians between two rotation matrices
+    T Angle(const Matrix2& b) const
+    {
+        const Matrix2& a = *this;
+        return Acos(a(0, 0)*b(0, 0) + a(1, 0)*b(1, 0));
+    }
+};
+
+typedef Matrix2<float>  Matrix2f;
+typedef Matrix2<double> Matrix2d;
+
+//-------------------------------------------------------------------------------------
+
+template<class T>
+class SymMat3
+{
+private:
+    typedef SymMat3<T> this_type;
+
+public:
+    typedef T Value_t;
+    // Upper symmetric
+    T v[6]; // _00 _01 _02 _11 _12 _22
+
+    inline SymMat3() {}
+
+    inline explicit SymMat3(T s)
+    {
+        v[0] = v[3] = v[5] = s;
+        v[1] = v[2] = v[4] = T(0);
+    }
+
+    inline explicit SymMat3(T a00, T a01, T a02, T a11, T a12, T a22)
+    {
+        v[0] = a00; v[1] = a01; v[2] = a02;
+        v[3] = a11; v[4] = a12;
+        v[5] = a22;
+    }
+
+    // Cast to symmetric Matrix3
+    operator Matrix3<T>() const
+    {
+        return Matrix3<T>(v[0], v[1], v[2],
+                          v[1], v[3], v[4],
+                          v[2], v[4], v[5]);
+    }
+
+    static inline int Index(unsigned int i, unsigned int j)
+    {
+        return (i <= j) ? (3*i - i*(i+1)/2 + j) : (3*j - j*(j+1)/2 + i);
+    }
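+    // Per the packed layout noted above (v = { _00 _01 _02 _11 _12 _22 }), Index maps
+    // (0,0)->0, (0,1)->1, (0,2)->2, (1,1)->3, (1,2)->4, (2,2)->5; e.g. Index(2,1) == Index(1,2) == 4.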
+
+    inline T operator()(int i, int j) const { return v[Index(i,j)]; }
+    
+    inline T &operator()(int i, int j) { return v[Index(i,j)]; }
+
+    inline this_type& operator+=(const this_type& b)
+    {
+        v[0]+=b.v[0];
+        v[1]+=b.v[1];
+        v[2]+=b.v[2];
+        v[3]+=b.v[3];
+        v[4]+=b.v[4];
+        v[5]+=b.v[5];
+        return *this;
+    }
+
+    inline this_type& operator-=(const this_type& b)
+    {
+        v[0]-=b.v[0];
+        v[1]-=b.v[1];
+        v[2]-=b.v[2];
+        v[3]-=b.v[3];
+        v[4]-=b.v[4];
+        v[5]-=b.v[5];
+
+        return *this;
+    }
+
+    inline this_type& operator*=(T s)
+    {
+        v[0]*=s;
+        v[1]*=s;
+        v[2]*=s;
+        v[3]*=s;
+        v[4]*=s;
+        v[5]*=s;
+
+        return *this;
+    }
+        
+    inline SymMat3 operator*(T s) const
+    {
+        SymMat3 d;
+        d.v[0] = v[0]*s; 
+        d.v[1] = v[1]*s; 
+        d.v[2] = v[2]*s; 
+        d.v[3] = v[3]*s; 
+        d.v[4] = v[4]*s; 
+        d.v[5] = v[5]*s; 
+                        
+        return d;
+    }
+
+    // Multiplies two matrices into destination with minimum copying.
+    static SymMat3& Multiply(SymMat3* d, const SymMat3& a, const SymMat3& b)
+    {        
+        // _00 _01 _02 _11 _12 _22
+
+        d->v[0] = a.v[0] * b.v[0];
+        d->v[1] = a.v[0] * b.v[1] + a.v[1] * b.v[3];
+        d->v[2] = a.v[0] * b.v[2] + a.v[1] * b.v[4];
+                    
+        d->v[3] = a.v[3] * b.v[3];
+        d->v[4] = a.v[3] * b.v[4] + a.v[4] * b.v[5];
+                
+        d->v[5] = a.v[5] * b.v[5];
+    
+        return *d;
+    }
+    
+    inline T Determinant() const
+    {
+        const this_type& m = *this;
+        T d; 
+
+        d  = m(0,0) * (m(1,1)*m(2,2) - m(1,2) * m(2,1));
+        d -= m(0,1) * (m(1,0)*m(2,2) - m(1,2) * m(2,0));
+        d += m(0,2) * (m(1,0)*m(2,1) - m(1,1) * m(2,0));
+
+        return d;
+    }
+
+    inline this_type Inverse() const
+    {
+        this_type a;
+        const this_type& m = *this;
+        T d = Determinant();
+
+        OVR_MATH_ASSERT(d != 0);
+        T s = T(1)/d;
+
+        a(0,0) = s * (m(1,1) * m(2,2) - m(1,2) * m(2,1));   
+
+        a(0,1) = s * (m(0,2) * m(2,1) - m(0,1) * m(2,2));   
+        a(1,1) = s * (m(0,0) * m(2,2) - m(0,2) * m(2,0));   
+
+        a(0,2) = s * (m(0,1) * m(1,2) - m(0,2) * m(1,1));   
+        a(1,2) = s * (m(0,2) * m(1,0) - m(0,0) * m(1,2));   
+        a(2,2) = s * (m(0,0) * m(1,1) - m(0,1) * m(1,0));   
+
+        return a;
+    }
+
+    inline T Trace() const { return v[0] + v[3] + v[5]; }
+
+    // M = a*a.t()
+    inline void Rank1(const Vector3<T> &a)
+    {
+        v[0] = a.x*a.x; v[1] = a.x*a.y; v[2] = a.x*a.z;
+        v[3] = a.y*a.y; v[4] = a.y*a.z;
+        v[5] = a.z*a.z;
+    }
+
+    // M += a*a.t()
+    inline void Rank1Add(const Vector3<T> &a)
+    {
+        v[0] += a.x*a.x; v[1] += a.x*a.y; v[2] += a.x*a.z;
+        v[3] += a.y*a.y; v[4] += a.y*a.z;
+        v[5] += a.z*a.z;
+    }
+
+    // M -= a*a.t()
+    inline void Rank1Sub(const Vector3<T> &a)
+    {
+        v[0] -= a.x*a.x; v[1] -= a.x*a.y; v[2] -= a.x*a.z;
+        v[3] -= a.y*a.y; v[4] -= a.y*a.z;
+        v[5] -= a.z*a.z;
+    }
+};
+
+typedef SymMat3<float>  SymMat3f;
+typedef SymMat3<double> SymMat3d;
+
+template<class T>
+inline Matrix3<T> operator*(const SymMat3<T>& a, const SymMat3<T>& b)
+{
+    #define AJB_ARBC(r,c) (a(r,0)*b(0,c)+a(r,1)*b(1,c)+a(r,2)*b(2,c))
+    return Matrix3<T>(
+        AJB_ARBC(0,0), AJB_ARBC(0,1), AJB_ARBC(0,2),
+        AJB_ARBC(1,0), AJB_ARBC(1,1), AJB_ARBC(1,2),
+        AJB_ARBC(2,0), AJB_ARBC(2,1), AJB_ARBC(2,2));
+    #undef AJB_ARBC
+}
+
+template<class T>
+inline Matrix3<T> operator*(const Matrix3<T>& a, const SymMat3<T>& b)
+{
+    #define AJB_ARBC(r,c) (a(r,0)*b(0,c)+a(r,1)*b(1,c)+a(r,2)*b(2,c))
+    return Matrix3<T>(
+        AJB_ARBC(0,0), AJB_ARBC(0,1), AJB_ARBC(0,2),
+        AJB_ARBC(1,0), AJB_ARBC(1,1), AJB_ARBC(1,2),
+        AJB_ARBC(2,0), AJB_ARBC(2,1), AJB_ARBC(2,2));
+    #undef AJB_ARBC
+}
+
+//-------------------------------------------------------------------------------------
+// ***** Angle
+
+// Cleanly representing the algebra of 2D rotations.
+// The operations maintain the angle between -Pi and Pi, the same range as atan2.
+
+template<class T>
+class Angle
+{
+public:
+    enum AngularUnits
+    {
+        Radians = 0,
+        Degrees = 1
+    };
+
+    Angle() : a(0) {}
+    
+    // Fix the range to be between -Pi and Pi
+    Angle(T a_, AngularUnits u = Radians) : a((u == Radians) ? a_ : a_*((T)MATH_DOUBLE_DEGREETORADFACTOR)) { FixRange(); }
+
+    T    Get(AngularUnits u = Radians) const       { return (u == Radians) ? a : a*((T)MATH_DOUBLE_RADTODEGREEFACTOR); }
+    void Set(const T& x, AngularUnits u = Radians) { a = (u == Radians) ? x : x*((T)MATH_DOUBLE_DEGREETORADFACTOR); FixRange(); }
+    int Sign() const                               { if (a == 0) return 0; else return (a > 0) ? 1 : -1; }
+    T   Abs() const                                { return (a >= 0) ? a : -a; }
+
+    bool operator== (const Angle& b) const    { return a == b.a; }
+    bool operator!= (const Angle& b) const    { return a != b.a; }
+//    bool operator<  (const Angle& b) const    { return a < a.b; } 
+//    bool operator>  (const Angle& b) const    { return a > a.b; } 
+//    bool operator<= (const Angle& b) const    { return a <= a.b; } 
+//    bool operator>= (const Angle& b) const    { return a >= a.b; } 
+//    bool operator= (const T& x)               { a = x; FixRange(); }
+
+    // These operations assume a is already between -Pi and Pi.
+    Angle& operator+= (const Angle& b)        { a = a + b.a; FastFixRange(); return *this; }
+    Angle& operator+= (const T& x)            { a = a + x; FixRange(); return *this; }
+    Angle  operator+  (const Angle& b) const  { Angle res = *this; res += b; return res; }
+    Angle  operator+  (const T& x) const      { Angle res = *this; res += x; return res; }
+    Angle& operator-= (const Angle& b)        { a = a - b.a; FastFixRange(); return *this; }
+    Angle& operator-= (const T& x)            { a = a - x; FixRange(); return *this; }
+    Angle  operator-  (const Angle& b) const  { Angle res = *this; res -= b; return res; }
+    Angle  operator-  (const T& x) const      { Angle res = *this; res -= x; return res; }
+    
+    T   Distance(const Angle& b)              { T c = fabs(a - b.a); return (c <= ((T)MATH_DOUBLE_PI)) ? c : ((T)MATH_DOUBLE_TWOPI) - c; }
+
+private:
+
+    // The stored angle, which should be maintained between -Pi and Pi
+    T a;
+
+    // Fixes the angle range to [-Pi,Pi], but assumes no more than 2Pi away on either side 
+    inline void FastFixRange()
+    {
+        if (a < -((T)MATH_DOUBLE_PI))
+            a += ((T)MATH_DOUBLE_TWOPI);
+        else if (a > ((T)MATH_DOUBLE_PI))
+            a -= ((T)MATH_DOUBLE_TWOPI);
+    }
+
+    // Fixes the angle range to [-Pi,Pi] for any input, but is slower than the fast method
+    inline void FixRange()
+    {
+        // do nothing if the value is already in the correct range, since fmod call is expensive
+        if (a >= -((T)MATH_DOUBLE_PI) && a <= ((T)MATH_DOUBLE_PI))
+            return;
+        a = fmod(a,((T)MATH_DOUBLE_TWOPI));
+        if (a < -((T)MATH_DOUBLE_PI))
+            a += ((T)MATH_DOUBLE_TWOPI);
+        else if (a > ((T)MATH_DOUBLE_PI))
+            a -= ((T)MATH_DOUBLE_TWOPI);
+    }
+};
+
+
+typedef Angle<float>  Anglef;
+typedef Angle<double> Angled;
+
+
+//-------------------------------------------------------------------------------------
+// ***** Plane
+
+// Consists of a normal vector and distance from the origin where the plane is located.
+
+template<class T>
+class Plane
+{
+public:
+    Vector3<T> N;
+    T          D;
+
+    Plane() : D(0) {}
+
+    // Normals must already be normalized
+    Plane(const Vector3<T>& n, T d) : N(n), D(d) {}
+    Plane(T x, T y, T z, T d) : N(x,y,z), D(d) {}
+
+    // construct from a point on the plane and the normal
+    Plane(const Vector3<T>& p, const Vector3<T>& n) : N(n), D(-(p * n)) {}
+
+    // Find the point to plane distance. The sign indicates what side of the plane the point is on (0 = point on plane).
+    T TestSide(const Vector3<T>& p) const
+    {
+        return (N.Dot(p)) + D;
+    }
+
+    Plane<T> Flipped() const
+    {
+        return Plane(-N, -D);
+    }
+
+    void Flip()
+    {
+        N = -N;
+        D = -D;
+    }
+
+    bool operator==(const Plane<T>& rhs) const
+    {
+        return (this->D == rhs.D && this->N == rhs.N);
+    }
+};
+
+typedef Plane<float> Planef;
+typedef Plane<double> Planed;
+
+
+
+
+//-----------------------------------------------------------------------------------
+// ***** ScaleAndOffset2D
+
+struct ScaleAndOffset2D
+{
+    Vector2f Scale;
+    Vector2f Offset;
+
+    ScaleAndOffset2D(float sx = 0.0f, float sy = 0.0f, float ox = 0.0f, float oy = 0.0f)
+        : Scale(sx, sy), Offset(ox, oy)        
+    { }
+};
+
+
+//-----------------------------------------------------------------------------------
+// ***** FovPort
+
+// FovPort describes Field Of View (FOV) of a viewport.
+// This class has values for up, down, left and right, stored as the
+// tangent of the corresponding half-angle to simplify calculations.
+//
+// As an example, for a standard 90 degree vertical FOV, we would 
+// have: { UpTan = tan(90 degrees / 2), DownTan = tan(90 degrees / 2) }.
+//
+// CreateFromRadians/Degrees helper functions can be used to
+// access FOV in different units.
+
+
+// ***** FovPort
+
+struct FovPort
+{
+    float UpTan;
+    float DownTan;
+    float LeftTan;
+    float RightTan;
+
+    FovPort ( float sideTan = 0.0f ) :
+        UpTan(sideTan), DownTan(sideTan), LeftTan(sideTan), RightTan(sideTan) { }
+    FovPort ( float u, float d, float l, float r ) :
+        UpTan(u), DownTan(d), LeftTan(l), RightTan(r) { }
+
+    // C-interop support: FovPort <-> ovrFovPort (implementation in OVR_CAPI.cpp).
+    FovPort(const ovrFovPort &src)
+        : UpTan(src.UpTan), DownTan(src.DownTan), LeftTan(src.LeftTan), RightTan(src.RightTan)
+    { }    
+
+    operator ovrFovPort () const
+    {
+        ovrFovPort result;
+        result.LeftTan  = LeftTan;
+        result.RightTan = RightTan;
+        result.UpTan    = UpTan;
+        result.DownTan  = DownTan;
+        return result;
+    }
+
+    static FovPort CreateFromRadians(float horizontalFov, float verticalFov)
+    {
+        FovPort result;
+        result.UpTan    = tanf (   verticalFov * 0.5f );
+        result.DownTan  = tanf (   verticalFov * 0.5f );
+        result.LeftTan  = tanf ( horizontalFov * 0.5f );
+        result.RightTan = tanf ( horizontalFov * 0.5f );
+        return result;
+    }
+
+    static FovPort CreateFromDegrees(float horizontalFovDegrees,
+                                     float verticalFovDegrees)
+    {
+        return CreateFromRadians(DegreeToRad(horizontalFovDegrees),
+                                 DegreeToRad(verticalFovDegrees));
+    }
+
+    //  Get Horizontal/Vertical components of Fov in radians.
+    float GetVerticalFovRadians() const     { return atanf(UpTan)    + atanf(DownTan); }
+    float GetHorizontalFovRadians() const   { return atanf(LeftTan)  + atanf(RightTan); }
+    //  Get Horizontal/Vertical components of Fov in degrees.
+    float GetVerticalFovDegrees() const     { return RadToDegree(GetVerticalFovRadians()); }
+    float GetHorizontalFovDegrees() const   { return RadToDegree(GetHorizontalFovRadians()); }
+
+    // Compute maximum tangent value among all four sides.
+    float GetMaxSideTan() const
+    {
+        return OVRMath_Max(OVRMath_Max(UpTan, DownTan), OVRMath_Max(LeftTan, RightTan));
+    }
+
+    static ScaleAndOffset2D CreateNDCScaleAndOffsetFromFov ( FovPort tanHalfFov )
+    {
+        float projXScale = 2.0f / ( tanHalfFov.LeftTan + tanHalfFov.RightTan );
+        float projXOffset = ( tanHalfFov.LeftTan - tanHalfFov.RightTan ) * projXScale * 0.5f;
+        float projYScale = 2.0f / ( tanHalfFov.UpTan + tanHalfFov.DownTan );
+        float projYOffset = ( tanHalfFov.UpTan - tanHalfFov.DownTan ) * projYScale * 0.5f;
+
+        ScaleAndOffset2D result;
+        result.Scale    = Vector2f(projXScale, projYScale);
+        result.Offset   = Vector2f(projXOffset, projYOffset);
+        // Hey - why is that Y.Offset negated?
+        // It's because a projection matrix transforms from world coords with Y=up,
+        // whereas this is from NDC which is Y=down.
+
+        return result;
+    }
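+    // Worked example: a symmetric 90 degree FOV on both axes gives all four tangents
+    // tan(45 degrees) == 1, so Scale == (1, 1) and Offset == (0, 0); an asymmetric FOV
+    // shifts the projection center through a non-zero Offset instead.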
+
+    // Converts Fov Tan angle units to [-1,1] render target NDC space
+    Vector2f TanAngleToRendertargetNDC(Vector2f const &tanEyeAngle)
+    {  
+        ScaleAndOffset2D eyeToSourceNDC = CreateNDCScaleAndOffsetFromFov(*this);
+        return tanEyeAngle * eyeToSourceNDC.Scale + eyeToSourceNDC.Offset;
+    }
+
+    // Compute per-channel minimum and maximum of Fov.
+    static FovPort Min(const FovPort& a, const FovPort& b)
+    {   
+        FovPort fov( OVRMath_Min( a.UpTan   , b.UpTan    ),   
+                     OVRMath_Min( a.DownTan , b.DownTan  ),
+                     OVRMath_Min( a.LeftTan , b.LeftTan  ),
+                     OVRMath_Min( a.RightTan, b.RightTan ) );
+        return fov;
+    }
+
+    static FovPort Max(const FovPort& a, const FovPort& b)
+    {   
+        FovPort fov( OVRMath_Max( a.UpTan   , b.UpTan    ),   
+                     OVRMath_Max( a.DownTan , b.DownTan  ),
+                     OVRMath_Max( a.LeftTan , b.LeftTan  ),
+                     OVRMath_Max( a.RightTan, b.RightTan ) );
+        return fov;
+    }
+};
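+
+// Minimal usage sketch (illustrative only, not part of the SDK itself): build a FovPort from
+// horizontal/vertical field-of-view angles and derive the NDC scale/offset used for projection setup.
+//
+//     FovPort fov = FovPort::CreateFromDegrees(100.0f, 90.0f);             // horizontal, vertical
+//     ScaleAndOffset2D ndc = FovPort::CreateNDCScaleAndOffsetFromFov(fov);
+//     float vFovRad = fov.GetVerticalFovRadians();                         // atan(UpTan) + atan(DownTan)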
+
+
+} // Namespace OVR
+
+
+#if defined(_MSC_VER)
+    #pragma warning(pop)
+#endif
+
+
+#endif

+ 70 - 0
examples/oculus_glfw_sample/OculusSDK/LibOVR/Include/Extras/OVR_StereoProjection.h

@@ -0,0 +1,70 @@
+/************************************************************************************
+
+Filename    :   OVR_StereoProjection.h
+Content     :   Stereo projection functions
+Created     :   November 30, 2013
+Authors     :   Tom Forsyth
+
+Copyright   :   Copyright 2014-2016 Oculus VR, LLC All Rights reserved.
+
+Licensed under the Oculus VR Rift SDK License Version 3.3 (the "License"); 
+you may not use the Oculus VR Rift SDK except in compliance with the License, 
+which is provided at the time of installation or download, or which 
+otherwise accompanies this software in either electronic or hard copy form.
+
+You may obtain a copy of the License at
+
+http://www.oculusvr.com/licenses/LICENSE-3.3 
+
+Unless required by applicable law or agreed to in writing, the Oculus VR SDK 
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+*************************************************************************************/
+
+#ifndef OVR_StereoProjection_h
+#define OVR_StereoProjection_h
+
+
+#include "Extras/OVR_Math.h"
+
+
+namespace OVR {
+
+
+//-----------------------------------------------------------------------------------
+// ***** Stereo Enumerations
+
+// StereoEye specifies which eye we are rendering for; it is used to
+// retrieve StereoEyeParams.
+enum StereoEye
+{
+    StereoEye_Left,
+    StereoEye_Right,
+    StereoEye_Center
+};
+
+
+
+//-----------------------------------------------------------------------------------
+// ***** Projection functions
+
+Matrix4f            CreateProjection ( bool rightHanded, bool isOpenGL, FovPort fov, StereoEye eye,
+                                       float zNear = 0.01f, float zFar = 10000.0f,
+                                       bool flipZ = false, bool farAtInfinity = false);
+
+Matrix4f            CreateOrthoSubProjection ( bool rightHanded, StereoEye eyeType,
+                                               float tanHalfFovX, float tanHalfFovY,
+                                               float unitsX, float unitsY, float distanceFromCamera,
+                                               float interpupillaryDistance, Matrix4f const &projection,
+                                               float zNear = 0.0f, float zFar = 0.0f,
+                                               bool flipZ = false, bool farAtInfinity = false);
+
+ScaleAndOffset2D    CreateNDCScaleAndOffsetFromFov ( FovPort fov );
+
+
+} //namespace OVR
+
+#endif // OVR_StereoProjection_h

+ 2103 - 0
examples/oculus_glfw_sample/OculusSDK/LibOVR/Include/OVR_CAPI.h

@@ -0,0 +1,2103 @@
+/********************************************************************************//**
+\file      OVR_CAPI.h
+\brief     C Interface to the Oculus PC SDK tracking and rendering library.
+\copyright Copyright 2014-2016 Oculus VR, LLC All Rights reserved.
+************************************************************************************/
+
+#ifndef OVR_CAPI_h  //   We don't use version numbers within this name, as all versioned variations of this file are currently mutually exclusive.
+#define OVR_CAPI_h  ///< Header include guard
+
+
+#include "OVR_CAPI_Keys.h"
+#include "OVR_Version.h"
+#include "OVR_ErrorCode.h"
+
+
+#include <stdint.h>
+
+#if defined(_MSC_VER)
+    #pragma warning(push)
+    #pragma warning(disable: 4324) // structure was padded due to __declspec(align())
+    #pragma warning(disable: 4359) // The alignment specified for a type is less than the alignment of the type of one of its data members
+#endif
+
+
+
+//-----------------------------------------------------------------------------------
+// ***** OVR_OS
+//
+#if !defined(OVR_OS_WIN32) && defined(_WIN32)
+    #define OVR_OS_WIN32
+#endif
+
+#if !defined(OVR_OS_MAC) && defined(__APPLE__)
+    #define OVR_OS_MAC
+#endif
+
+#if !defined(OVR_OS_LINUX) && defined(__linux__)
+    #define OVR_OS_LINUX
+#endif
+
+
+
+//-----------------------------------------------------------------------------------
+// ***** OVR_CPP
+//
+#if !defined(OVR_CPP)
+    #if defined(__cplusplus)
+        #define OVR_CPP(x) x
+    #else
+        #define OVR_CPP(x) /* Not C++ */
+    #endif
+#endif
+
+
+
+//-----------------------------------------------------------------------------------
+// ***** OVR_CDECL
+//
+/// LibOVR calling convention for 32-bit Windows builds.
+//
+#if !defined(OVR_CDECL)
+    #if defined(_WIN32)
+        #define OVR_CDECL __cdecl
+    #else
+        #define OVR_CDECL
+    #endif
+#endif
+
+
+
+//-----------------------------------------------------------------------------------
+// ***** OVR_EXTERN_C
+//
+/// Defined as extern "C" when built from C++ code.
+//
+#if !defined(OVR_EXTERN_C)
+    #ifdef __cplusplus
+        #define OVR_EXTERN_C extern "C"
+    #else
+        #define OVR_EXTERN_C
+    #endif
+#endif
+
+
+
+//-----------------------------------------------------------------------------------
+// ***** OVR_PUBLIC_FUNCTION / OVR_PRIVATE_FUNCTION
+//
+// OVR_PUBLIC_FUNCTION  - Functions that are externally visible from a shared library. Corresponds to Microsoft __dllexport.
+// OVR_PUBLIC_CLASS     - C++ structs and classes that are externally visible from a shared library. Corresponds to Microsoft __dllexport.
+// OVR_PRIVATE_FUNCTION - Functions that are not visible outside of a shared library. They are private to the shared library.
+// OVR_PRIVATE_CLASS    - C++ structs and classes that are not visible outside of a shared library. They are private to the shared library.
+//
+// OVR_DLL_BUILD        - Used to indicate that the current compilation unit is of a shared library.
+// OVR_DLL_IMPORT       - Used to indicate that the current compilation unit is a user of the corresponding shared library.
+// OVR_STATIC_BUILD     - Used to indicate that the current compilation unit is not a shared library but rather statically linked code.
+//
+#if !defined(OVR_PUBLIC_FUNCTION)
+    #if defined(OVR_DLL_BUILD)
+        #if defined(_WIN32)
+            #define OVR_PUBLIC_FUNCTION(rval) OVR_EXTERN_C __declspec(dllexport) rval OVR_CDECL
+            #define OVR_PUBLIC_CLASS          __declspec(dllexport)
+            #define OVR_PRIVATE_FUNCTION(rval) rval OVR_CDECL
+            #define OVR_PRIVATE_CLASS
+        #else
+            #define OVR_PUBLIC_FUNCTION(rval) OVR_EXTERN_C __attribute__((visibility("default"))) rval OVR_CDECL /* Requires GCC 4.0+ */
+            #define OVR_PUBLIC_CLASS          __attribute__((visibility("default"))) /* Requires GCC 4.0+ */
+            #define OVR_PRIVATE_FUNCTION(rval) __attribute__((visibility("hidden"))) rval OVR_CDECL
+            #define OVR_PRIVATE_CLASS         __attribute__((visibility("hidden")))
+        #endif
+    #elif defined(OVR_DLL_IMPORT)
+        #if defined(_WIN32)
+            #define OVR_PUBLIC_FUNCTION(rval) OVR_EXTERN_C __declspec(dllimport) rval OVR_CDECL
+            #define OVR_PUBLIC_CLASS          __declspec(dllimport)
+        #else
+            #define OVR_PUBLIC_FUNCTION(rval) OVR_EXTERN_C rval OVR_CDECL
+            #define OVR_PUBLIC_CLASS
+        #endif
+        #define OVR_PRIVATE_FUNCTION(rval) rval OVR_CDECL
+        #define OVR_PRIVATE_CLASS
+    #else // OVR_STATIC_BUILD
+        #define OVR_PUBLIC_FUNCTION(rval)     OVR_EXTERN_C rval OVR_CDECL
+        #define OVR_PUBLIC_CLASS
+        #define OVR_PRIVATE_FUNCTION(rval) rval OVR_CDECL
+        #define OVR_PRIVATE_CLASS
+    #endif
+#endif
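+
+// For example, API entry points later in this header are declared through the macro, e.g.:
+//
+//     OVR_PUBLIC_FUNCTION(ovrResult) ovr_Initialize(const ovrInitParams* params);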
+
+
+//-----------------------------------------------------------------------------------
+// ***** OVR_EXPORT
+//
+/// Provided for backward compatibility with older versions of this library.
+//
+#if !defined(OVR_EXPORT)
+    #ifdef OVR_OS_WIN32
+        #define OVR_EXPORT __declspec(dllexport)
+    #else
+        #define OVR_EXPORT
+    #endif
+#endif
+
+
+
+//-----------------------------------------------------------------------------------
+// ***** OVR_ALIGNAS
+//
+#if !defined(OVR_ALIGNAS)
+    #if defined(__GNUC__) || defined(__clang__)
+        #define OVR_ALIGNAS(n) __attribute__((aligned(n)))
+    #elif defined(_MSC_VER) || defined(__INTEL_COMPILER)
+        #define OVR_ALIGNAS(n) __declspec(align(n))
+    #elif defined(__CC_ARM)
+        #define OVR_ALIGNAS(n) __align(n)
+    #else
+        #error Need to define OVR_ALIGNAS
+    #endif
+#endif
+
+
+//-----------------------------------------------------------------------------------
+// ***** OVR_CC_HAS_FEATURE
+//
+// This is a portable way to use compile-time feature identification available
+// with some compilers in a clean way. Direct usage of __has_feature in preprocessing
+// statements of non-supporting compilers results in a preprocessing error.
+//
+// Example usage:
+//     #if OVR_CC_HAS_FEATURE(is_pod)
+//         if(__is_pod(T)) // If the type is plain data then we can safely memcpy it.
+//             memcpy(&destObject, &srcObject, sizeof(object));
+//     #endif
+//
+#if !defined(OVR_CC_HAS_FEATURE)
+    #if defined(__clang__) // http://clang.llvm.org/docs/LanguageExtensions.html#id2
+        #define OVR_CC_HAS_FEATURE(x) __has_feature(x)
+    #else
+        #define OVR_CC_HAS_FEATURE(x) 0
+    #endif
+#endif
+
+
+// ------------------------------------------------------------------------
+// ***** OVR_STATIC_ASSERT
+//
+// Portable support for C++11 static_assert().
+// Acts as if the following were declared:
+//     void OVR_STATIC_ASSERT(bool const_expression, const char* msg);
+//
+// Example usage:
+//     OVR_STATIC_ASSERT(sizeof(int32_t) == 4, "int32_t expected to be 4 bytes.");
+
+#if !defined(OVR_STATIC_ASSERT)
+    #if !(defined(__cplusplus) && (__cplusplus >= 201103L)) /* Other */ && \
+        !(defined(__GXX_EXPERIMENTAL_CXX0X__)) /* GCC */ && \
+        !(defined(__clang__) && defined(__cplusplus) && OVR_CC_HAS_FEATURE(cxx_static_assert)) /* clang */ && \
+        !(defined(_MSC_VER) && (_MSC_VER >= 1600) && defined(__cplusplus)) /* VS2010+  */
+
+        #if !defined(OVR_SA_UNUSED)
+        #if defined(OVR_CC_GNU) || defined(OVR_CC_CLANG)
+            #define OVR_SA_UNUSED __attribute__((unused))
+        #else
+            #define OVR_SA_UNUSED
+        #endif
+        #define OVR_SA_PASTE(a,b) a##b
+        #define OVR_SA_HELP(a,b)  OVR_SA_PASTE(a,b)
+        #endif
+
+        #if defined(__COUNTER__)
+            #define OVR_STATIC_ASSERT(expression, msg) typedef char OVR_SA_HELP(compileTimeAssert, __COUNTER__) [((expression) != 0) ? 1 : -1] OVR_SA_UNUSED
+        #else
+            #define OVR_STATIC_ASSERT(expression, msg) typedef char OVR_SA_HELP(compileTimeAssert, __LINE__) [((expression) != 0) ? 1 : -1] OVR_SA_UNUSED
+        #endif
+
+    #else
+        #define OVR_STATIC_ASSERT(expression, msg) static_assert(expression, msg)
+    #endif
+#endif
+
+
+//-----------------------------------------------------------------------------------
+// ***** Padding
+//
+/// Defines explicitly unused space for a struct.
+/// When used correctly, this macro should not change the size of the struct.
+/// Compile-time and runtime behavior with and without this defined should be identical.
+///
+#if !defined(OVR_UNUSED_STRUCT_PAD)
+    #define OVR_UNUSED_STRUCT_PAD(padName, size) char padName[size];
+#endif
+
+
+//-----------------------------------------------------------------------------------
+// ***** Word Size
+//
+/// Specifies the size of a pointer on the given platform.
+///
+#if !defined(OVR_PTR_SIZE)
+    #if defined(__WORDSIZE)
+        #define OVR_PTR_SIZE ((__WORDSIZE) / 8)
+    #elif defined(_WIN64) || defined(__LP64__) || defined(_LP64) || defined(_M_IA64) || defined(__ia64__) || defined(__arch64__) || defined(__64BIT__) || defined(__Ptr_Is_64)
+        #define OVR_PTR_SIZE 8
+    #elif defined(__CC_ARM) && (__sizeof_ptr == 8)
+        #define OVR_PTR_SIZE 8
+    #else
+        #define OVR_PTR_SIZE 4
+    #endif
+#endif
+
+
+//-----------------------------------------------------------------------------------
+// ***** OVR_ON32 / OVR_ON64
+//
+#if OVR_PTR_SIZE == 8
+    #define OVR_ON32(x)
+    #define OVR_ON64(x) x
+#else
+    #define OVR_ON32(x) x
+    #define OVR_ON64(x)
+#endif
+
+
+//-----------------------------------------------------------------------------------
+// ***** ovrBool
+
+typedef char ovrBool;   ///< Boolean type
+#define ovrFalse 0      ///< ovrBool value of false.
+#define ovrTrue  1      ///< ovrBool value of true.
+
+
+//-----------------------------------------------------------------------------------
+// ***** Simple Math Structures
+
+/// A 2D vector with integer components.
+typedef struct OVR_ALIGNAS(4) ovrVector2i_
+{
+    int x, y;
+} ovrVector2i;
+
+/// A 2D size with integer components.
+typedef struct OVR_ALIGNAS(4) ovrSizei_
+{
+    int w, h;
+} ovrSizei;
+
+/// A 2D rectangle with a position and size.
+/// All components are integers.
+typedef struct OVR_ALIGNAS(4) ovrRecti_
+{
+    ovrVector2i Pos;
+    ovrSizei    Size;
+} ovrRecti;
+
+/// A quaternion rotation.
+typedef struct OVR_ALIGNAS(4) ovrQuatf_
+{
+    float x, y, z, w;
+} ovrQuatf;
+
+/// A 2D vector with float components.
+typedef struct OVR_ALIGNAS(4) ovrVector2f_
+{
+    float x, y;
+} ovrVector2f;
+
+/// A 3D vector with float components.
+typedef struct OVR_ALIGNAS(4) ovrVector3f_
+{
+    float x, y, z;
+} ovrVector3f;
+
+/// A 4x4 matrix with float elements.
+typedef struct OVR_ALIGNAS(4) ovrMatrix4f_
+{
+    float M[4][4];
+} ovrMatrix4f;
+
+
+/// Position and orientation together.
+typedef struct OVR_ALIGNAS(4) ovrPosef_
+{
+    ovrQuatf     Orientation;
+    ovrVector3f  Position;
+} ovrPosef;
+
+/// A full pose (rigid body) configuration with first and second derivatives.
+///
+/// Body refers to any object for which ovrPoseStatef is providing data.
+/// It can be the HMD, Touch controller, sensor or something else. The context 
+/// depends on the usage of the struct.
+typedef struct OVR_ALIGNAS(8) ovrPoseStatef_
+{
+    ovrPosef     ThePose;               ///< Position and orientation.
+    ovrVector3f  AngularVelocity;       ///< Angular velocity in radians per second.
+    ovrVector3f  LinearVelocity;        ///< Velocity in meters per second.
+    ovrVector3f  AngularAcceleration;   ///< Angular acceleration in radians per second per second.
+    ovrVector3f  LinearAcceleration;    ///< Acceleration in meters per second per second.
+    OVR_UNUSED_STRUCT_PAD(pad0, 4)      ///< \internal struct pad.
+    double       TimeInSeconds;         ///< Absolute time that this pose refers to. \see ovr_GetTimeInSeconds
+} ovrPoseStatef;
+
+/// Describes the up, down, left, and right angles of the field of view.
+///
+/// Field Of View (FOV) tangent of the angle units.
+/// \note For a standard 90 degree vertical FOV, we would
+/// have: { UpTan = tan(90 degrees / 2), DownTan = tan(90 degrees / 2) }.
+typedef struct OVR_ALIGNAS(4) ovrFovPort_
+{
+    float UpTan;    ///< The tangent of the angle between the viewing vector and the top edge of the field of view.
+    float DownTan;  ///< The tangent of the angle between the viewing vector and the bottom edge of the field of view.
+    float LeftTan;  ///< The tangent of the angle between the viewing vector and the left edge of the field of view.
+    float RightTan; ///< The tangent of the angle between the viewing vector and the right edge of the field of view.
+} ovrFovPort;
+
+
+//-----------------------------------------------------------------------------------
+// ***** HMD Types
+
+/// Enumerates all HMD types that we support.
+///
+/// The currently released developer kits are ovrHmd_DK1 and ovrHmd_DK2. The other enumerations are for internal use only.
+typedef enum ovrHmdType_
+{
+    ovrHmd_None      = 0,
+    ovrHmd_DK1       = 3,
+    ovrHmd_DKHD      = 4,
+    ovrHmd_DK2       = 6,
+    ovrHmd_CB        = 8,
+    ovrHmd_Other     = 9,
+    ovrHmd_E3_2015   = 10,
+    ovrHmd_ES06      = 11,
+    ovrHmd_ES09      = 12,
+    ovrHmd_ES11      = 13,
+    ovrHmd_CV1       = 14,
+
+    ovrHmd_EnumSize  = 0x7fffffff ///< \internal Force type int32_t.
+} ovrHmdType;
+
+
+/// HMD capability bits reported by device.
+///
+typedef enum ovrHmdCaps_
+{
+    // Read-only flags
+    ovrHmdCap_DebugDevice             = 0x0010,   ///< <B>(read only)</B> Specifies that the HMD is a virtual debug device.
+
+
+    ovrHmdCap_EnumSize            = 0x7fffffff ///< \internal Force type int32_t.
+} ovrHmdCaps;
+
+
+/// Tracking capability bits reported by the device.
+/// Used with ovr_GetTrackingCaps.
+typedef enum ovrTrackingCaps_
+{
+    ovrTrackingCap_Orientation      = 0x0010,    ///< Supports orientation tracking (IMU).
+    ovrTrackingCap_MagYawCorrection = 0x0020,    ///< Supports yaw drift correction via a magnetometer or other means.
+    ovrTrackingCap_Position         = 0x0040,    ///< Supports positional tracking.
+    ovrTrackingCap_EnumSize         = 0x7fffffff ///< \internal Force type int32_t.
+} ovrTrackingCaps;
+
+
+/// Specifies which eye is being used for rendering.
+/// This type explicitly does not include a third "NoStereo" monoscopic option, as such is
+/// not required for an HMD-centered API.
+typedef enum ovrEyeType_
+{
+    ovrEye_Left     = 0,         ///< The left eye, from the viewer's perspective.
+    ovrEye_Right    = 1,         ///< The right eye, from the viewer's perspective.
+    ovrEye_Count    = 2,         ///< \internal Count of enumerated elements.
+    ovrEye_EnumSize = 0x7fffffff ///< \internal Force type int32_t.
+} ovrEyeType;
+
+/// Specifies the coordinate system ovrTrackingState returns tracking poses in.
+/// Used with ovr_SetTrackingOriginType()
+typedef enum ovrTrackingOrigin_
+{
+    /// \brief Tracking system origin reported at eye (HMD) height
+    /// \details Prefer using this origin when your application requires
+    /// matching the user's current physical head pose to a virtual head pose
+    /// without any regard to the height of the floor. Cockpit-based
+    /// or 3rd-person experiences are ideal candidates.
+    /// When used, all poses in ovrTrackingState are reported as an offset
+    /// transform from the profile calibrated or recentered HMD pose.
+    /// It is recommended that apps using this origin type call ovr_RecenterTrackingOrigin
+    /// prior to starting the VR experience, but notify the user before doing so
+    /// to make sure the user is in a comfortable pose, facing a comfortable
+    /// direction.
+    ovrTrackingOrigin_EyeLevel = 0,
+    /// \brief Tracking system origin reported at floor height
+    /// \details Prefer using this origin when your application requires the
+    /// physical floor height to match the virtual floor height, such as
+    /// standing experiences.
+    /// When used, all poses in ovrTrackingState are reported as an offset
+    /// transform from the profile calibrated floor pose. Calling ovr_RecenterTrackingOrigin
+    /// will recenter the X & Z axes as well as yaw, but the Y-axis (i.e. height) will continue
+    /// to be reported using the floor height as the origin for all poses.
+    ovrTrackingOrigin_FloorLevel = 1,
+    ovrTrackingOrigin_Count = 2,            ///< \internal Count of enumerated elements.
+    ovrTrackingOrigin_EnumSize = 0x7fffffff ///< \internal Force type int32_t.
+} ovrTrackingOrigin;
+
+/// Identifies a graphics device in a platform-specific way.
+/// For Windows this is a LUID type.
+typedef struct OVR_ALIGNAS(OVR_PTR_SIZE) ovrGraphicsLuid_
+{
+    // Public definition reserves space for graphics API-specific implementation
+    char        Reserved[8];
+} ovrGraphicsLuid;
+
+
+/// This is a complete descriptor of the HMD.
+typedef struct OVR_ALIGNAS(OVR_PTR_SIZE) ovrHmdDesc_
+{
+    ovrHmdType   Type;                         ///< The type of HMD.
+    OVR_ON64(OVR_UNUSED_STRUCT_PAD(pad0, 4))   ///< \internal struct padding.
+    char         ProductName[64];              ///< UTF8-encoded product identification string (e.g. "Oculus Rift DK1").
+    char         Manufacturer[64];             ///< UTF8-encoded HMD manufacturer identification string.
+    short        VendorId;                     ///< HID (USB) vendor identifier of the device.
+    short        ProductId;                    ///< HID (USB) product identifier of the device.
+    char         SerialNumber[24];             ///< HMD serial number.
+    short        FirmwareMajor;                ///< HMD firmware major version.
+    short        FirmwareMinor;                ///< HMD firmware minor version.
+    unsigned int AvailableHmdCaps;             ///< Capability bits described by ovrHmdCaps which the HMD currently supports.
+    unsigned int DefaultHmdCaps;               ///< Capability bits described by ovrHmdCaps which are default for the current Hmd.
+    unsigned int AvailableTrackingCaps;        ///< Capability bits described by ovrTrackingCaps which the system currently supports.
+    unsigned int DefaultTrackingCaps;          ///< Capability bits described by ovrTrackingCaps which are default for the current system.
+    ovrFovPort   DefaultEyeFov[ovrEye_Count];  ///< Defines the recommended FOVs for the HMD.
+    ovrFovPort   MaxEyeFov[ovrEye_Count];      ///< Defines the maximum FOVs for the HMD.
+    ovrSizei     Resolution;                   ///< Resolution of the full HMD screen (both eyes) in pixels.
+    float        DisplayRefreshRate;           ///< Nominal refresh rate of the display in cycles per second at the time of HMD creation.
+    OVR_ON64(OVR_UNUSED_STRUCT_PAD(pad1, 4))   ///< \internal struct padding.
+} ovrHmdDesc;
+
+
+/// Used as an opaque pointer to an OVR session.
+typedef struct ovrHmdStruct* ovrSession;
+
+
+
+/// Bit flags describing the current status of sensor tracking.
+///  The values must be the same as in enum StatusBits
+///
+/// \see ovrTrackingState
+///
+typedef enum ovrStatusBits_
+{
+    ovrStatus_OrientationTracked    = 0x0001,    ///< Orientation is currently tracked (connected and in use).
+    ovrStatus_PositionTracked       = 0x0002,    ///< Position is currently tracked (false if out of range).
+    ovrStatus_EnumSize              = 0x7fffffff ///< \internal Force type int32_t.
+} ovrStatusBits;
+
+
+///  Specifies the description of a single sensor.
+///
+/// \see ovrGetTrackerDesc
+///
+typedef struct OVR_ALIGNAS(OVR_PTR_SIZE) ovrTrackerDesc_
+{
+    float FrustumHFovInRadians;      ///< Sensor frustum horizontal field-of-view (if present).
+    float FrustumVFovInRadians;      ///< Sensor frustum vertical field-of-view (if present).
+    float FrustumNearZInMeters;      ///< Sensor frustum near Z (if present).
+    float FrustumFarZInMeters;       ///< Sensor frustum far Z (if present).
+} ovrTrackerDesc;
+
+
+///  Specifies sensor flags.
+///
+///  \see ovrTrackerPose
+///
+typedef enum ovrTrackerFlags_
+{
+    ovrTracker_Connected   = 0x0020,      ///< The sensor is present, else the sensor is absent or offline.
+    ovrTracker_PoseTracked = 0x0004       ///< The sensor has a valid pose, else the pose is unavailable. This will only be set if ovrTracker_Connected is set.
+} ovrTrackerFlags;
+
+
+///  Specifies the pose for a single sensor.
+///
+typedef struct OVR_ALIGNAS(8) _ovrTrackerPose
+{
+    unsigned int TrackerFlags;      ///< ovrTrackerFlags.
+    ovrPosef     Pose;              ///< The sensor's pose. This pose includes sensor tilt (roll and pitch). For a leveled coordinate system use LeveledPose.
+    ovrPosef     LeveledPose;       ///< The sensor's leveled pose, aligned with gravity. This value includes position and yaw of the sensor, but not roll and pitch. It can be used as a reference point to render real-world objects in the correct location.
+    OVR_UNUSED_STRUCT_PAD(pad0, 4)  ///< \internal struct pad.
+} ovrTrackerPose;
+
+
+/// Tracking state at a given absolute time (describes predicted HMD pose, etc.).
+/// Returned by ovr_GetTrackingState.
+///
+/// \see ovr_GetTrackingState
+///
+typedef struct OVR_ALIGNAS(8) ovrTrackingState_
+{
+    /// Predicted head pose (and derivatives) at the requested absolute time.
+    ovrPoseStatef  HeadPose;
+
+    /// HeadPose tracking status described by ovrStatusBits.
+    unsigned int   StatusFlags;
+
+    /// The most recent calculated pose for each hand when hand controller tracking is present.
+    /// HandPoses[ovrHand_Left] refers to the left hand and HandPoses[ovrHand_Right] to the right hand.
+    /// These values can be combined with ovrInputState for complete hand controller information.
+    ovrPoseStatef  HandPoses[2];
+
+    /// HandPoses status flags described by ovrStatusBits.
+    /// Only ovrStatus_OrientationTracked and ovrStatus_PositionTracked are reported.
+    unsigned int   HandStatusFlags[2];
+
+    /// The pose of the origin captured during calibration.
+    /// Like all other poses here, this is expressed in the space set by ovr_RecenterTrackingOrigin,
+    /// and so will change every time that is called. This pose can be used to calculate
+    /// where the calibrated origin lands in the new recentered space.
+    /// If an application never calls ovr_RecenterTrackingOrigin, expect this value to be the identity
+    /// pose, i.e. it simply reflects the origin implied by the ovrTrackingOrigin requested when
+    /// calling ovr_GetTrackingState.
+    ovrPosef      CalibratedOrigin;
+
+} ovrTrackingState;
+
+
+/// Rendering information for each eye. Computed by ovr_GetRenderDesc() based on the
+/// specified FOV. Note that the rendering viewport is not included
+/// here as it can be specified separately and modified per frame by
+/// passing different Viewport values in the layer structure.
+///
+/// \see ovr_GetRenderDesc
+///
+typedef struct OVR_ALIGNAS(4) ovrEyeRenderDesc_
+{
+    ovrEyeType  Eye;                        ///< The eye index to which this instance corresponds.
+    ovrFovPort  Fov;                        ///< The field of view.
+    ovrRecti    DistortedViewport;          ///< Distortion viewport.
+    ovrVector2f PixelsPerTanAngleAtCenter;  ///< How many display pixels will fit in tan(angle) = 1.
+    ovrVector3f HmdToEyeOffset;             ///< Translation of each eye, in meters.
+} ovrEyeRenderDesc;
+
+
+/// Projection information for ovrLayerEyeFovDepth.
+///
+/// Use the utility function ovrTimewarpProjectionDesc_FromProjection to
+/// generate this structure from the application's projection matrix.
+///
+/// \see ovrLayerEyeFovDepth, ovrTimewarpProjectionDesc_FromProjection
+///
+typedef struct OVR_ALIGNAS(4) ovrTimewarpProjectionDesc_
+{
+    float Projection22;     ///< Projection matrix element [2][2].
+    float Projection23;     ///< Projection matrix element [2][3].
+    float Projection32;     ///< Projection matrix element [3][2].
+} ovrTimewarpProjectionDesc;
+
+
+/// Contains the data necessary to properly calculate position info for various layer types.
+/// - HmdToEyeOffset is the same value pair provided in ovrEyeRenderDesc.
+/// - HmdSpaceToWorldScaleInMeters is used to scale player motion into in-application units.
+///   In other words, it is how big an in-application unit is in the player's physical meters.
+///   For example, if the application uses inches as its units then HmdSpaceToWorldScaleInMeters would be 0.0254.
+///   Note that if you are scaling the player in size, this must also scale. So if your application
+///   units are inches, but you're shrinking the player to half their normal size, then
+///   HmdSpaceToWorldScaleInMeters would be 0.0254*2.0.
+///
+/// \see ovrEyeRenderDesc, ovr_SubmitFrame
+///
+typedef struct OVR_ALIGNAS(4) ovrViewScaleDesc_
+{
+    ovrVector3f HmdToEyeOffset[ovrEye_Count];   ///< Translation of each eye.
+    float       HmdSpaceToWorldScaleInMeters;   ///< Ratio of viewer units to meter units.
+} ovrViewScaleDesc;
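+
+// Illustrative sketch (not part of the SDK): an application whose world units are centimeters,
+// and which additionally shrinks the player to half their normal size, would fill this in roughly
+// as follows (eyeRenderDesc below is assumed to come from ovr_GetRenderDesc for each eye):
+//
+//     ovrViewScaleDesc viewScale;
+//     viewScale.HmdToEyeOffset[ovrEye_Left]  = eyeRenderDesc[ovrEye_Left].HmdToEyeOffset;
+//     viewScale.HmdToEyeOffset[ovrEye_Right] = eyeRenderDesc[ovrEye_Right].HmdToEyeOffset;
+//     viewScale.HmdSpaceToWorldScaleInMeters = 0.01f * 2.0f;  // 1 unit = 1 cm, player at half size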
+
+
+//-----------------------------------------------------------------------------------
+// ***** Platform-independent Rendering Configuration
+
+/// The type of texture resource.
+///
+/// \see ovrTextureSwapChainDesc
+///
+typedef enum ovrTextureType_
+{
+    ovrTexture_2D,              ///< 2D textures.
+    ovrTexture_2D_External,     ///< External 2D texture. Not used on PC
+    ovrTexture_Cube,            ///< Cube maps. Not currently supported on PC.
+    ovrTexture_Count,
+    ovrTexture_EnumSize = 0x7fffffff  ///< \internal Force type int32_t.
+} ovrTextureType;
+
+/// The bindings required for texture swap chain.
+///
+/// All texture swap chains are automatically bindable as shader
+/// input resources since the Oculus runtime needs this to read them.
+///
+/// \see ovrTextureSwapChainDesc
+///
+typedef enum ovrTextureBindFlags_
+{
+    ovrTextureBind_None,
+    ovrTextureBind_DX_RenderTarget = 0x0001,    ///< The application can write into the chain with pixel shader
+    ovrTextureBind_DX_UnorderedAccess = 0x0002, ///< The application can write to the chain with compute shader
+    ovrTextureBind_DX_DepthStencil = 0x0004,    ///< The chain buffers can be bound as depth and/or stencil buffers
+
+    ovrTextureBind_EnumSize = 0x7fffffff  ///< \internal Force type int32_t.
+} ovrTextureBindFlags;
+
+/// The format of a texture.
+///
+/// \see ovrTextureSwapChainDesc
+///
+typedef enum ovrTextureFormat_
+{
+    OVR_FORMAT_UNKNOWN,
+    OVR_FORMAT_B5G6R5_UNORM,    ///< Not currently supported on PC. Would require a DirectX 11.1 device.
+    OVR_FORMAT_B5G5R5A1_UNORM,  ///< Not currently supported on PC. Would require a DirectX 11.1 device.
+    OVR_FORMAT_B4G4R4A4_UNORM,  ///< Not currently supported on PC. Would require a DirectX 11.1 device.
+    OVR_FORMAT_R8G8B8A8_UNORM,
+    OVR_FORMAT_R8G8B8A8_UNORM_SRGB,
+    OVR_FORMAT_B8G8R8A8_UNORM,
+    OVR_FORMAT_B8G8R8A8_UNORM_SRGB, ///< Not supported for OpenGL applications
+    OVR_FORMAT_B8G8R8X8_UNORM,      ///< Not supported for OpenGL applications
+    OVR_FORMAT_B8G8R8X8_UNORM_SRGB, ///< Not supported for OpenGL applications
+    OVR_FORMAT_R16G16B16A16_FLOAT,
+    OVR_FORMAT_D16_UNORM,
+    OVR_FORMAT_D24_UNORM_S8_UINT,
+    OVR_FORMAT_D32_FLOAT,
+    OVR_FORMAT_D32_FLOAT_S8X24_UINT,
+
+    OVR_FORMAT_ENUMSIZE = 0x7fffffff  ///< \internal Force type int32_t.
+} ovrTextureFormat;
+
+/// Misc flags overriding particular
+///   behaviors of a texture swap chain
+///
+/// \see ovrTextureSwapChainDesc
+///
+typedef enum ovrTextureMiscFlags_
+{
+    ovrTextureMisc_None, 
+
+    /// DX only: The underlying texture is created with a TYPELESS equivalent of the
+    /// format specified in the texture desc. The SDK will still access the
+    /// texture using the format specified in the texture desc, but the app can
+    /// create views with different formats if this is specified.
+    ovrTextureMisc_DX_Typeless = 0x0001,
+
+    /// DX only: Allow generation of the mip chain on the GPU via the GenerateMips
+    /// call. This flag requires that RenderTarget binding also be specified.
+    ovrTextureMisc_AllowGenerateMips = 0x0002,
+
+    ovrTextureMisc_EnumSize = 0x7fffffff  ///< \internal Force type int32_t.
+} ovrTextureFlags;
+
+/// Description used to create a texture swap chain.
+///
+/// \see ovr_CreateTextureSwapChainDX
+/// \see ovr_CreateTextureSwapChainGL
+///
+typedef struct
+{
+    ovrTextureType      Type;
+    ovrTextureFormat    Format;
+    int                 ArraySize;      ///< Only supported with ovrTexture_2D. Not supported on PC at this time.
+    int                 Width;
+    int                 Height;
+    int                 MipLevels;
+    int                 SampleCount;    ///< Currently only supported on depth textures.
+    ovrBool             StaticImage;    ///< Not buffered in a chain. For images that don't change
+    unsigned int        MiscFlags;      ///< ovrTextureMiscFlags
+    unsigned int        BindFlags;      ///< ovrTextureBindFlags. Not used for GL.
+} ovrTextureSwapChainDesc;
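+
+// Illustrative sketch of filling this description for a typical OpenGL eye buffer; 'session' and
+// 'bufferSize' (e.g. from ovr_GetFovTextureSize) are assumed, and ovr_CreateTextureSwapChainGL is
+// declared in the rendering API-specific header OVR_CAPI_GL.h.
+//
+//     ovrTextureSwapChainDesc desc = {};
+//     desc.Type        = ovrTexture_2D;
+//     desc.Format      = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
+//     desc.ArraySize   = 1;
+//     desc.Width       = bufferSize.w;
+//     desc.Height      = bufferSize.h;
+//     desc.MipLevels   = 1;
+//     desc.SampleCount = 1;
+//     desc.StaticImage = ovrFalse;
+//
+//     ovrTextureSwapChain chain = NULL;
+//     ovrResult result = ovr_CreateTextureSwapChainGL(session, &desc, &chain);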
+
+/// Description used to create a mirror texture.
+///
+/// \see ovr_CreateMirrorTextureDX
+/// \see ovr_CreateMirrorTextureGL
+///
+typedef struct
+{
+    ovrTextureFormat    Format;
+    int                 Width;
+    int                 Height;
+    unsigned int        MiscFlags;      ///< ovrTextureMiscFlags
+} ovrMirrorTextureDesc;
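+
+// Illustrative sketch for a window-sized mirror texture; 'windowWidth'/'windowHeight' are assumed,
+// and ovr_CreateMirrorTextureGL is declared in OVR_CAPI_GL.h.
+//
+//     ovrMirrorTextureDesc mirrorDesc = {};
+//     mirrorDesc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
+//     mirrorDesc.Width  = windowWidth;
+//     mirrorDesc.Height = windowHeight;
+//
+//     ovrMirrorTexture mirror = NULL;
+//     ovrResult result = ovr_CreateMirrorTextureGL(session, &mirrorDesc, &mirror);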
+
+typedef struct ovrTextureSwapChainData* ovrTextureSwapChain;
+typedef struct ovrMirrorTextureData* ovrMirrorTexture;
+
+//-----------------------------------------------------------------------------------
+
+/// Describes button input types.
+/// Button inputs are combined; that is, they will be reported as pressed if they are
+/// pressed on either one of the two devices.
+/// The ovrButton_Up/Down/Left/Right values map to both the Xbox D-Pad and directional buttons.
+/// ovrButton_Enter and ovrButton_Back map to the Start and Back controller buttons, respectively.
+typedef enum ovrButton_
+{    
+    ovrButton_A         = 0x00000001,
+    ovrButton_B         = 0x00000002,
+    ovrButton_RThumb    = 0x00000004,
+    ovrButton_RShoulder = 0x00000008,
+
+    // Bit mask of all buttons on the right Touch controller
+    ovrButton_RMask     = ovrButton_A | ovrButton_B | ovrButton_RThumb | ovrButton_RShoulder,
+
+    ovrButton_X         = 0x00000100,
+    ovrButton_Y         = 0x00000200,
+    ovrButton_LThumb    = 0x00000400,  
+    ovrButton_LShoulder = 0x00000800,
+
+    // Bit mask of all buttons on the left Touch controller
+    ovrButton_LMask     = ovrButton_X | ovrButton_Y | ovrButton_LThumb | ovrButton_LShoulder,
+
+    // Navigation through DPad.
+    ovrButton_Up        = 0x00010000,
+    ovrButton_Down      = 0x00020000,
+    ovrButton_Left      = 0x00040000,
+    ovrButton_Right     = 0x00080000,
+    ovrButton_Enter     = 0x00100000, // Start on XBox controller.
+    ovrButton_Back      = 0x00200000, // Back on Xbox controller.
+    ovrButton_VolUp     = 0x00400000,  // only supported by Remote.
+    ovrButton_VolDown   = 0x00800000,  // only supported by Remote.
+    ovrButton_Home      = 0x01000000,  
+    ovrButton_Private   = ovrButton_VolUp | ovrButton_VolDown | ovrButton_Home,
+
+
+    ovrButton_EnumSize  = 0x7fffffff ///< \internal Force type int32_t.
+} ovrButton;
+
+/// Describes touch input types.
+/// These values map to capacitive touch values reported in ovrInputState::Touches.
+/// Some of these values are mapped to button bits for consistency.
+typedef enum ovrTouch_
+{
+    ovrTouch_A              = ovrButton_A,
+    ovrTouch_B              = ovrButton_B,
+    ovrTouch_RThumb         = ovrButton_RThumb,
+    ovrTouch_RIndexTrigger  = 0x00000010,
+
+    // Bit mask of all the button touches on the right controller
+    ovrTouch_RButtonMask    = ovrTouch_A | ovrTouch_B | ovrTouch_RThumb | ovrTouch_RIndexTrigger,
+
+    ovrTouch_X              = ovrButton_X,
+    ovrTouch_Y              = ovrButton_Y,
+    ovrTouch_LThumb         = ovrButton_LThumb,
+    ovrTouch_LIndexTrigger  = 0x00001000,
+
+    // Bit mask of all the button touches on the left controller
+    ovrTouch_LButtonMask    = ovrTouch_X | ovrTouch_Y | ovrTouch_LThumb | ovrTouch_LIndexTrigger,
+
+    // Finger pose state 
+    // Derived internally based on distance, proximity to sensors and filtering.
+    ovrTouch_RIndexPointing = 0x00000020,
+    ovrTouch_RThumbUp       = 0x00000040,
+
+    // Bit mask of all right controller poses
+    ovrTouch_RPoseMask      = ovrTouch_RIndexPointing | ovrTouch_RThumbUp,
+
+    ovrTouch_LIndexPointing = 0x00002000,
+    ovrTouch_LThumbUp       = 0x00004000,
+
+    // Bit mask of all left controller poses
+    ovrTouch_LPoseMask      = ovrTouch_LIndexPointing | ovrTouch_LThumbUp,
+
+    ovrTouch_EnumSize       = 0x7fffffff ///< \internal Force type int32_t.
+} ovrTouch;
+
+/// Specifies which controller is connected; multiple can be connected at once.
+typedef enum ovrControllerType_
+{
+    ovrControllerType_None      = 0x00,
+    ovrControllerType_LTouch    = 0x01,
+    ovrControllerType_RTouch    = 0x02,
+    ovrControllerType_Touch     = 0x03,
+    ovrControllerType_Remote    = 0x04,
+    ovrControllerType_XBox      = 0x10,
+
+    ovrControllerType_Active    = 0xff,      ///< Operate on or query whichever controller is active.
+
+    ovrControllerType_EnumSize  = 0x7fffffff ///< \internal Force type int32_t.
+} ovrControllerType;
+
+
+/// Provides names for the left and right hand array indexes.
+///
+/// \see ovrInputState, ovrTrackingState
+/// 
+typedef enum ovrHandType_
+{
+    ovrHand_Left  = 0,
+    ovrHand_Right = 1,
+    ovrHand_Count = 2,
+    ovrHand_EnumSize = 0x7fffffff ///< \internal Force type int32_t.
+} ovrHandType;
+
+
+
+/// ovrInputState describes the complete controller input state, including Oculus Touch,
+/// and XBox gamepad. If multiple inputs are connected and used at the same time,
+/// their inputs are combined.
+typedef struct ovrInputState_
+{
+    // System time when the controller state was last updated.
+    double              TimeInSeconds;
+
+    // Values for buttons described by ovrButton.
+    unsigned int        Buttons;
+
+    // Touch values for buttons and sensors as described by ovrTouch.
+    unsigned int        Touches;
+    
+    // Left and right finger trigger values (ovrHand_Left and ovrHand_Right), in the range 0.0 to 1.0f.
+    float               IndexTrigger[ovrHand_Count];
+    
+    // Left and right hand trigger values (ovrHand_Left and ovrHand_Right), in the range 0.0 to 1.0f.
+    float               HandTrigger[ovrHand_Count];
+
+    // Horizontal and vertical thumbstick axis values (ovrHand_Left and ovrHand_Right), in the range -1.0f to 1.0f.
+    ovrVector2f         Thumbstick[ovrHand_Count];
+
+    // The type of the controller this state is for.
+    ovrControllerType   ControllerType;
+    
+} ovrInputState;
+
+
+
+//-----------------------------------------------------------------------------------
+// ***** Initialize structures
+
+/// Initialization flags.
+///
+/// \see ovrInitParams, ovr_Initialize
+///
+typedef enum ovrInitFlags_
+{
+    /// When a debug library is requested, a slower debugging version of the library will
+    /// run which can be used to help solve problems in the library and debug application code.
+    ovrInit_Debug          = 0x00000001,
+
+    /// When a version is requested, the LibOVR runtime respects the RequestedMinorVersion
+    /// field and verifies that the RequestedMinorVersion is supported.
+    ovrInit_RequestVersion = 0x00000004,
+
+    // These bits are writable by user code.
+    ovrinit_WritableBits   = 0x00ffffff,
+
+    ovrInit_EnumSize       = 0x7fffffff ///< \internal Force type int32_t.
+} ovrInitFlags;
+
+
+/// Logging levels
+///
+/// \see ovrInitParams, ovrLogCallback
+///
+typedef enum ovrLogLevel_
+{
+    ovrLogLevel_Debug    = 0, ///< Debug-level log event.
+    ovrLogLevel_Info     = 1, ///< Info-level log event.
+    ovrLogLevel_Error    = 2, ///< Error-level log event.
+
+    ovrLogLevel_EnumSize = 0x7fffffff ///< \internal Force type int32_t.
+} ovrLogLevel;
+
+
+/// Signature of the logging callback function pointer type.
+///
+/// \param[in] userData is an arbitrary value specified by the user of ovrInitParams.
+/// \param[in] level is one of the ovrLogLevel constants.
+/// \param[in] message is a UTF8-encoded null-terminated string.
+/// \see ovrInitParams, ovrLogLevel, ovr_Initialize
+///
+typedef void (OVR_CDECL* ovrLogCallback)(uintptr_t userData, int level, const char* message);
+
+
+/// Parameters for ovr_Initialize.
+///
+/// \see ovr_Initialize
+///
+typedef struct OVR_ALIGNAS(8) ovrInitParams_
+{
+    /// Flags from ovrInitFlags to override default behavior.
+    /// Use 0 for the defaults.
+    uint32_t       Flags;
+
+    /// Requests a specific minimum minor version of the LibOVR runtime.
+    /// Flags must include ovrInit_RequestVersion or this will be ignored
+    /// and OVR_MINOR_VERSION will be used.
+    uint32_t       RequestedMinorVersion;
+
+    /// User-supplied log callback function, which may be called at any time
+    /// asynchronously from multiple threads until ovr_Shutdown completes.
+    /// Use NULL to specify no log callback.
+    ovrLogCallback LogCallback;
+
+    /// User-supplied data which is passed as-is to LogCallback. Typically this 
+    /// is used to store an application-specific pointer which is read in the 
+    /// callback function.
+    uintptr_t      UserData;
+
+    /// Relative number of milliseconds to wait for a connection to the server
+    /// before failing. Use 0 for the default timeout.
+    uint32_t       ConnectionTimeoutMS;
+
+    OVR_ON64(OVR_UNUSED_STRUCT_PAD(pad0, 4)) ///< \internal
+
+} ovrInitParams;
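+
+// Illustrative sketch of requesting a specific minor version together with a log callback.
+// 'MyLogCallback' is a hypothetical application function; OVR_MINOR_VERSION comes from OVR_Version.h.
+//
+//     static void OVR_CDECL MyLogCallback(uintptr_t userData, int level, const char* message)
+//     {
+//         (void)userData; (void)level;
+//         printf("LibOVR: %s\n", message);     // requires <stdio.h>
+//     }
+//
+//     ovrInitParams params = {};
+//     params.Flags                 = ovrInit_RequestVersion;
+//     params.RequestedMinorVersion = OVR_MINOR_VERSION;
+//     params.LogCallback           = MyLogCallback;
+//     ovrResult result = ovr_Initialize(&params);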
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+// -----------------------------------------------------------------------------------
+// ***** API Interfaces
+
+// Overview of the API
+//
+// Setup:
+//  - ovr_Initialize().
+//  - ovr_Create(&hmd, &graphicsId).
+//  - Use hmd members and ovr_GetFovTextureSize() to determine graphics configuration
+//    and ovr_GetRenderDesc() to get per-eye rendering parameters.
+//  - Allocate texture swap chains with ovr_CreateTextureSwapChainDX() or
+//    ovr_CreateTextureSwapChainGL(). Create any associated render target views or
+//    frame buffer objects.
+//
+// Application Loop:
+//  - Call ovr_GetPredictedDisplayTime() to get the current frame timing information.
+//  - Call ovr_GetTrackingState() and ovr_CalcEyePoses() to obtain the predicted
+//    rendering pose for each eye based on timing.
+//  - Render the scene content into the current buffer of the texture swapchains
+//    for each eye and layer you plan to update this frame. If you render into a
+//    texture swap chain, you must call ovr_CommitTextureSwapChain() on it to commit
+//    the changes before you reference the chain this frame (otherwise, your latest
+//    changes won't be picked up).
+//  - Call ovr_SubmitFrame() to render the distorted layers to and present them on the HMD.
+//    If ovr_SubmitFrame returns ovrSuccess_NotVisible, there is no need to render the scene
+//    for the next loop iteration. Instead, just call ovr_SubmitFrame again until it returns
+//    ovrSuccess. 
+//
+// Shutdown:
+//  - ovr_Destroy().
+//  - ovr_Shutdown().
+
+
+/// Initializes LibOVR
+///
+/// Initialize LibOVR for application usage. This includes finding and loading the LibOVRRT
+/// shared library. No LibOVR API functions, other than ovr_GetLastErrorInfo, can be called
+/// unless ovr_Initialize succeeds. A successful call to ovr_Initialize must be eventually
+/// followed by a call to ovr_Shutdown. ovr_Initialize calls are idempotent.
+/// Calling ovr_Initialize twice does not require two matching calls to ovr_Shutdown.
+/// If already initialized, the return value is ovrSuccess.
+/// 
+/// LibOVRRT shared library search order:
+///      -# Current working directory (often the same as the application directory).
+///      -# Module directory (usually the same as the application directory,
+///         but not if the module is a separate shared library).
+///      -# Application directory
+///      -# Development directory (only if OVR_ENABLE_DEVELOPER_SEARCH is enabled,
+///         which is off by default).
+///      -# Standard OS shared library search location(s) (OS-specific).
+///
+/// \param params Specifies custom initialization options. May be NULL to indicate default options.
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
+///         ovr_GetLastErrorInfo to get more information. Example failed results include:
+///     - ovrError_Initialize: Generic initialization error.
+///     - ovrError_LibLoad: Couldn't load LibOVRRT.
+///     - ovrError_LibVersion: LibOVRRT version incompatibility.
+///     - ovrError_ServiceConnection: Couldn't connect to the OVR Service.
+///     - ovrError_ServiceVersion: OVR Service version incompatibility.
+///     - ovrError_IncompatibleOS: The operating system version is incompatible.
+///     - ovrError_DisplayInit: Unable to initialize the HMD display.
+///     - ovrError_ServerStart:  Unable to start the server. Is it already running?
+///     - ovrError_Reinitialization: Attempted to re-initialize with a different version.
+///
+/// <b>Example code</b>
+///     \code{.cpp}
+///         ovrResult result = ovr_Initialize(NULL);
+///         if(OVR_FAILURE(result)) {
+///             ovrErrorInfo errorInfo;
+///             ovr_GetLastErrorInfo(&errorInfo);
+///             DebugLog("ovr_Initialize failed: %s", errorInfo.ErrorString);
+///             return false;
+///         }
+///         [...]
+///     \endcode
+///
+/// \see ovr_Shutdown
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_Initialize(const ovrInitParams* params);
+
+
+/// Shuts down LibOVR
+///
+/// A successful call to ovr_Initialize must be eventually matched by a call to ovr_Shutdown.
+/// After calling ovr_Shutdown, no LibOVR functions can be called except ovr_GetLastErrorInfo
+/// or another ovr_Initialize. ovr_Shutdown invalidates all pointers, references, and created objects
+/// previously returned by LibOVR functions. The LibOVRRT shared library can be unloaded by
+/// ovr_Shutdown.
+///
+/// \see ovr_Initialize
+///
+OVR_PUBLIC_FUNCTION(void) ovr_Shutdown();
+
+/// Returns information about the most recent failed return value by the
+/// current thread for this library.
+///
+/// This function itself can never generate an error.
+/// The last error is never cleared by LibOVR, but will be overwritten by new errors.
+/// Do not use this call to determine if there was an error in the last API
+/// call as successful API calls don't clear the last ovrErrorInfo.
+/// To avoid any inconsistency, ovr_GetLastErrorInfo should be called immediately
+/// after an API function that returned a failed ovrResult, with no other API
+/// functions called in the interim.
+///
+/// \param[out] errorInfo The last ovrErrorInfo for the current thread.
+///
+/// \see ovrErrorInfo
+///
+OVR_PUBLIC_FUNCTION(void) ovr_GetLastErrorInfo(ovrErrorInfo* errorInfo);
+
+
+/// Returns the version string representing the LibOVRRT version.
+///
+/// The returned string pointer is valid until the next call to ovr_Shutdown.
+///
+/// Note that the returned version string doesn't necessarily match the current
+/// OVR_MAJOR_VERSION, etc., as the returned string refers to the LibOVRRT shared
+/// library version and not the locally compiled interface version.
+///
+/// The format of this string is subject to change in future versions and its contents
+/// should not be interpreted.
+///
+/// \return Returns a UTF8-encoded null-terminated version string.
+///
+OVR_PUBLIC_FUNCTION(const char*) ovr_GetVersionString();
+
+
+/// Writes a message string to the LibOVR tracing mechanism (if enabled).
+///
+/// This message will be passed back to the application via the ovrLogCallback if
+/// it was registered.
+///
+/// \param[in] level One of the ovrLogLevel constants.
+/// \param[in] message A UTF8-encoded null-terminated string.
+/// \return returns the strlen of the message or a negative value if the message is too large.
+///
+/// \see ovrLogLevel, ovrLogCallback
+///
+OVR_PUBLIC_FUNCTION(int) ovr_TraceMessage(int level, const char* message);
+
+
+//-------------------------------------------------------------------------------------
+/// @name HMD Management
+///
+/// Handles the enumeration, creation, destruction, and properties of an HMD (head-mounted display).
+///@{
+
+
+/// Returns information about the current HMD.
+///
+/// ovr_Initialize must have first been called in order for this to succeed, otherwise ovrHmdDesc::Type
+/// will be reported as ovrHmd_None.
+/// 
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create, else NULL in which
+///                case this function detects whether an HMD is present and returns its info if so.
+///
+/// \return Returns an ovrHmdDesc. If session is NULL and ovrHmdDesc::Type is ovrHmd_None then
+///         no HMD is present.
+///
+OVR_PUBLIC_FUNCTION(ovrHmdDesc) ovr_GetHmdDesc(ovrSession session);
+
+
+/// Returns the number of sensors. 
+///
+/// The number of sensors may change at any time, so this function should be called before use 
+/// as opposed to once on startup.
+/// 
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+///
+/// \return Returns unsigned int count.
+///
+OVR_PUBLIC_FUNCTION(unsigned int) ovr_GetTrackerCount(ovrSession session);
+
+
+/// Returns a given sensor description.
+///
+/// It's possible that sensor desc [0] may indicate an unconnected or non-pose-tracked sensor, but
+/// sensor desc [1] may be connected.
+///
+/// ovr_Initialize must have first been called in order for this to succeed, otherwise the returned
+/// trackerDescArray will be zero-initialized. The data returned by this function can change at runtime.
+/// 
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// 
+/// \param[in] trackerDescIndex Specifies a sensor index. The valid indexes are in the range of 0 to 
+///            the sensor count returned by ovr_GetTrackerCount.
+///
+/// \return Returns ovrTrackerDesc. An empty ovrTrackerDesc will be returned if trackerDescIndex is out of range.
+///
+/// \see ovrTrackerDesc, ovr_GetTrackerCount
+///
+OVR_PUBLIC_FUNCTION(ovrTrackerDesc) ovr_GetTrackerDesc(ovrSession session, unsigned int trackerDescIndex);
+
+
+/// Creates a handle to a VR session.
+///
+/// Upon success the returned ovrSession must be eventually freed with ovr_Destroy when it is no longer needed.
+/// A second call to ovr_Create will result in an error return value if the previous Hmd has not been destroyed.
+///
+/// \param[out] pSession Provides a pointer to an ovrSession which will be written to upon success.
+/// \param[out] luid Provides a system specific graphics adapter identifier that locates which
+/// graphics adapter has the HMD attached. This must match the adapter used by the application
+/// or no rendering output will be possible. This is important for stability on multi-adapter systems. An
+/// application that simply chooses the default adapter will not run reliably on multi-adapter systems.
+/// \return Returns an ovrResult indicating success or failure. Upon failure
+///         the returned pSession will be NULL.
+///
+/// <b>Example code</b>
+///     \code{.cpp}
+///         ovrSession session;
+///         ovrGraphicsLuid luid;
+///         ovrResult result = ovr_Create(&session, &luid);
+///         if(OVR_FAILURE(result))
+///            ...
+///     \endcode
+///
+/// \see ovr_Destroy
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_Create(ovrSession* pSession, ovrGraphicsLuid* pLuid);
+
+
+/// Destroys the HMD.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \see ovr_Create
+///
+OVR_PUBLIC_FUNCTION(void) ovr_Destroy(ovrSession session);
+
+
+/// Specifies status information for the current session.
+///
+/// \see ovr_GetSessionStatus
+///
+typedef struct ovrSessionStatus_
+{
+    ovrBool IsVisible;    ///< True if the process has VR focus and thus is visible in the HMD.
+    ovrBool HmdPresent;   ///< True if an HMD is present.
+    ovrBool HmdMounted;   ///< True if the HMD is on the user's head.
+    ovrBool DisplayLost;  ///< True if the session is in a display-lost state. See ovr_SubmitFrame.
+    ovrBool ShouldQuit;   ///< True if the application should initiate shutdown.    
+    ovrBool ShouldRecenter;  ///< True if UX has requested re-centering. Must call ovr_ClearShouldRecenterFlag or ovr_RecenterTrackingOrigin.
+}ovrSessionStatus;
+
+
+/// Returns status information for the application.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[out] sessionStatus Provides an ovrSessionStatus that is filled in.
+///
+/// \return Returns an ovrResult indicating success or failure. In the case of
+///         failure, use ovr_GetLastErrorInfo to get more information.
+///         Return values include but aren't limited to:
+///     - ovrSuccess: Completed successfully.
+///     - ovrError_ServiceConnection: The service connection was lost and the application
+///       must destroy the session.
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetSessionStatus(ovrSession session, ovrSessionStatus* sessionStatus);
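+
+// Typical polling sketch (illustrative only; 'session' is a valid ovrSession from ovr_Create):
+//
+//     ovrSessionStatus status;
+//     if (OVR_SUCCESS(ovr_GetSessionStatus(session, &status)))
+//     {
+//         if (status.ShouldQuit)     { /* begin an orderly shutdown */ }
+//         if (status.ShouldRecenter) ovr_RecenterTrackingOrigin(session);
+//         if (!status.IsVisible)     { /* skip rendering scene content this frame */ }
+//     }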
+
+
+//@}
+
+
+
+//-------------------------------------------------------------------------------------
+/// @name Tracking
+///
+/// Tracking functions handle the position, orientation, and movement of the HMD in space.
+///
+/// All tracking interface functions are thread-safe, allowing tracking state to be sampled
+/// from different threads.
+///
+///@{
+
+
+
+/// Sets the tracking origin type
+///
+/// When the tracking origin is changed, all of the calls that either provide
+/// or accept ovrPosef will use the new tracking origin provided.
+/// 
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] origin Specifies an ovrTrackingOrigin to be used for all ovrPosef
+///
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
+///         ovr_GetLastErrorInfo to get more information.
+///
+/// \see ovrTrackingOrigin, ovr_GetTrackingOriginType
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_SetTrackingOriginType(ovrSession session, ovrTrackingOrigin origin);
+
+
+/// Gets the tracking origin state
+/// 
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+///
+/// \return Returns the ovrTrackingOrigin that was either set by default or previously set by the application.
+///
+/// \see ovrTrackingOrigin, ovr_SetTrackingOriginType
+OVR_PUBLIC_FUNCTION(ovrTrackingOrigin) ovr_GetTrackingOriginType(ovrSession session);
+
+
+/// Re-centers the sensor position and orientation.
+///
+/// This resets the (x,y,z) positional components and the yaw orientation component.
+/// The roll and pitch orientation components are always determined by gravity and cannot
+/// be redefined. All future tracking will report values relative to this new reference position.
+/// If you are using ovrTrackerPoses then you will need to call ovr_GetTrackerPose after 
+/// this, because the sensor position(s) will change as a result of this.
+/// 
+/// The headset cannot be facing vertically upward or downward but rather must be roughly
+/// level otherwise this function will fail with ovrError_InvalidHeadsetOrientation.
+///
+/// For more info, see the notes on each ovrTrackingOrigin enumeration to understand how
+/// recenter will vary slightly in its behavior based on the current ovrTrackingOrigin setting.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+///
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
+///         ovr_GetLastErrorInfo to get more information. Return values include but aren't limited to:
+///     - ovrSuccess: Completed successfully.
+///     - ovrError_InvalidHeadsetOrientation: The headset was facing an invalid direction when
+///       attempting recentering, such as facing vertically.
+///
+/// \see ovrTrackingOrigin, ovr_GetTrackerPose
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_RecenterTrackingOrigin(ovrSession session);
+
+
+/// Clears the ShouldRecenter status bit in ovrSessionStatus.
+///
+/// Clears the ShouldRecenter status bit in ovrSessionStatus, allowing further recenter 
+/// requests to be detected. Since this is automatically done by ovr_RecenterTrackingOrigin,
+/// this only needs to be called when the application is doing its own re-centering.
+OVR_PUBLIC_FUNCTION(void) ovr_ClearShouldRecenterFlag(ovrSession session);
+
+
+/// Returns tracking state reading based on the specified absolute system time.
+///
+/// Pass an absTime value of 0.0 to request the most recent sensor reading. In this case
+/// both PredictedPose and SamplePose will have the same value.
+///
+/// This may also be used for more refined timing of front buffer rendering logic, and so on.
+/// This may be called by multiple threads.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] absTime Specifies the absolute future time to predict the return
+///            ovrTrackingState value. Use 0 to request the most recent tracking state.
+/// \param[in] latencyMarker Specifies that this call is the point in time where
+///            the "App-to-Mid-Photon" latency timer starts from. If a given ovrLayer
+///            provides "SensorSampleTimestamp", that will override the value stored here.
+/// \return Returns the ovrTrackingState that is predicted for the given absTime.
+///
+/// \see ovrTrackingState, ovr_GetEyePoses, ovr_GetTimeInSeconds
+///
+OVR_PUBLIC_FUNCTION(ovrTrackingState) ovr_GetTrackingState(ovrSession session, double absTime, ovrBool latencyMarker);
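+
+// Illustrative sketch (assumes 'session' and 'frameIndex' from the application's frame loop):
+//
+//     double           predictedTime = ovr_GetPredictedDisplayTime(session, frameIndex);
+//     ovrTrackingState ts            = ovr_GetTrackingState(session, predictedTime, ovrTrue);
+//     if (ts.StatusFlags & (ovrStatus_OrientationTracked | ovrStatus_PositionTracked))
+//     {
+//         ovrPosef headPose = ts.HeadPose.ThePose;
+//         // ... use headPose, hand poses, etc. ...
+//     }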
+
+
+
+/// Returns the ovrTrackerPose for the given sensor.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] trackerPoseIndex Index of the sensor being requested.
+///
+/// \return Returns the requested ovrTrackerPose. An empty ovrTrackerPose will be returned if trackerPoseIndex is out of range.
+///
+/// \see ovr_GetTrackerCount
+///
+OVR_PUBLIC_FUNCTION(ovrTrackerPose) ovr_GetTrackerPose(ovrSession session, unsigned int trackerPoseIndex);
+
+
+
+/// Returns the most recent input state for controllers, without positional tracking info.
+///
+/// \param[out] inputState Input state that will be filled in.
+/// \param[in] controllerType Specifies which controller the input will be returned for.
+/// \return Returns ovrSuccess if the new state was successfully obtained.
+///
+/// \see ovrControllerType
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetInputState(ovrSession session, ovrControllerType controllerType, ovrInputState* inputState);
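+
+// Illustrative sketch of sampling Touch controller input once per frame ('session' assumed valid):
+//
+//     ovrInputState inputState;
+//     if (OVR_SUCCESS(ovr_GetInputState(session, ovrControllerType_Touch, &inputState)))
+//     {
+//         if (inputState.Buttons & ovrButton_A)
+//             { /* A is currently pressed on the right Touch controller */ }
+//         float leftIndexTrigger = inputState.IndexTrigger[ovrHand_Left];   // 0.0f .. 1.0f
+//     }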
+
+
+/// Returns controller types connected to the system OR'ed together.
+///
+/// \return A bitmask of ovrControllerTypes connected to the system.
+///
+/// \see ovrControllerType
+///
+OVR_PUBLIC_FUNCTION(unsigned int) ovr_GetConnectedControllerTypes(ovrSession session);
+
+
+/// Turns on vibration of the given controller.
+///
+/// To disable vibration, call ovr_SetControllerVibration with an amplitude of 0.
+/// Vibration automatically stops after a nominal amount of time, so if you want vibration 
+/// to be continuous over multiple seconds then you need to call this function periodically.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] controllerType Specifies the controller to apply the vibration to.
+/// \param[in] frequency Specifies a vibration frequency in the range of 0.0 to 1.0. 
+///            Currently the only valid values are 0.0, 0.5, and 1.0 and other values will
+///            be clamped to one of these.
+/// \param[in] amplitude Specifies a vibration amplitude in the range of 0.0 to 1.0.
+///
+/// \return Returns ovrSuccess upon success.
+///
+/// \see ovrControllerType
+/// 
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_SetControllerVibration(ovrSession session, ovrControllerType controllerType,
+                                                            float frequency, float amplitude);
+
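+// Illustrative sketch: pulse the right Touch controller while its index trigger is held
+// ('inputState' as returned by ovr_GetInputState above). Remember to keep re-issuing the call,
+// since vibration stops automatically after a nominal amount of time.
+//
+//     if (inputState.IndexTrigger[ovrHand_Right] > 0.5f)
+//         ovr_SetControllerVibration(session, ovrControllerType_RTouch, 0.5f, 1.0f);
+//     else
+//         ovr_SetControllerVibration(session, ovrControllerType_RTouch, 0.0f, 0.0f);
+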
+///@}
+
+
+//-------------------------------------------------------------------------------------
+// @name Layers
+//
+///@{
+
+
+///  Specifies the maximum number of layers supported by ovr_SubmitFrame.
+///
+///  \see ovr_SubmitFrame
+///
+enum {
+    ovrMaxLayerCount = 16
+};
+
+/// Describes layer types that can be passed to ovr_SubmitFrame.
+/// Each layer type has an associated struct, such as ovrLayerEyeFov.
+///
+/// \see ovrLayerHeader
+///
+typedef enum ovrLayerType_
+{
+    ovrLayerType_Disabled    = 0,         ///< Layer is disabled.
+    ovrLayerType_EyeFov      = 1,         ///< Described by ovrLayerEyeFov.
+    ovrLayerType_Quad        = 3,         ///< Described by ovrLayerQuad. Previously called ovrLayerType_QuadInWorld.
+    /// enum 4 used to be ovrLayerType_QuadHeadLocked. Instead, use ovrLayerType_Quad with ovrLayerFlag_HeadLocked.
+    ovrLayerType_EyeMatrix   = 5,         ///< Described by ovrLayerEyeMatrix.
+    ovrLayerType_EnumSize    = 0x7fffffff ///< Force type int32_t.
+} ovrLayerType;
+
+
+/// Identifies flags used by ovrLayerHeader and which are passed to ovr_SubmitFrame.
+///
+/// \see ovrLayerHeader
+///
+typedef enum ovrLayerFlags_
+{
+    /// ovrLayerFlag_HighQuality enables 4x anisotropic sampling during the composition of the layer.
+    /// The benefits are mostly visible at the periphery for high-frequency & high-contrast visuals.
+    /// For best results consider combining this flag with an ovrTextureSwapChain that has mipmaps and,
+    /// instead of using arbitrarily sized textures, prefer texture sizes that are powers-of-two.
+    /// The actual rendered viewport doesn't necessarily have to fill the whole texture.
+    ovrLayerFlag_HighQuality               = 0x01,
+
+    /// ovrLayerFlag_TextureOriginAtBottomLeft: the opposite is TopLeft.
+    /// Generally this is false for D3D, true for OpenGL.
+    ovrLayerFlag_TextureOriginAtBottomLeft = 0x02,
+
+    /// Mark this surface as "headlocked", which means it is specified
+    /// relative to the HMD and moves with it, rather than being specified
+    /// relative to sensor/torso space and remaining still while the head moves.
+    /// What used to be ovrLayerType_QuadHeadLocked is now ovrLayerType_Quad plus this flag.
+    /// However the flag can be applied to any layer type to achieve a similar effect.
+    ovrLayerFlag_HeadLocked                = 0x04
+
+} ovrLayerFlags;
+
+
+/// Defines properties shared by all ovrLayer structs, such as ovrLayerEyeFov.
+///
+/// ovrLayerHeader is used as a base member in these larger structs.
+/// This struct cannot be used by itself except for the case that Type is ovrLayerType_Disabled.
+///
+/// \see ovrLayerType, ovrLayerFlags
+///
+typedef struct OVR_ALIGNAS(OVR_PTR_SIZE) ovrLayerHeader_
+{
+    ovrLayerType    Type;   ///< Described by ovrLayerType.
+    unsigned        Flags;  ///< Described by ovrLayerFlags.
+} ovrLayerHeader;
+
+
+/// Describes a layer that specifies a monoscopic or stereoscopic view.
+/// This is the kind of layer that's typically used as layer 0 to ovr_SubmitFrame,
+/// as it is the kind of layer used to render a 3D stereoscopic view.
+///
+/// Three options exist with respect to mono/stereo texture usage:
+///    - ColorTexture[0] and ColorTexture[1] contain the left and right stereo renderings, respectively.
+///      Viewport[0] and Viewport[1] refer to ColorTexture[0] and ColorTexture[1], respectively.
+///    - ColorTexture[0] contains both the left and right renderings, ColorTexture[1] is NULL,
+///      and Viewport[0] and Viewport[1] refer to sub-rects within ColorTexture[0].
+///    - ColorTexture[0] contains a single monoscopic rendering, and Viewport[0] and
+///      Viewport[1] both refer to that rendering.
+///
+/// \see ovrTextureSwapChain, ovr_SubmitFrame
+///
+typedef struct OVR_ALIGNAS(OVR_PTR_SIZE) ovrLayerEyeFov_
+{
+    /// Header.Type must be ovrLayerType_EyeFov.
+    ovrLayerHeader      Header;
+
+    /// ovrTextureSwapChains for the left and right eye respectively.
+    /// The second one of which can be NULL for cases described above.
+    ovrTextureSwapChain  ColorTexture[ovrEye_Count];
+
+    /// Specifies the ColorTexture sub-rect UV coordinates.
+    /// Both Viewport[0] and Viewport[1] must be valid.
+    ovrRecti            Viewport[ovrEye_Count];
+
+    /// The viewport field of view.
+    ovrFovPort          Fov[ovrEye_Count];
+
+    /// Specifies the position and orientation of each eye view, with the position specified in meters.
+    /// RenderPose will typically be the value returned from ovr_CalcEyePoses,
+    /// but can be different in special cases if a different head pose is used for rendering.
+    ovrPosef            RenderPose[ovrEye_Count];
+
+    /// Specifies the timestamp when the source ovrPosef (used in calculating RenderPose)
+    /// was sampled from the SDK. Typically retrieved by calling ovr_GetTimeInSeconds
+    /// around the instant the application calls ovr_GetTrackingState
+    /// The main purpose for this is to accurately track app tracking latency.
+    double              SensorSampleTime;
+
+} ovrLayerEyeFov;
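+
+// Illustrative fill-in sketch for a stereo eye-FOV layer; 'eyeChain', 'eyeViewport', 'eyePoses',
+// 'sensorSampleTime' and 'hmdDesc' (from ovr_GetHmdDesc) are assumed to exist already.
+//
+//     ovrLayerEyeFov layer = {};
+//     layer.Header.Type  = ovrLayerType_EyeFov;
+//     layer.Header.Flags = ovrLayerFlag_TextureOriginAtBottomLeft;   // typical for OpenGL
+//     for (int eye = 0; eye < ovrEye_Count; ++eye)
+//     {
+//         layer.ColorTexture[eye] = eyeChain[eye];
+//         layer.Viewport[eye]     = eyeViewport[eye];
+//         layer.Fov[eye]          = hmdDesc.DefaultEyeFov[eye];
+//         layer.RenderPose[eye]   = eyePoses[eye];
+//     }
+//     layer.SensorSampleTime = sensorSampleTime;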
+
+
+
+
+/// Describes a layer that specifies a monoscopic or stereoscopic view.
+/// This uses a direct 3x4 matrix to map from view space to the UV coordinates.
+/// It is essentially the same thing as ovrLayerEyeFov but using a much
+/// lower level. This is mainly to provide compatibility with specific apps.
+/// Unless the application really requires this flexibility, it is usually better
+/// to use ovrLayerEyeFov.
+///
+/// Three options exist with respect to mono/stereo texture usage:
+///    - ColorTexture[0] and ColorTexture[1] contain the left and right stereo renderings, respectively.
+///      Viewport[0] and Viewport[1] refer to ColorTexture[0] and ColorTexture[1], respectively.
+///    - ColorTexture[0] contains both the left and right renderings, ColorTexture[1] is NULL,
+///      and Viewport[0] and Viewport[1] refer to sub-rects within ColorTexture[0].
+///    - ColorTexture[0] contains a single monoscopic rendering, and Viewport[0] and
+///      Viewport[1] both refer to that rendering.
+///
+/// \see ovrTextureSwapChain, ovr_SubmitFrame
+///
+typedef struct OVR_ALIGNAS(OVR_PTR_SIZE) ovrLayerEyeMatrix_
+{
+    /// Header.Type must be ovrLayerType_EyeMatrix.
+    ovrLayerHeader      Header;
+
+    /// ovrTextureSwapChains for the left and right eye respectively.
+    /// The second one of which can be NULL for cases described above.
+    ovrTextureSwapChain  ColorTexture[ovrEye_Count];
+
+    /// Specifies the ColorTexture sub-rect UV coordinates.
+    /// Both Viewport[0] and Viewport[1] must be valid.
+    ovrRecti            Viewport[ovrEye_Count];
+
+    /// Specifies the position and orientation of each eye view, with the position specified in meters.
+    /// RenderPose will typically be the value returned from ovr_CalcEyePoses,
+    /// but can be different in special cases if a different head pose is used for rendering.
+    ovrPosef            RenderPose[ovrEye_Count];
+
+    /// Specifies the mapping from a view-space vector
+    /// to a UV coordinate on the textures given above.
+    /// P = (x,y,z,1)*Matrix
+    /// TexU  = P.x/P.z
+    /// TexV  = P.y/P.z
+    ovrMatrix4f         Matrix[ovrEye_Count];
+
+    /// Specifies the timestamp when the source ovrPosef (used in calculating RenderPose)
+    /// was sampled from the SDK. Typically retrieved by calling ovr_GetTimeInSeconds
+    /// around the instant the application calls ovr_GetTrackingState
+    /// The main purpose for this is to accurately track app tracking latency.
+    double              SensorSampleTime;
+
+} ovrLayerEyeMatrix;
+
+
+
+
+
+/// Describes a layer of Quad type, which is a single quad in world or viewer space.
+/// It is used for ovrLayerType_Quad. This type of layer represents a single
+/// object placed in the world and not a stereo view of the world itself.
+///
+/// A typical use of ovrLayerType_Quad is to draw a television screen in a room
+/// that for some reason is more convenient to draw as a layer than as part of the main
+/// view in layer 0. For example, it could implement a 3D popup GUI that is drawn at a
+/// higher resolution than layer 0 to improve fidelity of the GUI.
+///
+/// Quad layers are visible from both sides; they are not back-face culled.
+///
+/// \see ovrTextureSwapChain, ovr_SubmitFrame
+///
+typedef struct OVR_ALIGNAS(OVR_PTR_SIZE) ovrLayerQuad_
+{
+    /// Header.Type must be ovrLayerType_Quad.
+    ovrLayerHeader      Header;
+
+    /// Contains a single image, never with any stereo view.
+    ovrTextureSwapChain  ColorTexture;
+
+    /// Specifies the ColorTexture sub-rect UV coordinates.
+    ovrRecti            Viewport;
+
+    /// Specifies the orientation and position of the center point of a Quad layer type.
+    /// The supplied direction is the vector perpendicular to the quad.
+    /// The position is in real-world meters (not the application's virtual world,
+    /// the physical world the user is in) and is relative to the "zero" position
+    /// set by ovr_RecenterTrackingOrigin unless the ovrLayerFlag_HeadLocked flag is used.
+    ovrPosef            QuadPoseCenter;
+
+    /// Width and height (respectively) of the quad in meters.
+    ovrVector2f         QuadSize;
+
+} ovrLayerQuad;
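+
+// Illustrative sketch of a head-locked HUD quad one meter in front of the viewer
+// ('hudChain' and 'hudViewport' are assumed to describe a previously created swap chain):
+//
+//     ovrLayerQuad hud = {};
+//     hud.Header.Type  = ovrLayerType_Quad;
+//     hud.Header.Flags = ovrLayerFlag_HeadLocked;
+//     hud.ColorTexture = hudChain;
+//     hud.Viewport     = hudViewport;
+//     hud.QuadPoseCenter.Orientation.w = 1.0f;    // identity orientation
+//     hud.QuadPoseCenter.Position.z    = -1.0f;   // one meter in front of the viewer
+//     hud.QuadSize.x = 0.6f;                      // width in meters
+//     hud.QuadSize.y = 0.4f;                      // height in meters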
+
+
+
+
+/// Union that combines ovrLayer types in a way that allows them
+/// to be used in a polymorphic way.
+typedef union ovrLayer_Union_
+{
+    ovrLayerHeader      Header;
+    ovrLayerEyeFov      EyeFov;
+    ovrLayerQuad        Quad;
+} ovrLayer_Union;
+
+
+//@}
+
+
+
+/// @name SDK Distortion Rendering
+///
+/// All of the rendering functions, including the configure and frame functions,
+/// are not thread safe. It is OK to use ConfigureRendering on one thread and handle
+/// frames on another thread, but explicit synchronization must be done since
+/// functions that depend on configured state are not reentrant.
+///
+/// These functions support rendering of distortion by the SDK.
+///
+//@{
+
+/// TextureSwapChain creation is rendering API-specific.
+/// ovr_CreateTextureSwapChainDX and ovr_CreateTextureSwapChainGL can be found in the
+/// rendering API-specific headers, such as OVR_CAPI_D3D.h and OVR_CAPI_GL.h
+
+/// Gets the number of buffers in an ovrTextureSwapChain.
+///
+/// \param[in]  session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in]  chain Specifies the ovrTextureSwapChain for which the length should be retrieved.
+/// \param[out] out_Length Returns the number of buffers in the specified chain.
+///
+/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error. 
+///
+/// \see ovr_CreateTextureSwapChainDX, ovr_CreateTextureSwapChainGL
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetTextureSwapChainLength(ovrSession session, ovrTextureSwapChain chain, int* out_Length);
+
+/// Gets the current index in an ovrTextureSwapChain.
+///
+/// \param[in]  session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in]  chain Specifies the ovrTextureSwapChain for which the index should be retrieved.
+/// \param[out] out_Index Returns the current (free) index in specified chain.
+///
+/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error. 
+///
+/// \see ovr_CreateTextureSwapChainDX, ovr_CreateTextureSwapChainGL
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetTextureSwapChainCurrentIndex(ovrSession session, ovrTextureSwapChain chain, int* out_Index);
+
+/// Gets the description of the buffers in an ovrTextureSwapChain
+///
+/// \param[in]  session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in]  chain Specifies the ovrTextureSwapChain for which the description should be retrieved.
+/// \param[out] out_Desc Returns the description of the specified chain.
+///
+/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error. 
+///
+/// \see ovr_CreateTextureSwapChainDX, ovr_CreateTextureSwapChainGL
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetTextureSwapChainDesc(ovrSession session, ovrTextureSwapChain chain, ovrTextureSwapChainDesc* out_Desc);
+
+/// Commits any pending changes to an ovrTextureSwapChain, and advances its current index
+///
+/// \param[in]  session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in]  chain Specifies the ovrTextureSwapChain to commit.
+///
+/// \note When Commit is called, the texture at the current index is considered ready for use by the
+/// runtime, and further writes to it should be avoided. The swap chain's current index is advanced,
+/// providing there's room in the chain. The next time the SDK dereferences this texture swap chain,
+/// it will synchronize with the app's graphics context and pick up the submitted index, opening up
+/// room in the swap chain for further commits.
+///
+/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error. 
+///         Failures include but aren't limited to:
+///     - ovrError_TextureSwapChainFull: ovr_CommitTextureSwapChain was called too many times on a texture swapchain without calling submit to use the chain.
+///
+/// \see ovr_CreateTextureSwapChainDX, ovr_CreateTextureSwapChainGL
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_CommitTextureSwapChain(ovrSession session, ovrTextureSwapChain chain);
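+
+// Illustrative per-frame sketch for OpenGL ('chain' is a previously created swap chain;
+// ovr_GetTextureSwapChainBufferGL is declared in OVR_CAPI_GL.h):
+//
+//     int curIndex = 0;
+//     unsigned int texId = 0;
+//     ovr_GetTextureSwapChainCurrentIndex(session, chain, &curIndex);
+//     ovr_GetTextureSwapChainBufferGL(session, chain, curIndex, &texId);
+//     // ... attach texId to a framebuffer object and render the eye view ...
+//     ovr_CommitTextureSwapChain(session, chain);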
+
+/// Destroys an ovrTextureSwapChain and frees all the resources associated with it.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] chain Specifies the ovrTextureSwapChain to destroy. If it is NULL then this function has no effect.
+///
+/// \see ovr_CreateTextureSwapChainDX, ovr_CreateTextureSwapChainGL
+///
+OVR_PUBLIC_FUNCTION(void) ovr_DestroyTextureSwapChain(ovrSession session, ovrTextureSwapChain chain);
+
+
+/// MirrorTexture creation is rendering API-specific.
+/// ovr_CreateMirrorTextureDX and ovr_CreateMirrorTextureGL can be found in the
+/// rendering API-specific headers, such as OVR_CAPI_D3D.h and OVR_CAPI_GL.h
+
+/// Destroys a mirror texture previously created by one of the mirror texture creation functions.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] mirrorTexture Specifies the ovrTexture to destroy. If it is NULL then this function has no effect.
+///
+/// \see ovr_CreateMirrorTextureDX, ovr_CreateMirrorTextureGL
+///
+OVR_PUBLIC_FUNCTION(void) ovr_DestroyMirrorTexture(ovrSession session, ovrMirrorTexture mirrorTexture);
+
+
+/// Calculates the recommended viewport size for rendering a given eye within the HMD
+/// with a given FOV cone.
+///
+/// Higher FOV will generally require larger textures to maintain quality.
+/// Apps packing multiple eye views together on the same texture should ensure there are
+/// at least 8 pixels of padding between them to prevent texture filtering and chromatic
+/// aberration causing images to leak between the two eye views.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] eye Specifies which eye (left or right) to calculate for.
+/// \param[in] fov Specifies the ovrFovPort to use.
+/// \param[in] pixelsPerDisplayPixel Specifies the ratio of the number of render target pixels
+///            to display pixels at the center of distortion. 1.0 is the default value. Lower
+///            values can improve performance, higher values give improved quality.
+/// \return Returns the texture width and height size.
+///
+OVR_PUBLIC_FUNCTION(ovrSizei) ovr_GetFovTextureSize(ovrSession session, ovrEyeType eye, ovrFovPort fov,
+                                                       float pixelsPerDisplayPixel);
+
+/// Computes the distortion viewport, view adjust, and other rendering parameters for
+/// the specified eye.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] eyeType Specifies which eye (left or right) for which to perform calculations.
+/// \param[in] fov Specifies the ovrFovPort to use.
+///
+/// \return Returns the computed ovrEyeRenderDesc for the given eyeType and field of view.
+///
+/// \see ovrEyeRenderDesc
+///
+OVR_PUBLIC_FUNCTION(ovrEyeRenderDesc) ovr_GetRenderDesc(ovrSession session,
+                                                           ovrEyeType eyeType, ovrFovPort fov);
+
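+// Illustrative setup sketch combining ovr_GetHmdDesc, ovr_GetFovTextureSize and ovr_GetRenderDesc
+// ('session' assumed valid):
+//
+//     ovrHmdDesc hmdDesc  = ovr_GetHmdDesc(session);
+//     ovrSizei   texSizeL = ovr_GetFovTextureSize(session, ovrEye_Left,  hmdDesc.DefaultEyeFov[0], 1.0f);
+//     ovrSizei   texSizeR = ovr_GetFovTextureSize(session, ovrEye_Right, hmdDesc.DefaultEyeFov[1], 1.0f);
+//
+//     ovrEyeRenderDesc eyeRenderDesc[ovrEye_Count];
+//     eyeRenderDesc[0] = ovr_GetRenderDesc(session, ovrEye_Left,  hmdDesc.DefaultEyeFov[0]);
+//     eyeRenderDesc[1] = ovr_GetRenderDesc(session, ovrEye_Right, hmdDesc.DefaultEyeFov[1]);
+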
+/// Submits layers for distortion and display.
+///
+/// ovr_SubmitFrame triggers distortion and processing which might happen asynchronously.
+/// The function will return when there is room in the submission queue and surfaces
+/// are available. Distortion might or might not have completed.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+///
+/// \param[in] frameIndex Specifies the targeted application frame index, or 0 to refer to one frame
+///        after the last time ovr_SubmitFrame was called.
+///
+/// \param[in] viewScaleDesc Provides additional information needed only if layerPtrList contains
+///        an ovrLayerType_Quad. If NULL, a default version is used based on the current configuration and a 1.0 world scale.
+///
+/// \param[in] layerPtrList Specifies a list of ovrLayer pointers, which can include NULL entries to
+///        indicate that any previously shown layer at that index is to not be displayed.
+///        Each layer header must be a part of a layer structure such as ovrLayerEyeFov or ovrLayerQuad,
+///        with Header.Type identifying its type. A NULL layerPtrList entry in the array indicates the
+///        absence of the given layer.
+///
+/// \param[in] layerCount Indicates the number of valid elements in layerPtrList. The maximum
+///        supported layerCount is not currently specified, but may be specified in a future version.
+///
+/// - Layers are drawn in the order they are specified in the array, regardless of the layer type.
+///
+/// - Layers are not remembered between successive calls to ovr_SubmitFrame. A layer must be
+///   specified in every call to ovr_SubmitFrame or it won't be displayed.
+///
+/// - If a layerPtrList entry that was specified in a previous call to ovr_SubmitFrame is
+///   passed as NULL or is of type ovrLayerType_Disabled, that layer is no longer displayed.
+///
+/// - A layerPtrList entry can be of any layer type and multiple entries of the same layer type
+///   are allowed. No layerPtrList entry may be duplicated (i.e. the same pointer as an earlier entry).
+///
+/// <b>Example code</b>
+///     \code{.cpp}
+///         ovrLayerEyeFov  layer0;
+///         ovrLayerQuad    layer1;
+///           ...
+///         ovrLayerHeader* layers[2] = { &layer0.Header, &layer1.Header };
+///         ovrResult result = ovr_SubmitFrame(hmd, frameIndex, nullptr, layers, 2);
+///     \endcode
+///
+/// \return Returns an ovrResult for which OVR_SUCCESS(result) is false upon error and true
+///         upon success. Return values include but aren't limited to:
+///     - ovrSuccess: rendering completed successfully.
+///     - ovrSuccess_NotVisible: rendering completed successfully but was not displayed on the HMD,
+///       usually because another application currently has ownership of the HMD. Applications receiving
+///       this result should stop rendering new content, but continue to call ovr_SubmitFrame periodically
+///       until it returns a value other than ovrSuccess_NotVisible.
+///     - ovrError_DisplayLost: The session has become invalid (such as due to a device removal)
+///       and the shared resources need to be released (ovr_DestroyTextureSwapChain), the session needs to
+///       be destroyed (ovr_Destroy) and recreated (ovr_Create), and new resources need to be created
+///       (ovr_CreateTextureSwapChainXXX). The application's existing private graphics resources do not
+///       need to be recreated unless the new ovr_Create call returns a different GraphicsLuid.
+///     - ovrError_TextureSwapChainInvalid: The ovrTextureSwapChain is in an incomplete or inconsistent state. 
+///       Ensure ovr_CommitTextureSwapChain was called at least once first.
+///
+/// \see ovr_GetPredictedDisplayTime, ovrViewScaleDesc, ovrLayerHeader
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_SubmitFrame(ovrSession session, long long frameIndex,
+                                                  const ovrViewScaleDesc* viewScaleDesc,
+                                                  ovrLayerHeader const * const * layerPtrList, unsigned int layerCount);
+///@}
+
+
+
+//-------------------------------------------------------------------------------------
+/// @name Frame Timing
+///
+//@{
+
+
+/// Gets the time of the specified frame midpoint.
+///
+/// Predicts the time at which the given frame will be displayed. The predicted time 
+/// is the middle of the time period during which the corresponding eye images will 
+/// be displayed. 
+///
+/// The application should increment frameIndex for each successively targeted frame,
+/// and pass that index to any relevant OVR functions that need to apply to the frame
+/// identified by that index.
+///
+/// This function is thread-safe and allows for multiple application threads to target
+/// their processing to the same displayed frame.
+/// 
+/// In the event that prediction fails for various reasons (e.g. the display being off
+/// or the app has yet to present any frames), the return value will be the current CPU time.
+/// 
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] frameIndex Identifies the frame the caller wishes to target.
+///            A value of zero returns the next frame index.
+/// \return Returns the absolute frame midpoint time for the given frameIndex. 
+/// \see ovr_GetTimeInSeconds
+///
+OVR_PUBLIC_FUNCTION(double) ovr_GetPredictedDisplayTime(ovrSession session, long long frameIndex);
+
+
+/// Returns global, absolute high-resolution time in seconds.
+///
+/// The time frame of reference for this function is not specified and should not be
+/// depended upon.
+///
+/// \return Returns seconds as a floating point value.
+/// \see ovrPoseStatef, ovrFrameTiming
+///
+OVR_PUBLIC_FUNCTION(double) ovr_GetTimeInSeconds();
+
+
+/// Performance HUD enables the HMD user to see information critical to
+/// the real-time operation of the VR application, such as latency timing
+/// and CPU & GPU performance metrics.
+///
+///     App can toggle performance HUD modes as such:
+///     \code{.cpp}
+///         ovrPerfHudMode PerfHudMode = ovrPerfHud_LatencyTiming;
+///         ovr_SetInt(Hmd, OVR_PERF_HUD_MODE, (int)PerfHudMode);
+///     \endcode
+///
+typedef enum ovrPerfHudMode_
+{
+    ovrPerfHud_Off                = 0,  ///< Turns off the performance HUD
+    ovrPerfHud_PerfSummary        = 1,  ///< Shows performance summary and headroom
+    ovrPerfHud_LatencyTiming      = 2,  ///< Shows latency related timing info
+    ovrPerfHud_AppRenderTiming    = 3,  ///< Shows render timing info for application
+    ovrPerfHud_CompRenderTiming   = 4,  ///< Shows render timing info for OVR compositor
+    ovrPerfHud_VersionInfo        = 5,  ///< Shows SDK & HMD version Info
+    ovrPerfHud_Count              = 6,  ///< \internal Count of enumerated elements.
+    ovrPerfHud_EnumSize = 0x7fffffff    ///< \internal Force type int32_t.
+} ovrPerfHudMode;
+
+/// Layer HUD enables the HMD user to see information about a layer
+///
+///     App can toggle layer HUD modes as such:
+///     \code{.cpp}
+///         ovrLayerHudMode LayerHudMode = ovrLayerHud_Info;
+///         ovr_SetInt(Hmd, OVR_LAYER_HUD_MODE, (int)LayerHudMode);
+///     \endcode
+///
+typedef enum ovrLayerHudMode_
+{
+    ovrLayerHud_Off = 0, ///< Turns off the layer HUD
+    ovrLayerHud_Info = 1, ///< Shows info about a specific layer
+    ovrLayerHud_EnumSize = 0x7fffffff
+} ovrLayerHudMode;
+
+///@}
+
+/// Debug HUD is provided to help developers gauge and debug the fidelity of their app's
+/// stereo rendering characteristics. Using the provided quad and crosshair guides, 
+/// the developer can verify various aspects such as VR tracking units (e.g. meters),
+/// stereo camera-parallax properties (e.g. making sure objects at infinity are rendered
+/// with the proper separation), measuring VR geometry sizes and distances and more.
+///
+///     App can toggle the debug HUD modes as such:
+///     \code{.cpp}
+///         ovrDebugHudStereoMode DebugHudMode = ovrDebugHudStereo_QuadWithCrosshair;
+///         ovr_SetInt(Hmd, OVR_DEBUG_HUD_STEREO_MODE, (int)DebugHudMode);
+///     \endcode
+///
+/// The app can modify the visual properties of the stereo guide (i.e. quad, crosshair)
+/// using the ovr_SetFloatArray function. For a list of tweakable properties,
+/// see the OVR_DEBUG_HUD_STEREO_GUIDE_* keys in the OVR_CAPI_Keys.h header file.
+typedef enum ovrDebugHudStereoMode_
+{
+    ovrDebugHudStereo_Off                 = 0,  ///< Turns off the Stereo Debug HUD
+    ovrDebugHudStereo_Quad                = 1,  ///< Renders Quad in world for Stereo Debugging
+    ovrDebugHudStereo_QuadWithCrosshair   = 2,  ///< Renders Quad+crosshair in world for Stereo Debugging
+    ovrDebugHudStereo_CrosshairAtInfinity = 3,  ///< Renders screen-space crosshair at infinity for Stereo Debugging
+    ovrDebugHudStereo_Count,                    ///< \internal Count of enumerated elements
+
+    ovrDebugHudStereo_EnumSize = 0x7fffffff     ///< \internal Force type int32_t
+} ovrDebugHudStereoMode;
+
+
+
+
+// -----------------------------------------------------------------------------------
+/// @name Property Access
+///
+/// These functions read and write OVR properties. Supported properties
+/// are defined in OVR_CAPI_Keys.h
+///
+//@{
+
+/// Reads a boolean property.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] propertyName The name of the property, which needs to be valid only for the call.
+/// \param[in] defaultVal Specifies the value to return if the property couldn't be read.
+/// \return Returns the property interpreted as a boolean value. Returns defaultVal if
+///         the property doesn't exist.
+OVR_PUBLIC_FUNCTION(ovrBool) ovr_GetBool(ovrSession session, const char* propertyName, ovrBool defaultVal);
+
+/// Writes or creates a boolean property.
+/// If the property wasn't previously a boolean property, it is changed to a boolean property.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] propertyName The name of the property, which needs to be valid only for the call.
+/// \param[in] value The value to write.
+/// \return Returns true if successful, otherwise false. A false result should only occur if the property
+///         name is empty or if the property is read-only.
+OVR_PUBLIC_FUNCTION(ovrBool) ovr_SetBool(ovrSession session, const char* propertyName, ovrBool value);
+
+
+/// Reads an integer property.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] propertyName The name of the property, which needs to be valid only for the call.
+/// \param[in] defaultVal Specifies the value to return if the property couldn't be read.
+/// \return Returns the property interpreted as an integer value. Returns defaultVal if
+///         the property doesn't exist.
+OVR_PUBLIC_FUNCTION(int) ovr_GetInt(ovrSession session, const char* propertyName, int defaultVal);
+
+/// Writes or creates an integer property.
+///
+/// If the property wasn't previously an integer property, it is changed to an integer property.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] propertyName The name of the property, which needs to be valid only for the call.
+/// \param[in] value The value to write.
+/// \return Returns true if successful, otherwise false. A false result should only occur if the property
+///         name is empty or if the property is read-only.
+OVR_PUBLIC_FUNCTION(ovrBool) ovr_SetInt(ovrSession session, const char* propertyName, int value);
+
+
+/// Reads a float property.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] propertyName The name of the property, which needs to be valid only for the call.
+/// \param[in] defaultVal Specifies the value to return if the property couldn't be read.
+/// \return Returns the property interpreted as a float value. Returns defaultVal if
+///         the property doesn't exist.
+OVR_PUBLIC_FUNCTION(float) ovr_GetFloat(ovrSession session, const char* propertyName, float defaultVal);
+
+/// Writes or creates a float property.
+/// If the property wasn't previously a float property, it's changed to a float property.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] propertyName The name of the property, which needs to be valid only for the call.
+/// \param[in] value The value to write.
+/// \return Returns true if successful, otherwise false. A false result should only occur if the property
+///         name is empty or if the property is read-only.
+OVR_PUBLIC_FUNCTION(ovrBool) ovr_SetFloat(ovrSession session, const char* propertyName, float value);
+
+
+/// Reads a float array property.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] propertyName The name of the property, which needs to be valid only for the call.
+/// \param[in] values An array of floats to write to.
+/// \param[in] valuesCapacity Specifies the maximum number of elements to write to the values array.
+/// \return Returns the number of elements read, or 0 if property doesn't exist or is empty.
+OVR_PUBLIC_FUNCTION(unsigned int) ovr_GetFloatArray(ovrSession session, const char* propertyName,
+                                                       float values[], unsigned int valuesCapacity);
+
+/// Writes or creates a float array property.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] propertyName The name of the property, which needs to be valid only for the call.
+/// \param[in] values An array of floats to write from.
+/// \param[in] valuesSize Specifies the number of elements to write.
+/// \return Returns true if successful, otherwise false. A false result should only occur if the property
+///         name is empty or if the property is read-only.
+OVR_PUBLIC_FUNCTION(ovrBool) ovr_SetFloatArray(ovrSession session, const char* propertyName,
+                                                  const float values[], unsigned int valuesSize);
+
+
+/// Reads a string property.
+/// Strings are UTF8-encoded and null-terminated.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] propertyName The name of the property, which needs to be valid only for the call.
+/// \param[in] defaultVal Specifies the value to return if the property couldn't be read.
+/// \return Returns the string property if it exists. Otherwise returns defaultVal, which can be specified as NULL.
+///         The return memory is guaranteed to be valid until next call to ovr_GetString or
+///         until the HMD is destroyed, whichever occurs first.
+OVR_PUBLIC_FUNCTION(const char*) ovr_GetString(ovrSession session, const char* propertyName,
+                                                  const char* defaultVal);
+
+/// Writes or creates a string property.
+/// Strings are UTF8-encoded and null-terminated.
+///
+/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in] propertyName The name of the property, which needs to be valid only for the call.
+/// \param[in] value The string property, which only needs to be valid for the duration of the call.
+/// \return Returns true if successful, otherwise false. A false result should only occur if the property
+///         name is empty or if the property is read-only.
+OVR_PUBLIC_FUNCTION(ovrBool) ovr_SetString(ovrSession session, const char* propertyName,
+                                              const char* value);
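+
+// A minimal usage sketch of the property accessors above, assuming a valid 'session'
+// returned by ovr_Create (the property keys are defined in OVR_CAPI_Keys.h):
+//
+//     float eyeHeight = ovr_GetFloat(session, OVR_KEY_EYE_HEIGHT, OVR_DEFAULT_EYE_HEIGHT);
+//     ovr_SetInt(session, OVR_PERF_HUD_MODE, (int)ovrPerfHud_PerfSummary);
+//     const char *gender = ovr_GetString(session, OVR_KEY_GENDER, OVR_DEFAULT_GENDER);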
+
+///@}
+
+
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+
+#if defined(_MSC_VER)
+    #pragma warning(pop)
+#endif
+
+/// @cond DoxygenIgnore
+//-----------------------------------------------------------------------------
+// ***** Compiler packing validation
+//
+// These checks ensure that the compiler settings being used will be compatible
+// with the pre-built dynamic library provided with the runtime.
+
+OVR_STATIC_ASSERT(sizeof(ovrBool) == 1,         "ovrBool size mismatch");
+OVR_STATIC_ASSERT(sizeof(ovrVector2i) == 4 * 2, "ovrVector2i size mismatch");
+OVR_STATIC_ASSERT(sizeof(ovrSizei) == 4 * 2,    "ovrSizei size mismatch");
+OVR_STATIC_ASSERT(sizeof(ovrRecti) == sizeof(ovrVector2i) + sizeof(ovrSizei), "ovrRecti size mismatch");
+OVR_STATIC_ASSERT(sizeof(ovrQuatf) == 4 * 4,    "ovrQuatf size mismatch");
+OVR_STATIC_ASSERT(sizeof(ovrVector2f) == 4 * 2, "ovrVector2f size mismatch");
+OVR_STATIC_ASSERT(sizeof(ovrVector3f) == 4 * 3, "ovrVector3f size mismatch");
+OVR_STATIC_ASSERT(sizeof(ovrMatrix4f) == 4 * 16, "ovrMatrix4f size mismatch");
+
+OVR_STATIC_ASSERT(sizeof(ovrPosef) == (7 * 4),       "ovrPosef size mismatch");
+OVR_STATIC_ASSERT(sizeof(ovrPoseStatef) == (22 * 4), "ovrPoseStatef size mismatch");
+OVR_STATIC_ASSERT(sizeof(ovrFovPort) == (4 * 4),     "ovrFovPort size mismatch");
+
+OVR_STATIC_ASSERT(sizeof(ovrHmdCaps) == 4,      "ovrHmdCaps size mismatch");
+OVR_STATIC_ASSERT(sizeof(ovrTrackingCaps) == 4, "ovrTrackingCaps size mismatch");
+OVR_STATIC_ASSERT(sizeof(ovrEyeType) == 4,      "ovrEyeType size mismatch");
+OVR_STATIC_ASSERT(sizeof(ovrHmdType) == 4,      "ovrHmdType size mismatch");
+
+OVR_STATIC_ASSERT(sizeof(ovrTrackerDesc) == 4 + 4 + 4 + 4, "ovrTrackerDesc size mismatch");
+OVR_STATIC_ASSERT(sizeof(ovrTrackerPose) == 4 + 4 + sizeof(ovrPosef) + sizeof(ovrPosef), "ovrTrackerPose size mismatch");
+OVR_STATIC_ASSERT(sizeof(ovrTrackingState) == sizeof(ovrPoseStatef) + 4 + 4 + (sizeof(ovrPoseStatef) * 2) + (sizeof(unsigned int) * 2) + sizeof(ovrPosef) + 4, "ovrTrackingState size mismatch");
+
+
+//OVR_STATIC_ASSERT(sizeof(ovrTextureHeader) == sizeof(ovrRenderAPIType) + sizeof(ovrSizei),
+//                      "ovrTextureHeader size mismatch");
+//OVR_STATIC_ASSERT(sizeof(ovrTexture) == sizeof(ovrTextureHeader) OVR_ON64(+4) + sizeof(uintptr_t) * 8,
+//                      "ovrTexture size mismatch");
+//
+OVR_STATIC_ASSERT(sizeof(ovrStatusBits) == 4, "ovrStatusBits size mismatch");
+
+OVR_STATIC_ASSERT(sizeof(ovrSessionStatus) == 6, "ovrSessionStatus size mismatch");
+
+OVR_STATIC_ASSERT(sizeof(ovrEyeRenderDesc) == sizeof(ovrEyeType) + sizeof(ovrFovPort) + sizeof(ovrRecti) +
+                                                  sizeof(ovrVector2f) + sizeof(ovrVector3f),
+                      "ovrEyeRenderDesc size mismatch");
+OVR_STATIC_ASSERT(sizeof(ovrTimewarpProjectionDesc) == 4 * 3, "ovrTimewarpProjectionDesc size mismatch");
+
+OVR_STATIC_ASSERT(sizeof(ovrInitFlags) == 4, "ovrInitFlags size mismatch");
+OVR_STATIC_ASSERT(sizeof(ovrLogLevel) == 4, "ovrLogLevel size mismatch");
+
+OVR_STATIC_ASSERT(sizeof(ovrInitParams) == 4 + 4 + sizeof(ovrLogCallback) + sizeof(uintptr_t) + 4 + 4,
+                      "ovrInitParams size mismatch");
+
+OVR_STATIC_ASSERT(sizeof(ovrHmdDesc) == 
+    + sizeof(ovrHmdType)                // Type
+    OVR_ON64(+ 4)                       // pad0
+    + 64                                // ProductName 
+    + 64                                // Manufacturer
+    + 2                                 // VendorId
+    + 2                                 // ProductId
+    + 24                                // SerialNumber
+    + 2                                 // FirmwareMajor
+    + 2                                 // FirmwareMinor
+    + 4 * 4                             // AvailableHmdCaps - DefaultTrackingCaps
+    + sizeof(ovrFovPort) * 2            // DefaultEyeFov
+    + sizeof(ovrFovPort) * 2            // MaxEyeFov
+    + sizeof(ovrSizei)                  // Resolution
+    + 4                                 // DisplayRefreshRate
+    OVR_ON64(+ 4)                       // pad1
+    , "ovrHmdDesc size mismatch");
+
+
+// -----------------------------------------------------------------------------------
+// ***** Backward compatibility #includes
+//
+// This is at the bottom of this file because the following is dependent on the
+// declarations above.
+
+#if !defined(OVR_CAPI_NO_UTILS)
+    #include "Extras/OVR_CAPI_Util.h"
+#endif
+
+/// @endcond
+
+#endif // OVR_CAPI_h

+ 76 - 0
examples/oculus_glfw_sample/OculusSDK/LibOVR/Include/OVR_CAPI_Audio.h

@@ -0,0 +1,76 @@
+/********************************************************************************//**
+\file      OVR_CAPI_Audio.h
+\brief     CAPI audio functions.
+\copyright Copyright 2015 Oculus VR, LLC. All Rights reserved.
+************************************************************************************/
+
+
+#ifndef OVR_CAPI_Audio_h
+#define OVR_CAPI_Audio_h
+
+#ifdef _WIN32
+#include <windows.h>
+#include "OVR_CAPI.h"
+#define OVR_AUDIO_MAX_DEVICE_STR_SIZE 128
+
+/// Gets the ID of the preferred VR audio output device.
+///
+/// \param[out] deviceOutId The ID of the user's preferred VR audio device to use, which will be valid upon a successful return value, else it will be WAVE_MAPPER.
+///
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
+///         ovr_GetLastErrorInfo to get more information.
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetAudioDeviceOutWaveId(UINT* deviceOutId);
+
+/// Gets the ID of the preferred VR audio input device.
+///
+/// \param[out] deviceInId The ID of the user's preferred VR audio device to use, which will be valid upon a successful return value, else it will be WAVE_MAPPER.
+///
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
+///         ovr_GetLastErrorInfo to get more information.
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetAudioDeviceInWaveId(UINT* deviceInId);
+
+
+/// Gets the GUID of the preferred VR audio output device as a string.
+///
+/// \param[out] deviceOutStrBuffer A buffer where the GUID string for the device will be copied to.
+///
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
+///         ovr_GetLastErrorInfo to get more information.
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetAudioDeviceOutGuidStr(WCHAR deviceOutStrBuffer[OVR_AUDIO_MAX_DEVICE_STR_SIZE]);
+
+
+/// Gets the GUID of the preferred VR audio output device.
+///
+/// \param[out] deviceOutGuid The GUID of the user's preferred VR audio device to use, which will be valid upon a successful return value, else it will be NULL.
+///
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
+///         ovr_GetLastErrorInfo to get more information.
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetAudioDeviceOutGuid(GUID* deviceOutGuid);
+
+
+/// Gets the GUID of the preferred VR microphone device as a string.
+///
+/// \param[out] deviceInStrBuffer A buffer where the GUID string for the device will be copied to.
+///
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
+///         ovr_GetLastErrorInfo to get more information.
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetAudioDeviceInGuidStr(WCHAR deviceInStrBuffer[OVR_AUDIO_MAX_DEVICE_STR_SIZE]);
+
+
+/// Gets the GUID of the preferred VR microphone device.
+///
+/// \param[out] deviceInGuid The GUID of the user's preferred VR audio device to use, which will be valid upon a successful return value, else it will be NULL.
+///
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
+///         ovr_GetLastErrorInfo to get more information.
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetAudioDeviceInGuid(GUID* deviceInGuid);
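+
+// A minimal sketch of querying the Rift's preferred audio output device, assuming
+// ovr_Initialize has already succeeded; how the returned IDs are consumed by the audio
+// API is up to the application:
+//
+//     UINT waveId = WAVE_MAPPER;
+//     WCHAR guidStr[OVR_AUDIO_MAX_DEVICE_STR_SIZE];
+//     if (OVR_SUCCESS(ovr_GetAudioDeviceOutWaveId(&waveId)) &&
+//         OVR_SUCCESS(ovr_GetAudioDeviceOutGuidStr(guidStr)))
+//     {
+//         // waveId can be passed to waveOutOpen(); guidStr can be matched against WASAPI endpoints
+//     }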
+
+#endif // _WIN32
+
+#endif    // OVR_CAPI_Audio_h

+ 131 - 0
examples/oculus_glfw_sample/OculusSDK/LibOVR/Include/OVR_CAPI_D3D.h

@@ -0,0 +1,131 @@
+/********************************************************************************//**
+\file      OVR_CAPI_D3D.h
+\brief     D3D specific structures used by the CAPI interface.
+\copyright Copyright 2014-2016 Oculus VR, LLC All Rights reserved.
+************************************************************************************/
+
+#ifndef OVR_CAPI_D3D_h
+#define OVR_CAPI_D3D_h
+
+#include "OVR_CAPI.h"
+#include "OVR_Version.h"
+
+
+#if defined (_WIN32)
+#include <Unknwn.h>
+
+//-----------------------------------------------------------------------------------
+// ***** Direct3D Specific
+
+/// Create Texture Swap Chain suitable for use with Direct3D 11 and 12.
+///
+/// \param[in]  session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in]  d3dPtr Specifies the application's D3D11Device to create resources with or the D3D12CommandQueue
+///             which must be the same one the application renders to the eye textures with.
+/// \param[in]  desc Specifies requested texture properties, including the ovrTextureBindFlags the application
+///             requires for this texture chain (via its BindFlags field). See notes for more info about texture format.
+/// \param[out] out_TextureSwapChain Returns the created ovrTextureSwapChain, which will be valid upon a successful return value, else it will be NULL.
+///             This texture chain must be eventually destroyed via ovr_DestroyTextureSwapChain before destroying the HMD with ovr_Destroy.
+///
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use 
+///         ovr_GetLastErrorInfo to get more information.
+///
+/// \note The texture format provided in \a desc should be thought of as the format the distortion-compositor will use for the
+/// ShaderResourceView when reading the contents of the texture. To that end, it is highly recommended that the application
+/// requests texture swapchain formats that are in sRGB-space (e.g. OVR_FORMAT_R8G8B8A8_UNORM_SRGB) as the compositor
+/// does sRGB-correct rendering. As such, the compositor relies on the GPU's hardware sampler to do the sRGB-to-linear
+/// conversion. If the application still prefers to render to a linear format (e.g. OVR_FORMAT_R8G8B8A8_UNORM) while handling the
+/// linear-to-gamma conversion via HLSL code, then the application must still request the corresponding sRGB format and also use
+/// the \a ovrTextureMisc_DX_Typeless flag in the ovrTextureSwapChainDesc's Flag field. This will allow the application to create
+/// a RenderTargetView that is the desired linear format while the compositor continues to treat it as sRGB. Failure to do so
+/// will cause the compositor to apply unexpected gamma conversions leading to gamma-curve artifacts. The \a ovrTextureMisc_DX_Typeless
+/// flag for depth buffer formats (e.g. OVR_FORMAT_D32_FLOAT) is ignored as they are always converted to be typeless.
+///
+/// \see ovr_GetTextureSwapChainLength
+/// \see ovr_GetTextureSwapChainCurrentIndex
+/// \see ovr_GetTextureSwapChainDesc
+/// \see ovr_GetTextureSwapChainBufferDX
+/// \see ovr_DestroyTextureSwapChain
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_CreateTextureSwapChainDX(ovrSession session,
+                                                            IUnknown* d3dPtr,
+                                                            const ovrTextureSwapChainDesc* desc,
+                                                            ovrTextureSwapChain* out_TextureSwapChain);
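+
+// A minimal sketch of the sRGB + typeless setup described in the note above, assuming a valid
+// 'session' and an ID3D11Device* named 'device'; the width/height are placeholders for values
+// queried with ovr_GetFovTextureSize:
+//
+//     ovrTextureSwapChainDesc desc = {0};
+//     desc.Type        = ovrTexture_2D;
+//     desc.ArraySize   = 1;
+//     desc.Format      = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;   // compositor samples as sRGB
+//     desc.Width       = 1344;
+//     desc.Height      = 1600;
+//     desc.MipLevels   = 1;
+//     desc.SampleCount = 1;
+//     desc.MiscFlags   = ovrTextureMisc_DX_Typeless;       // allows creating a linear-format RTV
+//     desc.BindFlags   = ovrTextureBind_DX_RenderTarget;
+//
+//     ovrTextureSwapChain chain = NULL;
+//     ovrResult result = ovr_CreateTextureSwapChainDX(session, device, &desc, &chain);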
+
+
+/// Get a specific buffer within the chain as any compatible COM interface (similar to QueryInterface)
+///
+/// \param[in]  session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in]  chain Specifies an ovrTextureSwapChain previously returned by ovr_CreateTextureSwapChainDX
+/// \param[in]  index Specifies the index within the chain to retrieve. Must be between 0 and length (see ovr_GetTextureSwapChainLength),
+///             or may pass -1 to get the buffer at the CurrentIndex location. (Saving a call to GetTextureSwapChainCurrentIndex)
+/// \param[in]  iid Specifies the interface ID of the interface pointer to query the buffer for.
+/// \param[out] out_Buffer Returns the COM interface pointer retrieved.
+///
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use 
+///         ovr_GetLastErrorInfo to get more information.
+///
+/// <b>Example code</b>
+///     \code{.cpp}
+///         ovr_GetTextureSwapChainBufferDX(session, chain, 0, IID_ID3D11Texture2D, &d3d11Texture);
+///         ovr_GetTextureSwapChainBufferDX(session, chain, 1, IID_PPV_ARGS(&dxgiResource));
+///     \endcode
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetTextureSwapChainBufferDX(ovrSession session,
+                                                               ovrTextureSwapChain chain,
+                                                               int index,
+                                                               IID iid,
+                                                               void** out_Buffer);
+
+
+/// Create Mirror Texture which is auto-refreshed to mirror Rift contents produced by this application.
+///
+/// A second call to ovr_CreateMirrorTextureDX for a given ovrSession before destroying the first one
+/// is not supported and will result in an error return.
+///
+/// \param[in]  session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in]  d3dPtr Specifies the application's D3D11Device to create resources with or the D3D12CommandQueue
+///             which must be the same one the application renders to the textures with.
+/// \param[in]  desc Specifies requested texture properties. See notes for more info about texture format.
+/// \param[out] out_MirrorTexture Returns the created ovrMirrorTexture, which will be valid upon a successful return value, else it will be NULL.
+///             This texture must be eventually destroyed via ovr_DestroyMirrorTexture before destroying the HMD with ovr_Destroy.
+///
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use 
+///         ovr_GetLastErrorInfo to get more information.
+///
+/// \note The texture format provided in \a desc should be thought of as the format the compositor will use for the RenderTargetView when
+/// writing into mirror texture. To that end, it is highly recommended that the application requests a mirror texture format that is
+/// in sRGB-space (e.g. OVR_FORMAT_R8G8B8A8_UNORM_SRGB) as the compositor does sRGB-correct rendering. If however the application wants
+/// to still read the mirror texture as a linear format (e.g. OVR_FORMAT_R8G8B8A8_UNORM) and handle the sRGB-to-linear conversion in
+/// HLSL code, then it is recommended the application still requests an sRGB format and also use the \a ovrTextureMisc_DX_Typeless flag in the
+/// ovrMirrorTextureDesc's Flags field. This will allow the application to bind a ShaderResourceView that is a linear format while the
+/// compositor continues to treat it as sRGB. Failure to do so will cause the compositor to apply unexpected gamma conversions leading to
+/// gamma-curve artifacts.
+///
+/// \see ovr_GetMirrorTextureBufferDX
+/// \see ovr_DestroyMirrorTexture
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_CreateMirrorTextureDX(ovrSession session,
+                                                         IUnknown* d3dPtr,
+                                                         const ovrMirrorTextureDesc* desc,
+                                                         ovrMirrorTexture* out_MirrorTexture);
+
+/// Get the underlying buffer as any compatible COM interface (similar to QueryInterface)
+///
+/// \param[in]  session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in]  mirrorTexture Specifies an ovrMirrorTexture previously returned by ovr_CreateMirrorTextureDX
+/// \param[in]  iid Specifies the interface ID of the interface pointer to query the buffer for.
+/// \param[out] out_Buffer Returns the COM interface pointer retrieved.
+///
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use 
+///         ovr_GetLastErrorInfo to get more information.
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetMirrorTextureBufferDX(ovrSession session,
+                                                            ovrMirrorTexture mirrorTexture,
+                                                            IID iid,
+                                                            void** out_Buffer);
+
+
+#endif // _WIN32
+
+#endif    // OVR_CAPI_D3D_h

+ 99 - 0
examples/oculus_glfw_sample/OculusSDK/LibOVR/Include/OVR_CAPI_GL.h

@@ -0,0 +1,99 @@
+/********************************************************************************//**
+\file      OVR_CAPI_GL.h
+\brief     OpenGL-specific structures used by the CAPI interface.
+\copyright Copyright 2015 Oculus VR, LLC. All Rights reserved.
+************************************************************************************/
+
+#ifndef OVR_CAPI_GL_h
+#define OVR_CAPI_GL_h
+
+#include "OVR_CAPI.h"
+
+/// Creates a TextureSwapChain suitable for use with OpenGL.
+///
+/// \param[in]  session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in]  desc Specifies the requested texture properties. See notes for more info about texture format.
+/// \param[out] out_TextureSwapChain Returns the created ovrTextureSwapChain, which will be valid upon
+///             a successful return value, else it will be NULL. This texture swap chain must be eventually
+///             destroyed via ovr_DestroyTextureSwapChain before destroying the HMD with ovr_Destroy.
+///
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use 
+///         ovr_GetLastErrorInfo to get more information.
+///
+/// \note The \a format provided should be thought of as the format the distortion compositor will use when reading
+/// the contents of the texture. To that end, it is highly recommended that the application requests texture swap chain
+/// formats that are in sRGB-space (e.g. OVR_FORMAT_R8G8B8A8_UNORM_SRGB) as the distortion compositor does sRGB-correct
+/// rendering. Furthermore, the app should then make sure "glEnable(GL_FRAMEBUFFER_SRGB);" is called before rendering
+/// into these textures. Even though it is not recommended, if the application would like to treat the texture as a linear
+/// format and do linear-to-gamma conversion in GLSL, then the application can avoid calling "glEnable(GL_FRAMEBUFFER_SRGB);",
+/// but should still pass in an sRGB variant for the \a format. Failure to do so will cause the distortion compositor
+/// to apply incorrect gamma conversions leading to gamma-curve artifacts.
+///
+/// \see ovr_GetTextureSwapChainLength
+/// \see ovr_GetTextureSwapChainCurrentIndex
+/// \see ovr_GetTextureSwapChainDesc
+/// \see ovr_GetTextureSwapChainBufferGL
+/// \see ovr_DestroyTextureSwapChain
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_CreateTextureSwapChainGL(ovrSession session,
+                                                            const ovrTextureSwapChainDesc* desc,
+                                                            ovrTextureSwapChain* out_TextureSwapChain);
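+
+// A minimal sketch of the setup described in the note above, assuming a valid 'session' and a
+// per-eye size ('texWidth'/'texHeight') already queried with ovr_GetFovTextureSize:
+//
+//     ovrTextureSwapChainDesc desc = {0};
+//     desc.Type        = ovrTexture_2D;
+//     desc.ArraySize   = 1;
+//     desc.Format      = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;   // sRGB, as recommended above
+//     desc.Width       = texWidth;
+//     desc.Height      = texHeight;
+//     desc.MipLevels   = 1;
+//     desc.SampleCount = 1;
+//
+//     ovrTextureSwapChain chain = NULL;
+//     if (OVR_SUCCESS(ovr_CreateTextureSwapChainGL(session, &desc, &chain)))
+//     {
+//         glEnable(GL_FRAMEBUFFER_SRGB);   // so the app's writes into the chain are gamma-correct
+//     }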
+
+/// Get a specific buffer within the chain as a GL texture name
+///
+/// \param[in]  session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in]  chain Specifies an ovrTextureSwapChain previously returned by ovr_CreateTextureSwapChainGL
+/// \param[in]  index Specifies the index within the chain to retrieve. Must be between 0 and length (see ovr_GetTextureSwapChainLength)
+///             or may pass -1 to get the buffer at the CurrentIndex location. (Saving a call to GetTextureSwapChainCurrentIndex)
+/// \param[out] out_TexId Returns the GL texture object name associated with the specific index requested
+///
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use 
+///         ovr_GetLastErrorInfo to get more information.
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetTextureSwapChainBufferGL(ovrSession session,
+                                                               ovrTextureSwapChain chain,
+                                                               int index,
+                                                               unsigned int* out_TexId);
+
+
+/// Creates a Mirror Texture which is auto-refreshed to mirror Rift contents produced by this application.
+///
+/// A second call to ovr_CreateMirrorTextureGL for a given ovrSession before destroying the first one
+/// is not supported and will result in an error return.
+///
+/// \param[in]  session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in]  desc Specifies the requested mirror texture description.
+/// \param[out] out_MirrorTexture Specifies the created ovrMirrorTexture, which will be valid upon a successful return value, else it will be NULL.
+///             This texture must be eventually destroyed via ovr_DestroyMirrorTexture before destroying the HMD with ovr_Destroy.
+///
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use 
+///         ovr_GetLastErrorInfo to get more information.
+///
+/// \note The \a format provided should be thought of as the format the distortion compositor will use when writing into the mirror
+/// texture. It is highly recommended that mirror textures are requested as sRGB formats because the distortion compositor
+/// does sRGB-correct rendering. If the application requests a non-sRGB format (e.g. R8G8B8A8_UNORM) as the mirror texture,
+/// then the application might have to apply a manual linear-to-gamma conversion when reading from the mirror texture.
+/// Failure to do so can result in incorrect gamma conversions leading to gamma-curve artifacts and color banding.
+///
+/// \see ovr_GetMirrorTextureBufferGL
+/// \see ovr_DestroyMirrorTexture
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_CreateMirrorTextureGL(ovrSession session,
+                                                         const ovrMirrorTextureDesc* desc,
+                                                         ovrMirrorTexture* out_MirrorTexture);
+
+/// Get the underlying buffer as a GL texture name
+///
+/// \param[in]  session Specifies an ovrSession previously returned by ovr_Create.
+/// \param[in]  mirrorTexture Specifies an ovrMirrorTexture previously returned by ovr_CreateMirrorTextureGL
+/// \param[out] out_TexId Specifies the GL texture object name associated with the mirror texture
+///
+/// \return Returns an ovrResult indicating success or failure. In the case of failure, use 
+///         ovr_GetLastErrorInfo to get more information.
+///
+OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetMirrorTextureBufferGL(ovrSession session,
+                                                            ovrMirrorTexture mirrorTexture,
+                                                            unsigned int* out_TexId);
+
+
+#endif    // OVR_CAPI_GL_h

+ 53 - 0
examples/oculus_glfw_sample/OculusSDK/LibOVR/Include/OVR_CAPI_Keys.h

@@ -0,0 +1,53 @@
+/********************************************************************************//**
+\file      OVR_CAPI_Keys.h
+\brief     Keys for CAPI property function calls
+\copyright Copyright 2015 Oculus VR, LLC All Rights reserved.
+************************************************************************************/
+
+#ifndef OVR_CAPI_Keys_h
+#define OVR_CAPI_Keys_h
+
+#include "OVR_Version.h"
+
+
+
+#define OVR_KEY_USER                        "User"                // string
+
+#define OVR_KEY_NAME                        "Name"                // string
+
+#define OVR_KEY_GENDER                      "Gender"              // string "Male", "Female", or "Unknown"
+#define OVR_DEFAULT_GENDER                  "Unknown"
+
+#define OVR_KEY_PLAYER_HEIGHT               "PlayerHeight"        // float meters
+#define OVR_DEFAULT_PLAYER_HEIGHT           1.778f
+
+#define OVR_KEY_EYE_HEIGHT                  "EyeHeight"           // float meters
+#define OVR_DEFAULT_EYE_HEIGHT              1.675f
+
+#define OVR_KEY_NECK_TO_EYE_DISTANCE        "NeckEyeDistance"     // float[2] meters
+#define OVR_DEFAULT_NECK_TO_EYE_HORIZONTAL  0.0805f
+#define OVR_DEFAULT_NECK_TO_EYE_VERTICAL    0.075f
+
+
+#define OVR_KEY_EYE_TO_NOSE_DISTANCE        "EyeToNoseDist"       // float[2] meters
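+
+// A minimal sketch of reading the profile keys above through the ovr_Get* property functions
+// declared in OVR_CAPI.h, assuming a valid 'session' (the defaults are returned when a key
+// has not been set):
+//
+//     float height = ovr_GetFloat(session, OVR_KEY_PLAYER_HEIGHT, OVR_DEFAULT_PLAYER_HEIGHT);
+//     float neckToEye[2] = { OVR_DEFAULT_NECK_TO_EYE_HORIZONTAL, OVR_DEFAULT_NECK_TO_EYE_VERTICAL };
+//     ovr_GetFloatArray(session, OVR_KEY_NECK_TO_EYE_DISTANCE, neckToEye, 2);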
+
+
+
+
+
+#define OVR_PERF_HUD_MODE                       "PerfHudMode"                       // int, allowed values are defined in enum ovrPerfHudMode
+
+#define OVR_LAYER_HUD_MODE                      "LayerHudMode"                      // int, allowed values are defined in enum ovrLayerHudMode
+#define OVR_LAYER_HUD_CURRENT_LAYER             "LayerHudCurrentLayer"              // int, The layer to show 
+#define OVR_LAYER_HUD_SHOW_ALL_LAYERS           "LayerHudShowAll"                   // bool, Hide other layers when the hud is enabled
+
+#define OVR_DEBUG_HUD_STEREO_MODE               "DebugHudStereoMode"                // int, allowed values are defined in enum ovrDebugHudStereoMode
+#define OVR_DEBUG_HUD_STEREO_GUIDE_INFO_ENABLE  "DebugHudStereoGuideInfoEnable"     // bool
+#define OVR_DEBUG_HUD_STEREO_GUIDE_SIZE         "DebugHudStereoGuideSize2f"         // float[2]
+#define OVR_DEBUG_HUD_STEREO_GUIDE_POSITION     "DebugHudStereoGuidePosition3f"     // float[3]
+#define OVR_DEBUG_HUD_STEREO_GUIDE_YAWPITCHROLL "DebugHudStereoGuideYawPitchRoll3f" // float[3]
+#define OVR_DEBUG_HUD_STEREO_GUIDE_COLOR        "DebugHudStereoGuideColor4f"        // float[4]
+
+
+
+#endif // OVR_CAPI_Keys_h

+ 191 - 0
examples/oculus_glfw_sample/OculusSDK/LibOVR/Include/OVR_ErrorCode.h

@@ -0,0 +1,191 @@
+/********************************************************************************//**
+\file  OVR_ErrorCode.h
+\brief     This header provides LibOVR error code declarations.
+\copyright Copyright 2015-2016 Oculus VR, LLC All Rights reserved.
+*************************************************************************************/
+
+#ifndef OVR_ErrorCode_h
+#define OVR_ErrorCode_h
+
+
+#include "OVR_Version.h"
+#include <stdint.h>
+
+
+
+
+
+
+
+#ifndef OVR_RESULT_DEFINED
+#define OVR_RESULT_DEFINED ///< Allows ovrResult to be independently defined.
+/// API call results are represented at the highest level by a single ovrResult.
+typedef int32_t ovrResult;
+#endif
+
+
+/// \brief Indicates if an ovrResult indicates success.
+///
+/// Some functions return additional successful values other than ovrSuccess and
+/// require usage of this macro to indicate success.
+///
+#if !defined(OVR_SUCCESS)
+    #define OVR_SUCCESS(result) (result >= 0)
+#endif
+
+
+/// \brief Indicates if an ovrResult indicates an unqualified success.
+///
+/// This is useful for indicating that the code intentionally wants to
+/// check for result == ovrSuccess as opposed to OVR_SUCCESS(), which
+/// checks for result >= ovrSuccess.
+///
+#if !defined(OVR_UNQUALIFIED_SUCCESS)
+    #define OVR_UNQUALIFIED_SUCCESS(result) (result == ovrSuccess)
+#endif
+
+
+/// \brief Indicates if an ovrResult indicates failure.
+///
+#if !defined(OVR_FAILURE)
+    #define OVR_FAILURE(result) (!OVR_SUCCESS(result))
+#endif
+
+
+// Success is a value greater or equal to 0, while all error types are negative values.
+#ifndef OVR_SUCCESS_DEFINED
+#define OVR_SUCCESS_DEFINED ///< Allows ovrResult to be independently defined.
+typedef enum ovrSuccessType_
+{
+    /// This is a general success result. Use OVR_SUCCESS to test for success.
+    ovrSuccess = 0,
+
+    /// Returned from a call to SubmitFrame. The call succeeded, but what the app
+    /// rendered will not be visible on the HMD. Ideally the app should continue
+    /// calling SubmitFrame, but not do any rendering. When the result becomes
+    /// ovrSuccess, rendering should continue as usual.
+    ovrSuccess_NotVisible                 = 1000,
+
+    ovrSuccess_HMDFirmwareMismatch        = 4100,   ///< The HMD Firmware is out of date but is acceptable.
+    ovrSuccess_TrackerFirmwareMismatch    = 4101,   ///< The Tracker Firmware is out of date but is acceptable.
+    ovrSuccess_ControllerFirmwareMismatch = 4104,   ///< The controller firmware is out of date but is acceptable.
+    ovrSuccess_TrackerDriverNotFound      = 4105,   ///< The tracker driver interface was not found. Can be a temporary error
+
+} ovrSuccessType;
+#endif
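+
+// A minimal sketch of the pattern described for ovrSuccess_NotVisible above, assuming 'result'
+// was returned by ovr_SubmitFrame:
+//
+//     bool isVisible = OVR_UNQUALIFIED_SUCCESS(result);    // strictly ovrSuccess: keep rendering
+//     if (OVR_FAILURE(result))
+//     {
+//         ovrErrorInfo errorInfo;
+//         ovr_GetLastErrorInfo(&errorInfo);                // errorInfo.ErrorString describes the failure
+//     }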
+
+
+typedef enum ovrErrorType_
+{
+    /* General errors */
+    ovrError_MemoryAllocationFailure    = -1000,   ///< Failure to allocate memory.
+    ovrError_SocketCreationFailure      = -1001,   ///< Failure to create a socket.
+    ovrError_InvalidSession             = -1002,   ///< Invalid ovrSession parameter provided.
+    ovrError_Timeout                    = -1003,   ///< The operation timed out.
+    ovrError_NotInitialized             = -1004,   ///< The system or component has not been initialized.
+    ovrError_InvalidParameter           = -1005,   ///< Invalid parameter provided. See error info or log for details.
+    ovrError_ServiceError               = -1006,   ///< Generic service error. See error info or log for details.
+    ovrError_NoHmd                      = -1007,   ///< The given HMD doesn't exist.
+    ovrError_Unsupported                = -1009,   ///< Function call is not supported on this hardware/software
+    ovrError_DeviceUnavailable          = -1010,   ///< Specified device type isn't available.
+    ovrError_InvalidHeadsetOrientation  = -1011,   ///< The headset was in an invalid orientation for the requested operation (e.g. vertically oriented during ovr_RecenterPose).
+    ovrError_ClientSkippedDestroy       = -1012,   ///< The client failed to call ovr_Destroy on an active session before calling ovr_Shutdown. Or the client crashed.
+    ovrError_ClientSkippedShutdown      = -1013,   ///< The client failed to call ovr_Shutdown or the client crashed.
+
+    /* Audio error range, reserved for Audio errors. */
+    ovrError_AudioReservedBegin         = -2000,   ///< First Audio error.
+    ovrError_AudioDeviceNotFound        = -2001,   ///< Failure to find the specified audio device.
+    ovrError_AudioComError              = -2002,   ///< Generic COM error.
+    ovrError_AudioReservedEnd           = -2999,   ///< Last Audio error.
+
+    /* Initialization errors. */
+    ovrError_Initialize                 = -3000,   ///< Generic initialization error.
+    ovrError_LibLoad                    = -3001,   ///< Couldn't load LibOVRRT.
+    ovrError_LibVersion                 = -3002,   ///< LibOVRRT version incompatibility.
+    ovrError_ServiceConnection          = -3003,   ///< Couldn't connect to the OVR Service.
+    ovrError_ServiceVersion             = -3004,   ///< OVR Service version incompatibility.
+    ovrError_IncompatibleOS             = -3005,   ///< The operating system version is incompatible.
+    ovrError_DisplayInit                = -3006,   ///< Unable to initialize the HMD display.
+    ovrError_ServerStart                = -3007,   ///< Unable to start the server. Is it already running?
+    ovrError_Reinitialization           = -3008,   ///< Attempting to re-initialize with a different version.
+    ovrError_MismatchedAdapters         = -3009,   ///< Chosen rendering adapters between client and service do not match
+    ovrError_LeakingResources           = -3010,   ///< Calling application has leaked resources
+    ovrError_ClientVersion              = -3011,   ///< Client version too old to connect to service
+    ovrError_OutOfDateOS                = -3012,   ///< The operating system is out of date.
+    ovrError_OutOfDateGfxDriver         = -3013,   ///< The graphics driver is out of date.
+    ovrError_IncompatibleGPU            = -3014,   ///< The graphics hardware is not supported
+    ovrError_NoValidVRDisplaySystem     = -3015,   ///< No valid VR display system found.
+    ovrError_Obsolete                   = -3016,   ///< Feature or API is obsolete and no longer supported.
+    ovrError_DisabledOrDefaultAdapter   = -3017,   ///< No supported VR display system found, but disabled or driverless adapter found.
+    ovrError_HybridGraphicsNotSupported = -3018,   ///< The system is using hybrid graphics (Optimus, etc...), which is not supported.
+    ovrError_DisplayManagerInit         = -3019,   ///< Initialization of the DisplayManager failed.
+    ovrError_TrackerDriverInit          = -3020,   ///< Failed to get the interface for an attached tracker
+
+    /* Hardware errors */
+    ovrError_InvalidBundleAdjustment    = -4000,   ///< Headset has no bundle adjustment data.
+    ovrError_USBBandwidth               = -4001,   ///< The USB hub cannot handle the camera frame bandwidth.
+    ovrError_USBEnumeratedSpeed         = -4002,   ///< The USB camera is not enumerating at the correct device speed.
+    ovrError_ImageSensorCommError       = -4003,   ///< Unable to communicate with the image sensor.
+    ovrError_GeneralTrackerFailure      = -4004,   ///< We use this to report various sensor issues that don't fit in an easily classifiable bucket.
+    ovrError_ExcessiveFrameTruncation   = -4005,   ///< A more than acceptable number of frames are coming back truncated.
+    ovrError_ExcessiveFrameSkipping     = -4006,   ///< A more than acceptable number of frames have been skipped.
+    ovrError_SyncDisconnected           = -4007,   ///< The sensor is not receiving the sync signal (cable disconnected?).
+    ovrError_TrackerMemoryReadFailure   = -4008,   ///< Failed to read memory from the sensor.
+    ovrError_TrackerMemoryWriteFailure  = -4009,   ///< Failed to write memory from the sensor.
+    ovrError_TrackerFrameTimeout        = -4010,   ///< Timed out waiting for a camera frame.
+    ovrError_TrackerTruncatedFrame      = -4011,   ///< Truncated frame returned from sensor.
+    ovrError_TrackerDriverFailure       = -4012,   ///< The sensor driver has encountered a problem.
+    ovrError_TrackerNRFFailure          = -4013,   ///< The sensor wireless subsystem has encountered a problem.
+    ovrError_HardwareGone               = -4014,   ///< The hardware has been unplugged
+    ovrError_NordicEnabledNoSync        = -4015,   ///< The nordic indicates that sync is enabled but it is not sending sync pulses
+    ovrError_NordicSyncNoFrames         = -4016,   ///< It looks like we're getting a sync signal, but no camera frames have been received
+    ovrError_CatastrophicFailure        = -4017,   ///< A catastrophic failure has occurred.  We will attempt to recover by resetting the device
+
+    ovrError_HMDFirmwareMismatch        = -4100,   ///< The HMD Firmware is out of date and is unacceptable.
+    ovrError_TrackerFirmwareMismatch    = -4101,   ///< The sensor Firmware is out of date and is unacceptable.
+    ovrError_BootloaderDeviceDetected   = -4102,   ///< A bootloader HMD is detected by the service.
+    ovrError_TrackerCalibrationError    = -4103,   ///< The sensor calibration is missing or incorrect.
+    ovrError_ControllerFirmwareMismatch = -4104,   ///< The controller firmware is out of date and is unacceptable.
+
+    ovrError_IMUTooManyLostSamples      = -4200,   ///< Too many lost IMU samples.
+    ovrError_IMURateError               = -4201,   ///< IMU rate is outside of the expected range.
+    ovrError_FeatureReportFailure       = -4202,   ///< A feature report has failed.
+
+    /* Synchronization errors */
+    ovrError_Incomplete                 = -5000,   ///< Requested async work not yet complete.
+    ovrError_Abandoned                  = -5001,   ///< Requested async work was abandoned and result is incomplete.
+
+    /* Rendering errors */
+    ovrError_DisplayLost                = -6000,   ///< In the event of a system-wide graphics reset or cable unplug this is returned to the app.
+    ovrError_TextureSwapChainFull       = -6001,   ///< ovr_CommitTextureSwapChain was called too many times on a texture swapchain without calling submit to use the chain.
+    ovrError_TextureSwapChainInvalid    = -6002,   ///< The ovrTextureSwapChain is in an incomplete or inconsistent state. Ensure ovr_CommitTextureSwapChain was called at least once first.
+
+    /* Fatal errors */
+    ovrError_RuntimeException           = -7000,   ///< A runtime exception occurred. The application is required to shutdown LibOVR and re-initialize it before this error state will be cleared.
+
+
+    ovrError_MetricsUnknownApp            = -90000,
+    ovrError_MetricsDuplicateApp          = -90001,
+    ovrError_MetricsNoEvents              = -90002,
+    ovrError_MetricsRuntime               = -90003,
+    ovrError_MetricsFile                  = -90004,
+    ovrError_MetricsNoClientInfo          = -90005,
+    ovrError_MetricsNoAppMetaData         = -90006,
+    ovrError_MetricsNoApp                 = -90007,
+    ovrError_MetricsOafFailure            = -90008,
+    ovrError_MetricsSessionAlreadyActive  = -90009,
+    ovrError_MetricsSessionNotActive      = -90010,
+
+} ovrErrorType;
+
+
+
+/// Provides information about the last error.
+/// \see ovr_GetLastErrorInfo
+typedef struct ovrErrorInfo_
+{
+    ovrResult Result;               ///< The result from the last API call that generated an error ovrResult.
+    char      ErrorString[512];     ///< A UTF8-encoded null-terminated English string describing the problem. The format of this string is subject to change in future versions.
+} ovrErrorInfo;
+
+#endif /* OVR_ErrorCode_h */

+ 60 - 0
examples/oculus_glfw_sample/OculusSDK/LibOVR/Include/OVR_Version.h

@@ -0,0 +1,60 @@
+/********************************************************************************//**
+\file      OVR_Version.h
+\brief     This header provides LibOVR version identification.
+\copyright Copyright 2014-2016 Oculus VR, LLC All Rights reserved.
+*************************************************************************************/
+
+#ifndef OVR_Version_h
+#define OVR_Version_h
+
+
+
+/// Conventional string-ification macro.
+#if !defined(OVR_STRINGIZE)
+    #define OVR_STRINGIZEIMPL(x) #x
+    #define OVR_STRINGIZE(x)     OVR_STRINGIZEIMPL(x)
+#endif
+
+
+// Master version numbers
+#define OVR_PRODUCT_VERSION 1  // Product version doesn't participate in semantic versioning.
+#define OVR_MAJOR_VERSION   1  // If you change these values then you need to also make sure to change LibOVR/Projects/Windows/LibOVR.props in parallel.
+#define OVR_MINOR_VERSION   3  // 
+#define OVR_PATCH_VERSION   0
+#define OVR_BUILD_NUMBER    0
+
+// This is the ((product * 100) + major) version of the service that the DLL is compatible with.
+// When we backport changes to old versions of the DLL we update the old DLLs
+// to move this version number up to the latest version.
+// The DLL is responsible for checking that the service is the version it supports
+// and returning an appropriate error message if it has not been made compatible.
+#define OVR_DLL_COMPATIBLE_VERSION 101
+
+#define OVR_FEATURE_VERSION 0
+
+
+/// "Major.Minor.Patch"
+#if !defined(OVR_VERSION_STRING)
+    #define OVR_VERSION_STRING  OVR_STRINGIZE(OVR_MAJOR_VERSION.OVR_MINOR_VERSION.OVR_PATCH_VERSION)
+#endif
+
+
+/// "Major.Minor.Patch.Build"
+#if !defined(OVR_DETAILED_VERSION_STRING)
+    #define OVR_DETAILED_VERSION_STRING OVR_STRINGIZE(OVR_MAJOR_VERSION.OVR_MINOR_VERSION.OVR_PATCH_VERSION.OVR_BUILD_NUMBER)
+#endif
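+
+// With the numbers above, OVR_VERSION_STRING expands to "1.3.0" and OVR_DETAILED_VERSION_STRING
+// to "1.3.0.0", so either can be logged directly, e.g.:
+//
+//     printf("Compiled against LibOVR " OVR_VERSION_STRING "\n");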
+
+
+/// \brief file description for version info
+/// This appears in the user-visible file properties. It is intended to convey publicly
+/// available additional information such as feature builds.
+#if !defined(OVR_FILE_DESCRIPTION_STRING)
+    #if defined(_DEBUG)
+        #define OVR_FILE_DESCRIPTION_STRING "dev build debug"
+    #else
+        #define OVR_FILE_DESCRIPTION_STRING "dev build"
+    #endif
+#endif
+
+
+#endif // OVR_Version_h

File diff too large to display
+ 9 - 0
examples/oculus_glfw_sample/glad.c


File diff too large to display
+ 9 - 0
examples/oculus_glfw_sample/glad.h


+ 492 - 0
examples/oculus_glfw_sample/oculus_glfw_sample.c

@@ -0,0 +1,492 @@
+/*******************************************************************************************
+*
+*   raylib Oculus minimum sample (OpenGL 3.3 Core)
+*
+*   NOTE: This example requires raylib module [rlgl]
+*
+*   Compile rlgl using:
+*   gcc -c rlgl.c -Wall -std=c99 -DRLGL_STANDALONE -DRAYMATH_IMPLEMENTATION -DGRAPHICS_API_OPENGL_33
+*
+*   Compile example using:
+*   gcc -o oculus_glfw_sample.exe oculus_glfw_sample.c rlgl.o glad.o -L. -lLibOVRRT32_1 -lglfw3 -lopengl32 -lgdi32 -std=c99
+*
+*   This example has been created using raylib 1.5 (www.raylib.com)
+*   raylib is licensed under an unmodified zlib/libpng license (View raylib.h for details)
+*
+*   Copyright (c) 2015 Ramon Santamaria (@raysan5)
+*
+********************************************************************************************/
+
+#if defined(_WIN32)
+    #define GLFW_EXPOSE_NATIVE_WIN32
+    #define GLFW_EXPOSE_NATIVE_WGL
+    #define OVR_OS_WIN32
+#elif defined(__APPLE__)
+    #define GLFW_EXPOSE_NATIVE_COCOA
+    #define GLFW_EXPOSE_NATIVE_NSGL
+    #define OVR_OS_MAC
+#elif defined(__linux__)
+    #define GLFW_EXPOSE_NATIVE_X11
+    #define GLFW_EXPOSE_NATIVE_GLX
+    #define OVR_OS_LINUX
+#endif
+
+#include "glad.h"      // Extensions loading library
+
+#include <GLFW/glfw3.h>
+#include <GLFW/glfw3native.h>
+
+#include "OculusSDK/LibOVR/Include/OVR_CAPI_GL.h"    // Oculus SDK for OpenGL
+
+//#include "GL/CAPI_GLE.h"        // stripped-down GLEW/GLAD library to manage extensions (really required?)
+//#include "Extras/OVR_Math.h"    // math utilities C++ (really required?)
+
+#define RLGL_STANDALONE
+#include "rlgl.h"
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>     // Required for: memset()
+
+//----------------------------------------------------------------------------------
+// Types and Structures Definition
+//----------------------------------------------------------------------------------
+typedef struct OculusBuffer {
+    ovrTextureSwapChain textureChain;
+    GLuint depthId;
+    GLuint fboId;
+    int width;
+    int height;
+} OculusBuffer;
+
+typedef enum { LOG_INFO = 0, LOG_ERROR, LOG_WARNING, LOG_DEBUG, LOG_OTHER } TraceLogType;
+
+//----------------------------------------------------------------------------------
+// Module specific Functions Declaration
+//----------------------------------------------------------------------------------
+static OculusBuffer LoadOculusBuffer(ovrSession session, int width, int height);
+static void UnloadOculusBuffer(ovrSession session, OculusBuffer buffer);
+static void SetOculusBuffer(ovrSession session, OculusBuffer buffer);
+static void UnsetOculusBuffer(OculusBuffer buffer);
+
+static void ErrorCallback(int error, const char* description)
+{
+    fputs(description, stderr);
+}
+
+static void KeyCallback(GLFWwindow* window, int key, int scancode, int action, int mods)
+{
+    if (key == GLFW_KEY_ESCAPE && action == GLFW_PRESS)
+    {
+        glfwSetWindowShouldClose(window, GL_TRUE);
+    }
+}
+
+static void DrawRectangleV(Vector2 position, Vector2 size, Color color);
+static void TraceLog(int msgType, const char *text, ...);
+
+//----------------------------------------------------------------------------------
+// Main Entry point
+//----------------------------------------------------------------------------------
+int main()
+{
+    // Initialization
+    //--------------------------------------------------------------------------------------
+    ovrResult result = ovr_Initialize(NULL);
+    if (OVR_FAILURE(result)) TraceLog(LOG_ERROR, "OVR: Could not initialize Oculus device");
+
+    ovrSession session;
+    ovrGraphicsLuid luid;   // Useless for OpenGL since SDK 0.7
+    
+    result = ovr_Create(&session, &luid);
+    if (OVR_FAILURE(result))
+    {
+        TraceLog(LOG_WARNING, "OVR: Could not create Oculus session");
+        ovr_Shutdown();
+        return 1;       // Stop the program, there is no valid session to work with
+    }
+
+    ovrHmdDesc hmdDesc = ovr_GetHmdDesc(session);
+    
+    TraceLog(LOG_INFO, "OVR: Product Name: %s", hmdDesc.ProductName);
+    TraceLog(LOG_INFO, "OVR: Manufacturer: %s", hmdDesc.Manufacturer);
+    TraceLog(LOG_INFO, "OVR: Product ID: %i", hmdDesc.ProductId);
+    TraceLog(LOG_INFO, "OVR: Product Type: %i", hmdDesc.Type);
+    TraceLog(LOG_INFO, "OVR: Serian Number: %s", hmdDesc.SerialNumber);
+    TraceLog(LOG_INFO, "OVR: Resolution: %ix%i", hmdDesc.Resolution.w, hmdDesc.Resolution.h);
+    
+    int screenWidth = hmdDesc.Resolution.w/2 + 100;		// Added 100 pixels for testing
+    int screenHeight = hmdDesc.Resolution.h/2 + 100;	// Added 100 pixels for testing
+
+    // GLFW3 Initialization + OpenGL 3.3 Context + Extensions
+    //--------------------------------------------------------
+    GLFWwindow *window;
+    
+    glfwSetErrorCallback(ErrorCallback);
+    
+    if (!glfwInit())
+    {
+        TraceLog(LOG_WARNING, "GLFW3: Can not initialize GLFW");
+        exit(EXIT_FAILURE);
+    }
+    else TraceLog(LOG_INFO, "GLFW3: GLFW initialized successfully");
+    
+    glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
+    glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
+    glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
+    glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
+    glfwWindowHint(GLFW_DECORATED, GL_FALSE);   // Mandatory on Oculus Rift to avoid program crash!
+   
+    window = glfwCreateWindow(screenWidth, screenHeight, "rlgl standalone", NULL, NULL);
+    
+    if (!window)
+    {
+        glfwTerminate();
+        exit(EXIT_FAILURE);
+    }
+    else TraceLog(LOG_INFO, "GLFW3: Window created successfully");
+    
+    glfwSetKeyCallback(window, KeyCallback);
+    
+    glfwMakeContextCurrent(window);
+    glfwSwapInterval(0);
+
+    if (!gladLoadGLLoader((GLADloadproc)glfwGetProcAddress))
+    {
+        TraceLog(LOG_WARNING, "GLAD: Cannot load OpenGL extensions");
+        exit(1);
+    }
+    else TraceLog(LOG_INFO, "GLAD: OpenGL extensions loaded successfully");
+    
+    rlglInit();
+    rlglInitGraphics(0, 0, screenWidth, screenHeight);
+    rlClearColor(245, 245, 245, 255); // Define clear color
+    
+    Vector2 position = { screenWidth/2 - 100, screenHeight/2 - 100 };
+    Vector2 size = { 200, 200 };
+    Color color = { 180, 20, 20, 255 };
+    //---------------------------------------------------------------------------
+    
+    OculusBuffer eyeRenderBuffer[2];
+
+    GLuint mirrorFBO = 0;
+    ovrMirrorTexture mirrorTexture = NULL;
+
+    bool isVisible = true;
+    long long frameIndex = 0;
+
+    // Make eyes render buffers
+    ovrSizei recommendedTexSizeLeft = ovr_GetFovTextureSize(session, ovrEye_Left, hmdDesc.DefaultEyeFov[0], 1.0f);
+    eyeRenderBuffer[0] = LoadOculusBuffer(session, recommendedTexSizeLeft.w, recommendedTexSizeLeft.h);
+    ovrSizei recommendedTexSizeRight = ovr_GetFovTextureSize(session, ovrEye_Right, hmdDesc.DefaultEyeFov[1], 1.0f);
+    eyeRenderBuffer[1] = LoadOculusBuffer(session, recommendedTexSizeRight.w, recommendedTexSizeRight.h);
+    
+    // Note: the mirror window can be any size, for this sample we use 1/2 the HMD resolution
+    ovrSizei windowSize = { hmdDesc.Resolution.w/2, hmdDesc.Resolution.h/2 };
+
+    // Define mirror texture descriptor
+    ovrMirrorTextureDesc mirrorDesc;
+    memset(&mirrorDesc, 0, sizeof(mirrorDesc));
+    mirrorDesc.Width = windowSize.w;
+    mirrorDesc.Height = windowSize.h;
+    mirrorDesc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
+
+    // Create mirror texture and an FBO used to copy mirror texture to back buffer
+    result = ovr_CreateMirrorTextureGL(session, &mirrorDesc, &mirrorTexture);
+    if (!OVR_SUCCESS(result)) TraceLog(LOG_WARNING, "OVR: Failed to create mirror texture");
+
+    // Configure the mirror read buffer
+    GLuint texId;
+    ovr_GetMirrorTextureBufferGL(session, mirrorTexture, &texId);
+
+    glGenFramebuffers(1, &mirrorFBO);
+    glBindFramebuffer(GL_READ_FRAMEBUFFER, mirrorFBO);
+    glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, texId, 0);
+    glFramebufferRenderbuffer(GL_READ_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, 0);
+
+    // Check mirror framebuffer completeness while it is still bound
+    if (glCheckFramebufferStatus(GL_READ_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
+    {
+        glDeleteFramebuffers(1, &mirrorFBO);
+        TraceLog(LOG_WARNING, "OVR: Could not initialize mirror framebuffers");
+    }
+
+    glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
+    
+    // FloorLevel will give tracking poses where the floor height is 0
+    ovr_SetTrackingOriginType(session, ovrTrackingOrigin_FloorLevel);
+    //--------------------------------------------------------------------------------------
+
+    // Main loop
+    while (!glfwWindowShouldClose(window))
+    {
+        // Update
+        //----------------------------------------------------------------------------------
+
+        // TODO: Update game here!
+        
+	    // Call ovr_GetRenderDesc each frame to get the ovrEyeRenderDesc, as the returned values (e.g. HmdToEyeOffset) may change at runtime.
+	    ovrEyeRenderDesc eyeRenderDesc[2];
+	    eyeRenderDesc[0] = ovr_GetRenderDesc(session, ovrEye_Left, hmdDesc.DefaultEyeFov[0]);
+	    eyeRenderDesc[1] = ovr_GetRenderDesc(session, ovrEye_Right, hmdDesc.DefaultEyeFov[1]);
+
+        // Get eye poses, feeding in correct IPD offset
+        ovrPosef eyeRenderPose[2];
+        ovrVector3f hmdToEyeOffset[2] = { eyeRenderDesc[0].HmdToEyeOffset, eyeRenderDesc[1].HmdToEyeOffset };
+
+        double sensorSampleTime;    // sensorSampleTime is fed into the layer later
+        ovr_GetEyePoses(session, frameIndex, ovrTrue, hmdToEyeOffset, eyeRenderPose, &sensorSampleTime);
+        //----------------------------------------------------------------------------------
+
+        // Draw
+        //----------------------------------------------------------------------------------
+        
+        // Clear screen to red color
+        glClearColor(1.0f, 0.1f, 0.1f, 0.0f);   
+        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
+        
+        if (isVisible)
+        {
+            for (int eye = 0; eye < 2; ++eye)
+            {
+                SetOculusBuffer(session, eyeRenderBuffer[eye]);
+                
+                // TODO: Get view and projection matrices for the eye
+                // Sample using Oculus OVR_Math.h (C++)
+                /*
+                Matrix4f projection = Matrix4f(ovrMatrix4f_Projection(eyeRenderDesc[eye].Fov, 0.01f, 10000.0f, ovrProjection_None));
+                Matrix4f eyeOrientation = Matrix4f(Quatf(eyeRenderPose[eye].Orientation).Inverted());
+                Matrix4f eyePose = Matrix4f::Translation(-Vector3f(eyeRenderPose[eye].Position));
+                Matrix4f mvp = projection*eyeOrientation*eyePose;
+				*/
+
+                // Sample using custom raymath.h (C) -INCOMPLETE-
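+                // NOTE: eyeRenderDesc[eye].Fov is an ovrFovPort (four edge half-tangents); it still needs converting to the
+                //       vertical FOV angle (in degrees) that MatrixPerspective() expects before this block can work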
+                /*
+                Matrix projection = MatrixPerspective(eyeRenderDesc[eye].Fov, ((double)screenWidth/(double)screenHeight), 0.01, 1000.0);
+                Matrix eyeOrientation = QuaternionToMatrix((Quaternion){ -eyeRenderPose[eye].Orientation.x, -eyeRenderPose[eye].Orientation.y, 
+                                                                         -eyeRenderPose[eye].Orientation.z, -eyeRenderPose[eye].Orientation.w });
+                Matrix eyePose = MatrixTranslate(-eyeRenderPose[eye].Position.x, -eyeRenderPose[eye].Position.y, -eyeRenderPose[eye].Position.z);
+                Matrix mvp = MatrixMultiply(projection, MatrixMultiply(eyeOrientation, eyePose));
+                */
+                
+                // Render everything
+                // TODO: Pass calculated mvp matrix to default shader to consider projection and orientation! 
+                //DrawRectangleV(position, size, color);
+                //rlglDraw();
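+                // Possible way to upload the computed mvp matrix before drawing (sketch only, commented out):
+                // 'shaderId' and the "mvp" uniform name are hypothetical; whether to transpose (GL_TRUE/GL_FALSE)
+                // depends on how raymath's matrix layout maps to GLSL's column-major order
+                /*
+                glUseProgram(shaderId);
+                glUniformMatrix4fv(glGetUniformLocation(shaderId, "mvp"), 1, GL_TRUE, (float *)&mvp);
+                */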
+
+                UnsetOculusBuffer(eyeRenderBuffer[eye]);
+                
+                // Commit changes to the textures so they get picked up this frame
+                ovr_CommitTextureSwapChain(session, eyeRenderBuffer[eye].textureChain);
+            }
+        }
+        
+        // Set up positional data
+        ovrViewScaleDesc viewScaleDesc;
+        viewScaleDesc.HmdSpaceToWorldScaleInMeters = 1.0f;
+        viewScaleDesc.HmdToEyeOffset[0] = hmdToEyeOffset[0];
+        viewScaleDesc.HmdToEyeOffset[1] = hmdToEyeOffset[1];
+
+        // Create the main eye layer
+        ovrLayerEyeFov eyeLayer;
+        eyeLayer.Header.Type  = ovrLayerType_EyeFov;
+        eyeLayer.Header.Flags = ovrLayerFlag_TextureOriginAtBottomLeft;   // Because OpenGL's texture origin is bottom-left
+
+        for (int eye = 0; eye < 2; eye++)
+        {
+            eyeLayer.ColorTexture[eye] = eyeRenderBuffer[eye].textureChain;
+            eyeLayer.Viewport[eye] = (ovrRecti){ { 0, 0 }, { eyeRenderBuffer[eye].width, eyeRenderBuffer[eye].height } };   // Pos (0, 0) + full buffer size
+            eyeLayer.Fov[eye] = hmdDesc.DefaultEyeFov[eye];
+            eyeLayer.RenderPose[eye] = eyeRenderPose[eye];
+            eyeLayer.SensorSampleTime = sensorSampleTime;
+        }
+
+        // Build the layer list (a single eye-fov layer in this sample)
+        ovrLayerHeader *layerList = &eyeLayer.Header;
+        ovrResult result = ovr_SubmitFrame(session, frameIndex, NULL, &layerList, 1);
+        
+        // Exit if submit returns an error (NOTE: a full application would keep running and retry on ovrError_DisplayLost)
+        if (!OVR_SUCCESS(result)) return 1;
+
+        isVisible = (result == ovrSuccess);
+
+        // Get session status information
+        ovrSessionStatus sessionStatus;
+        ovr_GetSessionStatus(session, &sessionStatus);
+        if (sessionStatus.ShouldQuit) TraceLog(LOG_WARNING, "OVR: Session should quit.");
+        if (sessionStatus.ShouldRecenter) ovr_RecenterTrackingOrigin(session);
+        
+        // Blit mirror texture to back buffer
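+        // NOTE: The source rectangle is vertically flipped (y0 = h, y1 = 0) so the mirror image shows right side up in the window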
+        glBindFramebuffer(GL_READ_FRAMEBUFFER, mirrorFBO);
+        glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
+        GLint w = mirrorDesc.Width;
+        GLint h = mirrorDesc.Height;
+        glBlitFramebuffer(0, h, w, 0, 0, 0, w, h, GL_COLOR_BUFFER_BIT, GL_NEAREST);
+        glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
+        
+        glfwSwapBuffers(window);
+        glfwPollEvents();
+    
+        //frameIndex++;     //?
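+        // NOTE (assumption, based on the LibOVR docs): a frameIndex of 0 ("unknown frame") is accepted, but incrementing it every
+        // frame and passing the same value to ovr_GetEyePoses() and ovr_SubmitFrame() gives the compositor better timing information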
+        //----------------------------------------------------------------------------------
+    }
+
+    // De-Initialization
+    //--------------------------------------------------------------------------------------
+    if (mirrorFBO) glDeleteFramebuffers(1, &mirrorFBO);
+    if (mirrorTexture) ovr_DestroyMirrorTexture(session, mirrorTexture);
+    for (int eye = 0; eye < 2; eye++) UnloadOculusBuffer(session, eyeRenderBuffer[eye]);
+    
+    rlglClose();
+    
+    glfwDestroyWindow(window);
+    glfwTerminate();
+    
+    ovr_Destroy(session);   // Must be called after glfwTerminate()
+    ovr_Shutdown();
+    //--------------------------------------------------------------------------------------
+    
+    return 0;
+}
+
+//----------------------------------------------------------------------------------
+// Module specific Functions Definitions
+//----------------------------------------------------------------------------------
+
+// Load Oculus required buffers: texture-swap-chain, fbo, texture-depth
+static OculusBuffer LoadOculusBuffer(ovrSession session, int width, int height)
+{
+    OculusBuffer buffer;
+    buffer.width = width;
+    buffer.height = height;
+    
+    // Create OVR texture chain
+    ovrTextureSwapChainDesc desc = { 0 };   // NOTE: Empty braces '{}' are not a valid initializer in C
+    desc.Type = ovrTexture_2D;
+    desc.ArraySize = 1;
+    desc.Width = width;
+    desc.Height = height;
+    desc.MipLevels = 1;
+    desc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
+    desc.SampleCount = 1;
+    desc.StaticImage = ovrFalse;
+
+    ovrResult result = ovr_CreateTextureSwapChainGL(session, &desc, &buffer.textureChain);
+
+    int textureCount = 0;
+
+    if (OVR_SUCCESS(result))
+    {
+        // Only query the swap chain if it was actually created
+        ovr_GetTextureSwapChainLength(session, buffer.textureChain, &textureCount);
+
+        for (int i = 0; i < textureCount; ++i)
+        {
+            GLuint chainTexId;
+            ovr_GetTextureSwapChainBufferGL(session, buffer.textureChain, i, &chainTexId);
+            glBindTexture(GL_TEXTURE_2D, chainTexId);
+            
+            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+        }
+    }
+    
+    // Generate framebuffer
+    glGenFramebuffers(1, &buffer.fboId);
+
+    // Create Depth texture
+    glGenTextures(1, &buffer.depthId);
+    glBindTexture(GL_TEXTURE_2D, buffer.depthId);
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+
+    glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT24, buffer.width, buffer.height, 0, GL_DEPTH_COMPONENT, GL_UNSIGNED_INT, NULL);
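+
+    // NOTE: The depth texture and the current swap-chain color texture are attached to this FBO every frame in SetOculusBuffer()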
+
+    return buffer;
+}
+
+// Unload texture required buffers
+static void UnloadOculusBuffer(ovrSession session, OculusBuffer buffer)
+{
+    if (buffer.textureChain)
+    {
+        ovr_DestroyTextureSwapChain(session, buffer.textureChain);
+        buffer.textureChain = NULL;
+    }
+
+    if (buffer.depthId)
+    {
+        glDeleteTextures(1, &buffer.depthId);
+        buffer.depthId = 0;
+    }
+
+    if (buffer.fboId)
+    {
+        glDeleteFramebuffers(1, &buffer.fboId);
+        buffer.fboId = 0;
+    }
+}
+
+// Set current Oculus buffer
+static void SetOculusBuffer(ovrSession session, OculusBuffer buffer)
+{
+    GLuint currentTexId;
+    int currentIndex;
+    
+    ovr_GetTextureSwapChainCurrentIndex(session, buffer.textureChain, &currentIndex);
+    ovr_GetTextureSwapChainBufferGL(session, buffer.textureChain, currentIndex, &currentTexId);
+
+    glBindFramebuffer(GL_FRAMEBUFFER, buffer.fboId);
+    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, currentTexId, 0);
+    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, buffer.depthId, 0);
+
+    glViewport(0, 0, buffer.width, buffer.height);
+    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
+    glEnable(GL_FRAMEBUFFER_SRGB);
+}
+
+// Unset Oculus buffer
+static void UnsetOculusBuffer(OculusBuffer buffer)
+{
+    glBindFramebuffer(GL_FRAMEBUFFER, buffer.fboId);
+    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0);
+    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, 0, 0);
+}
+
+// Draw rectangle using rlgl OpenGL 1.1 style coding (translated to OpenGL 3.3 internally)
+static void DrawRectangleV(Vector2 position, Vector2 size, Color color)
+{
+    rlBegin(RL_TRIANGLES);
+        rlColor4ub(color.r, color.g, color.b, color.a);
+
+        rlVertex2i(position.x, position.y);
+        rlVertex2i(position.x, position.y + size.y);
+        rlVertex2i(position.x + size.x, position.y + size.y);
+
+        rlVertex2i(position.x, position.y);
+        rlVertex2i(position.x + size.x, position.y + size.y);
+        rlVertex2i(position.x + size.x, position.y);
+    rlEnd();
+}
+
+// Output a trace log message
+// NOTE: Expected msgType values: LOG_INFO, LOG_ERROR, LOG_WARNING, LOG_DEBUG
+static void TraceLog(int msgType, const char *text, ...)
+{
+    va_list args;
+    va_start(args, text);
+
+    switch(msgType)
+    {
+        case LOG_INFO: fprintf(stdout, "INFO: "); break;
+        case LOG_ERROR: fprintf(stdout, "ERROR: "); break;
+        case LOG_WARNING: fprintf(stdout, "WARNING: "); break;
+        case LOG_DEBUG: fprintf(stdout, "DEBUG: "); break;
+        default: break;
+    }
+
+    vfprintf(stdout, text, args);
+    fprintf(stdout, "\n");
+
+    va_end(args);
+
+    //if (msgType == LOG_ERROR) exit(1);
+}

+ 131 - 0
examples/oculus_glfw_sample/raylib_rlgl_standalone.c

@@ -0,0 +1,131 @@
+/*******************************************************************************************
+*
+*   raylib [rlgl] example - Using rlgl module as standalone module
+*
+*   NOTE: This example requires OpenGL 3.3 or OpenGL ES 2.0 for shader support;
+*         OpenGL 1.1 does not support shaders but can also be used.
+*
+*   Compile rlgl module using:
+*   gcc -c rlgl.c -Wall -std=c99 -DRLGL_STANDALONE -DRAYMATH_IMPLEMENTATION -DGRAPHICS_API_OPENGL_33
+*
+*   Compile example using:
+*   gcc -o $(NAME_PART).exe $(FILE_NAME) rlgl.o glad.o -lglfw3 -lopengl32 -lgdi32 -std=c99
+*
+*   This example has been created using raylib 1.5 (www.raylib.com)
+*   raylib is licensed under an unmodified zlib/libpng license (View raylib.h for details)
+*
+*   Copyright (c) 2015 Ramon Santamaria (@raysan5)
+*
+********************************************************************************************/
+
+#include "glad.h"
+#include <GLFW/glfw3.h>
+
+#define RLGL_STANDALONE
+#include "rlgl.h"
+
+#include <stdlib.h>
+#include <stdio.h>
+
+//----------------------------------------------------------------------------------
+// Module specific Functions Declaration
+//----------------------------------------------------------------------------------
+static void ErrorCallback(int error, const char* description)
+{
+    fputs(description, stderr);
+}
+
+static void KeyCallback(GLFWwindow* window, int key, int scancode, int action, int mods)
+{
+    if (key == GLFW_KEY_ESCAPE && action == GLFW_PRESS)
+    {
+        glfwSetWindowShouldClose(window, GL_TRUE);
+    }
+}
+
+void DrawRectangleV(Vector2 position, Vector2 size, Color color);
+
+//----------------------------------------------------------------------------------
+// Main Entry point
+//----------------------------------------------------------------------------------
+int main(void)
+{
+    const int screenWidth = 800;
+    const int screenHeight = 450;
+    
+    GLFWwindow *window;
+    
+    glfwSetErrorCallback(ErrorCallback);
+    
+    if (!glfwInit()) exit(EXIT_FAILURE);
+    
+    glfwWindowHint(GLFW_SAMPLES, 4);
+    glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
+    glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
+    glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
+    glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
+   
+    window = glfwCreateWindow(screenWidth, screenHeight, "rlgl standalone", NULL, NULL);
+    
+    if (!window)
+    {
+        glfwTerminate();
+        exit(EXIT_FAILURE);
+    }
+    
+    glfwSetKeyCallback(window, KeyCallback);
+    
+    glfwMakeContextCurrent(window);
+    glfwSwapInterval(1);
+
+    if (!gladLoadGLLoader((GLADloadproc)glfwGetProcAddress))
+    {
+        printf("Cannot load GL extensions.\n");
+        exit(1);
+    }
+    
+    rlglInit();
+    rlglInitGraphics(0, 0, screenWidth, screenHeight);
+    rlClearColor(245, 245, 245, 255); // Define clear color
+    
+    Vector2 position = { screenWidth/2 - 100, screenHeight/2 - 100 };
+    Vector2 size = { 200, 200 };
+    Color color = { 180, 20, 20, 255 };
+    
+    while (!glfwWindowShouldClose(window))
+    {
+        rlClearScreenBuffers();
+        
+        DrawRectangleV(position, size, color);
+        
+        rlglDraw();
+        
+        glfwSwapBuffers(window);
+        glfwPollEvents();
+    }
+    
+    rlglClose();
+    
+    glfwDestroyWindow(window);
+    glfwTerminate();
+    
+    return 0;
+}
+
+//----------------------------------------------------------------------------------
+// Module specific Functions Definitions
+//----------------------------------------------------------------------------------
+void DrawRectangleV(Vector2 position, Vector2 size, Color color)
+{
+    rlBegin(RL_TRIANGLES);
+        rlColor4ub(color.r, color.g, color.b, color.a);
+
+        rlVertex2i(position.x, position.y);
+        rlVertex2i(position.x, position.y + size.y);
+        rlVertex2i(position.x + size.x, position.y + size.y);
+
+        rlVertex2i(position.x, position.y);
+        rlVertex2i(position.x + size.x, position.y + size.y);
+        rlVertex2i(position.x + size.x, position.y);
+    rlEnd();
+}

+ 1128 - 0
examples/oculus_glfw_sample/raymath.h

@@ -0,0 +1,1128 @@
+/**********************************************************************************************
+*
+*   raymath (header only file)
+*
+*   Some useful functions to work with Vector3, Matrix and Quaternions
+*
+*   You must:
+*       #define RAYMATH_IMPLEMENTATION
+*   before you include this file in *only one* C or C++ file to create the implementation.
+*
+*   Example:
+*       #define RAYMATH_IMPLEMENTATION
+*       #include "raymath.h"
+*
+*   You can also use:
+*       #define RAYMATH_EXTERN_INLINE       // Inlines all functions code, so it runs faster.
+*                                           // This requires lots of memory on system.
+*       #define RAYMATH_STANDALONE          // Not dependent on raylib.h structs: Vector3, Matrix.
+*
+*
+*   Copyright (c) 2015 Ramon Santamaria (@raysan5)
+*
+*   This software is provided "as-is", without any express or implied warranty. In no event
+*   will the authors be held liable for any damages arising from the use of this software.
+*
+*   Permission is granted to anyone to use this software for any purpose, including commercial
+*   applications, and to alter it and redistribute it freely, subject to the following restrictions:
+*
+*     1. The origin of this software must not be misrepresented; you must not claim that you
+*     wrote the original software. If you use this software in a product, an acknowledgment
+*     in the product documentation would be appreciated but is not required.
+*
+*     2. Altered source versions must be plainly marked as such, and must not be misrepresented
+*     as being the original software.
+*
+*     3. This notice may not be removed or altered from any source distribution.
+*
+**********************************************************************************************/
+
+#ifndef RAYMATH_H
+#define RAYMATH_H
+
+//#define RAYMATH_STANDALONE        // NOTE: To use raymath as standalone lib, just uncomment this line
+//#define RAYMATH_EXTERN_INLINE     // NOTE: To compile functions as static inline, uncomment this line
+
+#ifndef RAYMATH_STANDALONE
+    #include "raylib.h"             // Required for structs: Vector3, Matrix
+#endif
+
+#if defined(RAYMATH_EXTERN_INLINE)
+    #define RMDEF extern inline
+#else
+    #define RMDEF extern
+#endif
+
+//----------------------------------------------------------------------------------
+// Defines and Macros
+//----------------------------------------------------------------------------------
+#ifndef PI
+    #define PI 3.14159265358979323846
+#endif
+
+#ifndef DEG2RAD
+    #define DEG2RAD (PI/180.0f)
+#endif
+
+#ifndef RAD2DEG
+    #define RAD2DEG (180.0f/PI)
+#endif
+
+//----------------------------------------------------------------------------------
+// Types and Structures Definition
+//----------------------------------------------------------------------------------
+
+#if defined(RAYMATH_STANDALONE)
+    // Vector2 type
+    typedef struct Vector2 {
+        float x;
+        float y;
+    } Vector2;
+
+    // Vector3 type
+    typedef struct Vector3 {
+        float x;
+        float y;
+        float z;
+    } Vector3;
+
+    // Matrix type (OpenGL style 4x4 - right handed, column major)
+    typedef struct Matrix {
+        float m0, m4, m8, m12;
+        float m1, m5, m9, m13;
+        float m2, m6, m10, m14;
+        float m3, m7, m11, m15;
+    } Matrix;
+#endif
+
+// Quaternion type
+typedef struct Quaternion {
+    float x;
+    float y;
+    float z;
+    float w;
+} Quaternion;
+
+#ifndef RAYMATH_EXTERN_INLINE
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+//------------------------------------------------------------------------------------
+// Functions Declaration to work with Vector3
+//------------------------------------------------------------------------------------
+RMDEF Vector3 VectorAdd(Vector3 v1, Vector3 v2);              // Add two vectors
+RMDEF Vector3 VectorSubtract(Vector3 v1, Vector3 v2);         // Subtract two vectors
+RMDEF Vector3 VectorCrossProduct(Vector3 v1, Vector3 v2);     // Calculate two vectors cross product
+RMDEF Vector3 VectorPerpendicular(Vector3 v);                 // Calculate a vector perpendicular to the provided vector
+RMDEF float VectorDotProduct(Vector3 v1, Vector3 v2);         // Calculate two vectors dot product
+RMDEF float VectorLength(const Vector3 v);                    // Calculate vector length
+RMDEF void VectorScale(Vector3 *v, float scale);              // Scale provided vector
+RMDEF void VectorNegate(Vector3 *v);                          // Negate provided vector (invert direction)
+RMDEF void VectorNormalize(Vector3 *v);                       // Normalize provided vector
+RMDEF float VectorDistance(Vector3 v1, Vector3 v2);           // Calculate distance between two points
+RMDEF Vector3 VectorLerp(Vector3 v1, Vector3 v2, float amount); // Calculate linear interpolation between two vectors
+RMDEF Vector3 VectorReflect(Vector3 vector, Vector3 normal);  // Calculate reflected vector to normal
+RMDEF void VectorTransform(Vector3 *v, Matrix mat);           // Transforms a Vector3 by a given Matrix
+RMDEF Vector3 VectorZero(void);                               // Return a Vector3 init to zero
+RMDEF Vector3 VectorMin(Vector3 vec1, Vector3 vec2);          // Return min value for each pair of components
+RMDEF Vector3 VectorMax(Vector3 vec1, Vector3 vec2);          // Return max value for each pair of components
+
+//------------------------------------------------------------------------------------
+// Functions Declaration to work with Matrix
+//------------------------------------------------------------------------------------
+RMDEF float MatrixDeterminant(Matrix mat);                    // Compute matrix determinant
+RMDEF float MatrixTrace(Matrix mat);                          // Returns the trace of the matrix (sum of the values along the diagonal)
+RMDEF void MatrixTranspose(Matrix *mat);                      // Transposes provided matrix
+RMDEF void MatrixInvert(Matrix *mat);                         // Invert provided matrix
+RMDEF void MatrixNormalize(Matrix *mat);                      // Normalize provided matrix
+RMDEF Matrix MatrixIdentity(void);                            // Returns identity matrix
+RMDEF Matrix MatrixAdd(Matrix left, Matrix right);            // Add two matrices
+RMDEF Matrix MatrixSubstract(Matrix left, Matrix right);      // Subtract two matrices (left - right)
+RMDEF Matrix MatrixTranslate(float x, float y, float z);      // Returns translation matrix
+RMDEF Matrix MatrixRotate(Vector3 axis, float angle);         // Returns rotation matrix for an angle around a specified axis (angle in radians)
+RMDEF Matrix MatrixRotateX(float angle);                      // Returns x-rotation matrix (angle in radians)
+RMDEF Matrix MatrixRotateY(float angle);                      // Returns y-rotation matrix (angle in radians)
+RMDEF Matrix MatrixRotateZ(float angle);                      // Returns z-rotation matrix (angle in radians)
+RMDEF Matrix MatrixScale(float x, float y, float z);          // Returns scaling matrix
+RMDEF Matrix MatrixMultiply(Matrix left, Matrix right);       // Returns two matrix multiplication
+RMDEF Matrix MatrixFrustum(double left, double right, double bottom, double top, double near, double far);  // Returns perspective projection matrix
+RMDEF Matrix MatrixPerspective(double fovy, double aspect, double near, double far);                        // Returns perspective projection matrix
+RMDEF Matrix MatrixOrtho(double left, double right, double bottom, double top, double near, double far);    // Returns orthographic projection matrix
+RMDEF Matrix MatrixLookAt(Vector3 position, Vector3 target, Vector3 up);  // Returns camera look-at matrix (view matrix)
+RMDEF void PrintMatrix(Matrix m);                             // Print matrix utility
+
+//------------------------------------------------------------------------------------
+// Functions Declaration to work with Quaternions
+//------------------------------------------------------------------------------------
+RMDEF float QuaternionLength(Quaternion quat);                // Compute the length of a quaternion
+RMDEF void QuaternionNormalize(Quaternion *q);                // Normalize provided quaternion
+RMDEF Quaternion QuaternionMultiply(Quaternion q1, Quaternion q2);    // Calculate two quaternion multiplication
+RMDEF Quaternion QuaternionSlerp(Quaternion q1, Quaternion q2, float slerp); // Calculates spherical linear interpolation between two quaternions
+RMDEF Quaternion QuaternionFromMatrix(Matrix matrix);                 // Returns a quaternion for a given rotation matrix
+RMDEF Matrix QuaternionToMatrix(Quaternion q);                        // Returns a matrix for a given quaternion
+RMDEF Quaternion QuaternionFromAxisAngle(Vector3 axis, float angle);  // Returns rotation quaternion for an angle and axis
+RMDEF void QuaternionToAxisAngle(Quaternion q, Vector3 *outAxis, float *outAngle); // Returns the rotation angle and axis for a given quaternion
+RMDEF void QuaternionTransform(Quaternion *q, Matrix mat);            // Transform a quaternion given a transformation matrix
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // notdef RAYMATH_EXTERN_INLINE
+
+#endif  // RAYMATH_H
+//////////////////////////////////////////////////////////////////// end of header file
+
+#if defined(RAYMATH_IMPLEMENTATION) || defined(RAYMATH_EXTERN_INLINE)
+
+#include <stdio.h>      // Used only on PrintMatrix()
+#include <math.h>       // Standard math library: sin(), cos(), tan()...
+#include <stdlib.h>     // Used for abs()
+
+//----------------------------------------------------------------------------------
+// Module Functions Definition - Vector3 math
+//----------------------------------------------------------------------------------
+
+// Add two vectors
+RMDEF Vector3 VectorAdd(Vector3 v1, Vector3 v2)
+{
+    Vector3 result;
+
+    result.x = v1.x + v2.x;
+    result.y = v1.y + v2.y;
+    result.z = v1.z + v2.z;
+
+    return result;
+}
+
+// Subtract two vectors
+RMDEF Vector3 VectorSubtract(Vector3 v1, Vector3 v2)
+{
+    Vector3 result;
+
+    result.x = v1.x - v2.x;
+    result.y = v1.y - v2.y;
+    result.z = v1.z - v2.z;
+
+    return result;
+}
+
+// Calculate two vectors cross product
+RMDEF Vector3 VectorCrossProduct(Vector3 v1, Vector3 v2)
+{
+    Vector3 result;
+
+    result.x = v1.y*v2.z - v1.z*v2.y;
+    result.y = v1.z*v2.x - v1.x*v2.z;
+    result.z = v1.x*v2.y - v1.y*v2.x;
+
+    return result;
+}
+
+// Calculate a vector perpendicular to the provided vector
+RMDEF Vector3 VectorPerpendicular(Vector3 v)
+{
+    Vector3 result;
+
+    float min = fabs(v.x);
+    Vector3 cardinalAxis = {1.0f, 0.0f, 0.0f};
+
+    if (fabs(v.y) < min)
+    {
+        min = fabs(v.y);
+        cardinalAxis = (Vector3){0.0f, 1.0f, 0.0f};
+    }
+
+    if(fabs(v.z) < min)
+    {
+        cardinalAxis = (Vector3){0.0f, 0.0f, 1.0f};
+    }
+
+    result = VectorCrossProduct(v, cardinalAxis);
+
+    return result;
+}
+
+// Calculate two vectors dot product
+RMDEF float VectorDotProduct(Vector3 v1, Vector3 v2)
+{
+    float result;
+
+    result = v1.x*v2.x + v1.y*v2.y + v1.z*v2.z;
+
+    return result;
+}
+
+// Calculate vector length
+RMDEF float VectorLength(const Vector3 v)
+{
+    float length;
+
+    length = sqrt(v.x*v.x + v.y*v.y + v.z*v.z);
+
+    return length;
+}
+
+// Scale provided vector
+RMDEF void VectorScale(Vector3 *v, float scale)
+{
+    v->x *= scale;
+    v->y *= scale;
+    v->z *= scale;
+}
+
+// Negate provided vector (invert direction)
+RMDEF void VectorNegate(Vector3 *v)
+{
+    v->x = -v->x;
+    v->y = -v->y;
+    v->z = -v->z;
+}
+
+// Normalize provided vector
+RMDEF void VectorNormalize(Vector3 *v)
+{
+    float length, ilength;
+
+    length = VectorLength(*v);
+
+    if (length == 0) length = 1.0f;
+
+    ilength = 1.0f/length;
+
+    v->x *= ilength;
+    v->y *= ilength;
+    v->z *= ilength;
+}
+
+// Calculate distance between two points
+RMDEF float VectorDistance(Vector3 v1, Vector3 v2)
+{
+    float result;
+
+    float dx = v2.x - v1.x;
+    float dy = v2.y - v1.y;
+    float dz = v2.z - v1.z;
+
+    result = sqrt(dx*dx + dy*dy + dz*dz);
+
+    return result;
+}
+
+// Calculate linear interpolation between two vectors
+RMDEF Vector3 VectorLerp(Vector3 v1, Vector3 v2, float amount)
+{
+    Vector3 result;
+
+    result.x = v1.x + amount*(v2.x - v1.x);
+    result.y = v1.y + amount*(v2.y - v1.y);
+    result.z = v1.z + amount*(v2.z - v1.z);
+
+    return result;
+}
+
+// Calculate reflected vector to normal
+RMDEF Vector3 VectorReflect(Vector3 vector, Vector3 normal)
+{
+    // I is the original vector
+    // N is the normal of the incident plane
+    // R = I - (2*N*( DotProduct[ I,N] ))
+
+    Vector3 result;
+
+    float dotProduct = VectorDotProduct(vector, normal);
+
+    result.x = vector.x - (2.0f*normal.x)*dotProduct;
+    result.y = vector.y - (2.0f*normal.y)*dotProduct;
+    result.z = vector.z - (2.0f*normal.z)*dotProduct;
+
+    return result;
+}
+
+// Transforms a Vector3 with a given Matrix
+RMDEF void VectorTransform(Vector3 *v, Matrix mat)
+{
+    float x = v->x;
+    float y = v->y;
+    float z = v->z;
+
+    //MatrixTranspose(&mat);
+
+    v->x = mat.m0*x + mat.m4*y + mat.m8*z + mat.m12;
+    v->y = mat.m1*x + mat.m5*y + mat.m9*z + mat.m13;
+    v->z = mat.m2*x + mat.m6*y + mat.m10*z + mat.m14;
+}
+
+// Return a Vector3 init to zero
+RMDEF Vector3 VectorZero(void)
+{
+    Vector3 zero = { 0.0f, 0.0f, 0.0f };
+
+    return zero;
+}
+
+// Return min value for each pair of components
+RMDEF Vector3 VectorMin(Vector3 vec1, Vector3 vec2)
+{
+    Vector3 result;
+    
+    result.x = fminf(vec1.x, vec2.x);
+    result.y = fminf(vec1.y, vec2.y);
+    result.z = fminf(vec1.z, vec2.z);
+    
+    return result;
+}
+
+// Return max value for each pair of components
+RMDEF Vector3 VectorMax(Vector3 vec1, Vector3 vec2)
+{
+    Vector3 result;
+    
+    result.x = fmaxf(vec1.x, vec2.x);
+    result.y = fmaxf(vec1.y, vec2.y);
+    result.z = fmaxf(vec1.z, vec2.z);
+    
+    return result;
+}
+
+//----------------------------------------------------------------------------------
+// Module Functions Definition - Matrix math
+//----------------------------------------------------------------------------------
+
+// Compute matrix determinant
+RMDEF float MatrixDeterminant(Matrix mat)
+{
+    float result;
+
+    // Cache the matrix values (speed optimization)
+    float a00 = mat.m0, a01 = mat.m1, a02 = mat.m2, a03 = mat.m3;
+    float a10 = mat.m4, a11 = mat.m5, a12 = mat.m6, a13 = mat.m7;
+    float a20 = mat.m8, a21 = mat.m9, a22 = mat.m10, a23 = mat.m11;
+    float a30 = mat.m12, a31 = mat.m13, a32 = mat.m14, a33 = mat.m15;
+
+    result = a30*a21*a12*a03 - a20*a31*a12*a03 - a30*a11*a22*a03 + a10*a31*a22*a03 +
+             a20*a11*a32*a03 - a10*a21*a32*a03 - a30*a21*a02*a13 + a20*a31*a02*a13 +
+             a30*a01*a22*a13 - a00*a31*a22*a13 - a20*a01*a32*a13 + a00*a21*a32*a13 +
+             a30*a11*a02*a23 - a10*a31*a02*a23 - a30*a01*a12*a23 + a00*a31*a12*a23 +
+             a10*a01*a32*a23 - a00*a11*a32*a23 - a20*a11*a02*a33 + a10*a21*a02*a33 +
+             a20*a01*a12*a33 - a00*a21*a12*a33 - a10*a01*a22*a33 + a00*a11*a22*a33;
+
+    return result;
+}
+
+// Returns the trace of the matrix (sum of the values along the diagonal)
+RMDEF float MatrixTrace(Matrix mat)
+{
+    return (mat.m0 + mat.m5 + mat.m10 + mat.m15);
+}
+
+// Transposes provided matrix
+RMDEF void MatrixTranspose(Matrix *mat)
+{
+    Matrix temp;
+
+    temp.m0 = mat->m0;
+    temp.m1 = mat->m4;
+    temp.m2 = mat->m8;
+    temp.m3 = mat->m12;
+    temp.m4 = mat->m1;
+    temp.m5 = mat->m5;
+    temp.m6 = mat->m9;
+    temp.m7 = mat->m13;
+    temp.m8 = mat->m2;
+    temp.m9 = mat->m6;
+    temp.m10 = mat->m10;
+    temp.m11 = mat->m14;
+    temp.m12 = mat->m3;
+    temp.m13 = mat->m7;
+    temp.m14 = mat->m11;
+    temp.m15 = mat->m15;
+
+    *mat = temp;
+}
+
+// Invert provided matrix
+RMDEF void MatrixInvert(Matrix *mat)
+{
+    Matrix temp;
+
+    // Cache the matrix values (speed optimization)
+    float a00 = mat->m0, a01 = mat->m1, a02 = mat->m2, a03 = mat->m3;
+    float a10 = mat->m4, a11 = mat->m5, a12 = mat->m6, a13 = mat->m7;
+    float a20 = mat->m8, a21 = mat->m9, a22 = mat->m10, a23 = mat->m11;
+    float a30 = mat->m12, a31 = mat->m13, a32 = mat->m14, a33 = mat->m15;
+
+    float b00 = a00*a11 - a01*a10;
+    float b01 = a00*a12 - a02*a10;
+    float b02 = a00*a13 - a03*a10;
+    float b03 = a01*a12 - a02*a11;
+    float b04 = a01*a13 - a03*a11;
+    float b05 = a02*a13 - a03*a12;
+    float b06 = a20*a31 - a21*a30;
+    float b07 = a20*a32 - a22*a30;
+    float b08 = a20*a33 - a23*a30;
+    float b09 = a21*a32 - a22*a31;
+    float b10 = a21*a33 - a23*a31;
+    float b11 = a22*a33 - a23*a32;
+
+    // Calculate the inverse determinant (inlined to avoid double-caching)
+    float invDet = 1.0f/(b00*b11 - b01*b10 + b02*b09 + b03*b08 - b04*b07 + b05*b06);
+
+    temp.m0 = (a11*b11 - a12*b10 + a13*b09)*invDet;
+    temp.m1 = (-a01*b11 + a02*b10 - a03*b09)*invDet;
+    temp.m2 = (a31*b05 - a32*b04 + a33*b03)*invDet;
+    temp.m3 = (-a21*b05 + a22*b04 - a23*b03)*invDet;
+    temp.m4 = (-a10*b11 + a12*b08 - a13*b07)*invDet;
+    temp.m5 = (a00*b11 - a02*b08 + a03*b07)*invDet;
+    temp.m6 = (-a30*b05 + a32*b02 - a33*b01)*invDet;
+    temp.m7 = (a20*b05 - a22*b02 + a23*b01)*invDet;
+    temp.m8 = (a10*b10 - a11*b08 + a13*b06)*invDet;
+    temp.m9 = (-a00*b10 + a01*b08 - a03*b06)*invDet;
+    temp.m10 = (a30*b04 - a31*b02 + a33*b00)*invDet;
+    temp.m11 = (-a20*b04 + a21*b02 - a23*b00)*invDet;
+    temp.m12 = (-a10*b09 + a11*b07 - a12*b06)*invDet;
+    temp.m13 = (a00*b09 - a01*b07 + a02*b06)*invDet;
+    temp.m14 = (-a30*b03 + a31*b01 - a32*b00)*invDet;
+    temp.m15 = (a20*b03 - a21*b01 + a22*b00)*invDet;
+
+    *mat = temp;
+}
+
+// Normalize provided matrix
+RMDEF void MatrixNormalize(Matrix *mat)
+{
+    float det = MatrixDeterminant(*mat);
+
+    mat->m0 /= det;
+    mat->m1 /= det;
+    mat->m2 /= det;
+    mat->m3 /= det;
+    mat->m4 /= det;
+    mat->m5 /= det;
+    mat->m6 /= det;
+    mat->m7 /= det;
+    mat->m8 /= det;
+    mat->m9 /= det;
+    mat->m10 /= det;
+    mat->m11 /= det;
+    mat->m12 /= det;
+    mat->m13 /= det;
+    mat->m14 /= det;
+    mat->m15 /= det;
+}
+
+// Returns identity matrix
+RMDEF Matrix MatrixIdentity(void)
+{
+    Matrix result = { 1.0f, 0.0f, 0.0f, 0.0f, 
+                      0.0f, 1.0f, 0.0f, 0.0f, 
+                      0.0f, 0.0f, 1.0f, 0.0f,
+                      0.0f, 0.0f, 0.0f, 1.0f };
+
+    return result;
+}
+
+// Add two matrices
+RMDEF Matrix MatrixAdd(Matrix left, Matrix right)
+{
+    Matrix result = MatrixIdentity();
+
+    result.m0 = left.m0 + right.m0;
+    result.m1 = left.m1 + right.m1;
+    result.m2 = left.m2 + right.m2;
+    result.m3 = left.m3 + right.m3;
+    result.m4 = left.m4 + right.m4;
+    result.m5 = left.m5 + right.m5;
+    result.m6 = left.m6 + right.m6;
+    result.m7 = left.m7 + right.m7;
+    result.m8 = left.m8 + right.m8;
+    result.m9 = left.m9 + right.m9;
+    result.m10 = left.m10 + right.m10;
+    result.m11 = left.m11 + right.m11;
+    result.m12 = left.m12 + right.m12;
+    result.m13 = left.m13 + right.m13;
+    result.m14 = left.m14 + right.m14;
+    result.m15 = left.m15 + right.m15;
+
+    return result;
+}
+
+// Subtract two matrices (left - right)
+RMDEF Matrix MatrixSubstract(Matrix left, Matrix right)
+{
+    Matrix result = MatrixIdentity();
+
+    result.m0 = left.m0 - right.m0;
+    result.m1 = left.m1 - right.m1;
+    result.m2 = left.m2 - right.m2;
+    result.m3 = left.m3 - right.m3;
+    result.m4 = left.m4 - right.m4;
+    result.m5 = left.m5 - right.m5;
+    result.m6 = left.m6 - right.m6;
+    result.m7 = left.m7 - right.m7;
+    result.m8 = left.m8 - right.m8;
+    result.m9 = left.m9 - right.m9;
+    result.m10 = left.m10 - right.m10;
+    result.m11 = left.m11 - right.m11;
+    result.m12 = left.m12 - right.m12;
+    result.m13 = left.m13 - right.m13;
+    result.m14 = left.m14 - right.m14;
+    result.m15 = left.m15 - right.m15;
+
+    return result;
+}
+
+// Returns translation matrix
+RMDEF Matrix MatrixTranslate(float x, float y, float z)
+{
+    Matrix result = { 1.0f, 0.0f, 0.0f, 0.0f, 
+                      0.0f, 1.0f, 0.0f, 0.0f, 
+                      0.0f, 0.0f, 1.0f, 0.0f, 
+                      x, y, z, 1.0f };
+
+    return result;
+}
+
+// Create rotation matrix from axis and angle
+// NOTE: Angle should be provided in radians
+RMDEF Matrix MatrixRotate(Vector3 axis, float angle)
+{
+    Matrix result;
+
+    Matrix mat = MatrixIdentity();
+
+    float x = axis.x, y = axis.y, z = axis.z;
+
+    float length = sqrt(x*x + y*y + z*z);
+
+    if ((length != 1.0f) && (length != 0.0f))
+    {
+        length = 1.0f/length;
+        x *= length;
+        y *= length;
+        z *= length;
+    }
+
+    float sinres = sinf(angle);
+    float cosres = cosf(angle);
+    float t = 1.0f - cosres;
+
+    // Cache some matrix values (speed optimization)
+    float a00 = mat.m0, a01 = mat.m1, a02 = mat.m2, a03 = mat.m3;
+    float a10 = mat.m4, a11 = mat.m5, a12 = mat.m6, a13 = mat.m7;
+    float a20 = mat.m8, a21 = mat.m9, a22 = mat.m10, a23 = mat.m11;
+
+    // Construct the elements of the rotation matrix
+    float b00 = x*x*t + cosres, b01 = y*x*t + z*sinres, b02 = z*x*t - y*sinres;
+    float b10 = x*y*t - z*sinres, b11 = y*y*t + cosres, b12 = z*y*t + x*sinres;
+    float b20 = x*z*t + y*sinres, b21 = y*z*t - x*sinres, b22 = z*z*t + cosres;
+
+    // Perform rotation-specific matrix multiplication
+    result.m0 = a00*b00 + a10*b01 + a20*b02;
+    result.m1 = a01*b00 + a11*b01 + a21*b02;
+    result.m2 = a02*b00 + a12*b01 + a22*b02;
+    result.m3 = a03*b00 + a13*b01 + a23*b02;
+    result.m4 = a00*b10 + a10*b11 + a20*b12;
+    result.m5 = a01*b10 + a11*b11 + a21*b12;
+    result.m6 = a02*b10 + a12*b11 + a22*b12;
+    result.m7 = a03*b10 + a13*b11 + a23*b12;
+    result.m8 = a00*b20 + a10*b21 + a20*b22;
+    result.m9 = a01*b20 + a11*b21 + a21*b22;
+    result.m10 = a02*b20 + a12*b21 + a22*b22;
+    result.m11 = a03*b20 + a13*b21 + a23*b22;
+    result.m12 = mat.m12;
+    result.m13 = mat.m13;
+    result.m14 = mat.m14;
+    result.m15 = mat.m15;
+
+    return result;
+}
+
+/*
+// Another implementation for MatrixRotate...
+RMDEF Matrix MatrixRotate(float angle, float x, float y, float z)
+{
+    Matrix result = MatrixIdentity();
+
+    float c = cosf(angle);      // cosine
+    float s = sinf(angle);      // sine
+    float c1 = 1.0f - c;        // 1 - c
+
+    float m0 = result.m0, m4 = result.m4, m8 = result.m8, m12 = result.m12,
+          m1 = result.m1, m5 = result.m5, m9 = result.m9,  m13 = result.m13,
+          m2 = result.m2, m6 = result.m6, m10 = result.m10, m14 = result.m14;
+
+    // build rotation matrix
+    float r0 = x*x*c1 + c;
+    float r1 = x*y*c1 + z*s;
+    float r2 = x*z*c1 - y*s;
+    float r4 = x*y*c1 - z*s;
+    float r5 = y*y*c1 + c;
+    float r6 = y*z*c1 + x*s;
+    float r8 = x*z*c1 + y*s;
+    float r9 = y*z*c1 - x*s;
+    float r10= z*z*c1 + c;
+
+    // multiply rotation matrix
+    result.m0 = r0*m0 + r4*m1 + r8*m2;
+    result.m1 = r1*m0 + r5*m1 + r9*m2;
+    result.m2 = r2*m0 + r6*m1 + r10*m2;
+    result.m4 = r0*m4 + r4*m5 + r8*m6;
+    result.m5 = r1*m4 + r5*m5 + r9*m6;
+    result.m6 = r2*m4 + r6*m5 + r10*m6;
+    result.m8 = r0*m8 + r4*m9 + r8*m10;
+    result.m9 = r1*m8 + r5*m9 + r9*m10;
+    result.m10 = r2*m8 + r6*m9 + r10*m10;
+    result.m12 = r0*m12+ r4*m13 + r8*m14;
+    result.m13 = r1*m12+ r5*m13 + r9*m14;
+    result.m14 = r2*m12+ r6*m13 + r10*m14;
+
+    return result;
+}
+*/
+
+// Returns x-rotation matrix (angle in radians)
+RMDEF Matrix MatrixRotateX(float angle)
+{
+    Matrix result = MatrixIdentity();
+
+    float cosres = cosf(angle);
+    float sinres = sinf(angle);
+
+    result.m5 = cosres;
+    result.m6 = -sinres;
+    result.m9 = sinres;
+    result.m10 = cosres;
+
+    return result;
+}
+
+// Returns y-rotation matrix (angle in radians)
+RMDEF Matrix MatrixRotateY(float angle)
+{
+    Matrix result = MatrixIdentity();
+
+    float cosres = cosf(angle);
+    float sinres = sinf(angle);
+
+    result.m0 = cosres;
+    result.m2 = sinres;
+    result.m8 = -sinres;
+    result.m10 = cosres;
+
+    return result;
+}
+
+// Returns z-rotation matrix (angle in radians)
+RMDEF Matrix MatrixRotateZ(float angle)
+{
+    Matrix result = MatrixIdentity();
+
+    float cosres = cosf(angle);
+    float sinres = sinf(angle);
+
+    result.m0 = cosres;
+    result.m1 = -sinres;
+    result.m4 = sinres;
+    result.m5 = cosres;
+
+    return result;
+}
+
+// Returns scaling matrix
+RMDEF Matrix MatrixScale(float x, float y, float z)
+{
+    Matrix result = { x, 0.0f, 0.0f, 0.0f, 
+                      0.0f, y, 0.0f, 0.0f, 
+                      0.0f, 0.0f, z, 0.0f, 
+                      0.0f, 0.0f, 0.0f, 1.0f };
+
+    return result;
+}
+
+// Returns two matrix multiplication
+// NOTE: When multiplying matrices... the order matters!
+RMDEF Matrix MatrixMultiply(Matrix left, Matrix right)
+{
+    Matrix result;
+
+    result.m0 = right.m0*left.m0 + right.m1*left.m4 + right.m2*left.m8 + right.m3*left.m12;
+    result.m1 = right.m0*left.m1 + right.m1*left.m5 + right.m2*left.m9 + right.m3*left.m13;
+    result.m2 = right.m0*left.m2 + right.m1*left.m6 + right.m2*left.m10 + right.m3*left.m14;
+    result.m3 = right.m0*left.m3 + right.m1*left.m7 + right.m2*left.m11 + right.m3*left.m15;
+    result.m4 = right.m4*left.m0 + right.m5*left.m4 + right.m6*left.m8 + right.m7*left.m12;
+    result.m5 = right.m4*left.m1 + right.m5*left.m5 + right.m6*left.m9 + right.m7*left.m13;
+    result.m6 = right.m4*left.m2 + right.m5*left.m6 + right.m6*left.m10 + right.m7*left.m14;
+    result.m7 = right.m4*left.m3 + right.m5*left.m7 + right.m6*left.m11 + right.m7*left.m15;
+    result.m8 = right.m8*left.m0 + right.m9*left.m4 + right.m10*left.m8 + right.m11*left.m12;
+    result.m9 = right.m8*left.m1 + right.m9*left.m5 + right.m10*left.m9 + right.m11*left.m13;
+    result.m10 = right.m8*left.m2 + right.m9*left.m6 + right.m10*left.m10 + right.m11*left.m14;
+    result.m11 = right.m8*left.m3 + right.m9*left.m7 + right.m10*left.m11 + right.m11*left.m15;
+    result.m12 = right.m12*left.m0 + right.m13*left.m4 + right.m14*left.m8 + right.m15*left.m12;
+    result.m13 = right.m12*left.m1 + right.m13*left.m5 + right.m14*left.m9 + right.m15*left.m13;
+    result.m14 = right.m12*left.m2 + right.m13*left.m6 + right.m14*left.m10 + right.m15*left.m14;
+    result.m15 = right.m12*left.m3 + right.m13*left.m7 + right.m14*left.m11 + right.m15*left.m15;
+
+    return result;
+}
+
+// Returns perspective projection matrix
+RMDEF Matrix MatrixFrustum(double left, double right, double bottom, double top, double near, double far)
+{
+    Matrix result;
+
+    float rl = (right - left);
+    float tb = (top - bottom);
+    float fn = (far - near);
+
+    result.m0 = (near*2.0f)/rl;
+    result.m1 = 0.0f;
+    result.m2 = 0.0f;
+    result.m3 = 0.0f;
+
+    result.m4 = 0.0f;
+    result.m5 = (near*2.0f)/tb;
+    result.m6 = 0.0f;
+    result.m7 = 0.0f;
+
+    result.m8 = (right + left)/rl;
+    result.m9 = (top + bottom)/tb;
+    result.m10 = -(far + near)/fn;
+    result.m11 = -1.0f;
+
+    result.m12 = 0.0f;
+    result.m13 = 0.0f;
+    result.m14 = -(far*near*2.0f)/fn;
+    result.m15 = 0.0f;
+
+    return result;
+}
+
+// Returns perspective projection matrix
+RMDEF Matrix MatrixPerspective(double fovy, double aspect, double near, double far)
+{
+    double top = near*tan(fovy*PI/360.0);
+    double right = top*aspect;
+
+    return MatrixFrustum(-right, right, -top, top, near, far);
+}
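+
+// Usage sketch (illustrative values): fovy is expected in degrees, since the half-angle is computed as fovy*PI/360
+//     Matrix proj = MatrixPerspective(60.0, (double)screenWidth/(double)screenHeight, 0.01, 1000.0);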
+
+// Returns orthographic projection matrix
+RMDEF Matrix MatrixOrtho(double left, double right, double bottom, double top, double near, double far)
+{
+    Matrix result;
+
+    float rl = (right - left);
+    float tb = (top - bottom);
+    float fn = (far - near);
+
+    result.m0 = 2.0f/rl;
+    result.m1 = 0.0f;
+    result.m2 = 0.0f;
+    result.m3 = 0.0f;
+    result.m4 = 0.0f;
+    result.m5 = 2.0f/tb;
+    result.m6 = 0.0f;
+    result.m7 = 0.0f;
+    result.m8 = 0.0f;
+    result.m9 = 0.0f;
+    result.m10 = -2.0f/fn;
+    result.m11 = 0.0f;
+    result.m12 = -(left + right)/rl;
+    result.m13 = -(top + bottom)/tb;
+    result.m14 = -(far + near)/fn;
+    result.m15 = 1.0f;
+
+    return result;
+}
+
+// Returns camera look-at matrix (view matrix)
+RMDEF Matrix MatrixLookAt(Vector3 eye, Vector3 target, Vector3 up)
+{
+    Matrix result;
+
+    Vector3 z = VectorSubtract(eye, target);
+    VectorNormalize(&z);
+    Vector3 x = VectorCrossProduct(up, z);
+    VectorNormalize(&x);
+    Vector3 y = VectorCrossProduct(z, x);
+    VectorNormalize(&y);
+
+    result.m0 = x.x;
+    result.m1 = x.y;
+    result.m2 = x.z;
+    result.m3 = -((x.x*eye.x) + (x.y*eye.y) + (x.z*eye.z));
+    result.m4 = y.x;
+    result.m5 = y.y;
+    result.m6 = y.z;
+    result.m7 = -((y.x*eye.x) + (y.y*eye.y) + (y.z*eye.z));
+    result.m8 = z.x;
+    result.m9 = z.y;
+    result.m10 = z.z;
+    result.m11 = -((z.x*eye.x) + (z.y*eye.y) + (z.z*eye.z));
+    result.m12 = 0.0f;
+    result.m13 = 0.0f;
+    result.m14 = 0.0f;
+    result.m15 = 1.0f;
+
+    return result;
+}
+
+// Print matrix utility (for debug)
+RMDEF void PrintMatrix(Matrix m)
+{
+    printf("----------------------\n");
+    printf("%2.2f %2.2f %2.2f %2.2f\n", m.m0, m.m4, m.m8, m.m12);
+    printf("%2.2f %2.2f %2.2f %2.2f\n", m.m1, m.m5, m.m9, m.m13);
+    printf("%2.2f %2.2f %2.2f %2.2f\n", m.m2, m.m6, m.m10, m.m14);
+    printf("%2.2f %2.2f %2.2f %2.2f\n", m.m3, m.m7, m.m11, m.m15);
+    printf("----------------------\n");
+}
+
+//----------------------------------------------------------------------------------
+// Module Functions Definition - Quaternion math
+//----------------------------------------------------------------------------------
+
+// Computes the length of a quaternion
+RMDEF float QuaternionLength(Quaternion quat)
+{
+    return sqrt(quat.x*quat.x + quat.y*quat.y + quat.z*quat.z + quat.w*quat.w);
+}
+
+// Normalize provided quaternion
+RMDEF void QuaternionNormalize(Quaternion *q)
+{
+    float length, ilength;
+
+    length = QuaternionLength(*q);
+
+    if (length == 0.0f) length = 1.0f;
+
+    ilength = 1.0f/length;
+
+    q->x *= ilength;
+    q->y *= ilength;
+    q->z *= ilength;
+    q->w *= ilength;
+}
+
+// Calculate two quaternion multiplication
+RMDEF Quaternion QuaternionMultiply(Quaternion q1, Quaternion q2)
+{
+    Quaternion result;
+
+    float qax = q1.x, qay = q1.y, qaz = q1.z, qaw = q1.w;
+    float qbx = q2.x, qby = q2.y, qbz = q2.z, qbw = q2.w;
+
+    result.x = qax*qbw + qaw*qbx + qay*qbz - qaz*qby;
+    result.y = qay*qbw + qaw*qby + qaz*qbx - qax*qbz;
+    result.z = qaz*qbw + qaw*qbz + qax*qby - qay*qbx;
+    result.w = qaw*qbw - qax*qbx - qay*qby - qaz*qbz;
+
+    return result;
+}
+
+// Calculates spherical linear interpolation between two quaternions
+RMDEF Quaternion QuaternionSlerp(Quaternion q1, Quaternion q2, float amount)
+{
+    Quaternion result;
+
+    float cosHalfTheta =  q1.x*q2.x + q1.y*q2.y + q1.z*q2.z + q1.w*q2.w;
+
+    if (fabs(cosHalfTheta) >= 1.0f) result = q1;
+    else
+    {
+        float halfTheta = acos(cosHalfTheta);
+        float sinHalfTheta = sqrt(1.0f - cosHalfTheta*cosHalfTheta);
+
+        if (fabs(sinHalfTheta) < 0.001f)
+        {
+            result.x = (q1.x*0.5f + q2.x*0.5f);
+            result.y = (q1.y*0.5f + q2.y*0.5f);
+            result.z = (q1.z*0.5f + q2.z*0.5f);
+            result.w = (q1.w*0.5f + q2.w*0.5f);
+        }
+        else
+        {
+            float ratioA = sinf((1 - amount)*halfTheta)/sinHalfTheta;
+            float ratioB = sinf(amount*halfTheta)/sinHalfTheta;
+
+            result.x = (q1.x*ratioA + q2.x*ratioB);
+            result.y = (q1.y*ratioA + q2.y*ratioB);
+            result.z = (q1.z*ratioA + q2.z*ratioB);
+            result.w = (q1.w*ratioA + q2.w*ratioB);
+        }
+    }
+
+    return result;
+}
+
+// Returns a quaternion for a given rotation matrix
+RMDEF Quaternion QuaternionFromMatrix(Matrix matrix)
+{
+    Quaternion result;
+
+    float trace = MatrixTrace(matrix);
+
+    if (trace > 0.0f)
+    {
+        float s = (float)sqrt(trace + 1)*2.0f;
+        float invS = 1.0f/s;
+
+        result.w = s*0.25f;
+        result.x = (matrix.m6 - matrix.m9)*invS;
+        result.y = (matrix.m8 - matrix.m2)*invS;
+        result.z = (matrix.m1 - matrix.m4)*invS;
+    }
+    else
+    {
+        float m00 = matrix.m0, m11 = matrix.m5, m22 = matrix.m10;
+
+        if (m00 > m11 && m00 > m22)
+        {
+            float s = (float)sqrt(1.0f + m00 - m11 - m22)*2.0f;
+            float invS = 1.0f/s;
+
+            result.w = (matrix.m6 - matrix.m9)*invS;
+            result.x = s*0.25f;
+            result.y = (matrix.m4 + matrix.m1)*invS;
+            result.z = (matrix.m8 + matrix.m2)*invS;
+        }
+        else if (m11 > m22)
+        {
+            float s = (float)sqrt(1.0f + m11 - m00 - m22)*2.0f;
+            float invS = 1.0f/s;
+
+            result.w = (matrix.m8 - matrix.m2)*invS;
+            result.x = (matrix.m4 + matrix.m1)*invS;
+            result.y = s*0.25f;
+            result.z = (matrix.m9 + matrix.m6)*invS;
+        }
+        else
+        {
+            float s = (float)sqrt(1.0f + m22 - m00 - m11)*2.0f;
+            float invS = 1.0f/s;
+
+            result.w = (matrix.m1 - matrix.m4)*invS;
+            result.x = (matrix.m8 + matrix.m2)*invS;
+            result.y = (matrix.m9 + matrix.m6)*invS;
+            result.z = s*0.25f;
+        }
+    }
+
+    return result;
+}
+
+// Returns a matrix for a given quaternion
+RMDEF Matrix QuaternionToMatrix(Quaternion q)
+{
+    Matrix result;
+
+    float x = q.x, y = q.y, z = q.z, w = q.w;
+
+    float x2 = x + x;
+    float y2 = y + y;
+    float z2 = z + z;
+
+    float xx = x*x2;
+    float xy = x*y2;
+    float xz = x*z2;
+
+    float yy = y*y2;
+    float yz = y*z2;
+    float zz = z*z2;
+
+    float wx = w*x2;
+    float wy = w*y2;
+    float wz = w*z2;
+
+    result.m0 = 1.0f - (yy + zz);
+    result.m1 = xy - wz;
+    result.m2 = xz + wy;
+    result.m3 = 0.0f;
+    result.m4 = xy + wz;
+    result.m5 = 1.0f - (xx + zz);
+    result.m6 = yz - wx;
+    result.m7 = 0.0f;
+    result.m8 = xz - wy;
+    result.m9 = yz + wx;
+    result.m10 = 1.0f - (xx + yy);
+    result.m11 = 0.0f;
+    result.m12 = 0.0f;
+    result.m13 = 0.0f;
+    result.m14 = 0.0f;
+    result.m15 = 1.0f;
+    
+    return result;
+}
+
+// Returns rotation quaternion for an angle and axis
+// NOTE: angle must be provided in radians
+RMDEF Quaternion QuaternionFromAxisAngle(Vector3 axis, float angle)
+{
+    Quaternion result = { 0.0f, 0.0f, 0.0f, 1.0f };
+
+    // NOTE: If the axis is a zero vector, the identity quaternion is returned
+    if (VectorLength(axis) != 0.0f)
+    {
+        angle *= 0.5f;
+
+        VectorNormalize(&axis);
+
+        float sinres = sinf(angle);
+        float cosres = cosf(angle);
+
+        result.x = axis.x*sinres;
+        result.y = axis.y*sinres;
+        result.z = axis.z*sinres;
+        result.w = cosres;
+
+        QuaternionNormalize(&result);
+    }
+
+    return result;
+}
+
+// Returns the rotation angle and axis for a given quaternion
+RMDEF void QuaternionToAxisAngle(Quaternion q, Vector3 *outAxis, float *outAngle)
+{
+    if (fabs(q.w) > 1.0f) QuaternionNormalize(&q);
+
+    Vector3 resAxis = { 0.0f, 0.0f, 0.0f };
+    float resAngle = 0.0f;
+
+    resAngle = 2.0f*(float)acos(q.w);
+    float den = (float)sqrt(1.0f - q.w*q.w);
+
+    if (den > 0.0001f)
+    {
+        resAxis.x = q.x/den;
+        resAxis.y = q.y/den;
+        resAxis.z = q.z/den;
+    }
+    else
+    {
+        // This occurs when the angle is zero.
+        // Not a problem: just set an arbitrary normalized axis.
+        resAxis.x = 1.0f;
+    }
+
+    *outAxis = resAxis;
+    *outAngle = resAngle;
+}
+
+// Transform a quaternion given a transformation matrix
+RMDEF void QuaternionTransform(Quaternion *q, Matrix mat)
+{
+    float x = q->x;
+    float y = q->y;
+    float z = q->z;
+    float w = q->w;
+
+    q->x = mat.m0*x + mat.m4*y + mat.m8*z + mat.m12*w;
+    q->y = mat.m1*x + mat.m5*y + mat.m9*z + mat.m13*w;
+    q->z = mat.m2*x + mat.m6*y + mat.m10*z + mat.m14*w;
+    q->w = mat.m3*x + mat.m7*y + mat.m11*z + mat.m15*w;
+}
+
+#endif  // RAYMATH_IMPLEMENTATION

+ 2960 - 0
examples/oculus_glfw_sample/rlgl.c

@@ -0,0 +1,2960 @@
+/**********************************************************************************************
+*
+*   rlgl - raylib OpenGL abstraction layer
+*
+*   raylib now uses OpenGL 1.1 style functions (rlVertex) that are mapped to the selected OpenGL version:
+*       OpenGL 1.1  - Direct map rl* -> gl*
+*       OpenGL 3.3  - Vertex data is stored in VAOs, call rlglDraw() to render
+*       OpenGL ES 2 - Vertex data is stored in VBOs or VAOs (when available), call rlglDraw() to render
+*
+*   Copyright (c) 2014 Ramon Santamaria (@raysan5)
+*
+*   This software is provided "as-is", without any express or implied warranty. In no event
+*   will the authors be held liable for any damages arising from the use of this software.
+*
+*   Permission is granted to anyone to use this software for any purpose, including commercial
+*   applications, and to alter it and redistribute it freely, subject to the following restrictions:
+*
+*     1. The origin of this software must not be misrepresented; you must not claim that you
+*     wrote the original software. If you use this software in a product, an acknowledgment
+*     in the product documentation would be appreciated but is not required.
+*
+*     2. Altered source versions must be plainly marked as such, and must not be misrepresented
+*     as being the original software.
+*
+*     3. This notice may not be removed or altered from any source distribution.
+*
+**********************************************************************************************/
+
+#include "rlgl.h"
+
+#include <stdio.h>          // Standard input / output lib
+#include <stdlib.h>         // Declares malloc() and free() for memory management, rand()
+#include <string.h>         // Declares strcmp(), strlen(), strtok()
+
+#ifndef RLGL_STANDALONE
+    #include "raymath.h"    // Required for Vector3 and Matrix functions
+#endif
+
+#if defined(GRAPHICS_API_OPENGL_11)
+    #ifdef __APPLE__                // OpenGL include for OSX
+        #include <OpenGL/gl.h>
+    #else
+        #include <GL/gl.h>          // Basic OpenGL include
+    #endif
+#endif
+
+#if defined(GRAPHICS_API_OPENGL_33)
+    #ifdef __APPLE__                // OpenGL include for OSX
+        #include <OpenGL/gl3.h>
+    #else
+        //#define GLEW_STATIC
+        //#include <GL/glew.h>        // GLEW header, includes OpenGL headers
+        #include "glad.h"         // glad header, includes OpenGL headers
+    #endif
+#endif
+
+#if defined(GRAPHICS_API_OPENGL_ES2)
+    #include <EGL/egl.h>
+    #include <GLES2/gl2.h>
+    #include <GLES2/gl2ext.h>
+#endif
+
+#if defined(RLGL_STANDALONE)
+    #include <stdarg.h>         // Used for functions with variable number of parameters (TraceLog())
+#endif
+
+//----------------------------------------------------------------------------------
+// Defines and Macros
+//----------------------------------------------------------------------------------
+#define MATRIX_STACK_SIZE          16   // Matrix stack max size
+#define MAX_DRAWS_BY_TEXTURE      256   // Draws are organized by texture changes
+#define TEMP_VERTEX_BUFFER_SIZE  4096   // Temporary vertex buffer (required for vertex transformations)
+                                        // NOTE: Every vertex is 3 floats (12 bytes)
+
+#ifndef GL_SHADING_LANGUAGE_VERSION
+    #define GL_SHADING_LANGUAGE_VERSION         0x8B8C
+#endif
+
+#ifndef GL_COMPRESSED_RGB_S3TC_DXT1_EXT
+    #define GL_COMPRESSED_RGB_S3TC_DXT1_EXT     0x83F0
+#endif
+#ifndef GL_COMPRESSED_RGBA_S3TC_DXT1_EXT
+    #define GL_COMPRESSED_RGBA_S3TC_DXT1_EXT    0x83F1
+#endif
+#ifndef GL_COMPRESSED_RGBA_S3TC_DXT3_EXT
+    #define GL_COMPRESSED_RGBA_S3TC_DXT3_EXT    0x83F2
+#endif
+#ifndef GL_COMPRESSED_RGBA_S3TC_DXT5_EXT
+    #define GL_COMPRESSED_RGBA_S3TC_DXT5_EXT    0x83F3
+#endif
+#ifndef GL_ETC1_RGB8_OES
+    #define GL_ETC1_RGB8_OES                    0x8D64
+#endif
+#ifndef GL_COMPRESSED_RGB8_ETC2
+    #define GL_COMPRESSED_RGB8_ETC2             0x9274
+#endif
+#ifndef GL_COMPRESSED_RGBA8_ETC2_EAC
+    #define GL_COMPRESSED_RGBA8_ETC2_EAC        0x9278
+#endif
+#ifndef GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG
+    #define GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG  0x8C00
+#endif
+#ifndef GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG
+    #define GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG 0x8C02
+#endif
+#ifndef GL_COMPRESSED_RGBA_ASTC_4x4_KHR
+    #define GL_COMPRESSED_RGBA_ASTC_4x4_KHR     0x93b0
+#endif
+#ifndef GL_COMPRESSED_RGBA_ASTC_8x8_KHR
+    #define GL_COMPRESSED_RGBA_ASTC_8x8_KHR     0x93b7
+#endif
+
+#if defined(GRAPHICS_API_OPENGL_11)
+    #define GL_UNSIGNED_SHORT_5_6_5     0x8363
+    #define GL_UNSIGNED_SHORT_5_5_5_1   0x8034
+    #define GL_UNSIGNED_SHORT_4_4_4_4   0x8033
+#endif
+//----------------------------------------------------------------------------------
+// Types and Structures Definition
+//----------------------------------------------------------------------------------
+
+// Vertex buffer (position + color arrays)
+// NOTE: Used for lines and triangles VAOs
+typedef struct {
+    int vCounter;
+    int cCounter;
+    float *vertices;            // 3 components per vertex
+    unsigned char *colors;      // 4 components per vertex
+} VertexPositionColorBuffer;
+
+// Vertex buffer (position + texcoords + color arrays)
+// NOTE: Not used
+typedef struct {
+    int vCounter;
+    int tcCounter;
+    int cCounter;
+    float *vertices;            // 3 components per vertex
+    float *texcoords;           // 2 components per vertex
+    unsigned char *colors;      // 4 components per vertex
+} VertexPositionColorTextureBuffer;
+
+// Vertex buffer (position + texcoords + normals arrays)
+// NOTE: Not used
+typedef struct {
+    int vCounter;
+    int tcCounter;
+    int nCounter;
+    float *vertices;            // 3 components per vertex
+    float *texcoords;           // 2 components per vertex
+    float *normals;             // 3 components per vertex
+    //short *normals;           // NOTE: Less data load... but padding issues and normalizing required!
+} VertexPositionTextureNormalBuffer;
+
+// Vertex buffer (position + texcoords + colors + indices arrays)
+// NOTE: Used for quads VAO
+typedef struct {
+    int vCounter;
+    int tcCounter;
+    int cCounter;
+    float *vertices;            // 3 components per vertex
+    float *texcoords;           // 2 components per vertex
+    unsigned char *colors;      // 4 components per vertex
+#if defined(GRAPHICS_API_OPENGL_11) || defined(GRAPHICS_API_OPENGL_33)
+    unsigned int *indices;      // 6 indices per quad (could be int)
+#elif defined(GRAPHICS_API_OPENGL_ES2)
+    unsigned short *indices;    // 6 indices per quad (must be short)
+                                // NOTE: 6*2 byte = 12 byte, not alignment problem!
+#endif
+} VertexPositionColorTextureIndexBuffer;
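+// NOTE (illustrative): for quad n, the 6 indices describe two triangles sharing a diagonal,
+// typically 4n, 4n+1, 4n+2 and 4n, 4n+2, 4n+3 (the exact pattern is set when the buffers are initialized)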
+
+// Draw call type
+// NOTE: Used to track required draw-calls, organized by texture
+typedef struct {
+    GLuint textureId;
+    int vertexCount;
+    // TODO: Store draw state -> blending mode, shader
+} DrawCall;
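+// NOTE (illustrative): a new DrawCall entry is started whenever the bound texture changes,
+// e.g. drawing with texture A, then B, then A again results in three entries in draws[]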
+
+#if defined(RLGL_STANDALONE)
+typedef enum { INFO = 0, ERROR, WARNING, DEBUG, OTHER } TraceLogType;
+#endif
+
+//----------------------------------------------------------------------------------
+// Global Variables Definition
+//----------------------------------------------------------------------------------
+#if defined(GRAPHICS_API_OPENGL_33) || defined(GRAPHICS_API_OPENGL_ES2)
+static Matrix stack[MATRIX_STACK_SIZE];
+static int stackCounter = 0;
+
+static Matrix modelview;
+static Matrix projection;
+static Matrix *currentMatrix;
+static int currentMatrixMode;
+
+static DrawMode currentDrawMode;
+
+static float currentDepth = -1.0f;
+
+// Vertex arrays for lines, triangles and quads
+static VertexPositionColorBuffer lines;         // No texture support
+static VertexPositionColorBuffer triangles;     // No texture support
+static VertexPositionColorTextureIndexBuffer quads;
+
+// Shader Programs
+static Shader defaultShader;
+static Shader currentShader;                    // By default, defaultShader
+
+// Vertex Array Objects (VAO)
+static GLuint vaoLines, vaoTriangles, vaoQuads;
+
+// Vertex Buffer Objects (VBO)
+static GLuint linesBuffer[2];
+static GLuint trianglesBuffer[2];
+static GLuint quadsBuffer[4];
+
+static DrawCall *draws;
+static int drawsCounter;
+
+// Temp vertex buffer to be used with rlTranslate, rlRotate, rlScale
+static Vector3 *tempBuffer;
+static int tempBufferCount = 0;
+static bool useTempBuffer = false;
+
+// Flags for supported extensions
+static bool vaoSupported = false;   // VAO support (OpenGL ES2 may not support the VAO extension)
+
+// Compressed textures support flags
+//static bool texCompDXTSupported = false;     // DDS texture compression support
+static bool texCompETC1Supported = false;    // ETC1 texture compression support
+static bool texCompETC2Supported = false;    // ETC2/EAC texture compression support
+static bool texCompPVRTSupported = false;    // PVR texture compression support
+static bool texCompASTCSupported = false;    // ASTC texture compression support
+#endif
+
+// Compressed textures support flags
+static bool texCompDXTSupported = false;   // DDS texture compression support
+static bool npotSupported = false;         // NPOT textures full support
+
+#if defined(GRAPHICS_API_OPENGL_ES2)
+// NOTE: VAO functionality is exposed through extensions (OES)
+static PFNGLGENVERTEXARRAYSOESPROC glGenVertexArrays;
+static PFNGLBINDVERTEXARRAYOESPROC glBindVertexArray;
+static PFNGLDELETEVERTEXARRAYSOESPROC glDeleteVertexArrays;
+//static PFNGLISVERTEXARRAYOESPROC glIsVertexArray;        // NOTE: Fails in WebGL, omitted
+#endif
+
+static int blendMode = 0;
+
+// White texture useful for plain color polys (required by shader)
+// NOTE: It's required in shapes and models modules!
+unsigned int whiteTexture;
+
+//----------------------------------------------------------------------------------
+// Module specific Functions Declaration
+//----------------------------------------------------------------------------------
+#if defined(GRAPHICS_API_OPENGL_33) || defined(GRAPHICS_API_OPENGL_ES2)
+static Shader LoadDefaultShader(void);
+static void LoadDefaultShaderLocations(Shader *shader);
+static void InitializeBuffers(void);
+static void InitializeBuffersGPU(void);
+static void UpdateBuffers(void);
+static char *TextFileRead(char *fn);
+
+static void LoadCompressedTexture(unsigned char *data, int width, int height, int mipmapCount, int compressedFormat);
+#endif
+
+#if defined(GRAPHICS_API_OPENGL_11)
+static int GenerateMipmaps(unsigned char *data, int baseWidth, int baseHeight);
+static Color *GenNextMipmap(Color *srcData, int srcWidth, int srcHeight);
+#endif
+
+#if defined(RLGL_STANDALONE)
+static void TraceLog(int msgType, const char *text, ...);
+float *MatrixToFloat(Matrix mat);   // Converts Matrix to float array
+#endif
+
+#if defined(GRAPHICS_API_OPENGL_ES2)
+// NOTE: strdup() replacement (strdup() is a POSIX function, not C99, and not available on emscripten)
+// Duplicates a string, returning an identical malloc'd string
+char *mystrdup(const char *str)
+{
+    size_t len = strlen(str) + 1;
+    void *newstr = malloc(len);
+
+    if (newstr == NULL) return NULL;
+
+    return (char *)memcpy(newstr, str, len);
+}
+#endif
+
+//----------------------------------------------------------------------------------
+// Module Functions Definition - Matrix operations
+//----------------------------------------------------------------------------------
+
+#if defined(GRAPHICS_API_OPENGL_11)
+
+// Fallback to OpenGL 1.1 function calls
+//---------------------------------------
+void rlMatrixMode(int mode)
+{
+    switch (mode)
+    {
+        case RL_PROJECTION: glMatrixMode(GL_PROJECTION); break;
+        case RL_MODELVIEW: glMatrixMode(GL_MODELVIEW); break;
+        case RL_TEXTURE: glMatrixMode(GL_TEXTURE); break;
+        default: break;
+    }
+}
+
+void rlFrustum(double left, double right, double bottom, double top, double near, double far)
+{
+    glFrustum(left, right, bottom, top, near, far);
+}
+
+void rlOrtho(double left, double right, double bottom, double top, double near, double far)
+{
+    glOrtho(left, right, bottom, top, near, far);
+}
+
+void rlPushMatrix(void) { glPushMatrix(); }
+void rlPopMatrix(void) { glPopMatrix(); }
+void rlLoadIdentity(void) { glLoadIdentity(); }
+void rlTranslatef(float x, float y, float z) { glTranslatef(x, y, z); }
+void rlRotatef(float angleDeg, float x, float y, float z) { glRotatef(angleDeg, x, y, z); }
+void rlScalef(float x, float y, float z) { glScalef(x, y, z); }
+void rlMultMatrixf(float *mat) { glMultMatrixf(mat); }
+
+#elif defined(GRAPHICS_API_OPENGL_33) || defined(GRAPHICS_API_OPENGL_ES2)
+
+// Choose the current matrix to be transformed
+void rlMatrixMode(int mode)
+{
+    if (mode == RL_PROJECTION) currentMatrix = &projection;
+    else if (mode == RL_MODELVIEW) currentMatrix = &modelview;
+    //else if (mode == RL_TEXTURE) // Not supported
+
+    currentMatrixMode = mode;
+}
+
+// Push the current matrix to stack
+void rlPushMatrix(void)
+{
+    if (stackCounter == MATRIX_STACK_SIZE - 1)
+    {
+        TraceLog(ERROR, "Stack Buffer Overflow (MAX %i Matrix)", MATRIX_STACK_SIZE);
+    }
+
+    stack[stackCounter] = *currentMatrix;
+    rlLoadIdentity();
+    stackCounter++;
+
+    if (currentMatrixMode == RL_MODELVIEW) useTempBuffer = true;
+}
+
+// Pop latest inserted matrix from stack
+void rlPopMatrix(void)
+{
+    if (stackCounter > 0)
+    {
+        Matrix mat = stack[stackCounter - 1];
+        *currentMatrix = mat;
+        stackCounter--;
+    }
+}
+
+// Reset current matrix to identity matrix
+void rlLoadIdentity(void)
+{
+    *currentMatrix = MatrixIdentity();
+}
+
+// Multiply the current matrix by a translation matrix
+void rlTranslatef(float x, float y, float z)
+{
+    Matrix matTranslation = MatrixTranslate(x, y, z);
+    MatrixTranspose(&matTranslation);
+
+    *currentMatrix = MatrixMultiply(*currentMatrix, matTranslation);
+}
+
+// Multiply the current matrix by a rotation matrix
+void rlRotatef(float angleDeg, float x, float y, float z)
+{
+    Matrix matRotation = MatrixIdentity();
+
+    Vector3 axis = (Vector3){ x, y, z };
+    VectorNormalize(&axis);
+    matRotation = MatrixRotate(axis, angleDeg*DEG2RAD);
+
+    MatrixTranspose(&matRotation);
+
+    *currentMatrix = MatrixMultiply(*currentMatrix, matRotation);
+}
+
+// Multiply the current matrix by a scaling matrix
+void rlScalef(float x, float y, float z)
+{
+    Matrix matScale = MatrixScale(x, y, z);
+    MatrixTranspose(&matScale);
+
+    *currentMatrix = MatrixMultiply(*currentMatrix, matScale);
+}
+
+// Multiply the current matrix by another matrix
+void rlMultMatrixf(float *m)
+{
+    // Matrix creation from array
+    Matrix mat = { m[0], m[1], m[2], m[3],
+                   m[4], m[5], m[6], m[7],
+                   m[8], m[9], m[10], m[11],
+                   m[12], m[13], m[14], m[15] };
+
+    *currentMatrix = MatrixMultiply(*currentMatrix, mat);
+}
+
+// Multiply the current matrix by a perspective matrix generated by parameters
+void rlFrustum(double left, double right, double bottom, double top, double near, double far)
+{
+    Matrix matPerps = MatrixFrustum(left, right, bottom, top, near, far);
+    MatrixTranspose(&matPerps);
+
+    *currentMatrix = MatrixMultiply(*currentMatrix, matPerps);
+}
+
+// Multiply the current matrix by an orthographic matrix generated by parameters
+void rlOrtho(double left, double right, double bottom, double top, double near, double far)
+{
+    Matrix matOrtho = MatrixOrtho(left, right, bottom, top, near, far);
+    MatrixTranspose(&matOrtho);
+
+    *currentMatrix = MatrixMultiply(*currentMatrix, matOrtho);
+}
+
+#endif
+
+//----------------------------------------------------------------------------------
+// Module Functions Definition - Vertex level operations
+//----------------------------------------------------------------------------------
+#if defined(GRAPHICS_API_OPENGL_11)
+
+// Fallback to OpenGL 1.1 function calls
+//---------------------------------------
+void rlBegin(int mode)
+{
+    switch (mode)
+    {
+        case RL_LINES: glBegin(GL_LINES); break;
+        case RL_TRIANGLES: glBegin(GL_TRIANGLES); break;
+        case RL_QUADS: glBegin(GL_QUADS); break;
+        default: break;
+    }
+}
+
+void rlEnd() { glEnd(); }
+void rlVertex2i(int x, int y) { glVertex2i(x, y); }
+void rlVertex2f(float x, float y) { glVertex2f(x, y); }
+void rlVertex3f(float x, float y, float z) { glVertex3f(x, y, z); }
+void rlTexCoord2f(float x, float y) { glTexCoord2f(x, y); }
+void rlNormal3f(float x, float y, float z) { glNormal3f(x, y, z); }
+void rlColor4ub(byte r, byte g, byte b, byte a) { glColor4ub(r, g, b, a); }
+void rlColor3f(float x, float y, float z) { glColor3f(x, y, z); }
+void rlColor4f(float x, float y, float z, float w) { glColor4f(x, y, z, w); }
+
+#elif defined(GRAPHICS_API_OPENGL_33) || defined(GRAPHICS_API_OPENGL_ES2)
+
+// Initialize drawing mode (how to organize vertex)
+void rlBegin(int mode)
+{
+    // Draw mode can only be RL_LINES, RL_TRIANGLES and RL_QUADS
+    currentDrawMode = mode;
+}
+
+// Finish vertex providing
+void rlEnd(void)
+{
+    if (useTempBuffer)
+    {
+        // NOTE: In this case, *currentMatrix is already transposed because transposing has been applied
+        // independently to translation-scale-rotation matrices -> t(M1 x M2) = t(M2) x t(M1)
+        // This way, rlTranslatef(), rlRotatef()... behaviour is the same as in OpenGL 1.1
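+        // (illustrative) e.g. for two stacked transforms M1, M2: t(M2) x t(M1) = t(M1 x M2),
+        // so accumulating per-call transposes is equivalent to transposing the combined transform once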
+
+        // Apply transformation matrix to all temp vertices
+        for (int i = 0; i < tempBufferCount; i++) VectorTransform(&tempBuffer[i], *currentMatrix);
+
+        // Deactivate tempBuffer usage to allow rlVertex3f() to do its job
+        useTempBuffer = false;
+
+        // Copy all transformed vertices to right VAO
+        for (int i = 0; i < tempBufferCount; i++) rlVertex3f(tempBuffer[i].x, tempBuffer[i].y, tempBuffer[i].z);
+
+        // Reset temp buffer
+        tempBufferCount = 0;
+    }
+
+    // Make sure vertexCount is the same for vertices-texcoords-normals-colors
+    // NOTE: In OpenGL 1.1, one glColor call can be made for all the subsequent glVertex calls.
+    switch (currentDrawMode)
+    {
+        case RL_LINES:
+        {
+            if (lines.vCounter != lines.cCounter)
+            {
+                int addColors = lines.vCounter - lines.cCounter;
+
+                for (int i = 0; i < addColors; i++)
+                {
+                    lines.colors[4*lines.cCounter] = lines.colors[4*lines.cCounter - 4];
+                    lines.colors[4*lines.cCounter + 1] = lines.colors[4*lines.cCounter - 3];
+                    lines.colors[4*lines.cCounter + 2] = lines.colors[4*lines.cCounter - 2];
+                    lines.colors[4*lines.cCounter + 3] = lines.colors[4*lines.cCounter - 1];
+
+                    lines.cCounter++;
+                }
+            }
+        } break;
+        case RL_TRIANGLES:
+        {
+            if (triangles.vCounter != triangles.cCounter)
+            {
+                int addColors = triangles.vCounter - triangles.cCounter;
+
+                for (int i = 0; i < addColors; i++)
+                {
+                    triangles.colors[4*triangles.cCounter] = triangles.colors[4*triangles.cCounter - 4];
+                    triangles.colors[4*triangles.cCounter + 1] = triangles.colors[4*triangles.cCounter - 3];
+                    triangles.colors[4*triangles.cCounter + 2] = triangles.colors[4*triangles.cCounter - 2];
+                    triangles.colors[4*triangles.cCounter + 3] = triangles.colors[4*triangles.cCounter - 1];
+
+                    triangles.cCounter++;
+                }
+            }
+        } break;
+        case RL_QUADS:
+        {
+            // Make sure colors count match vertex count
+            if (quads.vCounter != quads.cCounter)
+            {
+                int addColors = quads.vCounter - quads.cCounter;
+
+                for (int i = 0; i < addColors; i++)
+                {
+                    quads.colors[4*quads.cCounter] = quads.colors[4*quads.cCounter - 4];
+                    quads.colors[4*quads.cCounter + 1] = quads.colors[4*quads.cCounter - 3];
+                    quads.colors[4*quads.cCounter + 2] = quads.colors[4*quads.cCounter - 2];
+                    quads.colors[4*quads.cCounter + 3] = quads.colors[4*quads.cCounter - 1];
+
+                    quads.cCounter++;
+                }
+            }
+
+            // Make sure texcoords count match vertex count
+            if (quads.vCounter != quads.tcCounter)
+            {
+                int addTexCoords = quads.vCounter - quads.tcCounter;
+
+                for (int i = 0; i < addTexCoords; i++)
+                {
+                    quads.texcoords[2*quads.tcCounter] = 0.0f;
+                    quads.texcoords[2*quads.tcCounter + 1] = 0.0f;
+
+                    quads.tcCounter++;
+                }
+            }
+
+            // TODO: Make sure normals count match vertex count... if normals support is added in a future... :P
+
+        } break;
+        default: break;
+    }
+    
+    // NOTE: Depth increment is dependent on rlOrtho(): z-near and z-far values,
+    // as well as depth buffer bit-depth (16bit or 24bit or 32bit)
+    // Correct increment formula would be: depthInc = (zfar - znear)/pow(2, bits)
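+    // (illustrative) e.g. with a 16-bit depth buffer and a 0.0..1.0 ortho z-range, that formula
+    // gives depthInc = 1.0/pow(2,16) ~= 0.000015; the hardcoded 1.0f/20000.0f is a slightly
+    // larger (safer) step of the same order of magnitude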
+    currentDepth += (1.0f/20000.0f);
+}
+
+// Define one vertex (position)
+void rlVertex3f(float x, float y, float z)
+{
+    if (useTempBuffer)
+    {
+        tempBuffer[tempBufferCount].x = x;
+        tempBuffer[tempBufferCount].y = y;
+        tempBuffer[tempBufferCount].z = z;
+        tempBufferCount++;
+    }
+    else
+    {
+        switch (currentDrawMode)
+        {
+            case RL_LINES:
+            {
+                // Verify that MAX_LINES_BATCH limit not reached
+                if (lines.vCounter / 2 < MAX_LINES_BATCH)
+                {
+                    lines.vertices[3*lines.vCounter] = x;
+                    lines.vertices[3*lines.vCounter + 1] = y;
+                    lines.vertices[3*lines.vCounter + 2] = z;
+
+                    lines.vCounter++;
+                }
+                else TraceLog(ERROR, "MAX_LINES_BATCH overflow");
+
+            } break;
+            case RL_TRIANGLES:
+            {
+                // Verify that MAX_TRIANGLES_BATCH limit not reached
+                if (triangles.vCounter / 3 < MAX_TRIANGLES_BATCH)
+                {
+                    triangles.vertices[3*triangles.vCounter] = x;
+                    triangles.vertices[3*triangles.vCounter + 1] = y;
+                    triangles.vertices[3*triangles.vCounter + 2] = z;
+
+                    triangles.vCounter++;
+                }
+                else TraceLog(ERROR, "MAX_TRIANGLES_BATCH overflow");
+
+            } break;
+            case RL_QUADS:
+            {
+                // Verify that MAX_QUADS_BATCH limit not reached
+                if (quads.vCounter / 4 < MAX_QUADS_BATCH)
+                {
+                    quads.vertices[3*quads.vCounter] = x;
+                    quads.vertices[3*quads.vCounter + 1] = y;
+                    quads.vertices[3*quads.vCounter + 2] = z;
+
+                    quads.vCounter++;
+
+                    draws[drawsCounter - 1].vertexCount++;
+                }
+                else TraceLog(ERROR, "MAX_QUADS_BATCH overflow");
+
+            } break;
+            default: break;
+        }
+    }
+}
+
+// Define one vertex (position)
+void rlVertex2f(float x, float y)
+{
+    rlVertex3f(x, y, currentDepth);
+}
+
+// Define one vertex (position)
+void rlVertex2i(int x, int y)
+{
+    rlVertex3f((float)x, (float)y, currentDepth);
+}
+
+// Define one vertex (texture coordinate)
+// NOTE: Texture coordinates are limited to QUADS only
+void rlTexCoord2f(float x, float y)
+{
+    if (currentDrawMode == RL_QUADS)
+    {
+        quads.texcoords[2*quads.tcCounter] = x;
+        quads.texcoords[2*quads.tcCounter + 1] = y;
+
+        quads.tcCounter++;
+    }
+}
+
+// Define one vertex (normal)
+// NOTE: Normals limited to TRIANGLES only ?
+void rlNormal3f(float x, float y, float z)
+{
+    // TODO: Normals usage...
+}
+
+// Define one vertex (color)
+void rlColor4ub(byte x, byte y, byte z, byte w)
+{
+    switch (currentDrawMode)
+    {
+        case RL_LINES:
+        {
+            lines.colors[4*lines.cCounter] = x;
+            lines.colors[4*lines.cCounter + 1] = y;
+            lines.colors[4*lines.cCounter + 2] = z;
+            lines.colors[4*lines.cCounter + 3] = w;
+
+            lines.cCounter++;
+
+        } break;
+        case RL_TRIANGLES:
+        {
+            triangles.colors[4*triangles.cCounter] = x;
+            triangles.colors[4*triangles.cCounter + 1] = y;
+            triangles.colors[4*triangles.cCounter + 2] = z;
+            triangles.colors[4*triangles.cCounter + 3] = w;
+
+            triangles.cCounter++;
+
+        } break;
+        case RL_QUADS:
+        {
+            quads.colors[4*quads.cCounter] = x;
+            quads.colors[4*quads.cCounter + 1] = y;
+            quads.colors[4*quads.cCounter + 2] = z;
+            quads.colors[4*quads.cCounter + 3] = w;
+
+            quads.cCounter++;
+
+        } break;
+        default: break;
+    }
+}
+
+// Define one vertex (color)
+void rlColor4f(float r, float g, float b, float a)
+{
+    rlColor4ub((byte)(r*255), (byte)(g*255), (byte)(b*255), (byte)(a*255));
+}
+
+// Define one vertex (color)
+void rlColor3f(float x, float y, float z)
+{
+    rlColor4ub((byte)(x*255), (byte)(y*255), (byte)(z*255), 255);
+}
+
+#endif
+
+//----------------------------------------------------------------------------------
+// Module Functions Definition - OpenGL equivalent functions (common to 1.1, 3.3+, ES2)
+//----------------------------------------------------------------------------------
+
+// Enable texture usage
+void rlEnableTexture(unsigned int id)
+{
+#if defined(GRAPHICS_API_OPENGL_11)
+    glEnable(GL_TEXTURE_2D);
+    glBindTexture(GL_TEXTURE_2D, id);
+#endif
+
+#if defined(GRAPHICS_API_OPENGL_33) || defined(GRAPHICS_API_OPENGL_ES2)
+    if (draws[drawsCounter - 1].textureId != id)
+    {
+        if (draws[drawsCounter - 1].vertexCount > 0) drawsCounter++;
+
+        draws[drawsCounter - 1].textureId = id;
+        draws[drawsCounter - 1].vertexCount = 0;
+    }
+#endif
+}
+
+// Disable texture usage
+void rlDisableTexture(void)
+{
+#if defined(GRAPHICS_API_OPENGL_11)
+    glDisable(GL_TEXTURE_2D);
+    glBindTexture(GL_TEXTURE_2D, 0);
+#endif
+}
+
+void rlEnableRenderTexture(unsigned int id)
+{
+#if defined(GRAPHICS_API_OPENGL_33) || defined(GRAPHICS_API_OPENGL_ES2)
+    glBindFramebuffer(GL_FRAMEBUFFER, id);
+#endif
+}
+
+void rlDisableRenderTexture(void)
+{
+#if defined(GRAPHICS_API_OPENGL_33) || defined(GRAPHICS_API_OPENGL_ES2)
+    glBindFramebuffer(GL_FRAMEBUFFER, 0);
+#endif
+}
+
+// Enable depth test
+void rlEnableDepthTest(void)
+{
+    glEnable(GL_DEPTH_TEST);
+}
+
+// Disable depth test
+void rlDisableDepthTest(void)
+{
+    glDisable(GL_DEPTH_TEST);
+}
+
+// Unload texture from GPU memory
+void rlDeleteTextures(unsigned int id)
+{
+    glDeleteTextures(1, &id);
+}
+
+// Unload render texture from GPU memory
+void rlDeleteRenderTextures(RenderTexture2D target)
+{
+#if defined(GRAPHICS_API_OPENGL_33) || defined(GRAPHICS_API_OPENGL_ES2)
+    glDeleteFramebuffers(1, &target.id);
+    glDeleteTextures(1, &target.texture.id);
+    glDeleteTextures(1, &target.depth.id);
+#endif
+}
+
+// Unload shader from GPU memory
+void rlDeleteShader(unsigned int id)
+{
+#if defined(GRAPHICS_API_OPENGL_33) || defined(GRAPHICS_API_OPENGL_ES2)
+    glDeleteProgram(id);
+#endif
+}
+
+// Unload vertex data (VAO) from GPU memory
+void rlDeleteVertexArrays(unsigned int id)
+{
+#if defined(GRAPHICS_API_OPENGL_33) || defined(GRAPHICS_API_OPENGL_ES2)
+    if (vaoSupported) 
+    {
+        glDeleteVertexArrays(1, &id);
+        TraceLog(INFO, "[VAO ID %i] Unloaded model data from VRAM (GPU)", id);
+    }
+#endif
+}
+
+// Unload vertex data (VBO) from GPU memory
+void rlDeleteBuffers(unsigned int id)
+{
+#if defined(GRAPHICS_API_OPENGL_33) || defined(GRAPHICS_API_OPENGL_ES2)
+    glDeleteBuffers(1, &id);
+    
+    if (!vaoSupported) TraceLog(INFO, "[VBO ID %i] Unloaded model vertex data from VRAM (GPU)", id);
+#endif
+}
+
+// Clear color buffer with color
+void rlClearColor(byte r, byte g, byte b, byte a)
+{
+    // Color values are converted from the 0-255 byte range to the 0.0f-1.0f float range
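+    // e.g. rlClearColor(51, 51, 51, 255) results in glClearColor(0.2f, 0.2f, 0.2f, 1.0f)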
+    float cr = (float)r/255;
+    float cg = (float)g/255;
+    float cb = (float)b/255;
+    float ca = (float)a/255;
+
+    glClearColor(cr, cg, cb, ca);
+}
+
+// Clear used screen buffers (color and depth)
+void rlClearScreenBuffers(void)
+{
+    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);     // Clear used buffers: Color and Depth (Depth is used for 3D)
+    //glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);     // Stencil buffer not used...
+}
+
+// Returns current OpenGL version
+int rlGetVersion(void)
+{
+#if defined(GRAPHICS_API_OPENGL_11)
+    return OPENGL_11;
+#elif defined(GRAPHICS_API_OPENGL_33)
+    return OPENGL_33;
+#elif defined(GRAPHICS_API_OPENGL_ES2)
+    return OPENGL_ES_20;
+#endif
+}
+
+//----------------------------------------------------------------------------------
+// Module Functions Definition - rlgl Functions
+//----------------------------------------------------------------------------------
+
+// Init OpenGL 3.3+ required data
+void rlglInit(void)
+{
+    // Check OpenGL information and capabilities
+    //------------------------------------------------------------------------------
+    
+    // Print current OpenGL and GLSL version
+    TraceLog(INFO, "GPU: Vendor:   %s", glGetString(GL_VENDOR));
+    TraceLog(INFO, "GPU: Renderer: %s", glGetString(GL_RENDERER));
+    TraceLog(INFO, "GPU: Version:  %s", glGetString(GL_VERSION));
+    TraceLog(INFO, "GPU: GLSL:     %s", glGetString(GL_SHADING_LANGUAGE_VERSION));
+
+    // NOTE: We can get a bunch of extra information about GPU capabilities (glGet*)
+    //int maxTexSize;
+    //glGetIntegerv(GL_MAX_TEXTURE_SIZE, &maxTexSize);
+    //TraceLog(INFO, "GL_MAX_TEXTURE_SIZE: %i", maxTexSize);
+    
+    //GL_MAX_TEXTURE_IMAGE_UNITS
+    //GL_MAX_VIEWPORT_DIMS
+
+    //int numAuxBuffers;
+    //glGetIntegerv(GL_AUX_BUFFERS, &numAuxBuffers);
+    //TraceLog(INFO, "GL_AUX_BUFFERS: %i", numAuxBuffers);
+    
+    //GLint numComp = 0;
+    //GLint format[32] = { 0 };
+    //glGetIntegerv(GL_NUM_COMPRESSED_TEXTURE_FORMATS, &numComp);
+    //glGetIntegerv(GL_COMPRESSED_TEXTURE_FORMATS, format);
+    //for (int i = 0; i < numComp; i++) TraceLog(INFO, "Supported compressed format: 0x%x", format[i]);
+
+    // NOTE: We don't need that much data on screen... right now...
+    
+#if defined(GRAPHICS_API_OPENGL_11)
+    //TraceLog(INFO, "OpenGL 1.1 (or driver default) profile initialized");
+#endif
+
+#if defined(GRAPHICS_API_OPENGL_33) || defined(GRAPHICS_API_OPENGL_ES2)
+    // Get supported extensions list
+    GLint numExt = 0;
+    
+#if defined(GRAPHICS_API_OPENGL_33)
+
+    // NOTE: On OpenGL 3.3 VAO and NPOT are supported by default
+    vaoSupported = true;
+    npotSupported = true;
+
+    // NOTE: We don't need to check supported extensions again, but we do (in case GLEW is replaced at some point)
+    // We get a list of available extensions and we check for some of them (compressed textures)
+    glGetIntegerv(GL_NUM_EXTENSIONS, &numExt);
+    const char *extList[numExt];
+    
+    for (int i = 0; i < numExt; i++) extList[i] = (char *)glGetStringi(GL_EXTENSIONS, i);
+    
+#elif defined(GRAPHICS_API_OPENGL_ES2)
+    char *extensions = (char *)glGetString(GL_EXTENSIONS);  // One big const string
+    
+    // NOTE: We have to duplicate the string because glGetString() returns a const value
+    // If not duplicated, it fails on some systems (Raspberry Pi)
+    char *extensionsDup = mystrdup(extensions);
+    
+    // NOTE: The string can be split using the strtok() function (string.h)
+    // NOTE: strtok() modifies the received string, so it cannot be const
+    
+    char *extList[512];     // Allocate 512 string pointers (2 KB)
+
+    extList[numExt] = strtok(extensionsDup, " ");
+
+    while (extList[numExt] != NULL)
+    {
+        numExt++;
+        extList[numExt] = strtok(NULL, " ");
+    }
+    
+    free(extensionsDup);    // Duplicated string must be deallocated
+    
+#endif
+
+    TraceLog(INFO, "Number of supported extensions: %i", numExt);
+
+    // Show supported extensions
+    //for (int i = 0; i < numExt; i++)  TraceLog(INFO, "Supported extension: %s", extList[i]);
+
+    // Check required extensions
+    for (int i = 0; i < numExt; i++)
+    {
+#if defined(GRAPHICS_API_OPENGL_ES2)
+        // Check VAO support
+        // NOTE: Only check on OpenGL ES, OpenGL 3.3 has VAO support as core feature
+        if (strcmp(extList[i], (const char *)"GL_OES_vertex_array_object") == 0)
+        {
+            vaoSupported = true;
+            
+            // The extension is supported by our hardware and driver, try to get the related function pointers
+            // NOTE: emscripten does not support VAOs natively; it uses emulation, which reduces overall performance...
+            glGenVertexArrays = (PFNGLGENVERTEXARRAYSOESPROC)eglGetProcAddress("glGenVertexArraysOES");
+            glBindVertexArray = (PFNGLBINDVERTEXARRAYOESPROC)eglGetProcAddress("glBindVertexArrayOES");
+            glDeleteVertexArrays = (PFNGLDELETEVERTEXARRAYSOESPROC)eglGetProcAddress("glDeleteVertexArraysOES");
+            //glIsVertexArray = (PFNGLISVERTEXARRAYOESPROC)eglGetProcAddress("glIsVertexArrayOES");     // NOTE: Fails in WebGL, omitted
+        }
+        
+        // Check NPOT textures support
+        // NOTE: Only check on OpenGL ES, OpenGL 3.3 has NPOT textures full support as core feature
+        if (strcmp(extList[i], (const char *)"GL_OES_texture_npot") == 0) npotSupported = true;
+#endif   
+        
+        // DDS texture compression support
+        if ((strcmp(extList[i], (const char *)"GL_EXT_texture_compression_s3tc") == 0) ||
+            (strcmp(extList[i], (const char *)"GL_WEBKIT_WEBGL_compressed_texture_s3tc") == 0)) texCompDXTSupported = true; 
+        
+        // ETC1 texture compression support
+        if (strcmp(extList[i], (const char *)"GL_OES_compressed_ETC1_RGB8_texture") == 0) texCompETC1Supported = true;
+
+        // ETC2/EAC texture compression support
+        if (strcmp(extList[i], (const char *)"GL_ARB_ES3_compatibility") == 0) texCompETC2Supported = true;
+
+        // PVR texture compression support
+        if (strcmp(extList[i], (const char *)"GL_IMG_texture_compression_pvrtc") == 0) texCompPVRTSupported = true;
+
+        // ASTC texture compression support
+        if (strcmp(extList[i], (const char *)"GL_KHR_texture_compression_astc_hdr") == 0) texCompASTCSupported = true;
+    }
+    
+#if defined(GRAPHICS_API_OPENGL_ES2)
+    if (vaoSupported) TraceLog(INFO, "[EXTENSION] VAO extension detected, VAO functions initialized successfully");
+    else TraceLog(WARNING, "[EXTENSION] VAO extension not found, VAO usage not supported");
+    
+    if (npotSupported) TraceLog(INFO, "[EXTENSION] NPOT textures extension detected, full NPOT textures supported");
+    else TraceLog(WARNING, "[EXTENSION] NPOT textures extension not found, limited NPOT support (no-mipmaps, no-repeat)");
+#endif
+
+    if (texCompDXTSupported) TraceLog(INFO, "[EXTENSION] DXT compressed textures supported");
+    if (texCompETC1Supported) TraceLog(INFO, "[EXTENSION] ETC1 compressed textures supported");
+    if (texCompETC2Supported) TraceLog(INFO, "[EXTENSION] ETC2/EAC compressed textures supported");
+    if (texCompPVRTSupported) TraceLog(INFO, "[EXTENSION] PVRT compressed textures supported");
+    if (texCompASTCSupported) TraceLog(INFO, "[EXTENSION] ASTC compressed textures supported");
+
+    // Initialize buffers, default shaders and default textures
+    //----------------------------------------------------------
+    
+    // Set default draw mode
+    currentDrawMode = RL_TRIANGLES;
+
+    // Reset projection and modelview matrices
+    projection = MatrixIdentity();
+    modelview = MatrixIdentity();
+    currentMatrix = &modelview;
+
+    // Initialize matrix stack
+    for (int i = 0; i < MATRIX_STACK_SIZE; i++) stack[i] = MatrixIdentity();
+    
+    // Create default white texture for plain colors (required by shader)
+    unsigned char pixels[4] = { 255, 255, 255, 255 };   // 1 pixel RGBA (4 bytes)
+
+    whiteTexture = rlglLoadTexture(pixels, 1, 1, UNCOMPRESSED_R8G8B8A8, 1);
+
+    if (whiteTexture != 0) TraceLog(INFO, "[TEX ID %i] Base white texture loaded successfully", whiteTexture);
+    else TraceLog(WARNING, "Base white texture could not be loaded");
+
+    // Init default Shader (customized for GL 3.3 and ES2)
+    defaultShader = LoadDefaultShader();
+    //customShader = LoadShader("custom.vs", "custom.fs");     // Works ok
+    
+    currentShader = defaultShader;
+
+    InitializeBuffers();        // Init vertex arrays
+    InitializeBuffersGPU();     // Init VBO and VAO
+
+    // Init temp vertex buffer, used when transformation required (translate, rotate, scale)
+    tempBuffer = (Vector3 *)malloc(sizeof(Vector3)*TEMP_VERTEX_BUFFER_SIZE);
+
+    for (int i = 0; i < TEMP_VERTEX_BUFFER_SIZE; i++) tempBuffer[i] = VectorZero();
+
+    // Init draw calls tracking system
+    draws = (DrawCall *)malloc(sizeof(DrawCall)*MAX_DRAWS_BY_TEXTURE);
+
+    for (int i = 0; i < MAX_DRAWS_BY_TEXTURE; i++)
+    {
+        draws[i].textureId = 0;
+        draws[i].vertexCount = 0;
+    }
+
+    drawsCounter = 1;
+    draws[drawsCounter - 1].textureId = whiteTexture;
+#endif
+}
+
+// Vertex Buffer Object deinitialization (memory free)
+void rlglClose(void)
+{
+#if defined(GRAPHICS_API_OPENGL_33) || defined(GRAPHICS_API_OPENGL_ES2)
+    // Unbind everything
+    if (vaoSupported) glBindVertexArray(0);
+    glDisableVertexAttribArray(0);
+    glDisableVertexAttribArray(1);
+    glDisableVertexAttribArray(2);
+    glDisableVertexAttribArray(3);
+    glBindBuffer(GL_ARRAY_BUFFER, 0);
+    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
+
+    glUseProgram(0);
+
+    // Delete VBOs
+    glDeleteBuffers(1, &linesBuffer[0]);
+    glDeleteBuffers(1, &linesBuffer[1]);
+    glDeleteBuffers(1, &trianglesBuffer[0]);
+    glDeleteBuffers(1, &trianglesBuffer[1]);
+    glDeleteBuffers(1, &quadsBuffer[0]);
+    glDeleteBuffers(1, &quadsBuffer[1]);
+    glDeleteBuffers(1, &quadsBuffer[2]);
+    glDeleteBuffers(1, &quadsBuffer[3]);
+
+    if (vaoSupported)
+    {
+        // Delete VAOs
+        glDeleteVertexArrays(1, &vaoLines);
+        glDeleteVertexArrays(1, &vaoTriangles);
+        glDeleteVertexArrays(1, &vaoQuads);
+    }
+
+    //glDetachShader(defaultShaderProgram, vertexShader);
+    //glDetachShader(defaultShaderProgram, fragmentShader);
+    //glDeleteShader(vertexShader);     // Already deleted on shader compilation
+    //glDeleteShader(fragmentShader);   // Already deleted on shader compilation
+    glDeleteProgram(defaultShader.id);
+
+    // Free vertex arrays memory
+    free(lines.vertices);
+    free(lines.colors);
+
+    free(triangles.vertices);
+    free(triangles.colors);
+
+    free(quads.vertices);
+    free(quads.texcoords);
+    free(quads.colors);
+    free(quads.indices);
+
+    // Free GPU texture
+    glDeleteTextures(1, &whiteTexture);
+    TraceLog(INFO, "[TEX ID %i] Unloaded texture data (base white texture) from VRAM", whiteTexture);
+
+    free(draws);
+#endif
+}
+
+// Drawing batches: triangles, quads, lines
+void rlglDraw(void)
+{
+#if defined(GRAPHICS_API_OPENGL_33) || defined(GRAPHICS_API_OPENGL_ES2)
+    UpdateBuffers();
+
+    if ((lines.vCounter > 0) || (triangles.vCounter > 0) || (quads.vCounter > 0))
+    {
+        glUseProgram(currentShader.id);
+        
+        Matrix matMVP = MatrixMultiply(modelview, projection);        // Create modelview-projection matrix
+
+        glUniformMatrix4fv(currentShader.mvpLoc, 1, false, MatrixToFloat(matMVP));
+        glUniform1i(currentShader.mapDiffuseLoc, 0);
+        glUniform4f(currentShader.tintColorLoc, 1.0f, 1.0f, 1.0f, 1.0f);
+    }
+
+    // NOTE: We draw in this order: lines, triangles, quads
+    
+    if (lines.vCounter > 0)
+    {
+        glBindTexture(GL_TEXTURE_2D, whiteTexture);
+
+        if (vaoSupported)
+        {
+            glBindVertexArray(vaoLines);
+        }
+        else
+        {
+            glBindBuffer(GL_ARRAY_BUFFER, linesBuffer[0]);
+            glVertexAttribPointer(currentShader.vertexLoc, 3, GL_FLOAT, 0, 0, 0);
+            glEnableVertexAttribArray(currentShader.vertexLoc);
+
+            if (currentShader.colorLoc != -1)
+            {
+                glBindBuffer(GL_ARRAY_BUFFER, linesBuffer[1]);
+                glVertexAttribPointer(currentShader.colorLoc, 4, GL_UNSIGNED_BYTE, GL_TRUE, 0, 0);
+                glEnableVertexAttribArray(currentShader.colorLoc);
+            }
+        }
+
+        glDrawArrays(GL_LINES, 0, lines.vCounter);
+
+        if (!vaoSupported) glBindBuffer(GL_ARRAY_BUFFER, 0);
+        glBindTexture(GL_TEXTURE_2D, 0);
+    }
+
+    if (triangles.vCounter > 0)
+    {
+        glBindTexture(GL_TEXTURE_2D, whiteTexture);
+
+        if (vaoSupported)
+        {
+            glBindVertexArray(vaoTriangles);
+        }
+        else
+        {
+            glBindBuffer(GL_ARRAY_BUFFER, trianglesBuffer[0]);
+            glVertexAttribPointer(currentShader.vertexLoc, 3, GL_FLOAT, 0, 0, 0);
+            glEnableVertexAttribArray(currentShader.vertexLoc);
+
+            if (currentShader.colorLoc != -1)
+            {
+                glBindBuffer(GL_ARRAY_BUFFER, trianglesBuffer[1]);
+                glVertexAttribPointer(currentShader.colorLoc, 4, GL_UNSIGNED_BYTE, GL_TRUE, 0, 0);
+                glEnableVertexAttribArray(currentShader.colorLoc);
+            }
+        }
+
+        glDrawArrays(GL_TRIANGLES, 0, triangles.vCounter);
+
+        if (!vaoSupported) glBindBuffer(GL_ARRAY_BUFFER, 0);
+        glBindTexture(GL_TEXTURE_2D, 0);
+    }
+
+    if (quads.vCounter > 0)
+    {
+        int quadsCount = 0;
+        int numIndicesToProcess = 0;
+        int indicesOffset = 0;
+
+        if (vaoSupported)
+        {
+            glBindVertexArray(vaoQuads);
+        }
+        else
+        {
+            // Enable vertex attributes
+            glBindBuffer(GL_ARRAY_BUFFER, quadsBuffer[0]);
+            glVertexAttribPointer(currentShader.vertexLoc, 3, GL_FLOAT, 0, 0, 0);
+            glEnableVertexAttribArray(currentShader.vertexLoc);
+
+            glBindBuffer(GL_ARRAY_BUFFER, quadsBuffer[1]);
+            glVertexAttribPointer(currentShader.texcoordLoc, 2, GL_FLOAT, 0, 0, 0);
+            glEnableVertexAttribArray(currentShader.texcoordLoc);
+
+            if (currentShader.colorLoc != -1)
+            {
+                glBindBuffer(GL_ARRAY_BUFFER, quadsBuffer[2]);
+                glVertexAttribPointer(currentShader.colorLoc, 4, GL_UNSIGNED_BYTE, GL_TRUE, 0, 0);
+                glEnableVertexAttribArray(currentShader.colorLoc);
+            }
+            
+            glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, quadsBuffer[3]);
+        }
+
+        //TraceLog(DEBUG, "Draws required per frame: %i", drawsCounter);
+
+        for (int i = 0; i < drawsCounter; i++)
+        {
+            quadsCount = draws[i].vertexCount/4;
+            numIndicesToProcess = quadsCount*6;  // Number of quads * 6 indices per quad
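+            // (illustrative) e.g. a draw call that batched 2 quads has vertexCount = 8, so
+            // quadsCount = 2 and numIndicesToProcess = 12; indicesOffset then advances by 12 below,
+            // placing the next draw call at byte offset 12*sizeof(index) in the element buffer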
+
+            //TraceLog(DEBUG, "Quads to render: %i - Vertex Count: %i", quadsCount, draws[i].vertexCount);
+
+            glBindTexture(GL_TEXTURE_2D, draws[i].textureId);
+
+            // NOTE: The final parameter tells the GPU the offset in bytes from the start of the index buffer to the location of the first index to process
+#if defined(GRAPHICS_API_OPENGL_33)
+            glDrawElements(GL_TRIANGLES, numIndicesToProcess, GL_UNSIGNED_INT, (GLvoid*) (sizeof(GLuint) * indicesOffset));
+#elif defined(GRAPHICS_API_OPENGL_ES2)
+            glDrawElements(GL_TRIANGLES, numIndicesToProcess, GL_UNSIGNED_SHORT, (GLvoid*) (sizeof(GLushort) * indicesOffset));
+#endif
+            //GLenum err;
+            //if ((err = glGetError()) != GL_NO_ERROR) TraceLog(INFO, "OpenGL error: %i", (int)err);    //GL_INVALID_ENUM!
+
+            indicesOffset += draws[i].vertexCount/4*6;
+        }
+
+        if (!vaoSupported)
+        {
+            glBindBuffer(GL_ARRAY_BUFFER, 0);
+            glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
+        }
+
+        glBindTexture(GL_TEXTURE_2D, 0);  // Unbind textures
+    }
+
+    if (vaoSupported) glBindVertexArray(0);   // Unbind VAO
+
+    glUseProgram(0);    // Unbind shader program
+
+    // Reset draws counter
+    drawsCounter = 1;
+    draws[0].textureId = whiteTexture;
+    draws[0].vertexCount = 0;
+
+    // Reset vertex counters for next frame
+    lines.vCounter = 0;
+    lines.cCounter = 0;
+
+    triangles.vCounter = 0;
+    triangles.cCounter = 0;
+
+    quads.vCounter = 0;
+    quads.tcCounter = 0;
+    quads.cCounter = 0;
+    
+    // Reset depth for next draw
+    currentDepth = -1.0f;
+#endif
+}
+
+// Draw a 3d model
+// NOTE: Model transform can come within model struct
+void rlglDrawModel(Model model, Vector3 position, Vector3 rotationAxis, float rotationAngle, Vector3 scale, Color color, bool wires)
+{
+#if defined (GRAPHICS_API_OPENGL_11) || defined(GRAPHICS_API_OPENGL_33)
+    // NOTE: glPolygonMode() not available on OpenGL ES
+    if (wires) glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
+#endif
+
+#if defined(GRAPHICS_API_OPENGL_11)
+    glEnable(GL_TEXTURE_2D);
+    glBindTexture(GL_TEXTURE_2D, model.material.texDiffuse.id);
+
+    // NOTE: On OpenGL 1.1 we use Vertex Arrays to draw model
+    glEnableClientState(GL_VERTEX_ARRAY);                     // Enable vertex array
+    glEnableClientState(GL_TEXTURE_COORD_ARRAY);              // Enable texture coords array
+    glEnableClientState(GL_NORMAL_ARRAY);                     // Enable normals array
+
+    glVertexPointer(3, GL_FLOAT, 0, model.mesh.vertices);     // Pointer to vertex coords array
+    glTexCoordPointer(2, GL_FLOAT, 0, model.mesh.texcoords);  // Pointer to texture coords array
+    glNormalPointer(GL_FLOAT, 0, model.mesh.normals);         // Pointer to normals array
+    //glColorPointer(4, GL_UNSIGNED_BYTE, 0, model.mesh.colors);   // Pointer to colors array (NOT USED)
+
+    rlPushMatrix();
+        rlTranslatef(position.x, position.y, position.z);
+        rlScalef(scale.x, scale.y, scale.z);
+        rlRotatef(rotationAngle, rotationAxis.x, rotationAxis.y, rotationAxis.z);
+
+        rlColor4ub(color.r, color.g, color.b, color.a);
+
+        glDrawArrays(GL_TRIANGLES, 0, model.mesh.vertexCount);
+    rlPopMatrix();
+
+    glDisableClientState(GL_VERTEX_ARRAY);                     // Disable vertex array
+    glDisableClientState(GL_TEXTURE_COORD_ARRAY);              // Disable texture coords array
+    glDisableClientState(GL_NORMAL_ARRAY);                     // Disable normals array
+
+    glDisable(GL_TEXTURE_2D);
+    glBindTexture(GL_TEXTURE_2D, 0);
+#endif
+
+#if defined(GRAPHICS_API_OPENGL_33) || defined(GRAPHICS_API_OPENGL_ES2)
+    glUseProgram(model.material.shader.id);
+    
+    // At this point the modelview matrix just contains the view matrix (camera)
+    // That's because Begin3dMode() sets it and no model-drawing function modifies it; they all use rlPushMatrix() and rlPopMatrix()
+    Matrix matView = modelview;         // View matrix (camera)
+    Matrix matProjection = projection;  // Projection matrix (perspective)
+    
+    // Calculate transformation matrix from function parameters
+    // Get transform matrix (rotation -> scale -> translation)
+    Matrix matRotation = MatrixRotate(rotationAxis, rotationAngle*DEG2RAD);
+    Matrix matScale = MatrixScale(scale.x, scale.y, scale.z);
+    Matrix matTranslation = MatrixTranslate(position.x, position.y, position.z);
+    Matrix matTransform = MatrixMultiply(MatrixMultiply(matScale, matRotation), matTranslation);
+    
+    // Combine model internal transformation matrix (model.transform) with matrix generated by function parameters (matTransform)
+    Matrix matModel = MatrixMultiply(model.transform, matTransform);    // Transform to world-space coordinates
+    
+    // Calculate model-view matrix combining matModel and matView
+    Matrix matModelView = MatrixMultiply(matModel, matView);            // Transform to camera-space coordinates
+
+    // Calculate model-view-projection matrix (MVP)
+    Matrix matMVP = MatrixMultiply(matModelView, matProjection);        // Transform to screen-space coordinates
+
+    // Send combined model-view-projection matrix to shader
+    glUniformMatrix4fv(model.material.shader.mvpLoc, 1, false, MatrixToFloat(matMVP));
+
+    // Apply color tinting to model
+    // NOTE: Just update one uniform on fragment shader
+    float vColor[4] = { (float)color.r/255, (float)color.g/255, (float)color.b/255, (float)color.a/255 };
+    glUniform4fv(model.material.shader.tintColorLoc, 1, vColor);
+
+    // Set shader textures (diffuse, normal, specular)
+    glActiveTexture(GL_TEXTURE0);
+    glBindTexture(GL_TEXTURE_2D, model.material.texDiffuse.id);
+    glUniform1i(model.material.shader.mapDiffuseLoc, 0);        // Texture fits in active texture unit 0
+    
+    if (model.material.texNormal.id != 0)
+    {
+        glActiveTexture(GL_TEXTURE1);
+        glBindTexture(GL_TEXTURE_2D, model.material.texNormal.id);
+        glUniform1i(model.material.shader.mapNormalLoc, 1);     // Texture fits in active texture unit 1
+    }
+    
+    if (model.material.texSpecular.id != 0)
+    {
+        glActiveTexture(GL_TEXTURE2);
+        glBindTexture(GL_TEXTURE_2D, model.material.texSpecular.id);
+        glUniform1i(model.material.shader.mapSpecularLoc, 2);   // Texture fits in active texture unit 2
+    }
+
+    if (vaoSupported)
+    {
+        glBindVertexArray(model.mesh.vaoId);
+    }
+    else
+    {
+        // Bind model VBO data: vertex position
+        glBindBuffer(GL_ARRAY_BUFFER, model.mesh.vboId[0]);
+        glVertexAttribPointer(model.material.shader.vertexLoc, 3, GL_FLOAT, 0, 0, 0);
+        glEnableVertexAttribArray(model.material.shader.vertexLoc);
+
+        // Bind model VBO data: vertex texcoords
+        glBindBuffer(GL_ARRAY_BUFFER, model.mesh.vboId[1]);
+        glVertexAttribPointer(model.material.shader.texcoordLoc, 2, GL_FLOAT, 0, 0, 0);
+        glEnableVertexAttribArray(model.material.shader.texcoordLoc);
+
+        // Bind model VBO data: vertex normals (if available)
+        if (model.material.shader.normalLoc != -1)
+        {
+            glBindBuffer(GL_ARRAY_BUFFER, model.mesh.vboId[2]);
+            glVertexAttribPointer(model.material.shader.normalLoc, 3, GL_FLOAT, 0, 0, 0);
+            glEnableVertexAttribArray(model.material.shader.normalLoc);
+        }
+        
+        // TODO: Bind model VBO data: colors, tangents, texcoords2 (if available)
+    }
+
+    // Draw call!
+    glDrawArrays(GL_TRIANGLES, 0, model.mesh.vertexCount);
+    
+    //glDisableVertexAttribArray(model.shader.vertexLoc);
+    //glDisableVertexAttribArray(model.shader.texcoordLoc);
+    //if (model.shader.normalLoc != -1) glDisableVertexAttribArray(model.shader.normalLoc);
+    
+    if (model.material.texNormal.id != 0)
+    {
+        glActiveTexture(GL_TEXTURE1);
+        glBindTexture(GL_TEXTURE_2D, 0);
+    }
+    
+    if (model.material.texSpecular.id != 0)
+    {
+        glActiveTexture(GL_TEXTURE2);
+        glBindTexture(GL_TEXTURE_2D, 0);
+    }
+
+    glActiveTexture(GL_TEXTURE0);               // Set shader active texture to default 0
+    glBindTexture(GL_TEXTURE_2D, 0);            // Unbind textures
+
+    if (vaoSupported) glBindVertexArray(0);     // Unbind VAO
+    else glBindBuffer(GL_ARRAY_BUFFER, 0);      // Unbind VBOs
+
+    glUseProgram(0);        // Unbind shader program
+#endif
+
+#if defined (GRAPHICS_API_OPENGL_11) || defined(GRAPHICS_API_OPENGL_33)
+    // NOTE: glPolygonMode() not available on OpenGL ES
+    if (wires) glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
+#endif
+}
+
+// Initialize Graphics Device (OpenGL stuff)
+// NOTE: Stores global variables screenWidth and screenHeight
+void rlglInitGraphics(int offsetX, int offsetY, int width, int height)
+{   
+    // NOTE: Required! The viewport must be recalculated if the screen is resized!
+    glViewport(offsetX/2, offsetY/2, width - offsetX, height - offsetY);    // Set viewport width and height
+
+    // NOTE: Don't confuse glViewport with the transformation matrix
+    // NOTE: glViewport just defines the area of the context that you will actually draw to.
+
+    glClearColor(0.0f, 0.0f, 0.0f, 1.0f);                   // Set clear color (black)
+    //glClearDepth(1.0f);                                   // Clear depth buffer (default)
+    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);     // Clear used buffers, depth buffer is used for 3D
+
+    glDisable(GL_DEPTH_TEST);                               // Disable depth testing for 2D (only used for 3D)
+    glDepthFunc(GL_LEQUAL);                                 // Type of depth testing to apply
+
+    glEnable(GL_BLEND);                                     // Enable color blending (required to work with transparencies)
+    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);      // Color blending function (how colors are mixed)
+
+#if defined(GRAPHICS_API_OPENGL_11)
+    glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST);      // Improve quality of color and texture coordinate interpolation (Deprecated in OGL 3.0)
+                                                            // Other options: GL_FASTEST, GL_DONT_CARE (default)
+#endif
+
+    rlMatrixMode(RL_PROJECTION);                // Switch to PROJECTION matrix
+    rlLoadIdentity();                           // Reset current matrix (PROJECTION)
+
+    rlOrtho(0, width - offsetX, height - offsetY, 0, 0.0f, 1.0f); // Config orthographic mode: top-left corner --> (0,0)
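+    // e.g. (illustrative) with no offset on an 800x450 screen this maps (0,0) to the top-left
+    // and (800,450) to the bottom-right corner, with depth kept in the 0.0..1.0 range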
+
+    rlMatrixMode(RL_MODELVIEW);                 // Switch back to MODELVIEW matrix
+    rlLoadIdentity();                           // Reset current matrix (MODELVIEW)
+
+    // NOTE: All shapes/models triangles are drawn CCW
+
+    glEnable(GL_CULL_FACE);       // Enable backface culling (Disabled by default)
+    //glCullFace(GL_BACK);        // Cull the Back face (default)
+    //glFrontFace(GL_CCW);        // Front faces are defined counter-clockwise (default)
+
+#if defined(GRAPHICS_API_OPENGL_11)
+    glShadeModel(GL_SMOOTH);      // Smooth shading between vertices (vertex color interpolation) (Deprecated on OpenGL 3.3+)
+                                  // Possible options: GL_SMOOTH (Color interpolation) or GL_FLAT (no interpolation)
+#endif
+
+    TraceLog(INFO, "OpenGL graphic device initialized successfully");
+}
+
+// Get world coordinates from screen coordinates
+Vector3 rlglUnproject(Vector3 source, Matrix proj, Matrix view)
+{
+    Vector3 result = { 0.0f, 0.0f, 0.0f };
+    
+    // Calculate unproject matrix (multiply projection matrix and view matrix) and invert it
+    Matrix matProjView = MatrixMultiply(proj, view);
+    MatrixInvert(&matProjView);
+    
+    // Create quaternion from source point
+    Quaternion quat = { source.x, source.y, source.z, 1.0f };
+    
+    // Multiply quat point by unproject matrix
+    QuaternionTransform(&quat, matProjView);
+    
+    // Normalize the result (perspective divide) to get world-space coordinates
+    result.x = quat.x/quat.w;
+    result.y = quat.y/quat.w;
+    result.z = quat.z/quat.w;
+
+    return result;
+}
+
+// Convert image data to OpenGL texture (returns OpenGL valid Id)
+unsigned int rlglLoadTexture(void *data, int width, int height, int textureFormat, int mipmapCount)
+{
+    glBindTexture(GL_TEXTURE_2D, 0);    // Free any old binding
+
+    GLuint id = 0;
+    
+    // Check texture format support by OpenGL 1.1 (compressed textures not supported)
+    if ((rlGetVersion() == OPENGL_11) && (textureFormat >= 8))
+    {
+        TraceLog(WARNING, "OpenGL 1.1 does not support GPU compressed texture formats");
+        return id;
+    }
+    
+    if ((!texCompDXTSupported) && ((textureFormat == COMPRESSED_DXT1_RGB) || (textureFormat == COMPRESSED_DXT1_RGBA) ||
+        (textureFormat == COMPRESSED_DXT3_RGBA) || (textureFormat == COMPRESSED_DXT5_RGBA)))
+    {
+        TraceLog(WARNING, "DXT compressed texture format not supported");
+        return id;
+    }
+#if defined(GRAPHICS_API_OPENGL_33) || defined(GRAPHICS_API_OPENGL_ES2)    
+    if ((!texCompETC1Supported) && (textureFormat == COMPRESSED_ETC1_RGB))
+    {
+        TraceLog(WARNING, "ETC1 compressed texture format not supported");
+        return id;
+    }
+    
+    if ((!texCompETC2Supported) && ((textureFormat == COMPRESSED_ETC2_RGB) || (textureFormat == COMPRESSED_ETC2_EAC_RGBA)))
+    {
+        TraceLog(WARNING, "ETC2 compressed texture format not supported");
+        return id;
+    }
+    
+    if ((!texCompPVRTSupported) && ((textureFormat == COMPRESSED_PVRT_RGB) || (textureFormat == COMPRESSED_PVRT_RGBA)))
+    {
+        TraceLog(WARNING, "PVRT compressed texture format not supported");
+        return id;
+    }
+    
+    if ((!texCompASTCSupported) && ((textureFormat == COMPRESSED_ASTC_4x4_RGBA) || (textureFormat == COMPRESSED_ASTC_8x8_RGBA)))
+    {
+        TraceLog(WARNING, "ASTC compressed texture format not supported");
+        return id;
+    }
+#endif
+
+    glGenTextures(1, &id);              // Generate texture id
+
+#if defined(GRAPHICS_API_OPENGL_33) || defined(GRAPHICS_API_OPENGL_ES2)
+    //glActiveTexture(GL_TEXTURE0);     // If not defined, using GL_TEXTURE0 by default (shader texture)
+#endif
+
+    glBindTexture(GL_TEXTURE_2D, id);
+
+#if defined(GRAPHICS_API_OPENGL_33)
+    // NOTE: We define internal (GPU) format as GL_RGBA8 (probably BGRA8 in practice, driver takes care)
+    // NOTE: On embedded systems, we let the driver choose the best internal format
+
+    // Support for multiple color modes (16bit color modes and grayscale)
+    // (sized)internalFormat    format          type
+    // GL_R8                    GL_RED      GL_UNSIGNED_BYTE
+    // GL_RGB565                GL_RGB      GL_UNSIGNED_BYTE, GL_UNSIGNED_SHORT_5_6_5
+    // GL_RGB5_A1               GL_RGBA     GL_UNSIGNED_BYTE, GL_UNSIGNED_SHORT_5_5_5_1
+    // GL_RGBA4                 GL_RGBA     GL_UNSIGNED_BYTE, GL_UNSIGNED_SHORT_4_4_4_4
+    // GL_RGBA8                 GL_RGBA     GL_UNSIGNED_BYTE
+    // GL_RGB8                  GL_RGB      GL_UNSIGNED_BYTE
+    
+    switch (textureFormat)
+    {
+        case UNCOMPRESSED_GRAYSCALE:
+        {
+            glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, width, height, 0, GL_RED, GL_UNSIGNED_BYTE, (unsigned char *)data);
+            
+            // With swizzleMask we define how a one channel texture will be mapped to RGBA
+            // Required GL >= 3.3 or EXT_texture_swizzle/ARB_texture_swizzle
+            GLint swizzleMask[] = { GL_RED, GL_RED, GL_RED, GL_ONE };
+            glTexParameteriv(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_RGBA, swizzleMask);
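+            // (illustrative) e.g. a grayscale texel with value g is then sampled as (g, g, g, 1.0) in the shader
+            // rather than (g, 0.0, 0.0, 1.0), which is what a plain GL_R8 texture would return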
+            
+            TraceLog(INFO, "[TEX ID %i] Grayscale texture loaded and swizzled", id);
+        } break;
+        case UNCOMPRESSED_GRAY_ALPHA:
+        {
+            glTexImage2D(GL_TEXTURE_2D, 0, GL_RG8, width, height, 0, GL_RG, GL_UNSIGNED_BYTE, (unsigned char *)data);
+            
+            GLint swizzleMask[] = { GL_RED, GL_RED, GL_RED, GL_GREEN };
+            glTexParameteriv(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_RGBA, swizzleMask);
+        } break;
+
+        case UNCOMPRESSED_R5G6B5: glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB565, width, height, 0, GL_RGB, GL_UNSIGNED_SHORT_5_6_5, (unsigned short *)data); break;
+        case UNCOMPRESSED_R8G8B8: glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB8, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, (unsigned char *)data); break;
+        case UNCOMPRESSED_R5G5B5A1: glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB5_A1, width, height, 0, GL_RGBA, GL_UNSIGNED_SHORT_5_5_5_1, (unsigned short *)data); break;
+        case UNCOMPRESSED_R4G4B4A4: glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA4, width, height, 0, GL_RGBA, GL_UNSIGNED_SHORT_4_4_4_4, (unsigned short *)data); break;
+        case UNCOMPRESSED_R8G8B8A8: glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, (unsigned char *)data); break;
+        case COMPRESSED_DXT1_RGB: if (texCompDXTSupported) LoadCompressedTexture((unsigned char *)data, width, height, mipmapCount, GL_COMPRESSED_RGB_S3TC_DXT1_EXT); break;
+        case COMPRESSED_DXT1_RGBA: if (texCompDXTSupported) LoadCompressedTexture((unsigned char *)data, width, height, mipmapCount, GL_COMPRESSED_RGBA_S3TC_DXT1_EXT); break;
+        case COMPRESSED_DXT3_RGBA: if (texCompDXTSupported) LoadCompressedTexture((unsigned char *)data, width, height, mipmapCount, GL_COMPRESSED_RGBA_S3TC_DXT3_EXT); break;
+        case COMPRESSED_DXT5_RGBA: if (texCompDXTSupported) LoadCompressedTexture((unsigned char *)data, width, height, mipmapCount, GL_COMPRESSED_RGBA_S3TC_DXT5_EXT); break;
+        case COMPRESSED_ETC1_RGB: if (texCompETC1Supported) LoadCompressedTexture((unsigned char *)data, width, height, mipmapCount, GL_ETC1_RGB8_OES); break;           // NOTE: Requires OpenGL ES 2.0 or OpenGL 4.3
+        case COMPRESSED_ETC2_RGB: if (texCompETC2Supported) LoadCompressedTexture((unsigned char *)data, width, height, mipmapCount, GL_COMPRESSED_RGB8_ETC2); break;    // NOTE: Requires OpenGL ES 3.0 or OpenGL 4.3
+        case COMPRESSED_ETC2_EAC_RGBA: if (texCompETC2Supported) LoadCompressedTexture((unsigned char *)data, width, height, mipmapCount, GL_COMPRESSED_RGBA8_ETC2_EAC); break;    // NOTE: Requires OpenGL ES 3.0 or OpenGL 4.3
+        case COMPRESSED_PVRT_RGB: if (texCompPVRTSupported) LoadCompressedTexture((unsigned char *)data, width, height, mipmapCount, GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG); break;        // NOTE: Requires PowerVR GPU
+        case COMPRESSED_PVRT_RGBA: if (texCompPVRTSupported) LoadCompressedTexture((unsigned char *)data, width, height, mipmapCount, GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG); break;     // NOTE: Requires PowerVR GPU
+        case COMPRESSED_ASTC_4x4_RGBA: if (texCompASTCSupported) LoadCompressedTexture((unsigned char *)data, width, height, mipmapCount, GL_COMPRESSED_RGBA_ASTC_4x4_KHR); break; // NOTE: Requires OpenGL ES 3.1 or OpenGL 4.3
+        case COMPRESSED_ASTC_8x8_RGBA: if (texCompASTCSupported) LoadCompressedTexture((unsigned char *)data, width, height, mipmapCount, GL_COMPRESSED_RGBA_ASTC_8x8_KHR); break; // NOTE: Requires OpenGL ES 3.1 or OpenGL 4.3
+        default: TraceLog(WARNING, "Texture format not recognized"); break;
+    }
+#elif defined(GRAPHICS_API_OPENGL_11) || defined(GRAPHICS_API_OPENGL_ES2)
+    // NOTE: On OpenGL ES 2.0 (WebGL), internalFormat must match format, and the allowed options are: GL_LUMINANCE, GL_RGB, GL_RGBA
+    switch (textureFormat)
+    {
+        case UNCOMPRESSED_GRAYSCALE: glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, width, height, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, (unsigned char *)data); break;
+        case UNCOMPRESSED_GRAY_ALPHA: glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE_ALPHA, width, height, 0, GL_LUMINANCE_ALPHA, GL_UNSIGNED_BYTE, (unsigned char *)data); break;
+        case UNCOMPRESSED_R5G6B5: glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_SHORT_5_6_5, (unsigned short *)data); break;
+        case UNCOMPRESSED_R8G8B8: glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, (unsigned char *)data); break;
+        case UNCOMPRESSED_R5G5B5A1: glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_SHORT_5_5_5_1, (unsigned short *)data); break;
+        case UNCOMPRESSED_R4G4B4A4: glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_SHORT_4_4_4_4, (unsigned short *)data); break;
+        case UNCOMPRESSED_R8G8B8A8: glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, (unsigned char *)data); break;
+#if defined(GRAPHICS_API_OPENGL_ES2)
+        case COMPRESSED_DXT1_RGB: if (texCompDXTSupported) LoadCompressedTexture((unsigned char *)data, width, height, mipmapCount, GL_COMPRESSED_RGB_S3TC_DXT1_EXT); break;
+        case COMPRESSED_DXT1_RGBA: if (texCompDXTSupported) LoadCompressedTexture((unsigned char *)data, width, height, mipmapCount, GL_COMPRESSED_RGBA_S3TC_DXT1_EXT); break;
+        case COMPRESSED_DXT3_RGBA: if (texCompDXTSupported) LoadCompressedTexture((unsigned char *)data, width, height, mipmapCount, GL_COMPRESSED_RGBA_S3TC_DXT3_EXT); break;     // NOTE: Not supported by WebGL
+        case COMPRESSED_DXT5_RGBA: if (texCompDXTSupported) LoadCompressedTexture((unsigned char *)data, width, height, mipmapCount, GL_COMPRESSED_RGBA_S3TC_DXT5_EXT); break;     // NOTE: Not supported by WebGL
+        case COMPRESSED_ETC1_RGB: if (texCompETC1Supported) LoadCompressedTexture((unsigned char *)data, width, height, mipmapCount, GL_ETC1_RGB8_OES); break;           // NOTE: Requires OpenGL ES 2.0 or OpenGL 4.3
+        case COMPRESSED_ETC2_RGB: if (texCompETC2Supported) LoadCompressedTexture((unsigned char *)data, width, height, mipmapCount, GL_COMPRESSED_RGB8_ETC2); break;    // NOTE: Requires OpenGL ES 3.0 or OpenGL 4.3
+        case COMPRESSED_ETC2_EAC_RGBA: if (texCompETC2Supported) LoadCompressedTexture((unsigned char *)data, width, height, mipmapCount, GL_COMPRESSED_RGBA8_ETC2_EAC); break;    // NOTE: Requires OpenGL ES 3.0 or OpenGL 4.3
+        case COMPRESSED_PVRT_RGB: if (texCompPVRTSupported) LoadCompressedTexture((unsigned char *)data, width, height, mipmapCount, GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG); break;        // NOTE: Requires PowerVR GPU
+        case COMPRESSED_PVRT_RGBA: if (texCompPVRTSupported) LoadCompressedTexture((unsigned char *)data, width, height, mipmapCount, GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG); break;     // NOTE: Requires PowerVR GPU
+        case COMPRESSED_ASTC_4x4_RGBA: if (texCompASTCSupported) LoadCompressedTexture((unsigned char *)data, width, height, mipmapCount, GL_COMPRESSED_RGBA_ASTC_4x4_KHR); break; // NOTE: Requires OpenGL ES 3.1 or OpenGL 4.3
+        case COMPRESSED_ASTC_8x8_RGBA: if (texCompASTCSupported) LoadCompressedTexture((unsigned char *)data, width, height, mipmapCount, GL_COMPRESSED_RGBA_ASTC_8x8_KHR); break; // NOTE: Requires OpenGL ES 3.1 or OpenGL 4.3
+#endif
+        default: TraceLog(WARNING, "Texture format not supported"); break;
+    }
+#endif
+
+    // Texture parameters configuration
+    // NOTE: glTexParameteri does NOT affect texture uploading, just the way it's used
+#if defined(GRAPHICS_API_OPENGL_ES2)
+    // NOTE: OpenGL ES 2.0 with no GL_OES_texture_npot support (i.e. WebGL) has limited NPOT support, so CLAMP_TO_EDGE must be used
+    if (npotSupported)
+    {
+        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);       // Set texture to repeat on x-axis
+        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);       // Set texture to repeat on y-axis
+    }
+    else
+    {
+        // NOTE: With CLAMP_TO_EDGE, negative texture coordinates (e.g. from LoadOBJ()) do not work!
+        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);       // Set texture to clamp on x-axis
+        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);       // Set texture to clamp on y-axis
+    }
+#else
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);       // Set texture to repeat on x-axis
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);       // Set texture to repeat on y-axis
+#endif
+
+    // Magnification and minification filters
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);  // Alternative: GL_LINEAR
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);  // Alternative: GL_LINEAR
+   
+#if defined(GRAPHICS_API_OPENGL_33)
+    if (mipmapCount > 1)
+    {
+        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);   // Activate Trilinear filtering for mipmaps (must be available)
+    }
+#endif
+
+    // At this point we have the texture loaded in GPU and texture parameters configured
+    
+    // NOTE: If mipmaps were not in data, they are not generated automatically
+
+    // Unbind current texture
+    glBindTexture(GL_TEXTURE_2D, 0);
+
+    if (id > 0) TraceLog(INFO, "[TEX ID %i] Texture created successfully (%ix%i)", id, width, height);
+    else TraceLog(WARNING, "Texture could not be created");
+
+    return id;
+}
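+
+// Usage sketch for the function above (hypothetical pixel buffer; parameter order is
+// assumed from the way data, width, height, textureFormat and mipmapCount are used above):
+//     unsigned char pixels[4*4*4] = { 0 };    // 4x4 RGBA8 image data
+//     unsigned int texId = rlglLoadTexture(pixels, 4, 4, UNCOMPRESSED_R8G8B8A8, 1);
+//     if (texId == 0) TraceLog(WARNING, "Texture could not be uploaded");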
+
+// Load a texture to be used for rendering (fbo with color and depth attachments)
+RenderTexture2D rlglLoadRenderTexture(int width, int height)
+{
+    RenderTexture2D target;
+    
+    target.id = 0;
+    
+    target.texture.id = 0;
+    target.texture.width = width;
+    target.texture.height = height;
+    target.texture.format = UNCOMPRESSED_R8G8B8;
+    target.texture.mipmaps = 1;
+    
+    target.depth.id = 0;
+    target.depth.width = width;
+    target.depth.height = height;
+    target.depth.format = 19;       //DEPTH_COMPONENT_24BIT
+    target.depth.mipmaps = 1;
+
+#if defined(GRAPHICS_API_OPENGL_33) || defined(GRAPHICS_API_OPENGL_ES2)
+    // Create the texture that will serve as the color attachment for the framebuffer
+    glGenTextures(1, &target.texture.id);
+    glBindTexture(GL_TEXTURE_2D, target.texture.id);
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
+    glBindTexture(GL_TEXTURE_2D, 0);
+    
+#if defined(GRAPHICS_API_OPENGL_33)
+    #define USE_DEPTH_TEXTURE
+#else
+    #define USE_DEPTH_RENDERBUFFER
+#endif
+    
+#if defined(USE_DEPTH_RENDERBUFFER)
+    // Create the renderbuffer that will serve as the depth attachment for the framebuffer.
+    glGenRenderbuffers(1, &target.depth.id);
+    glBindRenderbuffer(GL_RENDERBUFFER, target.depth.id);
+    glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT16, width, height);    // GL_DEPTH_COMPONENT24 not supported on Android
+#elif defined(USE_DEPTH_TEXTURE)
+    // NOTE: We can also use a texture for depth buffer (GL_ARB_depth_texture/GL_OES_depth_texture extension required)
+    // A renderbuffer is simpler than a texture and could offer better performance on embedded devices
+    glGenTextures(1, &target.depth.id);
+    glBindTexture(GL_TEXTURE_2D, target.depth.id);
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+    glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT24, width, height, 0, GL_DEPTH_COMPONENT, GL_UNSIGNED_INT, NULL);
+    glBindTexture(GL_TEXTURE_2D, 0);
+#endif
+
+    // Create the framebuffer object
+    glGenFramebuffers(1, &target.id);
+    glBindFramebuffer(GL_FRAMEBUFFER, target.id);
+
+    // Attach color texture and depth renderbuffer to FBO
+    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, target.texture.id, 0);
+#if defined(USE_DEPTH_RENDERBUFFER)
+    glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, target.depth.id);
+#elif defined(USE_DEPTH_TEXTURE)
+    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, target.depth.id, 0);
+#endif
+
+    GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
+
+    if (status != GL_FRAMEBUFFER_COMPLETE)
+    {
+        TraceLog(WARNING, "Framebuffer object could not be created...");
+        
+        switch(status)
+        {
+            case GL_FRAMEBUFFER_UNSUPPORTED: TraceLog(WARNING, "Framebuffer is unsupported"); break;
+            case GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT: TraceLog(WARNING, "Framebuffer incomplete attachment"); break;
+#if defined(GRAPHICS_API_OPENGL_ES2)
+            case GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS: TraceLog(WARNING, "Framebuffer incomplete dimensions"); break;
+#endif
+            case GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT: TraceLog(WARNING, "Framebuffer incomplete missing attachment"); break;
+            default: break;
+        }
+        
+        glDeleteTextures(1, &target.texture.id);
+        glDeleteTextures(1, &target.depth.id);
+        glDeleteFramebuffers(1, &target.id);
+    }
+    else TraceLog(INFO, "[FBO ID %i] Framebuffer object created successfully", target.id);
+    
+    glBindFramebuffer(GL_FRAMEBUFFER, 0);
+#endif
+
+    return target; 
+}
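+
+// Usage sketch for the function above (a minimal render-to-texture pass driven
+// directly with OpenGL calls; sizes are placeholders):
+//     RenderTexture2D target = rlglLoadRenderTexture(320, 240);
+//     glBindFramebuffer(GL_FRAMEBUFFER, target.id);      // Following draws render into target.texture
+//     ...                                                 // Draw scene here
+//     glBindFramebuffer(GL_FRAMEBUFFER, 0);               // Back to default framebuffer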
+
+// Update already loaded texture in GPU with new data
+void rlglUpdateTexture(unsigned int id, int width, int height, int format, void *data)
+{
+    glBindTexture(GL_TEXTURE_2D, id);
+
+#if defined(GRAPHICS_API_OPENGL_33)
+    switch (format)
+    {
+        case UNCOMPRESSED_GRAYSCALE: glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, GL_RED, GL_UNSIGNED_BYTE, (unsigned char *)data); break;
+        case UNCOMPRESSED_GRAY_ALPHA: glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, GL_RG, GL_UNSIGNED_BYTE, (unsigned char *)data); break;
+        case UNCOMPRESSED_R5G6B5: glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, GL_RGB, GL_UNSIGNED_SHORT_5_6_5, (unsigned short *)data); break;
+        case UNCOMPRESSED_R8G8B8: glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE, (unsigned char *)data); break;
+        case UNCOMPRESSED_R5G5B5A1: glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, GL_RGBA, GL_UNSIGNED_SHORT_5_5_5_1, (unsigned short *)data); break;
+        case UNCOMPRESSED_R4G4B4A4: glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, GL_RGBA, GL_UNSIGNED_SHORT_4_4_4_4, (unsigned short *)data); break;
+        case UNCOMPRESSED_R8G8B8A8: glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, (unsigned char *)data); break;
+        default: TraceLog(WARNING, "Texture format updating not supported"); break;
+    }
+#elif defined(GRAPHICS_API_OPENGL_11) || defined(GRAPHICS_API_OPENGL_ES2)
+    // NOTE: On OpenGL ES 2.0 (WebGL), internalFormat must match format; the only allowed values are GL_LUMINANCE, GL_LUMINANCE_ALPHA, GL_RGB and GL_RGBA
+    switch (format)
+    {
+        case UNCOMPRESSED_GRAYSCALE: glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, GL_LUMINANCE, GL_UNSIGNED_BYTE, (unsigned char *)data); break;
+        case UNCOMPRESSED_GRAY_ALPHA: glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, GL_LUMINANCE_ALPHA, GL_UNSIGNED_BYTE, (unsigned char *)data); break;
+        case UNCOMPRESSED_R5G6B5: glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, GL_RGB, GL_UNSIGNED_SHORT_5_6_5, (unsigned short *)data); break;
+        case UNCOMPRESSED_R8G8B8: glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE, (unsigned char *)data); break;
+        case UNCOMPRESSED_R5G5B5A1: glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, GL_RGBA, GL_UNSIGNED_SHORT_5_5_5_1, (unsigned short *)data); break;
+        case UNCOMPRESSED_R4G4B4A4: glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, GL_RGBA, GL_UNSIGNED_SHORT_4_4_4_4, (unsigned short *)data); break;
+        case UNCOMPRESSED_R8G8B8A8: glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, (unsigned char *)data); break;
+        default: TraceLog(WARNING, "Texture format updating not supported"); break;
+    }
+#endif
+}
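+
+// Usage sketch for the function above (texId and buffer are placeholders; the texture
+// must have been created with the same size and an uncompressed format):
+//     unsigned char newPixels[64*64*4] = { 0 };           // New RGBA8 data for a 64x64 texture
+//     rlglUpdateTexture(texId, 64, 64, UNCOMPRESSED_R8G8B8A8, newPixels);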
+
+// Generate mipmap data for selected texture
+void rlglGenerateMipmaps(Texture2D texture)
+{
+    glBindTexture(GL_TEXTURE_2D, texture.id);
+    
+    // Check if texture is power-of-two (POT)
+    bool texIsPOT = false;
+   
+    if (((texture.width > 0) && ((texture.width & (texture.width - 1)) == 0)) && 
+        ((texture.height > 0) && ((texture.height & (texture.height - 1)) == 0))) texIsPOT = true;
+
+    if ((texIsPOT) || (npotSupported))
+    {
+#if defined(GRAPHICS_API_OPENGL_11)
+        // Compute required mipmaps
+        void *data = rlglReadTexturePixels(texture);
+        
+        // NOTE: data size is reallocated to fit mipmaps data
+        // NOTE: CPU mipmap generation only supports RGBA 32bit data
+        int mipmapCount = GenerateMipmaps(data, texture.width, texture.height);
+
+        int size = texture.width*texture.height*4;  // RGBA 32bit only
+        int offset = size;
+
+        int mipWidth = texture.width/2;
+        int mipHeight = texture.height/2;
+
+        // Load the mipmaps
+        for (int level = 1; level < mipmapCount; level++)
+        {
+            glTexImage2D(GL_TEXTURE_2D, level, GL_RGBA8, mipWidth, mipHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, (unsigned char *)data + offset);
+
+            size = mipWidth*mipHeight*4;
+            offset += size;
+
+            mipWidth /= 2;
+            mipHeight /= 2;
+        }
+        
+        TraceLog(WARNING, "[TEX ID %i] Mipmaps generated manually on CPU side", texture.id);
+        
+        // NOTE: Once mipmaps have been generated and data has been uploaded to GPU VRAM, we can discard RAM data
+        free(data);
+        
+#elif defined(GRAPHICS_API_OPENGL_33) || defined(GRAPHICS_API_OPENGL_ES2)
+        glGenerateMipmap(GL_TEXTURE_2D);    // Generate mipmaps automatically
+        TraceLog(INFO, "[TEX ID %i] Mipmaps generated automatically", texture.id);
+
+        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);   // Activate Trilinear filtering for mipmaps (must be available)
+#endif
+    }
+    else TraceLog(WARNING, "[TEX ID %i] Mipmaps can not be generated", texture.id);
+
+    glBindTexture(GL_TEXTURE_2D, 0);
+}
+
+// Load vertex data into a VAO (if supported) and VBO
+Model rlglLoadModel(Mesh mesh)
+{
+    Model model;
+
+    model.mesh = mesh;
+    model.mesh.vaoId = 0;       // Vertex Array Object
+    model.mesh.vboId[0] = 0;    // Vertex positions VBO
+    model.mesh.vboId[1] = 0;    // Vertex texcoords VBO
+    model.mesh.vboId[2] = 0;    // Vertex normals VBO
+    
+    // TODO: Consider attributes: color, texcoords2, tangents (if available)
+    
+    model.transform = MatrixIdentity();
+
+#if defined(GRAPHICS_API_OPENGL_11)
+    model.material.texDiffuse.id = 0;    // No texture required
+    model.material.shader.id = 0;        // No shader used
+
+#elif defined(GRAPHICS_API_OPENGL_33) || defined(GRAPHICS_API_OPENGL_ES2)
+    model.material.shader = defaultShader;          // Default model shader
+    
+    model.material.texDiffuse.id = whiteTexture;    // Default whiteTexture
+    model.material.texDiffuse.width = 1;            // Default whiteTexture width
+    model.material.texDiffuse.height = 1;           // Default whiteTexture height
+    model.material.texDiffuse.format = UNCOMPRESSED_R8G8B8A8; // Default whiteTexture format
+    
+    model.material.texNormal.id = 0;        // By default, no normal texture
+    model.material.texSpecular.id = 0;      // By default, no specular texture
+    
+    // TODO: Fill default material properties (color, glossiness...)
+    
+    GLuint vaoModel = 0;         // Vertex Array Objects (VAO)
+    GLuint vertexBuffer[3];      // Vertex Buffer Objects (VBO)
+
+    if (vaoSupported)
+    {
+        // Initialize Quads VAO (Buffer A)
+        glGenVertexArrays(1, &vaoModel);
+        glBindVertexArray(vaoModel);
+    }
+
+    // Create buffers for our vertex data (positions, texcoords, normals)
+    glGenBuffers(3, vertexBuffer);
+    
+    // NOTE: Default shader is assigned to model, so vbo buffers are properly linked to vertex attribs
+    // If model shader is changed, vbo buffers must be re-assigned to new location points (previously loaded)
+
+    // Enable vertex attributes: position
+    glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer[0]);
+    glBufferData(GL_ARRAY_BUFFER, sizeof(float)*3*mesh.vertexCount, mesh.vertices, GL_STATIC_DRAW);
+    glVertexAttribPointer(model.material.shader.vertexLoc, 3, GL_FLOAT, 0, 0, 0);
+    glEnableVertexAttribArray(model.material.shader.vertexLoc);
+
+    // Enable vertex attributes: texcoords
+    glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer[1]);
+    glBufferData(GL_ARRAY_BUFFER, sizeof(float)*2*mesh.vertexCount, mesh.texcoords, GL_STATIC_DRAW);
+    glVertexAttribPointer(model.material.shader.texcoordLoc, 2, GL_FLOAT, 0, 0, 0);
+    glEnableVertexAttribArray(model.material.shader.texcoordLoc);
+
+    // Enable vertex attributes: normals
+    glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer[2]);
+    glBufferData(GL_ARRAY_BUFFER, sizeof(float)*3*mesh.vertexCount, mesh.normals, GL_STATIC_DRAW);
+    glVertexAttribPointer(model.material.shader.normalLoc, 3, GL_FLOAT, 0, 0, 0);
+    glEnableVertexAttribArray(model.material.shader.normalLoc);
+    
+    glVertexAttrib4f(model.material.shader.colorLoc, 1.0f, 1.0f, 1.0f, 1.0f);    // Color vertex attribute set to default: WHITE
+    glDisableVertexAttribArray(model.material.shader.colorLoc);
+    
+    model.mesh.vboId[0] = vertexBuffer[0];     // Vertex position VBO
+    model.mesh.vboId[1] = vertexBuffer[1];     // Texcoords VBO
+    model.mesh.vboId[2] = vertexBuffer[2];     // Normals VBO
+
+    if (vaoSupported)
+    {
+        if (vaoModel > 0)
+        {
+            model.mesh.vaoId = vaoModel;
+            TraceLog(INFO, "[VAO ID %i] Model uploaded successfully to VRAM (GPU)", vaoModel);
+        }
+        else TraceLog(WARNING, "Model could not be uploaded to VRAM (GPU)");
+    }
+    else
+    {
+        TraceLog(INFO, "[VBO ID %i][VBO ID %i][VBO ID %i] Model uploaded successfully to VRAM (GPU)", model.mesh.vboId[0], model.mesh.vboId[1], model.mesh.vboId[2]);
+    }
+#endif
+
+    return model;
+}
+
+// Read screen pixel data (color buffer)
+unsigned char *rlglReadScreenPixels(int width, int height)
+{
+    unsigned char *screenData = (unsigned char *)malloc(width*height*sizeof(unsigned char)*4);
+
+    // NOTE: glReadPixels returns image flipped vertically -> (0,0) is the bottom left corner of the framebuffer
+    glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, screenData);
+
+    // Flip image vertically!
+    unsigned char *imgData = (unsigned char *)malloc(width*height*sizeof(unsigned char)*4);
+
+    for (int y = height - 1; y >= 0; y--)
+    {
+        for (int x = 0; x < (width*4); x++)
+        {
+            // Flip line
+            imgData[((height - 1) - y)*width*4 + x] = screenData[(y*width*4) + x];
+            
+            // Set alpha component value to 255 (no transparent image retrieval)
+            // NOTE: Alpha value has already been applied to RGB in framebuffer, we don't need it!
+            if (((x + 1)%4) == 0) imgData[((height - 1) - y)*width*4 + x] = 255;
+        }
+    }
+
+    free(screenData);
+
+    return imgData;     // NOTE: image data should be freed
+}
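+
+// Usage sketch for the function above (screenWidth/screenHeight are placeholders;
+// the returned buffer is width*height*4 bytes, RGBA, top-left origin):
+//     unsigned char *img = rlglReadScreenPixels(screenWidth, screenHeight);
+//     ...                                                 // Save or process the image data
+//     free(img);                                          // Caller owns the buffer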
+
+// Read texture pixel data
+// NOTE: glGetTexImage() is not available on OpenGL ES 2.0
+// Texture2D width and height are required on OpenGL ES 2.0; there is no way to query them from the texture id.
+void *rlglReadTexturePixels(Texture2D texture)
+{
+    void *pixels = NULL;
+    
+#if defined(GRAPHICS_API_OPENGL_11) || defined(GRAPHICS_API_OPENGL_33)
+    glBindTexture(GL_TEXTURE_2D, texture.id);
+    
+    // NOTE: Using texture.id, we can retrieve some texture info (but not on OpenGL ES 2.0)
+    /*
+    int width, height, format;
+    glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &width);
+    glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &height);
+    glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_INTERNAL_FORMAT, &format);
+    // Other texture info: GL_TEXTURE_RED_SIZE, GL_TEXTURE_GREEN_SIZE, GL_TEXTURE_BLUE_SIZE, GL_TEXTURE_ALPHA_SIZE
+    */
+    
+    int glFormat = 0, glType = 0;
+
+    unsigned int size = texture.width*texture.height;
+    
+    // NOTE: GL_LUMINANCE and GL_LUMINANCE_ALPHA are removed since OpenGL 3.1
+    // Must be replaced by GL_RED and GL_RG on Core OpenGL 3.3
+
+    switch (texture.format)
+    {
+#if defined(GRAPHICS_API_OPENGL_11)
+        case UNCOMPRESSED_GRAYSCALE: pixels = (unsigned char *)malloc(size); glFormat = GL_LUMINANCE; glType = GL_UNSIGNED_BYTE; break;            // 8 bit per pixel (no alpha)
+        case UNCOMPRESSED_GRAY_ALPHA: pixels = (unsigned char *)malloc(size*2); glFormat = GL_LUMINANCE_ALPHA; glType = GL_UNSIGNED_BYTE; break;   // 16 bpp (2 channels)
+#elif defined(GRAPHICS_API_OPENGL_33) 
+        case UNCOMPRESSED_GRAYSCALE: pixels = (unsigned char *)malloc(size); glFormat = GL_RED; glType = GL_UNSIGNED_BYTE; break;       
+        case UNCOMPRESSED_GRAY_ALPHA: pixels = (unsigned char *)malloc(size*2); glFormat = GL_RG; glType = GL_UNSIGNED_BYTE; break;
+#endif
+        case UNCOMPRESSED_R5G6B5: pixels = (unsigned short *)malloc(size*sizeof(unsigned short)); glFormat = GL_RGB; glType = GL_UNSIGNED_SHORT_5_6_5; break;             // 16 bpp
+        case UNCOMPRESSED_R8G8B8: pixels = (unsigned char *)malloc(size*3); glFormat = GL_RGB; glType = GL_UNSIGNED_BYTE; break;                   // 24 bpp
+        case UNCOMPRESSED_R5G5B5A1: pixels = (unsigned short *)malloc(size*sizeof(unsigned short)); glFormat = GL_RGBA; glType = GL_UNSIGNED_SHORT_5_5_5_1; break;        // 16 bpp (1 bit alpha)
+        case UNCOMPRESSED_R4G4B4A4: pixels = (unsigned short *)malloc(size*sizeof(unsigned short)); glFormat = GL_RGBA; glType = GL_UNSIGNED_SHORT_4_4_4_4; break;        // 16 bpp (4 bit alpha)
+        case UNCOMPRESSED_R8G8B8A8: pixels = (unsigned char *)malloc(size*4); glFormat = GL_RGBA; glType = GL_UNSIGNED_BYTE; break;                // 32 bpp
+        default: TraceLog(WARNING, "Texture data retrieval, format not suported"); break;
+    }
+    
+    // NOTE: Each row written to or read from by OpenGL pixel operations like glGetTexImage are aligned to a 4 byte boundary by default, which may add some padding.
+    // Use glPixelStorei to modify padding with the GL_[UN]PACK_ALIGNMENT setting. 
+    // GL_PACK_ALIGNMENT affects operations that read from OpenGL memory (glReadPixels, glGetTexImage, etc.) 
+    // GL_UNPACK_ALIGNMENT affects operations that write to OpenGL memory (glTexImage, etc.)
+    glPixelStorei(GL_PACK_ALIGNMENT, 1);
+
+    glGetTexImage(GL_TEXTURE_2D, 0, glFormat, glType, pixels);
+    
+    glBindTexture(GL_TEXTURE_2D, 0);
+#endif
+
+#if defined(GRAPHICS_API_OPENGL_ES2)
+
+    RenderTexture2D fbo = rlglLoadRenderTexture(texture.width, texture.height);
+
+    // NOTE: Two possible options:
+    // 1 - Bind texture to color fbo attachment and glReadPixels()
+    // 2 - Create an fbo, activate it, render quad with texture, glReadPixels()
+    
+#define GET_TEXTURE_FBO_OPTION_1    // It works
+
+#if defined(GET_TEXTURE_FBO_OPTION_1)
+    glBindFramebuffer(GL_FRAMEBUFFER, fbo.id);
+    glBindTexture(GL_TEXTURE_2D, 0);
+
+    // Attach our texture to FBO -> Texture must be RGB
+    // NOTE: Previously attached texture is automatically detached
+    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, texture.id, 0);
+    
+    pixels = (unsigned char *)malloc(texture.width*texture.height*4*sizeof(unsigned char));
+    
+    // NOTE: Although the FBO color texture is RGB, data is read back as RGBA... reading as RGB does not work... o__O
+    glReadPixels(0, 0, texture.width, texture.height, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
+    
+    // Re-attach internal FBO color texture before deleting it
+    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, fbo.texture.id, 0);
+    
+    glBindFramebuffer(GL_FRAMEBUFFER, 0);
+    
+#elif defined(GET_TEXTURE_FBO_OPTION_2)
+    // Render texture to fbo
+    glBindFramebuffer(GL_FRAMEBUFFER, fbo.id);
+    
+    glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
+    glClearDepthf(1.0f);
+    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
+    glViewport(0, 0, texture.width, texture.height);
+    //glMatrixMode(GL_PROJECTION);
+    //glLoadIdentity();
+    rlOrtho(0.0, texture.width, texture.height, 0.0, 0.0, 1.0);
+    //glMatrixMode(GL_MODELVIEW);
+    //glLoadIdentity();
+    //glDisable(GL_TEXTURE_2D);
+    //glDisable(GL_BLEND);
+    glEnable(GL_DEPTH_TEST);
+    
+    Model quad;
+    //quad.mesh = GenMeshQuad(width, height);
+    quad.transform = MatrixIdentity();
+    quad.material.shader = defaultShader;
+    
+    DrawModel(quad, (Vector3){ 0.0f, 0.0f, 0.0f }, 1.0f, WHITE);
+    
+    pixels = (unsigned char *)malloc(texture.width*texture.height*3*sizeof(unsigned char));
+    
+    glReadPixels(0, 0, texture.width, texture.height, GL_RGB, GL_UNSIGNED_BYTE, pixels);
+
+    // Bind framebuffer 0, which means render to back buffer
+    glBindFramebuffer(GL_FRAMEBUFFER, 0);
+    
+    UnloadModel(quad);
+#endif // GET_TEXTURE_FBO_OPTION
+
+    // Clean up temporary fbo
+    rlDeleteRenderTextures(fbo);
+
+#endif
+
+    return pixels;
+}
+
+
+//----------------------------------------------------------------------------------
+// Module Functions Definition - Shaders Functions
+// NOTE: Those functions are exposed directly to the user in raylib.h
+//----------------------------------------------------------------------------------
+
+// Load a custom shader and bind default locations
+Shader LoadShader(char *vsFileName, char *fsFileName)
+{
+    Shader shader = { 0 };
+
+#if defined(GRAPHICS_API_OPENGL_33) || defined(GRAPHICS_API_OPENGL_ES2)
+    // Shaders loading from external text file
+    char *vShaderStr = TextFileRead(vsFileName);
+    char *fShaderStr = TextFileRead(fsFileName);
+    
+    if ((vShaderStr != NULL) && (fShaderStr != NULL))
+    {
+        shader.id = LoadShaderProgram(vShaderStr, fShaderStr);
+
+        // After shader loading, we try to load default location names
+        if (shader.id != 0) LoadDefaultShaderLocations(&shader);
+        else
+        {
+            TraceLog(WARNING, "Custom shader could not be loaded");
+            shader = defaultShader;
+        }
+        
+        // Shader strings must be freed
+        free(vShaderStr);
+        free(fShaderStr);
+    }
+    else
+    {
+        TraceLog(WARNING, "Custom shader could not be loaded");
+        shader = defaultShader;
+    }        
+#endif
+
+    return shader;
+}
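+
+// Usage sketch for the function above (file names are placeholders; on failure the
+// default shader is returned, so the result is always safe to use):
+//     Shader custom = LoadShader("shaders/base.vs", "shaders/base.fs");
+//     SetCustomShader(custom);    // Use it for batched drawing
+//     ...
+//     SetDefaultShader();         // Restore the default shader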
+
+// Load custom shader strings and return program id
+unsigned int LoadShaderProgram(char *vShaderStr, char *fShaderStr)
+{
+    unsigned int program = 0;
+	
+#if defined(GRAPHICS_API_OPENGL_33) || defined(GRAPHICS_API_OPENGL_ES2)
+    GLuint vertexShader;
+    GLuint fragmentShader;
+
+    vertexShader = glCreateShader(GL_VERTEX_SHADER);
+    fragmentShader = glCreateShader(GL_FRAGMENT_SHADER);
+
+    const char *pvs = vShaderStr;
+    const char *pfs = fShaderStr;
+
+    glShaderSource(vertexShader, 1, &pvs, NULL);
+    glShaderSource(fragmentShader, 1, &pfs, NULL);
+
+    GLint success = 0;
+
+    glCompileShader(vertexShader);
+
+    glGetShaderiv(vertexShader, GL_COMPILE_STATUS, &success);
+
+    if (success != GL_TRUE)
+    {
+        TraceLog(WARNING, "[VSHDR ID %i] Failed to compile vertex shader...", vertexShader);
+
+        int maxLength = 0;
+        int length;
+
+        glGetShaderiv(vertexShader, GL_INFO_LOG_LENGTH, &maxLength);
+
+        char log[maxLength];
+
+        glGetShaderInfoLog(vertexShader, maxLength, &length, log);
+
+        TraceLog(INFO, "%s", log);
+    }
+    else TraceLog(INFO, "[VSHDR ID %i] Vertex shader compiled successfully", vertexShader);
+
+    glCompileShader(fragmentShader);
+
+    glGetShaderiv(fragmentShader, GL_COMPILE_STATUS, &success);
+
+    if (success != GL_TRUE)
+    {
+        TraceLog(WARNING, "[FSHDR ID %i] Failed to compile fragment shader...", fragmentShader);
+
+        int maxLength = 0;
+        int length;
+
+        glGetShaderiv(fragmentShader, GL_INFO_LOG_LENGTH, &maxLength);
+
+        char log[maxLength];
+
+        glGetShaderInfoLog(fragmentShader, maxLength, &length, log);
+
+        TraceLog(INFO, "%s", log);
+    }
+    else TraceLog(INFO, "[FSHDR ID %i] Fragment shader compiled successfully", fragmentShader);
+
+    program = glCreateProgram();
+
+    glAttachShader(program, vertexShader);
+    glAttachShader(program, fragmentShader);
+
+    glLinkProgram(program);
+    
+    // NOTE: All uniform variables are initialized to 0 when a program links
+
+    glGetProgramiv(program, GL_LINK_STATUS, &success);
+
+    if (success == GL_FALSE)
+    {
+        TraceLog(WARNING, "[SHDR ID %i] Failed to link shader program...", program);
+
+        int maxLength = 0;
+        int length;
+
+        glGetProgramiv(program, GL_INFO_LOG_LENGTH, &maxLength);
+
+        char log[maxLength];
+
+        glGetProgramInfoLog(program, maxLength, &length, log);
+
+        TraceLog(INFO, "%s", log);
+
+        glDeleteProgram(program);
+
+        program = 0;
+    }
+    else TraceLog(INFO, "[SHDR ID %i] Shader program loaded successfully", program);
+
+    glDeleteShader(vertexShader);
+    glDeleteShader(fragmentShader);
+#endif
+    return program;
+}
+
+// Unload a custom shader from memory
+void UnloadShader(Shader shader)
+{
+    if (shader.id != 0)
+    {
+        rlDeleteShader(shader.id);
+        TraceLog(INFO, "[SHDR ID %i] Unloaded shader program data", shader.id);
+    }
+}
+
+// Set custom shader to be used on batch draw
+void SetCustomShader(Shader shader)
+{
+#if defined(GRAPHICS_API_OPENGL_33) || defined(GRAPHICS_API_OPENGL_ES2)
+    if (currentShader.id != shader.id)
+    {
+        rlglDraw();
+        currentShader = shader;
+    }
+#endif
+}
+
+// Set default shader to be used in batch draw
+void SetDefaultShader(void)
+{
+#if defined(GRAPHICS_API_OPENGL_33) || defined(GRAPHICS_API_OPENGL_ES2)
+    SetCustomShader(defaultShader);
+#endif
+}
+
+// Link shader to model
+void SetModelShader(Model *model, Shader shader)
+{
+#if defined(GRAPHICS_API_OPENGL_33) || defined(GRAPHICS_API_OPENGL_ES2)
+    model->material.shader = shader;
+
+    if (vaoSupported) glBindVertexArray(model->mesh.vaoId);
+
+    // Enable vertex attributes: position
+    glBindBuffer(GL_ARRAY_BUFFER, model->mesh.vboId[0]);
+    glEnableVertexAttribArray(shader.vertexLoc);
+    glVertexAttribPointer(shader.vertexLoc, 3, GL_FLOAT, 0, 0, 0);
+
+    // Enable vertex attributes: texcoords
+    glBindBuffer(GL_ARRAY_BUFFER, model->mesh.vboId[1]);
+    glEnableVertexAttribArray(shader.texcoordLoc);
+    glVertexAttribPointer(shader.texcoordLoc, 2, GL_FLOAT, 0, 0, 0);
+
+    // Enable vertex attributes: normals
+    glBindBuffer(GL_ARRAY_BUFFER, model->mesh.vboId[2]);
+    glEnableVertexAttribArray(shader.normalLoc);
+    glVertexAttribPointer(shader.normalLoc, 3, GL_FLOAT, 0, 0, 0);
+
+    if (vaoSupported) glBindVertexArray(0);     // Unbind VAO
+
+#elif (GRAPHICS_API_OPENGL_11)
+    TraceLog(WARNING, "Shaders not supported on OpenGL 1.1");
+#endif
+}
+
+// Get shader uniform location
+int GetShaderLocation(Shader shader, const char *uniformName)
+{
+    int location = -1;
+#if defined(GRAPHICS_API_OPENGL_33) || defined(GRAPHICS_API_OPENGL_ES2)   
+    location = glGetUniformLocation(shader.id, uniformName);
+    
+    if (location == -1) TraceLog(WARNING, "[SHDR ID %i] Shader location for %s could not be found", shader.id, uniformName);
+#endif
+    return location;
+}
+
+// Set shader uniform value (float)
+void SetShaderValue(Shader shader, int uniformLoc, float *value, int size)
+{
+#if defined(GRAPHICS_API_OPENGL_33) || defined(GRAPHICS_API_OPENGL_ES2)
+    glUseProgram(shader.id);
+
+    if (size == 1) glUniform1fv(uniformLoc, 1, value);          // Shader uniform type: float
+    else if (size == 2) glUniform2fv(uniformLoc, 1, value);     // Shader uniform type: vec2
+    else if (size == 3) glUniform3fv(uniformLoc, 1, value);     // Shader uniform type: vec3
+    else if (size == 4) glUniform4fv(uniformLoc, 1, value);     // Shader uniform type: vec4
+    else TraceLog(WARNING, "Shader value float array size not supported");
+    
+    glUseProgram(0);
+#endif
+}
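+
+// Usage sketch for the function above (uniform name taken from the default shader
+// defined later in this file; values are illustrative):
+//     int tintLoc = GetShaderLocation(shader, "fragTintColor");
+//     float tint[4] = { 1.0f, 0.5f, 0.0f, 1.0f };
+//     SetShaderValue(shader, tintLoc, tint, 4);           // Uploads a vec4 uniform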
+
+// Set shader uniform value (int)
+void SetShaderValuei(Shader shader, int uniformLoc, int *value, int size)
+{
+#if defined(GRAPHICS_API_OPENGL_33) || defined(GRAPHICS_API_OPENGL_ES2)
+    glUseProgram(shader.id);
+
+    if (size == 1) glUniform1iv(uniformLoc, 1, value);          // Shader uniform type: int
+    else if (size == 2) glUniform2iv(uniformLoc, 1, value);     // Shader uniform type: ivec2
+    else if (size == 3) glUniform3iv(uniformLoc, 1, value);     // Shader uniform type: ivec3
+    else if (size == 4) glUniform4iv(uniformLoc, 1, value);     // Shader uniform type: ivec4
+    else TraceLog(WARNING, "Shader value int array size not supported");
+    
+    glUseProgram(0);
+#endif
+}
+
+// Set shader uniform value (matrix 4x4)
+void SetShaderValueMatrix(Shader shader, int uniformLoc, Matrix mat)
+{
+#if defined(GRAPHICS_API_OPENGL_33) || defined(GRAPHICS_API_OPENGL_ES2)
+    glUseProgram(shader.id);
+
+    glUniformMatrix4fv(uniformLoc, 1, false, MatrixToFloat(mat));
+    
+    glUseProgram(0);
+#endif
+}
+
+// Set blending mode (alpha, additive, multiplied)
+// NOTE: Only 3 blending modes predefined
+void SetBlendMode(int mode)
+{
+    if ((blendMode != mode) && (mode < 3))
+    {
+        rlglDraw();
+        
+        switch (mode)
+        {
+            case BLEND_ALPHA: glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); break;
+            case BLEND_ADDITIVE: glBlendFunc(GL_SRC_ALPHA, GL_ONE); break; // Alternative: glBlendFunc(GL_ONE, GL_ONE);
+            case BLEND_MULTIPLIED: glBlendFunc(GL_DST_COLOR, GL_ONE_MINUS_SRC_ALPHA); break;
+            default: break;
+        }
+        
+        blendMode = mode;
+    }
+}
+
+#if defined(GRAPHICS_API_OPENGL_33) || defined(GRAPHICS_API_OPENGL_ES2)
+void PrintProjectionMatrix(void)
+{
+    PrintMatrix(projection);
+}
+
+void PrintModelviewMatrix(void)
+{
+    PrintMatrix(modelview);
+}
+#endif
+
+//----------------------------------------------------------------------------------
+// Module specific Functions Definition
+//----------------------------------------------------------------------------------
+
+#if defined(GRAPHICS_API_OPENGL_33) || defined(GRAPHICS_API_OPENGL_ES2)
+// Upload compressed texture data to the currently bound GPU texture
+// NOTE: Expects compressed image data and a POT image
+static void LoadCompressedTexture(unsigned char *data, int width, int height, int mipmapCount, int compressedFormat)
+{
+    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
+
+    int blockSize = 0;      // Bytes per block
+    int offset = 0;
+
+    if ((compressedFormat == GL_COMPRESSED_RGB_S3TC_DXT1_EXT) ||
+        (compressedFormat == GL_COMPRESSED_RGBA_S3TC_DXT1_EXT) ||
+#if defined(GRAPHICS_API_OPENGL_ES2)
+        (compressedFormat == GL_ETC1_RGB8_OES) ||
+#endif
+        (compressedFormat == GL_COMPRESSED_RGB8_ETC2)) blockSize = 8;
+    else blockSize = 16;
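+
+    // e.g. a 256x256 DXT1 texture (blockSize = 8) gives a level 0 size of
+    // ((256 + 3)/4)*((256 + 3)/4)*8 = 64*64*8 = 32768 bytes, then 8192, 2048, ... for the halved levels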
+
+    // Load the mipmap levels
+    for (int level = 0; level < mipmapCount && (width || height); level++)
+    {
+        unsigned int size = 0;
+        
+        size = ((width + 3)/4)*((height + 3)/4)*blockSize;
+
+        glCompressedTexImage2D(GL_TEXTURE_2D, level, compressedFormat, width, height, 0, size, data + offset);
+
+        offset += size;
+        width  /= 2;
+        height /= 2;
+
+        // Security check for NPOT textures
+        if (width < 1) width = 1;
+        if (height < 1) height = 1;
+    }
+}
+
+// Load default shader (vertex and fragment)
+// NOTE: This shader program is used for batch buffers (lines, triangles, quads)
+static Shader LoadDefaultShader(void)
+{
+    Shader shader;
+
+    // Vertex shader directly defined, no external file required
+#if defined(GRAPHICS_API_OPENGL_33)
+    char vShaderStr[] = "#version 330       \n"
+        "in vec3 vertexPosition;            \n"
+        "in vec2 vertexTexCoord;            \n"
+        "in vec4 vertexColor;               \n"
+        "out vec2 fragTexCoord;             \n"
+        "out vec4 fragColor;                \n"
+#elif defined(GRAPHICS_API_OPENGL_ES2)
+    char vShaderStr[] = "#version 100       \n"
+        "attribute vec3 vertexPosition;     \n"
+        "attribute vec2 vertexTexCoord;     \n"
+        "attribute vec4 vertexColor;        \n"
+        "varying vec2 fragTexCoord;         \n"
+        "varying vec4 fragColor;            \n"
+#endif
+        "uniform mat4 mvpMatrix;            \n"
+        "void main()                        \n"
+        "{                                  \n"
+        "    fragTexCoord = vertexTexCoord; \n"
+        "    fragColor = vertexColor;       \n"
+        "    gl_Position = mvpMatrix*vec4(vertexPosition, 1.0); \n"
+        "}                                  \n";
+
+    // Fragment shader directly defined, no external file required
+#if defined(GRAPHICS_API_OPENGL_33)
+    char fShaderStr[] = "#version 330       \n"
+        "in vec2 fragTexCoord;              \n"
+        "in vec4 fragColor;                 \n"
+        "out vec4 finalColor;               \n"
+#elif defined(GRAPHICS_API_OPENGL_ES2)
+    char fShaderStr[] = "#version 100       \n"
+        "precision mediump float;           \n"     // precision required for OpenGL ES2 (WebGL)
+        "varying vec2 fragTexCoord;         \n"
+        "varying vec4 fragColor;            \n"
+#endif
+        "uniform sampler2D texture0;        \n"
+        "uniform vec4 fragTintColor;        \n"
+        "void main()                        \n"
+        "{                                  \n"
+#if defined(GRAPHICS_API_OPENGL_33)
+        "    vec4 texelColor = texture(texture0, fragTexCoord);   \n"
+        "    finalColor = texelColor*fragTintColor*fragColor;     \n"
+#elif defined(GRAPHICS_API_OPENGL_ES2)
+        "    vec4 texelColor = texture2D(texture0, fragTexCoord); \n" // NOTE: texture2D() is deprecated on OpenGL 3.3 and ES 3.0
+        "    gl_FragColor = texelColor*fragTintColor*fragColor;   \n"
+#endif
+        "}                                  \n";
+
+    shader.id = LoadShaderProgram(vShaderStr, fShaderStr);
+
+    if (shader.id != 0) TraceLog(INFO, "[SHDR ID %i] Default shader loaded successfully", shader.id);
+    else TraceLog(WARNING, "[SHDR ID %i] Default shader could not be loaded", shader.id);
+
+    LoadDefaultShaderLocations(&shader);
+
+    return shader;
+}
+
+// Get location handles for shader attributes and uniforms
+// NOTE: If a location is not found, it is set to -1
+static void LoadDefaultShaderLocations(Shader *shader)
+{
+    // Get handles to GLSL input attribute locations
+    shader->vertexLoc = glGetAttribLocation(shader->id, "vertexPosition");
+    shader->texcoordLoc = glGetAttribLocation(shader->id, "vertexTexCoord");
+    shader->normalLoc = glGetAttribLocation(shader->id, "vertexNormal");
+    shader->colorLoc = glGetAttribLocation(shader->id, "vertexColor");
+
+    // Get handles to GLSL uniform locations (vertex shader)
+    shader->mvpLoc  = glGetUniformLocation(shader->id, "mvpMatrix");
+
+    // Get handles to GLSL uniform locations (fragment shader)
+    shader->tintColorLoc = glGetUniformLocation(shader->id, "fragTintColor");
+    shader->mapDiffuseLoc = glGetUniformLocation(shader->id, "texture0");
+    shader->mapNormalLoc = glGetUniformLocation(shader->id, "texture1");
+    shader->mapSpecularLoc = glGetUniformLocation(shader->id, "texture2");
+}
+
+// Read text file
+// NOTE: text chars array should be freed manually
+static char *TextFileRead(char *fileName)
+{
+    FILE *textFile;
+    char *text = NULL;
+
+    int count = 0;
+
+    if (fileName != NULL)
+    {
+        textFile = fopen(fileName,"rt");
+
+        if (textFile != NULL)
+        {
+            fseek(textFile, 0, SEEK_END);
+            count = ftell(textFile);
+            rewind(textFile);
+
+            if (count > 0)
+            {
+                text = (char *)malloc(sizeof(char)*(count + 1));
+                count = fread(text, sizeof(char), count, textFile);
+                text[count] = '\0';
+            }
+
+            fclose(textFile);
+        }
+        else TraceLog(WARNING, "[%s] Text file could not be opened", fileName);
+    }
+
+    return text;
+}
+
+// Allocate and initialize float array buffers to store vertex data (lines, triangles, quads)
+static void InitializeBuffers(void)
+{
+    // Initialize lines arrays (vertex position and color data)
+    lines.vertices = (float *)malloc(sizeof(float)*3*2*MAX_LINES_BATCH);        // 3 floats per vertex, 2 vertices per line
+    lines.colors = (unsigned char *)malloc(sizeof(unsigned char)*4*2*MAX_LINES_BATCH);  // 4 bytes per color (RGBA), 2 colors per line
+
+    for (int i = 0; i < (3*2*MAX_LINES_BATCH); i++) lines.vertices[i] = 0.0f;
+    for (int i = 0; i < (4*2*MAX_LINES_BATCH); i++) lines.colors[i] = 0;
+
+    lines.vCounter = 0;
+    lines.cCounter = 0;
+
+    // Initialize triangles arrays (vertex position and color data)
+    triangles.vertices = (float *)malloc(sizeof(float)*3*3*MAX_TRIANGLES_BATCH);        // 3 floats per vertex, 3 vertices per triangle
+    triangles.colors = (unsigned char *)malloc(sizeof(unsigned char)*4*3*MAX_TRIANGLES_BATCH);  // 4 bytes per color (RGBA), 3 colors per triangle
+
+    for (int i = 0; i < (3*3*MAX_TRIANGLES_BATCH); i++) triangles.vertices[i] = 0.0f;
+    for (int i = 0; i < (4*3*MAX_TRIANGLES_BATCH); i++) triangles.colors[i] = 0;
+
+    triangles.vCounter = 0;
+    triangles.cCounter = 0;
+
+    // Initialize quads arrays (vertex position, texcoord and color data... and indexes)
+    quads.vertices = (float *)malloc(sizeof(float)*3*4*MAX_QUADS_BATCH);        // 3 floats per vertex, 4 vertices per quad
+    quads.texcoords = (float *)malloc(sizeof(float)*2*4*MAX_QUADS_BATCH);       // 2 floats per texcoord, 4 texcoords per quad
+    quads.colors = (unsigned char *)malloc(sizeof(unsigned char)*4*4*MAX_QUADS_BATCH);  // 4 bytes per color (RGBA), 4 colors per quad
+#if defined(GRAPHICS_API_OPENGL_33)
+    quads.indices = (unsigned int *)malloc(sizeof(int)*6*MAX_QUADS_BATCH);      // 6 indices per quad (unsigned int)
+#elif defined(GRAPHICS_API_OPENGL_ES2)
+    quads.indices = (unsigned short *)malloc(sizeof(short)*6*MAX_QUADS_BATCH);  // 6 indices per quad (unsigned short)
+#endif
+
+    for (int i = 0; i < (3*4*MAX_QUADS_BATCH); i++) quads.vertices[i] = 0.0f;
+    for (int i = 0; i < (2*4*MAX_QUADS_BATCH); i++) quads.texcoords[i] = 0.0f;
+    for (int i = 0; i < (4*4*MAX_QUADS_BATCH); i++) quads.colors[i] = 0;
+
+    int k = 0;
+
+    // Indices can be initialized right now
+    for (int i = 0; i < (6*MAX_QUADS_BATCH); i+=6)
+    {
+        quads.indices[i] = 4*k;
+        quads.indices[i+1] = 4*k+1;
+        quads.indices[i+2] = 4*k+2;
+        quads.indices[i+3] = 4*k;
+        quads.indices[i+4] = 4*k+2;
+        quads.indices[i+5] = 4*k+3;
+
+        k++;
+    }
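+
+    // e.g. the first quad uses indices { 0, 1, 2, 0, 2, 3 }, the second { 4, 5, 6, 4, 6, 7 }, and so on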
+
+    quads.vCounter = 0;
+    quads.tcCounter = 0;
+    quads.cCounter = 0;
+
+    TraceLog(INFO, "CPU buffers (lines, triangles, quads) initialized successfully");
+}
+
+// Initialize Vertex Array Objects (Contain VBO)
+// NOTE: lines, triangles and quads buffers use currentShader
+static void InitializeBuffersGPU(void)
+{
+    if (vaoSupported)
+    {
+        // Initialize Lines VAO
+        glGenVertexArrays(1, &vaoLines);
+        glBindVertexArray(vaoLines);
+    }
+
+    // Create buffers for our vertex data
+    glGenBuffers(2, linesBuffer);
+
+    // Lines - Vertex positions buffer binding and attributes enable
+    glBindBuffer(GL_ARRAY_BUFFER, linesBuffer[0]);
+    glBufferData(GL_ARRAY_BUFFER, sizeof(float)*3*2*MAX_LINES_BATCH, lines.vertices, GL_DYNAMIC_DRAW);
+    glEnableVertexAttribArray(currentShader.vertexLoc);
+    glVertexAttribPointer(currentShader.vertexLoc, 3, GL_FLOAT, 0, 0, 0);
+
+    // Lines - colors buffer
+    glBindBuffer(GL_ARRAY_BUFFER, linesBuffer[1]);
+    glBufferData(GL_ARRAY_BUFFER, sizeof(unsigned char)*4*2*MAX_LINES_BATCH, lines.colors, GL_DYNAMIC_DRAW);
+    glEnableVertexAttribArray(currentShader.colorLoc);
+    glVertexAttribPointer(currentShader.colorLoc, 4, GL_UNSIGNED_BYTE, GL_TRUE, 0, 0);
+
+    if (vaoSupported) TraceLog(INFO, "[VAO ID %i] Lines VAO initialized successfully", vaoLines);
+    else TraceLog(INFO, "[VBO ID %i][VBO ID %i] Lines VBOs initialized successfully", linesBuffer[0], linesBuffer[1]);
+    //--------------------------------------------------------------
+
+    if (vaoSupported)
+    {
+        // Initialize Triangles VAO
+        glGenVertexArrays(1, &vaoTriangles);
+        glBindVertexArray(vaoTriangles);
+    }
+
+    // Create buffers for our vertex data
+    glGenBuffers(2, trianglesBuffer);
+
+    // Enable vertex attributes
+    glBindBuffer(GL_ARRAY_BUFFER, trianglesBuffer[0]);
+    glBufferData(GL_ARRAY_BUFFER, sizeof(float)*3*3*MAX_TRIANGLES_BATCH, triangles.vertices, GL_DYNAMIC_DRAW);
+    glEnableVertexAttribArray(currentShader.vertexLoc);
+    glVertexAttribPointer(currentShader.vertexLoc, 3, GL_FLOAT, 0, 0, 0);
+
+    glBindBuffer(GL_ARRAY_BUFFER, trianglesBuffer[1]);
+    glBufferData(GL_ARRAY_BUFFER, sizeof(unsigned char)*4*3*MAX_TRIANGLES_BATCH, triangles.colors, GL_DYNAMIC_DRAW);
+    glEnableVertexAttribArray(currentShader.colorLoc);
+    glVertexAttribPointer(currentShader.colorLoc, 4, GL_UNSIGNED_BYTE, GL_TRUE, 0, 0);
+
+    if (vaoSupported) TraceLog(INFO, "[VAO ID %i] Triangles VAO initialized successfully", vaoTriangles);
+    else TraceLog(INFO, "[VBO ID %i][VBO ID %i] Triangles VBOs initialized successfully", trianglesBuffer[0], trianglesBuffer[1]);
+    //--------------------------------------------------------------
+
+    if (vaoSupported)
+    {
+        // Initialize Quads VAO
+        glGenVertexArrays(1, &vaoQuads);
+        glBindVertexArray(vaoQuads);
+    }
+
+    // Create buffers for our vertex data
+    glGenBuffers(4, quadsBuffer);
+
+    // Enable vertex attributes
+    glBindBuffer(GL_ARRAY_BUFFER, quadsBuffer[0]);
+    glBufferData(GL_ARRAY_BUFFER, sizeof(float)*3*4*MAX_QUADS_BATCH, quads.vertices, GL_DYNAMIC_DRAW);
+    glEnableVertexAttribArray(currentShader.vertexLoc);
+    glVertexAttribPointer(currentShader.vertexLoc, 3, GL_FLOAT, 0, 0, 0);
+
+    glBindBuffer(GL_ARRAY_BUFFER, quadsBuffer[1]);
+    glBufferData(GL_ARRAY_BUFFER, sizeof(float)*2*4*MAX_QUADS_BATCH, quads.texcoords, GL_DYNAMIC_DRAW);
+    glEnableVertexAttribArray(currentShader.texcoordLoc);
+    glVertexAttribPointer(currentShader.texcoordLoc, 2, GL_FLOAT, 0, 0, 0);
+
+    glBindBuffer(GL_ARRAY_BUFFER, quadsBuffer[2]);
+    glBufferData(GL_ARRAY_BUFFER, sizeof(unsigned char)*4*4*MAX_QUADS_BATCH, quads.colors, GL_DYNAMIC_DRAW);
+    glEnableVertexAttribArray(currentShader.colorLoc);
+    glVertexAttribPointer(currentShader.colorLoc, 4, GL_UNSIGNED_BYTE, GL_TRUE, 0, 0);
+
+    // Fill index buffer
+    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, quadsBuffer[3]);
+#if defined(GRAPHICS_API_OPENGL_33)
+    glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(int)*6*MAX_QUADS_BATCH, quads.indices, GL_STATIC_DRAW);
+#elif defined(GRAPHICS_API_OPENGL_ES2)
+    glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(short)*6*MAX_QUADS_BATCH, quads.indices, GL_STATIC_DRAW);
+#endif
+
+    if (vaoSupported) TraceLog(INFO, "[VAO ID %i] Quads VAO initialized successfully", vaoQuads);
+    else TraceLog(INFO, "[VBO ID %i][VBO ID %i][VBO ID %i][VBO ID %i] Quads VBOs initialized successfully", quadsBuffer[0], quadsBuffer[1], quadsBuffer[2], quadsBuffer[3]);
+
+    // Unbind the current VAO
+    if (vaoSupported) glBindVertexArray(0);
+}
+
+// Update VBOs with vertex array data
+// NOTE: If there is no vertex data, buffers don't need to be updated (vCounter > 0)
+// TODO: If no data changed on the CPU arrays --> No need to update GPU arrays (change flag required)
+static void UpdateBuffers(void)
+{
+    if (lines.vCounter > 0)
+    {
+        // Activate Lines VAO
+        if (vaoSupported) glBindVertexArray(vaoLines);
+
+        // Lines - vertex positions buffer
+        glBindBuffer(GL_ARRAY_BUFFER, linesBuffer[0]);
+        //glBufferData(GL_ARRAY_BUFFER, sizeof(float)*3*2*MAX_LINES_BATCH, lines.vertices, GL_DYNAMIC_DRAW);
+        glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(float)*3*lines.vCounter, lines.vertices);    // target - offset (in bytes) - size (in bytes) - data pointer
+
+        // Lines - colors buffer
+        glBindBuffer(GL_ARRAY_BUFFER, linesBuffer[1]);
+        //glBufferData(GL_ARRAY_BUFFER, sizeof(float)*4*2*MAX_LINES_BATCH, lines.colors, GL_DYNAMIC_DRAW);
+        glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(unsigned char)*4*lines.cCounter, lines.colors);
+    }
+    //--------------------------------------------------------------
+
+    if (triangles.vCounter > 0)
+    {
+        // Activate Triangles VAO
+        if (vaoSupported) glBindVertexArray(vaoTriangles);
+
+        // Triangles - vertex positions buffer
+        glBindBuffer(GL_ARRAY_BUFFER, trianglesBuffer[0]);
+        //glBufferData(GL_ARRAY_BUFFER, sizeof(float)*3*3*MAX_TRIANGLES_BATCH, triangles.vertices, GL_DYNAMIC_DRAW);
+        glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(float)*3*triangles.vCounter, triangles.vertices);
+
+        // Triangles - colors buffer
+        glBindBuffer(GL_ARRAY_BUFFER, trianglesBuffer[1]);
+        //glBufferData(GL_ARRAY_BUFFER, sizeof(float)*4*3*MAX_TRIANGLES_BATCH, triangles.colors, GL_DYNAMIC_DRAW);
+        glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(unsigned char)*4*triangles.cCounter, triangles.colors);
+    }
+    //--------------------------------------------------------------
+
+    if (quads.vCounter > 0)
+    {
+        // Activate Quads VAO
+        if (vaoSupported) glBindVertexArray(vaoQuads);
+
+        // Quads - vertex positions buffer
+        glBindBuffer(GL_ARRAY_BUFFER, quadsBuffer[0]);
+        //glBufferData(GL_ARRAY_BUFFER, sizeof(float)*3*4*MAX_QUADS_BATCH, quads.vertices, GL_DYNAMIC_DRAW);
+        glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(float)*3*quads.vCounter, quads.vertices);
+
+        // Quads - texture coordinates buffer
+        glBindBuffer(GL_ARRAY_BUFFER, quadsBuffer[1]);
+        //glBufferData(GL_ARRAY_BUFFER, sizeof(float)*2*4*MAX_QUADS_BATCH, quads.texcoords, GL_DYNAMIC_DRAW);
+        glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(float)*2*quads.vCounter, quads.texcoords);
+
+        // Quads - colors buffer
+        glBindBuffer(GL_ARRAY_BUFFER, quadsBuffer[2]);
+        //glBufferData(GL_ARRAY_BUFFER, sizeof(float)*4*4*MAX_QUADS_BATCH, quads.colors, GL_DYNAMIC_DRAW);
+        glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(unsigned char)*4*quads.vCounter, quads.colors);
+
+        // Another option would be using buffer mapping...
+        //triangles.vertices = glMapBuffer(GL_ARRAY_BUFFER, GL_READ_WRITE);
+        // Now we can modify vertices
+        //glUnmapBuffer(GL_ARRAY_BUFFER);
+    }
+    //--------------------------------------------------------------
+
+    // Unbind the current VAO
+    if (vaoSupported) glBindVertexArray(0);
+}
+#endif //defined(GRAPHICS_API_OPENGL_33) || defined(GRAPHICS_API_OPENGL_ES2)
+
+#if defined(GRAPHICS_API_OPENGL_11)
+// Mipmap data is generated and appended after the base image data
+static int GenerateMipmaps(unsigned char *data, int baseWidth, int baseHeight)
+{
+    int mipmapCount = 1;                // Required mipmap levels count (including base level)
+    int width = baseWidth;
+    int height = baseHeight;
+    int size = baseWidth*baseHeight*4;  // Size in bytes (will include mipmaps...), RGBA only
+
+    // Count mipmap levels required
+    while ((width != 1) && (height != 1))
+    {
+        if (width != 1) width /= 2;
+        if (height != 1) height /= 2;
+
+        TraceLog(DEBUG, "Next mipmap size: %i x %i", width, height);
+
+        mipmapCount++;
+
+        size += (width*height*4);       // Add mipmap size (in bytes)
+    }
+
+    TraceLog(DEBUG, "Total mipmaps required: %i", mipmapCount);
+    TraceLog(DEBUG, "Total size of data required: %i", size);
+
+    unsigned char *temp = realloc(data, size);
+
+    if (temp != NULL) data = temp;
+    else TraceLog(WARNING, "Mipmaps required memory could not be allocated");
+
+    width = baseWidth;
+    height = baseHeight;
+    size = (width*height*4);
+
+    // Generate mipmaps
+    // NOTE: Each mipmap level is stored right after the previous one (appended to data)
+    Color *image = (Color *)malloc(width*height*sizeof(Color));
+    Color *mipmap = NULL;
+    int offset = 0;
+    int j = 0;
+
+    for (int i = 0; i < size; i += 4)
+    {
+        image[j].r = data[i];
+        image[j].g = data[i + 1];
+        image[j].b = data[i + 2];
+        image[j].a = data[i + 3];
+        j++;
+    }
+
+    TraceLog(DEBUG, "Mipmap base (%ix%i)", width, height);
+
+    for (int mip = 1; mip < mipmapCount; mip++)
+    {
+        mipmap = GenNextMipmap(image, width, height);
+
+        offset += (width*height*4); // Size of last mipmap
+        j = 0;
+
+        width /= 2;
+        height /= 2;
+        size = (width*height*4);    // Mipmap size to store after offset
+
+        // Add mipmap to data
+        for (int i = 0; i < size; i += 4)
+        {
+            data[offset + i] = mipmap[j].r;
+            data[offset + i + 1] = mipmap[j].g;
+            data[offset + i + 2] = mipmap[j].b;
+            data[offset + i + 3] = mipmap[j].a;
+            j++;
+        }
+
+        free(image);
+
+        image = mipmap;
+        mipmap = NULL;
+    }
+
+    free(mipmap);       // free mipmap data
+
+    return mipmapCount;
+}
+
+// Manual mipmap generation (basic scaling algorithm)
+static Color *GenNextMipmap(Color *srcData, int srcWidth, int srcHeight)
+{
+    int x2, y2;
+    Color prow, pcol;
+
+    int width = srcWidth/2;
+    int height = srcHeight/2;
+
+    Color *mipmap = (Color *)malloc(width*height*sizeof(Color));
+
+    // Box-filter downscale: each destination pixel averages a 2x2 block of source pixels
+    for (int y = 0; y < height; y++)
+    {
+        y2 = 2*y;
+
+        for (int x = 0; x < width; x++)
+        {
+            x2 = 2*x;
+
+            prow.r = (srcData[y2*srcWidth + x2].r + srcData[y2*srcWidth + x2 + 1].r)/2;
+            prow.g = (srcData[y2*srcWidth + x2].g + srcData[y2*srcWidth + x2 + 1].g)/2;
+            prow.b = (srcData[y2*srcWidth + x2].b + srcData[y2*srcWidth + x2 + 1].b)/2;
+            prow.a = (srcData[y2*srcWidth + x2].a + srcData[y2*srcWidth + x2 + 1].a)/2;
+
+            pcol.r = (srcData[(y2+1)*srcWidth + x2].r + srcData[(y2+1)*srcWidth + x2 + 1].r)/2;
+            pcol.g = (srcData[(y2+1)*srcWidth + x2].g + srcData[(y2+1)*srcWidth + x2 + 1].g)/2;
+            pcol.b = (srcData[(y2+1)*srcWidth + x2].b + srcData[(y2+1)*srcWidth + x2 + 1].b)/2;
+            pcol.a = (srcData[(y2+1)*srcWidth + x2].a + srcData[(y2+1)*srcWidth + x2 + 1].a)/2;
+
+            mipmap[y*width + x].r = (prow.r + pcol.r)/2;
+            mipmap[y*width + x].g = (prow.g + pcol.g)/2;
+            mipmap[y*width + x].b = (prow.b + pcol.b)/2;
+            mipmap[y*width + x].a = (prow.a + pcol.a)/2;
+        }
+    }
+
+    TraceLog(DEBUG, "Mipmap generated successfully (%ix%i)", width, height);
+
+    return mipmap;
+}
+#endif
+
+#if defined(RLGL_STANDALONE)
+
+// Output a trace log message
+// NOTE: Expected msgType values: INFO, ERROR, WARNING, DEBUG
+static void TraceLog(int msgType, const char *text, ...)
+{
+    va_list args;
+    va_start(args, text);
+
+    switch(msgType)
+    {
+        case INFO: fprintf(stdout, "INFO: "); break;
+        case ERROR: fprintf(stdout, "ERROR: "); break;
+        case WARNING: fprintf(stdout, "WARNING: "); break;
+        case DEBUG: fprintf(stdout, "DEBUG: "); break;
+        default: break;
+    }
+
+    vfprintf(stdout, text, args);
+    fprintf(stdout, "\n");
+
+    va_end(args);
+
+    if (msgType == ERROR) exit(1);
+}
+
+// Converts Matrix to float array
+// NOTE: The returned array is a transposed version of the Matrix struct;
+// this is required because, although raymath uses the OpenGL column-major convention,
+// the Matrix struct memory layout and variable naming are not consistent with it
+float *MatrixToFloat(Matrix mat)
+{
+    static float buffer[16];
+
+    buffer[0] = mat.m0;
+    buffer[1] = mat.m4;
+    buffer[2] = mat.m8;
+    buffer[3] = mat.m12;
+    buffer[4] = mat.m1;
+    buffer[5] = mat.m5;
+    buffer[6] = mat.m9;
+    buffer[7] = mat.m13;
+    buffer[8] = mat.m2;
+    buffer[9] = mat.m6;
+    buffer[10] = mat.m10;
+    buffer[11] = mat.m14;
+    buffer[12] = mat.m3;
+    buffer[13] = mat.m7;
+    buffer[14] = mat.m11;
+    buffer[15] = mat.m15;
+
+    return buffer;
+}
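+
+// Example (minimal sketch, 'mvpLoc' and 'matModelViewProjection' are placeholder names,
+// and an active OpenGL 3.3 context is assumed): the float array returned above is already
+// laid out the way glUniformMatrix4fv() expects, so no extra transpose is requested:
+//
+//     glUniformMatrix4fv(mvpLoc, 1, GL_FALSE, MatrixToFloat(matModelViewProjection));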
+#endif

+ 327 - 0
examples/oculus_glfw_sample/rlgl.h

@@ -0,0 +1,327 @@
+/**********************************************************************************************
+*
+*   rlgl - raylib OpenGL abstraction layer
+*
+*   raylib now uses OpenGL 1.1 style functions (rlVertex) that are mapped to the selected OpenGL version:
+*       OpenGL 1.1  - Direct map rl* -> gl*
+*       OpenGL 3.3  - Vertex data is stored in VAOs, call rlglDraw() to render
+*       OpenGL ES 2 - Vertex data is stored in VBOs or VAOs (when available), call rlglDraw() to render
+*
+*   Copyright (c) 2014 Ramon Santamaria (@raysan5)
+*
+*   This software is provided "as-is", without any express or implied warranty. In no event
+*   will the authors be held liable for any damages arising from the use of this software.
+*
+*   Permission is granted to anyone to use this software for any purpose, including commercial
+*   applications, and to alter it and redistribute it freely, subject to the following restrictions:
+*
+*     1. The origin of this software must not be misrepresented; you must not claim that you
+*     wrote the original software. If you use this software in a product, an acknowledgment
+*     in the product documentation would be appreciated but is not required.
+*
+*     2. Altered source versions must be plainly marked as such, and must not be misrepresented
+*     as being the original software.
+*
+*     3. This notice may not be removed or altered from any source distribution.
+*
+**********************************************************************************************/
+
+#ifndef RLGL_H
+#define RLGL_H
+
+//#define RLGL_STANDALONE       // NOTE: To use rlgl as standalone lib, just uncomment this line
+
+#ifndef RLGL_STANDALONE
+    #include "raylib.h"         // Required for typedef(s): Model, Shader, Texture2D
+    #include "utils.h"          // Required for function TraceLog()
+#endif
+
+#ifdef RLGL_STANDALONE
+    #define RAYMATH_STANDALONE
+#endif
+
+#include "raymath.h"            // Required for types: Vector3, Matrix
+
+// Select desired OpenGL version
+// NOTE: These preprocessor defines are only used by the rlgl module;
+// if the OpenGL version is required by any other module, it uses rlGetVersion()
+
+// Choose the OpenGL version here or define it at compile time: -DGRAPHICS_API_OPENGL_33
+//#define GRAPHICS_API_OPENGL_11     // Only available on PLATFORM_DESKTOP
+//#define GRAPHICS_API_OPENGL_33     // Only available on PLATFORM_DESKTOP
+//#define GRAPHICS_API_OPENGL_ES2    // Only available on PLATFORM_ANDROID or PLATFORM_RPI or PLATFORM_WEB
+
+// Security check in case no GRAPHICS_API_OPENGL_* defined
+#if !defined(GRAPHICS_API_OPENGL_11) && !defined(GRAPHICS_API_OPENGL_33) && !defined(GRAPHICS_API_OPENGL_ES2)
+    #define GRAPHICS_API_OPENGL_11
+#endif
+
+// Security check in case multiple GRAPHICS_API_OPENGL_* defined
+#if defined(GRAPHICS_API_OPENGL_11)
+    #if defined(GRAPHICS_API_OPENGL_33)
+        #undef GRAPHICS_API_OPENGL_33
+    #endif
+
+    #if defined(GRAPHICS_API_OPENGL_ES2)
+        #undef GRAPHICS_API_OPENGL_ES2
+    #endif
+#endif
+
+//----------------------------------------------------------------------------------
+// Defines and Macros
+//----------------------------------------------------------------------------------
+#if defined(GRAPHICS_API_OPENGL_11) || defined(GRAPHICS_API_OPENGL_33)
+    // NOTE: This is the maximum number of lines, triangles and quads per frame; be careful!
+    #define MAX_LINES_BATCH         8192
+    #define MAX_TRIANGLES_BATCH     4096
+    #define MAX_QUADS_BATCH         4096
+#elif defined(GRAPHICS_API_OPENGL_ES2)
+    // NOTE: Reduce memory sizes for embedded systems (RPI and HTML5)
+    // NOTE: On HTML5 (emscripten) these buffers are allocated on the heap; the default heap is only 16 MB, so take care
+    #define MAX_LINES_BATCH         1024    // Critical for wire shapes (sphere)
+    #define MAX_TRIANGLES_BATCH     2048    // Critical for some shapes (sphere)
+    #define MAX_QUADS_BATCH         1024    // Be careful with text, every letter maps a quad
+#endif
+
+//----------------------------------------------------------------------------------
+// Types and Structures Definition
+//----------------------------------------------------------------------------------
+typedef enum { RL_PROJECTION, RL_MODELVIEW, RL_TEXTURE } MatrixMode;
+
+typedef enum { RL_LINES, RL_TRIANGLES, RL_QUADS } DrawMode;
+
+typedef enum { OPENGL_11 = 1, OPENGL_33, OPENGL_ES_20 } GlVersion;
+
+#if defined(RLGL_STANDALONE)
+    #ifndef __cplusplus
+    // Boolean type
+    typedef enum { false, true } bool;
+    #endif
+
+    // byte type
+    typedef unsigned char byte;
+    
+    // Color type, RGBA (32bit)
+    typedef struct Color {
+        unsigned char r;
+        unsigned char g;
+        unsigned char b;
+        unsigned char a;
+    } Color;
+
+    // Texture formats (support depends on OpenGL version)
+    typedef enum { 
+        UNCOMPRESSED_GRAYSCALE = 1,     // 8 bit per pixel (no alpha)
+        UNCOMPRESSED_GRAY_ALPHA,
+        UNCOMPRESSED_R5G6B5,            // 16 bpp
+        UNCOMPRESSED_R8G8B8,            // 24 bpp
+        UNCOMPRESSED_R5G5B5A1,          // 16 bpp (1 bit alpha)
+        UNCOMPRESSED_R4G4B4A4,          // 16 bpp (4 bit alpha)
+        UNCOMPRESSED_R8G8B8A8,          // 32 bpp
+        COMPRESSED_DXT1_RGB,            // 4 bpp (no alpha)
+        COMPRESSED_DXT1_RGBA,           // 4 bpp (1 bit alpha)
+        COMPRESSED_DXT3_RGBA,           // 8 bpp
+        COMPRESSED_DXT5_RGBA,           // 8 bpp
+        COMPRESSED_ETC1_RGB,            // 4 bpp
+        COMPRESSED_ETC2_RGB,            // 4 bpp
+        COMPRESSED_ETC2_EAC_RGBA,       // 8 bpp
+        COMPRESSED_PVRT_RGB,            // 4 bpp
+        COMPRESSED_PVRT_RGBA,           // 4 bpp
+        COMPRESSED_ASTC_4x4_RGBA,       // 8 bpp
+        COMPRESSED_ASTC_8x8_RGBA        // 2 bpp
+    } TextureFormat;
+    
+    // Bounding box type
+    typedef struct BoundingBox {
+        Vector3 min;
+        Vector3 max;
+    } BoundingBox;
+
+    // Mesh with vertex data type
+    // NOTE: On OpenGL 1.1 vertex data is kept in CPU memory; on OpenGL 3.3+ it is uploaded to GPU memory (vaoId)
+    typedef struct Mesh {
+        int vertexCount;            // num vertices
+        float *vertices;            // vertex position (XYZ - 3 components per vertex)
+        float *texcoords;           // vertex texture coordinates (UV - 2 components per vertex)
+        float *texcoords2;          // vertex second texture coordinates (useful for lightmaps)
+        float *normals;             // vertex normals (XYZ - 3 components per vertex)
+        float *tangents;            // vertex tangents (XYZ - 3 components per vertex)
+        unsigned char *colors;      // vertex colors (RGBA - 4 components per vertex)
+        
+        BoundingBox bounds;         // mesh limits defined by min and max points
+        
+        unsigned int vaoId;         // OpenGL Vertex Array Object id
+        unsigned int vboId[6];      // OpenGL Vertex Buffer Objects id (6 types of vertex data)
+    } Mesh;
+
+    // Shader type
+    typedef struct Shader {
+        unsigned int id;                // Shader program id
+
+        // Variable attributes
+        int vertexLoc;        // Vertex attribute location point (vertex shader)
+        int texcoordLoc;      // Texcoord attribute location point (vertex shader)
+        int normalLoc;        // Normal attribute location point (vertex shader)
+        int colorLoc;         // Color attribute location point (vertex shader)
+
+        // Uniforms
+        int mvpLoc;           // ModelView-Projection matrix uniform location point (vertex shader)
+        int tintColorLoc;     // Color uniform location point (fragment shader)
+        
+        int mapDiffuseLoc;    // Diffuse map texture uniform location point (fragment shader)
+        int mapNormalLoc;     // Normal map texture uniform location point (fragment shader)
+        int mapSpecularLoc;   // Specular map texture uniform location point (fragment shader)
+    } Shader;
+
+    // Texture2D type
+    // NOTE: Data stored in GPU memory
+    typedef struct Texture2D {
+        unsigned int id;        // OpenGL texture id
+        int width;              // Texture base width
+        int height;             // Texture base height
+        int mipmaps;            // Mipmap levels, 1 by default
+        int format;             // Data format (TextureFormat)
+    } Texture2D;
+    
+    // RenderTexture2D type, for texture rendering
+    typedef struct RenderTexture2D {
+        unsigned int id;        // Render texture (fbo) id
+        Texture2D texture;      // Color buffer attachment texture
+        Texture2D depth;        // Depth buffer attachment texture
+    } RenderTexture2D;
+    
+    // Material type
+    typedef struct Material {
+        Shader shader;
+
+        Texture2D texDiffuse;      // Diffuse texture
+        Texture2D texNormal;       // Normal texture
+        Texture2D texSpecular;     // Specular texture
+        
+        Color colDiffuse;
+        Color colAmbient;
+        Color colSpecular;
+        
+        float glossiness;
+        float normalDepth;
+    } Material;
+
+    // 3d Model type
+    typedef struct Model {
+        Mesh mesh;
+        Matrix transform;
+        Material material;
+    } Model;
+	
+    // Color blending modes (pre-defined)
+    typedef enum { BLEND_ALPHA = 0, BLEND_ADDITIVE, BLEND_MULTIPLIED } BlendMode;
+#endif
+
+#ifdef __cplusplus
+extern "C" {            // Prevents name mangling of functions
+#endif
+
+//------------------------------------------------------------------------------------
+// Functions Declaration - Matrix operations
+//------------------------------------------------------------------------------------
+void rlMatrixMode(int mode);                    // Choose the current matrix to be transformed
+void rlPushMatrix(void);                        // Push the current matrix to stack
+void rlPopMatrix(void);                         // Pop latest inserted matrix from stack
+void rlLoadIdentity(void);                      // Reset current matrix to identity matrix
+void rlTranslatef(float x, float y, float z);   // Multiply the current matrix by a translation matrix
+void rlRotatef(float angleDeg, float x, float y, float z);  // Multiply the current matrix by a rotation matrix
+void rlScalef(float x, float y, float z);       // Multiply the current matrix by a scaling matrix
+void rlMultMatrixf(float *mat);                 // Multiply the current matrix by another matrix
+void rlFrustum(double left, double right, double bottom, double top, double near, double far);
+void rlOrtho(double left, double right, double bottom, double top, double near, double far);
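+
+// Example (minimal sketch): the matrix stack mirrors classic OpenGL 1.1; transformations are
+// accumulated on the current matrix and can be saved/restored around a block of draw calls:
+//
+//     rlPushMatrix();
+//         rlTranslatef(2.0f, 0.0f, 0.0f);
+//         rlRotatef(45.0f, 0.0f, 1.0f, 0.0f);
+//         ...                              // Vertex data affected by the transformation
+//     rlPopMatrix();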
+
+//------------------------------------------------------------------------------------
+// Functions Declaration - Vertex level operations
+//------------------------------------------------------------------------------------
+void rlBegin(int mode);                         // Initialize drawing mode (how to organize vertex data)
+void rlEnd(void);                               // Finish vertex providing
+void rlVertex2i(int x, int y);                  // Define one vertex (position) - 2 int
+void rlVertex2f(float x, float y);              // Define one vertex (position) - 2 float
+void rlVertex3f(float x, float y, float z);     // Define one vertex (position) - 3 float
+void rlTexCoord2f(float x, float y);            // Define one vertex (texture coordinate) - 2 float
+void rlNormal3f(float x, float y, float z);     // Define one vertex (normal) - 3 float
+void rlColor4ub(byte r, byte g, byte b, byte a);    // Define one vertex (color) - 4 byte
+void rlColor3f(float x, float y, float z);          // Define one vertex (color) - 3 float
+void rlColor4f(float x, float y, float z, float w); // Define one vertex (color) - 4 float
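+
+// Example (minimal sketch): the rl* vertex functions mirror OpenGL 1.1 immediate mode;
+// on OpenGL 3.3 / ES2 the data is only accumulated in buffers until rlglDraw() is called:
+//
+//     rlBegin(RL_TRIANGLES);
+//         rlColor4ub(255, 0, 0, 255);
+//         rlVertex2f(0.0f, 0.0f);
+//         rlVertex2f(1.0f, 0.0f);
+//         rlVertex2f(0.5f, 1.0f);
+//     rlEnd();
+//
+//     rlglDraw();                          // Submits the batched vertex data (3.3/ES2 code paths)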
+
+//------------------------------------------------------------------------------------
+// Functions Declaration - OpenGL equivalent functions (common to 1.1, 3.3+, ES2)
+// NOTE: These functions are used to completely abstract raylib code from the OpenGL layer
+//------------------------------------------------------------------------------------
+void rlEnableTexture(unsigned int id);          // Enable texture usage
+void rlDisableTexture(void);                    // Disable texture usage
+void rlEnableRenderTexture(unsigned int id);    // Enable render texture (fbo)
+void rlDisableRenderTexture(void);              // Disable render texture (fbo), return to default framebuffer
+void rlEnableDepthTest(void);                   // Enable depth test
+void rlDisableDepthTest(void);                  // Disable depth test
+void rlDeleteTextures(unsigned int id);         // Delete OpenGL texture from GPU
+void rlDeleteRenderTextures(RenderTexture2D target);    // Delete render textures (fbo) from GPU
+void rlDeleteShader(unsigned int id);           // Delete OpenGL shader program from GPU
+void rlDeleteVertexArrays(unsigned int id);     // Unload vertex data (VAO) from GPU memory
+void rlDeleteBuffers(unsigned int id);          // Unload vertex data (VBO) from GPU memory
+void rlClearColor(byte r, byte g, byte b, byte a);  // Clear color buffer with color
+void rlClearScreenBuffers(void);                // Clear used screen buffers (color and depth)
+int rlGetVersion(void);                         // Returns current OpenGL version
+
+//------------------------------------------------------------------------------------
+// Functions Declaration - rlgl functionality
+//------------------------------------------------------------------------------------
+void rlglInit(void);                            // Initialize rlgl (shaders, VAO, VBO...)
+void rlglClose(void);                           // De-init rlgl
+void rlglDraw(void);                            // Draw VAO/VBO
+void rlglInitGraphics(int offsetX, int offsetY, int width, int height);  // Initialize Graphics (OpenGL stuff)
+
+unsigned int rlglLoadTexture(void *data, int width, int height, int textureFormat, int mipmapCount);    // Load texture in GPU
+RenderTexture2D rlglLoadRenderTexture(int width, int height);   // Load a texture to be used for rendering (fbo with color and depth attachments)
+void rlglUpdateTexture(unsigned int id, int width, int height, int format, void *data);         // Update GPU texture with new data
+void rlglGenerateMipmaps(Texture2D texture);                             // Generate mipmap data for selected texture
+
+// NOTE: There is a set of shader-related functions available to the end user;
+// to avoid creating function wrappers through the core module, they are declared directly in raylib.h
+
+Model rlglLoadModel(Mesh mesh);           // Upload mesh vertex data to GPU and provide VAO/VBO ids
+void rlglDrawModel(Model model, Vector3 position, Vector3 rotationAxis, float rotationAngle, Vector3 scale, Color color, bool wires);
+
+Vector3 rlglUnproject(Vector3 source, Matrix proj, Matrix view);    // Get world coordinates from screen coordinates
+
+unsigned char *rlglReadScreenPixels(int width, int height);         // Read screen pixel data (color buffer)
+void *rlglReadTexturePixels(Texture2D texture);                     // Read texture pixel data
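+
+// Example (minimal sketch, 'pixels', 'width' and 'height' are placeholders): uploading a raw
+// RGBA image as a GPU texture with a single mipmap level:
+//
+//     unsigned char *pixels = ...;         // width*height*4 bytes of RGBA data
+//     unsigned int texId = rlglLoadTexture(pixels, width, height, UNCOMPRESSED_R8G8B8A8, 1);
+//     ...
+//     rlDeleteTextures(texId);             // Release the texture when no longer needed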
+
+#if defined(GRAPHICS_API_OPENGL_33) || defined(GRAPHICS_API_OPENGL_ES2)
+void PrintProjectionMatrix(void);       // DEBUG: Print projection matrix
+void PrintModelviewMatrix(void);        // DEBUG: Print modelview matrix
+#endif
+
+#if defined(RLGL_STANDALONE)
+//------------------------------------------------------------------------------------
+// Shaders System Functions (Module: rlgl)
+// NOTE: These functions have no effect when using OpenGL 1.1
+//------------------------------------------------------------------------------------
+Shader LoadShader(char *vsFileName, char *fsFileName);              // Load a custom shader and bind default locations
+unsigned int LoadShaderProgram(char *vShaderStr, char *fShaderStr); // Load custom shader strings and return program id
+void UnloadShader(Shader shader);                                   // Unload a custom shader from memory
+void SetCustomShader(Shader shader);                                // Set custom shader to be used in batch draw
+void SetDefaultShader(void);                                        // Set default shader to be used in batch draw
+void SetModelShader(Model *model, Shader shader);                   // Link a shader to a model
+
+int GetShaderLocation(Shader shader, const char *uniformName);                          // Get shader uniform location
+void SetShaderValue(Shader shader, int uniformLoc, float *value, int size);             // Set shader uniform value (float)
+void SetShaderValuei(Shader shader, int uniformLoc, int *value, int size);              // Set shader uniform value (int)
+void SetShaderMapDiffuse(Shader *shader, Texture2D texture);                            // Default diffuse shader map texture assignment
+void SetShaderMapNormal(Shader *shader, const char *uniformName, Texture2D texture);    // Normal map texture shader assignment
+void SetShaderMapSpecular(Shader *shader, const char *uniformName, Texture2D texture);  // Specular map texture shader assignment
+void SetShaderMap(Shader *shader, int mapLocation, Texture2D texture, int textureUnit); // TODO: Generic shader map assignment
+
+void SetBlendMode(int mode);                                        // Set blending mode (alpha, additive, multiplied)
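+
+// Example (minimal sketch, shader file names are placeholders): loading a custom shader,
+// using it for the following batched drawing and then restoring the default one:
+//
+//     Shader shader = LoadShader("custom.vs", "custom.fs");
+//     SetCustomShader(shader);
+//     ...                                  // Draw calls batched with the custom shader
+//     SetDefaultShader();
+//     UnloadShader(shader);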
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // RLGL_H

Some files were not shown because too many files changed in this diff