
GpuParamBlock is now initialized like any other CoreGpuObject
GpuParamBlock data is now switched more gracefully between the render thread and other threads
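
A minimal usage sketch of the new flow, pieced together only from the diffs below (how the HardwareBufferManager instance and the GpuParamBlockDesc are obtained is assumed and is not part of this commit):

	// Hedged sketch; only calls visible in this commit's diffs are used.
	void uploadBlockData(HardwareBufferManager& bufferManager, const GpuParamBlockDesc& blockDesc,
		const void* data, UINT32 size)
	{
		// createGpuParamBlock now performs the usual CoreGpuObject two-phase setup:
		// construct, setThisPtr(), then initialize(), which queues initialize_internal()
		// (the actual GPU buffer creation) on the render thread.
		GpuParamBlockPtr block = bufferManager.createGpuParamBlock(blockDesc, GPBU_DYNAMIC);

		// Non-render threads only touch the CPU-side copy; write() just marks the block dirty.
		block->write(0, data, size);

		// updateBuffer() snapshots the dirty data and queues updateBuffer_internal() on the
		// render thread, which forwards it to the API-specific buffer via writeAll().
		block->updateBuffer();
	}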

Marko Pintera, 13 years ago
Parent commit db092c1e82
25 changed files with 320 additions and 239 deletions
  1. +17 -12  CamelotD3D11RenderSystem/Include/CmD3D11GpuParamBlock.h
  2. +1 -1  CamelotD3D11RenderSystem/Include/CmD3D11HardwareBufferManager.h
  3. +22 -41  CamelotD3D11RenderSystem/Source/CmD3D11GpuParamBlock.cpp
  4. +2 -2  CamelotD3D11RenderSystem/Source/CmD3D11HardwareBufferManager.cpp
  5. +3 -4  CamelotD3D11RenderSystem/Source/CmD3D11RenderSystem.cpp
  6. +1 -1  CamelotD3D9Renderer/Include/CmD3D9HardwareBufferManager.h
  7. +2 -2  CamelotD3D9Renderer/Source/CmD3D9HardwareBufferManager.cpp
  8. +6 -4  CamelotD3D9Renderer/Source/CmD3D9RenderSystem.cpp
  9. +15 -1  CamelotForwardRenderer/Source/CmForwardRenderer.cpp
  10. +18 -13  CamelotGLRenderer/Include/CmGLGpuParamBlock.h
  11. +1 -1  CamelotGLRenderer/Include/CmGLHardwareBufferManager.h
  12. +21 -41  CamelotGLRenderer/Source/CmGLGpuParamBlock.cpp
  13. +2 -2  CamelotGLRenderer/Source/CmGLHardwareBufferManager.cpp
  14. +6 -5  CamelotGLRenderer/Source/CmGLRenderSystem.cpp
  15. +17 -4  CamelotRenderer/Include/CmCoreGpuObject.h
  16. +66 -15  CamelotRenderer/Include/CmGpuParamBlock.h
  17. +1 -2  CamelotRenderer/Include/CmGpuParams.h
  18. +1 -1  CamelotRenderer/Include/CmHardwareBufferManager.h
  19. +12 -8  CamelotRenderer/Source/CmCoreGpuObject.cpp
  20. +1 -2  CamelotRenderer/Source/CmDeferredRenderContext.cpp
  21. +80 -33  CamelotRenderer/Source/CmGpuParamBlock.cpp
  22. +2 -15  CamelotRenderer/Source/CmGpuParams.cpp
  23. +5 -1  CamelotRenderer/Source/CmHardwareBufferManager.cpp
  24. +9 -1  CamelotRenderer/Source/CmMaterial.cpp
  25. +9 -27  CamelotRenderer/TODO.txt

+ 17 - 12
CamelotD3D11RenderSystem/Include/CmD3D11GpuParamBlock.h

@@ -5,24 +5,29 @@
 
 namespace CamelotEngine
 {
-	class CM_D3D11_EXPORT D3D11GpuParamBlock : public GpuParamBlock
+	class CM_D3D11_EXPORT D3D11GpuParamBlockBuffer : public GpuParamBlockBuffer
 	{
-	private:
-		struct D3D11GpuParamBlockSharedData
-		{
-			D3D11HardwareBuffer* mBuffer;
-		};
-
 	public:
-		D3D11GpuParamBlock(const GpuParamBlockDesc& desc, GpuParamBlockUsage usage);
-		~D3D11GpuParamBlock();
+		D3D11GpuParamBlockBuffer(UINT32 size, GpuParamBlockUsage usage);
+		~D3D11GpuParamBlockBuffer();
 
-		virtual void updateIfDirty();
-		virtual GpuParamBlockPtr clone() const;
+		/**
+		 * @copydoc GpuParamBlockBuffer::writeAll.
+		 */
+		void writeAll(const void* data);
 
 		ID3D11Buffer* getD3D11Buffer() const;
 
 	private:
-		D3D11GpuParamBlockSharedData* mD3D11SharedData;
+		D3D11HardwareBuffer* mBuffer;
+	};
+
+	class CM_D3D11_EXPORT D3D11GpuParamBlock : public GpuParamBlock
+	{
+	protected:
+		/**
+		 * @copydoc GpuParamBlock::createBuffer.
+		 */
+		GpuParamBlockBuffer* createBuffer() const;
 	};
 }

+ 1 - 1
CamelotD3D11RenderSystem/Include/CmD3D11HardwareBufferManager.h

@@ -25,7 +25,7 @@ namespace CamelotEngine
 		IndexBuffer* createIndexBufferImpl(IndexBuffer::IndexType itype, UINT32 numIndexes, GpuBufferUsage usage);
 
 		/** @copydoc HardwareBufferManager::createGpuParamBlock */
-		GpuParamBlock* createGpuParamBlockImpl(const GpuParamBlockDesc& paramDesc, GpuParamBlockUsage usage = GPBU_STATIC);
+		GpuParamBlock* createGpuParamBlockImpl();
 
 		/**
 		 * @copydoc HardwareBufferManager::createGenericBufferImpl

+ 22 - 41
CamelotD3D11RenderSystem/Source/CmD3D11GpuParamBlock.cpp

@@ -5,59 +5,40 @@
 
 namespace CamelotEngine
 {
-	D3D11GpuParamBlock::D3D11GpuParamBlock(const GpuParamBlockDesc& desc, GpuParamBlockUsage usage)
-		:GpuParamBlock(desc, usage), mD3D11SharedData(nullptr)
+	D3D11GpuParamBlockBuffer::D3D11GpuParamBlockBuffer(UINT32 size, GpuParamBlockUsage usage)
+		:GpuParamBlockBuffer(size, usage), mBuffer(nullptr)
 	{
-		mD3D11SharedData = new D3D11GpuParamBlockSharedData();
+		D3D11RenderSystem* d3d11rs = static_cast<D3D11RenderSystem*>(RenderSystem::instancePtr());
+		D3D11Device& device = d3d11rs->getPrimaryDevice();
+
+		if(mUsage == GPBU_STATIC)
+			mBuffer = new D3D11HardwareBuffer(D3D11HardwareBuffer::BT_CONSTANT, GBU_STATIC, 1, mSize, device);
+		else if(mUsage == GPBU_DYNAMIC)
+			mBuffer = new D3D11HardwareBuffer(D3D11HardwareBuffer::BT_CONSTANT, GBU_DYNAMIC, 1, mSize, device);
+		else
+			CM_EXCEPT(InternalErrorException, "Invalid gpu param block usage.");
 	}
 
-	D3D11GpuParamBlock::~D3D11GpuParamBlock()
+	D3D11GpuParamBlockBuffer::~D3D11GpuParamBlockBuffer()
 	{
-		if(mOwnsSharedData)
-		{
-			delete mD3D11SharedData->mBuffer;
-			delete mD3D11SharedData;
-		}
+		if(mBuffer != nullptr)
+			delete mBuffer;
 	}
 
-	void D3D11GpuParamBlock::updateIfDirty()
+	ID3D11Buffer* D3D11GpuParamBlockBuffer::getD3D11Buffer() const
 	{
-		if(!sharedData->mInitialized)
-		{
-			D3D11RenderSystem* d3d11rs = static_cast<D3D11RenderSystem*>(RenderSystem::instancePtr());
-			D3D11Device& device = d3d11rs->getPrimaryDevice();
-
-			if(mUsage == GPBU_STATIC)
-				mD3D11SharedData->mBuffer = new D3D11HardwareBuffer(D3D11HardwareBuffer::BT_CONSTANT, GBU_STATIC, 1, mSize, device);
-			else if(mUsage == GPBU_DYNAMIC)
-				mD3D11SharedData->mBuffer = new D3D11HardwareBuffer(D3D11HardwareBuffer::BT_CONSTANT, GBU_DYNAMIC, 1, mSize, device);
-			else
-				CM_EXCEPT(InternalErrorException, "Invalid gpu param block usage.");
-
-			sharedData->mInitialized = true;
-		}
-
-		if(sharedData->mDirty)
-		{
-			mD3D11SharedData->mBuffer->writeData(0, mSize, mData, true);
-		}
-
-		GpuParamBlock::updateIfDirty();
+		return mBuffer->getD3DBuffer();
 	}
 
-	GpuParamBlockPtr D3D11GpuParamBlock::clone() const
+	void D3D11GpuParamBlockBuffer::writeAll(const void* data)
 	{
-		std::shared_ptr<D3D11GpuParamBlock> clonedParamBlock(new D3D11GpuParamBlock(*this));
-		clonedParamBlock->mData = new UINT8[mSize];
-		clonedParamBlock->mOwnsSharedData = false;
-		clonedParamBlock->mD3D11SharedData = mD3D11SharedData;
-		memcpy(clonedParamBlock->mData, mData, mSize);
+		mBuffer->writeData(0, mSize, data, true);
 
-		return clonedParamBlock;
+		GpuParamBlockBuffer::writeAll(data);
 	}
 
-	ID3D11Buffer* D3D11GpuParamBlock::getD3D11Buffer() const 
-	{ 
-		return mD3D11SharedData->mBuffer->getD3DBuffer(); 
+	GpuParamBlockBuffer* D3D11GpuParamBlock::createBuffer() const
+	{
+		return new D3D11GpuParamBlockBuffer(mSize, mUsage);
 	}
 }

+ 2 - 2
CamelotD3D11RenderSystem/Source/CmD3D11HardwareBufferManager.cpp

@@ -28,9 +28,9 @@ namespace CamelotEngine
 		return new D3D11IndexBuffer(mDevice, this, itype, numIndexes, usage, false);
 	}
 
-	GpuParamBlock* D3D11HardwareBufferManager::createGpuParamBlockImpl(const GpuParamBlockDesc& blockDesc, GpuParamBlockUsage usage)
+	GpuParamBlock* D3D11HardwareBufferManager::createGpuParamBlockImpl()
 	{
-		return new D3D11GpuParamBlock(blockDesc, usage);
+		return new D3D11GpuParamBlock();
 	}
 
 	GpuBuffer* D3D11HardwareBufferManager::createGpuBufferImpl(UINT32 elementCount, UINT32 elementSize, 

+ 3 - 4
CamelotD3D11RenderSystem/Source/CmD3D11RenderSystem.cpp

@@ -440,8 +440,6 @@ namespace CamelotEngine
 	{
 		THROW_IF_NOT_RENDER_THREAD;
 
-		params->updateIfDirty();
-
 		const GpuParamDesc& paramDesc = params->getParamDesc();
 
 		for(auto iter = paramDesc.samplers.begin(); iter != paramDesc.samplers.end(); ++iter)
@@ -474,8 +472,9 @@ namespace CamelotEngine
 
 			if(currentBlock != nullptr)
 			{
-				D3D11GpuParamBlock* d3d11paramBlock = static_cast<D3D11GpuParamBlock*>(currentBlock.get());
-				bufferArray[0] = d3d11paramBlock->getD3D11Buffer();
+				const GpuParamBlockBuffer* currentBlockBuffer = currentBlock->getBindableBuffer();
+				const D3D11GpuParamBlockBuffer* d3d11paramBlockBuffer = static_cast<const D3D11GpuParamBlockBuffer*>(currentBlockBuffer);
+				bufferArray[0] = d3d11paramBlockBuffer->getD3D11Buffer();
 			}
 			else
 				bufferArray[0] = nullptr;

+ 1 - 1
CamelotD3D9Renderer/Include/CmD3D9HardwareBufferManager.h

@@ -55,7 +55,7 @@ namespace CamelotEngine {
 		IndexBuffer* createIndexBufferImpl(IndexBuffer::IndexType itype, UINT32 numIndexes, GpuBufferUsage usage);
 
 		/** @copydoc HardwareBufferManager::createGpuParamBlock */
-		GpuParamBlock* createGpuParamBlockImpl(const GpuParamBlockDesc& paramDesc, GpuParamBlockUsage usage = GPBU_STATIC);
+		GpuParamBlock* createGpuParamBlockImpl();
 
 		/**
 		 * @copydoc HardwareBufferManager::createGenericBufferImpl

+ 2 - 2
CamelotD3D9Renderer/Source/CmD3D9HardwareBufferManager.cpp

@@ -59,9 +59,9 @@ namespace CamelotEngine
             
     }
 	//-----------------------------------------------------------------------
-	GpuParamBlock* D3D9HardwareBufferManager::createGpuParamBlockImpl(const GpuParamBlockDesc& paramDesc, GpuParamBlockUsage usage)
+	GpuParamBlock* D3D9HardwareBufferManager::createGpuParamBlockImpl()
 	{
-		return new GpuParamBlock(paramDesc, usage);
+		return new GpuParamBlock();
 	}
 	//-----------------------------------------------------------------------
 	GpuBuffer* D3D9HardwareBufferManager::createGpuBufferImpl(UINT32 elementCount, UINT32 elementSize, 

+ 6 - 4
CamelotD3D9Renderer/Source/CmD3D9RenderSystem.cpp

@@ -330,8 +330,6 @@ namespace CamelotEngine
 	{
 		THROW_IF_NOT_RENDER_THREAD;
 
-		params->updateIfDirty();
-
 		const GpuParamDesc& paramDesc = params->getParamDesc();
 
 		for(auto iter = paramDesc.samplers.begin(); iter != paramDesc.samplers.end(); ++iter)
@@ -365,7 +363,9 @@ namespace CamelotEngine
 					const GpuParamDataDesc& paramDesc = iter->second;
 
 					GpuParamBlockPtr paramBlock = params->getParamBlock(paramDesc.paramBlockSlot);
-					const UINT8* ptrData = paramBlock->getDataPtr(paramDesc.cpuMemOffset * sizeof(UINT32));
+					const GpuParamBlockBuffer* currentBlockBuffer = paramBlock->getBindableBuffer();
+
+					const UINT8* ptrData = currentBlockBuffer->getDataPtr(paramDesc.cpuMemOffset * sizeof(UINT32));
 
 					switch(paramDesc.type)
 					{
@@ -417,7 +417,9 @@ namespace CamelotEngine
 					const GpuParamDataDesc& paramDesc = iter->second;
 
 					GpuParamBlockPtr paramBlock = params->getParamBlock(paramDesc.paramBlockSlot);
-					const UINT8* ptrData = paramBlock->getDataPtr(paramDesc.cpuMemOffset * sizeof(UINT32));
+					const GpuParamBlockBuffer* currentBlockBuffer = paramBlock->getBindableBuffer();
+
+					const UINT8* ptrData = currentBlockBuffer->getDataPtr(paramDesc.cpuMemOffset * sizeof(UINT32));
 
 					switch(paramDesc.type)
 					{

+ 15 - 1
CamelotForwardRenderer/Source/CmForwardRenderer.cpp

@@ -76,7 +76,9 @@ namespace CamelotEngine
 			for(UINT32 i = 0; i < material->getNumPasses(); i++)
 			{
 				setPass(material->getPass(i));
-				setPassParameters(material->getPassParameters(i));
+
+				PassParametersPtr paramsPtr = material->getPassParameters(i);
+				setPassParameters(paramsPtr);
 
 				renderContext->render(mesh->getRenderOperation());
 			}
@@ -177,5 +179,17 @@ namespace CamelotEngine
 		GpuProgramHandle geomProgram = mActivePass->getGeometryProgram();
 		if(geomProgram)
 			renderContext->bindGpuParams(GPT_GEOMETRY_PROGRAM, params->mGeomParams);
+
+		GpuProgramHandle hullProgram = mActivePass->getHullProgram();
+		if(hullProgram)
+			renderContext->bindGpuParams(GPT_HULL_PROGRAM, params->mHullParams);
+
+		GpuProgramHandle domainProgram = mActivePass->getDomainProgram();
+		if(domainProgram)
+			renderContext->bindGpuParams(GPT_DOMAIN_PROGRAM, params->mDomainParams);
+
+		GpuProgramHandle computeProgram = mActivePass->getComputeProgram();
+		if(computeProgram)
+			renderContext->bindGpuParams(GPT_COMPUTE_PROGRAM, params->mComputeParams);
 	}
 }

+ 18 - 13
CamelotGLRenderer/Include/CmGLGpuParamBlock.h

@@ -6,24 +6,29 @@
 
 namespace CamelotEngine
 {
-	class CM_RSGL_EXPORT GLGpuParamBlock : public GpuParamBlock
+	class CM_RSGL_EXPORT GLGpuParamBlockBuffer : public GpuParamBlockBuffer
 	{
-	private:
-		struct GLGpuParamBlockSharedData
-		{
-			GLuint mGLHandle;
-		};
-
 	public:
-		GLGpuParamBlock(const GpuParamBlockDesc& desc, GpuParamBlockUsage usage);
-		~GLGpuParamBlock();
+		GLGpuParamBlockBuffer(UINT32 size, GpuParamBlockUsage usage);
+		~GLGpuParamBlockBuffer();
 
-		virtual void updateIfDirty();
-		virtual GpuParamBlockPtr clone() const;
+		/**
+		 * @copydoc GpuParamBlockBuffer::writeAll.
+		 */
+		void writeAll(const void* data);
 
-		GLuint getGLHandle() const { return mGLSharedData->mGLHandle; }
+		GLuint getGLHandle() const { return mGLHandle; }
 
 	private:
-		GLGpuParamBlockSharedData* mGLSharedData;
+		GLuint mGLHandle;
+	};
+
+	class CM_RSGL_EXPORT GLGpuParamBlock : public GpuParamBlock
+	{
+	protected:
+		/**
+		 * @copydoc GpuParamBlock::createBuffer.
+		 */
+		GpuParamBlockBuffer* createBuffer() const;
 	};
 }

+ 1 - 1
CamelotGLRenderer/Include/CmGLHardwareBufferManager.h

@@ -86,7 +86,7 @@ namespace CamelotEngine {
             GpuBufferUsage usage);
 
 		/** @copydoc HardwareBufferManager::createGpuParamBlockImpl */
-		GpuParamBlock* createGpuParamBlockImpl(const GpuParamBlockDesc& paramDesc, GpuParamBlockUsage usage = GPBU_STATIC);
+		GpuParamBlock* createGpuParamBlockImpl();
 
 		/**
 		 * @copydoc HardwareBufferManager::createGenericBufferImpl

+ 21 - 41
CamelotGLRenderer/Source/CmGLGpuParamBlock.cpp

@@ -3,57 +3,37 @@
 
 namespace CamelotEngine
 {
-	GLGpuParamBlock::GLGpuParamBlock(const GpuParamBlockDesc& desc, GpuParamBlockUsage usage)
-		:GpuParamBlock(desc, usage), mGLSharedData(nullptr)
+	GLGpuParamBlockBuffer::GLGpuParamBlockBuffer(UINT32 size, GpuParamBlockUsage usage)
+		:GpuParamBlockBuffer(size, usage)
 	{
-		mGLSharedData = new GLGpuParamBlockSharedData();
+		glGenBuffers(1, &mGLHandle);
+		glBindBuffer(GL_UNIFORM_BUFFER, mGLHandle);
+		if(mUsage == GPBU_STATIC)
+			glBufferData(GL_UNIFORM_BUFFER, mSize, (GLvoid*)mData, GL_STATIC_DRAW);
+		else if(mUsage == GPBU_DYNAMIC)
+			glBufferData(GL_UNIFORM_BUFFER, mSize, (GLvoid*)mData, GL_DYNAMIC_DRAW);
+		else
+			CM_EXCEPT(InternalErrorException, "Invalid gpu param block usage.");
+
+		glBindBuffer(GL_UNIFORM_BUFFER, 0);
 	}
 
-	GLGpuParamBlock::~GLGpuParamBlock()
+	GLGpuParamBlockBuffer::~GLGpuParamBlockBuffer()
 	{
-		if(mOwnsSharedData)
-		{
-			glDeleteBuffers(1, &mGLSharedData->mGLHandle);
-			delete mGLSharedData;
-		}
+		glDeleteBuffers(1, &mGLHandle);
 	}
 
-	void GLGpuParamBlock::updateIfDirty()
+	void GLGpuParamBlockBuffer::writeAll(const void* data)
 	{
-		if(!sharedData->mInitialized)
-		{
-			glGenBuffers(1, &mGLSharedData->mGLHandle);
-			glBindBuffer(GL_UNIFORM_BUFFER, mGLSharedData->mGLHandle);
-			if(mUsage == GPBU_STATIC)
-				glBufferData(GL_UNIFORM_BUFFER, mSize, (GLvoid*)mData, GL_STATIC_DRAW);
-			else if(mUsage == GPBU_DYNAMIC)
-				glBufferData(GL_UNIFORM_BUFFER, mSize, (GLvoid*)mData, GL_DYNAMIC_DRAW);
-			else
-				CM_EXCEPT(InternalErrorException, "Invalid gpu param block usage.");
+		glBindBuffer(GL_UNIFORM_BUFFER, mGLHandle);
+		glBufferSubData(GL_UNIFORM_BUFFER, 0 , mSize, data);
+		glBindBuffer(GL_UNIFORM_BUFFER, 0);
 
-			glBindBuffer(GL_UNIFORM_BUFFER, 0);
-
-			sharedData->mInitialized = true;
-		}
-
-		if(sharedData->mDirty)
-		{
-			glBindBuffer(GL_UNIFORM_BUFFER, mGLSharedData->mGLHandle);
-			glBufferSubData(GL_UNIFORM_BUFFER, 0 , mSize, mData);
-			glBindBuffer(GL_UNIFORM_BUFFER, 0);
-		}
-
-		GpuParamBlock::updateIfDirty();
+		GpuParamBlockBuffer::writeAll(data);
 	}
 
-	GpuParamBlockPtr GLGpuParamBlock::clone() const
+	GpuParamBlockBuffer* GLGpuParamBlock::createBuffer() const
 	{
-		std::shared_ptr<GLGpuParamBlock> clonedParamBlock(new GLGpuParamBlock(*this));
-		clonedParamBlock->mData = new UINT8[mSize];
-		clonedParamBlock->mOwnsSharedData = false;
-		clonedParamBlock->mGLSharedData = mGLSharedData;
-		memcpy(clonedParamBlock->mData, mData, mSize);
-
-		return clonedParamBlock;
+		return new GLGpuParamBlockBuffer(mSize, mUsage);
 	}
 }

+ 2 - 2
CamelotGLRenderer/Source/CmGLHardwareBufferManager.cpp

@@ -94,9 +94,9 @@ namespace CamelotEngine {
 		return new GLIndexBuffer(this, itype, numIndexes, usage);
     }
 	//---------------------------------------------------------------------
-	GpuParamBlock* GLHardwareBufferManager::createGpuParamBlockImpl(const GpuParamBlockDesc& paramDesc, GpuParamBlockUsage usage)
+	GpuParamBlock* GLHardwareBufferManager::createGpuParamBlockImpl()
 	{
-		return new GLGpuParamBlock(paramDesc, usage);
+		return new GLGpuParamBlock();
 	}
 	//---------------------------------------------------------------------
 	GpuBuffer* GLHardwareBufferManager::createGpuBufferImpl(UINT32 elementCount, UINT32 elementSize, 

+ 6 - 5
CamelotGLRenderer/Source/CmGLRenderSystem.cpp

@@ -281,8 +281,6 @@ namespace CamelotEngine
 	{
 		THROW_IF_NOT_RENDER_THREAD;
 
-		params->updateIfDirty();
-
 		const GpuParamDesc& paramDesc = params->getParamDesc();
 		GLSLGpuProgram* activeProgram = getActiveProgram(gptype);
 		GLuint glProgram = activeProgram->getGLSLProgram()->getGLHandle();
@@ -323,10 +321,12 @@ namespace CamelotEngine
 				continue;
 
 			GLGpuParamBlockPtr glParamBlock = std::static_pointer_cast<GLGpuParamBlock>(paramBlock);
+			const GpuParamBlockBuffer* paramBlockBuffer = glParamBlock->getBindableBuffer();
+			const GLGpuParamBlockBuffer* glParamBlockBuffer = static_cast<const GLGpuParamBlockBuffer*>(paramBlockBuffer);
 
 			UINT32 globalBlockBinding = getGLUniformBlockBinding(gptype, blockBinding);
 			glUniformBlockBinding(glProgram, iter->second.slot - 1, globalBlockBinding);
-			glBindBufferRange(GL_UNIFORM_BUFFER, globalBlockBinding, glParamBlock->getGLHandle(), 0, glParamBlock->getSize());
+			glBindBufferRange(GL_UNIFORM_BUFFER, globalBlockBinding, glParamBlockBuffer->getGLHandle(), 0, glParamBlockBuffer->getSize());
 
 			blockBinding++;
 		}
@@ -336,11 +336,12 @@ namespace CamelotEngine
 			const GpuParamDataDesc& paramDesc = iter->second;
 
 			GpuParamBlockPtr paramBlock = params->getParamBlock(paramDesc.paramBlockSlot);
-			
+			const GpuParamBlockBuffer* paramBlockBuffer = paramBlock->getBindableBuffer();
+
 			if(paramDesc.paramBlockSlot != 0) // 0 means uniforms are not in a block
 				continue;
 
-			const UINT8* ptrData = paramBlock->getDataPtr(paramDesc.cpuMemOffset * sizeof(UINT32));
+			const UINT8* ptrData = paramBlockBuffer->getDataPtr(paramDesc.cpuMemOffset * sizeof(UINT32));
 
 			switch(paramDesc.type)
 			{

+ 17 - 4
CamelotRenderer/Include/CmCoreGpuObject.h

@@ -88,17 +88,30 @@ o		 *
 		 */
 		virtual void initialize_internal();
 
+		/**
+		 * @brief	Returns a shared_ptr version of "this" pointer.
+		 */
+		std::shared_ptr<CoreGpuObject> getThisPtr() const { return mThis.lock(); }
+
 		/**
 		 * @brief	Queues a command to be executed on the render thread, without a return value.
+		 * 			
+		 * @note	Requires a shared pointer to the object this function will be executed on, in order to 
+		 * 			make sure the object is not deleted before the command executes. Can be null if the 
+		 * 			function is static or global.
 		 */
-		static void queueGpuCommand(std::shared_ptr<CoreGpuObject> obj, boost::function<void(CoreGpuObject*)> func);
+		static void queueGpuCommand(std::shared_ptr<CoreGpuObject>& obj, boost::function<void()> func);
 
 		/**
 		 * @brief	Queues a command to be executed on the render thread, with a return value in the form of AsyncOp.
 		 * 			
 		 * @see		AsyncOp
+		 * 			
+		 * @note	Requires a shared pointer to the object this function will be executed on, in order to
+		 * 			make sure the object is not deleted before the command executes. Can be null if the
+		 * 			function is static or global.
 		 */
-		static AsyncOp queueReturnGpuCommand(std::shared_ptr<CoreGpuObject> obj, boost::function<void(CoreGpuObject*, AsyncOp&)> func);
+		static AsyncOp queueReturnGpuCommand(std::shared_ptr<CoreGpuObject>& obj, boost::function<void(AsyncOp&)> func);
 
 		/**
 		 * @brief	Returns an unique identifier for this object.
@@ -121,7 +134,7 @@ o		 *
 		CM_STATIC_THREAD_SYNCHRONISER(mCoreGpuObjectLoadedCondition)
 		CM_STATIC_MUTEX(mCoreGpuObjectLoadedMutex)
 
-		static void executeGpuCommand(std::shared_ptr<CoreGpuObject> obj, boost::function<void(CoreGpuObject*)> func);
-		static void executeReturnGpuCommand(std::shared_ptr<CoreGpuObject> obj, boost::function<void(CoreGpuObject*, AsyncOp&)> func, AsyncOp& op); 
+		static void executeGpuCommand(std::shared_ptr<CoreGpuObject>& obj, boost::function<void()> func);
+		static void executeReturnGpuCommand(std::shared_ptr<CoreGpuObject>& obj, boost::function<void(AsyncOp&)> func, AsyncOp& op); 
 	};
 }
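
In practice, the new signatures mean the caller binds the target member function (plus any extra arguments) itself instead of handing the queue a CoreGpuObject* callback. The call site added in CmGpuParamBlock.cpp further down is representative:

	// Old form: the queue passed the raw object pointer back into the callback.
	//   queueGpuCommand(mThis.lock(), &CoreGpuObject::destroy_internal);
	// New form: a no-argument functor; the shared_ptr is kept alive by the bound
	// command until it executes on the render thread.
	queueGpuCommand(getThisPtr(),
		boost::bind(&GpuParamBlock::updateBuffer_internal, this, dataCopy));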

+ 66 - 15
CamelotRenderer/Include/CmGpuParamBlock.h

@@ -2,38 +2,89 @@
 
 #include "CmPrerequisites.h"
 #include "CmCommonEnums.h"
+#include "CmCoreGpuObject.h"
 
 namespace CamelotEngine
 {
-	class CM_EXPORT GpuParamBlock
+	/**
+	 * @brief	Represents an actual GPU buffer. 
+	 * 			Should only be accessed directly from render thread.
+	 */
+	class CM_EXPORT GpuParamBlockBuffer
 	{
-	private:
-		struct GpuParamBlockSharedData
-		{
-			bool mDirty;
-			bool mInitialized;
-		};
+	public:
+		GpuParamBlockBuffer(UINT32 size, GpuParamBlockUsage usage);
+		virtual ~GpuParamBlockBuffer();
+
+		/**
+		 * @brief	Writes all of the data to the buffer.
+		 * 			Data size must be the same size as the buffer;
+		 */
+		virtual void writeAll(const void* data);
+
+		const UINT8* getDataPtr(UINT32 offset) const;
+		UINT32 getSize() const { return mSize; }
+
+	protected:
+		GpuParamBlockUsage mUsage;
+		UINT8* mData;
+		UINT32 mSize;
+	};
 
+	class CM_EXPORT GpuParamBlock : public CoreGpuObject
+	{
 	public:
-		GpuParamBlock(const GpuParamBlockDesc& desc, GpuParamBlockUsage usage);
+		GpuParamBlock();
 		virtual ~GpuParamBlock();
 
+		void initialize(const GpuParamBlockDesc& desc, GpuParamBlockUsage usage);
+
 		void write(UINT32 offset, const void* data, UINT32 size);
 		void zeroOut(UINT32 offset, UINT32 size);
 
-		const UINT8* getDataPtr(UINT32 offset) const;
-		UINT32 getSize() const { return mSize; }
+		/**
+		 * @brief	Returns a buffer that may be bound as a material parameter.
+		 */
+		const GpuParamBlockBuffer* getBindableBuffer() const { return mBuffer; }
 
-		virtual void updateIfDirty();
+		/**
+		 * @brief	Updates the internal buffer. You must call this if you want the buffer to be
+		 * 			up to date after making changes to it. The buffer won't be touched if there were no changes,
+		 * 			so feel free to call this often.
+		 * 			
+		 * @note	This will only queue the buffer update on the render thread.
+		 */
+		void updateBuffer();
 
-		virtual GpuParamBlockPtr clone() const;
-		
 		static GpuParamBlockPtr create(const GpuParamBlockDesc& desc);
 	protected:
-		GpuParamBlockSharedData* sharedData;
+		friend class HardwareBufferManager;
+
+		GpuParamBlockBuffer* mBuffer;
 		GpuParamBlockUsage mUsage;
-		bool mOwnsSharedData;
+
 		UINT8* mData;
 		UINT32 mSize;
+		bool mDirty;
+
+		/**
+		 * @copydoc CoreGpuObject::initialize_internal.
+		 */
+		virtual void initialize_internal();
+
+		/**
+		 * @copydoc CoreGpuObject::destroy_internal.
+		 */
+		virtual void destroy_internal();
+
+		/**
+		 * @brief	Creates new GPU parameter buffer. Derived classes should
+		 * 			return their own specific buffer implementations.
+		 * 			
+		 * @note	Should only be called from the render thread.
+		 */
+		virtual GpuParamBlockBuffer* createBuffer() const;
+
+		void updateBuffer_internal(UINT8* data);
 	};
 }
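
To make the split concrete: a backend only has to override the two virtuals, exactly as the D3D11 and GL classes do elsewhere in this commit. A hypothetical backend (the Null* names are illustrative, not part of the change) would look like this:

	// Illustrative sketch; the Null* classes below are not part of this commit.
	class NullGpuParamBlockBuffer : public GpuParamBlockBuffer
	{
	public:
		NullGpuParamBlockBuffer(UINT32 size, GpuParamBlockUsage usage)
			:GpuParamBlockBuffer(size, usage)
		{ }

		// Render thread only: a real backend would upload "data" to its API buffer here.
		void writeAll(const void* data)
		{
			GpuParamBlockBuffer::writeAll(data); // base keeps the CPU-side copy in sync
		}
	};

	class NullGpuParamBlock : public GpuParamBlock
	{
	protected:
		// Called from initialize_internal() on the render thread.
		GpuParamBlockBuffer* createBuffer() const
		{
			return new NullGpuParamBlockBuffer(mSize, mUsage);
		}
	};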

+ 1 - 2
CamelotRenderer/Include/CmGpuParams.h

@@ -51,8 +51,7 @@ namespace CamelotEngine
 
 		void setTransposeMatrices(bool transpose) { mTransposeMatrices = transpose; }
 
-		GpuParamsPtr clone() const;
-		void updateIfDirty();
+		void updateParamBuffers();
 
 	private:
 		GpuParamDesc& mParamDesc;

+ 1 - 1
CamelotRenderer/Include/CmHardwareBufferManager.h

@@ -119,7 +119,7 @@ namespace CamelotEngine {
 		virtual VertexDeclaration* createVertexDeclarationImpl();
 		virtual VertexBuffer* createVertexBufferImpl(UINT32 vertexSize, UINT32 numVerts, GpuBufferUsage usage, bool streamOut = false) = 0;
 		virtual IndexBuffer* createIndexBufferImpl(IndexBuffer::IndexType itype, UINT32 numIndexes, GpuBufferUsage usage) = 0;
-		virtual GpuParamBlock* createGpuParamBlockImpl(const GpuParamBlockDesc& paramDesc, GpuParamBlockUsage usage = GPBU_STATIC) = 0;
+		virtual GpuParamBlock* createGpuParamBlockImpl() = 0;
 		virtual GpuBuffer* createGpuBufferImpl(UINT32 elementCount, UINT32 elementSize, GpuBufferType type, GpuBufferUsage usage, 
 			bool randomGpuWrite = false, bool useCounter = false) = 0;
 	};

+ 12 - 8
CamelotRenderer/Source/CmCoreGpuObject.cpp

@@ -39,7 +39,7 @@ namespace CamelotEngine
 		setScheduledToBeDeleted(true);
 		CoreGpuObjectManager::instance().registerObjectToDestroy(mThis.lock());
 
-		queueGpuCommand(mThis.lock(), &CoreGpuObject::destroy_internal);
+		queueGpuCommand(mThis.lock(), boost::bind(&CoreGpuObject::destroy_internal, this));
 	}
 
 	void CoreGpuObject::destroy_internal()
@@ -66,7 +66,7 @@ namespace CamelotEngine
 
 		setScheduledToBeInitialized(true);
 
-		queueGpuCommand(mThis.lock(), &CoreGpuObject::initialize_internal);
+		queueGpuCommand(mThis.lock(), boost::bind(&CoreGpuObject::initialize_internal, this));
 	}
 
 	void CoreGpuObject::initialize_internal()
@@ -139,7 +139,7 @@ namespace CamelotEngine
 		}
 	}
 
-	void CoreGpuObject::queueGpuCommand(std::shared_ptr<CoreGpuObject> obj, boost::function<void(CoreGpuObject*)> func)
+	void CoreGpuObject::queueGpuCommand(std::shared_ptr<CoreGpuObject>& obj, boost::function<void()> func)
 	{
 		// We call another internal method and go through an additional layer of abstraction in order to keep an active
 		// reference to the obj (saved in the bound function).
@@ -148,19 +148,23 @@ namespace CamelotEngine
 		RenderSystem::instancePtr()->queueCommand(boost::bind(&CoreGpuObject::executeGpuCommand, obj, func));
 	}
 
-	AsyncOp CoreGpuObject::queueReturnGpuCommand(std::shared_ptr<CoreGpuObject> obj, boost::function<void(CoreGpuObject*, AsyncOp&)> func)
+	AsyncOp CoreGpuObject::queueReturnGpuCommand(std::shared_ptr<CoreGpuObject>& obj, boost::function<void(AsyncOp&)> func)
 	{
 		// See queueGpuCommand
 		return RenderSystem::instancePtr()->queueReturnCommand(boost::bind(&CoreGpuObject::executeReturnGpuCommand, obj, func, _1));
 	}
 
-	void CoreGpuObject::executeGpuCommand(std::shared_ptr<CoreGpuObject> obj, boost::function<void(CoreGpuObject*)> func)
+	void CoreGpuObject::executeGpuCommand(std::shared_ptr<CoreGpuObject>& obj, boost::function<void()> func)
 	{
-		func(obj.get());
+		volatile std::shared_ptr<CoreGpuObject> objParam = obj; // Makes sure obj isn't optimized out?
+
+		func();
 	}
 
-	void CoreGpuObject::executeReturnGpuCommand(std::shared_ptr<CoreGpuObject> obj, boost::function<void(CoreGpuObject*, AsyncOp&)> func, AsyncOp& op)
+	void CoreGpuObject::executeReturnGpuCommand(std::shared_ptr<CoreGpuObject>& obj, boost::function<void(AsyncOp&)> func, AsyncOp& op)
 	{
-		func(obj.get(), op);
+		volatile std::shared_ptr<CoreGpuObject> objParam = obj; // Makes sure obj isn't optimized out?
+
+		func(op);
 	}
 }

+ 1 - 2
CamelotRenderer/Source/CmDeferredRenderContext.cpp

@@ -129,8 +129,7 @@ namespace CamelotEngine
 
 	void DeferredRenderContext::bindGpuParams(GpuProgramType gptype, GpuParamsPtr params)
 	{
-		GpuParamsPtr paramCopy = params->clone();
-		mCommandQueue->queue(boost::bind(&RenderSystem::bindGpuParams, mRenderSystem, gptype, paramCopy));
+		mCommandQueue->queue(boost::bind(&RenderSystem::bindGpuParams, mRenderSystem, gptype, params));
 	}
 
 	void DeferredRenderContext::setRenderTarget(RenderTarget* target)

+ 80 - 33
CamelotRenderer/Source/CmGpuParamBlock.cpp

@@ -5,42 +5,80 @@
 
 namespace CamelotEngine
 {
-	GpuParamBlock::GpuParamBlock(const GpuParamBlockDesc& desc, GpuParamBlockUsage usage)
-		:mSize(desc.blockSize * sizeof(UINT32)), mOwnsSharedData(true), mUsage(usage)
+	GpuParamBlockBuffer::GpuParamBlockBuffer(UINT32 size, GpuParamBlockUsage usage)
+		:mData(new UINT8[size]), mSize(size), mUsage(usage)
 	{
-		mData = new UINT8[mSize];
 		memset(mData, 0, mSize);
-
-		sharedData = new GpuParamBlockSharedData();
-		sharedData->mDirty = true;
-		sharedData->mInitialized = false;
 	}
 
-	GpuParamBlock::~GpuParamBlock()
+	GpuParamBlockBuffer::~GpuParamBlockBuffer()
 	{
-		delete [] mData;
+		if(mData != nullptr)
+			delete[] mData;
+	}
 
-		if(mOwnsSharedData)
-			delete sharedData;
+	void GpuParamBlockBuffer::writeAll(const void* data)
+	{
+		memcpy(mData, data, mSize);
 	}
 
-	void GpuParamBlock::write(UINT32 offset, const void* data, UINT32 size)
+	const UINT8* GpuParamBlockBuffer::getDataPtr(UINT32 offset) const
 	{
 #if CM_DEBUG_MODE
-		if(offset < 0 || (offset + size) > mSize)
+		if(offset < 0 || offset >= mSize)
 		{
 			CM_EXCEPT(InvalidParametersException, "Wanted range is out of buffer bounds. " \
 				"Available range: 0 .. " + toString(mSize) + ". " \
-				"Wanted range: " + toString(offset) + " .. " + toString(offset + size) + ".");
+				"Wanted range: " + toString(offset) + " .. " + toString(offset) + ".");
 		}
 #endif
 
-		memcpy(mData + offset, data, size);
+		return &mData[offset];
+	}
 
-		sharedData->mDirty = true;
+	GpuParamBlock::GpuParamBlock()
+		:mDirty(true), mUsage(GPBU_DYNAMIC), mData(nullptr), mBuffer(nullptr)
+	{
 	}
 
-	void GpuParamBlock::zeroOut(UINT32 offset, UINT32 size)
+	GpuParamBlock::~GpuParamBlock()
+	{
+		if(mData != nullptr)
+			delete [] mData;
+	}
+
+	void GpuParamBlock::initialize(const GpuParamBlockDesc& desc, GpuParamBlockUsage usage)
+	{
+		mSize = desc.blockSize * sizeof(UINT32);
+		mData = new UINT8[mSize];
+		memset(mData, 0, mSize);
+
+		mUsage = usage;
+
+		CoreGpuObject::initialize();
+	}
+
+	void GpuParamBlock::initialize_internal()
+	{
+		mBuffer = createBuffer();
+
+		CoreGpuObject::initialize_internal();
+	}
+
+	void GpuParamBlock::destroy_internal()
+	{
+		if(mBuffer != nullptr)
+			delete mBuffer;
+
+		CoreGpuObject::destroy_internal();
+	}
+
+	GpuParamBlockBuffer* GpuParamBlock::createBuffer() const
+	{
+		return new GpuParamBlockBuffer(mSize, mUsage);
+	}
+
+	void GpuParamBlock::write(UINT32 offset, const void* data, UINT32 size)
 	{
 #if CM_DEBUG_MODE
 		if(offset < 0 || (offset + size) > mSize)
@@ -51,41 +89,50 @@ namespace CamelotEngine
 		}
 #endif
 
-		memset(mData + offset, 0, size);
+		memcpy(mData + offset, data, size);
 
-		sharedData->mDirty = true;
+		mDirty = true;
 	}
 
-	const UINT8* GpuParamBlock::getDataPtr(UINT32 offset) const
+	void GpuParamBlock::zeroOut(UINT32 offset, UINT32 size)
 	{
 #if CM_DEBUG_MODE
-		if(offset < 0 || offset >= mSize)
+		if(offset < 0 || (offset + size) > mSize)
 		{
 			CM_EXCEPT(InvalidParametersException, "Wanted range is out of buffer bounds. " \
 				"Available range: 0 .. " + toString(mSize) + ". " \
-				"Wanted range: " + toString(offset) + " .. " + toString(offset) + ".");
+				"Wanted range: " + toString(offset) + " .. " + toString(offset + size) + ".");
 		}
 #endif
 
-		return &mData[offset];
+		memset(mData + offset, 0, size);
+
+		mDirty = true;
 	}
 
-	void GpuParamBlock::updateIfDirty()
+	void GpuParamBlock::updateBuffer()
 	{
-		sharedData->mDirty = false;
+		if(mDirty)
+		{
+			mDirty = false;
 
-		// Do nothing
+			// Need to copy the data, as non-render threads might modify
+			// the data before render thread has a chance to process it
+			// TODO - Use an allocator
+			UINT8* dataCopy = new UINT8[mSize];
+			memcpy(dataCopy, mData, mSize);
+
+			queueGpuCommand(getThisPtr(), boost::bind(&GpuParamBlock::updateBuffer_internal, this, dataCopy));
+		}
 	}
 
-	GpuParamBlockPtr GpuParamBlock::clone() const
+	void GpuParamBlock::updateBuffer_internal(UINT8* data)
 	{
-		GpuParamBlockPtr clonedParamBlock(new GpuParamBlock(*this));
-		clonedParamBlock->mData = new UINT8[mSize];
-		clonedParamBlock->mSize = mSize;
-		clonedParamBlock->mOwnsSharedData = false;
-		memcpy(clonedParamBlock->mData, mData, mSize);
+		assert(mBuffer != nullptr);
+
+		mBuffer->writeAll(data);
 
-		return clonedParamBlock;
+		delete[] data;
 	}
 
 	GpuParamBlockPtr GpuParamBlock::create(const GpuParamBlockDesc& desc)

+ 2 - 15
CamelotRenderer/Source/CmGpuParams.cpp

@@ -266,25 +266,12 @@ namespace CamelotEngine
 		return mSamplerStates[slot];
 	}
 
-	GpuParamsPtr GpuParams::clone() const
+	void GpuParams::updateParamBuffers()
 	{
-		GpuParamsPtr clonedParams(new GpuParams(*this));
-		
 		for(size_t i = 0; i < mParamBlocks.size(); i++)
 		{
 			if(mParamBlocks[i] != nullptr)
-				clonedParams->mParamBlocks[i] = mParamBlocks[i]->clone();
-		}
-
-		return clonedParams;
-	}
-
-	void GpuParams::updateIfDirty()
-	{
-		for(size_t i = 0; i < mParamBlocks.size(); i++)
-		{
-			if(mParamBlocks[i] != nullptr)
-				mParamBlocks[i]->updateIfDirty();
+				mParamBlocks[i]->updateBuffer();
 		}
 	}
 

+ 5 - 1
CamelotRenderer/Source/CmHardwareBufferManager.cpp

@@ -73,7 +73,11 @@ namespace CamelotEngine {
 	//-----------------------------------------------------------------------
 	GpuParamBlockPtr HardwareBufferManager::createGpuParamBlock(const GpuParamBlockDesc& paramDesc, GpuParamBlockUsage usage)
 	{
-		return GpuParamBlockPtr(createGpuParamBlockImpl(paramDesc, usage));
+		GpuParamBlockPtr paramBlockPtr(createGpuParamBlockImpl(), &CoreGpuObject::_deleteDelayed);
+		paramBlockPtr->setThisPtr(paramBlockPtr);
+		paramBlockPtr->initialize(paramDesc, usage);
+
+		return paramBlockPtr;
 	}
 	//-----------------------------------------------------------------------
 	GpuBufferPtr HardwareBufferManager::createGpuBuffer(UINT32 elementCount, UINT32 elementSize, 
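
Spelled out, the three added lines above are what the commit message means by "initialized like any other CoreGpuObject" (the comments are my reading of the diffs in this commit):

	GpuParamBlockPtr paramBlockPtr(createGpuParamBlockImpl(),   // plain construction, no GPU work yet
		&CoreGpuObject::_deleteDelayed);                        // custom deleter instead of a plain delete
	paramBlockPtr->setThisPtr(paramBlockPtr);                   // stores the weak "this" later returned by getThisPtr()
	paramBlockPtr->initialize(paramDesc, usage);                // allocates the CPU-side data, then queues
	                                                            // initialize_internal() on the render thread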

+ 9 - 1
CamelotRenderer/Source/CmMaterial.cpp

@@ -735,7 +735,15 @@ namespace CamelotEngine
 		if(passIdx < 0 || passIdx >= mParametersPerPass.size())
 			CM_EXCEPT(InvalidParametersException, "Invalid pass index.");
 
-		return mParametersPerPass[passIdx];
+		PassParametersPtr params = mParametersPerPass[passIdx];
+
+		for(UINT32 i = 0; i < params->getNumParams(); i++)
+		{
+			if(params->getParamByIdx(i) != nullptr)
+				params->getParamByIdx(i)->updateParamBuffers();
+		}
+
+		return params;
 	}
 
 	TextureHandle Material::getTexture(const String& name) const

+ 9 - 27
CamelotRenderer/TODO.txt

@@ -2,9 +2,6 @@
 -----------------------LONGTERM TODO----------------------------------------------------------------
  - Debug tools 
    - Camera controls + world grid
- - Renderable (contains mesh/material)
- - RenderManager (iterated through Renderables and displays them on screen)
-   - And its plugin implementation ForwardRenderingManager
  - Integrate with Camelot Editor
  - SceneManager plugin
    - Frustum culling and octree (or some other) acceleration structure
@@ -17,9 +14,6 @@ Pass
  - A way to bind buffers to a Pass, while specifying buffer range
  - GpuParams support for bools, buffers, structs
 
-GpuParamBlock - Needs to derive from CoreGpuObject and needs to be initialized better
- - HardwareBufferManager::createGpuParamBlock needs &CoreGpuObject::_deleteDelayed added as a deleted to shared_ptr
-
 Support loading of compound objects:
    - Loading Material also loads attached Shader and Textures/Samplers
 
@@ -32,6 +26,7 @@ Make sure we can add an include file to a HighLevelGpuProgram, and make sure it
 
 Go through RenderSystem classes and make sure we don't hold any raw pointer references.
 Seems there is a possible deadlock when starting the render thread, while waiting for the thread to be started
+Get rid of resource handlers in Resources?
 
 Can be delayed:
  Make sure that I am able to blit contents from render textures on all render systems
@@ -77,14 +72,7 @@ So final solution:
     - Not ALL methods need to be moved, only those that are resource heavy
     - Smaller methods may remain and always stay async, but keep internal state?
  - Resource creation on DX11 should be direct though, without a queue (especially if we manage to populate a resource in the same step)
- - (Make sure to update GpuParamBlock implementations as they do their copying internally)
-
-
-<<<<Issue of setting individual parameters on a material>>>
- - Material can contain multiple techniques
- - How do I ensure parameters are valid for all techniques?
- - Require user to create a Material parameter specification?
- - Also need a way to avoid displaying internal parameters used by the active renderer
+ - Remove & replace internal data copying in GpuParamBlock (or just use an allocator instead of new())
 
 <<<<RenderSystem needed modifications>>>>
   - Texture resource views (Specifying just a subresource of a texture as a shader parameter)
@@ -128,22 +116,17 @@ Editor IMPORTANT:
 -----------------------BACKLOG TODO---------------------------------------------------------------
 
 HIGH PRIORITY TODO:
- - Issue with deserialization and value types:
-  - Value types are only set at the end of deserialization, because I want to be sure all of their fields are initialized. However there is nothing stopping a custom RTTI method from accessing a (yet uninitialized) value in a ptr field. (See CmMaterialRTTI, setTexParam). I need to initialize fields in a better order.
-   - Solution?: Set (empty) ptr values immediately, and only load them later
  - GetRenderOperation doesn't consider sub-meshes
 
 Mid priority TODO:
  - monitorIndex is ignored in DX11
- - Make sure to queue up mesh deletion to make sure it gets destroyed on the render thread
  - Add a field that tracks % of resource deserialization in BinarySerializer
  - Mesh loading:
   - Example Freefall mesh has one index per vertex, and there are 17k+ vertices. I think I need a post-process step that optimizes them.
   - Imported FBX meshes are too big
-  - Search for all remaining "TODO PORT" comments and fix them
-  - Ogre performed special DDS loading. I removed that. I'm not sure if I'll need to re-add it?
-  - My log is not thread safe yet it is being called from multiple threads.
-  - Handling of shader array parameters? This needs testing
+ - Ogre performed special DDS loading. I removed that. I'm not sure if I'll need to re-add it?
+ - My log is not thread safe yet it is being called from multiple threads.
+ - Handling of shader array parameters? This needs testing
    - I'm currently ignoring array elements in GL due to the way their names are handled
  - RTTI:
      When defining RTTIType like so: 
@@ -151,12 +134,8 @@ Mid priority TODO:
      I need to make sure that HighLevelGpuProgram class has valid RTTI type as well. Otherwise the inheritance hierarchy will not be correct. Right now this isn't checked anywhere.
 
 Low priority TODO:
- - Can I assign a RenderTexture to a Pass?
  - Mesh loading:
   - Sub-meshes aren't being transformed by world matrices of their nodes
- - Are resource getting properly unloaded? e.g. when shared_ptr destroys a texture is it removed from gpu?
-  - Make sure resources only get properly unloaded at end of every frame. This is because a lot of RenderSystem stuff holds raw ptrs
-	to resources, and it keeps them throughout the frame (especially with deferred rendering). Plus this can only be done on the render thread anyway.
  - Remove template from RTTIType and move it to IReflectable? This way i can hopefully move GetRTTITypeStatic and GetRTTIType to IReflectable so I don't
    need to manually implement those for every method.
  - Viewport needs to be updated when I call RenderTarget::setFullscreen/finishSwitchingFullscreen/updateWindowRect/windowMovedOrResized. Currently it's not
@@ -167,7 +146,6 @@ Low priority TODO:
    if pointer is saved/loaded as a plain field. I need to add a check that ensures the type is POD. 
    See: http://www.boost.org/doc/libs/1_51_0/boost/mpi/datatype.hpp for a possible implementation of a compile time check.
  - Fix up WorkQueue as it doesn't lock when initializing, to make sure threads are actually started before returning
- - DepthStencilBuffer & Texture should possibly share the same interface instead of being two separate classes? I'll need to assign DepthStencil to shaders sometimes. Not possible in DX9 but possible in DX11.
  - CPU reading or writing to a render texture in OpenGL is not supported. (CmGLHardwarePixelBuffer::upload/download). 
  - When saving a resource, make sure resource is properly loaded before saving
    - Add doc to Resources::save that says it will block until render thread updates the resource
@@ -185,6 +163,10 @@ Optional TODO:
  - Extend texture copy so it accepts different subregions & subresources (currently only entire resource can be copied)
  - Need a way to convert MSAA render texture into a normal render texture
  - Vertex buffer start offset is not supported when calling Draw methods
+ - Issue with deserialization and value types:
+  - Value types are only set at the end of deserialization, because I want to be sure all of their fields are initialized. 
+     However there is nothing stopping a custom RTTI method from accessing a (yet uninitialized) value in a ptr field. 
+	 (See CmMaterialRTTI, setTexParam). I need to initialize fields in a better order.
 
  -----------------------------------------------------------------------------------------------