
Fixed a crash where an object was deleted while still in the command queue
Added a check to make sure the object is scheduled to be initialized when waitUntilInitialized is called
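
For context, a minimal sketch of the failure mode this commit addresses, using standard library types instead of the engine's boost-based queue (all names here are illustrative, not the engine's API): a queued command that captures a raw this pointer can outlive its object, while binding the shared_ptr itself into the command keeps the object alive until playback.

// Illustrative only: shows why queued commands must own a reference to the
// object they operate on.
#include <functional>
#include <memory>
#include <queue>

struct GpuObject
{
	void initializeInternal() { /* touches GPU state on the render thread */ }
};

std::queue<std::function<void()>> gCommands; // drained later by the render thread

void queueUnsafe(GpuObject* obj)
{
	// BUG: captures a raw pointer; nothing keeps *obj alive while queued.
	gCommands.push([obj]() { obj->initializeInternal(); });
}

void queueSafe(std::shared_ptr<GpuObject> obj)
{
	// FIX: the functor owns a shared_ptr copy, so the object cannot be
	// destroyed before the command executes (reference count stays >= 1).
	gCommands.push([obj]() { obj->initializeInternal(); });
}

int main()
{
	auto obj = std::make_shared<GpuObject>();
	queueSafe(obj);
	obj.reset(); // safe: the queued command still holds a reference

	while(!gCommands.empty())
	{
		gCommands.front()(); // "render thread" playback
		gCommands.pop();
	}
	return 0;
}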

Marko Pintera · 13 years ago
commit 079c1923cf

+ 11 - 0
CamelotClient/CamelotClient.cpp

@@ -25,6 +25,11 @@

 using namespace CamelotEngine;

+void doNothing()
+{
+
+}
+
 int CALLBACK WinMain(
 	_In_  HINSTANCE hInstance,
 	_In_  HINSTANCE hPrevInstance,
@@ -191,6 +196,8 @@ int CALLBACK WinMain(

 	testMaterial->setMat4("matViewProjection", Matrix4::IDENTITY);

+	RenderSystem::instance().queueCommand(&doNothing, true);
+
 	MaterialHandle testMaterialRef = gResources().create(testMaterial, "C:\\testMaterial.mat", true);
 	//testMaterialRef = gResources().load("C:\\testMaterial.mat");
 	//testMaterialRef.waitUntilLoaded();
@@ -208,7 +215,11 @@ int CALLBACK WinMain(
 	gResources().unload(dbgMeshRef);

 	testTexRef = static_resource_cast<Texture>(gResources().load("C:\\ExportTest.tex"));
+	RenderSystem::instance().queueCommand(&doNothing, true);
+
 	dbgMeshRef = static_resource_cast<Mesh>(gResources().load("C:\\ExportMesh.mesh"));
+	RenderSystem::instance().queueCommand(&doNothing, true);
+

 	testMaterial->setTexture("tex", testTexRef);
 	//gResources().create(testMaterial, "C:\\ExportMaterial.mat", true);
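
The queueCommand(&doNothing, true) calls above appear to serve as synchronization points while testing resource loading. Assuming the second argument means block until the command has executed (its meaning is not shown in this diff), the idea can be sketched with standard library types:

// Toy illustration, not the engine's RenderSystem: queue a no-op on the worker
// ("render") thread and wait for it, so everything queued earlier is guaranteed
// to have been processed before continuing.
#include <functional>
#include <future>
#include <mutex>
#include <queue>
#include <thread>

void doNothing() {}

int main()
{
	std::queue<std::packaged_task<void()>> commands;
	std::mutex mutex;
	bool stop = false;

	// Stand-in render thread: drains the queue until told to stop (busy-waits for brevity).
	std::thread renderThread([&]()
	{
		while(true)
		{
			std::packaged_task<void()> task;
			{
				std::lock_guard<std::mutex> lock(mutex);
				if(commands.empty())
				{
					if(stop)
						return;
					continue;
				}
				task = std::move(commands.front());
				commands.pop();
			}
			task();
		}
	});

	// ... queue real work here ...

	// Queue the no-op and block until the render thread has executed it.
	std::packaged_task<void()> sync(&doNothing);
	std::future<void> done = sync.get_future();
	{
		std::lock_guard<std::mutex> lock(mutex);
		commands.push(std::move(sync));
	}
	done.wait();

	{
		std::lock_guard<std::mutex> lock(mutex);
		stop = true;
	}
	renderThread.join();
	return 0;
}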

+ 4 - 4
CamelotRenderer/Include/CmCommandQueue.h

@@ -87,7 +87,7 @@ namespace CamelotEngine
 		 * @brief	Returns a copy of all queued commands and makes room for new ones. Must be called from the thread
 		 * 			that created the command queue. Returned commands MUST be passed to playbackCommands.
 		 */
-		vector<Command>::type* flush();
+		std::queue<Command>* flush();

 		/**
 		 * @brief	Plays all provided commands. Should only be called from the render thread. To get the
@@ -96,13 +96,13 @@ namespace CamelotEngine
 		 * @param	notifyCallback  	Callback that will be called for any command that has the "notifyOnComplete" flag set.
 		 * 								The callback will receive the "callbackId" of the command.
 		 */
-		void playback(vector<Command>::type* commands, boost::function<void(UINT32)> notifyCallback);
+		void playback(std::queue<Command>* commands, boost::function<void(UINT32)> notifyCallback);

 		/**
 		 * @brief	Plays all provided commands. Should only be called from the render thread. To get
 		 * 			the commands call flushCommands().
 		 */
-		void playback(vector<Command>::type* commands);
+		void playback(std::queue<Command>* commands);

 		/**
 		 * @brief	Returns true if no commands are queued.
@@ -110,7 +110,7 @@ namespace CamelotEngine
 		bool isEmpty();

 	private:
-		vector<Command>::type* mCommands;
+		std::queue<Command>* mCommands;

 		bool mAllowAllThreads;
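
The doc comments above describe a swap-and-drain contract: flush() must be called on the owning thread and hands the pending commands over, and playback() must consume them on the render thread. A rough standalone sketch of that pattern, with standard library types standing in for the engine's Command struct and synchronization macros (the real class also tracks thread IDs and per-command notify callbacks):

#include <functional>
#include <memory>
#include <mutex>
#include <queue>

class SimpleCommandQueue
{
public:
	void queue(std::function<void()> command)
	{
		std::lock_guard<std::mutex> lock(mMutex);
		mCommands.push(std::move(command));
	}

	// Swap the pending commands out under the lock and hand ownership to the
	// caller, so playback can run without holding the mutex.
	std::unique_ptr<std::queue<std::function<void()>>> flush()
	{
		auto pending = std::make_unique<std::queue<std::function<void()>>>();
		std::lock_guard<std::mutex> lock(mMutex);
		pending->swap(mCommands);
		return pending;
	}

	// Meant to run on the consumer ("render") thread.
	static void playback(std::unique_ptr<std::queue<std::function<void()>>> commands)
	{
		while(!commands->empty())
		{
			commands->front()();
			commands->pop();
		}
	}

private:
	std::mutex mMutex;
	std::queue<std::function<void()>> mCommands;
};

Handing ownership of the drained queue to the caller mirrors why the header insists the result of flush() must be passed to playback(): something has to execute the commands and then free them.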
 
 

+ 23 - 3
CamelotRenderer/Include/CmCoreGpuObject.h

@@ -1,6 +1,8 @@
 #pragma once

 #include "CmPrerequisites.h"
+#include "CmAsyncOp.h"
+#include "boost/function.hpp"

 namespace CamelotEngine
 {
@@ -18,7 +20,8 @@ namespace CamelotEngine
 		enum Flags
 		{
 			CGO_INITIALIZED = 0x01,
-			CGO_SCHEDULED_FOR_DELETE = 0x02
+			CGO_SCHEDULED_FOR_INIT = 0x02,
+			CGO_SCHEDULED_FOR_DELETE = 0x04
 		};

 	public:
@@ -26,7 +29,7 @@ namespace CamelotEngine
 		virtual ~CoreGpuObject();

 		/**
-		 * @brief	Destroys this object. Make sure to call this before deleting the object.
+		 * @brief	Destroys all GPU resources of this object.
 o		 * 			
 		 * @note	Destruction is not done immediately, and is instead just scheduled on the
 		 * 			render thread. Unless called from render thread in which case it is executed right away.
@@ -85,14 +88,28 @@ o		 *
 		 */
 		virtual void initialize_internal();

+		/**
+		 * @brief	Queues a command to be executed on the render thread, without a return value.
+		 */
+		static void queueGpuCommand(std::shared_ptr<CoreGpuObject> obj, boost::function<void(CoreGpuObject*)> func);
+
+		/**
+		 * @brief	Queues a command to be executed on the render thread, with a return value in the form of AsyncOp.
+		 * 			
+		 * @see		AsyncOp
+		 */
+		static AsyncOp queueReturnGpuCommand(std::shared_ptr<CoreGpuObject> obj, boost::function<void(CoreGpuObject*, AsyncOp&)> func);
+
 		/**
 		 * @brief	Returns a unique identifier for this object.
 		 */
 		UINT64 getInternalID() const { return mInternalID; }

+		bool isScheduledToBeInitialized() const { return (mFlags & CGO_SCHEDULED_FOR_INIT) != 0; }
 		bool isScheduledToBeDeleted() const { return (mFlags & CGO_SCHEDULED_FOR_DELETE) != 0; }
-
+		
 		void setIsInitialized(bool initialized) { mFlags = initialized ? mFlags | CGO_INITIALIZED : mFlags & ~CGO_INITIALIZED; }
+		void setScheduledToBeInitialized(bool scheduled) { mFlags = scheduled ? mFlags | CGO_SCHEDULED_FOR_INIT : mFlags & ~CGO_SCHEDULED_FOR_INIT; }
 		void setScheduledToBeDeleted(bool scheduled) { mFlags = scheduled ? mFlags | CGO_SCHEDULED_FOR_DELETE : mFlags & ~CGO_SCHEDULED_FOR_DELETE; }
 	private:
 		friend class CoreGpuObjectManager;
@@ -103,5 +120,8 @@ o		 *

 		CM_STATIC_THREAD_SYNCHRONISER(mCoreGpuObjectLoadedCondition)
 		CM_STATIC_MUTEX(mCoreGpuObjectLoadedMutex)
+
+		static void executeGpuCommand(std::shared_ptr<CoreGpuObject> obj, boost::function<void(CoreGpuObject*)> func);
+		static void executeReturnGpuCommand(std::shared_ptr<CoreGpuObject> obj, boost::function<void(CoreGpuObject*, AsyncOp&)> func, AsyncOp& op); 
 	};
 }
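
A small aside on the Flags change above: CGO_SCHEDULED_FOR_DELETE moves from 0x02 to 0x04 because every flag needs its own bit for the combined mFlags bitmask to work. A minimal sketch of the set/test/clear pattern used by the accessors, with illustrative names:

#include <cassert>
#include <cstdint>

enum Flags
{
	INITIALIZED          = 0x01,
	SCHEDULED_FOR_INIT   = 0x02,
	SCHEDULED_FOR_DELETE = 0x04
};

int main()
{
	std::uint32_t flags = 0;

	flags |= SCHEDULED_FOR_INIT;                 // set
	assert((flags & SCHEDULED_FOR_INIT) != 0);   // test
	assert((flags & SCHEDULED_FOR_DELETE) == 0); // unrelated bit untouched

	flags |= INITIALIZED;
	flags &= ~SCHEDULED_FOR_INIT;                // clear once init completes
	assert(flags == INITIALIZED);
	return 0;
}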

+ 14 - 12
CamelotRenderer/Source/CmCommandQueue.cpp

@@ -8,7 +8,7 @@ namespace CamelotEngine
 	CommandQueue::CommandQueue(CM_THREAD_ID_TYPE threadId, bool allowAllThreads)
 		:mMyThreadId(threadId), mAllowAllThreads(allowAllThreads)
 	{
-		mCommands = new vector<Command>::type();
+		mCommands = new std::queue<Command>();
 	}

 	CommandQueue::~CommandQueue()
@@ -38,10 +38,10 @@ namespace CamelotEngine
 #endif

 		Command newCommand(commandCallback, _notifyWhenComplete, _callbackId);
-		mCommands->push_back(newCommand);
+		mCommands->push(newCommand);

 #if CM_FORCE_SINGLETHREADED_RENDERING
-		vector<CommandQueue::Command>::type* commands = flush();
+		std::queue<CommandQueue::Command>* commands = flush();
 		playback(commands);
 #endif

@@ -60,28 +60,28 @@ namespace CamelotEngine
 #endif

 		Command newCommand(commandCallback, _notifyWhenComplete, _callbackId);
-		mCommands->push_back(newCommand);
+		mCommands->push(newCommand);

 #if CM_FORCE_SINGLETHREADED_RENDERING
-		vector<CommandQueue::Command>::type* commands = flush();
+		std::queue<CommandQueue::Command>* commands = flush();
 		playback(commands);
 #endif
 	}

-	vector<CommandQueue::Command>::type* CommandQueue::flush()
+	std::queue<CommandQueue::Command>* CommandQueue::flush()
 	{
-		vector<Command>::type* oldCommands = nullptr;
+		std::queue<Command>* oldCommands = nullptr;
 		{
 			CM_LOCK_MUTEX(mCommandBufferMutex);

 			oldCommands = mCommands;
-			mCommands = new vector<Command>::type();
+			mCommands = new std::queue<Command>();
 		}

 		return oldCommands;
 	}

-	void CommandQueue::playback(vector<CommandQueue::Command>::type* commands, boost::function<void(UINT32)> notifyCallback)
+	void CommandQueue::playback(std::queue<CommandQueue::Command>* commands, boost::function<void(UINT32)> notifyCallback)
 	{
 #if CM_DEBUG_MODE
 		RenderSystem* rs = RenderSystem::instancePtr();
@@ -93,9 +93,9 @@ namespace CamelotEngine
 		if(commands == nullptr)
 			return;

-		for(auto iter = commands->begin(); iter != commands->end(); ++iter)
+		while(!commands->empty())
 		{
-			Command& command = (*iter);
+			Command& command = commands->front();

 			if(command.returnsValue)
 			{
@@ -117,12 +117,14 @@ namespace CamelotEngine
 			{
 				notifyCallback(command.callbackId);
 			}
+
+			commands->pop();
 		}

 		delete commands;
 	}

-	void CommandQueue::playback(vector<Command>::type* commands)
+	void CommandQueue::playback(std::queue<Command>* commands)
 	{
 		playback(commands, boost::function<void(UINT32)>());
 	}
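
One presumable motivation for switching playback() from vector iteration to front()/pop(): each queued boost::function can now own a shared_ptr to a CoreGpuObject, and popping right after execution releases that reference per command instead of only when the whole batch is deleted. A minimal demonstration with standard types:

#include <cassert>
#include <functional>
#include <memory>
#include <queue>

int main()
{
	auto resource = std::make_shared<int>(42);
	std::queue<std::function<void()>> commands;

	commands.push([resource]() { /* use *resource on the render thread */ });
	assert(resource.use_count() == 2); // the queued command holds one reference

	commands.front()();
	commands.pop(); // destroys the functor and drops its reference immediately
	assert(resource.use_count() == 1);
	return 0;
}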

+ 52 - 10
CamelotRenderer/Source/CmCoreGpuObject.cpp

@@ -36,8 +36,10 @@ namespace CamelotEngine

 	void CoreGpuObject::destroy()
 	{
+		setScheduledToBeDeleted(true);
 		CoreGpuObjectManager::instance().registerObjectToDestroy(mThis.lock());
-		RenderSystem::instancePtr()->queueCommand(boost::bind(&CoreGpuObject::destroy_internal, this));
+
+		queueGpuCommand(mThis.lock(), &CoreGpuObject::destroy_internal);
 	}

 	void CoreGpuObject::destroy_internal()
@@ -58,11 +60,13 @@ namespace CamelotEngine
 	void CoreGpuObject::initialize()
 	{
 #if CM_DEBUG_MODE
-		if(isInitialized())
-			CM_EXCEPT(InternalErrorException, "Trying to initialize an object that is already initialized");
+		if(isInitialized() || isScheduledToBeInitialized())
+			CM_EXCEPT(InternalErrorException, "Trying to initialize an object that is already initialized.");
 #endif

-		RenderSystem::instancePtr()->queueCommand(boost::bind(&CoreGpuObject::initialize_internal, this));
+		setScheduledToBeInitialized(true);
+
+		queueGpuCommand(mThis.lock(), &CoreGpuObject::initialize_internal);
 	}

 	void CoreGpuObject::initialize_internal()
@@ -72,6 +76,8 @@ namespace CamelotEngine
 			setIsInitialized(true);
 		}	

+		setScheduledToBeInitialized(false);
+
 		CM_THREAD_NOTIFY_ALL(mCoreGpuObjectLoadedCondition);
 	}

@@ -87,6 +93,9 @@ namespace CamelotEngine
 			CM_LOCK_MUTEX_NAMED(mCoreGpuObjectLoadedMutex, lock);
 			while(!isInitialized())
 			{
+				if(!isScheduledToBeInitialized())
+					CM_EXCEPT(InternalErrorException, "Attempting to wait until initialization finishes but object is not scheduled to be initialized.");
+
 				CM_THREAD_WAIT(mCoreGpuObjectLoadedCondition, mCoreGpuObjectLoadedMutex, lock);
 			}
 		}
@@ -102,23 +111,56 @@ namespace CamelotEngine
 		assert(obj != nullptr);

 		// This method usually gets called automatically by the shared pointer when all references are released. The process:
-		// - If the deletion flag is set or object was never initialized then we immediately delete the object
+		// - If the object wasn't initialized delete it right away
 		// - Otherwise:
 		//  - We re-create the reference to the object by setting mThis pointer
 		//  - We queue the object to be destroyed so all of its GPU resources may be released on the render thread
 		//    - destroy() makes sure it keeps a reference of mThis so object isn't deleted
-		//    - Once the destroy() finishes the reference is removed and delete is called again, but this time deletion flag is set
+		//    - Once the destroy() finishes the reference is removed and the default shared_ptr deleter is called

-		if(obj->isScheduledToBeDeleted() || !obj->isInitialized())
+#if CM_DEBUG_MODE
+		if(obj->isScheduledToBeInitialized())
 		{
-			delete obj;
+			CM_EXCEPT(InternalErrorException, "Object scheduled to be initialized, yet it's being deleted. " \
+				"By design objects queued in the command queue should always have a reference count >= 1, therefore never be deleted " \
+				"while still in the queue.");
 		}
-		else
+#endif
+
+		if(obj->isInitialized())
 		{
 			std::shared_ptr<CoreGpuObject> thisPtr(obj);
 			obj->setThisPtr(thisPtr);
-			obj->setScheduledToBeDeleted(true);
 			obj->destroy();
 		}
+		else
+		{
+			delete obj;
+		}
+	}
+
+	void CoreGpuObject::queueGpuCommand(std::shared_ptr<CoreGpuObject> obj, boost::function<void(CoreGpuObject*)> func)
+	{
+		// We call another internal method and go through an additional layer of abstraction in order to keep an active
+		// reference to the obj (saved in the bound function).
+		// We could have called the function directly using "this" pointer but then we couldn't have used a shared_ptr for the object,
+		// in which case there is a possibility that the object would be released and deleted while still being in the command queue.
+		RenderSystem::instancePtr()->queueCommand(boost::bind(&CoreGpuObject::executeGpuCommand, obj, func));
+	}
+
+	AsyncOp CoreGpuObject::queueReturnGpuCommand(std::shared_ptr<CoreGpuObject> obj, boost::function<void(CoreGpuObject*, AsyncOp&)> func)
+	{
+		// See queueGpuCommand
+		return RenderSystem::instancePtr()->queueReturnCommand(boost::bind(&CoreGpuObject::executeReturnGpuCommand, obj, func, _1));
+	}
+
+	void CoreGpuObject::executeGpuCommand(std::shared_ptr<CoreGpuObject> obj, boost::function<void(CoreGpuObject*)> func)
+	{
+		func(obj.get());
+	}
+
+	void CoreGpuObject::executeReturnGpuCommand(std::shared_ptr<CoreGpuObject> obj, boost::function<void(CoreGpuObject*, AsyncOp&)> func, AsyncOp& op)
+	{
+		func(obj.get(), op);
 	}
 }
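
The queueGpuCommand/executeGpuCommand indirection above is the heart of the fix: the static helper takes the object by shared_ptr, that shared_ptr gets bound into the queued functor, and so the command itself keeps the object alive until it has run. A standalone sketch of the same pattern using std::function/std::bind in place of the engine's boost equivalents (class and queue names here are illustrative):

#include <functional>
#include <iostream>
#include <memory>
#include <queue>

class Object : public std::enable_shared_from_this<Object>
{
public:
	void destroyInternal() { std::cout << "releasing GPU resources\n"; }

	void destroy(std::queue<std::function<void()>>& commandQueue)
	{
		// Bind the shared_ptr, not the raw this pointer, into the queued command.
		commandQueue.push(std::bind(&Object::execute, shared_from_this(),
			&Object::destroyInternal));
	}

private:
	// Static so the bound shared_ptr is an explicit argument owned by the functor.
	static void execute(std::shared_ptr<Object> obj, std::function<void(Object*)> func)
	{
		func(obj.get()); // obj is guaranteed to still be alive here
	}
};

int main()
{
	std::queue<std::function<void()>> commandQueue;

	auto obj = std::make_shared<Object>();
	obj->destroy(commandQueue);
	obj.reset(); // the queued command still owns a reference

	while(!commandQueue.empty())
	{
		commandQueue.front()(); // "render thread" playback
		commandQueue.pop();     // the reference is dropped only after execution
	}
	return 0;
}

The same reasoning explains the debug check added to destroyInternal: with a reference stored in every queue entry, an object that is still scheduled for initialization should never reach the deleter.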

+ 2 - 2
CamelotRenderer/Source/CmDeferredRenderContext.cpp

@@ -170,7 +170,7 @@ namespace CamelotEngine

 	void DeferredRenderContext::submitToGpu()
 	{
-		vector<CommandQueue::Command>::type* commands = mCommandQueue->flush();
+		std::queue<CommandQueue::Command>* commands = mCommandQueue->flush();

 		RenderSystem* rs = RenderSystem::instancePtr();
 		rs->queueCommand(boost::bind(&CommandQueue::playback, mCommandQueue, commands));
@@ -178,7 +178,7 @@

 	void DeferredRenderContext::cancelAll()
 	{
-		vector<CommandQueue::Command>::type* commands = mCommandQueue->flush();
+		std::queue<CommandQueue::Command>* commands = mCommandQueue->flush();
 		delete commands;

 		// TODO - This cancels the commands but doesn't revert the state variables

+ 1 - 1
CamelotRenderer/Source/CmRenderSystem.cpp

@@ -396,7 +396,7 @@ namespace CamelotEngine {
 		while(true)
 		{
 			// Wait until we get some ready commands
-			vector<CommandQueue::Command>::type* commands = nullptr;
+			std::queue<CommandQueue::Command>* commands = nullptr;
 			{
 				CM_LOCK_MUTEX_NAMED(mCommandQueueMutex, lock)


+ 1 - 5
CamelotRenderer/TODO.txt

@@ -25,9 +25,6 @@ Having ResourceHandle closely tied in with Resources creates a problem when user
 - However those won't be released using unload() which is confusing.
   - creating a resource should add it to mLoaded

-Switching the renderer will cause exceptions for the first couple of times I run it. It happens randomly and
- goes away after I try to run the game for a few times. Restart the PC and try again if you want to get it to happen again.
-
 RenderTextures and MultiRenderTextures will only work properly if used directly from render thread, as setting each surface calls render methods, so its impossible to set one outside of render thread
 - also they don't have initialize_internal
 - I should probably make these immutable. Once they are set they can't be changed, so everything can be initialized when calling initialize()
@@ -35,8 +32,6 @@ RenderTextures and MultiRenderTextures will only work properly if used directly
 - Support loading of compound objects:
   - Loading Material also loads attached Shader and Textures/Samplers

-Calling CoreGpuObject::waitUntilInitialized and initialization fails (or is never called) it will block forever
-
 Add support for include file resource
 Make sure we can add an include file to a HighLevelGpuProgram, and make sure it uses it
 - Also a way to list all referenced includes, and a way to remove them
@@ -59,6 +54,7 @@ Can be delayed:
 Material RTTI should also serialize shared buffers (they need to be made into a resource)
 - BE CAREFUL on how this will be implemented. Likely it will have much of the same interface as a material and/or GpuParams
 Mesh::setMeshData is currently always synced
+  register/unregisterObjectToDestroy might be redundant now that I save a reference with each queue entry?
 >>>>>>>>>>>>>>>START WORKING ON THE EDITOR!
 

+ 1 - 0
CamelotUtility/Include/CmAsyncOp.h

@@ -1,6 +1,7 @@
 #pragma once

 #include "CmPrerequisitesUtil.h"
+#include "CmException.h"
 #include "boost/any.hpp"

 namespace CamelotEngine