瀏覽代碼

Added TaskScheduler and ported Resources to it (untested)
More documentation

Marko Pintera 11 年之前
父節點
當前提交
b4a4f95110
共有 32 個文件被更改,包括 685 次插入1197 次删除
  1. 39 19
      CamelotCore/Include/CmCommandQueue.h
  2. 93 24
      CamelotCore/Include/CmCoreObject.h
  3. 20 5
      CamelotCore/Include/CmCoreObjectManager.h
  4. 2 2
      CamelotCore/Include/CmMaterialManager.cpp
  5. 0 2
      CamelotCore/Include/CmPrerequisites.h
  6. 2 43
      CamelotCore/Include/CmResources.h
  7. 10 2
      CamelotCore/Source/CmApplication.cpp
  8. 8 8
      CamelotCore/Source/CmCoreObject.cpp
  9. 4 4
      CamelotCore/Source/CmCoreObjectManager.cpp
  10. 6 0
      CamelotCore/Source/CmCoreThread.cpp
  11. 2 2
      CamelotCore/Source/CmFontManager.cpp
  12. 1 1
      CamelotCore/Source/CmGpuProgInclude.cpp
  13. 1 1
      CamelotCore/Source/CmGpuProgramManager.cpp
  14. 5 5
      CamelotCore/Source/CmHardwareBufferManager.cpp
  15. 3 3
      CamelotCore/Source/CmHighLevelGpuProgramManager.cpp
  16. 2 2
      CamelotCore/Source/CmMeshHeap.cpp
  17. 4 4
      CamelotCore/Source/CmMeshManager.cpp
  18. 8 8
      CamelotCore/Source/CmRenderStateManager.cpp
  19. 1 1
      CamelotCore/Source/CmRenderWindowManager.cpp
  20. 27 97
      CamelotCore/Source/CmResources.cpp
  21. 1 1
      CamelotCore/Source/CmShader.cpp
  22. 1 1
      CamelotCore/Source/CmTexture.cpp
  23. 4 4
      CamelotCore/Source/CmTextureManager.cpp
  24. 1 1
      CamelotD3D11RenderSystem/Source/CmD3D11Texture.cpp
  25. 0 2
      CamelotUtility/CamelotUtility.vcxproj
  26. 0 6
      CamelotUtility/CamelotUtility.vcxproj.filters
  27. 150 0
      CamelotUtility/Include/BsTaskScheduler.h
  28. 126 1
      CamelotUtility/Include/BsThreadPool.h
  29. 2 0
      CamelotUtility/Include/CmFwdDeclUtil.h
  30. 0 428
      CamelotUtility/Include/CmWorkQueue.h
  31. 162 0
      CamelotUtility/Source/BsTaskScheduler.cpp
  32. 0 520
      CamelotUtility/Source/CmWorkQueue.cpp

+ 39 - 19
CamelotCore/Include/CmCommandQueue.h

@@ -7,6 +7,10 @@
 
 
 namespace BansheeEngine
 namespace BansheeEngine
 {
 {
+	/**
+	 * @brief	Command queue policy that provides no synchronization. Should be used
+	 * 			with command queues that are used on a single thread only.
+	 */
 	class CommandQueueNoSync
 	class CommandQueueNoSync
 	{
 	{
 	public:
 	public:
@@ -18,16 +22,14 @@ namespace BansheeEngine
 			return CM_THREAD_CURRENT_ID == ownerThread;
 			return CM_THREAD_CURRENT_ID == ownerThread;
 		}
 		}
 
 
-		void lock() 
-		{
-		};
-
-		void unlock()
-		{
-		}
-
+		void lock() { };
+		void unlock() { }
 	};
 	};
 
 
+	/**
+	 * @brief	Command queue policy that provides synchronization. Should be used
+	 * 			with command queues that are used on multiple threads.
+	 */
 	class CommandQueueSync
 	class CommandQueueSync
 	{
 	{
 	public:
 	public:
@@ -56,6 +58,10 @@ namespace BansheeEngine
 		CM_LOCK_TYPE mLock;
 		CM_LOCK_TYPE mLock;
 	};
 	};
 
 
+	/**
+	 * @brief	Represents a single queued command in the command list. Contains all the data for executing the command
+	 * 			and checking up on the command status.
+	 */
 	struct QueuedCommand
 	struct QueuedCommand
 	{
 	{
 #if CM_DEBUG_MODE
 #if CM_DEBUG_MODE
@@ -123,8 +129,7 @@ namespace BansheeEngine
 	};
 	};
 
 
 	/**
 	/**
-	 * @brief	Contains a list of commands that can be queued by one thread,
-	 * 			and executed by another.
+	 * @brief	Contains a list of commands you may queue for later execution on the core thread.
 	 */
 	 */
 	class CM_EXPORT CommandQueueBase
 	class CM_EXPORT CommandQueueBase
 	{
 	{
@@ -132,15 +137,21 @@ namespace BansheeEngine
 		/**
 		/**
 		 * @brief	Constructor.
 		 * @brief	Constructor.
 		 *
 		 *
-		 * @param	threadId	   	Identifier for the thread the command queue will be used on.						
+		 * @param	threadId	   	Identifier for the thread the command queue will be getting commands from.					
 		 */
 		 */
 		CommandQueueBase(CM_THREAD_ID_TYPE threadId);
 		CommandQueueBase(CM_THREAD_ID_TYPE threadId);
 		virtual ~CommandQueueBase();
 		virtual ~CommandQueueBase();
 
 
+		/**
+		 * @brief	Gets the thread identifier the command queue is used on.
+		 * 			
+		 * @note	If the command queue is using a synchronized access policy generally this
+		 * 			is not relevant as it may be used on multiple threads.
+		 */
 		CM_THREAD_ID_TYPE getThreadId() const { return mMyThreadId; }
 		CM_THREAD_ID_TYPE getThreadId() const { return mMyThreadId; }
 
 
 		/**
 		/**
-		 * @brief	Plays all provided commands. To get the commands call flush().
+		 * @brief	Executes all provided commands one by one in order. To get the commands you should call flush().
 		 *
 		 *
 		 * @param	notifyCallback  	Callback that will be called if a command that has "notifyOnComplete" flag set.
 		 * @param	notifyCallback  	Callback that will be called if a command that has "notifyOnComplete" flag set.
 		 * 								The callback will receive "callbackId" of the command.
 		 * 								The callback will receive "callbackId" of the command.
@@ -148,7 +159,7 @@ namespace BansheeEngine
 		void playbackWithNotify(Queue<QueuedCommand>::type* commands, std::function<void(UINT32)> notifyCallback);
 		void playbackWithNotify(Queue<QueuedCommand>::type* commands, std::function<void(UINT32)> notifyCallback);
 
 
 		/**
 		/**
-		 * @brief	Plays all provided commands. To get the commands call flush().
+		 * @brief	Executes all provided commands one by one in order. To get the commands you should call flush().
 		 */
 		 */
 		void playback(Queue<QueuedCommand>::type* commands);
 		void playback(Queue<QueuedCommand>::type* commands);
 
 
@@ -168,7 +179,7 @@ namespace BansheeEngine
 	protected:
 	protected:
 		/**
 		/**
 		 * @brief	Queue up a new command to execute. Make sure the provided function has all of its
 		 * @brief	Queue up a new command to execute. Make sure the provided function has all of its
-		 * 			parameters properly bound. Last parameter must be unbound and of AsyncOp&amp; type.
+		 * 			parameters properly bound. Last parameter must be unbound and of AsyncOp& type.
 		 * 			This is used to signal that the command is completed, and also for storing the return
 		 * 			This is used to signal that the command is completed, and also for storing the return
 		 * 			value.
 		 * 			value.
 		 * 			
 		 * 			
@@ -178,10 +189,10 @@ namespace BansheeEngine
 		 *
 		 *
 		 * @param	_notifyWhenComplete	(optional) Call the notify method (provided in the call to CommandQueue::playback)
 		 * @param	_notifyWhenComplete	(optional) Call the notify method (provided in the call to CommandQueue::playback)
 		 * 								when the command is complete.
 		 * 								when the command is complete.
-		 * @param	_callbackId			   	(optional) Identifier for the callback so you can then later find it
-		 * 									if needed.
+		 * @param	_callbackId			(optional) Identifier for the callback so you can then later find it
+		 * 								if needed.
 		 *
 		 *
-		 * @return	Async operation object you can continuously check until the command completes. After
+		 * @return	Async operation object that you can continuously check until the command completes. After
 		 * 			it completes AsyncOp::isResolved will return true and return data will be valid (if
 		 * 			it completes AsyncOp::isResolved will return true and return data will be valid (if
 		 * 			the callback provided any).
 		 * 			the callback provided any).
 		 */
 		 */
@@ -190,8 +201,7 @@ namespace BansheeEngine
 		/**
 		/**
 		 * @brief	Queue up a new command to execute. Make sure the provided function has all of its
 		 * @brief	Queue up a new command to execute. Make sure the provided function has all of its
 		 * 			parameters properly bound. Provided command is not expected to return a value. If you
 		 * 			parameters properly bound. Provided command is not expected to return a value. If you
-		 * 			wish to return a value from the callback use the other overload of queueCommand which
-		 * 			accepts AsyncOp parameter.
+		 * 			wish to return a value from the callback use the queueReturn which accepts an AsyncOp parameter.
 		 *
 		 *
 		 * @param	_notifyWhenComplete	(optional) Call the notify method (provided in the call to CommandQueue::playback)
 		 * @param	_notifyWhenComplete	(optional) Call the notify method (provided in the call to CommandQueue::playback)
 		 * 								when the command is complete.
 		 * 								when the command is complete.
@@ -217,6 +227,10 @@ namespace BansheeEngine
 		bool isEmpty();
 		bool isEmpty();
 
 
 	protected:
 	protected:
+		/**
+		 * @brief	Helper method that throws an "Invalid thread" exception. Used primarily
+		 * 			so we can avoid including Exception include in this header.
+		 */
 		void throwInvalidThreadException(const String& message) const;
 		void throwInvalidThreadException(const String& message) const;
 
 
 	private:
 	private:
@@ -259,12 +273,18 @@ namespace BansheeEngine
 		static UnorderedSet<QueueBreakpoint, QueueBreakpoint::HashFunction, QueueBreakpoint::EqualFunction>::type SetBreakpoints;
 		static UnorderedSet<QueueBreakpoint, QueueBreakpoint::HashFunction, QueueBreakpoint::EqualFunction>::type SetBreakpoints;
 		CM_STATIC_MUTEX(CommandQueueBreakpointMutex);
 		CM_STATIC_MUTEX(CommandQueueBreakpointMutex);
 
 
+		/**
+		 * @brief	Checks if the specified command has a breakpoint and throws an assert if it does.
+		 */
 		static void breakIfNeeded(UINT32 queueIdx, UINT32 commandIdx);
 		static void breakIfNeeded(UINT32 queueIdx, UINT32 commandIdx);
 #endif
 #endif
 	};
 	};
 
 
 	/**
 	/**
 	 * @copydoc CommandQueueBase
 	 * @copydoc CommandQueueBase
+	 * 			
+	 * @brief	Use SyncPolicy to choose whether you want the command queue to be synchronized or not. Synchronized
+	 * 			command queues may be used across multiple threads and non-synchronized only on one.
 	 */
 	 */
 	template<class SyncPolicy = CommandQueueNoSync>
 	template<class SyncPolicy = CommandQueueNoSync>
 	class CommandQueue : public CommandQueueBase, public SyncPolicy
 	class CommandQueue : public CommandQueueBase, public SyncPolicy

+ 93 - 24
CamelotCore/Include/CmCoreObject.h

@@ -7,33 +7,46 @@
 namespace BansheeEngine
 namespace BansheeEngine
 {
 {
 	/**
 	/**
-	 * @brief	This class provides some common functionality that all low-level GPU-related objects
-	 * 			need to implement.
+	 * @brief	This class provides some common functionality that all low-level objects
+	 * 			used on the core thread need to implement.
 	 * 			
 	 * 			
-	 * @note	This involves initializing, keeping track of, and releasing all GPU resources.
+	 * @note	This involves initializing, keeping track of, and releasing GPU resources.
 	 * 			All core GPU objects are initialized on the core thread, and destroyed on the core thread,
 	 * 			All core GPU objects are initialized on the core thread, and destroyed on the core thread,
 	 * 			so majority of these methods will just schedule object initialization/destruction.
 	 * 			so majority of these methods will just schedule object initialization/destruction.
+	 * 			Non-GPU core objects can normally be initialized on the caller thread.
 	 */
 	 */
 	class CM_EXPORT CoreObject
 	class CM_EXPORT CoreObject
 	{
 	{
 	protected:
 	protected:
+		/**
+		 * @brief	Values that represent current state of the object
+		 */
 		enum Flags
 		enum Flags
 		{
 		{
-			CGO_INITIALIZED = 0x01,
-			CGO_INIT_ON_RENDER_THREAD = 0x02,
-			CGO_SCHEDULED_FOR_INIT = 0x04,
-			CGO_SCHEDULED_FOR_DELETE = 0x08
+			CGO_INITIALIZED = 0x01, /**< Object has been fully initialized and may be used. */
+			CGO_INIT_ON_CORE_THREAD = 0x02, /**< Object requires initialization on core thread. */
+			CGO_SCHEDULED_FOR_INIT = 0x04, /**< Object has been scheduled for initialization but core thread has not completed it yet. */
+			CGO_SCHEDULED_FOR_DELETE = 0x08 /**< Object has been scheduled for deletion but core thread has not completed it yet. */
 		};
 		};
 
 
 	public:
 	public:
+		/**
+		 * @brief	Constructs a new core object.
+		 *
+		 * @param	requiresGpuInit	(optional) If true the objects initialize_internal and destroy_internal methods
+		 * 							will be called from the core thread asynchronously. Otherwise they will be called 
+		 * 							by the caller thread synchronously.
+		 */
 		CoreObject(bool requiresGpuInit = true);
 		CoreObject(bool requiresGpuInit = true);
 		virtual ~CoreObject();
 		virtual ~CoreObject();
 
 
 		/**
 		/**
 		 * @brief	Destroys all GPU resources of this object.
 		 * @brief	Destroys all GPU resources of this object.
-o		 * 			
-		 * @note	Destruction is not done immediately, and is instead just scheduled on the
-		 * 			core thread. Unless called from core thread in which case it is executed right away.
+		 * 			
+		 * @note	If is created with "CGO_INIT_ON_CORE_THREAD" flag destruction is not done immediately, 
+		 * 			and is instead just scheduled on the core thread. 
+		 * 			Unless called from core thread in which case it is executed immediately.
+		 * 			Objects without "CGO_INIT_ON_CORE_THREAD" flag are destructed immediately.
 		 */
 		 */
 		virtual void destroy();
 		virtual void destroy();
 
 
@@ -41,8 +54,10 @@ o		 *
 		 * @brief	Initializes all the internal resources of this object. Should be called by the
 		 * @brief	Initializes all the internal resources of this object. Should be called by the
 		 * 			factory creation methods automatically after construction and not by user directly.
 		 * 			factory creation methods automatically after construction and not by user directly.
 		 * 					
 		 * 					
-		 * @note	Initialization is not done immediately, and is instead just scheduled on the
-		 * 			core thread. Unless called from core thread in which case it is executed right away.
+		 * @note	If is created with "CGO_INIT_ON_CORE_THREAD" flag initialization is not done immediately, 
+		 * 			and is instead just scheduled on the core thread. 
+		 * 			Unless called from core thread in which case it is executed immediately.
+		 * 			Objects without "CGO_INIT_ON_CORE_THREAD" flag are initialized immediately.
 		 */
 		 */
 		virtual void initialize();
 		virtual void initialize();
 
 
@@ -51,22 +66,24 @@ o		 *
 		 * 			allowed to call any methods on the resource until you are sure resource is initialized.
 		 * 			allowed to call any methods on the resource until you are sure resource is initialized.
 		 * 			
 		 * 			
 		 * @note	Normally CPU objects are initialized on creation and this will never be false, and GPU
 		 * @note	Normally CPU objects are initialized on creation and this will never be false, and GPU
-		 * 			objects are initialized when the Core Thread processes them.
+		 * 			objects are initialized when the core thread processes them.
 		 */
 		 */
 		bool isInitialized() const { return (mFlags & CGO_INITIALIZED) != 0; }
 		bool isInitialized() const { return (mFlags & CGO_INITIALIZED) != 0; }
 
 
 		/**
 		/**
 		 * @brief	Blocks the current thread until the resource is fully initialized.
 		 * @brief	Blocks the current thread until the resource is fully initialized.
-		 * 			If you call this without calling initialize first a deadlock will occurr.
+		 * 			
+		 * @note	If you call this without calling initialize first a deadlock will occur.
+		 * 			You should not call this from core thread.
 		 */
 		 */
 		void synchronize();
 		void synchronize();
 
 
 		/**
 		/**
-		 * @brief	Sets a shared this pointer to this object. This MUST be called immediately after construction.
+		 * @brief	Internal method. Sets a shared this pointer to this object. This MUST be called immediately after construction.
 		 *
 		 *
 		 * @note	Called automatically by the factory creation methods so user should not call this manually.
 		 * @note	Called automatically by the factory creation methods so user should not call this manually.
 		 */
 		 */
-		void setThisPtr(std::shared_ptr<CoreObject> ptrThis);
+		void _setThisPtr(std::shared_ptr<CoreObject> ptrThis);
 
 
 		/**
 		/**
 		 * @brief	Returns an unique identifier for this object.
 		 * @brief	Returns an unique identifier for this object.
@@ -74,9 +91,7 @@ o		 *
 		UINT64 getInternalID() const { return mInternalID; }
 		UINT64 getInternalID() const { return mInternalID; }
 
 
 		/**
 		/**
-		 * @brief	Schedules the object to be destroyed, and then deleted.
-		 *
-		 * @note	You should never call this manually. It's meant for internal use only.
+		 * @brief	Internal method. Schedules the object to be destroyed, and then deleted.
 		 */
 		 */
 		template<class T, class MemAlloc>
 		template<class T, class MemAlloc>
 		static void _deleteDelayed(CoreObject* obj)
 		static void _deleteDelayed(CoreObject* obj)
@@ -86,7 +101,7 @@ o		 *
 			if(obj->isInitialized())
 			if(obj->isInitialized())
 			{
 			{
 				std::shared_ptr<CoreObject> thisPtr(obj);
 				std::shared_ptr<CoreObject> thisPtr(obj);
-				obj->setThisPtr(thisPtr);
+				obj->_setThisPtr(thisPtr);
 				obj->destroy();
 				obj->destroy();
 			}
 			}
 			else
 			else
@@ -105,7 +120,8 @@ o		 *
 		 * @brief	Frees all of the objects dynamically allocated memory. All derived classes that have something to free
 		 * @brief	Frees all of the objects dynamically allocated memory. All derived classes that have something to free
 		 * 			should do it here instead of their destructor. All derived classes need to call this base method when they're done.
 		 * 			should do it here instead of their destructor. All derived classes need to call this base method when they're done.
 		 * 			
 		 * 			
-		 * @note	Since this is scheduled to be executed on the core thread, normally you want to destroy all GPU specific resources here.
+		 * @note	For objects with "CGO_INIT_ON_CORE_THREAD" flag this is scheduled to be executed on the core thread, 
+		 * 			so normally you want to destroy all GPU specific resources here.
 		 */
 		 */
 		virtual void destroy_internal();
 		virtual void destroy_internal();
 
 
@@ -113,10 +129,14 @@ o		 *
 		 * @brief	Initializes all the internal resources of this object. Needs to be called before doing
 		 * @brief	Initializes all the internal resources of this object. Needs to be called before doing
 		 * 			any operations with the object. All derived classes also need to call this base method.
 		 * 			any operations with the object. All derived classes also need to call this base method.
 		 * 			
 		 * 			
-		 * @note	Since this is scheduled to be executed on the core thread, normally you want to initialize all GPU specific resources here.
+		 * @note	For objects with "CGO_INIT_ON_CORE_THREAD" flag this is scheduled to be executed on the core thread, 
+		 * 			so normally you want to initialize all GPU specific resources here.
 		 */
 		 */
 		virtual void initialize_internal();
 		virtual void initialize_internal();
 
 
+		/**
+		 * @brief	Performs some internal checks when an object is being deleted.
+		 */
 		static void _deleteDelayedInternal(CoreObject* obj);
 		static void _deleteDelayedInternal(CoreObject* obj);
 
 
 		/**
 		/**
@@ -141,13 +161,13 @@ o		 *
 
 
 		bool isScheduledToBeInitialized() const { return (mFlags & CGO_SCHEDULED_FOR_INIT) != 0; }
 		bool isScheduledToBeInitialized() const { return (mFlags & CGO_SCHEDULED_FOR_INIT) != 0; }
 		bool isScheduledToBeDeleted() const { return (mFlags & CGO_SCHEDULED_FOR_DELETE) != 0; }
 		bool isScheduledToBeDeleted() const { return (mFlags & CGO_SCHEDULED_FOR_DELETE) != 0; }
-		bool requiresInitOnRenderThread() const { return (mFlags & CGO_INIT_ON_RENDER_THREAD) != 0; }
+		bool requiresInitOnCoreThread() const { return (mFlags & CGO_INIT_ON_CORE_THREAD) != 0; }
 
 
 		void setIsInitialized(bool initialized) { mFlags = initialized ? mFlags | CGO_INITIALIZED : mFlags & ~CGO_INITIALIZED; }
 		void setIsInitialized(bool initialized) { mFlags = initialized ? mFlags | CGO_INITIALIZED : mFlags & ~CGO_INITIALIZED; }
 		void setScheduledToBeInitialized(bool scheduled) { mFlags = scheduled ? mFlags | CGO_SCHEDULED_FOR_INIT : mFlags & ~CGO_SCHEDULED_FOR_INIT; }
 		void setScheduledToBeInitialized(bool scheduled) { mFlags = scheduled ? mFlags | CGO_SCHEDULED_FOR_INIT : mFlags & ~CGO_SCHEDULED_FOR_INIT; }
 		void setScheduledToBeDeleted(bool scheduled) { mFlags = scheduled ? mFlags | CGO_SCHEDULED_FOR_DELETE : mFlags & ~CGO_SCHEDULED_FOR_DELETE; }
 		void setScheduledToBeDeleted(bool scheduled) { mFlags = scheduled ? mFlags | CGO_SCHEDULED_FOR_DELETE : mFlags & ~CGO_SCHEDULED_FOR_DELETE; }
 	private:
 	private:
-		friend class CoreGpuObjectManager;
+		friend class CoreObjectManager;
 
 
 		volatile UINT8 mFlags;
 		volatile UINT8 mFlags;
 		UINT64 mInternalID; // ID == 0 is not a valid ID
 		UINT64 mInternalID; // ID == 0 is not a valid ID
@@ -156,13 +176,38 @@ o		 *
 		CM_STATIC_THREAD_SYNCHRONISER(mCoreGpuObjectLoadedCondition)
 		CM_STATIC_THREAD_SYNCHRONISER(mCoreGpuObjectLoadedCondition)
 		CM_STATIC_MUTEX(mCoreGpuObjectLoadedMutex)
 		CM_STATIC_MUTEX(mCoreGpuObjectLoadedMutex)
 
 
+		/**
+		 * @brief	Queues object initialization command on the core thread. The command is added to the
+		 * 			primary core thread queue and will be executed as soon as the core thread is ready.
+		 */
 		static void queueInitializeGpuCommand(std::shared_ptr<CoreObject>& obj);
 		static void queueInitializeGpuCommand(std::shared_ptr<CoreObject>& obj);
+
+		/**
+		 * @brief	Queues object destruction command on the core thread. The command is added to the
+		 * 			core thread accessor of this thread and will be executed after accessor commands
+		 * 			are submitted and any previously queued commands are executed.
+		 *
+		 * @note	It is up to the caller to ensure no other accessors attempt to use this object.
+		 */
 		static void queueDestroyGpuCommand(std::shared_ptr<CoreObject>& obj);
 		static void queueDestroyGpuCommand(std::shared_ptr<CoreObject>& obj);
 
 
+		/**
+		 * @brief	Helper wrapper method used for queuing commands with no return value on the core thread.
+		 */
 		static void executeGpuCommand(std::shared_ptr<CoreObject>& obj, std::function<void()> func);
 		static void executeGpuCommand(std::shared_ptr<CoreObject>& obj, std::function<void()> func);
+
+		/**
+		 * @brief	Helper wrapper method used for queuing commands with a return value on the core thread.
+		 */
 		static void executeReturnGpuCommand(std::shared_ptr<CoreObject>& obj, std::function<void(AsyncOp&)> func, AsyncOp& op); 
 		static void executeReturnGpuCommand(std::shared_ptr<CoreObject>& obj, std::function<void(AsyncOp&)> func, AsyncOp& op); 
 	};
 	};
 
 
+	/**
+	 * @brief	Creates a new core object using the specified allocators and returns a shared pointer to it.
+	 *
+	 * @note	All core thread object shared pointers must be created using this method or its overloads
+	 * 			and you should not create them manually.
+	 */
 #define MAKE_CM_NEW_CORE(z, n, unused)                                     \
 #define MAKE_CM_NEW_CORE(z, n, unused)                                     \
 	template<class Type, class MainAlloc, class PtrDataAlloc BOOST_PP_ENUM_TRAILING_PARAMS(n, class T)>     \
 	template<class Type, class MainAlloc, class PtrDataAlloc BOOST_PP_ENUM_TRAILING_PARAMS(n, class T)>     \
 	std::shared_ptr<Type> cm_core_ptr(BOOST_PP_ENUM_BINARY_PARAMS(n, T, t) ) { \
 	std::shared_ptr<Type> cm_core_ptr(BOOST_PP_ENUM_BINARY_PARAMS(n, T, t) ) { \
@@ -173,6 +218,12 @@ o		 *
 
 
 #undef MAKE_CM_NEW_CORE
 #undef MAKE_CM_NEW_CORE
 
 
+	/**
+	 * @brief	Creates a new core object using the specified allocator and returns a shared pointer to it.
+	 *
+	 * @note	All core thread object shared pointers must be created using this method or its overloads
+	 * 			and you should not create them manually.
+	 */
 #define MAKE_CM_NEW_CORE(z, n, unused)                                     \
 #define MAKE_CM_NEW_CORE(z, n, unused)                                     \
 	template<class Type, class MainAlloc BOOST_PP_ENUM_TRAILING_PARAMS(n, class T)>     \
 	template<class Type, class MainAlloc BOOST_PP_ENUM_TRAILING_PARAMS(n, class T)>     \
 	std::shared_ptr<Type> cm_core_ptr(BOOST_PP_ENUM_BINARY_PARAMS(n, T, t) ) { \
 	std::shared_ptr<Type> cm_core_ptr(BOOST_PP_ENUM_BINARY_PARAMS(n, T, t) ) { \
@@ -183,6 +234,12 @@ o		 *
 
 
 #undef MAKE_CM_NEW_CORE
 #undef MAKE_CM_NEW_CORE
 
 
+	/**
+	 * @brief	Creates a new core object and returns a shared pointer to it.
+	 *
+	 * @note	All core thread object shared pointers must be created using this method or its overloads
+	 * 			and you should not create them manually.
+	 */
 #define MAKE_CM_NEW_CORE(z, n, unused)                                     \
 #define MAKE_CM_NEW_CORE(z, n, unused)                                     \
 	template<class Type BOOST_PP_ENUM_TRAILING_PARAMS(n, class T)>     \
 	template<class Type BOOST_PP_ENUM_TRAILING_PARAMS(n, class T)>     \
 	std::shared_ptr<Type> cm_core_ptr(BOOST_PP_ENUM_BINARY_PARAMS(n, T, t) ) { \
 	std::shared_ptr<Type> cm_core_ptr(BOOST_PP_ENUM_BINARY_PARAMS(n, T, t) ) { \
@@ -193,12 +250,24 @@ o		 *
 
 
 #undef MAKE_CM_NEW_CORE
 #undef MAKE_CM_NEW_CORE
 
 
+	/**
+	 * @brief	Creates a core object shared pointer using a previously constructed object.
+	 *
+	 * @note	All core thread object shared pointers must be created using this method or its overloads
+	 * 			and you should not create them manually.
+	 */
 	template<class Type, class MainAlloc>
 	template<class Type, class MainAlloc>
 	std::shared_ptr<Type> cm_core_ptr(Type* data) 
 	std::shared_ptr<Type> cm_core_ptr(Type* data) 
 	{
 	{
 		return std::shared_ptr<Type>(data, &CoreObject::_deleteDelayed<Type, MainAlloc>, StdAlloc<GenAlloc>());  
 		return std::shared_ptr<Type>(data, &CoreObject::_deleteDelayed<Type, MainAlloc>, StdAlloc<GenAlloc>());  
 	}
 	}
 
 
+	/**
+	 * @brief	Creates a core object shared pointer using a previously constructed object.
+	 *
+	 * @note	All core thread object shared pointers must be created using this method or its overloads
+	 * 			and you should not create them manually.
+	 */
 	template<class Type, class MainAlloc, class PtrDataAlloc>
 	template<class Type, class MainAlloc, class PtrDataAlloc>
 	std::shared_ptr<Type> cm_core_ptr(Type* data) 
 	std::shared_ptr<Type> cm_core_ptr(Type* data) 
 	{
 	{

+ 20 - 5
CamelotCore/Include/CmCoreObjectManager.h

@@ -5,18 +5,33 @@
 
 
 namespace BansheeEngine
 namespace BansheeEngine
 {
 {
-	// TODO - Add debug option that would remember a call stack for each resource initialization,
+	// TODO Low priority - Add debug option that would remember a call stack for each resource initialization,
 	// so when we fail to release one we know which one it is.
 	// so when we fail to release one we know which one it is.
-	class CM_EXPORT CoreGpuObjectManager : public Module<CoreGpuObjectManager>
+	
+	/**
+	 * @brief	Manager that keeps track of all active CoreObjects.
+	 * 			
+	 * @note	Thread safe.
+	 */
+	class CM_EXPORT CoreObjectManager : public Module<CoreObjectManager>
 	{
 	{
 	public:
 	public:
-		CoreGpuObjectManager();
-		~CoreGpuObjectManager();
+		CoreObjectManager();
+		~CoreObjectManager();
 
 
+		/**
+		 * @brief	Registers a new CoreObject notifying the manager the object
+		 * 			is created.
+		 */
 		UINT64 registerObject(CoreObject* object);
 		UINT64 registerObject(CoreObject* object);
+
+		/**
+		 * @brief	Unregisters a CoreObject notifying the manager the object
+		 * 			is destroyed.
+		 */
 		void unregisterObject(CoreObject* object);
 		void unregisterObject(CoreObject* object);
+
 	private:
 	private:
-		// Keeps a list of ALL loaded core GPU objects
 		UINT64 mNextAvailableID;
 		UINT64 mNextAvailableID;
 		Map<UINT64, CoreObject*>::type mObjects;
 		Map<UINT64, CoreObject*>::type mObjects;
 		CM_MUTEX(mObjectsMutex);
 		CM_MUTEX(mObjectsMutex);

+ 2 - 2
CamelotCore/Include/CmMaterialManager.cpp

@@ -6,7 +6,7 @@ namespace BansheeEngine
 	MaterialPtr MaterialManager::create() const
 	MaterialPtr MaterialManager::create() const
 	{
 	{
 		MaterialPtr newMat = cm_core_ptr<Material, PoolAlloc>(new (cm_alloc<Material, PoolAlloc>()) Material());
 		MaterialPtr newMat = cm_core_ptr<Material, PoolAlloc>(new (cm_alloc<Material, PoolAlloc>()) Material());
-		newMat->setThisPtr(newMat);
+		newMat->_setThisPtr(newMat);
 		newMat->initialize();
 		newMat->initialize();
 
 
 		return newMat;
 		return newMat;
@@ -15,7 +15,7 @@ namespace BansheeEngine
 	MaterialPtr MaterialManager::create(ShaderPtr shader) const
 	MaterialPtr MaterialManager::create(ShaderPtr shader) const
 	{
 	{
 		MaterialPtr newMat = cm_core_ptr<Material, PoolAlloc>(new (cm_alloc<Material, PoolAlloc>()) Material());
 		MaterialPtr newMat = cm_core_ptr<Material, PoolAlloc>(new (cm_alloc<Material, PoolAlloc>()) Material());
-		newMat->setThisPtr(newMat);
+		newMat->_setThisPtr(newMat);
 		newMat->initialize();
 		newMat->initialize();
 		newMat->setShader(shader);
 		newMat->setShader(shader);
 
 

+ 0 - 2
CamelotCore/Include/CmPrerequisites.h

@@ -99,7 +99,6 @@ namespace BansheeEngine
 	class RawInputHandler;
 	class RawInputHandler;
 	class Renderer;
 	class Renderer;
 	class RendererFactory;
 	class RendererFactory;
-	class WorkQueue;
 	class PassParameters;
 	class PassParameters;
 	class AsyncOp;
 	class AsyncOp;
 	class HardwareBufferManager;
 	class HardwareBufferManager;
@@ -194,7 +193,6 @@ namespace BansheeEngine
 	typedef std::shared_ptr<Material> MaterialPtr;
 	typedef std::shared_ptr<Material> MaterialPtr;
 	typedef std::shared_ptr<Renderer> RendererPtr;
 	typedef std::shared_ptr<Renderer> RendererPtr;
 	typedef std::shared_ptr<RendererFactory> RendererFactoryPtr;
 	typedef std::shared_ptr<RendererFactory> RendererFactoryPtr;
-	typedef std::shared_ptr<WorkQueue> WorkQueuePtr;
 	typedef std::shared_ptr<PassParameters> PassParametersPtr;
 	typedef std::shared_ptr<PassParameters> PassParametersPtr;
 	typedef std::shared_ptr<Component> ComponentPtr;
 	typedef std::shared_ptr<Component> ComponentPtr;
 	typedef std::shared_ptr<SceneObject> GameObjectPtr;
 	typedef std::shared_ptr<SceneObject> GameObjectPtr;

+ 2 - 43
CamelotCore/Include/CmResources.h

@@ -2,45 +2,11 @@
 
 
 #include "CmPrerequisites.h"
 #include "CmPrerequisites.h"
 #include "CmModule.h"
 #include "CmModule.h"
-#include "CmWorkQueue.h"
 
 
 namespace BansheeEngine
 namespace BansheeEngine
 {
 {
 	class CM_EXPORT Resources : public Module<Resources>
 	class CM_EXPORT Resources : public Module<Resources>
 	{
 	{
-	private:
-		class CM_EXPORT ResourceRequestHandler : public WorkQueue::RequestHandler
-		{
-			virtual bool canHandleRequest( const WorkQueue::Request* req, const WorkQueue* srcQ );
-			virtual WorkQueue::Response* handleRequest(WorkQueue::Request* req, const WorkQueue* srcQ );
-		};
-
-		class CM_EXPORT ResourceResponseHandler : public WorkQueue::ResponseHandler
-		{
-			virtual bool canHandleResponse( const WorkQueue::Response* res, const WorkQueue* srcQ );
-			virtual void handleResponse( const WorkQueue::Response* res, const WorkQueue* srcQ );
-		};
-
-		struct CM_EXPORT ResourceLoadRequest
-		{
-			WString filePath;
-			HResource resource;
-		};
-
-		struct CM_EXPORT ResourceLoadResponse
-		{
-			ResourcePtr rawResource;
-		};
-
-		struct CM_EXPORT ResourceAsyncOp
-		{
-			HResource resource;
-			WorkQueue::RequestID requestID;
-		};
-
-		typedef std::shared_ptr<ResourceLoadRequest> ResourceLoadRequestPtr;
-		typedef std::shared_ptr<ResourceLoadResponse> ResourceLoadResponsePtr;
-
 	public:
 	public:
 		/**
 		/**
 		 * @brief	Constructor.
 		 * @brief	Constructor.
@@ -162,20 +128,13 @@ namespace BansheeEngine
 		CM_MUTEX(mInProgressResourcesMutex);
 		CM_MUTEX(mInProgressResourcesMutex);
 		CM_MUTEX(mLoadedResourceMutex);
 		CM_MUTEX(mLoadedResourceMutex);
 
 
-		ResourceRequestHandler* mRequestHandler;
-		ResourceResponseHandler* mResponseHandler;
-
-		WorkQueue* mWorkQueue;
-		UINT16 mWorkQueueChannel;
-
 		UnorderedMap<String, HResource>::type mLoadedResources; 
 		UnorderedMap<String, HResource>::type mLoadedResources; 
-		UnorderedMap<String, ResourceAsyncOp>::type mInProgressResources; // Resources that are being asynchronously loaded
+		UnorderedMap<String, HResource>::type mInProgressResources; // Resources that are being asynchronously loaded
 
 
 		HResource loadInternal(const WString& filePath, bool synchronous); 
 		HResource loadInternal(const WString& filePath, bool synchronous); 
 		ResourcePtr loadFromDiskAndDeserialize(const WString& filePath);
 		ResourcePtr loadFromDiskAndDeserialize(const WString& filePath);
 
 
-		void notifyResourceLoadingFinished(HResource& handle);
-		void notifyNewResourceLoaded(HResource& handle);
+		void loadCallback(const WString& filePath, HResource& resource);
 	};
 	};
 
 
 	CM_EXPORT Resources& gResources();
 	CM_EXPORT Resources& gResources();

+ 10 - 2
CamelotCore/Source/CmApplication.cpp

@@ -34,6 +34,7 @@
 #include "CmQueryManager.h"
 #include "CmQueryManager.h"
 #include "BsThreadPool.h"
 #include "BsThreadPool.h"
 #include "BsThreadPolicy.h"
 #include "BsThreadPolicy.h"
+#include "BsTaskScheduler.h"
 
 
 #include "CmMaterial.h"
 #include "CmMaterial.h"
 #include "CmShader.h"
 #include "CmShader.h"
@@ -59,11 +60,13 @@ namespace BansheeEngine
 
 
 		Profiler::startUp(cm_new<Profiler>());
 		Profiler::startUp(cm_new<Profiler>());
 		ThreadPool::startUp(cm_new<TThreadPool<ThreadBansheePolicy>>(numWorkerThreads));
 		ThreadPool::startUp(cm_new<TThreadPool<ThreadBansheePolicy>>(numWorkerThreads));
+		TaskScheduler::startUp(cm_new<TaskScheduler>());
+		TaskScheduler::instance().removeWorker();
 		StringTable::startUp(cm_new<StringTable>());
 		StringTable::startUp(cm_new<StringTable>());
 		DeferredCallManager::startUp(cm_new<DeferredCallManager>());
 		DeferredCallManager::startUp(cm_new<DeferredCallManager>());
 		Time::startUp(cm_new<Time>());
 		Time::startUp(cm_new<Time>());
 		DynLibManager::startUp(cm_new<DynLibManager>());
 		DynLibManager::startUp(cm_new<DynLibManager>());
-		CoreGpuObjectManager::startUp(cm_new<CoreGpuObjectManager>());
+		CoreObjectManager::startUp(cm_new<CoreObjectManager>());
 		GameObjectManager::startUp(cm_new<GameObjectManager>());
 		GameObjectManager::startUp(cm_new<GameObjectManager>());
 		Resources::startUp(cm_new<Resources>());
 		Resources::startUp(cm_new<Resources>());
 		HighLevelGpuProgramManager::startUp(cm_new<HighLevelGpuProgramManager>());
 		HighLevelGpuProgramManager::startUp(cm_new<HighLevelGpuProgramManager>());
@@ -125,7 +128,11 @@ namespace BansheeEngine
 				CM_LOCK_MUTEX_NAMED(mFrameRenderingFinishedMutex, lock);
 				CM_LOCK_MUTEX_NAMED(mFrameRenderingFinishedMutex, lock);
 
 
 				while(!mIsFrameRenderingFinished)
 				while(!mIsFrameRenderingFinished)
+				{
+					TaskScheduler::instance().addWorker();
 					CM_THREAD_WAIT(mFrameRenderingFinishedCondition, mFrameRenderingFinishedMutex, lock);
 					CM_THREAD_WAIT(mFrameRenderingFinishedCondition, mFrameRenderingFinishedMutex, lock);
+					TaskScheduler::instance().removeWorker();
+				}
 
 
 				mIsFrameRenderingFinished = false;
 				mIsFrameRenderingFinished = false;
 			}
 			}
@@ -187,12 +194,13 @@ namespace BansheeEngine
 		HighLevelGpuProgramManager::shutDown();
 		HighLevelGpuProgramManager::shutDown();
 		Resources::shutDown();
 		Resources::shutDown();
 		GameObjectManager::shutDown();
 		GameObjectManager::shutDown();
-		CoreGpuObjectManager::shutDown(); // Must shut down before DynLibManager to ensure all objects are destroyed before unloading their libraries
+		CoreObjectManager::shutDown(); // Must shut down before DynLibManager to ensure all objects are destroyed before unloading their libraries
 		DynLibManager::shutDown();
 		DynLibManager::shutDown();
 		Time::shutDown();
 		Time::shutDown();
 		DeferredCallManager::shutDown();
 		DeferredCallManager::shutDown();
 		StringTable::shutDown();
 		StringTable::shutDown();
 
 
+		TaskScheduler::shutDown();
 		ThreadPool::shutDown();
 		ThreadPool::shutDown();
 		Profiler::shutDown();
 		Profiler::shutDown();
 		MemStack::endThread();
 		MemStack::endThread();

+ 8 - 8
CamelotCore/Source/CmCoreObject.cpp

@@ -14,8 +14,8 @@ namespace BansheeEngine
 	CoreObject::CoreObject(bool initializeOnRenderThread)
 	CoreObject::CoreObject(bool initializeOnRenderThread)
 		: mFlags(0), mInternalID(0)
 		: mFlags(0), mInternalID(0)
 	{
 	{
-		mInternalID = CoreGpuObjectManager::instance().registerObject(this);
-		mFlags = initializeOnRenderThread ? mFlags | CGO_INIT_ON_RENDER_THREAD : mFlags;
+		mInternalID = CoreObjectManager::instance().registerObject(this);
+		mFlags = initializeOnRenderThread ? mFlags | CGO_INIT_ON_CORE_THREAD : mFlags;
 	}
 	}
 
 
 	CoreObject::~CoreObject() 
 	CoreObject::~CoreObject() 
@@ -35,12 +35,12 @@ namespace BansheeEngine
 		}
 		}
 #endif
 #endif
 
 
-		CoreGpuObjectManager::instance().unregisterObject(this);
+		CoreObjectManager::instance().unregisterObject(this);
 	}
 	}
 
 
 	void CoreObject::destroy()
 	void CoreObject::destroy()
 	{
 	{
-		if(requiresInitOnRenderThread())
+		if(requiresInitOnCoreThread())
 		{
 		{
 			setScheduledToBeDeleted(true);
 			setScheduledToBeDeleted(true);
 
 
@@ -72,7 +72,7 @@ namespace BansheeEngine
 			CM_EXCEPT(InternalErrorException, "Trying to initialize an object that is already initialized.");
 			CM_EXCEPT(InternalErrorException, "Trying to initialize an object that is already initialized.");
 #endif
 #endif
 
 
-		if(requiresInitOnRenderThread())
+		if(requiresInitOnCoreThread())
 		{
 		{
 			setScheduledToBeInitialized(true);
 			setScheduledToBeInitialized(true);
 
 
@@ -89,7 +89,7 @@ namespace BansheeEngine
 
 
 	void CoreObject::initialize_internal()
 	void CoreObject::initialize_internal()
 	{
 	{
-		if(requiresInitOnRenderThread())
+		if(requiresInitOnCoreThread())
 		{
 		{
 			{
 			{
 				CM_LOCK_MUTEX(mCoreGpuObjectLoadedMutex);
 				CM_LOCK_MUTEX(mCoreGpuObjectLoadedMutex);
@@ -110,7 +110,7 @@ namespace BansheeEngine
 	{
 	{
 		if(!isInitialized())
 		if(!isInitialized())
 		{
 		{
-			if(requiresInitOnRenderThread())
+			if(requiresInitOnCoreThread())
 			{
 			{
 #if CM_DEBUG_MODE
 #if CM_DEBUG_MODE
 				if(CM_THREAD_CURRENT_ID == CoreThread::instance().getCoreThreadId())
 				if(CM_THREAD_CURRENT_ID == CoreThread::instance().getCoreThreadId())
@@ -133,7 +133,7 @@ namespace BansheeEngine
 		}
 		}
 	}
 	}
 
 
-	void CoreObject::setThisPtr(std::shared_ptr<CoreObject> ptrThis)
+	void CoreObject::_setThisPtr(std::shared_ptr<CoreObject> ptrThis)
 	{
 	{
 		mThis = ptrThis;
 		mThis = ptrThis;
 	}
 	}

+ 4 - 4
CamelotCore/Source/CmCoreObjectManager.cpp

@@ -4,13 +4,13 @@
 
 
 namespace BansheeEngine
 namespace BansheeEngine
 {
 {
-	CoreGpuObjectManager::CoreGpuObjectManager()
+	CoreObjectManager::CoreObjectManager()
 		:mNextAvailableID(1)
 		:mNextAvailableID(1)
 	{
 	{
 
 
 	}
 	}
 
 
-	CoreGpuObjectManager::~CoreGpuObjectManager()
+	CoreObjectManager::~CoreObjectManager()
 	{
 	{
 		CM_LOCK_MUTEX(mObjectsMutex);
 		CM_LOCK_MUTEX(mObjectsMutex);
 
 
@@ -25,7 +25,7 @@ namespace BansheeEngine
 		}
 		}
 	}
 	}
 
 
-	UINT64 CoreGpuObjectManager::registerObject(CoreObject* object)
+	UINT64 CoreObjectManager::registerObject(CoreObject* object)
 	{
 	{
 		assert(object != nullptr);
 		assert(object != nullptr);
 
 
@@ -36,7 +36,7 @@ namespace BansheeEngine
 		return mNextAvailableID++;
 		return mNextAvailableID++;
 	}
 	}
 
 
-	void CoreGpuObjectManager::unregisterObject(CoreObject* object)
+	void CoreObjectManager::unregisterObject(CoreObject* object)
 	{
 	{
 		assert(object != nullptr);
 		assert(object != nullptr);
 
 

+ 6 - 0
CamelotCore/Source/CmCoreThread.cpp

@@ -1,5 +1,6 @@
 #include "CmCoreThread.h"
 #include "CmCoreThread.h"
 #include "BsThreadPool.h"
 #include "BsThreadPool.h"
+#include "BsTaskScheduler.h"
 
 
 using namespace std::placeholders;
 using namespace std::placeholders;
 
 
@@ -63,6 +64,8 @@ namespace BansheeEngine
 	void CoreThread::runCoreThread()
 	void CoreThread::runCoreThread()
 	{
 	{
 #if !CM_FORCE_SINGLETHREADED_RENDERING
 #if !CM_FORCE_SINGLETHREADED_RENDERING
+		TaskScheduler::instance().removeWorker(); // One less worker because we are reserving one core for this thread
+
 		mCoreThreadId = CM_THREAD_CURRENT_ID;
 		mCoreThreadId = CM_THREAD_CURRENT_ID;
 		mSyncedCoreAccessor = cm_new<CoreThreadAccessor<CommandQueueSync>>(CM_THREAD_CURRENT_ID);
 		mSyncedCoreAccessor = cm_new<CoreThreadAccessor<CommandQueueSync>>(CM_THREAD_CURRENT_ID);
 
 
@@ -78,10 +81,13 @@ namespace BansheeEngine
 					if(mCoreThreadShutdown)
 					if(mCoreThreadShutdown)
 					{
 					{
 						cm_delete(mSyncedCoreAccessor);
 						cm_delete(mSyncedCoreAccessor);
+						TaskScheduler::instance().addWorker();
 						return;
 						return;
 					}
 					}
 
 
+					TaskScheduler::instance().addWorker(); // Do something else while we wait, otherwise this core will be unused
 					CM_THREAD_WAIT(mCommandReadyCondition, mCommandQueueMutex, lock);
 					CM_THREAD_WAIT(mCommandReadyCondition, mCommandQueueMutex, lock);
+					TaskScheduler::instance().removeWorker();
 				}
 				}
 
 
 				commands = mCommandQueue->flush();
 				commands = mCommandQueue->flush();

+ 2 - 2
CamelotCore/Source/CmFontManager.cpp

@@ -6,7 +6,7 @@ namespace BansheeEngine
 	FontPtr FontManager::create(const Vector<FontData>::type& fontData) const
 	FontPtr FontManager::create(const Vector<FontData>::type& fontData) const
 	{
 	{
 		FontPtr newFont = cm_core_ptr<Font, PoolAlloc>(new (cm_alloc<Font, PoolAlloc>()) Font());
 		FontPtr newFont = cm_core_ptr<Font, PoolAlloc>(new (cm_alloc<Font, PoolAlloc>()) Font());
-		newFont->setThisPtr(newFont);
+		newFont->_setThisPtr(newFont);
 		newFont->initialize(fontData);
 		newFont->initialize(fontData);
 
 
 		return newFont;
 		return newFont;
@@ -15,7 +15,7 @@ namespace BansheeEngine
 	FontPtr FontManager::createEmpty() const
 	FontPtr FontManager::createEmpty() const
 	{
 	{
 		FontPtr newFont = cm_core_ptr<Font, PoolAlloc>(new (cm_alloc<Font, PoolAlloc>()) Font());
 		FontPtr newFont = cm_core_ptr<Font, PoolAlloc>(new (cm_alloc<Font, PoolAlloc>()) Font());
-		newFont->setThisPtr(newFont);
+		newFont->_setThisPtr(newFont);
 
 
 		return newFont;
 		return newFont;
 	}
 	}

+ 1 - 1
CamelotCore/Source/CmGpuProgInclude.cpp

@@ -18,7 +18,7 @@ namespace BansheeEngine
 	{
 	{
 		GpuProgIncludePtr gpuProgIncludePtr = cm_core_ptr<GpuProgInclude, PoolAlloc>(
 		GpuProgIncludePtr gpuProgIncludePtr = cm_core_ptr<GpuProgInclude, PoolAlloc>(
 			new (cm_alloc<GpuProgInclude, PoolAlloc>()) GpuProgInclude(includeString));
 			new (cm_alloc<GpuProgInclude, PoolAlloc>()) GpuProgInclude(includeString));
-		gpuProgIncludePtr->setThisPtr(gpuProgIncludePtr);
+		gpuProgIncludePtr->_setThisPtr(gpuProgIncludePtr);
 		gpuProgIncludePtr->initialize();
 		gpuProgIncludePtr->initialize();
 
 
 		return gpuProgIncludePtr;
 		return gpuProgIncludePtr;

+ 1 - 1
CamelotCore/Source/CmGpuProgramManager.cpp

@@ -44,7 +44,7 @@ namespace BansheeEngine {
 	GpuProgramPtr GpuProgramManager::createProgram(const String& source, const String& entryPoint, const String& language, GpuProgramType gptype, GpuProgramProfile profile)
 	GpuProgramPtr GpuProgramManager::createProgram(const String& source, const String& entryPoint, const String& language, GpuProgramType gptype, GpuProgramProfile profile)
     {
     {
 		GpuProgramPtr prg = create(source, entryPoint, language, gptype, profile);
 		GpuProgramPtr prg = create(source, entryPoint, language, gptype, profile);
-		prg->setThisPtr(prg);
+		prg->_setThisPtr(prg);
 
 
 		// TODO: Gpu programs get initialized by their parent HighLevelGpuProgram. I might handle that more intuitively later but
 		// TODO: Gpu programs get initialized by their parent HighLevelGpuProgram. I might handle that more intuitively later but
 		// it works just fine as it is
 		// it works just fine as it is

+ 5 - 5
CamelotCore/Source/CmHardwareBufferManager.cpp

@@ -46,7 +46,7 @@ namespace BansheeEngine {
     VertexDeclarationPtr HardwareBufferManager::createVertexDeclaration(void)
     VertexDeclarationPtr HardwareBufferManager::createVertexDeclaration(void)
     {
     {
         VertexDeclarationPtr decl = createVertexDeclarationImpl();
         VertexDeclarationPtr decl = createVertexDeclarationImpl();
-		decl->setThisPtr(decl);
+		decl->_setThisPtr(decl);
 		decl->initialize();
 		decl->initialize();
         return decl;
         return decl;
     }
     }
@@ -56,7 +56,7 @@ namespace BansheeEngine {
 		assert (numVerts > 0);
 		assert (numVerts > 0);
 
 
 		VertexBufferPtr vbuf = createVertexBufferImpl(vertexSize, numVerts, usage, streamOut);
 		VertexBufferPtr vbuf = createVertexBufferImpl(vertexSize, numVerts, usage, streamOut);
-		vbuf->setThisPtr(vbuf);
+		vbuf->_setThisPtr(vbuf);
 		vbuf->initialize();
 		vbuf->initialize();
 		return vbuf;
 		return vbuf;
 	}
 	}
@@ -66,7 +66,7 @@ namespace BansheeEngine {
 		assert (numIndexes > 0);
 		assert (numIndexes > 0);
 
 
 		IndexBufferPtr ibuf = createIndexBufferImpl(itype, numIndexes, usage);
 		IndexBufferPtr ibuf = createIndexBufferImpl(itype, numIndexes, usage);
-		ibuf->setThisPtr(ibuf);
+		ibuf->_setThisPtr(ibuf);
 		ibuf->initialize();
 		ibuf->initialize();
 		return ibuf;
 		return ibuf;
 
 
@@ -75,7 +75,7 @@ namespace BansheeEngine {
 	GpuParamBlockBufferPtr HardwareBufferManager::createGpuParamBlockBuffer(UINT32 size, GpuParamBlockUsage usage)
 	GpuParamBlockBufferPtr HardwareBufferManager::createGpuParamBlockBuffer(UINT32 size, GpuParamBlockUsage usage)
 	{
 	{
 		GpuParamBlockBufferPtr paramBlockPtr = createGpuParamBlockBufferImpl();
 		GpuParamBlockBufferPtr paramBlockPtr = createGpuParamBlockBufferImpl();
-		paramBlockPtr->setThisPtr(paramBlockPtr);
+		paramBlockPtr->_setThisPtr(paramBlockPtr);
 		paramBlockPtr->initialize(size, usage);
 		paramBlockPtr->initialize(size, usage);
 
 
 		return paramBlockPtr;
 		return paramBlockPtr;
@@ -85,7 +85,7 @@ namespace BansheeEngine {
 		GpuBufferType type, GpuBufferUsage usage, bool randomGpuWrite, bool useCounter)
 		GpuBufferType type, GpuBufferUsage usage, bool randomGpuWrite, bool useCounter)
 	{
 	{
 		GpuBufferPtr gbuf = createGpuBufferImpl(elementCount, elementSize, type, usage, randomGpuWrite, useCounter);
 		GpuBufferPtr gbuf = createGpuBufferImpl(elementCount, elementSize, type, usage, randomGpuWrite, useCounter);
-		gbuf->setThisPtr(gbuf);
+		gbuf->_setThisPtr(gbuf);
 		gbuf->initialize();
 		gbuf->initialize();
 		return gbuf;
 		return gbuf;
 	}
 	}

+ 3 - 3
CamelotCore/Source/CmHighLevelGpuProgramManager.cpp

@@ -141,7 +141,7 @@ namespace BansheeEngine {
     {
     {
 		HighLevelGpuProgramFactory* factory = getFactory(language);
 		HighLevelGpuProgramFactory* factory = getFactory(language);
         HighLevelGpuProgramPtr ret = factory->create(source, entryPoint, gptype, profile, includes);
         HighLevelGpuProgramPtr ret = factory->create(source, entryPoint, gptype, profile, includes);
-		ret->setThisPtr(ret);
+		ret->_setThisPtr(ret);
 		ret->initialize();
 		ret->initialize();
 
 
         return ret;
         return ret;
@@ -151,7 +151,7 @@ namespace BansheeEngine {
 	{
 	{
 		HighLevelGpuProgramFactory* factory = getFactory(language);
 		HighLevelGpuProgramFactory* factory = getFactory(language);
 		HighLevelGpuProgramPtr ret = factory->create();
 		HighLevelGpuProgramPtr ret = factory->create();
-		ret->setThisPtr(ret);
+		ret->_setThisPtr(ret);
 		ret->initialize();
 		ret->initialize();
 
 
 		return ret;
 		return ret;
@@ -161,7 +161,7 @@ namespace BansheeEngine {
 	{
 	{
 		HighLevelGpuProgramFactory* factory = getFactory(language);
 		HighLevelGpuProgramFactory* factory = getFactory(language);
 		HighLevelGpuProgramPtr ret = factory->create();
 		HighLevelGpuProgramPtr ret = factory->create();
-		ret->setThisPtr(ret);
+		ret->_setThisPtr(ret);
 
 
 		return ret;
 		return ret;
 	}
 	}

+ 2 - 2
CamelotCore/Source/CmMeshHeap.cpp

@@ -36,7 +36,7 @@ namespace BansheeEngine
 		MeshHeap* meshHeap = new (cm_alloc<MeshHeap>()) MeshHeap(numVertices, numIndices, vertexDesc, indexType); 
 		MeshHeap* meshHeap = new (cm_alloc<MeshHeap>()) MeshHeap(numVertices, numIndices, vertexDesc, indexType); 
 		MeshHeapPtr meshHeapPtr = cm_core_ptr<MeshHeap, GenAlloc>(meshHeap);
 		MeshHeapPtr meshHeapPtr = cm_core_ptr<MeshHeap, GenAlloc>(meshHeap);
 
 
-		meshHeapPtr->setThisPtr(meshHeapPtr);
+		meshHeapPtr->_setThisPtr(meshHeapPtr);
 		meshHeapPtr->initialize();
 		meshHeapPtr->initialize();
 
 
 		return meshHeapPtr;
 		return meshHeapPtr;
@@ -71,7 +71,7 @@ namespace BansheeEngine
 		TransientMesh* transientMesh = new (cm_alloc<TransientMesh>()) TransientMesh(thisPtr, meshIdx, meshData->getNumVertices(), meshData->getNumIndices(), drawOp); 
 		TransientMesh* transientMesh = new (cm_alloc<TransientMesh>()) TransientMesh(thisPtr, meshIdx, meshData->getNumVertices(), meshData->getNumIndices(), drawOp); 
 		TransientMeshPtr transientMeshPtr = cm_core_ptr<TransientMesh, GenAlloc>(transientMesh);
 		TransientMeshPtr transientMeshPtr = cm_core_ptr<TransientMesh, GenAlloc>(transientMesh);
 
 
-		transientMeshPtr->setThisPtr(transientMeshPtr);
+		transientMeshPtr->_setThisPtr(transientMeshPtr);
 		transientMeshPtr->initialize();
 		transientMeshPtr->initialize();
 
 
 		mMeshes[meshIdx] = transientMeshPtr;
 		mMeshes[meshIdx] = transientMeshPtr;

+ 4 - 4
CamelotCore/Source/CmMeshManager.cpp

@@ -22,7 +22,7 @@ namespace BansheeEngine
 	{
 	{
 		MeshPtr mesh = cm_core_ptr<Mesh, PoolAlloc>(new (cm_alloc<Mesh, PoolAlloc>()) 
 		MeshPtr mesh = cm_core_ptr<Mesh, PoolAlloc>(new (cm_alloc<Mesh, PoolAlloc>()) 
 			Mesh(numVertices, numIndices, vertexDesc, bufferType, drawOp, indexType));
 			Mesh(numVertices, numIndices, vertexDesc, bufferType, drawOp, indexType));
-		mesh->setThisPtr(mesh);
+		mesh->_setThisPtr(mesh);
 		mesh->initialize();
 		mesh->initialize();
 
 
 		return mesh;
 		return mesh;
@@ -33,7 +33,7 @@ namespace BansheeEngine
 	{
 	{
 		MeshPtr mesh = cm_core_ptr<Mesh, PoolAlloc>(new (cm_alloc<Mesh, PoolAlloc>()) 
 		MeshPtr mesh = cm_core_ptr<Mesh, PoolAlloc>(new (cm_alloc<Mesh, PoolAlloc>()) 
 			Mesh(numVertices, numIndices, vertexDesc, initialData, bufferType, drawOp, indexType));
 			Mesh(numVertices, numIndices, vertexDesc, initialData, bufferType, drawOp, indexType));
-		mesh->setThisPtr(mesh);
+		mesh->_setThisPtr(mesh);
 		mesh->initialize();
 		mesh->initialize();
 
 
 		return mesh;
 		return mesh;
@@ -42,7 +42,7 @@ namespace BansheeEngine
 	MeshPtr MeshManager::create(const MeshDataPtr& initialData, MeshBufferType bufferType, DrawOperationType drawOp)
 	MeshPtr MeshManager::create(const MeshDataPtr& initialData, MeshBufferType bufferType, DrawOperationType drawOp)
 	{
 	{
 		MeshPtr mesh = cm_core_ptr<Mesh, PoolAlloc>(new (cm_alloc<Mesh, PoolAlloc>()) Mesh(initialData, bufferType, drawOp));
 		MeshPtr mesh = cm_core_ptr<Mesh, PoolAlloc>(new (cm_alloc<Mesh, PoolAlloc>()) Mesh(initialData, bufferType, drawOp));
-		mesh->setThisPtr(mesh);
+		mesh->_setThisPtr(mesh);
 		mesh->initialize();
 		mesh->initialize();
 
 
 		return mesh;
 		return mesh;
@@ -51,7 +51,7 @@ namespace BansheeEngine
 	MeshPtr MeshManager::createEmpty()
 	MeshPtr MeshManager::createEmpty()
 	{
 	{
 		MeshPtr mesh = cm_core_ptr<Mesh, PoolAlloc>(new (cm_alloc<Mesh, PoolAlloc>()) Mesh());
 		MeshPtr mesh = cm_core_ptr<Mesh, PoolAlloc>(new (cm_alloc<Mesh, PoolAlloc>()) Mesh());
-		mesh->setThisPtr(mesh);
+		mesh->_setThisPtr(mesh);
 
 
 		return mesh;
 		return mesh;
 	}
 	}

+ 8 - 8
CamelotCore/Source/CmRenderStateManager.cpp

@@ -9,7 +9,7 @@ namespace BansheeEngine
 	SamplerStatePtr RenderStateManager::createSamplerState(const SAMPLER_STATE_DESC& desc) const
 	SamplerStatePtr RenderStateManager::createSamplerState(const SAMPLER_STATE_DESC& desc) const
 	{
 	{
 		SamplerStatePtr samplerState = createSamplerStateImpl();
 		SamplerStatePtr samplerState = createSamplerStateImpl();
-		samplerState->setThisPtr(samplerState);
+		samplerState->_setThisPtr(samplerState);
 		samplerState->initialize(desc);
 		samplerState->initialize(desc);
 
 
 		return samplerState;
 		return samplerState;
@@ -18,7 +18,7 @@ namespace BansheeEngine
 	DepthStencilStatePtr RenderStateManager::createDepthStencilState(const DEPTH_STENCIL_STATE_DESC& desc) const
 	DepthStencilStatePtr RenderStateManager::createDepthStencilState(const DEPTH_STENCIL_STATE_DESC& desc) const
 	{
 	{
 		DepthStencilStatePtr depthStencilState = createDepthStencilStateImpl();
 		DepthStencilStatePtr depthStencilState = createDepthStencilStateImpl();
-		depthStencilState->setThisPtr(depthStencilState);
+		depthStencilState->_setThisPtr(depthStencilState);
 		depthStencilState->initialize(desc);
 		depthStencilState->initialize(desc);
 
 
 		return depthStencilState;
 		return depthStencilState;
@@ -27,7 +27,7 @@ namespace BansheeEngine
 	RasterizerStatePtr RenderStateManager::createRasterizerState(const RASTERIZER_STATE_DESC& desc) const
 	RasterizerStatePtr RenderStateManager::createRasterizerState(const RASTERIZER_STATE_DESC& desc) const
 	{
 	{
 		RasterizerStatePtr rasterizerState = createRasterizerStateImpl();
 		RasterizerStatePtr rasterizerState = createRasterizerStateImpl();
-		rasterizerState->setThisPtr(rasterizerState);
+		rasterizerState->_setThisPtr(rasterizerState);
 		rasterizerState->initialize(desc);
 		rasterizerState->initialize(desc);
 
 
 		return rasterizerState;
 		return rasterizerState;
@@ -36,7 +36,7 @@ namespace BansheeEngine
 	BlendStatePtr RenderStateManager::createBlendState(const BLEND_STATE_DESC& desc) const
 	BlendStatePtr RenderStateManager::createBlendState(const BLEND_STATE_DESC& desc) const
 	{
 	{
 		BlendStatePtr blendState = createBlendStateImpl();
 		BlendStatePtr blendState = createBlendStateImpl();
-		blendState->setThisPtr(blendState);
+		blendState->_setThisPtr(blendState);
 		blendState->initialize(desc);
 		blendState->initialize(desc);
 
 
 		return blendState;
 		return blendState;
@@ -45,7 +45,7 @@ namespace BansheeEngine
 	SamplerStatePtr RenderStateManager::createEmptySamplerState() const
 	SamplerStatePtr RenderStateManager::createEmptySamplerState() const
 	{
 	{
 		SamplerStatePtr samplerState = createSamplerStateImpl();
 		SamplerStatePtr samplerState = createSamplerStateImpl();
-		samplerState->setThisPtr(samplerState);
+		samplerState->_setThisPtr(samplerState);
 
 
 		return samplerState;
 		return samplerState;
 	}
 	}
@@ -53,7 +53,7 @@ namespace BansheeEngine
 	DepthStencilStatePtr RenderStateManager::createEmptyDepthStencilState() const
 	DepthStencilStatePtr RenderStateManager::createEmptyDepthStencilState() const
 	{
 	{
 		DepthStencilStatePtr depthStencilState = createDepthStencilStateImpl();
 		DepthStencilStatePtr depthStencilState = createDepthStencilStateImpl();
-		depthStencilState->setThisPtr(depthStencilState);
+		depthStencilState->_setThisPtr(depthStencilState);
 
 
 		return depthStencilState;
 		return depthStencilState;
 	}
 	}
@@ -61,7 +61,7 @@ namespace BansheeEngine
 	RasterizerStatePtr RenderStateManager::createEmptyRasterizerState() const
 	RasterizerStatePtr RenderStateManager::createEmptyRasterizerState() const
 	{
 	{
 		RasterizerStatePtr rasterizerState = createRasterizerStateImpl();
 		RasterizerStatePtr rasterizerState = createRasterizerStateImpl();
-		rasterizerState->setThisPtr(rasterizerState);
+		rasterizerState->_setThisPtr(rasterizerState);
 
 
 		return rasterizerState;
 		return rasterizerState;
 	}
 	}
@@ -69,7 +69,7 @@ namespace BansheeEngine
 	BlendStatePtr RenderStateManager::createEmptyBlendState() const
 	BlendStatePtr RenderStateManager::createEmptyBlendState() const
 	{
 	{
 		BlendStatePtr blendState = createBlendStateImpl();
 		BlendStatePtr blendState = createBlendStateImpl();
-		blendState->setThisPtr(blendState);
+		blendState->_setThisPtr(blendState);
 
 
 		return blendState;
 		return blendState;
 	}
 	}

+ 1 - 1
CamelotCore/Source/CmRenderWindowManager.cpp

@@ -16,7 +16,7 @@ namespace BansheeEngine
 	RenderWindowPtr RenderWindowManager::create(RENDER_WINDOW_DESC& desc, RenderWindowPtr parentWindow)
 	RenderWindowPtr RenderWindowManager::create(RENDER_WINDOW_DESC& desc, RenderWindowPtr parentWindow)
 	{
 	{
 		RenderWindowPtr renderWindow = createImpl(desc, parentWindow);
 		RenderWindowPtr renderWindow = createImpl(desc, parentWindow);
-		renderWindow->setThisPtr(renderWindow);
+		renderWindow->_setThisPtr(renderWindow);
 		renderWindow->initialize();
 		renderWindow->initialize();
 
 
 		{
 		{

+ 27 - 97
CamelotCore/Source/CmResources.cpp

@@ -4,98 +4,22 @@
 #include "CmException.h"
 #include "CmException.h"
 #include "CmFileSerializer.h"
 #include "CmFileSerializer.h"
 #include "CmFileSystem.h"
 #include "CmFileSystem.h"
+#include "BsTaskScheduler.h"
 #include "CmUUID.h"
 #include "CmUUID.h"
 #include "CmPath.h"
 #include "CmPath.h"
 #include "CmDebug.h"
 #include "CmDebug.h"
 
 
 namespace BansheeEngine
 namespace BansheeEngine
 {
 {
-	bool Resources::ResourceRequestHandler::canHandleRequest(const WorkQueue::Request* req, const WorkQueue* srcQ)
-	{
-		return true;
-	}
-
-	WorkQueue::Response* Resources::ResourceRequestHandler::handleRequest(WorkQueue::Request* req, const WorkQueue* srcQ)
-	{
-		ResourceLoadRequestPtr resRequest = boost::any_cast<ResourceLoadRequestPtr>(req->getData());
-
-		ResourceLoadResponsePtr resResponse = cm_shared_ptr<Resources::ResourceLoadResponse, ScratchAlloc>();
-		resResponse->rawResource = gResources().loadFromDiskAndDeserialize(resRequest->filePath);
-
-		return cm_new<WorkQueue::Response, ScratchAlloc>(req, true, resResponse);
-	}
-
-	bool Resources::ResourceResponseHandler::canHandleResponse(const WorkQueue::Response* res, const WorkQueue* srcQ)
-	{
-		return true;
-	}
-
-	void Resources::ResourceResponseHandler::handleResponse(const WorkQueue::Response* res, const WorkQueue* srcQ)
-	{
-		ResourceLoadRequestPtr resRequest = boost::any_cast<ResourceLoadRequestPtr>(res->getRequest()->getData());
-
-		if(res->getRequest()->getAborted())
-			return;
-
-		gResources().notifyResourceLoadingFinished(resRequest->resource);
-
-		if(res->succeeded())
-		{
-			ResourceLoadResponsePtr resResponse = boost::any_cast<ResourceLoadResponsePtr>(res->getData());
-			
-			// This should be thread safe without any sync primitives, if other threads read a few cycles out of date value
-			// and think this resource isn't created when it really is, it hardly makes any difference
-			resRequest->resource._setHandleData(resResponse->rawResource, resRequest->resource.getUUID());
-
-			gResources().notifyNewResourceLoaded(resRequest->resource);
-		}
-		else
-		{
-			gDebug().logWarning("Resource load request failed.");
-		}
-	}
-
 	Resources::Resources()
 	Resources::Resources()
-		:mRequestHandler(nullptr), mResponseHandler(nullptr), mWorkQueue(nullptr)
 	{
 	{
 		mDefaultResourceManifest = ResourceManifest::create("Default");
 		mDefaultResourceManifest = ResourceManifest::create("Default");
 		mResourceManifests.push_back(mDefaultResourceManifest);
 		mResourceManifests.push_back(mDefaultResourceManifest);
-
-		mWorkQueue = cm_new<WorkQueue>();
-		mWorkQueueChannel = mWorkQueue->getChannel("Resources");
-		mRequestHandler = cm_new<ResourceRequestHandler>();
-		mResponseHandler = cm_new<ResourceResponseHandler>();
-
-		mWorkQueue->addRequestHandler(mWorkQueueChannel, mRequestHandler);
-		mWorkQueue->addResponseHandler(mWorkQueueChannel, mResponseHandler);
-
-		// TODO Low priority - I might want to make this more global so other classes can use it
-#if CM_THREAD_SUPPORT
-		mWorkQueue->setWorkerThreadCount(CM_THREAD_HARDWARE_CONCURRENCY);
-#endif
-		mWorkQueue->startup();
 	}
 	}
 
 
 	Resources::~Resources()
 	Resources::~Resources()
 	{
 	{
-		if(mWorkQueue)
-		{
-			if(mRequestHandler != nullptr)
-				mWorkQueue->removeRequestHandler(mWorkQueueChannel, mRequestHandler);
-
-			if(mResponseHandler != nullptr)
-				mWorkQueue->removeResponseHandler(mWorkQueueChannel, mResponseHandler);
-
-			mWorkQueue->shutdown();
-
-			cm_delete(mWorkQueue);
-		}
 
 
-		if(mRequestHandler != nullptr)
-			cm_delete(mRequestHandler);
-
-		if(mResponseHandler != nullptr)
-			cm_delete(mResponseHandler);
 	}
 	}
 
 
 	HResource Resources::load(const WString& filePath)
 	HResource Resources::load(const WString& filePath)
@@ -189,7 +113,7 @@ namespace BansheeEngine
 			auto iterFind2 = mInProgressResources.find(uuid);
 			auto iterFind2 = mInProgressResources.find(uuid);
 			if(iterFind2 != mInProgressResources.end()) 
 			if(iterFind2 != mInProgressResources.end()) 
 			{
 			{
-				existingResource = iterFind2->second.resource;
+				existingResource = iterFind2->second;
 				resourceLoadingInProgress = true;
 				resourceLoadingInProgress = true;
 			}
 			}
 		}
 		}
@@ -215,21 +139,24 @@ namespace BansheeEngine
 
 
 		HResource newResource(uuid);
 		HResource newResource(uuid);
 
 
-		ResourceLoadRequestPtr resRequest = cm_shared_ptr<Resources::ResourceLoadRequest, ScratchAlloc>();
-		resRequest->filePath = filePath;
-		resRequest->resource = newResource;
-
-		WorkQueue::RequestID requestId = mWorkQueue->peekNextFreeRequestId();
-		ResourceAsyncOp newAsyncOp;
-		newAsyncOp.resource = newResource;
-		newAsyncOp.requestID = requestId;
-
 		{
 		{
 			CM_LOCK_MUTEX(mInProgressResourcesMutex);
 			CM_LOCK_MUTEX(mInProgressResourcesMutex);
-			mInProgressResources[uuid] = newAsyncOp;
+			mInProgressResources[uuid] = newResource;
+		}
+
+		if(synchronous)
+		{
+			loadCallback(filePath, newResource);
+		}
+		else
+		{
+			String fileName = toString(Path::getFilename(filePath));
+			String taskName = "Resource load: " + fileName;
+
+			TaskPtr task = Task::create(taskName, std::bind(&Resources::loadCallback, this, filePath, std::ref(newResource)));
+			TaskScheduler::instance().addTask(task);
 		}
 		}
 
 
-		mWorkQueue->addRequest(mWorkQueueChannel, resRequest, 0, synchronous);
 		return newResource;
 		return newResource;
 	}
 	}
 
 
@@ -359,18 +286,21 @@ namespace BansheeEngine
 		return false;
 		return false;
 	}
 	}
 
 
-	void Resources::notifyResourceLoadingFinished(HResource& handle)
+	void Resources::loadCallback(const WString& filePath, HResource& resource)
 	{
 	{
-		CM_LOCK_MUTEX(mInProgressResourcesMutex);
+		ResourcePtr rawResource = loadFromDiskAndDeserialize(filePath);
 
 
-		mInProgressResources.erase(handle.getUUID());
-	}
+		{
+			CM_LOCK_MUTEX(mInProgressResourcesMutex);
+			mInProgressResources.erase(resource.getUUID());
+		}
 
 
-	void Resources::notifyNewResourceLoaded(HResource& handle)
-	{
-		CM_LOCK_MUTEX(mLoadedResourceMutex);
+		resource._setHandleData(rawResource, resource.getUUID());
 
 
-		mLoadedResources[handle.getUUID()] = handle;
+		{
+			CM_LOCK_MUTEX(mLoadedResourceMutex);
+			mLoadedResources[resource.getUUID()] = resource;
+		}
 	}
 	}
 
 
 	CM_EXPORT Resources& gResources()
 	CM_EXPORT Resources& gResources()

+ 1 - 1
CamelotCore/Source/CmShader.cpp

@@ -207,7 +207,7 @@ namespace BansheeEngine
 	ShaderPtr Shader::create(const String& name)
 	ShaderPtr Shader::create(const String& name)
 	{
 	{
 		ShaderPtr newShader = cm_core_ptr<Shader, PoolAlloc>(new (cm_alloc<Shader, PoolAlloc>()) Shader(name));
 		ShaderPtr newShader = cm_core_ptr<Shader, PoolAlloc>(new (cm_alloc<Shader, PoolAlloc>()) Shader(name));
-		newShader->setThisPtr(newShader);
+		newShader->_setThisPtr(newShader);
 		newShader->initialize();
 		newShader->initialize();
 
 
 		return newShader;
 		return newShader;

+ 1 - 1
CamelotCore/Source/CmTexture.cpp

@@ -250,7 +250,7 @@ namespace BansheeEngine
 	TextureViewPtr Texture::createView()
 	TextureViewPtr Texture::createView()
 	{
 	{
 		TextureViewPtr viewPtr = cm_core_ptr<TextureView, PoolAlloc>(new (cm_alloc<TextureView, PoolAlloc>()) TextureView());
 		TextureViewPtr viewPtr = cm_core_ptr<TextureView, PoolAlloc>(new (cm_alloc<TextureView, PoolAlloc>()) TextureView());
-		viewPtr->setThisPtr(viewPtr);
+		viewPtr->_setThisPtr(viewPtr);
 
 
 		return viewPtr;
 		return viewPtr;
 	}
 	}

+ 4 - 4
CamelotCore/Source/CmTextureManager.cpp

@@ -68,7 +68,7 @@ namespace BansheeEngine
 		UINT32 fsaa, const String& fsaaHint)
 		UINT32 fsaa, const String& fsaaHint)
     {
     {
         TexturePtr ret = createTextureImpl();
         TexturePtr ret = createTextureImpl();
-		ret->setThisPtr(ret);
+		ret->_setThisPtr(ret);
 		ret->initialize(texType, width, height, depth, static_cast<size_t>(numMipmaps), format, usage, hwGamma, fsaa, fsaaHint);
 		ret->initialize(texType, width, height, depth, static_cast<size_t>(numMipmaps), format, usage, hwGamma, fsaa, fsaaHint);
 
 
 		return ret;
 		return ret;
@@ -77,7 +77,7 @@ namespace BansheeEngine
 	TexturePtr TextureManager::createEmpty()
 	TexturePtr TextureManager::createEmpty()
 	{
 	{
 		TexturePtr texture = createTextureImpl();
 		TexturePtr texture = createTextureImpl();
-		texture->setThisPtr(texture);
+		texture->_setThisPtr(texture);
 
 
 		return texture;
 		return texture;
 	}
 	}
@@ -113,7 +113,7 @@ namespace BansheeEngine
 	RenderTexturePtr TextureManager::createRenderTexture(const RENDER_TEXTURE_DESC& desc)
 	RenderTexturePtr TextureManager::createRenderTexture(const RENDER_TEXTURE_DESC& desc)
 	{
 	{
 		RenderTexturePtr newRT = createRenderTextureImpl();
 		RenderTexturePtr newRT = createRenderTextureImpl();
-		newRT->setThisPtr(newRT);
+		newRT->_setThisPtr(newRT);
 		newRT->initialize(desc);
 		newRT->initialize(desc);
 
 
 		return newRT;
 		return newRT;
@@ -122,7 +122,7 @@ namespace BansheeEngine
 	MultiRenderTexturePtr TextureManager::createEmptyMultiRenderTexture()
 	MultiRenderTexturePtr TextureManager::createEmptyMultiRenderTexture()
 	{
 	{
 		MultiRenderTexturePtr newRT = createMultiRenderTextureImpl();
 		MultiRenderTexturePtr newRT = createMultiRenderTextureImpl();
-		newRT->setThisPtr(newRT);
+		newRT->_setThisPtr(newRT);
 
 
 		return newRT;
 		return newRT;
 	}
 	}

+ 1 - 1
CamelotD3D11RenderSystem/Source/CmD3D11Texture.cpp

@@ -688,7 +688,7 @@ namespace BansheeEngine
 	TextureViewPtr D3D11Texture::createView()
 	TextureViewPtr D3D11Texture::createView()
 	{
 	{
 		TextureViewPtr viewPtr = cm_core_ptr<D3D11TextureView, PoolAlloc>(new (cm_alloc<D3D11TextureView, PoolAlloc>()) D3D11TextureView());
 		TextureViewPtr viewPtr = cm_core_ptr<D3D11TextureView, PoolAlloc>(new (cm_alloc<D3D11TextureView, PoolAlloc>()) D3D11TextureView());
-		viewPtr->setThisPtr(viewPtr);
+		viewPtr->_setThisPtr(viewPtr);
 
 
 		return viewPtr;
 		return viewPtr;
 	}
 	}

+ 0 - 2
CamelotUtility/CamelotUtility.vcxproj

@@ -265,7 +265,6 @@
     <ClCompile Include="Source\CmStringTable.cpp" />
     <ClCompile Include="Source\CmStringTable.cpp" />
     <ClCompile Include="Source\CmTexAtlasGenerator.cpp" />
     <ClCompile Include="Source\CmTexAtlasGenerator.cpp" />
     <ClCompile Include="Source\CmUUID.cpp" />
     <ClCompile Include="Source\CmUUID.cpp" />
-    <ClCompile Include="Source\CmWorkQueue.cpp" />
     <ClCompile Include="Source\Win32\CmTimer.cpp" />
     <ClCompile Include="Source\Win32\CmTimer.cpp" />
     <ClInclude Include="Include\BsTaskScheduler.h" />
     <ClInclude Include="Include\BsTaskScheduler.h" />
     <ClInclude Include="Include\BsThreadPool.h" />
     <ClInclude Include="Include\BsThreadPool.h" />
@@ -333,7 +332,6 @@
     <ClCompile Include="Source\CmIReflectable.cpp" />
     <ClCompile Include="Source\CmIReflectable.cpp" />
     <ClCompile Include="Source\CmRTTIField.cpp" />
     <ClCompile Include="Source\CmRTTIField.cpp" />
     <ClCompile Include="Source\CmRTTIType.cpp" />
     <ClCompile Include="Source\CmRTTIType.cpp" />
-    <ClInclude Include="Include\CmWorkQueue.h" />
     <ClInclude Include="Include\CmTexAtlasGenerator.h" />
     <ClInclude Include="Include\CmTexAtlasGenerator.h" />
     <ClCompile Include="Source\CmHString.cpp" />
     <ClCompile Include="Source\CmHString.cpp" />
   </ItemGroup>
   </ItemGroup>

+ 0 - 6
CamelotUtility/CamelotUtility.vcxproj.filters

@@ -171,9 +171,6 @@
     <ClInclude Include="Include\CmTime.h">
     <ClInclude Include="Include\CmTime.h">
       <Filter>Header Files</Filter>
       <Filter>Header Files</Filter>
     </ClInclude>
     </ClInclude>
-    <ClInclude Include="Include\CmWorkQueue.h">
-      <Filter>Header Files</Filter>
-    </ClInclude>
     <ClInclude Include="Include\CmAsyncOp.h">
     <ClInclude Include="Include\CmAsyncOp.h">
       <Filter>Header Files\Threading</Filter>
       <Filter>Header Files\Threading</Filter>
     </ClInclude>
     </ClInclude>
@@ -314,9 +311,6 @@
     <ClCompile Include="Source\CmUUID.cpp">
     <ClCompile Include="Source\CmUUID.cpp">
       <Filter>Source Files</Filter>
       <Filter>Source Files</Filter>
     </ClCompile>
     </ClCompile>
-    <ClCompile Include="Source\CmWorkQueue.cpp">
-      <Filter>Source Files</Filter>
-    </ClCompile>
     <ClCompile Include="Source\CmManagedDataBlock.cpp">
     <ClCompile Include="Source\CmManagedDataBlock.cpp">
       <Filter>Source Files</Filter>
       <Filter>Source Files</Filter>
     </ClCompile>
     </ClCompile>

+ 150 - 0
CamelotUtility/Include/BsTaskScheduler.h

@@ -0,0 +1,150 @@
+#pragma once
+
+#include "CmPrerequisitesUtil.h"
+#include "CmModule.h"
+
+namespace BansheeEngine
+{
+	/**
+	 * @brief	Task priority. Tasks with higher priority will get executed sooner.
+	 */
+	enum class TaskPriority
+	{
+		VeryLow = 98,
+		Low = 99,
+		Normal = 100,
+		High = 101,
+		VeryHigh = 102
+	};
+
+	/**
+	 * @brief	Represents a single task that may be queued in the TaskScheduler.
+	 * 			
+	 * @note	Thread safe.
+	 */
+	class CM_UTILITY_EXPORT Task
+	{
+		struct PrivatelyConstruct {};
+
+	public:
+		Task(const PrivatelyConstruct& dummy, const String& name, std::function<void()> taskWorker, 
+			TaskPriority priority, TaskPtr dependency);
+
+		/**
+		 * @brief	Creates a new task. Task should be provided to TaskScheduler in order for it
+		 * 			to start.
+		 *
+		 * @param	name		Name you can use to more easily identify the task.
+		 * @param	taskWorker	Worker method that does all of the work in the task.
+		 * @param	priority  	(optional) Higher priority means the tasks will be executed sooner.
+		 * @param	dependency	(optional) Task dependency if one exists. If provided the task will
+		 * 						not be executed until its dependency is complete.
+		 */
+		static TaskPtr create(const String& name, std::function<void()> taskWorker, TaskPriority priority = TaskPriority::Normal, 
+			TaskPtr dependency = nullptr);
+
+		/**
+		 * @brief	Returns true if the task has completed.
+		 */
+		bool isComplete() const;
+
+		/**
+		 * @brief	Returns true if the task has been canceled.
+		 */
+		bool isCanceled() const;
+
+		/**
+		 * @brief	Blocks the current thread until the task has completed. 
+		 * 			
+		 * @note	While waiting adds a new worker thread, so that the blocking threads core can be utilized.
+		 */
+		void wait();
+
+		/**
+		 * @brief	Cancels the task and removes it from the TaskSchedulers queue.
+		 */
+		void cancel();
+
+	private:
+		friend class TaskScheduler;
+
+		String mName;
+		TaskPriority mPriority;
+		UINT32 mTaskId;
+		std::function<void()> mTaskWorker;
+		TaskPtr mTaskDependency;
+		std::atomic<UINT32> mState; /**< 0 - Inactive, 1 - In progress, 2 - Completed, 3 - Canceled */
+
+		TaskScheduler* mParent;
+	};
+
+	/**
+	 * @brief	Represents a task scheduler running on multiple threads. You may queue
+	 * 			tasks on it from any thread and they will be executed in user specified order
+	 * 			on any available thread.
+	 * 			
+	 * @note	Thread safe.
+	 * 			
+	 *			This type of task scheduler uses a global queue and is best used for coarse granularity of tasks.
+	 *			(Number of tasks in the order of hundreds. Higher number of tasks might require different queuing and
+	 *			locking mechanism, potentially at the cost of flexibility.)
+	 *			
+	 *			By default the task scheduler will create as many threads as there are physical CPU cores. You may add or remove
+	 *			threads using addWorker/removeWorker methods.
+	 */
+	class CM_UTILITY_EXPORT TaskScheduler : public Module<TaskScheduler>
+	{
+	public:
+		TaskScheduler();
+		~TaskScheduler();
+
+		/**
+		 * @brief	Queues a new task.
+		 */
+		void addTask(const TaskPtr& task);
+
+		/**
+		 * @brief	Adds a new worker thread which will be used for executing queued tasks.
+		 */
+		void addWorker();
+
+		/**
+		 * @brief	Removes a worker thread (as soon as its current task is finished).
+		 */
+		void removeWorker();
+
+	protected:
+		friend class Task;
+
+		/**
+		 * @brief	Main task scheduler method that dispatches tasks to other threads.
+		 */
+		void runMain();
+
+		/**
+		 * @brief	Worker method that runs a single task.
+		 */
+		void runTask(const TaskPtr& task);
+
+		/**
+		 * @brief	Blocks the calling thread until the specified task has completed.
+		 */
+		void waitUntilComplete(const Task* task);
+
+		/**
+		 * @brief	Method used for sorting tasks.
+		 */
+		static bool taskCompare(const TaskPtr& lhs, const TaskPtr& rhs);
+
+		Set<TaskPtr, std::function<bool(const TaskPtr&, const TaskPtr&)>>::type mTaskQueue;
+		UINT32 mNumActiveTasks;
+		UINT32 mMaxActiveTasks;
+		UINT32 mNextTaskId;
+		bool mShutdown;
+
+		CM_MUTEX(mReadyMutex);
+		CM_MUTEX(mCompleteMutex);
+		CM_THREAD_SYNCHRONISER(mTaskReadyCond);
+		CM_THREAD_SYNCHRONISER(mTaskCompleteCond);
+	};
+}

+ 126 - 1
CamelotUtility/Include/BsThreadPool.h

@@ -5,25 +5,69 @@
 
 
 namespace BansheeEngine
 namespace BansheeEngine
 {
 {
+	/**
+	 * @brief	Wrapper around a thread that is used within ThreadPool.
+	 */
 	class CM_UTILITY_EXPORT PooledThread
 	class CM_UTILITY_EXPORT PooledThread
 	{
 	{
 	public:
 	public:
 		PooledThread(const String& name);
 		PooledThread(const String& name);
 		virtual ~PooledThread();
 		virtual ~PooledThread();
 
 
+		/**
+		 * @brief	Initializes the pooled thread. Must be called
+		 * 			right after the object is constructed.
+		 */
 		void initialize();
 		void initialize();
+
+		/**
+		 * @brief	Starts executing the given worker method.
+		 *
+		 * @note	Caller must ensure worker method is not null and that the thread
+		 * 			is currently idle, otherwise undefined behavior will occur.
+		 */
 		void start(std::function<void()> workerMethod);
 		void start(std::function<void()> workerMethod);
-		void run();
+
+		/**
+		 * @brief	Attempts to join the currently running thread and destroys it. Caller must ensure
+		 * 			that any worker method currently running properly returns, otherwise this
+		 * 			will block indefinitely.
+		 */
 		void destroy();
 		void destroy();
 
 
+		/**
+		 * @brief	Returns true if the thread is idle and new worker method can be scheduled on it.
+		 */
 		bool isIdle();
 		bool isIdle();
+
+		/**
+		 * @brief	Returns how long has the thread been idle. Value is undefined if thread is not idle.
+		 */
 		time_t idleTime();
 		time_t idleTime();
 
 
+		/**
+		 * @brief	Sets a name of the thread.
+		 */
 		void setName(const String& name);
 		void setName(const String& name);
 
 
+		/**
+		 * @brief	Called when the thread is first created.
+		 */
 		virtual void onThreadStarted(const String& name) = 0;
 		virtual void onThreadStarted(const String& name) = 0;
+
+		/**
+		 * @brief	Called when the thread is being shut down.
+		 */
 		virtual void onThreadEnded(const String& name) = 0;
 		virtual void onThreadEnded(const String& name) = 0;
 
 
+	protected:
+		/**
+		 * @brief	Primary worker method that is ran when the thread is first
+		 * 			initialized.
+		 */
+		void run();
+
+	protected:
 		std::function<void()> mWorkerMethod;
 		std::function<void()> mWorkerMethod;
 
 
 		String mName;
 		String mName;
@@ -39,6 +83,12 @@ namespace BansheeEngine
 		CM_THREAD_SYNCHRONISER(mReadyCond);
 		CM_THREAD_SYNCHRONISER(mReadyCond);
 	};
 	};
 
 
+	/**
+	 * @copydoc	PooledThread
+	 * 			
+	 * @tparam	ThreadPolicy Allows you specify a policy with methods that will get called
+	 * 						 whenever a new thread is created or when a thread is destroyed.
+	 */
 	template<class ThreadPolicy>
 	template<class ThreadPolicy>
 	class TPooledThread : public PooledThread
 	class TPooledThread : public PooledThread
 	{
 	{
@@ -47,38 +97,101 @@ namespace BansheeEngine
 			:PooledThread(name)
 			:PooledThread(name)
 		{ }
 		{ }
 
 
+		/**
+		 * @copydoc	PooledThread::onThreadStarted
+		 */
 		void onThreadStarted(const String& name)
 		void onThreadStarted(const String& name)
 		{
 		{
 			ThreadPolicy::onThreadStarted(name);
 			ThreadPolicy::onThreadStarted(name);
 		}
 		}
 
 
+		/**
+		 * @copydoc	PooledThread::onThreadEnded
+		 */
 		void onThreadEnded(const String& name)
 		void onThreadEnded(const String& name)
 		{
 		{
 			ThreadPolicy::onThreadEnded(name);
 			ThreadPolicy::onThreadEnded(name);
 		}
 		}
 	};
 	};
 
 
+	/**
+	 * @brief	Class that maintains a pool of threads we can easily retrieve and use
+	 * 			for any task. This saves on the cost of creating and destroying threads.
+	 */
 	class CM_UTILITY_EXPORT ThreadPool : public Module<ThreadPool>
 	class CM_UTILITY_EXPORT ThreadPool : public Module<ThreadPool>
 	{
 	{
 	public:
 	public:
+		/**
+		 * @brief	Constructs a new thread pool
+		 *
+		 * @param	threadCapacity	Default thread capacity, the pool will always
+		 * 							try to keep this many threads available.
+		 * @param	maxCapacity   	(optional) Maximum number of threads the pool can create.
+		 * 							If we go over this limit an exception will be thrown.
+		 * @param	idleTimeout   	(optional) How many seconds do threads need to be idle before
+		 * 							we remove them from the pool
+		 */
 		ThreadPool(UINT32 threadCapacity, UINT32 maxCapacity = 16, UINT32 idleTimeout = 60);
 		ThreadPool(UINT32 threadCapacity, UINT32 maxCapacity = 16, UINT32 idleTimeout = 60);
 		virtual ~ThreadPool();
 		virtual ~ThreadPool();
 
 
+		/**
+		 * @brief	Find an unused thread (or creates a new one) and runs the specified worker
+		 * 			method on it.
+		 *
+		 * @param	name			A name you may use for more easily identifying the thread.
+		 * @param	workerMethod	The worker method to be called by the thread.
+		 */
 		void run(const String& name, std::function<void()> workerMethod);
 		void run(const String& name, std::function<void()> workerMethod);
 
 
+		/**
+		 * @brief	Stops all threads and destroys them. Caller must ensure each threads workerMethod
+		 * 			returns otherwise this will never return.
+		 */
 		void stopAll();
 		void stopAll();
 
 
+		/**
+		 * @brief	Clear any unused threads that are over the capacity.
+		 */
 		void clearUnused();
 		void clearUnused();
 
 
+		/**
+		 * @brief	Returns the number of unused threads in the pool.
+		 */
 		UINT32 getNumAvailable() const;
 		UINT32 getNumAvailable() const;
+
+		/**
+		 * @brief	Returns the number of running threads in the pool.
+		 */
 		UINT32 getNumActive() const;
 		UINT32 getNumActive() const;
+
+		/**
+		 * @brief	Returns the total number of created threads in the pool
+		 * 			(both running and unused).
+		 */
 		UINT32 getNumAllocated() const;
 		UINT32 getNumAllocated() const;
 
 
 	protected:
 	protected:
 		Vector<PooledThread*>::type mThreads;
 		Vector<PooledThread*>::type mThreads;
 		
 		
+		/**
+		 * @brief	Creates a new thread to be used by the pool.
+		 */
 		virtual PooledThread* createThread(const String& name) = 0;
 		virtual PooledThread* createThread(const String& name) = 0;
+
+		/**
+		 * @brief	Destroys the specified thread. Caller needs to make sure
+		 * 			the thread is actually shut down beforehand.
+		 */
 		void destroyThread(PooledThread* thread);
 		void destroyThread(PooledThread* thread);
+
+		/**
+		 * @brief	Returns the first unused thread if one exists, otherwise
+		 * 			creates a new one.
+		 *
+		 * @param	name	Name to assign the thread.
+		 *
+		 * @note	Throws an exception if we have reached our maximum thread capacity.
+		 */
 		PooledThread* getThread(const String& name);
 		PooledThread* getThread(const String& name);
 
 
 		UINT32 mDefaultCapacity;
 		UINT32 mDefaultCapacity;
@@ -89,6 +202,9 @@ namespace BansheeEngine
 		CM_MUTEX(mMutex);
 		CM_MUTEX(mMutex);
 	};
 	};
 
 
+	/**
+	 * @brief	Policy used for thread start & end used by the ThreadPool.
+	 */
 	class ThreadNoPolicy
 	class ThreadNoPolicy
 	{
 	{
 	public:
 	public:
@@ -96,6 +212,12 @@ namespace BansheeEngine
 		static void onThreadEnded(const String& name) { }
 		static void onThreadEnded(const String& name) { }
 	};
 	};
 
 
+	/**
+	 * @copydoc ThreadPool
+	 * 			
+	 * @tparam	ThreadPolicy Allows you specify a policy with methods that will get called
+	 * 						 whenever a new thread is created or when a thread is destroyed.
+	 */
 	template<class ThreadPolicy = ThreadNoPolicy>
 	template<class ThreadPolicy = ThreadNoPolicy>
 	class TThreadPool : public ThreadPool
 	class TThreadPool : public ThreadPool
 	{
 	{
@@ -107,6 +229,9 @@ namespace BansheeEngine
 		}
 		}
 
 
 	protected:
 	protected:
+		/**
+		 * @copydoc ThreadPool::createThread
+		 */
 		PooledThread* createThread(const String& name)
 		PooledThread* createThread(const String& name)
 		{
 		{
 			PooledThread* newThread = cm_new<TPooledThread<ThreadPolicy>>(name);
 			PooledThread* newThread = cm_new<TPooledThread<ThreadPolicy>>(name);

+ 2 - 0
CamelotUtility/Include/CmFwdDeclUtil.h

@@ -44,6 +44,7 @@ namespace BansheeEngine
 	class MeshData;
 	class MeshData;
 	class FileSystem;
 	class FileSystem;
 	class Timer;
 	class Timer;
+	class Task;
 	class GpuResourceData;
 	class GpuResourceData;
 	class PixelData;
 	class PixelData;
 	class HString;
 	class HString;
@@ -64,6 +65,7 @@ namespace BansheeEngine
 	typedef std::shared_ptr<GpuResourceData> GpuResourceDataPtr;
 	typedef std::shared_ptr<GpuResourceData> GpuResourceDataPtr;
 	typedef std::shared_ptr<DataStream> DataStreamPtr;
 	typedef std::shared_ptr<DataStream> DataStreamPtr;
 	typedef std::shared_ptr<MemoryDataStream> MemoryDataStreamPtr;
 	typedef std::shared_ptr<MemoryDataStream> MemoryDataStreamPtr;
+	typedef std::shared_ptr<Task> TaskPtr;
 
 
 	typedef List<DataStreamPtr>::type DataStreamList;
 	typedef List<DataStreamPtr>::type DataStreamList;
 	typedef std::shared_ptr<DataStreamList> DataStreamListPtr;
 	typedef std::shared_ptr<DataStreamList> DataStreamListPtr;

+ 0 - 428
CamelotUtility/Include/CmWorkQueue.h

@@ -1,428 +0,0 @@
-#pragma once
-
-#include "CmPrerequisitesUtil.h"
-#include "boost/any.hpp"
-
-namespace BansheeEngine
-{
-	/** \addtogroup Core
-	*  @{
-	*/
-	/** \addtogroup General
-	*  @{
-	*/
-
-	/** Interface to a general purpose request / response style background work queue.
-	@remarks
-		A work queue is a simple structure, where requests for work are placed
-		onto the queue, then removed by a worker for processing, then finally
-		a response is placed on the result queue for the originator to pick up
-		at their leisure. The typical use for this is in a threaded environment, 
-		although any kind of deferred processing could use this approach to 
-		decouple and distribute work over a period of time even 
-		if it was single threaded.
-	@par
-		WorkQueues also incorporate thread pools. One or more background worker threads
-		can wait on the queue and be notified when a request is waiting to be
-		processed. For maximal thread usage, a WorkQueue instance should be shared
-		among many sources of work, rather than many work queues being created.
-		This way, you can share a small number of hardware threads among a large 
-		number of background tasks. This doesn't mean you have to implement all the
-		request processing in one class, you can plug in many handlers in order to
-		process the requests.
-	*/
-	class CM_UTILITY_EXPORT WorkQueue
-	{
-	protected:
-		typedef Map<String, UINT16>::type ChannelMap;
-		ChannelMap mChannelMap;
-		UINT16 mNextChannel;
-		CM_MUTEX(mChannelMapMutex)
-	public:
-		/// Numeric identifier for a request
-		typedef unsigned long long int RequestID;
-
-		/** General purpose request structure. 
-		*/
-		class CM_UTILITY_EXPORT Request
-		{
-			friend class WorkQueue;
-		protected:
-			/// The request channel, as an integer 
-			UINT16 mChannel;
-			/// The details of the request (user defined)
-			boost::any mData;
-			/// Retry count - set this to non-zero to have the request try again on failure
-			UINT8 mRetryCount;
-			/// Identifier (assigned by the system)
-			RequestID mID;
-			/// Abort Flag
-			mutable bool mAborted;
-
-		public:
-			/// Constructor 
-			Request(UINT16 channel, const boost::any& rData, UINT8 retry, RequestID rid);
-			~Request();
-			/// Set the abort flag
-			void abortRequest() const { mAborted = true; }
-			/// Get the request channel (top level categorisation)
-			UINT16 getChannel() const { return mChannel; }
-			/// Get the user details of this request
-			const boost::any& getData() const { return mData; }
-			/// Get the remaining retry count
-			UINT8 getRetryCount() const { return mRetryCount; }
-			/// Get the identifier of this request
-			RequestID getID() const { return mID; }
-			/// Get the abort flag
-			bool getAborted() const { return mAborted; }
-		};
-
-		/** General purpose response structure. 
-		*/
-		struct CM_UTILITY_EXPORT Response
-		{
-			/// Pointer to the request that this response is in relation to
-			Request* mRequest;
-			/// Whether the work item succeeded or not
-			bool mSuccess;
-			/// Data associated with the result of the process
-			boost::any mData;
-
-		public:
-			Response(Request* rq, bool success, const boost::any& data);
-			~Response();
-			/// Get the request that this is a response to (NB destruction destroys this)
-			const Request* getRequest() const { return mRequest; }
-			/// Return whether this is a successful response
-			bool succeeded() const { return mSuccess; }
-			/// Return the response data (user defined, only valid on success)
-			const boost::any& getData() const { return mData; }
-			/// Abort the request
-			void abortRequest() { mRequest->abortRequest(); }
-		};
-
-		/** Interface definition for a handler of requests. 
-		@remarks
-		User classes are expected to implement this interface in order to
-		process requests on the queue. It's important to realise that
-		the calls to this class may be in a separate thread to the main
-		render context, and as such it may not be possible to make
-		rendersystem or other GPU-dependent calls in this handler. You can only
-		do so if the queue was created with 'workersCanAccessRenderSystem'
-		set to true, and OGRE_THREAD_SUPPORT=1, but this puts extra strain
-		on the thread safety of the render system and is not recommended.
-		It is best to perform CPU-side work in these handlers and let the
-		response handler transfer results to the GPU in the main render thread.
-		*/
-		class CM_UTILITY_EXPORT RequestHandler
-		{
-		public:
-			RequestHandler() {}
-			virtual ~RequestHandler() {}
-
-			/** Return whether this handler can process a given request. 
-			@remarks
-			Defaults to true, but if you wish to add several handlers each of
-			which deal with different types of request, you can override
-			this method. 
-			*/
-			virtual bool canHandleRequest(const Request* req, const WorkQueue* srcQ) 
-			{ (void)srcQ; return !req->getAborted(); }
-
-			/** The handler method every subclass must implement. 
-			If a failure is encountered, return a Response with a failure
-			result rather than raise an exception.
-			@param req The Request structure, which is effectively owned by the
-			handler during this call. It must be attached to the returned
-			Response regardless of success or failure.
-			@param srcQ The work queue that this request originated from
-			@return Pointer to a Response object - the caller is responsible
-			for deleting the object.
-			*/
-			virtual Response* handleRequest(Request* req, const WorkQueue* srcQ) = 0;
-		};
-
-		/** Interface definition for a handler of responses. 
-		@remarks
-		User classes are expected to implement this interface in order to
-		process responses from the queue. All calls to this class will be 
-		in the main render thread and thus all GPU resources will be
-		available. 
-		*/
-		class CM_UTILITY_EXPORT ResponseHandler
-		{
-		public:
-			ResponseHandler() {}
-			virtual ~ResponseHandler() {}
-
-			/** Return whether this handler can process a given response. 
-			@remarks
-			Defaults to true, but if you wish to add several handlers each of
-			which deal with different types of response, you can override
-			this method. 
-			*/
-			virtual bool canHandleResponse(const Response* res, const WorkQueue* srcQ) 
-			{ (void)srcQ; return !res->getRequest()->getAborted(); }
-
-			/** The handler method every subclass must implement. 
-			@param res The Response structure. The caller is responsible for
-			deleting this after the call is made, none of the data contained
-			(except pointers to structures in user Any data) will persist
-			after this call is returned.
-			@param srcQ The work queue that this request originated from
-			*/
-			virtual void handleResponse(const Response* res, const WorkQueue* srcQ) = 0;
-		};
-
-	protected:
-		size_t mWorkerThreadCount;
-		bool mIsRunning;
-		unsigned long mResposeTimeLimitMS;
-
-		typedef Deque<Request*>::type RequestQueue;
-		RequestQueue mRequestQueue;
-		RequestQueue mProcessQueue;
-
-		/// Thread function
-		struct WorkerFunc CM_THREAD_WORKER_INHERIT
-		{
-			WorkQueue* mQueue;
-
-			WorkerFunc(WorkQueue* q) 
-				: mQueue(q) {}
-
-			void operator()();
-
-			void run();
-		};
-		WorkerFunc* mWorkerFunc;
-
-		/** Intermediate structure to hold a pointer to a request handler which 
-			provides insurance against the handler itself being disconnected
-			while the list remains unchanged.
-		*/
-		class CM_UTILITY_EXPORT RequestHandlerHolder
-		{
-		protected:
-			CM_RW_MUTEX(mRWMutex);
-			RequestHandler* mHandler;
-		public:
-			RequestHandlerHolder(RequestHandler* handler)
-				: mHandler(handler)	{}
-
-			// Disconnect the handler to allow it to be destroyed
-			void disconnectHandler()
-			{
-				// write lock - must wait for all requests to finish
-				CM_LOCK_RW_MUTEX_WRITE(mRWMutex);
-				mHandler = 0;
-			}
-
-			/** Get handler pointer - note, only use this for == comparison or similar,
-				do not attempt to call it as it is not thread safe. 
-			*/
-			RequestHandler* getHandler() { return mHandler; }
-
-			/** Process a request if possible.
-			@return Valid response if processed, null otherwise
-			*/
-			Response* handleRequest(Request* req, const WorkQueue* srcQ)
-			{
-				// Read mutex so that multiple requests can be processed by the
-				// same handler in parallel if required
-				CM_LOCK_RW_MUTEX_READ(mRWMutex);
-				Response* response = 0;
-				if (mHandler)
-				{
-					if (mHandler->canHandleRequest(req, srcQ))
-					{
-						response = mHandler->handleRequest(req, srcQ);
-					}
-				}
-				return response;
-			}
-
-		};
-		// Hold these by shared pointer so they can be copied keeping same instance
-		typedef std::shared_ptr<RequestHandlerHolder> RequestHandlerHolderPtr;
-
-		typedef List<RequestHandlerHolderPtr>::type RequestHandlerList;
-		typedef List<ResponseHandler*>::type ResponseHandlerList;
-		typedef Map<UINT16, RequestHandlerList>::type RequestHandlerListByChannel;
-		typedef Map<UINT16, ResponseHandlerList>::type ResponseHandlerListByChannel;
-
-		RequestHandlerListByChannel mRequestHandlers;
-		ResponseHandlerListByChannel mResponseHandlers;
-		RequestID mRequestCount;
-		bool mPaused;
-		bool mAcceptRequests;
-		bool mShuttingDown;
-
-		/// Synchroniser token to wait / notify on thread init 
-		CM_THREAD_SYNCHRONISER(mInitSync)
-
-		CM_THREAD_SYNCHRONISER(mRequestCondition)
-
-		/// Init notification mutex (must lock before waiting on initCondition)
-		CM_MUTEX(mRequestMutex)
-		CM_MUTEX(mProcessMutex)
-		CM_RW_MUTEX(mRequestHandlerMutex);
-
-#if CM_THREAD_SUPPORT
-		typedef Vector<CM_THREAD_TYPE*>::type WorkerThreadList;
-		WorkerThreadList mWorkers;
-#endif
-
-	public:
-		WorkQueue();
-		~WorkQueue();		
-
-		/** Start up the queue with the options that have been set.
-		@param forceRestart If the queue is already running, whether to shut it
-			down and restart.
-		*/
-		void startup(bool forceRestart = true);
-
-		/** Shut down the queue.
-		*/
-		void shutdown();
-
-		/** Add a request handler instance to the queue. 
-		@remarks
-			Every queue must have at least one request handler instance for each 
-			channel in which requests are raised. If you 
-			add more than one handler per channel, then you must implement canHandleRequest 
-			differently	in each if you wish them to respond to different requests.
-		@param channel The channel for requests you want to handle
-		@param rh Your handler
-		*/
-		void addRequestHandler(UINT16 channel, RequestHandler* rh);
-		/** Remove a request handler. */
-		void removeRequestHandler(UINT16 channel, RequestHandler* rh);
-
-		/** Add a response handler instance to the queue. 
-		@remarks
-			Every queue must have at least one response handler instance for each 
-			channel in which requests are raised. If you add more than one, then you 
-			must implement canHandleResponse differently in each if you wish them 
-			to respond to different responses.
-		@param channel The channel for responses you want to handle
-		@param rh Your handler
-		*/
-		void addResponseHandler(UINT16 channel, ResponseHandler* rh);
-		/** Remove a Response handler. */
-		void removeResponseHandler(UINT16 channel, ResponseHandler* rh);
-
-		/**
-		 * @brief	Gets the next free request identifier.
-		 *
-		 * @return	The next free request identifier.
-		 */
-		RequestID peekNextFreeRequestId();
-
-		/** Add a new request to the queue.
-		@param channel The channel this request will go into = 0; the channel is the top-level
-			categorisation of the request
-		@param requestType An identifier that's unique within this queue which
-			identifies the type of the request (user decides the actual value)
-		@param rData The data required by the request process. 
-		@param retryCount The number of times the request should be retried
-			if it fails.
-		@param forceSynchronous Forces the request to be processed immediately
-			even if threading is enabled.
-		@returns The ID of the request that has been added
-		*/
-		RequestID addRequest(UINT16 channel, const boost::any& rData, UINT8 retryCount = 0, 
-			bool forceSynchronous = false);
-
-		/// Put a Request on the queue with a specific RequestID.
-		void addRequestWithRID(RequestID rid, UINT16 channel, const boost::any& rData, UINT8 retryCount);
-
-		/** Abort a previously issued request.
-		If the request is still waiting to be processed, it will be 
-		removed from the queue.
-		@param id The ID of the previously issued request.
-		*/
-		void abortRequest(RequestID id);
-
-		/** Abort all previously issued requests in a given channel.
-		Any requests still waiting to be processed of the given channel, will be 
-		removed from the queue.
-		@param channel The type of request to be aborted
-		*/
-		void abortRequestsByChannel(UINT16 channel);
-
-		/** Abort all previously issued requests.
-		Any requests still waiting to be processed will be removed from the queue.
-		Any requests that are being processed will still complete.
-		*/
-		void abortAllRequests();
-		
-		/** Set whether to pause further processing of any requests. 
-		If true, any further requests will simply be queued and not processed until
-		setPaused(false) is called. Any requests which are in the process of being
-		worked on already will still continue. 
-		*/
-		void setPaused(bool pause);
-		/// Return whether the queue is paused ie not sending more work to workers
-		bool isPaused() const;
-
-		/** Set whether to accept new requests or not. 
-		If true, requests are added to the queue as usual. If false, requests
-		are silently ignored until setRequestsAccepted(true) is called. 
-		*/
-		void setRequestsAccepted(bool accept);
-		/// Returns whether requests are being accepted right now
-		bool getRequestsAccepted() const;
-
-		/** Get the number of worker threads that this queue will start when 
-			startup() is called. 
-		*/
-		size_t getWorkerThreadCount() const;
-
-		/** Set the number of worker threads that this queue will start
-			when startup() is called (default 1).
-			Calling this will have no effect unless the queue is shut down and
-			restarted.
-		*/
-		void setWorkerThreadCount(size_t c);
-
-		/** Get a channel ID for a given channel name. 
-		@remarks
-			Channels are assigned on a first-come, first-served basis and are
-			not persistent across application instances. This method allows 
-			applications to not worry about channel clashes through manually
-			assigned channel numbers.
-		*/
-		UINT16 getChannel(const String& channelName);
-
-		/** Returns whether the queue is trying to shut down. */
-		bool isShuttingDown() const { return mShuttingDown; }
-
-	protected:
-		void processRequestResponse(Request* r);
-		Response* processRequest(Request* r);
-		void processResponse(Response* r);
-
-		/** To be called by a separate thread; will return immediately if there
-			are items in the queue, or suspend the thread until new items are added
-			otherwise.
-		*/
-		void waitForNextRequest();
-
-		/** Process the next request on the queue. 
-		@remarks
-			This method is public, but only intended for advanced users to call. 
-			The only reason you would call this, is if you were using your 
-			own thread to drive the worker processing. The thread calling this
-			method will be the thread used to call the RequestHandler.
-		*/
-		void processNextRequest();
-
-		/// Main function for each thread spawned.
-		void threadMain();
-
-		/// Notify workers about a new request. 
-		void notifyWorkers();
-	};
-}

+ 162 - 0
CamelotUtility/Source/BsTaskScheduler.cpp

@@ -0,0 +1,162 @@
+#include "BsTaskScheduler.h"
+#include "BsThreadPool.h"
+
+namespace BansheeEngine
+{
+	// Constructs an inactive task. mState encodes the lifecycle as an integer:
+	// 0 = queued/inactive, 1 = in-progress (set by the scheduler), 2 = complete,
+	// 3 = canceled (see isComplete/isCanceled/cancel below).
+	// mTaskId and mParent are left unset here; TaskScheduler::addTask() assigns them.
+	Task::Task(const PrivatelyConstruct& dummy, const String& name, std::function<void()> taskWorker, 
+		TaskPriority priority, TaskPtr dependency)
+		:mName(name), mState(0), mPriority(priority), mTaskId(0), 
+		mTaskDependency(dependency), mTaskWorker(taskWorker), mParent(nullptr)
+	{
+
+	}
+
+	// Factory that creates a task wrapped in a shared pointer. The PrivatelyConstruct
+	// dummy parameter gates the constructor - presumably so tasks can only ever exist
+	// as TaskPtr instances created through this method (TODO confirm against header).
+	TaskPtr Task::create(const String& name, std::function<void()> taskWorker, TaskPriority priority, TaskPtr dependency)
+	{
+		return cm_shared_ptr<Task>(PrivatelyConstruct(), name, taskWorker, priority, dependency);
+	}
+
+	// True once the task's worker has finished running (state 2, stored by
+	// TaskScheduler::runTask after the worker returns).
+	bool Task::isComplete() const
+	{
+		return mState.load() == 2;
+	}
+
+	// True if cancel() was called on this task (state 3).
+	bool Task::isCanceled() const
+	{
+		return mState.load() == 3;
+	}
+
+	// Blocks the calling thread until this task completes, via the owning scheduler.
+	// NOTE(review): mParent is only assigned in TaskScheduler::addTask - calling wait()
+	// on a task that was never added to a scheduler dereferences a null pointer; confirm
+	// whether callers guarantee the task was queued first.
+	void Task::wait()
+	{
+		mParent->waitUntilComplete(this);
+	}
+
+	// Marks the task canceled. The scheduler drops canceled tasks before dispatch, but
+	// NOTE(review): this unconditionally overwrites the state - a task that is already
+	// running (or complete) is not interrupted, yet its state becomes "canceled";
+	// confirm this is the intended semantic.
+	void Task::cancel()
+	{
+		mState.store(3);
+	}
+
+	// Initializes the scheduler and immediately launches its main dispatch loop on a
+	// pooled thread. The number of concurrently active tasks is capped at the reported
+	// hardware thread count (mMaxActiveTasks is first zero-initialized, then overwritten).
+	// NOTE(review): runMain may begin executing before this constructor returns; verify
+	// no derived-class or post-construction state is touched by the loop before then.
+	TaskScheduler::TaskScheduler()
+		:mMaxActiveTasks(0), mNumActiveTasks(0), mNextTaskId(0), mShutdown(false),
+		mTaskQueue(&TaskScheduler::taskCompare)
+	{
+		mMaxActiveTasks = CM_THREAD_HARDWARE_CONCURRENCY;
+
+		ThreadPool::instance().run("TaskScheduler", std::bind(&TaskScheduler::runMain, this));
+	}
+
+	// Signals the main dispatch loop to exit (mShutdown is read under mReadyMutex in
+	// runMain, so the flag write is safely published) and wakes it.
+	// NOTE(review): this does not join the scheduler thread or wait for in-flight tasks;
+	// confirm the ThreadPool's own shutdown ordering makes destroying this object safe
+	// while workers may still reference it.
+	TaskScheduler::~TaskScheduler()
+	{
+		CM_LOCK_MUTEX(mReadyMutex);
+
+		mShutdown = true;
+		CM_THREAD_NOTIFY_ONE(mTaskReadyCond);
+	}
+
+	// Queues a task for execution. Assigns the task its owning scheduler and a
+	// monotonically increasing id (used by taskCompare to order same-priority tasks
+	// first-come-first-served), then wakes the dispatch loop.
+	void TaskScheduler::addTask(const TaskPtr& task)
+	{
+		CM_LOCK_MUTEX(mReadyMutex);
+
+		task->mParent = this;
+		task->mTaskId = mNextTaskId++;
+
+		mTaskQueue.insert(task);
+
+		// Wake main scheduler thread
+		CM_THREAD_NOTIFY_ONE(mTaskReadyCond);
+	}
+
+	// Raises the cap on concurrently active tasks by one and wakes the dispatch loop,
+	// since a queued task may now be able to run. Used by waitUntilComplete to avoid
+	// starving the pool while a caller blocks.
+	void TaskScheduler::addWorker()
+	{
+		CM_LOCK_MUTEX(mReadyMutex);
+
+		mMaxActiveTasks++;
+
+		// A spot freed up, queue new tasks on main scheduler thread if they exist
+		CM_THREAD_NOTIFY_ONE(mTaskReadyCond);
+	}
+
+	// Lowers the cap on concurrently active tasks by one (never below zero). Tasks that
+	// are already running are unaffected; the dispatch loop simply stops handing out new
+	// ones until the active count drops below the new cap.
+	void TaskScheduler::removeWorker()
+	{
+		CM_LOCK_MUTEX(mReadyMutex);
+
+		if(mMaxActiveTasks > 0)
+			mMaxActiveTasks--;
+	}
+
+	void TaskScheduler::runMain()
+	{
+		while(true)
+		{
+			CM_LOCK_MUTEX_NAMED(mReadyMutex, lock);
+
+			while((mTaskQueue.size() == 0 || mNumActiveTasks == mMaxActiveTasks) && !mShutdown)
+				CM_THREAD_WAIT(mTaskReadyCond, mReadyMutex, lock);
+
+			if(mShutdown)
+				break;
+
+			for(UINT32 i = 0; i < mTaskQueue.size(), mNumActiveTasks < mMaxActiveTasks; i++)
+			{
+				TaskPtr curTask = *mTaskQueue.begin();
+				mTaskQueue.erase(mTaskQueue.begin());
+
+				if(curTask->isCanceled())
+					continue;
+
+				if(curTask->mTaskDependency != nullptr && !curTask->mTaskDependency->isComplete())
+					continue;
+
+				curTask->mState.store(1);
+				mNumActiveTasks++;
+
+				ThreadPool::instance().run(curTask->mName, std::bind(&TaskScheduler::runTask, this, std::cref(curTask)));
+			}
+		}
+	}
+
+	void TaskScheduler::runTask(const TaskPtr& task)
+	{
+		task->mTaskWorker();
+
+		{
+			CM_LOCK_MUTEX(mCompleteMutex);
+			task->mState.store(2);
+
+			CM_THREAD_NOTIFY_ALL(mTaskCompleteCond);
+		}
+
+		// Possibly this task was someones dependency, so wake the main scheduler thread
+		CM_THREAD_NOTIFY_ONE(mTaskReadyCond);
+	}
+
+	// Blocks the calling thread until the given task completes. While blocked, the active
+	// task cap is temporarily raised (addWorker/removeWorker) so the scheduler can keep
+	// dispatching work and the pool is not starved by this sleeping thread.
+	// NOTE(review): the loop only exits on isComplete(); a task canceled while still
+	// queued never runs and never signals mTaskCompleteCond, and the initial isCanceled()
+	// check does not cover cancellation happening after this point - confirm waiters
+	// cannot deadlock in that case.
+	void TaskScheduler::waitUntilComplete(const Task* task)
+	{
+		if(task->isCanceled())
+			return;
+
+		{
+			CM_LOCK_MUTEX_NAMED(mCompleteMutex, lock);
+			
+			while(!task->isComplete())
+			{
+				addWorker();
+				CM_THREAD_WAIT(mTaskCompleteCond, mCompleteMutex, lock);
+				removeWorker();
+			}
+		}
+	}
+
+	bool TaskScheduler::taskCompare(const TaskPtr& lhs, const TaskPtr& rhs)
+	{
+		// If one tasks priority is higher, that one goes first
+		if(lhs->mPriority > rhs->mPriority)
+			return true;
+
+		// Otherwise we go by smaller id, as that task was queued earlier than the other
+		if(lhs->mTaskId < rhs->mTaskId)
+			return true;
+
+		CM_EXCEPT(InternalErrorException, "Found two identical tasks.");
+	}
+}

+ 0 - 520
CamelotUtility/Source/CmWorkQueue.cpp

@@ -1,520 +0,0 @@
-#include "CmWorkQueue.h"
-#include "CmDebug.h"
-
-namespace BansheeEngine {
-	WorkQueue::WorkQueue() 
-		: mNextChannel(0) 		
-		, mWorkerThreadCount(1)
-		, mIsRunning(false)
-		, mWorkerFunc(0)
-		, mRequestCount(0)
-		, mPaused(false)
-		, mAcceptRequests(true)
-	{}
-	//---------------------------------------------------------------------
-	WorkQueue::~WorkQueue() 
-	{
-		shutdown();
-
-		for (RequestQueue::iterator i = mRequestQueue.begin(); i != mRequestQueue.end(); ++i)
-		{
-			cm_delete<ScratchAlloc>(*i);
-		}
-		mRequestQueue.clear();
-	}
-	//---------------------------------------------------------------------
-	void WorkQueue::startup(bool forceRestart)
-	{
-		if (mIsRunning)
-		{
-			if (forceRestart)
-				shutdown();
-			else
-				return;
-		}
-
-		mShuttingDown = false;
-
-		mWorkerFunc = cm_new<WorkerFunc>(this);
-
-#if CM_THREAD_SUPPORT
-		for (UINT8 i = 0; i < mWorkerThreadCount; ++i)
-		{
-			CM_THREAD_CREATE(t, *mWorkerFunc);
-			mWorkers.push_back(t);
-		}
-#endif
-
-		mIsRunning = true;
-	}
-	//---------------------------------------------------------------------
-	void WorkQueue::shutdown()
-	{
-		if( !mIsRunning )
-			return;
-
-		mShuttingDown = true;
-		abortAllRequests();
-#if CM_THREAD_SUPPORT
-		// wake all threads (they should check shutting down as first thing after wait)
-		CM_THREAD_NOTIFY_ALL(mRequestCondition)
-
-			// all our threads should have been woken now, so join
-			for (WorkerThreadList::iterator i = mWorkers.begin(); i != mWorkers.end(); ++i)
-			{
-				(*i)->join();
-				CM_THREAD_DESTROY(*i);
-			}
-			mWorkers.clear();
-#endif
-
-			if (mWorkerFunc != nullptr)
-			{
-				cm_delete(mWorkerFunc);
-				mWorkerFunc = nullptr;
-			}
-
-			mIsRunning = false;
-	}
-	//---------------------------------------------------------------------
-	void WorkQueue::addRequestHandler(UINT16 channel, RequestHandler* rh)
-	{
-		CM_LOCK_RW_MUTEX_WRITE(mRequestHandlerMutex);
-
-		RequestHandlerListByChannel::iterator i = mRequestHandlers.find(channel);
-		if (i == mRequestHandlers.end())
-			i = mRequestHandlers.insert(RequestHandlerListByChannel::value_type(channel, RequestHandlerList())).first;
-
-		RequestHandlerList& handlers = i->second;
-		bool duplicate = false;
-		for (RequestHandlerList::iterator j = handlers.begin(); j != handlers.end(); ++j)
-		{
-			if ((*j)->getHandler() == rh)
-			{
-				duplicate = true;
-				break;
-			}
-		}
-		if (!duplicate)
-			handlers.push_back(cm_shared_ptr<RequestHandlerHolder>(rh));
-
-	}
-	//---------------------------------------------------------------------
-	void WorkQueue::removeRequestHandler(UINT16 channel, RequestHandler* rh)
-	{
-		CM_LOCK_RW_MUTEX_WRITE(mRequestHandlerMutex);
-
-		RequestHandlerListByChannel::iterator i = mRequestHandlers.find(channel);
-		if (i != mRequestHandlers.end())
-		{
-			RequestHandlerList& handlers = i->second;
-			for (RequestHandlerList::iterator j = handlers.begin(); j != handlers.end(); ++j)
-			{
-				if ((*j)->getHandler() == rh)
-				{
-					// Disconnect - this will make it safe across copies of the list
-					// this is threadsafe and will wait for existing processes to finish
-					(*j)->disconnectHandler();
-					handlers.erase(j);	
-					break;
-				}
-			}
-
-		}
-
-	}
-	//---------------------------------------------------------------------
-	void WorkQueue::addResponseHandler(UINT16 channel, ResponseHandler* rh)
-	{
-		ResponseHandlerListByChannel::iterator i = mResponseHandlers.find(channel);
-		if (i == mResponseHandlers.end())
-			i = mResponseHandlers.insert(ResponseHandlerListByChannel::value_type(channel, ResponseHandlerList())).first;
-
-		ResponseHandlerList& handlers = i->second;
-		if (std::find(handlers.begin(), handlers.end(), rh) == handlers.end())
-			handlers.push_back(rh);
-	}
-	//---------------------------------------------------------------------
-	void WorkQueue::removeResponseHandler(UINT16 channel, ResponseHandler* rh)
-	{
-		ResponseHandlerListByChannel::iterator i = mResponseHandlers.find(channel);
-		if (i != mResponseHandlers.end())
-		{
-			ResponseHandlerList& handlers = i->second;
-			ResponseHandlerList::iterator j = std::find(
-				handlers.begin(), handlers.end(), rh);
-			if (j != handlers.end())
-				handlers.erase(j);
-
-		}
-	}
-	//---------------------------------------------------------------------
-	WorkQueue::RequestID WorkQueue::peekNextFreeRequestId()
-	{
-		{
-			// lock to acquire rid and push request to the queue
-			CM_LOCK_MUTEX(mRequestMutex)
-
-			RequestID rid = mRequestCount + 1;
-			return rid;
-		}
-	}
-	//---------------------------------------------------------------------
-	WorkQueue::RequestID WorkQueue::addRequest(UINT16 channel, 
-		const boost::any& rData, UINT8 retryCount, bool forceSynchronous)
-	{
-		Request* req = 0;
-		RequestID rid = 0;
-
-		{
-			// lock to acquire rid and push request to the queue
-			CM_LOCK_MUTEX(mRequestMutex)
-
-				if (!mAcceptRequests || mShuttingDown)
-					return 0;
-
-			rid = ++mRequestCount;
-			req = cm_new<Request, ScratchAlloc>(channel, rData, retryCount, rid);
-
-#if CM_THREAD_SUPPORT
-			if (!forceSynchronous)
-			{
-				mRequestQueue.push_back(req);
-				notifyWorkers();
-				return rid;
-			}
-#endif
-		}
-
-		processRequestResponse(req);
-
-		return rid;
-
-	}
-	//---------------------------------------------------------------------
-	void WorkQueue::addRequestWithRID(WorkQueue::RequestID rid, UINT16 channel, 
-		const boost::any& rData, UINT8 retryCount)
-	{
-		// lock to push request to the queue
-		CM_LOCK_MUTEX(mRequestMutex)
-
-		if (mShuttingDown)
-			return;
-
-		Request* req = cm_new<Request, ScratchAlloc>(channel, rData, retryCount, rid);
-
-#if CM_THREAD_SUPPORT
-		mRequestQueue.push_back(req);
-		notifyWorkers();
-#else
-		processRequestResponse(req);
-#endif
-	}
-	//---------------------------------------------------------------------
-	void WorkQueue::abortRequest(RequestID id)
-	{
-		CM_LOCK_MUTEX(mProcessMutex)
-
-			// NOTE: Pending requests are exist any of RequestQueue, ProcessQueue and
-			// ResponseQueue when keeping ProcessMutex, so we check all of these queues.
-
-			for (RequestQueue::iterator i = mProcessQueue.begin(); i != mProcessQueue.end(); ++i)
-			{
-				if ((*i)->getID() == id)
-				{
-					(*i)->abortRequest();
-					break;
-				}
-			}
-
-			{
-				CM_LOCK_MUTEX(mRequestMutex)
-
-					for (RequestQueue::iterator i = mRequestQueue.begin(); i != mRequestQueue.end(); ++i)
-					{
-						if ((*i)->getID() == id)
-						{
-							(*i)->abortRequest();
-							break;
-						}
-					}
-			}
-	}
-	//---------------------------------------------------------------------
-	void WorkQueue::abortRequestsByChannel(UINT16 channel)
-	{
-		CM_LOCK_MUTEX(mProcessMutex)
-
-			for (RequestQueue::iterator i = mProcessQueue.begin(); i != mProcessQueue.end(); ++i)
-			{
-				if ((*i)->getChannel() == channel)
-				{
-					(*i)->abortRequest();
-				}
-			}
-
-			{
-				CM_LOCK_MUTEX(mRequestMutex)
-
-					for (RequestQueue::iterator i = mRequestQueue.begin(); i != mRequestQueue.end(); ++i)
-					{
-						if ((*i)->getChannel() == channel)
-						{
-							(*i)->abortRequest();
-						}
-					}
-			}
-	}
-	//---------------------------------------------------------------------
-	void WorkQueue::abortAllRequests()
-	{
-		CM_LOCK_MUTEX(mProcessMutex)
-
-			for (RequestQueue::iterator i = mProcessQueue.begin(); i != mProcessQueue.end(); ++i)
-			{
-				(*i)->abortRequest();
-			}
-
-			{
-				CM_LOCK_MUTEX(mRequestMutex)
-
-					for (RequestQueue::iterator i = mRequestQueue.begin(); i != mRequestQueue.end(); ++i)
-					{
-						(*i)->abortRequest();
-					}
-			}
-	}
-	//---------------------------------------------------------------------
-	void WorkQueue::setPaused(bool pause)
-	{
-		CM_LOCK_MUTEX(mRequestMutex)
-
-		mPaused = pause;
-	}
-	//---------------------------------------------------------------------
-	bool WorkQueue::isPaused() const
-	{
-		return mPaused;
-	}
-	//---------------------------------------------------------------------
-	void WorkQueue::setRequestsAccepted(bool accept)
-	{
-		CM_LOCK_MUTEX(mRequestMutex)
-
-		mAcceptRequests = accept;
-	}
-	//---------------------------------------------------------------------
-	bool WorkQueue::getRequestsAccepted() const
-	{
-		return mAcceptRequests;
-	}
-	//---------------------------------------------------------------------
-	size_t WorkQueue::getWorkerThreadCount() const
-	{
-		return mWorkerThreadCount;
-	}
-	//---------------------------------------------------------------------
-	void WorkQueue::setWorkerThreadCount(size_t c)
-	{
-		mWorkerThreadCount = c;
-	}
-	//---------------------------------------------------------------------
-	UINT16 WorkQueue::getChannel(const String& channelName)
-	{
-		CM_LOCK_MUTEX(mChannelMapMutex)
-
-		ChannelMap::iterator i = mChannelMap.find(channelName);
-		if (i == mChannelMap.end())
-		{
-			i = mChannelMap.insert(ChannelMap::value_type(channelName, mNextChannel++)).first;
-		}
-		return i->second;
-	}
-	//---------------------------------------------------------------------
-	void WorkQueue::processRequestResponse(Request* r)
-	{
-		Response* response = processRequest(r);
-
-		CM_LOCK_MUTEX(mProcessMutex)
-
-		RequestQueue::iterator it;
-		for( it = mProcessQueue.begin(); it != mProcessQueue.end(); ++it )
-		{
-			if( (*it) == r )
-			{
-				mProcessQueue.erase( it );
-				break;
-			}
-		}
-
-		if (response)
-		{
-			if (!response->succeeded())
-			{
-				// Failed, should we retry?
-				const Request* req = response->getRequest();
-				if (req->getRetryCount())
-				{
-					addRequestWithRID(req->getID(), req->getChannel(), req->getData(), 
-						req->getRetryCount() - 1);
-					// discard response (this also deletes request)
-					cm_delete<ScratchAlloc>(response);
-					return;
-				}
-			}
-
-			processResponse(response);
-			cm_delete<ScratchAlloc>(response);
-		}
-		else
-		{
-			// no response, delete request
-			gDebug().logWarning("warning: no handler processed request " + toString((int)r->getID()) + ", channel " + toString(r->getChannel()));
-
-			cm_delete<ScratchAlloc>(r);
-		}
-
-	}
-	WorkQueue::Response* WorkQueue::processRequest(Request* r)
-	{
-		RequestHandlerListByChannel handlerListCopy;
-		{
-			// lock the list only to make a copy of it, to maximise parallelism
-			CM_LOCK_RW_MUTEX_READ(mRequestHandlerMutex);
-
-			handlerListCopy = mRequestHandlers;
-
-		}
-
-		Response* response = 0;
-
-		RequestHandlerListByChannel::iterator i = handlerListCopy.find(r->getChannel());
-		if (i != handlerListCopy.end())
-		{
-			RequestHandlerList& handlers = i->second;
-			for (RequestHandlerList::reverse_iterator j = handlers.rbegin(); j != handlers.rend(); ++j)
-			{
-				// threadsafe call which tests canHandleRequest and calls it if so 
-				response = (*j)->handleRequest(r, this);
-
-				if (response)
-					break;
-			}
-		}
-
-		return response;
-
-	}
-	//---------------------------------------------------------------------
-	void WorkQueue::processResponse(Response* r)
-	{
-		ResponseHandlerListByChannel::iterator i = mResponseHandlers.find(r->getRequest()->getChannel());
-		if (i != mResponseHandlers.end())
-		{
-			ResponseHandlerList& handlers = i->second;
-			for (ResponseHandlerList::reverse_iterator j = handlers.rbegin(); j != handlers.rend(); ++j)
-			{
-				if ((*j)->canHandleResponse(r, this))
-				{
-					(*j)->handleResponse(r, this);
-				}
-			}
-		}
-	}
-	//---------------------------------------------------------------------
-	void WorkQueue::processNextRequest()
-	{
-		Request* request = 0;
-		{
-			// scoped to only lock while retrieving the next request
-			CM_LOCK_MUTEX(mProcessMutex)
-			{
-				CM_LOCK_MUTEX(mRequestMutex)
-
-					if (!mRequestQueue.empty())
-					{
-						request = mRequestQueue.front();
-						mRequestQueue.pop_front();
-						mProcessQueue.push_back( request );
-					}
-			}
-		}
-
-		if (request)
-		{
-			processRequestResponse(request);
-		}
-
-
-	}
-	//---------------------------------------------------------------------
-	void WorkQueue::waitForNextRequest()
-	{
-#if CM_THREAD_SUPPORT
-		// Lock; note that OGRE_THREAD_WAIT will free the lock
-		CM_LOCK_MUTEX_NAMED(mRequestMutex, queueLock);
-		if (mRequestQueue.empty())
-		{
-			// frees lock and suspends the thread
-			CM_THREAD_WAIT(mRequestCondition, mRequestMutex, queueLock);
-		}
-		// When we get back here, it's because we've been notified 
-		// and thus the thread has been woken up. Lock has also been
-		// re-acquired, but we won't use it. It's safe to try processing and fail
-		// if another thread has got in first and grabbed the request
-#endif
-
-	}
-	//---------------------------------------------------------------------
-	void WorkQueue::threadMain()
-	{
-		// default worker thread
-#if CM_THREAD_SUPPORT
-		// Spin forever until we're told to shut down
-		while (!isShuttingDown())
-		{
-			waitForNextRequest();
-			processNextRequest();
-		}
-#endif
-	}
-	//---------------------------------------------------------------------
-	void WorkQueue::notifyWorkers()
-	{
-		// wake up waiting thread
-		CM_THREAD_NOTIFY_ONE(mRequestCondition)
-	}
-	//---------------------------------------------------------------------
-	WorkQueue::Request::Request(UINT16 channel, const boost::any& rData, UINT8 retry, RequestID rid)
-		: mChannel(channel), mData(rData), mRetryCount(retry), mID(rid), mAborted(false)
-	{
-
-	}
-	//---------------------------------------------------------------------
-	WorkQueue::Request::~Request()
-	{
-
-	}
-	//---------------------------------------------------------------------
-	//---------------------------------------------------------------------
-	WorkQueue::Response::Response(Request* rq, bool success, const boost::any& data)
-		: mRequest(rq), mSuccess(success), mData(data)
-	{
-
-	}
-	//---------------------------------------------------------------------
-	WorkQueue::Response::~Response()
-	{
-		cm_delete<ScratchAlloc>(mRequest);
-	}
-	//---------------------------------------------------------------------
-	void WorkQueue::WorkerFunc::operator()()
-	{
-		mQueue->threadMain();
-	}
-
-	void WorkQueue::WorkerFunc::run()
-	{
-		mQueue->threadMain();
-	}
-}