Explorar o código

Removed boost dependencies from linker, they're loaded automatically

Marko Pintera hai 13 anos
pai
achega
dd60bfafb3

+ 2 - 2
CamelotClient/CamelotClient.vcxproj

@@ -60,7 +60,7 @@
       <SubSystem>Console</SubSystem>
       <GenerateDebugInformation>true</GenerateDebugInformation>
       <AdditionalDependencies>CamelotRenderer.lib;CamelotUtility.lib;%(AdditionalDependencies)</AdditionalDependencies>
-      <AdditionalLibraryDirectories>..\lib\$(Configuration);%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
+      <AdditionalLibraryDirectories>..\lib\$(Configuration);..\Dependencies\lib\Debug;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
     </Link>
   </ItemDefinitionGroup>
   <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
@@ -79,7 +79,7 @@
       <GenerateDebugInformation>true</GenerateDebugInformation>
       <EnableCOMDATFolding>true</EnableCOMDATFolding>
       <OptimizeReferences>true</OptimizeReferences>
-      <AdditionalLibraryDirectories>..\lib\$(Configuration)</AdditionalLibraryDirectories>
+      <AdditionalLibraryDirectories>..\lib\$(Configuration);..\Dependencies\lib\Release</AdditionalLibraryDirectories>
       <AdditionalDependencies>CamelotRenderer.lib;CamelotUtility.lib;%(AdditionalDependencies)</AdditionalDependencies>
     </Link>
   </ItemDefinitionGroup>

+ 2 - 2
CamelotFBXImporter/CamelotFBXImporter.vcxproj

@@ -54,7 +54,7 @@
     <Link>
       <GenerateDebugInformation>true</GenerateDebugInformation>
       <AdditionalLibraryDirectories>../lib/$(Configuration);../Dependencies/lib/Debug;./Dependencies/lib/Debug</AdditionalLibraryDirectories>
-      <AdditionalDependencies>CamelotRenderer.lib;CamelotUtility.lib;fbxsdk-2013.3-mdd.lib;libboost_signals-vc110-mt-gd-1_49.lib;%(AdditionalDependencies)</AdditionalDependencies>
+      <AdditionalDependencies>CamelotRenderer.lib;CamelotUtility.lib;fbxsdk-2013.3-mdd.lib;%(AdditionalDependencies)</AdditionalDependencies>
       <ImportLibrary>..\lib\$(Configuration)\$(TargetName).lib</ImportLibrary>
     </Link>
   </ItemDefinitionGroup>
@@ -72,7 +72,7 @@
       <EnableCOMDATFolding>true</EnableCOMDATFolding>
       <OptimizeReferences>true</OptimizeReferences>
       <AdditionalLibraryDirectories>../lib/$(Configuration);../Dependencies/lib/Release;./Dependencies/lib/Release</AdditionalLibraryDirectories>
-      <AdditionalDependencies>CamelotRenderer.lib;CamelotUtility.lib;fbxsdk-2013.3-md.lib;libboost_signals-vc110-mt-1_49.lib;%(AdditionalDependencies)</AdditionalDependencies>
+      <AdditionalDependencies>CamelotRenderer.lib;CamelotUtility.lib;fbxsdk-2013.3-md.lib;%(AdditionalDependencies)</AdditionalDependencies>
       <ImportLibrary>..\lib\$(Configuration)\$(TargetName).lib</ImportLibrary>
     </Link>
   </ItemDefinitionGroup>

+ 2 - 2
CamelotForwardRenderer/CamelotForwardRenderer.vcxproj

@@ -54,7 +54,7 @@
     <Link>
       <GenerateDebugInformation>true</GenerateDebugInformation>
       <AdditionalLibraryDirectories>../lib/$(Configuration);../Dependencies/lib/Debug</AdditionalLibraryDirectories>
-      <AdditionalDependencies>CamelotRenderer.lib;CamelotUtility.lib;libboost_signals-vc110-mt-gd-1_49.lib</AdditionalDependencies>
+      <AdditionalDependencies>CamelotRenderer.lib;CamelotUtility.lib</AdditionalDependencies>
       <ImportLibrary>..\lib\$(Configuration)\$(TargetName).lib</ImportLibrary>
     </Link>
   </ItemDefinitionGroup>
@@ -72,7 +72,7 @@
       <EnableCOMDATFolding>true</EnableCOMDATFolding>
       <OptimizeReferences>true</OptimizeReferences>
       <AdditionalLibraryDirectories>../lib/$(Configuration);../Dependencies/lib/Release</AdditionalLibraryDirectories>
-      <AdditionalDependencies>CamelotRenderer.lib;CamelotUtility.lib;libboost_signals-vc110-mt-1_49.lib</AdditionalDependencies>
+      <AdditionalDependencies>CamelotRenderer.lib;CamelotUtility.lib</AdditionalDependencies>
       <ImportLibrary>..\lib\$(Configuration)\$(TargetName).lib</ImportLibrary>
     </Link>
   </ItemDefinitionGroup>

+ 2 - 2
CamelotFreeImgImporter/CamelotFreeImgImporter.vcxproj

@@ -61,7 +61,7 @@
     </ClCompile>
     <Link>
       <GenerateDebugInformation>true</GenerateDebugInformation>
-      <AdditionalDependencies>CamelotRenderer.lib;CamelotUtility.lib;FreeImaged.lib;libboost_signals-vc110-mt-gd-1_49.lib;%(AdditionalDependencies)</AdditionalDependencies>
+      <AdditionalDependencies>CamelotRenderer.lib;CamelotUtility.lib;FreeImaged.lib;%(AdditionalDependencies)</AdditionalDependencies>
       <AdditionalLibraryDirectories>../lib/$(Configuration);./Dependencies/lib/Debug;../Dependencies/lib/Debug</AdditionalLibraryDirectories>
       <ImportLibrary>..\lib\$(Configuration)\$(TargetName).lib</ImportLibrary>
     </Link>
@@ -79,7 +79,7 @@
       <GenerateDebugInformation>true</GenerateDebugInformation>
       <EnableCOMDATFolding>true</EnableCOMDATFolding>
       <OptimizeReferences>true</OptimizeReferences>
-      <AdditionalDependencies>CamelotRenderer.lib;CamelotUtility.lib;FreeImage.lib;libboost_signals-vc110-mt-1_49.lib;%(AdditionalDependencies)</AdditionalDependencies>
+      <AdditionalDependencies>CamelotRenderer.lib;CamelotUtility.lib;FreeImage.lib;%(AdditionalDependencies)</AdditionalDependencies>
       <AdditionalLibraryDirectories>../lib/$(Configuration);./Dependencies/lib/Release;../Dependencies/lib/Release</AdditionalLibraryDirectories>
       <ImportLibrary>..\lib\$(Configuration)\$(TargetName).lib</ImportLibrary>
     </Link>

+ 2 - 2
CamelotGLRenderer/CamelotGLRenderer.vcxproj

@@ -56,7 +56,7 @@
       <GenerateDebugInformation>true</GenerateDebugInformation>
       <AdditionalDependencies>glu32.lib;opengl32.lib;CamelotRenderer.lib;CamelotUtility.lib;%(AdditionalDependencies)</AdditionalDependencies>
       <ImportLibrary>..lib\$(Configuration)\$(TargetName).lib</ImportLibrary>
-      <AdditionalLibraryDirectories>..\lib\$(Configuration);..\Dependencies\Debug;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
+      <AdditionalLibraryDirectories>..\lib\$(Configuration);..\Dependencies\lib\Debug;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
     </Link>
   </ItemDefinitionGroup>
   <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
@@ -74,7 +74,7 @@
       <OptimizeReferences>true</OptimizeReferences>
       <AdditionalDependencies>glu32.lib;opengl32.lib;CamelotRenderer.lib;CamelotUtility.lib;%(AdditionalDependencies)</AdditionalDependencies>
       <ImportLibrary>..lib\$(Configuration)\$(TargetName).lib</ImportLibrary>
-      <AdditionalLibraryDirectories>..\lib\$(Configuration);..\Dependencies\Debug</AdditionalLibraryDirectories>
+      <AdditionalLibraryDirectories>..\lib\$(Configuration);..\Dependencies\lib\Release</AdditionalLibraryDirectories>
     </Link>
   </ItemDefinitionGroup>
   <ItemGroup>

+ 2 - 2
CamelotOISInput/CamelotOISInput.vcxproj

@@ -54,7 +54,7 @@
     <Link>
       <GenerateDebugInformation>true</GenerateDebugInformation>
       <AdditionalLibraryDirectories>../lib/$(Configuration);../Dependencies/lib/Debug;./Dependencies/lib/Debug</AdditionalLibraryDirectories>
-      <AdditionalDependencies>CamelotRenderer.lib;CamelotUtility.lib;OIS.lib;libboost_signals-vc110-mt-gd-1_49.lib;%(AdditionalDependencies)</AdditionalDependencies>
+      <AdditionalDependencies>CamelotRenderer.lib;CamelotUtility.lib;OIS.lib;%(AdditionalDependencies)</AdditionalDependencies>
       <ImportLibrary>..\lib\$(Configuration)\$(TargetName).lib</ImportLibrary>
     </Link>
   </ItemDefinitionGroup>
@@ -72,7 +72,7 @@
       <EnableCOMDATFolding>true</EnableCOMDATFolding>
       <OptimizeReferences>true</OptimizeReferences>
       <AdditionalLibraryDirectories>../lib/$(Configuration);../Dependencies/lib/Release;./Dependencies/lib/Release</AdditionalLibraryDirectories>
-      <AdditionalDependencies>CamelotRenderer.lib;CamelotUtility.lib;OIS.lib;libboost_signals-vc110-mt-1_49.lib;%(AdditionalDependencies)</AdditionalDependencies>
+      <AdditionalDependencies>CamelotRenderer.lib;CamelotUtility.lib;OIS.lib;%(AdditionalDependencies)</AdditionalDependencies>
       <ImportLibrary>..\lib\$(Configuration)\$(TargetName).lib</ImportLibrary>
     </Link>
   </ItemDefinitionGroup>

+ 2 - 2
CamelotRenderer/CamelotRenderer.vcxproj

@@ -63,7 +63,7 @@
     <Link>
       <SubSystem>NotSet</SubSystem>
       <GenerateDebugInformation>true</GenerateDebugInformation>
-      <AdditionalDependencies>CamelotUtility.lib;libboost_signals-vc110-mt-gd-1_49.lib;cg.lib;%(AdditionalDependencies)</AdditionalDependencies>
+      <AdditionalDependencies>CamelotUtility.lib;cg.lib;%(AdditionalDependencies)</AdditionalDependencies>
       <AdditionalLibraryDirectories>../lib/$(Configuration);../Dependencies/lib/Debug</AdditionalLibraryDirectories>
       <ImportLibrary>..\lib\$(Configuration)\$(TargetName).lib</ImportLibrary>
     </Link>
@@ -84,7 +84,7 @@
       <GenerateDebugInformation>true</GenerateDebugInformation>
       <EnableCOMDATFolding>true</EnableCOMDATFolding>
       <OptimizeReferences>true</OptimizeReferences>
-      <AdditionalDependencies>CamelotUtility.lib;libboost_signals-vc110-mt-1_49.lib;cg.lib;%(AdditionalDependencies)</AdditionalDependencies>
+      <AdditionalDependencies>CamelotUtility.lib;cg.lib;%(AdditionalDependencies)</AdditionalDependencies>
       <AdditionalLibraryDirectories>../lib/$(Configuration);../Dependencies/lib/Release</AdditionalLibraryDirectories>
       <ImportLibrary>..\lib\$(Configuration)\$(TargetName).lib</ImportLibrary>
     </Link>

+ 0 - 1
CamelotRenderer/Source/CmGpuProgramManager.cpp

@@ -48,7 +48,6 @@ namespace CamelotEngine {
     {
 		GpuProgramPtr prg;
 		{
-			CM_LOCK_AUTO_MUTEX
 			prg = createProgram( code, gptype, syntaxCode);
 		}
         prg->load();

+ 0 - 1
CamelotRenderer/Source/CmHardwareBufferManager.cpp

@@ -119,7 +119,6 @@ namespace CamelotEngine {
 		// which locks the same mutexes (via other methods) but in reverse order
 		CM_LOCK_MUTEX(mVertexBuffersMutex)
 		{
-			CM_LOCK_MUTEX(mTempBuffersMutex)
 			HardwareVertexBufferPtr vbuf;
 
 			// Locate existing buffer copy in temporary vertex buffers

+ 0 - 4
CamelotRenderer/Source/CmHighLevelGpuProgram.cpp

@@ -73,10 +73,6 @@ namespace CamelotEngine
     //---------------------------------------------------------------------------
     GpuProgramParametersSharedPtr HighLevelGpuProgram::createParameters(void)
     {
-		// Lock mutex before allowing this since this is a top-level method
-		// called outside of the load()
-		CM_LOCK_AUTO_MUTEX
-
         // Make sure param defs are loaded
         GpuProgramParametersSharedPtr params = GpuProgramParametersSharedPtr(new GpuProgramParameters());
 		// Only populate named parameters if we can support this program

+ 1 - 1
CamelotRenderer/Source/CmInput.cpp

@@ -132,7 +132,7 @@ namespace CamelotEngine
 
 	void Input::updateSmoothInput()
 	{
-		float currentTime = gTime().getTimeSinceApplicationStart();
+		float currentTime = gTime().getTime();
 
 		mHorizontalHistoryBuffer[mCurrentBufferIdx] = (float)mMouseLastRel.x;
 		mVerticalHistoryBuffer[mCurrentBufferIdx] = (float)mMouseLastRel.y;

+ 4 - 2
CamelotUtility/CamelotUtility.vcxproj

@@ -54,7 +54,7 @@
     <Link>
       <GenerateDebugInformation>true</GenerateDebugInformation>
       <ImportLibrary>..\lib\$(Configuration)\$(TargetName).lib</ImportLibrary>
-      <AdditionalDependencies>libboost_signals-vc110-mt-gd-1_49.lib;%(AdditionalDependencies)</AdditionalDependencies>
+      <AdditionalDependencies>%(AdditionalDependencies)</AdditionalDependencies>
       <AdditionalLibraryDirectories>../Dependencies/lib/Debug</AdditionalLibraryDirectories>
       <SubSystem>NotSet</SubSystem>
       <NoEntryPoint>false</NoEntryPoint>
@@ -76,13 +76,14 @@
       <EnableCOMDATFolding>true</EnableCOMDATFolding>
       <OptimizeReferences>true</OptimizeReferences>
       <ImportLibrary>..\lib\$(Configuration)\$(TargetName).lib</ImportLibrary>
-      <AdditionalDependencies>libboost_signals-vc110-mt-1_49.lib;%(AdditionalDependencies)</AdditionalDependencies>
+      <AdditionalDependencies>%(AdditionalDependencies)</AdditionalDependencies>
       <AdditionalLibraryDirectories>../Dependencies/lib/Release</AdditionalLibraryDirectories>
       <NoEntryPoint>false</NoEntryPoint>
     </Link>
   </ItemDefinitionGroup>
   <ItemGroup>
     <ClCompile Include="Source\CmUUID.cpp" />
+    <ClCompile Include="Source\CmWorkQueue.cpp" />
     <ClCompile Include="Source\Win32\CmTimer.cpp" />
     <ClInclude Include="Include\CmBinarySerializer.h" />
     <ClInclude Include="Include\CmBitwise.h" />
@@ -142,6 +143,7 @@
     <ClCompile Include="Source\CmTextureData.cpp">
       <FileType>CppHeader</FileType>
     </ClCompile>
+    <ClInclude Include="Include\CmWorkQueue.h" />
   </ItemGroup>
   <ItemGroup>
     <ClCompile Include="Include\CmAxisAlignedBox.cpp" />

+ 6 - 0
CamelotUtility/CamelotUtility.vcxproj.filters

@@ -189,6 +189,9 @@
     <ClInclude Include="Include\CmTime.h">
       <Filter>Header Files</Filter>
     </ClInclude>
+    <ClInclude Include="Include\CmWorkQueue.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
   </ItemGroup>
   <ItemGroup>
     <ClCompile Include="Include\CmAxisAlignedBox.cpp">
@@ -275,5 +278,8 @@
     <ClCompile Include="Source\CmUUID.cpp">
       <Filter>Source Files</Filter>
     </ClCompile>
+    <ClCompile Include="Source\CmWorkQueue.cpp">
+      <Filter>Source Files</Filter>
+    </ClCompile>
   </ItemGroup>
 </Project>

+ 5 - 0
CamelotUtility/Include/CmPrerequisitesUtil.h

@@ -27,6 +27,11 @@ THE SOFTWARE
 
 #include <assert.h>
 
+// 0 - No thread support
+// 1 - Render system is thread safe (NOT WORKING and will probably be removed)
+// 2 - Thread support but render system can only be accessed from main thread
+#define CM_THREAD_SUPPORT 2
+
 // Platform-specific stuff
 #include "CmPlatform.h"
 

+ 0 - 7
CamelotUtility/Include/CmStdHeaders.h

@@ -112,13 +112,6 @@ extern "C" {
 }
 #endif
 
-#if CM_THREAD_SUPPORT
-#	if !defined(NOMINMAX) && defined(_MSC_VER)
-#		define NOMINMAX // required to stop windows.h messing up std::min
-#	endif
-#	include "CmThreadHeaders.h"
-#endif
-
 #if defined ( CM_GCC_VISIBILITY ) && (CM_PLATFORM != CM_PLATFORM_APPLE)
 #   pragma GCC visibility pop
 #endif

+ 5 - 0
CamelotUtility/Include/CmString.h

@@ -261,6 +261,11 @@ namespace CamelotEngine {
         unsigned short width = 0, char fill = ' ', 
         std::ios::fmtflags flags = std::ios::fmtflags(0) );
 
+	/** Converts a long long to a String. */
+	CM_UTILITY_EXPORT String toString(unsigned long long int val, 
+		unsigned short width = 0, char fill = ' ', 
+		std::ios::fmtflags flags = std::ios::fmtflags(0) );
+
 	/** Global conversion methods **/
 
     /** Converts a boolean to a String. 

+ 9 - 1
CamelotUtility/Include/CmTime.h

@@ -19,7 +19,14 @@ namespace CamelotEngine
 		 *
 		 * @return	The time since application start, in seconds.
 		 */
-		float getTimeSinceApplicationStart() { return mTimeSinceStart; }
+		float getTime() { return mTimeSinceStart; }
+
+		/**
+		 * @brief	Gets the time elapsed since application start.
+		 *
+		 * @return	The time since application start, in miliseconds.
+		 */
+		UINT64 getTimeMs() { return mTimeSinceStartMs; }
 
 		/**
 		 * @brief	Gets the time since last frame was executed.
@@ -48,6 +55,7 @@ namespace CamelotEngine
 	private:
 		float mFrameDelta; // Frame delta in seconds
 		float mTimeSinceStart; // Time since start in seconds
+		UINT64 mTimeSinceStartMs;
 
 		unsigned long mAppStartTime; // Time the application started, in microseconds
 		unsigned long mLastFrameTime; // Time since last runOneFrame call, In microseconds

+ 435 - 0
CamelotUtility/Include/CmWorkQueue.h

@@ -0,0 +1,435 @@
+#pragma once
+
+#include "CmPrerequisitesUtil.h"
+#include "boost/any.hpp"
+
+namespace CamelotEngine
+{
+	/** \addtogroup Core
+	*  @{
+	*/
+	/** \addtogroup General
+	*  @{
+	*/
+
+	/** Interface to a general purpose request / response style background work queue.
+	@remarks
+		A work queue is a simple structure, where requests for work are placed
+		onto the queue, then removed by a worker for processing, then finally
+		a response is placed on the result queue for the originator to pick up
+		at their leisure. The typical use for this is in a threaded environment, 
+		although any kind of deferred processing could use this approach to 
+		decouple and distribute work over a period of time even 
+		if it was single threaded.
+	@par
+		WorkQueues also incorporate thread pools. One or more background worker threads
+		can wait on the queue and be notified when a request is waiting to be
+		processed. For maximal thread usage, a WorkQueue instance should be shared
+		among many sources of work, rather than many work queues being created.
+		This way, you can share a small number of hardware threads among a large 
+		number of background tasks. This doesn't mean you have to implement all the
+		request processing in one class, you can plug in many handlers in order to
+		process the requests.
+	*/
+	class CM_UTILITY_EXPORT WorkQueue
+	{
+	protected:
+		typedef std::map<String, UINT16> ChannelMap;
+		ChannelMap mChannelMap;
+		UINT16 mNextChannel;
+		CM_MUTEX(mChannelMapMutex)
+	public:
+		/// Numeric identifier for a request
+		typedef unsigned long long int RequestID;
+
+		/** General purpose request structure. 
+		*/
+		class CM_UTILITY_EXPORT Request
+		{
+			friend class WorkQueue;
+		protected:
+			/// The request channel, as an integer 
+			UINT16 mChannel;
+			/// The details of the request (user defined)
+			boost::any mData;
+			/// Retry count - set this to non-zero to have the request try again on failure
+			UINT8 mRetryCount;
+			/// Identifier (assigned by the system)
+			RequestID mID;
+			/// Abort Flag
+			mutable bool mAborted;
+
+		public:
+			/// Constructor 
+			Request(UINT16 channel, const boost::any& rData, UINT8 retry, RequestID rid);
+			~Request();
+			/// Set the abort flag
+			void abortRequest() const { mAborted = true; }
+			/// Get the request channel (top level categorisation)
+			UINT16 getChannel() const { return mChannel; }
+			/// Get the user details of this request
+			const boost::any& getData() const { return mData; }
+			/// Get the remaining retry count
+			UINT8 getRetryCount() const { return mRetryCount; }
+			/// Get the identifier of this request
+			RequestID getID() const { return mID; }
+			/// Get the abort flag
+			bool getAborted() const { return mAborted; }
+		};
+
+		/** General purpose response structure. 
+		*/
+		struct CM_UTILITY_EXPORT Response
+		{
+			/// Pointer to the request that this response is in relation to
+			const Request* mRequest;
+			/// Whether the work item succeeded or not
+			bool mSuccess;
+			/// Data associated with the result of the process
+			boost::any mData;
+
+		public:
+			Response(const Request* rq, bool success, const boost::any& data);
+			~Response();
+			/// Get the request that this is a response to (NB destruction destroys this)
+			const Request* getRequest() const { return mRequest; }
+			/// Return whether this is a successful response
+			bool succeeded() const { return mSuccess; }
+			/// Return the response data (user defined, only valid on success)
+			const boost::any& getData() const { return mData; }
+			/// Abort the request
+			void abortRequest() { mRequest->abortRequest(); }
+		};
+
+		/** Interface definition for a handler of requests. 
+		@remarks
+		User classes are expected to implement this interface in order to
+		process requests on the queue. It's important to realise that
+		the calls to this class may be in a separate thread to the main
+		render context, and as such it may not be possible to make
+		rendersystem or other GPU-dependent calls in this handler. You can only
+		do so if the queue was created with 'workersCanAccessRenderSystem'
+		set to true, and OGRE_THREAD_SUPPORT=1, but this puts extra strain
+		on the thread safety of the render system and is not recommended.
+		It is best to perform CPU-side work in these handlers and let the
+		response handler transfer results to the GPU in the main render thread.
+		*/
+		class CM_UTILITY_EXPORT RequestHandler
+		{
+		public:
+			RequestHandler() {}
+			virtual ~RequestHandler() {}
+
+			/** Return whether this handler can process a given request. 
+			@remarks
+			Defaults to true, but if you wish to add several handlers each of
+			which deal with different types of request, you can override
+			this method. 
+			*/
+			virtual bool canHandleRequest(const Request* req, const WorkQueue* srcQ) 
+			{ (void)srcQ; return !req->getAborted(); }
+
+			/** The handler method every subclass must implement. 
+			If a failure is encountered, return a Response with a failure
+			result rather than raise an exception.
+			@param req The Request structure, which is effectively owned by the
+			handler during this call. It must be attached to the returned
+			Response regardless of success or failure.
+			@param srcQ The work queue that this request originated from
+			@return Pointer to a Response object - the caller is responsible
+			for deleting the object.
+			*/
+			virtual Response* handleRequest(const Request* req, const WorkQueue* srcQ) = 0;
+		};
+
+		/** Interface definition for a handler of responses. 
+		@remarks
+		User classes are expected to implement this interface in order to
+		process responses from the queue. All calls to this class will be 
+		in the main render thread and thus all GPU resources will be
+		available. 
+		*/
+		class CM_UTILITY_EXPORT ResponseHandler
+		{
+		public:
+			ResponseHandler() {}
+			virtual ~ResponseHandler() {}
+
+			/** Return whether this handler can process a given response. 
+			@remarks
+			Defaults to true, but if you wish to add several handlers each of
+			which deal with different types of response, you can override
+			this method. 
+			*/
+			virtual bool canHandleResponse(const Response* res, const WorkQueue* srcQ) 
+			{ (void)srcQ; return !res->getRequest()->getAborted(); }
+
+			/** The handler method every subclass must implement. 
+			@param res The Response structure. The caller is responsible for
+			deleting this after the call is made, none of the data contained
+			(except pointers to structures in user Any data) will persist
+			after this call is returned.
+			@param srcQ The work queue that this request originated from
+			*/
+			virtual void handleResponse(const Response* res, const WorkQueue* srcQ) = 0;
+		};
+
+	protected:
+		size_t mWorkerThreadCount;
+		bool mIsRunning;
+		unsigned long mResposeTimeLimitMS;
+
+		typedef deque<Request*>::type RequestQueue;
+		typedef deque<Response*>::type ResponseQueue;
+		RequestQueue mRequestQueue;
+		RequestQueue mProcessQueue;
+		ResponseQueue mResponseQueue;
+
+		/// Thread function
+		struct WorkerFunc CM_THREAD_WORKER_INHERIT
+		{
+			WorkQueue* mQueue;
+
+			WorkerFunc(WorkQueue* q) 
+				: mQueue(q) {}
+
+			void operator()();
+
+			void run();
+		};
+		WorkerFunc* mWorkerFunc;
+
+		/** Intermediate structure to hold a pointer to a request handler which 
+			provides insurance against the handler itself being disconnected
+			while the list remains unchanged.
+		*/
+		class CM_UTILITY_EXPORT RequestHandlerHolder
+		{
+		protected:
+			CM_RW_MUTEX(mRWMutex);
+			RequestHandler* mHandler;
+		public:
+			RequestHandlerHolder(RequestHandler* handler)
+				: mHandler(handler)	{}
+
+			// Disconnect the handler to allow it to be destroyed
+			void disconnectHandler()
+			{
+				// write lock - must wait for all requests to finish
+				CM_LOCK_RW_MUTEX_WRITE(mRWMutex);
+				mHandler = 0;
+			}
+
+			/** Get handler pointer - note, only use this for == comparison or similar,
+				do not attempt to call it as it is not thread safe. 
+			*/
+			RequestHandler* getHandler() { return mHandler; }
+
+			/** Process a request if possible.
+			@return Valid response if processed, null otherwise
+			*/
+			Response* handleRequest(const Request* req, const WorkQueue* srcQ)
+			{
+				// Read mutex so that multiple requests can be processed by the
+				// same handler in parallel if required
+				CM_LOCK_RW_MUTEX_READ(mRWMutex);
+				Response* response = 0;
+				if (mHandler)
+				{
+					if (mHandler->canHandleRequest(req, srcQ))
+					{
+						response = mHandler->handleRequest(req, srcQ);
+					}
+				}
+				return response;
+			}
+
+		};
+		// Hold these by shared pointer so they can be copied keeping same instance
+		typedef std::shared_ptr<RequestHandlerHolder> RequestHandlerHolderPtr;
+
+		typedef list<RequestHandlerHolderPtr>::type RequestHandlerList;
+		typedef list<ResponseHandler*>::type ResponseHandlerList;
+		typedef map<UINT16, RequestHandlerList>::type RequestHandlerListByChannel;
+		typedef map<UINT16, ResponseHandlerList>::type ResponseHandlerListByChannel;
+
+		RequestHandlerListByChannel mRequestHandlers;
+		ResponseHandlerListByChannel mResponseHandlers;
+		RequestID mRequestCount;
+		bool mPaused;
+		bool mAcceptRequests;
+		bool mShuttingDown;
+
+		/// Synchroniser token to wait / notify on thread init 
+		CM_THREAD_SYNCHRONISER(mInitSync)
+
+		CM_THREAD_SYNCHRONISER(mRequestCondition)
+
+		/// Init notification mutex (must lock before waiting on initCondition)
+		CM_MUTEX(mInitMutex)
+		CM_MUTEX(mRequestMutex)
+		CM_MUTEX(mProcessMutex)
+		CM_MUTEX(mResponseMutex)
+		CM_RW_MUTEX(mRequestHandlerMutex);
+
+#if CM_THREAD_SUPPORT
+		typedef vector<CM_THREAD_TYPE*>::type WorkerThreadList;
+		WorkerThreadList mWorkers;
+#endif
+
+	public:
+		WorkQueue();
+		~WorkQueue();		
+
+		/** Start up the queue with the options that have been set.
+		@param forceRestart If the queue is already running, whether to shut it
+			down and restart.
+		*/
+		void startup(bool forceRestart = true);
+
+		/** Shut down the queue.
+		*/
+		void shutdown();
+
+		/** Add a request handler instance to the queue. 
+		@remarks
+			Every queue must have at least one request handler instance for each 
+			channel in which requests are raised. If you 
+			add more than one handler per channel, then you must implement canHandleRequest 
+			differently	in each if you wish them to respond to different requests.
+		@param channel The channel for requests you want to handle
+		@param rh Your handler
+		*/
+		void addRequestHandler(UINT16 channel, RequestHandler* rh);
+		/** Remove a request handler. */
+		void removeRequestHandler(UINT16 channel, RequestHandler* rh);
+
+		/** Add a response handler instance to the queue. 
+		@remarks
+			Every queue must have at least one response handler instance for each 
+			channel in which requests are raised. If you add more than one, then you 
+			must implement canHandleResponse differently in each if you wish them 
+			to respond to different responses.
+		@param channel The channel for responses you want to handle
+		@param rh Your handler
+		*/
+		void addResponseHandler(UINT16 channel, ResponseHandler* rh);
+		/** Remove a Response handler. */
+		void removeResponseHandler(UINT16 channel, ResponseHandler* rh);
+
+		/** Add a new request to the queue.
+		@param channel The channel this request will go into = 0; the channel is the top-level
+			categorisation of the request
+		@param requestType An identifier that's unique within this queue which
+			identifies the type of the request (user decides the actual value)
+		@param rData The data required by the request process. 
+		@param retryCount The number of times the request should be retried
+			if it fails.
+		@param forceSynchronous Forces the request to be processed immediately
+			even if threading is enabled.
+		@returns The ID of the request that has been added
+		*/
+		RequestID addRequest(UINT16 channel, const boost::any& rData, UINT8 retryCount = 0, 
+			bool forceSynchronous = false);
+
+		/** Abort a previously issued request.
+		If the request is still waiting to be processed, it will be 
+		removed from the queue.
+		@param id The ID of the previously issued request.
+		*/
+		void abortRequest(RequestID id);
+
+		/** Abort all previously issued requests in a given channel.
+		Any requests still waiting to be processed of the given channel, will be 
+		removed from the queue.
+		@param channel The type of request to be aborted
+		*/
+		void abortRequestsByChannel(UINT16 channel);
+
+		/** Abort all previously issued requests.
+		Any requests still waiting to be processed will be removed from the queue.
+		Any requests that are being processed will still complete.
+		*/
+		void abortAllRequests();
+		
+		/** Set whether to pause further processing of any requests. 
+		If true, any further requests will simply be queued and not processed until
+		setPaused(false) is called. Any requests which are in the process of being
+		worked on already will still continue. 
+		*/
+		void setPaused(bool pause);
+		/// Return whether the queue is paused ie not sending more work to workers
+		bool isPaused() const;
+
+		/** Set whether to accept new requests or not. 
+		If true, requests are added to the queue as usual. If false, requests
+		are silently ignored until setRequestsAccepted(true) is called. 
+		*/
+		void setRequestsAccepted(bool accept);
+		/// Returns whether requests are being accepted right now
+		bool getRequestsAccepted() const;
+
+		/** Get the number of worker threads that this queue will start when 
+			startup() is called. 
+		*/
+		size_t getWorkerThreadCount() const;
+
+		/** Set the number of worker threads that this queue will start
+			when startup() is called (default 1).
+			Calling this will have no effect unless the queue is shut down and
+			restarted.
+		*/
+		void setWorkerThreadCount(size_t c);
+
+		/** Get a channel ID for a given channel name. 
+		@remarks
+			Channels are assigned on a first-come, first-served basis and are
+			not persistent across application instances. This method allows 
+			applications to not worry about channel clashes through manually
+			assigned channel numbers.
+		*/
+		UINT16 getChannel(const String& channelName);
+
+		/** Returns whether the queue is trying to shut down. */
+		bool isShuttingDown() const { return mShuttingDown; }
+
+	protected:
+		/** Process the responses in the queue.
+		@remarks
+			This method is public, and must be called from the main render
+			thread to 'pump' responses through the system. The method will usually
+			try to clear all responses before returning = 0; however, you can specify
+			a time limit on the response processing to limit the impact of
+			spikes in demand by calling setResponseProcessingTimeLimit.
+		*/
+		void processResponses(); 
+
+		void processRequestResponse(Request* r, bool synchronous);
+		Response* processRequest(Request* r);
+		void processResponse(Response* r);
+
+		/// Put a Request on the queue with a specific RequestID.
+		void addRequestWithRID(RequestID rid, UINT16 channel, const boost::any& rData, UINT8 retryCount);
+
+		/** To be called by a separate thread; will return immediately if there
+			are items in the queue, or suspend the thread until new items are added
+			otherwise.
+		*/
+		void waitForNextRequest();
+
+		/** Process the next request on the queue. 
+		@remarks
+			This method is public, but only intended for advanced users to call. 
+			The only reason you would call this, is if you were using your 
+			own thread to drive the worker processing. The thread calling this
+			method will be the thread used to call the RequestHandler.
+		*/
+		void processNextRequest();
+
+		/// Main function for each thread spawned.
+		void threadMain();
+
+		/// Notify workers about a new request. 
+		void notifyWorkers();
+	};
+}

+ 12 - 0
CamelotUtility/Source/CmString.cpp

@@ -486,6 +486,18 @@ namespace CamelotEngine {
 		return stream.str();
 	}
 	//-----------------------------------------------------------------------
+	String toString(unsigned long long int val, 
+		unsigned short width, char fill, std::ios::fmtflags flags)
+	{
+		stringstream stream;
+		stream.width(width);
+		stream.fill(fill);
+		if (flags)
+			stream.setf(flags);
+		stream << val;
+		return stream.str();
+	}
+	//-----------------------------------------------------------------------
 	String toString(const Vector2& val)
 	{
 		stringstream stream;

+ 1 - 0
CamelotUtility/Source/CmTime.cpp

@@ -27,6 +27,7 @@ namespace CamelotEngine
 
 		mFrameDelta = (float)((currentFrameTime - mLastFrameTime) * MICROSEC_TO_SEC);
 		mTimeSinceStart = (float)(currentFrameTime * MICROSEC_TO_SEC);
+		mTimeSinceStartMs = (UINT64)(currentFrameTime / 1000);
 
 		mLastFrameTime = currentFrameTime;
 

+ 620 - 0
CamelotUtility/Source/CmWorkQueue.cpp

@@ -0,0 +1,620 @@
+/*
+-----------------------------------------------------------------------------
+This source file is part of OGRE
+(Object-oriented Graphics Rendering Engine)
+For the latest info, see http://www.ogre3d.org/
+
+Copyright (c) 2000-2011 Torus Knot Software Ltd
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+-----------------------------------------------------------------------------
+*/
+#include "CmWorkQueue.h"
+#include "CmDebug.h"
+
+namespace CamelotEngine {
+	WorkQueue::WorkQueue() 
+		: mNextChannel(0) 		
+		, mWorkerThreadCount(1)
+		, mIsRunning(false)
+		, mWorkerFunc(0)
+		, mRequestCount(0)
+		, mPaused(false)
+		, mAcceptRequests(true)
+	{}
+	//---------------------------------------------------------------------
+	WorkQueue::~WorkQueue() 
+	{
+		shutdown();
+
+		for (RequestQueue::iterator i = mRequestQueue.begin(); i != mRequestQueue.end(); ++i)
+		{
+			delete (*i);
+		}
+		mRequestQueue.clear();
+
+		for (ResponseQueue::iterator i = mResponseQueue.begin(); i != mResponseQueue.end(); ++i)
+		{
+			delete (*i);
+		}
+		mResponseQueue.clear();
+	}
+	//---------------------------------------------------------------------
	/** Start the queue: clears the shutdown flag and spawns the worker threads.
	@param forceRestart If the queue is already running, shut it down and start
		it again; otherwise an already-running queue is left untouched.
	*/
	void WorkQueue::startup(bool forceRestart)
	{
		if (mIsRunning)
		{
			if (forceRestart)
				shutdown();
			else
				return;
		}

		mShuttingDown = false;

		// Single functor instance shared by all worker threads (calls threadMain()).
		mWorkerFunc = new WorkerFunc(this);

#if CM_THREAD_SUPPORT
		// NOTE(review): loop counter is UINT8 while mWorkerThreadCount is size_t;
		// counts above 255 would never terminate — confirm intended limit.
		for (UINT8 i = 0; i < mWorkerThreadCount; ++i)
		{
			CM_THREAD_CREATE(t, *mWorkerFunc);
			mWorkers.push_back(t);
		}
#endif

		mIsRunning = true;
	}
+	//---------------------------------------------------------------------
	/** Stop the queue: aborts all outstanding requests, wakes and joins every
		worker thread, and releases the shared worker functor. Safe to call
		when the queue is not running. */
	void WorkQueue::shutdown()
	{
		if( !mIsRunning )
			return;

		// Set the flag before waking workers so they exit threadMain's loop.
		mShuttingDown = true;
		abortAllRequests();
#if CM_THREAD_SUPPORT
		// wake all threads (they should check shutting down as first thing after wait)
		CM_THREAD_NOTIFY_ALL(mRequestCondition)

			// all our threads should have been woken now, so join
			for (WorkerThreadList::iterator i = mWorkers.begin(); i != mWorkers.end(); ++i)
			{
				(*i)->join();
				CM_THREAD_DESTROY(*i);
			}
			mWorkers.clear();
#endif

			if (mWorkerFunc)
			{
				delete mWorkerFunc;
				mWorkerFunc = 0;
			}

			mIsRunning = false;
	}
+	//---------------------------------------------------------------------
+	void WorkQueue::addRequestHandler(UINT16 channel, RequestHandler* rh)
+	{
+		CM_LOCK_RW_MUTEX_WRITE(mRequestHandlerMutex);
+
+		RequestHandlerListByChannel::iterator i = mRequestHandlers.find(channel);
+		if (i == mRequestHandlers.end())
+			i = mRequestHandlers.insert(RequestHandlerListByChannel::value_type(channel, RequestHandlerList())).first;
+
+		RequestHandlerList& handlers = i->second;
+		bool duplicate = false;
+		for (RequestHandlerList::iterator j = handlers.begin(); j != handlers.end(); ++j)
+		{
+			if ((*j)->getHandler() == rh)
+			{
+				duplicate = true;
+				break;
+			}
+		}
+		if (!duplicate)
+			handlers.push_back(RequestHandlerHolderPtr(new RequestHandlerHolder(rh)));
+
+	}
+	//---------------------------------------------------------------------
+	void WorkQueue::removeRequestHandler(UINT16 channel, RequestHandler* rh)
+	{
+		CM_LOCK_RW_MUTEX_WRITE(mRequestHandlerMutex);
+
+		RequestHandlerListByChannel::iterator i = mRequestHandlers.find(channel);
+		if (i != mRequestHandlers.end())
+		{
+			RequestHandlerList& handlers = i->second;
+			for (RequestHandlerList::iterator j = handlers.begin(); j != handlers.end(); ++j)
+			{
+				if ((*j)->getHandler() == rh)
+				{
+					// Disconnect - this will make it safe across copies of the list
+					// this is threadsafe and will wait for existing processes to finish
+					(*j)->disconnectHandler();
+					handlers.erase(j);	
+					break;
+				}
+			}
+
+		}
+
+	}
+	//---------------------------------------------------------------------
+	void WorkQueue::addResponseHandler(UINT16 channel, ResponseHandler* rh)
+	{
+		ResponseHandlerListByChannel::iterator i = mResponseHandlers.find(channel);
+		if (i == mResponseHandlers.end())
+			i = mResponseHandlers.insert(ResponseHandlerListByChannel::value_type(channel, ResponseHandlerList())).first;
+
+		ResponseHandlerList& handlers = i->second;
+		if (std::find(handlers.begin(), handlers.end(), rh) == handlers.end())
+			handlers.push_back(rh);
+	}
+	//---------------------------------------------------------------------
+	void WorkQueue::removeResponseHandler(UINT16 channel, ResponseHandler* rh)
+	{
+		ResponseHandlerListByChannel::iterator i = mResponseHandlers.find(channel);
+		if (i != mResponseHandlers.end())
+		{
+			ResponseHandlerList& handlers = i->second;
+			ResponseHandlerList::iterator j = std::find(
+				handlers.begin(), handlers.end(), rh);
+			if (j != handlers.end())
+				handlers.erase(j);
+
+		}
+	}
+	//---------------------------------------------------------------------
	/** Queue a new request.
	@param channel Channel to post the request to (see getChannel).
	@param rData Arbitrary payload handed to the matching RequestHandler.
	@param retryCount How many times a failed request is re-queued.
	@param forceSynchronous Process on the calling thread instead of a worker.
	@return The new request's ID, or 0 if the queue refused it (not accepting
		requests, or shutting down).
	*/
	WorkQueue::RequestID WorkQueue::addRequest(UINT16 channel, 
		const boost::any& rData, UINT8 retryCount, bool forceSynchronous)
	{
		Request* req = 0;
		RequestID rid = 0;

		{
			// lock to acquire rid and push request to the queue
			CM_LOCK_MUTEX(mRequestMutex)

				if (!mAcceptRequests || mShuttingDown)
					return 0;

			rid = ++mRequestCount;
			req = new Request(channel, rData, retryCount, rid);

#if CM_THREAD_SUPPORT
			// Asynchronous path: hand the request to a worker and return at once.
			if (!forceSynchronous)
			{
				mRequestQueue.push_back(req);
				notifyWorkers();
				return rid;
			}
#endif
		}

		// Synchronous path (or no thread support): process on the calling
		// thread, outside the request lock.
		processRequestResponse(req, true);

		return rid;

	}
+	//---------------------------------------------------------------------
	/** Flag a single queued or in-process request as aborted, by ID.
		The request is only marked; handlers are expected to observe the flag. */
	void WorkQueue::abortRequest(RequestID id)
	{
		CM_LOCK_MUTEX(mProcessMutex)

			// NOTE: While mProcessMutex is held, a pending request can live in
			// any of the request, process or response queues, so check all three.

			for (RequestQueue::iterator i = mProcessQueue.begin(); i != mProcessQueue.end(); ++i)
			{
				if ((*i)->getID() == id)
				{
					(*i)->abortRequest();
					break;
				}
			}

			{
				CM_LOCK_MUTEX(mRequestMutex)

					for (RequestQueue::iterator i = mRequestQueue.begin(); i != mRequestQueue.end(); ++i)
					{
						if ((*i)->getID() == id)
						{
							(*i)->abortRequest();
							break;
						}
					}
			}

			{
				CM_LOCK_MUTEX(mResponseMutex)

					for (ResponseQueue::iterator i = mResponseQueue.begin(); i != mResponseQueue.end(); ++i)
					{
						if( (*i)->getRequest()->getID() == id )
						{
							(*i)->abortRequest();
							break;
						}
					}
			}
	}
+	//---------------------------------------------------------------------
	/** Flag every queued or in-process request on the given channel as aborted.
		Unlike abortRequest, does not stop at the first match. */
	void WorkQueue::abortRequestsByChannel(UINT16 channel)
	{
		CM_LOCK_MUTEX(mProcessMutex)

			// Sweep all three queues (process, request, response) — a pending
			// request may be in any of them while mProcessMutex is held.
			for (RequestQueue::iterator i = mProcessQueue.begin(); i != mProcessQueue.end(); ++i)
			{
				if ((*i)->getChannel() == channel)
				{
					(*i)->abortRequest();
				}
			}

			{
				CM_LOCK_MUTEX(mRequestMutex)

					for (RequestQueue::iterator i = mRequestQueue.begin(); i != mRequestQueue.end(); ++i)
					{
						if ((*i)->getChannel() == channel)
						{
							(*i)->abortRequest();
						}
					}
			}

			{
				CM_LOCK_MUTEX(mResponseMutex)

					for (ResponseQueue::iterator i = mResponseQueue.begin(); i != mResponseQueue.end(); ++i)
					{
						if( (*i)->getRequest()->getChannel() == channel )
						{
							(*i)->abortRequest();
						}
					}
			}
	}
+	//---------------------------------------------------------------------
	/** Flag every outstanding request (queued, in process, or awaiting its
		response) as aborted. Called by shutdown(). */
	void WorkQueue::abortAllRequests()
	{
		CM_LOCK_MUTEX(mProcessMutex)

			for (RequestQueue::iterator i = mProcessQueue.begin(); i != mProcessQueue.end(); ++i)
			{
				(*i)->abortRequest();
			}

			{
				CM_LOCK_MUTEX(mRequestMutex)

					for (RequestQueue::iterator i = mRequestQueue.begin(); i != mRequestQueue.end(); ++i)
					{
						(*i)->abortRequest();
					}
			}

			{
				CM_LOCK_MUTEX(mResponseMutex)

					for (ResponseQueue::iterator i = mResponseQueue.begin(); i != mResponseQueue.end(); ++i)
					{
						(*i)->abortRequest();
					}
			}
	}
+	//---------------------------------------------------------------------
	/** Set the pause flag under the request mutex.
		NOTE(review): addRequest currently checks only mAcceptRequests and
		mShuttingDown, not mPaused — confirm where pausing takes effect. */
	void WorkQueue::setPaused(bool pause)
	{
		CM_LOCK_MUTEX(mRequestMutex)

		mPaused = pause;
	}
+	//---------------------------------------------------------------------
	/** Returns the pause flag. Unlocked read — callers get a snapshot. */
	bool WorkQueue::isPaused() const
	{
		return mPaused;
	}
+	//---------------------------------------------------------------------
	/** Enable/disable acceptance of new requests; checked by addRequest.
		Taken under the request mutex so it cannot race an in-flight add. */
	void WorkQueue::setRequestsAccepted(bool accept)
	{
		CM_LOCK_MUTEX(mRequestMutex)

		mAcceptRequests = accept;
	}
+	//---------------------------------------------------------------------
	/** Returns whether addRequest currently accepts work. Unlocked read. */
	bool WorkQueue::getRequestsAccepted() const
	{
		return mAcceptRequests;
	}
+	//---------------------------------------------------------------------
	/** Number of worker threads that startup() will spawn. */
	size_t WorkQueue::getWorkerThreadCount() const
	{
		return mWorkerThreadCount;
	}
+	//---------------------------------------------------------------------
	/** Set the worker thread count. Only startup() reads this, so a change
		takes effect on the next (re)start of the queue. */
	void WorkQueue::setWorkerThreadCount(size_t c)
	{
		mWorkerThreadCount = c;
	}
+	//---------------------------------------------------------------------
+	UINT16 WorkQueue::getChannel(const String& channelName)
+	{
+		CM_LOCK_MUTEX(mChannelMapMutex)
+
+		ChannelMap::iterator i = mChannelMap.find(channelName);
+		if (i == mChannelMap.end())
+		{
+			i = mChannelMap.insert(ChannelMap::value_type(channelName, mNextChannel++)).first;
+		}
+		return i->second;
+	}
+	//---------------------------------------------------------------------
	/** Drain the response queue, invoking registered ResponseHandlers for each
		response; intended to be pumped from the main thread. */
	void WorkQueue::processResponses() 
	{
		// keep going until we run out of responses
		while(true)
		{
			Response* response = 0;
			{
				// Hold the response lock only while popping; the handlers
				// themselves run with the lock released.
				CM_LOCK_MUTEX(mResponseMutex)

					if (mResponseQueue.empty())
						break; // exit loop
					else
					{
						response = mResponseQueue.front();
						mResponseQueue.pop_front();
					}
			}

			if (response)
			{
				processResponse(response);

				// Deleting the response also deletes its request (see ~Response).
				delete response;

			}
		}
	}
+	//---------------------------------------------------------------------
	/** Run a request through its handler and route the resulting response.
	@param r Request to process. Ownership is consumed here: it is deleted
		directly (no handler), via the response, or re-queued on retry.
	@param synchronous If true, response handlers run immediately on this
		thread; otherwise the response is queued for processResponses().
	*/
	void WorkQueue::processRequestResponse(Request* r, bool synchronous)
	{
		Response* response = processRequest(r);

		CM_LOCK_MUTEX(mProcessMutex)

			// Drop the request from the in-process list (it was placed there by
			// processNextRequest; synchronous requests are never listed).
			RequestQueue::iterator it;
		for( it = mProcessQueue.begin(); it != mProcessQueue.end(); ++it )
		{
			if( (*it) == r )
			{
				mProcessQueue.erase( it );
				break;
			}
		}

		if (response)
		{
			if (!response->succeeded())
			{
				// Failed, should we retry?
				const Request* req = response->getRequest();
				if (req->getRetryCount())
				{
					// Re-queue under the same RequestID with one fewer retry.
					addRequestWithRID(req->getID(), req->getChannel(), req->getData(), 
						req->getRetryCount() - 1);
					// discard response (this also deletes request)
					delete response;
					return;
				}
			}
			if (synchronous)
			{
				processResponse(response);
				delete response;
			}
			else
			{
				if( response->getRequest()->getAborted() )
				{
					// destroy response user data
					response->abortRequest();
				}
				// Queue response
				CM_LOCK_MUTEX(mResponseMutex)
					mResponseQueue.push_back(response);
				// no need to wake thread, this is processed by the main thread
			}

		}
		else
		{
			// no response, delete request
			gDebug().logWarning("warning: no handler processed request "
				+ toString(r->getID()) + ", channel " + toString(r->getChannel()));
			delete r;
		}

	}
	/** Hand a request to the first willing RequestHandler on its channel.
	@return The handler's Response, or null when no handler produced one. */
	WorkQueue::Response* WorkQueue::processRequest(Request* r)
	{
		RequestHandlerListByChannel handlerListCopy;
		{
			// lock the list only to make a copy of it, to maximise parallelism
			CM_LOCK_RW_MUTEX_READ(mRequestHandlerMutex);

			handlerListCopy = mRequestHandlers;

		}

		Response* response = 0;

		RequestHandlerListByChannel::iterator i = handlerListCopy.find(r->getChannel());
		if (i != handlerListCopy.end())
		{
			RequestHandlerList& handlers = i->second;
			// Newest registrations are tried first; first response wins.
			for (RequestHandlerList::reverse_iterator j = handlers.rbegin(); j != handlers.rend(); ++j)
			{
				// threadsafe call which tests canHandleRequest and calls it if so 
				response = (*j)->handleRequest(r, this);

				if (response)
					break;
			}
		}

		return response;

	}
+	//---------------------------------------------------------------------
+	void WorkQueue::processResponse(Response* r)
+	{
+		ResponseHandlerListByChannel::iterator i = mResponseHandlers.find(r->getRequest()->getChannel());
+		if (i != mResponseHandlers.end())
+		{
+			ResponseHandlerList& handlers = i->second;
+			for (ResponseHandlerList::reverse_iterator j = handlers.rbegin(); j != handlers.rend(); ++j)
+			{
+				if ((*j)->canHandleResponse(r, this))
+				{
+					(*j)->handleResponse(r, this);
+				}
+			}
+		}
+	}
+	//---------------------------------------------------------------------
	/** Re-queue a request under an existing RequestID; used by the retry path
		in processRequestResponse. Silently dropped during shutdown. */
	void WorkQueue::addRequestWithRID(WorkQueue::RequestID rid, UINT16 channel, 
		const boost::any& rData, UINT8 retryCount)
	{
		// lock to push request to the queue
		CM_LOCK_MUTEX(mRequestMutex)

			if (mShuttingDown)
				return;

		Request* req = new Request(channel, rData, retryCount, rid);

#if CM_THREAD_SUPPORT
		mRequestQueue.push_back(req);
		notifyWorkers();
#else
		// Without thread support everything runs synchronously on this thread.
		processRequestResponse(req, true);
#endif
	}
+	//---------------------------------------------------------------------
	/** Pop one request off the queue and process it; driven by worker threads
		(or by advanced users running their own worker loop). */
	void WorkQueue::processNextRequest()
	{
		Request* request = 0;
		{
			// scoped to only lock while retrieving the next request
			CM_LOCK_MUTEX(mProcessMutex)
			{
				CM_LOCK_MUTEX(mRequestMutex)

					// Move the request to the in-process list so abortRequest
					// can still reach it while it is being handled.
					if (!mRequestQueue.empty())
					{
						request = mRequestQueue.front();
						mRequestQueue.pop_front();
						mProcessQueue.push_back( request );
					}
			}
		}

		// Queue may have been empty (spurious wakeup or another worker won the
		// race) — in that case there is simply nothing to do.
		if (request)
		{
			processRequestResponse(request, false);
		}


	}
+	//---------------------------------------------------------------------
	/** Block the calling worker thread until a request may be available.
		May return with the queue still empty (notification race); callers
		must tolerate finding no work. */
	void WorkQueue::waitForNextRequest()
	{
#if CM_THREAD_SUPPORT
		// Lock; note that CM_THREAD_WAIT will free the lock
		CM_LOCK_MUTEX_NAMED(mRequestMutex, queueLock);
		if (mRequestQueue.empty())
		{
			// frees lock and suspends the thread
			CM_THREAD_WAIT(mRequestCondition, mRequestMutex, queueLock);
		}
		// When we get back here, it's because we've been notified 
		// and thus the thread has been woken up. Lock has also been
		// re-acquired, but we won't use it. It's safe to try processing and fail
		// if another thread has got in first and grabbed the request
#endif

	}
+	//---------------------------------------------------------------------
+	void WorkQueue::threadMain()
+	{
+		// default worker thread
+#if CM_THREAD_SUPPORT
+		// Spin forever until we're told to shut down
+		while (!isShuttingDown())
+		{
+			waitForNextRequest();
+			processNextRequest();
+		}
+#endif
+	}
+	//---------------------------------------------------------------------
	/** Wake a single worker blocked in waitForNextRequest(). */
	void WorkQueue::notifyWorkers()
	{
		// wake up waiting thread
		CM_THREAD_NOTIFY_ONE(mRequestCondition)
	}
+	//---------------------------------------------------------------------
	/** Construct a request; the payload is copied into the boost::any member
		and the aborted flag starts cleared. */
	WorkQueue::Request::Request(UINT16 channel, const boost::any& rData, UINT8 retry, RequestID rid)
		: mChannel(channel), mData(rData), mRetryCount(retry), mID(rid), mAborted(false)
	{

	}
+	//---------------------------------------------------------------------
	WorkQueue::Request::~Request()
	{
		// Nothing to release; the boost::any payload cleans up after itself.
	}
+	//---------------------------------------------------------------------
+	//---------------------------------------------------------------------
	/** Construct a response. Takes ownership of the originating request
		(deleted in ~Response). */
	WorkQueue::Response::Response(const Request* rq, bool success, const boost::any& data)
		: mRequest(rq), mSuccess(success), mData(data)
	{

	}
+	//---------------------------------------------------------------------
	WorkQueue::Response::~Response()
	{
		// The response owns its originating request.
		delete mRequest;
	}
+	//---------------------------------------------------------------------
	/** Thread entry point (functor form) — runs the owning queue's worker loop. */
	void WorkQueue::WorkerFunc::operator()()
	{
		mQueue->threadMain();
	}

	/** Thread entry point (named form) — identical to operator()(). */
	void WorkQueue::WorkerFunc::run()
	{
		mQueue->threadMain();
	}
+}