EACallback.cpp 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874
  1. ///////////////////////////////////////////////////////////////////////////////
  2. // Copyright (c) Electronic Arts Inc. All rights reserved.
  3. ///////////////////////////////////////////////////////////////////////////////
  4. ///////////////////////////////////////////////////////////////////////////////
  5. // To do: Deal with possible int64_t rollover in various parts of code below.
  6. ///////////////////////////////////////////////////////////////////////////////
  7. #include <EAStdC/internal/Config.h>
  8. #include <EAStdC/EACallback.h>
  9. #include <EAStdC/EARandomDistribution.h>
  10. #include <string.h>
  11. #include <EAAssert/eaassert.h>
  12. ///////////////////////////////////////////////////////////////////////////
  13. // Some stuff used to support the callbacks
  14. //
  15. #if EASTDC_THREADING_SUPPORTED
  16. #define EA_CALLBACK_PROCESSOR_MUTEX_LOCK() mMutex.Lock()
  17. #define EA_CALLBACK_PROCESSOR_MUTEX_UNLOCK() mMutex.Unlock()
  18. #else
  19. #define EA_CALLBACK_PROCESSOR_MUTEX_LOCK()
  20. #define EA_CALLBACK_PROCESSOR_MUTEX_UNLOCK()
  21. #endif
  22. // We define the following macros from CoreAllocator here to avoid a dependency on the MemoryMan package.
  23. // #ifndef EA_CB_CA_NEW
  24. // #define EA_CB_CA_NEW(Class, pAllocator, pName) new ((pAllocator)->Alloc(sizeof(Class), pName, 0, EA_ALIGN_OF(Class), 0)) Class
  25. // #endif
  26. //
  27. // #ifndef EA_CB_CA_DELETE
  28. // #define EA_CB_CA_DELETE(pObject, pAllocator) EA::StdC::delete_object(pObject, pAllocator)
  29. // #endif
  30. namespace EA
  31. {
  32. namespace StdC
  33. {
  34. ///////////////////////////////////////////////////////////////////////////////
  35. // Misc
  36. ///////////////////////////////////////////////////////////////////////////////
  37. // The use of standard min/max leads to compile errors sensitive
  38. // to the state of EASTDC_THREADING_SUPPORTED.
  39. template <class T>
  40. const T smin(const T& a, const T& b)
  41. {
  42. return a < b ? a : b;
  43. }
// Fallback callback function, installed by Callback::SetFunctionInfo when the
// user supplies a NULL function pointer (in which case the function argument is
// the Callback itself). It stops the owning Callback whenever it fires, so a
// Callback with no user function is effectively inert.
static void DefaultCallback(Callback* pCallback, void*, uint64_t, uint64_t)
{
    pCallback->Stop();
}
  48. /*
  49. /// delete_object
  50. ///
  51. /// Deletes an object created by create_object.
  52. /// See create_object for specifications and examples.
  53. ///
  54. template <typename T>
  55. inline void delete_object(T* pObject, Allocator::ICoreAllocator* pAllocator)
  56. {
  57. if(pObject) // As per the C++ standard, deletion of NULL results in a no-op.
  58. {
  59. pObject->~T();
  60. pAllocator->Free(pObject);
  61. }
  62. }
  63. */
  64. ///////////////////////////////////////////////////////////////////////////////
  65. // Callback
  66. ///////////////////////////////////////////////////////////////////////////////
// Default constructor.
// Period defaults to 1,000,000,000 and precision to 500,000; for the default
// kTypeTime type these counts are nanoseconds (the manager's stopwatch runs in
// kUnitsNanoseconds), i.e. one second with half-millisecond precision.
// No user function is installed, so SetFunctionInfo(NULL, ...) installs
// DefaultCallback, which stops this Callback when it fires.
Callback::Callback()
    : mPeriod(UINT64_C(1000000000))  // One second, in nanoseconds (for kTypeTime).
    , mPrecision(500000)             // 0.5 ms of allowed jitter around the period.
    , mpCallbackManager(NULL)
    , mpFunction(NULL)
    , mpFunctionArg(NULL)
    , mType(kTypeTime)
    , mbStarted(0)
    , mbOneShot(false)
    , mbEnableRefCount(false)
    , mNextCallbackEvent(0)
    , mLastCallbackEvent(0)
{
    EA_ASSERT((int64_t)mPeriod > 0); // Sanity checks: period/precision must fit in a positive int64_t.
    EA_ASSERT((int64_t)mPrecision > 0);
    SetFunctionInfo(NULL, NULL, false);
}
// Constructs a Callback with an explicit function, argument, period, precision
// (allowed jitter around the period), type (time/tick/user-event units), and
// optional reference counting. If pCallbackFunc is NULL, SetFunctionInfo
// installs DefaultCallback, which stops the Callback when it fires.
// When bEnableRefCount is true, SetFunctionInfo immediately sends a
// kMessageAddRef notification through the callback function.
Callback::Callback(CallbackFunctionType pCallbackFunc, void* pCallbackFuncArg, uint64_t period,
                   uint64_t precision, Type type, bool bEnableRefCount)
    : mPeriod(period)
    , mPrecision(precision)
    , mpCallbackManager(NULL)
    , mpFunction(NULL)
    , mpFunctionArg(NULL)
    , mType(type)
    , mbStarted(0)
    , mbOneShot(false)
    , mbEnableRefCount(false)
    , mNextCallbackEvent(0)
    , mLastCallbackEvent(0)
{
    EA_ASSERT((int64_t)mPeriod > 0); // Sanity checks: period/precision must fit in a positive int64_t.
    EA_ASSERT((int64_t)mPrecision > 0);
    SetFunctionInfo(pCallbackFunc, pCallbackFuncArg, bEnableRefCount);
}
  102. Callback::~Callback()
  103. {
  104. if((int32_t)mbStarted) // Cast to int32_t because mbStarted might be an atomic int class.
  105. Stop();
  106. }
  107. // Sets the function which is called when the time/tick count expire. Note that if the
  108. // in asynch mode, the callback could occur in a different thread from the thread that
  109. // started the timer.
  110. bool Callback::SetFunctionInfo(Callback::CallbackFunctionType pCallbackFunction, void* pCallbackArgument, bool bEnableRefCount)
  111. {
  112. if(pCallbackFunction)
  113. {
  114. mpFunction = pCallbackFunction;
  115. mpFunctionArg = pCallbackArgument;
  116. }
  117. else
  118. {
  119. mpFunction = DefaultCallback;
  120. mpFunctionArg = this;
  121. }
  122. if(bEnableRefCount)
  123. {
  124. mbEnableRefCount = true;
  125. AddRefCallback(); // This will AddRef the pointer if it is non-NULL.
  126. }
  127. return true;
  128. }
  129. void Callback::GetFunctionInfo(Callback::CallbackFunctionType& pCallbackFunction, void*& pCallbackArgument) const
  130. {
  131. pCallbackFunction = mpFunction;
  132. pCallbackArgument = mpFunctionArg;
  133. }
  134. void Callback::Call(uint64_t absoluteValue, uint64_t deltaValue)
  135. {
  136. EA_ASSERT(mpFunction);
  137. if(mpFunction)
  138. mpFunction(this, mpFunctionArg, absoluteValue, deltaValue);
  139. }
  140. uint64_t Callback::GetPeriod() const
  141. {
  142. return mPeriod;
  143. }
  144. bool Callback::SetPeriod(uint64_t nPeriod)
  145. {
  146. EA_ASSERT((int64_t)nPeriod > 0); // Sanity checks.
  147. mPeriod = nPeriod;
  148. return true;
  149. }
  150. uint64_t Callback::GetPrecision() const
  151. {
  152. return mPrecision;
  153. }
  154. bool Callback::SetPrecision(uint64_t nPrecision)
  155. {
  156. EA_ASSERT((int64_t)nPrecision >= 0);
  157. mPrecision = nPrecision;
  158. return true;
  159. }
  160. bool Callback::Start(ICallbackManager* pCallbackManager, bool bOneShot)
  161. {
  162. if(!(int32_t)mbStarted) // Cast to int32_t because mbStarted might be an atomic int class.
  163. {
  164. if(!pCallbackManager)
  165. pCallbackManager = GetCallbackManager();
  166. mpCallbackManager = pCallbackManager;
  167. if(pCallbackManager)
  168. mbStarted = (mpCallbackManager->Add(this, bOneShot) ? 1 : 0);
  169. }
  170. return ((int32_t)mbStarted != 0);
  171. }
  172. void Callback::Stop()
  173. {
  174. if((int32_t)mbStarted) // Cast to int32_t because mbStarted might be an atomic int class.
  175. {
  176. mpCallbackManager->Remove(this);
  177. mbStarted = 0;
  178. // Note that the following may result in the Callback object (this)
  179. // being deleted, due to a reference count decrement on itself.
  180. // Thus it is important that this be the last thing done in this function.
  181. if(mbEnableRefCount)
  182. ReleaseCallback();
  183. }
  184. }
  185. bool Callback::IsStarted() const
  186. {
  187. return ((int32_t)mbStarted != 0); // Cast to int32_t because mbStarted might be an atomic int class.
  188. }
  189. bool Callback::SetType(Type type)
  190. {
  191. mType = type;
  192. return true;
  193. }
  194. Callback::Type Callback::GetType() const
  195. {
  196. return mType;
  197. }
  198. void Callback::AddRefCallback()
  199. {
  200. Call(kMessageAddRef, 0);
  201. }
  202. void Callback::ReleaseCallback()
  203. {
  204. Call(kMessageRelease, 0);
  205. }
  206. ///////////////////////////////////////////////////////////////////////////////
  207. // CallbackVector
  208. ///////////////////////////////////////////////////////////////////////////////
// Constructs an empty vector whose storage initially points at the embedded
// mLocalBuffer, so small callback counts require no heap allocation.
CallbackManager::CallbackVector::CallbackVector()
    : mpBegin(mLocalBuffer),
      mpEnd(mLocalBuffer),
      mpCapacity(mLocalBuffer + EAArrayCount(mLocalBuffer))
{
#if defined(EA_DEBUG)
    // Zero the unused storage in debug builds to make stale entries obvious.
    memset(mLocalBuffer, 0, sizeof(mLocalBuffer));
#endif
}
  218. CallbackManager::CallbackVector::~CallbackVector()
  219. {
  220. if(mpBegin != mLocalBuffer)
  221. EASTDC_DELETE[] mpBegin; // It's OK if this is NULL; C++ allows it.
  222. }
  223. CallbackManager::CallbackVector::iterator CallbackManager::CallbackVector::erase(value_type* pIterator)
  224. {
  225. EA_ASSERT((pIterator >= mpBegin) && (pIterator < mpEnd));
  226. const size_t moveCount = (size_t)((mpEnd - pIterator) - 1);
  227. memmove(pIterator, pIterator + 1, moveCount * sizeof(value_type));
  228. --mpEnd;
  229. #if defined(EA_DEBUG)
  230. memset(mpEnd, 0, sizeof(value_type));
  231. #endif
  232. return pIterator;
  233. }
  234. CallbackManager::CallbackVector::iterator CallbackManager::CallbackVector::push_back(value_type value)
  235. {
  236. if((mpEnd + 1) >= mpCapacity) // If there is insufficient existing capacity...
  237. {
  238. const size_t oldSize = (size_t)(mpEnd - mpBegin);
  239. const size_t oldCapacity = (size_t)(mpCapacity - mpBegin);
  240. const size_t newCapacity = (oldCapacity >= 2) ? (oldCapacity * 2) : 4;
  241. value_type* pBegin = EASTDC_NEW("EACallback") value_type[newCapacity];
  242. EA_ASSERT(pBegin);
  243. memcpy(pBegin, mpBegin, oldSize * sizeof(value_type));
  244. if(mpBegin != mLocalBuffer)
  245. EASTDC_DELETE[] mpBegin;
  246. mpBegin = pBegin;
  247. mpEnd = pBegin + oldSize;
  248. mpCapacity = pBegin + newCapacity;
  249. }
  250. *mpEnd = value;
  251. return ++mpEnd;
  252. }
  253. ///////////////////////////////////////////////////////////////////////////////
  254. // CallbackManager
  255. ///////////////////////////////////////////////////////////////////////////////
// Process-wide default manager, used by Callback::Start when the caller passes
// no manager explicitly. Nothing here manages its lifetime (no AddRef/Release
// is performed on it); the caller of SetCallbackManager is responsible for
// keeping the object alive while it is registered.
static ICallbackManager* gpCallbackManager = NULL;

// Returns the registered global callback manager, or NULL if none was set.
EASTDC_API ICallbackManager* GetCallbackManager()
{
    return gpCallbackManager;
}

// Registers the global callback manager. Pass NULL to unregister.
EASTDC_API void SetCallbackManager(ICallbackManager* pCallbackManager)
{
    gpCallbackManager = pCallbackManager;
}
// Constructs a manager in the not-yet-initialized state; Init must be called
// before the manager is used.
CallbackManager::CallbackManager()
    : mCallbackArray()
    , mStopwatch(EA::StdC::Stopwatch::kUnitsNanoseconds) // All time values in this class are nanoseconds.
    , mTickCounter(0)
    , mUserEventCounter(0)
    , mbInitialized(false)
    , mbRunning(false)
    , mbAsync(false)
    , mRandom()
#if EASTDC_THREADING_SUPPORTED
    , mNSecPerTick(10000000)                  // Initial estimate (10 ms per tick); refined periodically in Run().
    , mNSecPerTickLastTimeMeasured(INT64_MIN) // INT64_MIN forces the first Run() pass to take a measurement.
    , mNSecPerTickLastTickMeasured(INT64_MIN)
    , mNextCallbackEventTime(0)
    , mNextCallbackEventTick(0)
    , mMutex()
    , mThread()
    , mbThreadStarted(0)
    , mThreadParam()
#endif
{
    // mCallbackArray.reserve(8); Disabled because it already has a built-in mLocalBuffer.
}
// Destructor. The explicitly qualified call ensures this class's Shutdown runs
// (no virtual dispatch to a derived override during destruction).
CallbackManager::~CallbackManager()
{
    CallbackManager::Shutdown();
}
// Starts the manager. bAsync selects background-thread operation (requires
// EASTDC_THREADING_SUPPORTED); bAsyncStart additionally starts the thread right
// away rather than lazily on the first Add. Returns true if the manager is
// running upon return. Calling Init on an already-running manager is a no-op
// that returns true.
// NOTE(review): mbInitialized is set in the constructor but never updated in
// this file — possibly vestigial; confirm against the class declaration.
bool CallbackManager::Init(bool bAsync, bool bAsyncStart
#if EASTDC_THREADING_SUPPORTED
    , EA::Thread::ThreadParameters threadParam
#endif
    )
{
    if(!mbRunning)
    {
        mbAsync = bAsync;
        mbRunning = true;
#if EASTDC_THREADING_SUPPORTED
        mThreadParam = threadParam;
#else
        EA_ASSERT(!mbAsync); // Async mode requires threading support.
        mbAsync = false;     // The best we can do. Should never happen though.
#endif
        mStopwatch.Restart();
        if(mbAsync && bAsyncStart)
            mbRunning = StartThread(); // If StartThread fails then set mbRunning to false.
    }
    return mbRunning;
}
// Stops the manager: halts the async thread (if any), stops the stopwatch, and
// Stops every still-registered Callback so each can do its cleanup (including
// ref-count Release). Safe to call redundantly. Each Callback::Stop is made
// with the mutex temporarily unlocked, because Stop re-enters
// CallbackManager::Remove, which takes the same mutex.
void CallbackManager::Shutdown()
{
    EA_CALLBACK_PROCESSOR_MUTEX_LOCK();
    if(mbRunning)
    {
        mbRunning = false; // Set this to false so no further calls to CallbackManager will proceed.
        StopThread();
        mStopwatch.Stop();
        // Stop all running Callbacks. This allows them to do cleanup.
        for(size_t i = 0, iEnd = mCallbackArray.size(); i < iEnd; ++i)
        {
            if(mCallbackArray[i]) // It's possible this could be NULL, because stopped callbacks are merely NULLed in the mCallbackArray.
            {
                Callback* pCallback = mCallbackArray[i]; // Make a temp because we will be unlocking our mutex below.
                mCallbackArray[i] = NULL; // Leave it as NULL for now. We'll actually erase the entry later during our update cycle. Our code is fine with NULL pointers and it's useful to keep them because their slots can be re-used.
                EA_CALLBACK_PROCESSOR_MUTEX_UNLOCK();
                pCallback->Stop(); // Re-enters Remove; finds the slot already NULL and returns false.
                EA_CALLBACK_PROCESSOR_MUTEX_LOCK();
            }
        }
        mCallbackArray.clear();
    }
    EA_CALLBACK_PROCESSOR_MUTEX_UNLOCK();
}
// Returns true if the thread is running upon return of this function.
// Will return true if the thread was already running upon calling this function.
// The mbThreadStarted flag is flipped atomically (SetValueConditional) so that
// concurrent callers cannot start the thread twice.
bool CallbackManager::StartThread()
{
#if EASTDC_THREADING_SUPPORTED
    if(mbAsync)
    {
        if(mbThreadStarted.SetValueConditional(1, 0)) // If the thread was previously 0 and we set it to 1...
        {
            mThreadParam.mpName = "CallbackManager"; // Some platforms have an extremely limited thread name buffer and will clip this.
            EA::Thread::ThreadId threadId = mThread.Begin(RunStatic, static_cast<CallbackManager*>(this), &mThreadParam);
            EA_ASSERT(threadId != EA::Thread::kThreadIdInvalid);
            return (threadId != EA::Thread::kThreadIdInvalid);
        }
        return true; // Else the thread was already running...
    }
#endif
    // Synchronous mode (or no threading support): there is no thread to start.
    return false;
}
// Stops the async update thread, if it is running, and blocks until it exits.
// Run()'s while(mbRunning) loop observes the flag cleared by Shutdown; the Wake
// merely shortens the thread's current sleep. No-op if the thread isn't running.
void CallbackManager::StopThread()
{
#if EASTDC_THREADING_SUPPORTED
    if(mbThreadStarted.SetValueConditional(0, 1)) // If the thread was previously 1 and we set it to 0...
    {
        mThread.Wake(); // Should be a semaphore or condition variable signal.
        mThread.WaitForEnd();
    }
#endif
}
  367. void CallbackManager::Update()
  368. {
  369. int64_t curTick = 0;
  370. int64_t curTime = 0;
  371. int64_t curUserEvent = 0;
  372. UpdateInternal(curTick, curTime, curUserEvent);
  373. EA_UNUSED(curTick);
  374. EA_UNUSED(curTime);
  375. EA_UNUSED(curUserEvent);
  376. }
// Pairs a current counter value (time, tick, or user-event count) with a
// pointer to the "next soonest event" accumulator UpdateInternal maintains for
// that counter (used by the async thread to compute how long to sleep).
struct TempUnitsInfo
{
    int64_t mUnits;             // Current value of the counter.
    int64_t* mpNextEventUnits;  // Receives the minimum next-event value for this counter.
};
// Core update pass: advances the tick counter, snapshots the current time and
// user-event count into the out-params, fires every due callback, and
// reschedules each non-one-shot callback (applying randomized "precision"
// jitter). NULL slots left by Remove are compacted here.
// Runs entirely under the manager mutex, including the user callback Call —
// see the deadlock caveat in the comment below.
void CallbackManager::UpdateInternal(int64_t& curTick, int64_t& curTime, int64_t& curUserEvent)
{
    EA_CALLBACK_PROCESSOR_MUTEX_LOCK();
    EA_ASSERT(mbRunning); // The user must have called CallbackManager::Init before using it.

    curTick = ++mTickCounter;
    curTime = (int64_t)mStopwatch.GetElapsedTime();
    curUserEvent = (int64_t)mUserEventCounter;

    if(!mCallbackArray.empty())
    {
        // Every time Update is called, we need to call the elapsed Callbacks and then
        // figure out the next time we'll need to do a callback.
        // Scan our list and call the callbacks as needed.
        int64_t nextCallBackUserEvent = 0; // User-event callbacks have no thread-sleep accumulator; this is a discard target.
        TempUnitsInfo timeInfo = { curTime, &mNextCallbackEventTime };
        TempUnitsInfo tickInfo = { curTick, &mNextCallbackEventTick };
        TempUnitsInfo userEventInfo = { curUserEvent, &nextCallBackUserEvent };

        for(size_t i = 0; i < mCallbackArray.size(); ++i) // Intentionally re-evaluate size every time through, as it could change dynamically below. Intentionally use < instead of !=, as size could decrease by any amount during the execution below.
        {
            Callback* pCallback = mCallbackArray[i];
            TempUnitsInfo* pTUI = NULL;

            if(pCallback)
            {
                // Select the unit domain (time/tick/user-event) this callback is measured in.
                switch(pCallback->GetType())
                {
                    case Callback::kTypeTime:
                        pTUI = &timeInfo;
                        break;
                    case Callback::kTypeTick:
                        pTUI = &tickInfo;
                        break;
                    default:
                    case Callback::kTypeUserEvent:
                        pTUI = &userEventInfo;
                        break;
                }
                EA_ASSERT(pTUI != NULL);

                if(pTUI->mUnits >= pCallback->mNextCallbackEvent) // If it's time to call this callback...
                {
                    // We have to beware that this Call might result in the callee manipulating
                    // CallbackManager (us) and change our state, particularly with respect to
                    // starting and stopping callbacks (including this callback).
                    // As of this writing, our mutex is locked during the Call. This leaves an
                    // opportunity for threading deadlock. To consider: See if we can unlock the
                    // mutex before calling this. We would need to re-evaluate some of our state
                    // upon return if we did this. Maybe have a member variable called mHasChanged
                    // to make this more efficient.
                    pCallback->Call((uint64_t)pTUI->mUnits, (uint64_t)(pTUI->mUnits - pCallback->mLastCallbackEvent));

                    if((i < mCallbackArray.size()) && (mCallbackArray[i] == pCallback)) // If the callback wasn't stopped and removed during the Call to the user above...
                    {
                        pCallback->mLastCallbackEvent = pTUI->mUnits;

                        if(pCallback->mbOneShot)
                            pCallback->Stop();
                        else
                        {
                            const int32_t precision = (int32_t)pCallback->GetPrecision();
                            const int64_t period = (int64_t)pCallback->GetPeriod();

                            pCallback->mNextCallbackEvent = (pTUI->mUnits + period);

                            if(precision) // To consider; For kTypeTime it might be worth testing for (precision > 100) or similar instead here.
                            {
                                // An alternative to the use of random below would be a load minimization
                                // strategy with quite a bit more involved implementation.
                                const int32_t delta = RandomInt32UniformRange(mRandom, -precision, precision - 1); // Note by Paul P: I added this -1 so unit tests could pass, but it doesn't seem right.
                                const int64_t nextCallbackEvent = pCallback->mNextCallbackEvent + delta;

                                if(nextCallbackEvent > pTUI->mUnits) // Ignore precision adjustments that make it so the next event is prior to the current one.
                                    pCallback->mNextCallbackEvent = nextCallbackEvent;
                            }
                            EA_ASSERT(pCallback->mNextCallbackEvent >= pTUI->mUnits); // Assert that the next event is not backwards in time.

                            if(mbAsync)
                            {
                                if(*pTUI->mpNextEventUnits > pCallback->mNextCallbackEvent) // Update mNextCallbackEventTime or mNextCallbackEventTick to reflect what is the
                                    *pTUI->mpNextEventUnits = pCallback->mNextCallbackEvent; // minimum time until the next event. We'll use that in the thread Run function to
                            }                                                                // know how long to sleep/wait before it needs to do another callback.
                        }
                    }
                }
            } // if(pCallback)
            else
            {
                // Compact a slot NULLed by Remove/Shutdown.
                // NOTE(review): erase shifts the tail left but the loop's ++i then skips
                // the element that moved into slot i; it is picked up on the next Update
                // pass. Confirm this one-pass skip is acceptable.
                mCallbackArray.erase(&mCallbackArray[i]);
            }
        } // for(...)
    } // if(!mCallbackArray.empty())

    EA_CALLBACK_PROCESSOR_MUTEX_UNLOCK();
}
// Thread function. Loops while the manager is running: performs one update
// pass, periodically re-measures the nanoseconds-per-tick rate (used to
// convert tick deltas to sleep times), then sleeps until roughly half the time
// to the soonest pending event. Returns 0 when mbRunning is cleared.
// NOTE(review): mCallbackArray.empty() is read here without taking the mutex;
// worst case appears to be a mis-sized sleep rather than corruption — confirm.
intptr_t CallbackManager::Run()
{
#if EASTDC_THREADING_SUPPORTED
    EA_ASSERT(mbThreadStarted.GetValue() != 0);

    while(mbRunning)
    {
        int64_t curTick;
        int64_t curTime;
        int64_t curUserEvent;

        UpdateInternal(curTick, curTime, curUserEvent);

        // Update msec/tick value if needed.
        // Note by Paul Pedriana: I don't like this nanosecond tick calculation logic. IMO it's too delicate.
        const int64_t kNSecPerTickFrequency = UINT64_C(50000000); // in nsec -- how often to update mNSecPerTick

        if(curTime > (mNSecPerTickLastTimeMeasured + kNSecPerTickFrequency))
        {
            // Average rate since the last measurement. The first pass divides by a
            // huge denominator (the INT64_MIN initial values), yielding ~0 and then
            // establishing a real baseline.
            mNSecPerTick = ((double)curTime - (double)mNSecPerTickLastTimeMeasured) / ((double)curTick - (double)mNSecPerTickLastTickMeasured);
            mNSecPerTickLastTimeMeasured = curTime;
            mNSecPerTickLastTickMeasured = curTick;
        }

        // Come up with sleeping time and put the thread to sleep.
        // To do: We need to switch this to an alternative synchronization primitive, as sleeping isn't very great.
#if EA_WINAPI_FAMILY_PARTITION(EA_WINAPI_PARTITION_DESKTOP) // If using a Microsoft OS where Wake is supported and we can sleep for a long time.
        int64_t timeToNextEventMs = INT_MAX;
#else
        int64_t timeToNextEventMs = 50;
#endif

        if(!mCallbackArray.empty()) // If there are any active callbacks...
        {
            // If the recorded next-event markers are stale (in the past), push them
            // forward by an arbitrary margin so the deltas below stay positive.
            if(mNextCallbackEventTime < curTime)
                mNextCallbackEventTime = curTime + 100000000; // 100 milliseconds worth of nanoseconds. The number is arbitrary. Probably should be a smaller value for faster machines.
            if(mNextCallbackEventTick < curTick)
                mNextCallbackEventTick = curTick + 1000; // Arbitrary.

            //const int64_t absoluteTime = (int64_t)mStopwatch.GetElapsedTime(); // Nanoseconds
            const int64_t timeDelta = mNextCallbackEventTime - curTime;                             // Nanoseconds
            const int64_t tickDelta = (int64_t)((mNextCallbackEventTick - curTick) * mNSecPerTick); // Nanoseconds
            const int64_t minDelta = smin(timeDelta, tickDelta);                                    // Nanoseconds

            timeToNextEventMs = (minDelta / 1000000) / 2; // Convert minDelta to milliseconds (what ThreadSleep wants) and half it in order to oversample the callback time (is this necessary?)

            if(timeToNextEventMs < 0)
                timeToNextEventMs = 0; // simply yield.
        }

        // Question by Paul Pedriana upon examining this code: Why is this implemented using thread sleeping instead of a conventional
        // thread synchronization primitive such as a Semaphore? At the least this should be a semaphore wait with a timeout set
        // to be equal to timeToNextEventMs. Then instead of waking a thread with Thread::Wake, we can simply signal the semaphore. This is
        // expecially so because some platforms don't support waking threads from sleep.
        if(timeToNextEventMs == 0)
            Thread::ThreadSleep(EA::Thread::kTimeoutYield);
        else
            Thread::ThreadSleep(EA::Thread::ThreadTime(timeToNextEventMs));

        //#if defined(EA_DEBUG)
        //    static int64_t lastSleepTimes[200];
        //    static int lastSleepTimeIndex = 0;
        //    if(lastSleepTimeIndex == 200)
        //        lastSleepTimeIndex = 0;
        //    lastSleepTimes[lastSleepTimeIndex++] = timeToNextEventMs;
        //#endif
    }
#endif
    return 0;
}
// Registers pCallback with the manager and schedules its first event at
// (current units + period), with optional randomized precision jitter.
// Re-uses a NULL slot in the array when one exists; adding a callback that is
// already present is a no-op that still returns true. In async mode this also
// lazily starts the worker thread and wakes it if an event is already due.
// Returns true on success (false only if the manager isn't running or the
// thread failed to start).
bool CallbackManager::Add(Callback* pCallback, bool bOneShot)
{
    bool bReturnValue = false;

    EA_ASSERT(pCallback != NULL);
    EA_CALLBACK_PROCESSOR_MUTEX_LOCK();

    if(mbRunning)
    {
        size_t found = 0xffffffff;       // Index of pCallback if already present.
        size_t found_empty = 0xffffffff; // Index of the first reusable NULL slot.

        // See if pCallback is already added and while doing so see if there is an existing empty slot if it's not already added.
        for(size_t i = 0, iEnd = mCallbackArray.size(); i < iEnd; ++i)
        {
            Callback* pCallbackTemp = mCallbackArray[i];

            if(pCallbackTemp == pCallback)
            {
                found = i;
                break;
            }
            else if(!pCallbackTemp && (found_empty == 0xffffffff))
                found_empty = i;
        }

        if(found == 0xffffffff) // If pCallback isn't already present...
        {
            if(found_empty == 0xffffffff) // If no empty slot was found...
                mCallbackArray.push_back(pCallback);
            else
                mCallbackArray[found_empty] = pCallback;

            int64_t units = 0; // This is the current time, current tick, or current user event number.
            int64_t nextUnits = 0;
            int64_t* pNextEventUnits = &nextUnits; // Discard target for kTypeUserEvent, which has no sleep accumulator.
            int32_t precision = (int32_t)pCallback->GetPrecision();
            int64_t period = (int64_t)pCallback->GetPeriod();

            switch(pCallback->GetType())
            {
                case Callback::kTypeTime: // If the callback triggers after a set amount of time...
                    units = (int64_t)mStopwatch.GetElapsedTime();
                    pNextEventUnits = &mNextCallbackEventTime;
                    break;
                case Callback::kTypeTick: // If the callback triggers after a set amount of ticks...
                    units = (int64_t)mTickCounter;
                    pNextEventUnits = &mNextCallbackEventTick;
                    break;
                case Callback::kTypeUserEvent: // If the callback triggers after a manually user-generated event...
                default:
                    break;
            }

            pCallback->mbOneShot = bOneShot;
            pCallback->mNextCallbackEvent = units + period;
            pCallback->mLastCallbackEvent = units;

            if(precision)
            {
                const int32_t delta = RandomInt32UniformRange(mRandom, -precision, precision - 1); // Note by Paul P: I added this -1 so unit tests could pass, but it doesn't seem right.
                const int64_t nextCallbackEvent = pCallback->mNextCallbackEvent + delta;

                if(nextCallbackEvent > pCallback->mNextCallbackEvent) // Ignore precision adjustments that make it so the next event is prior to the current one.
                    pCallback->mNextCallbackEvent = nextCallbackEvent;
            }
            EA_ASSERT(pCallback->mNextCallbackEvent >= units); // Assert that the next event is not backwards in time.

            if(mbAsync)
            {
                // Note by Paul P: Is the following really supposed to use a < comparison? I didn't originally write this.
                // It works but I'm not sure it's the best way to do this. It seems to me that the next
                // event units should by default be a very long time from now and newly added Callback
                // objects should reduce that time. I think that this code here works because while we set a
                // mNextCallbackEventTime/mNextCallbackEventTick to be further in the future, the RunInternal function
                // will loop over Callback objects and select the actual soonest one. If we switched the > here to a >
                // then we would need to make sure we initially set mNextCallbackEventTime/mNextCallbackEventTick to be
                // a high value instead of it's initial default of 0, because if it starts as zero it will get stuck
                // there permanently because it never gets updated (I tried this so I know it happens like so).
                if(*pNextEventUnits < pCallback->mNextCallbackEvent)
                    *pNextEventUnits = pCallback->mNextCallbackEvent;
            }
        }

        bReturnValue = true; // This might turn false below in case of an error.

#if EASTDC_THREADING_SUPPORTED
        if(mbAsync) // If we run in async (background thread) mode...
        {
            if(mbThreadStarted.GetValue() == 0)  // If the thread hasn't been started yet...
                bReturnValue = StartThread();    // Starts it if not already started. Is there something useful we could do with the return value of this?

            if((mNextCallbackEventTime < (int64_t)mStopwatch.GetElapsedTime()) || // If we need to wake the thread now to do a callback...
               (mNextCallbackEventTick < (int64_t)mTickCounter))
            {
                // Note: Some platforms don't have the capability of waking a sleeping thread.
                // This code should be using a semaphore instead of thread sleep/wake.
                mThread.Wake();
            }
        }
#endif
    }

    EA_CALLBACK_PROCESSOR_MUTEX_UNLOCK();
    return bReturnValue;
}
// Unregisters pCallback from the manager. The array slot is NULLed rather than
// erased so it can be re-used by a later Add; UpdateInternal compacts NULL
// slots during its scan. Returns true if the callback was found (and has been
// Stopped by the time this returns).
bool CallbackManager::Remove(Callback* pCallback)
{
    bool bRemoved = false;

    EA_CALLBACK_PROCESSOR_MUTEX_LOCK();
    if(pCallback)
    {
        if(mbRunning)
        {
            for(size_t i = 0, iEnd = mCallbackArray.size(); i < iEnd; ++i)
            {
                if(mCallbackArray[i] == pCallback)
                {
                    mCallbackArray[i] = NULL; // We might re-use this slot later, so we don't take the CPU cycles to free the array right now.
                    bRemoved = true;
                    break;
                }
            }
        }
    }
    EA_CALLBACK_PROCESSOR_MUTEX_UNLOCK();

    // It's important to call this outside our mutex lock.
    // NOTE(review): Stop() re-enters Remove(); that recursion terminates because
    // the slot was set to NULL above, so the second lookup fails. However, on the
    // user-initiated Callback::Stop() -> Remove() -> Stop() path, the re-entrant
    // Stop sees mbStarted still set and can perform ReleaseCallback a second
    // time when mbEnableRefCount is enabled — verify this ref-count path.
    if(bRemoved)
        pCallback->Stop();

    return bRemoved;
}
#if EASTDC_THREADING_SUPPORTED
    // Grants direct access to the async update thread object (e.g. for callers
    // that want to adjust its priority or query its status).
    EA::Thread::Thread& CallbackManager::GetThread()
    {
        return mThread;
    }

    // Manually acquires the manager's mutex (the same one the update/Add/Remove
    // paths use). Every Lock must be balanced with an Unlock.
    void CallbackManager::Lock()
    {
        mMutex.Lock();
    }

    // Releases the mutex acquired by Lock.
    void CallbackManager::Unlock()
    {
        mMutex.Unlock();
    }
#endif
// Signals one user-generated event: increments the counter that kTypeUserEvent
// callbacks are measured against and, in async mode, pokes the worker thread so
// any now-due callbacks can be serviced promptly. Callbacks do not fire from
// inside this call; they fire on the next Update/Run pass.
void CallbackManager::OnUserEvent()
{
    // To consider: Call the Update function here if callbacks waiting on user events are due.
    // The problem with doing this is that it makes OnUserEvent have side effects
    // which may be beyond what the user wants or expects.
#if EASTDC_THREADING_SUPPORTED
    // Note: Some platforms don't have the capability to wake a sleeping thread.
    // This code should be using a semaphore instead of thread sleep/wake.
    if(mThread.GetStatus() == EA::Thread::Thread::kStatusRunning)
        mThread.Wake();
#endif
    ++mUserEventCounter;
}
  670. uint64_t CallbackManager::GetTime()
  671. {
  672. return mStopwatch.GetElapsedTime();
  673. }
  674. } // namespace StdC
  675. } // namespace EA