BsProfilerCPU.cpp 28 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988
  1. //********************************** Banshee Engine (www.banshee3d.com) **************************************************//
  2. //**************** Copyright (c) 2016 Marko Pintera ([email protected]). All rights reserved. **********************//
  3. #include "BsProfilerCPU.h"
  4. #include "BsDebug.h"
  5. #include "BsPlatform.h"
  6. namespace BansheeEngine
  7. {
  8. ProfilerCPU::Timer::Timer()
  9. {
  10. time = 0.0f;
  11. }
  12. void ProfilerCPU::Timer::start()
  13. {
  14. startTime = getCurrentTime();
  15. }
  16. void ProfilerCPU::Timer::stop()
  17. {
  18. time += getCurrentTime() - startTime;
  19. }
  20. void ProfilerCPU::Timer::reset()
  21. {
  22. time = 0.0f;
  23. }
  24. inline double ProfilerCPU::Timer::getCurrentTime()
  25. {
  26. return PlatformUtility::queryPerformanceTimerMs();
  27. }
  28. ProfilerCPU::TimerPrecise::TimerPrecise()
  29. {
  30. cycles = 0;
  31. }
  32. void ProfilerCPU::TimerPrecise::start()
  33. {
  34. startCycles = getNumCycles();
  35. }
  36. void ProfilerCPU::TimerPrecise::stop()
  37. {
  38. cycles += getNumCycles() - startCycles;
  39. }
  40. void ProfilerCPU::TimerPrecise::reset()
  41. {
  42. cycles = 0;
  43. }
// Reads the CPU timestamp counter. CPUID is issued first as a serializing
// instruction so that earlier instructions cannot be reordered past the
// RDTSC read.
inline UINT64 ProfilerCPU::TimerPrecise::getNumCycles()
{
#if BS_COMPILER == BS_COMPILER_GNUC
	// GCC path: inline assembly. CPUID clobbers all four result registers.
	asm volatile("cpuid" : : : "%eax", "%ebx", "%ecx", "%edx" );
	UINT32 __a,__d;
	// RDTSC returns the 64-bit counter split across EDX:EAX.
	asm volatile("rdtsc" : "=a" (__a), "=d" (__d));
	return ( UINT64(__a) | UINT64(__d) << 32 );
#else
	// MSVC path: intrinsic equivalents of the same CPUID + RDTSC sequence.
	int a[4];
	int b = 0;
	__cpuid(a, b);
	return __rdtsc();
#endif
}
  58. ProfilerCPU::ProfileData::ProfileData(FrameAlloc* alloc)
  59. :samples(alloc)
  60. { }
  61. void ProfilerCPU::ProfileData::beginSample()
  62. {
  63. memAllocs = MemoryCounter::getNumAllocs();
  64. memFrees = MemoryCounter::getNumFrees();
  65. timer.reset();
  66. timer.start();
  67. }
  68. void ProfilerCPU::ProfileData::endSample()
  69. {
  70. timer.stop();
  71. UINT64 numAllocs = MemoryCounter::getNumAllocs() - memAllocs;
  72. UINT64 numFrees = MemoryCounter::getNumFrees() - memFrees;
  73. samples.push_back(ProfileSample(timer.time, numAllocs, numFrees));
  74. }
  75. void ProfilerCPU::ProfileData::resumeLastSample()
  76. {
  77. timer.start();
  78. samples.erase(samples.end() - 1);
  79. }
  80. ProfilerCPU::PreciseProfileData::PreciseProfileData(FrameAlloc* alloc)
  81. :samples(alloc)
  82. { }
  83. void ProfilerCPU::PreciseProfileData::beginSample()
  84. {
  85. memAllocs = MemoryCounter::getNumAllocs();
  86. memFrees = MemoryCounter::getNumFrees();
  87. timer.reset();
  88. timer.start();
  89. }
  90. void ProfilerCPU::PreciseProfileData::endSample()
  91. {
  92. timer.stop();
  93. UINT64 numAllocs = MemoryCounter::getNumAllocs() - memAllocs;
  94. UINT64 numFrees = MemoryCounter::getNumFrees() - memFrees;
  95. samples.push_back(PreciseProfileSample(timer.cycles, numAllocs, numFrees));
  96. }
  97. void ProfilerCPU::PreciseProfileData::resumeLastSample()
  98. {
  99. timer.start();
  100. samples.erase(samples.end() - 1);
  101. }
// Thread-local pointer to the calling thread's profiling state; remains null
// until the thread's first beginThread() call creates it.
BS_THREADLOCAL ProfilerCPU::ThreadInfo* ProfilerCPU::ThreadInfo::activeThread = nullptr;
  103. ProfilerCPU::ThreadInfo::ThreadInfo()
  104. :isActive(false), rootBlock(nullptr), frameAlloc(1024 * 512), activeBlocks(nullptr)
  105. {
  106. }
// Starts sampling on this thread, creating the root block (named _name) on
// first use. Must be paired with a later end() call.
void ProfilerCPU::ThreadInfo::begin(const char* _name)
{
	if(isActive)
	{
		LOGWRN("Profiler::beginThread called on a thread that was already being sampled");
		return;
	}

	// The root block persists across begin()/end() pairs until reset().
	if(rootBlock == nullptr)
		rootBlock = getBlock(_name);

	activeBlock = ActiveBlock(ActiveSamplingType::Basic, rootBlock);
	// The active-block stack itself is allocated from the thread's frame
	// allocator and released again in end().
	if (activeBlocks == nullptr)
		activeBlocks = frameAlloc.alloc<Stack<ActiveBlock, StdFrameAlloc<ActiveBlock>>>(&frameAlloc);
	activeBlocks->push(activeBlock);

	rootBlock->basic.beginSample();
	isActive = true;
}
// Ends sampling on this thread, closing the active sample and force-closing
// any samples the user left open.
void ProfilerCPU::ThreadInfo::end()
{
	// Close whichever sample is currently active (root or nested).
	if(activeBlock.type == ActiveSamplingType::Basic)
		activeBlock.block->basic.endSample();
	else
		activeBlock.block->precise.endSample();
	activeBlocks->pop();

	// NOTE(review): this check happens only after the sample above was already
	// closed; if end() is called while not active, activeBlock.block may be
	// null and the calls above would dereference it — confirm callers always
	// pair begin()/end().
	if(!isActive)
		LOGWRN("Profiler::endThread called on a thread that isn't being sampled.");

	// Unwind any unclosed nested samples so the data stays internally consistent.
	if (activeBlocks->size() > 0)
	{
		LOGWRN("Profiler::endThread called but not all sample pairs were closed. Sampling data will not be valid.");
		while (activeBlocks->size() > 0)
		{
			ActiveBlock& curBlock = activeBlocks->top();
			if(curBlock.type == ActiveSamplingType::Basic)
				curBlock.block->basic.endSample();
			else
				curBlock.block->precise.endSample();
			activeBlocks->pop();
		}
	}

	isActive = false;
	activeBlock = ActiveBlock();

	// The stack lives in the frame allocator; release it until the next
	// begin() call re-creates it.
	frameAlloc.dealloc(activeBlocks);
	activeBlocks = nullptr;
}
  150. void ProfilerCPU::ThreadInfo::reset()
  151. {
  152. if(isActive)
  153. end();
  154. if(rootBlock != nullptr)
  155. releaseBlock(rootBlock);
  156. rootBlock = nullptr;
  157. frameAlloc.clear(); // Note: This never actually frees memory
  158. }
  159. ProfilerCPU::ProfiledBlock* ProfilerCPU::ThreadInfo::getBlock(const char* name)
  160. {
  161. ProfiledBlock* block = frameAlloc.alloc<ProfiledBlock>(&frameAlloc);
  162. block->name = (char*)frameAlloc.alloc(((UINT32)strlen(name) + 1) * sizeof(char));
  163. strcpy(block->name, name);
  164. return block;
  165. }
  166. void ProfilerCPU::ThreadInfo::releaseBlock(ProfiledBlock* block)
  167. {
  168. frameAlloc.dealloc((UINT8*)block->name);
  169. frameAlloc.dealloc(block);
  170. }
  171. ProfilerCPU::ProfiledBlock::ProfiledBlock(FrameAlloc* alloc)
  172. :children(alloc), basic(alloc), precise(alloc)
  173. { }
ProfilerCPU::ProfiledBlock::~ProfiledBlock()
{
	// Children are freed through the current thread's frame allocator.
	// NOTE(review): this assumes the destructor runs on the thread that owns
	// these blocks (activeThread is thread-local) — confirm with callers.
	ThreadInfo* thread = ThreadInfo::activeThread;
	for(auto& child : children)
		thread->releaseBlock(child);
	children.clear();
}
  181. ProfilerCPU::ProfiledBlock* ProfilerCPU::ProfiledBlock::findChild(const char* name) const
  182. {
  183. for(auto& child : children)
  184. {
  185. if(strcmp(child->name, name) == 0)
  186. return child;
  187. }
  188. return nullptr;
  189. }
  190. ProfilerCPU::ProfilerCPU()
  191. :mBasicTimerOverhead(0.0), mPreciseTimerOverhead(0), mBasicSamplingOverheadMs(0.0), mPreciseSamplingOverheadCycles(0),
  192. mBasicSamplingOverheadCycles(0), mPreciseSamplingOverheadMs(0.0)
  193. {
  194. // TODO - We only estimate overhead on program start. It might be better to estimate it each time beginThread is called,
  195. // and keep separate values per thread.
  196. estimateTimerOverhead();
  197. }
ProfilerCPU::~ProfilerCPU()
{
	// Reset the calling thread's data, then free the per-thread state of
	// every thread that ever called beginThread().
	reset();

	BS_LOCK_MUTEX(mThreadSync);
	for(auto& threadInfo : mActiveThreads)
		bs_delete<ThreadInfo, ProfilerAlloc>(threadInfo);
}
  205. void ProfilerCPU::beginThread(const char* name)
  206. {
  207. ThreadInfo* thread = ThreadInfo::activeThread;
  208. if(thread == nullptr)
  209. {
  210. ThreadInfo::activeThread = bs_new<ThreadInfo, ProfilerAlloc>();
  211. thread = ThreadInfo::activeThread;
  212. {
  213. BS_LOCK_MUTEX(mThreadSync);
  214. mActiveThreads.push_back(thread);
  215. }
  216. }
  217. thread->begin(name);
  218. }
void ProfilerCPU::endThread()
{
	// Deliberately no null check here so the sample can be closed with as
	// little extra overhead as possible. Calling this without a matching
	// beginThread() on this thread dereferences a null pointer.
	ThreadInfo::activeThread->end();
}
  224. void ProfilerCPU::beginSample(const char* name)
  225. {
  226. ThreadInfo* thread = ThreadInfo::activeThread;
  227. if(thread == nullptr || !thread->isActive)
  228. {
  229. beginThread("Unknown");
  230. thread = ThreadInfo::activeThread;
  231. }
  232. ProfiledBlock* parent = thread->activeBlock.block;
  233. ProfiledBlock* block = nullptr;
  234. if(parent != nullptr)
  235. block = parent->findChild(name);
  236. if(block == nullptr)
  237. {
  238. block = thread->getBlock(name);
  239. if(parent != nullptr)
  240. parent->children.push_back(block);
  241. else
  242. thread->rootBlock->children.push_back(block);
  243. }
  244. thread->activeBlock = ActiveBlock(ActiveSamplingType::Basic, block);
  245. thread->activeBlocks->push(thread->activeBlock);
  246. block->basic.beginSample();
  247. }
  248. void ProfilerCPU::endSample(const char* name)
  249. {
  250. ThreadInfo* thread = ThreadInfo::activeThread;
  251. ProfiledBlock* block = thread->activeBlock.block;
  252. #if BS_DEBUG_MODE
  253. if(block == nullptr)
  254. {
  255. LOGWRN("Mismatched CPUProfiler::endSample. No beginSample was called.");
  256. return;
  257. }
  258. if(thread->activeBlock.type == ActiveSamplingType::Precise)
  259. {
  260. LOGWRN("Mismatched CPUProfiler::endSample. Was expecting Profiler::endSamplePrecise.");
  261. return;
  262. }
  263. if(strcmp(block->name, name) != 0)
  264. {
  265. LOGWRN("Mismatched CPUProfiler::endSample. Was expecting \"" + String(block->name) +
  266. "\" but got \"" + String(name) + "\". Sampling data will not be valid.");
  267. return;
  268. }
  269. #endif
  270. block->basic.endSample();
  271. thread->activeBlocks->pop();
  272. if (!thread->activeBlocks->empty())
  273. thread->activeBlock = thread->activeBlocks->top();
  274. else
  275. thread->activeBlock = ActiveBlock();
  276. }
  277. void ProfilerCPU::beginSamplePrecise(const char* name)
  278. {
  279. // Note: There is a (small) possibility a context switch will happen during this measurement in which case result will be skewed.
  280. // Increasing thread priority might help. This is generally only a problem with code that executes a long time (10-15+ ms - depending on OS quant length)
  281. ThreadInfo* thread = ThreadInfo::activeThread;
  282. if(thread == nullptr || !thread->isActive)
  283. beginThread("Unknown");
  284. ProfiledBlock* parent = thread->activeBlock.block;
  285. ProfiledBlock* block = nullptr;
  286. if(parent != nullptr)
  287. block = parent->findChild(name);
  288. if(block == nullptr)
  289. {
  290. block = thread->getBlock(name);
  291. if(parent != nullptr)
  292. parent->children.push_back(block);
  293. else
  294. thread->rootBlock->children.push_back(block);
  295. }
  296. thread->activeBlock = ActiveBlock(ActiveSamplingType::Precise, block);
  297. thread->activeBlocks->push(thread->activeBlock);
  298. block->precise.beginSample();
  299. }
  300. void ProfilerCPU::endSamplePrecise(const char* name)
  301. {
  302. ThreadInfo* thread = ThreadInfo::activeThread;
  303. ProfiledBlock* block = thread->activeBlock.block;
  304. #if BS_DEBUG_MODE
  305. if(block == nullptr)
  306. {
  307. LOGWRN("Mismatched Profiler::endSamplePrecise. No beginSamplePrecise was called.");
  308. return;
  309. }
  310. if(thread->activeBlock.type == ActiveSamplingType::Basic)
  311. {
  312. LOGWRN("Mismatched CPUProfiler::endSamplePrecise. Was expecting Profiler::endSample.");
  313. return;
  314. }
  315. if (strcmp(block->name, name) != 0)
  316. {
  317. LOGWRN("Mismatched Profiler::endSamplePrecise. Was expecting \"" + String(block->name) +
  318. "\" but got \"" + String(name) + "\". Sampling data will not be valid.");
  319. return;
  320. }
  321. #endif
  322. block->precise.endSample();
  323. thread->activeBlocks->pop();
  324. if (!thread->activeBlocks->empty())
  325. thread->activeBlock = thread->activeBlocks->top();
  326. else
  327. thread->activeBlock = ActiveBlock();
  328. }
  329. void ProfilerCPU::reset()
  330. {
  331. ThreadInfo* thread = ThreadInfo::activeThread;
  332. if(thread != nullptr)
  333. thread->reset();
  334. }
  335. CPUProfilerReport ProfilerCPU::generateReport()
  336. {
  337. CPUProfilerReport report;
  338. ThreadInfo* thread = ThreadInfo::activeThread;
  339. if(thread == nullptr)
  340. return report;
  341. if(thread->isActive)
  342. thread->end();
  343. // We need to separate out basic and precise data and form two separate hierarchies
  344. if(thread->rootBlock == nullptr)
  345. return report;
  346. struct TempEntry
  347. {
  348. TempEntry(ProfiledBlock* _parentBlock, UINT32 _entryIdx)
  349. :parentBlock(_parentBlock), entryIdx(_entryIdx)
  350. { }
  351. ProfiledBlock* parentBlock;
  352. UINT32 entryIdx;
  353. ProfilerVector<UINT32> childIndexes;
  354. };
  355. ProfilerVector<CPUProfilerBasicSamplingEntry> basicEntries;
  356. ProfilerVector<CPUProfilerPreciseSamplingEntry> preciseEntries;
  357. // Fill up flatHierarchy array in a way so we always process children before parents
  358. ProfilerStack<UINT32> todo;
  359. ProfilerVector<TempEntry> flatHierarchy;
  360. UINT32 entryIdx = 0;
  361. todo.push(entryIdx);
  362. flatHierarchy.push_back(TempEntry(thread->rootBlock, entryIdx));
  363. entryIdx++;
  364. while(!todo.empty())
  365. {
  366. UINT32 curDataIdx = todo.top();
  367. ProfiledBlock* curBlock = flatHierarchy[curDataIdx].parentBlock;
  368. todo.pop();
  369. for(auto& child : curBlock->children)
  370. {
  371. flatHierarchy[curDataIdx].childIndexes.push_back(entryIdx);
  372. todo.push(entryIdx);
  373. flatHierarchy.push_back(TempEntry(child, entryIdx));
  374. entryIdx++;
  375. }
  376. }
  377. // Calculate sampling data for all entries
  378. basicEntries.resize(flatHierarchy.size());
  379. preciseEntries.resize(flatHierarchy.size());
  380. for(auto iter = flatHierarchy.rbegin(); iter != flatHierarchy.rend(); ++iter)
  381. {
  382. TempEntry& curData = *iter;
  383. ProfiledBlock* curBlock = curData.parentBlock;
  384. CPUProfilerBasicSamplingEntry* entryBasic = &basicEntries[curData.entryIdx];
  385. CPUProfilerPreciseSamplingEntry* entryPrecise = &preciseEntries[curData.entryIdx];
  386. // Calculate basic data
  387. entryBasic->data.name = String(curBlock->name);
  388. entryBasic->data.memAllocs = 0;
  389. entryBasic->data.memFrees = 0;
  390. entryBasic->data.totalTimeMs = 0.0;
  391. entryBasic->data.maxTimeMs = 0.0;
  392. for(auto& sample : curBlock->basic.samples)
  393. {
  394. entryBasic->data.totalTimeMs += sample.time;
  395. entryBasic->data.maxTimeMs = std::max(entryBasic->data.maxTimeMs, sample.time);
  396. entryBasic->data.memAllocs += sample.numAllocs;
  397. entryBasic->data.memFrees += sample.numFrees;
  398. }
  399. entryBasic->data.numCalls = (UINT32)curBlock->basic.samples.size();
  400. if(entryBasic->data.numCalls > 0)
  401. entryBasic->data.avgTimeMs = entryBasic->data.totalTimeMs / entryBasic->data.numCalls;
  402. double totalChildTime = 0.0;
  403. for(auto& childIdx : curData.childIndexes)
  404. {
  405. CPUProfilerBasicSamplingEntry* childEntry = &basicEntries[childIdx];
  406. totalChildTime += childEntry->data.totalTimeMs;
  407. childEntry->data.pctOfParent = (float)(childEntry->data.totalTimeMs / entryBasic->data.totalTimeMs);
  408. entryBasic->data.estimatedOverheadMs += childEntry->data.estimatedOverheadMs;
  409. }
  410. entryBasic->data.estimatedOverheadMs += curBlock->basic.samples.size() * mBasicSamplingOverheadMs;
  411. entryBasic->data.estimatedOverheadMs += curBlock->precise.samples.size() * mPreciseSamplingOverheadMs;
  412. entryBasic->data.totalSelfTimeMs = entryBasic->data.totalTimeMs - totalChildTime;
  413. if(entryBasic->data.numCalls > 0)
  414. entryBasic->data.avgSelfTimeMs = entryBasic->data.totalSelfTimeMs / entryBasic->data.numCalls;
  415. entryBasic->data.estimatedSelfOverheadMs = mBasicTimerOverhead;
  416. // Calculate precise data
  417. entryPrecise->data.name = String(curBlock->name);
  418. entryPrecise->data.memAllocs = 0;
  419. entryPrecise->data.memFrees = 0;
  420. entryPrecise->data.totalCycles = 0;
  421. entryPrecise->data.maxCycles = 0;
  422. for(auto& sample : curBlock->precise.samples)
  423. {
  424. entryPrecise->data.totalCycles += sample.cycles;
  425. entryPrecise->data.maxCycles = std::max(entryPrecise->data.maxCycles, sample.cycles);
  426. entryPrecise->data.memAllocs += sample.numAllocs;
  427. entryPrecise->data.memFrees += sample.numFrees;
  428. }
  429. entryPrecise->data.numCalls = (UINT32)curBlock->precise.samples.size();
  430. if(entryPrecise->data.numCalls > 0)
  431. entryPrecise->data.avgCycles = entryPrecise->data.totalCycles / entryPrecise->data.numCalls;
  432. UINT64 totalChildCycles = 0;
  433. for(auto& childIdx : curData.childIndexes)
  434. {
  435. CPUProfilerPreciseSamplingEntry* childEntry = &preciseEntries[childIdx];
  436. totalChildCycles += childEntry->data.totalCycles;
  437. childEntry->data.pctOfParent = childEntry->data.totalCycles / (float)entryPrecise->data.totalCycles;
  438. entryPrecise->data.estimatedOverhead += childEntry->data.estimatedOverhead;
  439. }
  440. entryPrecise->data.estimatedOverhead += curBlock->precise.samples.size() * mPreciseSamplingOverheadCycles;
  441. entryPrecise->data.estimatedOverhead += curBlock->basic.samples.size() * mBasicSamplingOverheadCycles;
  442. entryPrecise->data.totalSelfCycles = entryPrecise->data.totalCycles - totalChildCycles;
  443. if(entryPrecise->data.numCalls > 0)
  444. entryPrecise->data.avgSelfCycles = entryPrecise->data.totalSelfCycles / entryPrecise->data.numCalls;
  445. entryPrecise->data.estimatedSelfOverhead = mPreciseTimerOverhead;
  446. }
  447. // Prune empty basic entries
  448. ProfilerStack<UINT32> finalBasicHierarchyTodo;
  449. ProfilerStack<UINT32> parentBasicEntryIndexes;
  450. ProfilerVector<TempEntry> newBasicEntries;
  451. finalBasicHierarchyTodo.push(0);
  452. entryIdx = 0;
  453. parentBasicEntryIndexes.push(entryIdx);
  454. newBasicEntries.push_back(TempEntry(nullptr, entryIdx));
  455. entryIdx++;
  456. while(!finalBasicHierarchyTodo.empty())
  457. {
  458. UINT32 parentEntryIdx = parentBasicEntryIndexes.top();
  459. parentBasicEntryIndexes.pop();
  460. UINT32 curEntryIdx = finalBasicHierarchyTodo.top();
  461. TempEntry& curEntry = flatHierarchy[curEntryIdx];
  462. finalBasicHierarchyTodo.pop();
  463. for(auto& childIdx : curEntry.childIndexes)
  464. {
  465. finalBasicHierarchyTodo.push(childIdx);
  466. CPUProfilerBasicSamplingEntry& basicEntry = basicEntries[childIdx];
  467. if(basicEntry.data.numCalls > 0)
  468. {
  469. newBasicEntries.push_back(TempEntry(nullptr, childIdx));
  470. newBasicEntries[parentEntryIdx].childIndexes.push_back(entryIdx);
  471. parentBasicEntryIndexes.push(entryIdx);
  472. entryIdx++;
  473. }
  474. else
  475. parentBasicEntryIndexes.push(parentEntryIdx);
  476. }
  477. }
  478. if(newBasicEntries.size() > 0)
  479. {
  480. ProfilerVector<CPUProfilerBasicSamplingEntry*> finalBasicEntries;
  481. report.mBasicSamplingRootEntry = basicEntries[newBasicEntries[0].entryIdx];
  482. finalBasicEntries.push_back(&report.mBasicSamplingRootEntry);
  483. finalBasicHierarchyTodo.push(0);
  484. while(!finalBasicHierarchyTodo.empty())
  485. {
  486. UINT32 curEntryIdx = finalBasicHierarchyTodo.top();
  487. finalBasicHierarchyTodo.pop();
  488. TempEntry& curEntry = newBasicEntries[curEntryIdx];
  489. CPUProfilerBasicSamplingEntry* basicEntry = finalBasicEntries[curEntryIdx];
  490. basicEntry->childEntries.resize(curEntry.childIndexes.size());
  491. UINT32 idx = 0;
  492. for(auto& childIdx : curEntry.childIndexes)
  493. {
  494. TempEntry& childEntry = newBasicEntries[childIdx];
  495. basicEntry->childEntries[idx] = basicEntries[childEntry.entryIdx];
  496. finalBasicEntries.push_back(&(basicEntry->childEntries[idx]));
  497. finalBasicHierarchyTodo.push(childIdx);
  498. idx++;
  499. }
  500. }
  501. }
  502. // Prune empty precise entries
  503. ProfilerStack<UINT32> finalPreciseHierarchyTodo;
  504. ProfilerStack<UINT32> parentPreciseEntryIndexes;
  505. ProfilerVector<TempEntry> newPreciseEntries;
  506. finalPreciseHierarchyTodo.push(0);
  507. entryIdx = 0;
  508. parentPreciseEntryIndexes.push(entryIdx);
  509. newPreciseEntries.push_back(TempEntry(nullptr, entryIdx));
  510. entryIdx++;
  511. while(!finalPreciseHierarchyTodo.empty())
  512. {
  513. UINT32 parentEntryIdx = parentPreciseEntryIndexes.top();
  514. parentPreciseEntryIndexes.pop();
  515. UINT32 curEntryIdx = finalPreciseHierarchyTodo.top();
  516. TempEntry& curEntry = flatHierarchy[curEntryIdx];
  517. finalPreciseHierarchyTodo.pop();
  518. for(auto& childIdx : curEntry.childIndexes)
  519. {
  520. finalPreciseHierarchyTodo.push(childIdx);
  521. CPUProfilerPreciseSamplingEntry& preciseEntry = preciseEntries[childIdx];
  522. if(preciseEntry.data.numCalls > 0)
  523. {
  524. newPreciseEntries.push_back(TempEntry(nullptr, childIdx));
  525. newPreciseEntries[parentEntryIdx].childIndexes.push_back(entryIdx);
  526. parentPreciseEntryIndexes.push(entryIdx);
  527. entryIdx++;
  528. }
  529. else
  530. parentPreciseEntryIndexes.push(parentEntryIdx);
  531. }
  532. }
  533. if(newPreciseEntries.size() > 0)
  534. {
  535. ProfilerVector<CPUProfilerPreciseSamplingEntry*> finalPreciseEntries;
  536. report.mPreciseSamplingRootEntry = preciseEntries[newPreciseEntries[0].entryIdx];
  537. finalPreciseEntries.push_back(&report.mPreciseSamplingRootEntry);
  538. finalPreciseHierarchyTodo.push(0);
  539. while(!finalPreciseHierarchyTodo.empty())
  540. {
  541. UINT32 curEntryIdx = finalPreciseHierarchyTodo.top();
  542. finalPreciseHierarchyTodo.pop();
  543. TempEntry& curEntry = newPreciseEntries[curEntryIdx];
  544. CPUProfilerPreciseSamplingEntry* preciseEntry = finalPreciseEntries[curEntryIdx];
  545. preciseEntry->childEntries.resize(curEntry.childIndexes.size());
  546. UINT32 idx = 0;
  547. for(auto& childIdx : curEntry.childIndexes)
  548. {
  549. TempEntry& childEntry = newPreciseEntries[childIdx];
  550. preciseEntry->childEntries[idx] = preciseEntries[childEntry.entryIdx];
  551. finalPreciseEntries.push_back(&preciseEntry->childEntries.back());
  552. finalPreciseHierarchyTodo.push(childIdx);
  553. idx++;
  554. }
  555. }
  556. }
  557. return report;
  558. }
  559. void ProfilerCPU::estimateTimerOverhead()
  560. {
  561. // Get an idea of how long timer calls and RDTSC takes
  562. const UINT32 reps = 1000, sampleReps = 20;
  563. mBasicTimerOverhead = 1000000.0;
  564. mPreciseTimerOverhead = 1000000;
  565. for (UINT32 tries = 0; tries < 20; tries++)
  566. {
  567. Timer timer;
  568. for (UINT32 i = 0; i < reps; i++)
  569. {
  570. timer.start();
  571. timer.stop();
  572. }
  573. double avgTime = double(timer.time)/double(reps);
  574. if (avgTime < mBasicTimerOverhead)
  575. mBasicTimerOverhead = avgTime;
  576. TimerPrecise timerPrecise;
  577. for (UINT32 i = 0; i < reps; i++)
  578. {
  579. timerPrecise.start();
  580. timerPrecise.stop();
  581. }
  582. UINT64 avgCycles = timerPrecise.cycles/reps;
  583. if (avgCycles < mPreciseTimerOverhead)
  584. mPreciseTimerOverhead = avgCycles;
  585. }
  586. mBasicSamplingOverheadMs = 1000000.0;
  587. mPreciseSamplingOverheadMs = 1000000.0;
  588. mBasicSamplingOverheadCycles = 1000000;
  589. mPreciseSamplingOverheadCycles = 1000000;
  590. for (UINT32 tries = 0; tries < 3; tries++)
  591. {
  592. /************************************************************************/
  593. /* AVERAGE TIME IN MS FOR BASIC SAMPLING */
  594. /************************************************************************/
  595. Timer timerA;
  596. timerA.start();
  597. beginThread("Main");
  598. // Two different cases that can effect performance, one where
  599. // sample already exists and other where new one needs to be created
  600. for (UINT32 i = 0; i < sampleReps; i++)
  601. {
  602. beginSample("TestAvg1");
  603. endSample("TestAvg1");
  604. beginSample("TestAvg2");
  605. endSample("TestAvg2");
  606. beginSample("TestAvg3");
  607. endSample("TestAvg3");
  608. beginSample("TestAvg4");
  609. endSample("TestAvg4");
  610. beginSample("TestAvg5");
  611. endSample("TestAvg5");
  612. beginSample("TestAvg6");
  613. endSample("TestAvg6");
  614. beginSample("TestAvg7");
  615. endSample("TestAvg7");
  616. beginSample("TestAvg8");
  617. endSample("TestAvg8");
  618. beginSample("TestAvg9");
  619. endSample("TestAvg9");
  620. beginSample("TestAvg10");
  621. endSample("TestAvg10");
  622. }
  623. for (UINT32 i = 0; i < sampleReps * 5; i++)
  624. {
  625. beginSample(("TestAvg#" + toString(i)).c_str());
  626. endSample(("TestAvg#" + toString(i)).c_str());
  627. }
  628. endThread();
  629. timerA.stop();
  630. reset();
  631. double avgTimeBasic = double(timerA.time)/double(sampleReps * 10 + sampleReps * 5) - mBasicTimerOverhead;
  632. if (avgTimeBasic < mBasicSamplingOverheadMs)
  633. mBasicSamplingOverheadMs = avgTimeBasic;
  634. /************************************************************************/
  635. /* AVERAGE CYCLES FOR BASIC SAMPLING */
  636. /************************************************************************/
  637. TimerPrecise timerPreciseA;
  638. timerPreciseA.start();
  639. beginThread("Main");
  640. // Two different cases that can effect performance, one where
  641. // sample already exists and other where new one needs to be created
  642. for (UINT32 i = 0; i < sampleReps; i++)
  643. {
  644. beginSample("TestAvg1");
  645. endSample("TestAvg1");
  646. beginSample("TestAvg2");
  647. endSample("TestAvg2");
  648. beginSample("TestAvg3");
  649. endSample("TestAvg3");
  650. beginSample("TestAvg4");
  651. endSample("TestAvg4");
  652. beginSample("TestAvg5");
  653. endSample("TestAvg5");
  654. beginSample("TestAvg6");
  655. endSample("TestAvg6");
  656. beginSample("TestAvg7");
  657. endSample("TestAvg7");
  658. beginSample("TestAvg8");
  659. endSample("TestAvg8");
  660. beginSample("TestAvg9");
  661. endSample("TestAvg9");
  662. beginSample("TestAvg10");
  663. endSample("TestAvg10");
  664. }
  665. for (UINT32 i = 0; i < sampleReps * 5; i++)
  666. {
  667. beginSample(("TestAvg#" + toString(i)).c_str());
  668. endSample(("TestAvg#" + toString(i)).c_str());
  669. }
  670. endThread();
  671. timerPreciseA.stop();
  672. reset();
  673. UINT64 avgCyclesBasic = timerPreciseA.cycles/(sampleReps * 10 + sampleReps * 5) - mPreciseTimerOverhead;
  674. if (avgCyclesBasic < mBasicSamplingOverheadCycles)
  675. mBasicSamplingOverheadCycles = avgCyclesBasic;
  676. /************************************************************************/
  677. /* AVERAGE TIME IN MS FOR PRECISE SAMPLING */
  678. /************************************************************************/
  679. Timer timerB;
  680. timerB.start();
  681. beginThread("Main");
  682. // Two different cases that can effect performance, one where
  683. // sample already exists and other where new one needs to be created
  684. for (UINT32 i = 0; i < sampleReps; i++)
  685. {
  686. beginSamplePrecise("TestAvg1");
  687. endSamplePrecise("TestAvg1");
  688. beginSamplePrecise("TestAvg2");
  689. endSamplePrecise("TestAvg2");
  690. beginSamplePrecise("TestAvg3");
  691. endSamplePrecise("TestAvg3");
  692. beginSamplePrecise("TestAvg4");
  693. endSamplePrecise("TestAvg4");
  694. beginSamplePrecise("TestAvg5");
  695. endSamplePrecise("TestAvg5");
  696. beginSamplePrecise("TestAvg6");
  697. endSamplePrecise("TestAvg6");
  698. beginSamplePrecise("TestAvg7");
  699. endSamplePrecise("TestAvg7");
  700. beginSamplePrecise("TestAvg8");
  701. endSamplePrecise("TestAvg8");
  702. beginSamplePrecise("TestAvg9");
  703. endSamplePrecise("TestAvg9");
  704. beginSamplePrecise("TestAvg10");
  705. endSamplePrecise("TestAvg10");
  706. }
  707. for (UINT32 i = 0; i < sampleReps * 5; i++)
  708. {
  709. beginSamplePrecise(("TestAvg#" + toString(i)).c_str());
  710. endSamplePrecise(("TestAvg#" + toString(i)).c_str());
  711. }
  712. endThread();
  713. timerB.stop();
  714. reset();
  715. double avgTimesPrecise = timerB.time/(sampleReps * 10 + sampleReps * 5);
  716. if (avgTimesPrecise < mPreciseSamplingOverheadMs)
  717. mPreciseSamplingOverheadMs = avgTimesPrecise;
  718. /************************************************************************/
  719. /* AVERAGE CYCLES FOR PRECISE SAMPLING */
  720. /************************************************************************/
  721. TimerPrecise timerPreciseB;
  722. timerPreciseB.start();
  723. beginThread("Main");
  724. // Two different cases that can effect performance, one where
  725. // sample already exists and other where new one needs to be created
  726. for (UINT32 i = 0; i < sampleReps; i++)
  727. {
  728. beginSamplePrecise("TestAvg1");
  729. endSamplePrecise("TestAvg1");
  730. beginSamplePrecise("TestAvg2");
  731. endSamplePrecise("TestAvg2");
  732. beginSamplePrecise("TestAvg3");
  733. endSamplePrecise("TestAvg3");
  734. beginSamplePrecise("TestAvg4");
  735. endSamplePrecise("TestAvg4");
  736. beginSamplePrecise("TestAvg5");
  737. endSamplePrecise("TestAvg5");
  738. beginSamplePrecise("TestAvg6");
  739. endSamplePrecise("TestAvg6");
  740. beginSamplePrecise("TestAvg7");
  741. endSamplePrecise("TestAvg7");
  742. beginSamplePrecise("TestAvg8");
  743. endSamplePrecise("TestAvg8");
  744. beginSamplePrecise("TestAvg9");
  745. endSamplePrecise("TestAvg9");
  746. beginSamplePrecise("TestAvg10");
  747. endSamplePrecise("TestAvg10");
  748. }
  749. for (UINT32 i = 0; i < sampleReps * 5; i++)
  750. {
  751. beginSamplePrecise(("TestAvg#" + toString(i)).c_str());
  752. endSamplePrecise(("TestAvg#" + toString(i)).c_str());
  753. }
  754. endThread();
  755. timerPreciseB.stop();
  756. reset();
  757. UINT64 avgCyclesPrecise = timerPreciseB.cycles/(sampleReps * 10 + sampleReps * 5);
  758. if (avgCyclesPrecise < mPreciseSamplingOverheadCycles)
  759. mPreciseSamplingOverheadCycles = avgCyclesPrecise;
  760. }
  761. }
  762. CPUProfilerBasicSamplingEntry::Data::Data()
  763. :numCalls(0), avgTimeMs(0.0), maxTimeMs(0.0), totalTimeMs(0.0),
  764. avgSelfTimeMs(0.0), totalSelfTimeMs(0.0), estimatedSelfOverheadMs(0.0),
  765. estimatedOverheadMs(0.0), pctOfParent(1.0f)
  766. { }
  767. CPUProfilerPreciseSamplingEntry::Data::Data()
  768. :numCalls(0), avgCycles(0), maxCycles(0), totalCycles(0),
  769. avgSelfCycles(0), totalSelfCycles(0), estimatedSelfOverhead(0),
  770. estimatedOverhead(0), pctOfParent(1.0f)
  771. { }
  772. CPUProfilerReport::CPUProfilerReport()
  773. {
  774. }
  775. ProfilerCPU& gProfilerCPU()
  776. {
  777. return ProfilerCPU::instance();
  778. }
  779. }