// ShaderProgramBinaryDumpMain.cpp
  1. // Copyright (C) 2009-2022, Panagiotis Christopoulos Charitos and contributors.
  2. // All rights reserved.
  3. // Code licensed under the BSD License.
  4. // http://www.anki3d.org/LICENSE
  5. #include <AnKi/ShaderCompiler/ShaderProgramCompiler.h>
  6. #include <AnKi/ShaderCompiler/MaliOfflineCompiler.h>
  7. #include <AnKi/ShaderCompiler/RadeonGpuAnalyzer.h>
  8. #include <AnKi/Util/ThreadHive.h>
  9. #include <AnKi/Util/System.h>
using namespace anki;

// Help text printed when command-line parsing fails. "%s" is substituted with the executable name.
static const char* kUsage = R"(Dump the shader binary to stdout
Usage: %s [options] input_shader_program_binary
Options:
-stats : Print performance statistics for all shaders. By default it doesn't
-only-stats : Print only stats
)";
  17. static Error parseCommandLineArgs(WeakArray<char*> argv, Bool& dumpStats, Bool& dumpBinary, StringRaii& filename)
  18. {
  19. // Parse config
  20. if(argv.getSize() < 2)
  21. {
  22. return Error::kUserData;
  23. }
  24. dumpStats = false;
  25. dumpBinary = true;
  26. filename = argv[argv.getSize() - 1];
  27. for(U32 i = 1; i < argv.getSize() - 1; i++)
  28. {
  29. if(CString(argv[i]) == "-stats")
  30. {
  31. dumpStats = true;
  32. }
  33. else if(CString(argv[i]) == "-only-stats")
  34. {
  35. dumpBinary = false;
  36. dumpStats = true;
  37. }
  38. }
  39. return Error::kNone;
  40. }
  41. Error dumpStats(const ShaderProgramBinary& bin)
  42. {
  43. HeapMemoryPool pool(allocAligned, nullptr);
  44. printf("\nOffline compilers stats:\n");
  45. fflush(stdout);
  46. class Stats
  47. {
  48. public:
  49. class
  50. {
  51. public:
  52. F64 m_fma;
  53. F64 m_cvt;
  54. F64 m_sfu;
  55. F64 m_loadStore;
  56. F64 m_varying;
  57. F64 m_texture;
  58. F64 m_workRegisters;
  59. F64 m_fp16ArithmeticPercentage;
  60. F64 m_spillingCount;
  61. } m_arm;
  62. class
  63. {
  64. public:
  65. F64 m_vgprCount;
  66. F64 m_sgprCount;
  67. F64 m_isaSize;
  68. } m_amd;
  69. Stats(F64 v)
  70. {
  71. m_arm.m_fma = m_arm.m_cvt = m_arm.m_sfu = m_arm.m_loadStore = m_arm.m_varying = m_arm.m_texture =
  72. m_arm.m_workRegisters = m_arm.m_fp16ArithmeticPercentage = m_arm.m_spillingCount = v;
  73. m_amd.m_vgprCount = m_amd.m_sgprCount = m_amd.m_isaSize = v;
  74. }
  75. Stats()
  76. : Stats(0.0)
  77. {
  78. }
  79. void op(const Stats& b, void (*func)(F64& a, F64 b))
  80. {
  81. func(m_arm.m_fma, b.m_arm.m_fma);
  82. func(m_arm.m_cvt, b.m_arm.m_cvt);
  83. func(m_arm.m_sfu, b.m_arm.m_sfu);
  84. func(m_arm.m_loadStore, b.m_arm.m_loadStore);
  85. func(m_arm.m_varying, b.m_arm.m_varying);
  86. func(m_arm.m_texture, b.m_arm.m_texture);
  87. func(m_arm.m_workRegisters, b.m_arm.m_workRegisters);
  88. func(m_arm.m_fp16ArithmeticPercentage, b.m_arm.m_fp16ArithmeticPercentage);
  89. func(m_arm.m_spillingCount, b.m_arm.m_spillingCount);
  90. func(m_amd.m_vgprCount, b.m_amd.m_vgprCount);
  91. func(m_amd.m_sgprCount, b.m_amd.m_sgprCount);
  92. func(m_amd.m_isaSize, b.m_amd.m_isaSize);
  93. }
  94. };
  95. class StageStats
  96. {
  97. public:
  98. Stats m_avgStats{0.0};
  99. Stats m_maxStats{-1.0};
  100. Stats m_minStats{kMaxF64};
  101. U32 m_spillingCount = 0;
  102. U32 m_count = 0;
  103. };
  104. class Ctx
  105. {
  106. public:
  107. HeapMemoryPool* m_pool = nullptr;
  108. DynamicArrayRaii<Stats> m_spirvStats{m_pool};
  109. DynamicArrayRaii<Atomic<U32>> m_spirvVisited{m_pool};
  110. Atomic<U32> m_variantCount = {0};
  111. const ShaderProgramBinary* m_bin = nullptr;
  112. Atomic<I32> m_error = {0};
  113. Ctx(HeapMemoryPool* pool)
  114. : m_pool(pool)
  115. {
  116. }
  117. };
  118. Ctx ctx(&pool);
  119. ctx.m_bin = &bin;
  120. ctx.m_spirvStats.create(bin.m_codeBlocks.getSize());
  121. ctx.m_spirvVisited.create(bin.m_codeBlocks.getSize());
  122. memset(ctx.m_spirvVisited.getBegin(), 0, ctx.m_spirvVisited.getSizeInBytes());
  123. ThreadHive hive(getCpuCoresCount(), &pool);
  124. ThreadHiveTaskCallback callback = [](void* userData, [[maybe_unused]] U32 threadId,
  125. [[maybe_unused]] ThreadHive& hive,
  126. [[maybe_unused]] ThreadHiveSemaphore* signalSemaphore) {
  127. Ctx& ctx = *static_cast<Ctx*>(userData);
  128. U32 variantIdx;
  129. while((variantIdx = ctx.m_variantCount.fetchAdd(1)) < ctx.m_bin->m_variants.getSize()
  130. && ctx.m_error.load() == 0)
  131. {
  132. const ShaderProgramBinaryVariant& variant = ctx.m_bin->m_variants[variantIdx];
  133. for(ShaderType shaderType : EnumIterable<ShaderType>())
  134. {
  135. const U32 codeblockIdx = variant.m_codeBlockIndices[shaderType];
  136. if(codeblockIdx == kMaxU32)
  137. {
  138. continue;
  139. }
  140. const Bool visited = ctx.m_spirvVisited[codeblockIdx].fetchAdd(1) != 0;
  141. if(visited)
  142. {
  143. continue;
  144. }
  145. const ShaderProgramBinaryCodeBlock& codeBlock = ctx.m_bin->m_codeBlocks[codeblockIdx];
  146. // Arm stats
  147. MaliOfflineCompilerOut maliocOut;
  148. Error err = runMaliOfflineCompiler(
  149. #if ANKI_OS_LINUX
  150. ANKI_SOURCE_DIRECTORY "/ThirdParty/Bin/Linux64/MaliOfflineCompiler/malioc",
  151. #elif ANKI_OS_WINDOWS
  152. ANKI_SOURCE_DIRECTORY "/ThirdParty/Bin/Windows64/MaliOfflineCompiler/malioc.exe",
  153. #else
  154. # error "Not supported"
  155. #endif
  156. codeBlock.m_binary, shaderType, *ctx.m_pool, maliocOut);
  157. if(err)
  158. {
  159. ANKI_LOGE("Mali offline compiler failed");
  160. ctx.m_error.store(1);
  161. break;
  162. }
  163. // AMD
  164. RgaOutput rgaOut = {};
  165. #if 1
  166. err = runRadeonGpuAnalyzer(
  167. # if ANKI_OS_LINUX
  168. ANKI_SOURCE_DIRECTORY "/ThirdParty/Bin/Linux64/RadeonGpuAnalyzer/rga",
  169. # elif ANKI_OS_WINDOWS
  170. ANKI_SOURCE_DIRECTORY "/ThirdParty/Bin/Windows64/RadeonGpuAnalyzer/rga.exe",
  171. # else
  172. # error "Not supported"
  173. # endif
  174. codeBlock.m_binary, shaderType, *ctx.m_pool, rgaOut);
  175. if(err)
  176. {
  177. ANKI_LOGE("Radeon GPU Analyzer compiler failed");
  178. ctx.m_error.store(1);
  179. break;
  180. }
  181. #endif
  182. // Write stats
  183. Stats& stats = ctx.m_spirvStats[codeblockIdx];
  184. stats.m_arm.m_fma = maliocOut.m_fma;
  185. stats.m_arm.m_cvt = maliocOut.m_cvt;
  186. stats.m_arm.m_sfu = maliocOut.m_sfu;
  187. stats.m_arm.m_loadStore = maliocOut.m_loadStore;
  188. stats.m_arm.m_varying = maliocOut.m_varying;
  189. stats.m_arm.m_texture = maliocOut.m_texture;
  190. stats.m_arm.m_workRegisters = maliocOut.m_workRegisters;
  191. stats.m_arm.m_fp16ArithmeticPercentage = maliocOut.m_fp16ArithmeticPercentage;
  192. stats.m_arm.m_spillingCount = (maliocOut.m_spilling) ? 1.0 : 0.0;
  193. stats.m_amd.m_vgprCount = F64(rgaOut.m_vgprCount);
  194. stats.m_amd.m_sgprCount = F64(rgaOut.m_sgprCount);
  195. stats.m_amd.m_isaSize = F64(rgaOut.m_isaSize);
  196. }
  197. if(variantIdx > 0 && ((variantIdx + 1) % 32) == 0)
  198. {
  199. printf("Processed %u out of %u variants\n", variantIdx + 1, ctx.m_bin->m_variants.getSize());
  200. }
  201. } // while
  202. };
  203. for(U32 i = 0; i < hive.getThreadCount(); ++i)
  204. {
  205. hive.submitTask(callback, &ctx);
  206. }
  207. hive.waitAllTasks();
  208. if(ctx.m_error.load() != 0)
  209. {
  210. return Error::kFunctionFailed;
  211. }
  212. // Cather the results
  213. Array<StageStats, U32(ShaderType::kCount)> allStageStats;
  214. for(const ShaderProgramBinaryVariant& variant : bin.m_variants)
  215. {
  216. for(ShaderType stage : EnumIterable<ShaderType>())
  217. {
  218. if(variant.m_codeBlockIndices[stage] == kMaxU32)
  219. {
  220. continue;
  221. }
  222. const Stats& stats = ctx.m_spirvStats[variant.m_codeBlockIndices[stage]];
  223. StageStats& allStats = allStageStats[stage];
  224. ++allStats.m_count;
  225. allStats.m_avgStats.op(stats, [](F64& a, F64 b) {
  226. a += b;
  227. });
  228. allStats.m_minStats.op(stats, [](F64& a, F64 b) {
  229. a = min(a, b);
  230. });
  231. allStats.m_maxStats.op(stats, [](F64& a, F64 b) {
  232. a = max(a, b);
  233. });
  234. }
  235. }
  236. // Print
  237. for(ShaderType shaderType : EnumIterable<ShaderType>())
  238. {
  239. const StageStats& stage = allStageStats[shaderType];
  240. if(stage.m_count == 0)
  241. {
  242. continue;
  243. }
  244. printf("Stage %u\n", U32(shaderType));
  245. printf(" Arm shaders spilling regs %u\n", stage.m_spillingCount);
  246. const F64 countf = F64(stage.m_count);
  247. const Stats& avg = stage.m_avgStats;
  248. printf(" Average:\n");
  249. printf(" Arm: Regs %f FMA %f CVT %f SFU %f LS %f VAR %f TEX %f FP16 %f%%\n",
  250. avg.m_arm.m_workRegisters / countf, avg.m_arm.m_fma / countf, avg.m_arm.m_cvt / countf,
  251. avg.m_arm.m_sfu / countf, avg.m_arm.m_loadStore / countf, avg.m_arm.m_varying / countf,
  252. avg.m_arm.m_texture / countf, avg.m_arm.m_fp16ArithmeticPercentage / countf);
  253. printf(" AMD: VGPR %f SGPR %f ISA size %f\n", avg.m_amd.m_vgprCount / countf, avg.m_amd.m_sgprCount / countf,
  254. avg.m_amd.m_isaSize / countf);
  255. const Stats& maxs = stage.m_maxStats;
  256. printf(" Max:\n");
  257. printf(" Arm: Regs %f FMA %f CVT %f SFU %f LS %f VAR %f TEX %f FP16 %f%%\n", maxs.m_arm.m_workRegisters,
  258. maxs.m_arm.m_fma, maxs.m_arm.m_cvt, maxs.m_arm.m_sfu, maxs.m_arm.m_loadStore, maxs.m_arm.m_varying,
  259. maxs.m_arm.m_texture, maxs.m_arm.m_fp16ArithmeticPercentage);
  260. printf(" AMD: VGPR %f SGPR %f ISA size %f\n", maxs.m_amd.m_vgprCount, maxs.m_amd.m_sgprCount,
  261. maxs.m_amd.m_isaSize);
  262. }
  263. return Error::kNone;
  264. }
  265. Error dump(CString fname, Bool bDumpStats, Bool dumpBinary)
  266. {
  267. HeapMemoryPool pool(allocAligned, nullptr);
  268. ShaderProgramBinaryWrapper binw(&pool);
  269. ANKI_CHECK(binw.deserializeFromFile(fname));
  270. if(dumpBinary)
  271. {
  272. StringRaii txt(&pool);
  273. dumpShaderProgramBinary(binw.getBinary(), txt);
  274. printf("%s\n", txt.cstr());
  275. }
  276. if(bDumpStats)
  277. {
  278. ANKI_CHECK(dumpStats(binw.getBinary()));
  279. }
  280. return Error::kNone;
  281. }
  282. int main(int argc, char** argv)
  283. {
  284. HeapMemoryPool pool(allocAligned, nullptr);
  285. StringRaii filename(&pool);
  286. Bool dumpStats;
  287. Bool dumpBinary;
  288. if(parseCommandLineArgs(WeakArray<char*>(argv, argc), dumpStats, dumpBinary, filename))
  289. {
  290. ANKI_LOGE(kUsage, argv[0]);
  291. return 1;
  292. }
  293. const Error err = dump(filename, dumpStats, dumpBinary);
  294. if(err)
  295. {
  296. ANKI_LOGE("Can't dump due to an error. Bye");
  297. return 1;
  298. }
  299. return 0;
  300. }