// ShaderProgramBinaryDumpMain.cpp
  1. // Copyright (C) 2009-2023, Panagiotis Christopoulos Charitos and contributors.
  2. // All rights reserved.
  3. // Code licensed under the BSD License.
  4. // http://www.anki3d.org/LICENSE
  5. #include <AnKi/ShaderCompiler/ShaderProgramCompiler.h>
  6. #include <AnKi/ShaderCompiler/MaliOfflineCompiler.h>
  7. #include <AnKi/ShaderCompiler/RadeonGpuAnalyzer.h>
  8. #include <AnKi/Util/ThreadHive.h>
  9. #include <AnKi/Util/System.h>
  10. using namespace anki;
// Printf-style help text; printed with argv[0] substituted when argument parsing fails.
static const char* kUsage = R"(Dump the shader binary to stdout
Usage: %s [options] input_shader_program_binary
Options:
-stats : Print performance statistics for all shaders. By default it doesn't
-no-binary : Don't print the binary
-no-glsl : Don't print GLSL
-spirv : Print SPIR-V
)";
  19. static Error parseCommandLineArgs(WeakArray<char*> argv, Bool& dumpStats, Bool& dumpBinary, Bool& glsl, Bool& spirv, String& filename)
  20. {
  21. // Parse config
  22. if(argv.getSize() < 2)
  23. {
  24. return Error::kUserData;
  25. }
  26. dumpStats = false;
  27. dumpBinary = true;
  28. glsl = true;
  29. spirv = false;
  30. filename = argv[argv.getSize() - 1];
  31. for(U32 i = 1; i < argv.getSize() - 1; i++)
  32. {
  33. if(CString(argv[i]) == "-stats")
  34. {
  35. dumpStats = true;
  36. }
  37. else if(CString(argv[i]) == "-no-binary")
  38. {
  39. dumpBinary = false;
  40. dumpStats = true;
  41. }
  42. else if(CString(argv[i]) == "-no-glsl")
  43. {
  44. glsl = false;
  45. }
  46. else if(CString(argv[i]) == "-spirv")
  47. {
  48. spirv = true;
  49. }
  50. }
  51. return Error::kNone;
  52. }
  53. Error dumpStats(const ShaderProgramBinary& bin)
  54. {
  55. printf("\nOffline compilers stats:\n");
  56. fflush(stdout);
  57. class Stats
  58. {
  59. public:
  60. class
  61. {
  62. public:
  63. F64 m_fma;
  64. F64 m_cvt;
  65. F64 m_sfu;
  66. F64 m_loadStore;
  67. F64 m_varying;
  68. F64 m_texture;
  69. F64 m_workRegisters;
  70. F64 m_fp16ArithmeticPercentage;
  71. F64 m_spillingCount;
  72. } m_arm;
  73. class
  74. {
  75. public:
  76. F64 m_vgprCount;
  77. F64 m_sgprCount;
  78. F64 m_isaSize;
  79. } m_amd;
  80. Stats(F64 v)
  81. {
  82. m_arm.m_fma = m_arm.m_cvt = m_arm.m_sfu = m_arm.m_loadStore = m_arm.m_varying = m_arm.m_texture = m_arm.m_workRegisters =
  83. m_arm.m_fp16ArithmeticPercentage = m_arm.m_spillingCount = v;
  84. m_amd.m_vgprCount = m_amd.m_sgprCount = m_amd.m_isaSize = v;
  85. }
  86. Stats()
  87. : Stats(0.0)
  88. {
  89. }
  90. void op(const Stats& b, void (*func)(F64& a, F64 b))
  91. {
  92. func(m_arm.m_fma, b.m_arm.m_fma);
  93. func(m_arm.m_cvt, b.m_arm.m_cvt);
  94. func(m_arm.m_sfu, b.m_arm.m_sfu);
  95. func(m_arm.m_loadStore, b.m_arm.m_loadStore);
  96. func(m_arm.m_varying, b.m_arm.m_varying);
  97. func(m_arm.m_texture, b.m_arm.m_texture);
  98. func(m_arm.m_workRegisters, b.m_arm.m_workRegisters);
  99. func(m_arm.m_fp16ArithmeticPercentage, b.m_arm.m_fp16ArithmeticPercentage);
  100. func(m_arm.m_spillingCount, b.m_arm.m_spillingCount);
  101. func(m_amd.m_vgprCount, b.m_amd.m_vgprCount);
  102. func(m_amd.m_sgprCount, b.m_amd.m_sgprCount);
  103. func(m_amd.m_isaSize, b.m_amd.m_isaSize);
  104. }
  105. };
  106. class StageStats
  107. {
  108. public:
  109. Stats m_avgStats{0.0};
  110. Stats m_maxStats{-1.0};
  111. Stats m_minStats{kMaxF64};
  112. U32 m_spillingCount = 0;
  113. U32 m_count = 0;
  114. };
  115. class Ctx
  116. {
  117. public:
  118. DynamicArray<Stats> m_spirvStats;
  119. DynamicArray<Atomic<U32>> m_spirvVisited;
  120. Atomic<U32> m_variantCount = {0};
  121. const ShaderProgramBinary* m_bin = nullptr;
  122. Atomic<I32> m_error = {0};
  123. };
  124. Ctx ctx;
  125. ctx.m_bin = &bin;
  126. ctx.m_spirvStats.resize(bin.m_codeBlocks.getSize());
  127. ctx.m_spirvVisited.resize(bin.m_codeBlocks.getSize(), 0);
  128. memset(ctx.m_spirvVisited.getBegin(), 0, ctx.m_spirvVisited.getSizeInBytes());
  129. ThreadHive hive(getCpuCoresCount());
  130. ThreadHiveTaskCallback callback = [](void* userData, [[maybe_unused]] U32 threadId, [[maybe_unused]] ThreadHive& hive,
  131. [[maybe_unused]] ThreadHiveSemaphore* signalSemaphore) {
  132. Ctx& ctx = *static_cast<Ctx*>(userData);
  133. U32 variantIdx;
  134. while((variantIdx = ctx.m_variantCount.fetchAdd(1)) < ctx.m_bin->m_variants.getSize() && ctx.m_error.load() == 0)
  135. {
  136. const ShaderProgramBinaryVariant& variant = ctx.m_bin->m_variants[variantIdx];
  137. for(ShaderType shaderType : EnumIterable<ShaderType>())
  138. {
  139. const U32 codeblockIdx = variant.m_codeBlockIndices[shaderType];
  140. if(codeblockIdx == kMaxU32)
  141. {
  142. continue;
  143. }
  144. const Bool visited = ctx.m_spirvVisited[codeblockIdx].fetchAdd(1) != 0;
  145. if(visited)
  146. {
  147. continue;
  148. }
  149. const ShaderProgramBinaryCodeBlock& codeBlock = ctx.m_bin->m_codeBlocks[codeblockIdx];
  150. // Arm stats
  151. MaliOfflineCompilerOut maliocOut;
  152. Error err = runMaliOfflineCompiler(
  153. #if ANKI_OS_LINUX
  154. ANKI_SOURCE_DIRECTORY "/ThirdParty/Bin/Linux64/MaliOfflineCompiler/malioc",
  155. #elif ANKI_OS_WINDOWS
  156. ANKI_SOURCE_DIRECTORY "/ThirdParty/Bin/Windows64/MaliOfflineCompiler/malioc.exe",
  157. #else
  158. # error "Not supported"
  159. #endif
  160. codeBlock.m_binary, shaderType, maliocOut);
  161. if(err)
  162. {
  163. ANKI_LOGE("Mali offline compiler failed");
  164. ctx.m_error.store(1);
  165. break;
  166. }
  167. // AMD
  168. RgaOutput rgaOut = {};
  169. #if 1
  170. err = runRadeonGpuAnalyzer(
  171. # if ANKI_OS_LINUX
  172. ANKI_SOURCE_DIRECTORY "/ThirdParty/Bin/Linux64/RadeonGpuAnalyzer/rga",
  173. # elif ANKI_OS_WINDOWS
  174. ANKI_SOURCE_DIRECTORY "/ThirdParty/Bin/Windows64/RadeonGpuAnalyzer/rga.exe",
  175. # else
  176. # error "Not supported"
  177. # endif
  178. codeBlock.m_binary, shaderType, rgaOut);
  179. if(err)
  180. {
  181. ANKI_LOGE("Radeon GPU Analyzer compiler failed");
  182. ctx.m_error.store(1);
  183. break;
  184. }
  185. #endif
  186. // Write stats
  187. Stats& stats = ctx.m_spirvStats[codeblockIdx];
  188. stats.m_arm.m_fma = maliocOut.m_fma;
  189. stats.m_arm.m_cvt = maliocOut.m_cvt;
  190. stats.m_arm.m_sfu = maliocOut.m_sfu;
  191. stats.m_arm.m_loadStore = maliocOut.m_loadStore;
  192. stats.m_arm.m_varying = maliocOut.m_varying;
  193. stats.m_arm.m_texture = maliocOut.m_texture;
  194. stats.m_arm.m_workRegisters = maliocOut.m_workRegisters;
  195. stats.m_arm.m_fp16ArithmeticPercentage = maliocOut.m_fp16ArithmeticPercentage;
  196. stats.m_arm.m_spillingCount = (maliocOut.m_spilling) ? 1.0 : 0.0;
  197. stats.m_amd.m_vgprCount = F64(rgaOut.m_vgprCount);
  198. stats.m_amd.m_sgprCount = F64(rgaOut.m_sgprCount);
  199. stats.m_amd.m_isaSize = F64(rgaOut.m_isaSize);
  200. }
  201. if(variantIdx > 0 && ((variantIdx + 1) % 32) == 0)
  202. {
  203. printf("Processed %u out of %u variants\n", variantIdx + 1, ctx.m_bin->m_variants.getSize());
  204. }
  205. } // while
  206. };
  207. for(U32 i = 0; i < hive.getThreadCount(); ++i)
  208. {
  209. hive.submitTask(callback, &ctx);
  210. }
  211. hive.waitAllTasks();
  212. if(ctx.m_error.load() != 0)
  213. {
  214. return Error::kFunctionFailed;
  215. }
  216. // Cather the results
  217. Array<StageStats, U32(ShaderType::kCount)> allStageStats;
  218. for(const ShaderProgramBinaryVariant& variant : bin.m_variants)
  219. {
  220. for(ShaderType stage : EnumIterable<ShaderType>())
  221. {
  222. if(variant.m_codeBlockIndices[stage] == kMaxU32)
  223. {
  224. continue;
  225. }
  226. const Stats& stats = ctx.m_spirvStats[variant.m_codeBlockIndices[stage]];
  227. StageStats& allStats = allStageStats[stage];
  228. ++allStats.m_count;
  229. allStats.m_avgStats.op(stats, [](F64& a, F64 b) {
  230. a += b;
  231. });
  232. allStats.m_minStats.op(stats, [](F64& a, F64 b) {
  233. a = min(a, b);
  234. });
  235. allStats.m_maxStats.op(stats, [](F64& a, F64 b) {
  236. a = max(a, b);
  237. });
  238. }
  239. }
  240. // Print
  241. for(ShaderType shaderType : EnumIterable<ShaderType>())
  242. {
  243. const StageStats& stage = allStageStats[shaderType];
  244. if(stage.m_count == 0)
  245. {
  246. continue;
  247. }
  248. printf("Stage %u\n", U32(shaderType));
  249. printf(" Arm shaders spilling regs %u\n", stage.m_spillingCount);
  250. const F64 countf = F64(stage.m_count);
  251. const Stats& avg = stage.m_avgStats;
  252. printf(" Average:\n");
  253. printf(" Arm: Regs %f FMA %f CVT %f SFU %f LS %f VAR %f TEX %f FP16 %f%%\n", avg.m_arm.m_workRegisters / countf, avg.m_arm.m_fma / countf,
  254. avg.m_arm.m_cvt / countf, avg.m_arm.m_sfu / countf, avg.m_arm.m_loadStore / countf, avg.m_arm.m_varying / countf,
  255. avg.m_arm.m_texture / countf, avg.m_arm.m_fp16ArithmeticPercentage / countf);
  256. printf(" AMD: VGPR %f SGPR %f ISA size %f\n", avg.m_amd.m_vgprCount / countf, avg.m_amd.m_sgprCount / countf,
  257. avg.m_amd.m_isaSize / countf);
  258. const Stats& maxs = stage.m_maxStats;
  259. printf(" Max:\n");
  260. printf(" Arm: Regs %f FMA %f CVT %f SFU %f LS %f VAR %f TEX %f FP16 %f%%\n", maxs.m_arm.m_workRegisters, maxs.m_arm.m_fma,
  261. maxs.m_arm.m_cvt, maxs.m_arm.m_sfu, maxs.m_arm.m_loadStore, maxs.m_arm.m_varying, maxs.m_arm.m_texture,
  262. maxs.m_arm.m_fp16ArithmeticPercentage);
  263. printf(" AMD: VGPR %f SGPR %f ISA size %f\n", maxs.m_amd.m_vgprCount, maxs.m_amd.m_sgprCount, maxs.m_amd.m_isaSize);
  264. }
  265. return Error::kNone;
  266. }
  267. Error dump(CString fname, Bool bDumpStats, Bool dumpBinary, Bool glsl, Bool spirv)
  268. {
  269. ShaderProgramBinaryWrapper binw(&DefaultMemoryPool::getSingleton());
  270. ANKI_CHECK(binw.deserializeFromFile(fname));
  271. if(dumpBinary)
  272. {
  273. ShaderDumpOptions options;
  274. options.m_writeGlsl = glsl;
  275. options.m_writeSpirv = spirv;
  276. String txt;
  277. dumpShaderProgramBinary(options, binw.getBinary(), txt);
  278. printf("%s\n", txt.cstr());
  279. }
  280. if(bDumpStats)
  281. {
  282. ANKI_CHECK(dumpStats(binw.getBinary()));
  283. }
  284. return Error::kNone;
  285. }
  286. ANKI_MAIN_FUNCTION(myMain)
  287. int myMain(int argc, char** argv)
  288. {
  289. class Dummy
  290. {
  291. public:
  292. ~Dummy()
  293. {
  294. DefaultMemoryPool::freeSingleton();
  295. }
  296. } dummy;
  297. DefaultMemoryPool::allocateSingleton(allocAligned, nullptr);
  298. String filename;
  299. Bool dumpStats;
  300. Bool dumpBinary;
  301. Bool glsl;
  302. Bool spirv;
  303. if(parseCommandLineArgs(WeakArray<char*>(argv, argc), dumpStats, dumpBinary, glsl, spirv, filename))
  304. {
  305. ANKI_LOGE(kUsage, argv[0]);
  306. return 1;
  307. }
  308. const Error err = dump(filename, dumpStats, dumpBinary, glsl, spirv);
  309. if(err)
  310. {
  311. ANKI_LOGE("Can't dump due to an error. Bye");
  312. return 1;
  313. }
  314. return 0;
  315. }