ShaderProgramBinaryDumpMain.cpp

// Copyright (C) 2009-present, Panagiotis Christopoulos Charitos and contributors.
// All rights reserved.
// Code licensed under the BSD License.
// http://www.anki3d.org/LICENSE

#include <AnKi/ShaderCompiler/ShaderCompiler.h>
#include <AnKi/ShaderCompiler/ShaderDump.h>
#include <AnKi/ShaderCompiler/MaliOfflineCompiler.h>
#include <AnKi/ShaderCompiler/RadeonGpuAnalyzer.h>
#include <AnKi/ShaderCompiler/Dxc.h>
#include <AnKi/Util/ThreadHive.h>
#include <AnKi/Util/System.h>
#include <ThirdParty/SpirvCross/spirv.hpp>

using namespace anki;

static const char* kUsage = R"(Dump the shader binary to stdout
Usage: %s [options] input_shader_program_binary
Options:
-stats <0|1> : Print performance statistics for all shaders. Default 0
-binary <0|1> : Print the whole shader program binary. Default 1
-glsl <0|1> : Print GLSL. Default 1
-spirv <0|1> : Print SPIR-V. Default 0
-v : Verbose log
)";
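
// Example invocation (executable and file names are hypothetical): print only the stats and skip
// the full binary listing:
//   ShaderProgramBinaryDump -stats 1 -binary 0 MyProgram.ankiprogbin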

static Error parseCommandLineArgs(WeakArray<char*> argv, Bool& dumpStats, Bool& dumpBinary, Bool& glsl, Bool& spirv, String& filename)
{
	// Parse config
	if(argv.getSize() < 2)
	{
		return Error::kUserData;
	}

	dumpStats = false;
	dumpBinary = true;
	glsl = true;
	spirv = false;
	filename = argv[argv.getSize() - 1];

	for(U32 i = 1; i < argv.getSize() - 1; i++)
	{
		if(CString(argv[i]) == "-stats")
		{
			++i;
			if(i >= argv.getSize())
			{
				return Error::kUserData;
			}

			if(CString(argv[i]) == "1")
			{
				dumpStats = true;
			}
			else if(CString(argv[i]) == "0")
			{
				dumpStats = false;
			}
			else
			{
				return Error::kUserData;
			}
		}
		else if(CString(argv[i]) == "-binary")
		{
			++i;
			if(i >= argv.getSize())
			{
				return Error::kUserData;
			}

			if(CString(argv[i]) == "1")
			{
				dumpBinary = true;
			}
			else if(CString(argv[i]) == "0")
			{
				dumpBinary = false;
			}
			else
			{
				return Error::kUserData;
			}
		}
		else if(CString(argv[i]) == "-glsl")
		{
			++i;
			if(i >= argv.getSize())
			{
				return Error::kUserData;
			}

			if(CString(argv[i]) == "1")
			{
				glsl = true;
			}
			else if(CString(argv[i]) == "0")
			{
				glsl = false;
			}
			else
			{
				return Error::kUserData;
			}
		}
		else if(CString(argv[i]) == "-spirv")
		{
			++i;
			if(i >= argv.getSize())
			{
				return Error::kUserData;
			}

			if(CString(argv[i]) == "1")
			{
				spirv = true;
			}
			else if(CString(argv[i]) == "0")
			{
				spirv = false;
			}
			else
			{
				return Error::kUserData;
			}
		}
		else if(CString(argv[i]) == "-v")
		{
			Logger::getSingleton().enableVerbosity(true);
		}
	}

	if(spirv || glsl)
	{
		dumpBinary = true;
	}

	return Error::kNone;
}

Error dumpStats(const ShaderBinary& bin)
{
	printf("\nOffline compilers stats:\n");
	fflush(stdout);

	class Stats
	{
	public:
		class
		{
		public:
			F64 m_fma;
			F64 m_cvt;
			F64 m_sfu;
			F64 m_loadStore;
			F64 m_varying;
			F64 m_texture;
			F64 m_workRegisters;
			F64 m_fp16ArithmeticPercentage;
			F64 m_spillingCount;
		} m_arm;

		class
		{
		public:
			F64 m_vgprCount;
			F64 m_sgprCount;
			F64 m_isaSize;
		} m_amd;

		Stats(F64 v)
		{
			m_arm.m_fma = m_arm.m_cvt = m_arm.m_sfu = m_arm.m_loadStore = m_arm.m_varying = m_arm.m_texture = m_arm.m_workRegisters =
				m_arm.m_fp16ArithmeticPercentage = m_arm.m_spillingCount = v;

			m_amd.m_vgprCount = m_amd.m_sgprCount = m_amd.m_isaSize = v;
		}

		Stats()
			: Stats(0.0)
		{
		}

		void op(const Stats& b, void (*func)(F64& a, F64 b))
		{
			func(m_arm.m_fma, b.m_arm.m_fma);
			func(m_arm.m_cvt, b.m_arm.m_cvt);
			func(m_arm.m_sfu, b.m_arm.m_sfu);
			func(m_arm.m_loadStore, b.m_arm.m_loadStore);
			func(m_arm.m_varying, b.m_arm.m_varying);
			func(m_arm.m_texture, b.m_arm.m_texture);
			func(m_arm.m_workRegisters, b.m_arm.m_workRegisters);
			func(m_arm.m_fp16ArithmeticPercentage, b.m_arm.m_fp16ArithmeticPercentage);
			func(m_arm.m_spillingCount, b.m_arm.m_spillingCount);
			func(m_amd.m_vgprCount, b.m_amd.m_vgprCount);
			func(m_amd.m_sgprCount, b.m_amd.m_sgprCount);
			func(m_amd.m_isaSize, b.m_amd.m_isaSize);
		}
	};
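
	// op() applies the same binary functor to every counter pair, which lets the single Stats type
	// accumulate sums (for the averages), minima and maxima in the gathering loop further below, e.g.:
	//   total.op(sample, [](F64& a, F64 b) { a += b; });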

	class StageStats
	{
	public:
		Stats m_avgStats{0.0};
		Stats m_maxStats{-1.0};
		Stats m_minStats{kMaxF64};
		U32 m_spillingCount = 0;
		U32 m_count = 0;
	};

	class Ctx
	{
	public:
		DynamicArray<Stats> m_spirvStats;
		DynamicArray<Atomic<U32>> m_spirvVisited;
		Atomic<U32> m_variantCount = {0};
		const ShaderBinary* m_bin = nullptr;
		Atomic<I32> m_error = {0};
	};

	Ctx ctx;
	ctx.m_bin = &bin;
	ctx.m_spirvStats.resize(bin.m_codeBlocks.getSize());
	ctx.m_spirvVisited.resize(bin.m_codeBlocks.getSize(), 0);
	memset(ctx.m_spirvVisited.getBegin(), 0, ctx.m_spirvVisited.getSizeInBytes());
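
	// Work distribution: every worker pulls the next variant index from the shared atomic counter, and
	// each code block is analyzed only once because m_spirvVisited guards variants that share code blocks.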

	ThreadHive hive(getCpuCoresCount());

	ThreadHiveTaskCallback callback = [](void* userData, [[maybe_unused]] U32 threadId, [[maybe_unused]] ThreadHive& hive,
		[[maybe_unused]] ThreadHiveSemaphore* signalSemaphore) {
		Ctx& ctx = *static_cast<Ctx*>(userData);

		U32 variantIdx;
		while((variantIdx = ctx.m_variantCount.fetchAdd(1)) < ctx.m_bin->m_variants.getSize() && ctx.m_error.load() == 0)
		{
			const ShaderBinaryVariant& variant = ctx.m_bin->m_variants[variantIdx];

			for(U32 t = 0; t < variant.m_techniqueCodeBlocks.getSize(); ++t)
			{
				for(ShaderType shaderType : EnumBitsIterable<ShaderType, ShaderTypeBit>(ctx.m_bin->m_techniques[t].m_shaderTypes))
				{
					const U32 codeblockIdx = variant.m_techniqueCodeBlocks[t].m_codeBlockIndices[shaderType];

					const Bool visited = ctx.m_spirvVisited[codeblockIdx].fetchAdd(1) != 0;
					if(visited)
					{
						continue;
					}

					const ShaderBinaryCodeBlock& codeBlock = ctx.m_bin->m_codeBlocks[codeblockIdx];

					// Rewrite the SPIR-V because of the decorations we ask DXC to emit
					Bool bRequiresMeshShaders = false;
					DynamicArray<U8> newSpirv;
					newSpirv.resize(codeBlock.m_binary.getSize());
					memcpy(newSpirv.getBegin(), codeBlock.m_binary.getBegin(), codeBlock.m_binary.getSizeInBytes());
					visitSpirv(WeakArray<U32>(reinterpret_cast<U32*>(newSpirv.getBegin()), U32(newSpirv.getSizeInBytes() / sizeof(U32))),
						[&](U32 cmd, WeakArray<U32> instructions) {
							if(cmd == spv::OpDecorate && instructions[1] == spv::DecorationDescriptorSet
							   && instructions[2] == kDxcVkBindlessRegisterSpace)
							{
								// Bindless set, rewrite its set index
								instructions[2] = kMaxRegisterSpaces;
							}
							else if(cmd == spv::OpCapability && instructions[0] == spv::CapabilityMeshShadingEXT)
							{
								bRequiresMeshShaders = true;
							}
						});

					// Arm stats
					MaliOfflineCompilerOut maliocOut;
					Error err = Error::kNone;
					if((shaderType == ShaderType::kVertex || shaderType == ShaderType::kPixel || shaderType == ShaderType::kCompute)
					   && !bRequiresMeshShaders)
					{
						err = runMaliOfflineCompiler(newSpirv, shaderType, maliocOut);
						if(err)
						{
							ANKI_LOGE("Mali offline compiler failed");
							ctx.m_error.store(1);
							break;
						}
					}

					// AMD
					RgaOutput rgaOut = {};
#if 1
					if((shaderType == ShaderType::kVertex || shaderType == ShaderType::kPixel || shaderType == ShaderType::kCompute)
					   && !bRequiresMeshShaders)
					{
						err = runRadeonGpuAnalyzer(newSpirv, shaderType, rgaOut);
						if(err)
						{
							ANKI_LOGE("Radeon GPU Analyzer compiler failed");
							ctx.m_error.store(1);
							break;
						}
					}
#endif

					// Write stats
					Stats& stats = ctx.m_spirvStats[codeblockIdx];

					stats.m_arm.m_fma = maliocOut.m_fma;
					stats.m_arm.m_cvt = maliocOut.m_cvt;
					stats.m_arm.m_sfu = maliocOut.m_sfu;
					stats.m_arm.m_loadStore = maliocOut.m_loadStore;
					stats.m_arm.m_varying = maliocOut.m_varying;
					stats.m_arm.m_texture = maliocOut.m_texture;
					stats.m_arm.m_workRegisters = maliocOut.m_workRegisters;
					stats.m_arm.m_fp16ArithmeticPercentage = maliocOut.m_fp16ArithmeticPercentage;
					stats.m_arm.m_spillingCount = (maliocOut.m_spilling) ? 1.0 : 0.0;

					stats.m_amd.m_vgprCount = F64(rgaOut.m_vgprCount);
					stats.m_amd.m_sgprCount = F64(rgaOut.m_sgprCount);
					stats.m_amd.m_isaSize = F64(rgaOut.m_isaSize);
				}

				if(variantIdx > 0 && ((variantIdx + 1) % 32) == 0)
				{
					printf("Processed %u out of %u variants\n", variantIdx + 1, ctx.m_bin->m_variants.getSize());
				}
			}
		} // while
	};

	for(U32 i = 0; i < hive.getThreadCount(); ++i)
	{
		hive.submitTask(callback, &ctx);
	}

	hive.waitAllTasks();

	if(ctx.m_error.load() != 0)
	{
		return Error::kFunctionFailed;
	}

	// Gather the results
	Array<StageStats, U32(ShaderType::kCount)> allStageStats;
	for(const ShaderBinaryVariant& variant : bin.m_variants)
	{
		for(U32 t = 0; t < variant.m_techniqueCodeBlocks.getSize(); ++t)
		{
			for(ShaderType shaderType : EnumBitsIterable<ShaderType, ShaderTypeBit>(ctx.m_bin->m_techniques[t].m_shaderTypes))
			{
				const U32 codeblockIdx = variant.m_techniqueCodeBlocks[t].m_codeBlockIndices[shaderType];
				const Stats& stats = ctx.m_spirvStats[codeblockIdx];
				StageStats& allStats = allStageStats[shaderType];

				++allStats.m_count;

				// Count the shaders that spill registers on Arm (the per-codeblock flag is stored as 0.0/1.0);
				// without this the "spilling regs" line printed below would always report 0
				allStats.m_spillingCount += (stats.m_arm.m_spillingCount > 0.0) ? 1 : 0;

				allStats.m_avgStats.op(stats, [](F64& a, F64 b) {
					a += b;
				});

				allStats.m_minStats.op(stats, [](F64& a, F64 b) {
					a = min(a, b);
				});

				allStats.m_maxStats.op(stats, [](F64& a, F64 b) {
					a = max(a, b);
				});
			}
		}
	}

	// Print
	for(ShaderType shaderType : EnumIterable<ShaderType>())
	{
		const StageStats& stage = allStageStats[shaderType];
		if(stage.m_count == 0)
		{
			continue;
		}

		printf("Stage %u\n", U32(shaderType));
		printf(" Arm shaders spilling regs %u\n", stage.m_spillingCount);

		const F64 countf = F64(stage.m_count);

		const Stats& avg = stage.m_avgStats;
		printf(" Average:\n");
		printf(" Arm: Regs %f FMA %f CVT %f SFU %f LS %f VAR %f TEX %f FP16 %f%%\n", avg.m_arm.m_workRegisters / countf, avg.m_arm.m_fma / countf,
			   avg.m_arm.m_cvt / countf, avg.m_arm.m_sfu / countf, avg.m_arm.m_loadStore / countf, avg.m_arm.m_varying / countf,
			   avg.m_arm.m_texture / countf, avg.m_arm.m_fp16ArithmeticPercentage / countf);
		printf(" AMD: VGPR %f SGPR %f ISA size %f\n", avg.m_amd.m_vgprCount / countf, avg.m_amd.m_sgprCount / countf,
			   avg.m_amd.m_isaSize / countf);

		const Stats& maxs = stage.m_maxStats;
		printf(" Max:\n");
		printf(" Arm: Regs %f FMA %f CVT %f SFU %f LS %f VAR %f TEX %f FP16 %f%%\n", maxs.m_arm.m_workRegisters, maxs.m_arm.m_fma,
			   maxs.m_arm.m_cvt, maxs.m_arm.m_sfu, maxs.m_arm.m_loadStore, maxs.m_arm.m_varying, maxs.m_arm.m_texture,
			   maxs.m_arm.m_fp16ArithmeticPercentage);
		printf(" AMD: VGPR %f SGPR %f ISA size %f\n", maxs.m_amd.m_vgprCount, maxs.m_amd.m_sgprCount, maxs.m_amd.m_isaSize);
	}

	return Error::kNone;
}

Error dump(CString fname, Bool bDumpStats, Bool dumpBinary, Bool glsl, Bool spirv)
{
	ShaderBinary* binary;
	ANKI_CHECK(deserializeShaderBinaryFromFile(fname, binary, ShaderCompilerMemoryPool::getSingleton()));

	class Dummy
	{
	public:
		ShaderBinary* m_binary;

		~Dummy()
		{
			ShaderCompilerMemoryPool::getSingleton().free(m_binary);
		}
	} dummy{binary};

	if(dumpBinary)
	{
		ShaderDumpOptions options;
		options.m_writeGlsl = glsl;
		options.m_writeSpirv = spirv;

		ShaderCompilerString txt;
		dumpShaderBinary(options, *binary, txt);
		printf("%s\n", txt.cstr());
	}

	if(bDumpStats)
	{
		ANKI_CHECK(dumpStats(*binary));
	}

	return Error::kNone;
}

ANKI_MAIN_FUNCTION(myMain)
int myMain(int argc, char** argv)
{
	class Dummy
	{
	public:
		~Dummy()
		{
			DefaultMemoryPool::freeSingleton();
			ShaderCompilerMemoryPool::freeSingleton();
		}
	} dummy;

	DefaultMemoryPool::allocateSingleton(allocAligned, nullptr);
	ShaderCompilerMemoryPool::allocateSingleton(allocAligned, nullptr);

	String filename;
	Bool dumpStats;
	Bool dumpBinary;
	Bool glsl;
	Bool spirv;
	if(parseCommandLineArgs(WeakArray<char*>(argv, argc), dumpStats, dumpBinary, glsl, spirv, filename))
	{
		ANKI_LOGE(kUsage, argv[0]);
		return 1;
	}

	const Error err = dump(filename, dumpStats, dumpBinary, glsl, spirv);
	if(err)
	{
		ANKI_LOGE("Can't dump due to an error. Bye");
		return 1;
	}

	return 0;
}