// ShaderProgramBinaryDumpMain.cpp
  1. // Copyright (C) 2009-present, Panagiotis Christopoulos Charitos and contributors.
  2. // All rights reserved.
  3. // Code licensed under the BSD License.
  4. // http://www.anki3d.org/LICENSE
  5. #include <AnKi/ShaderCompiler/ShaderCompiler.h>
  6. #include <AnKi/ShaderCompiler/ShaderDump.h>
  7. #include <AnKi/ShaderCompiler/MaliOfflineCompiler.h>
  8. #include <AnKi/ShaderCompiler/RadeonGpuAnalyzer.h>
  9. #include <AnKi/ShaderCompiler/Dxc.h>
  10. #include <AnKi/Util/ThreadHive.h>
  11. #include <AnKi/Util/System.h>
  12. #include <ThirdParty/SpirvCross/spirv.hpp>
  13. using namespace anki;
// Help text printed when command-line parsing fails. The %s placeholder is substituted with argv[0] by the logger.
static const char* kUsage = R"(Dump the shader binary to stdout
Usage: %s [options] input_shader_program_binary
Options:
-stats : Print performance statistics for all shaders. By default it doesn't
-no-binary : Don't print the binary
-no-glsl : Don't print GLSL
-spirv : Print SPIR-V
)";
  22. static Error parseCommandLineArgs(WeakArray<char*> argv, Bool& dumpStats, Bool& dumpBinary, Bool& glsl, Bool& spirv, String& filename)
  23. {
  24. // Parse config
  25. if(argv.getSize() < 2)
  26. {
  27. return Error::kUserData;
  28. }
  29. dumpStats = false;
  30. dumpBinary = true;
  31. glsl = true;
  32. spirv = false;
  33. filename = argv[argv.getSize() - 1];
  34. for(U32 i = 1; i < argv.getSize() - 1; i++)
  35. {
  36. if(CString(argv[i]) == "-stats")
  37. {
  38. dumpStats = true;
  39. }
  40. else if(CString(argv[i]) == "-no-binary")
  41. {
  42. dumpBinary = false;
  43. dumpStats = true;
  44. }
  45. else if(CString(argv[i]) == "-no-glsl")
  46. {
  47. glsl = false;
  48. }
  49. else if(CString(argv[i]) == "-spirv")
  50. {
  51. spirv = true;
  52. }
  53. }
  54. return Error::kNone;
  55. }
  56. Error dumpStats(const ShaderBinary& bin)
  57. {
  58. printf("\nOffline compilers stats:\n");
  59. fflush(stdout);
  60. class Stats
  61. {
  62. public:
  63. class
  64. {
  65. public:
  66. F64 m_fma;
  67. F64 m_cvt;
  68. F64 m_sfu;
  69. F64 m_loadStore;
  70. F64 m_varying;
  71. F64 m_texture;
  72. F64 m_workRegisters;
  73. F64 m_fp16ArithmeticPercentage;
  74. F64 m_spillingCount;
  75. } m_arm;
  76. class
  77. {
  78. public:
  79. F64 m_vgprCount;
  80. F64 m_sgprCount;
  81. F64 m_isaSize;
  82. } m_amd;
  83. Stats(F64 v)
  84. {
  85. m_arm.m_fma = m_arm.m_cvt = m_arm.m_sfu = m_arm.m_loadStore = m_arm.m_varying = m_arm.m_texture = m_arm.m_workRegisters =
  86. m_arm.m_fp16ArithmeticPercentage = m_arm.m_spillingCount = v;
  87. m_amd.m_vgprCount = m_amd.m_sgprCount = m_amd.m_isaSize = v;
  88. }
  89. Stats()
  90. : Stats(0.0)
  91. {
  92. }
  93. void op(const Stats& b, void (*func)(F64& a, F64 b))
  94. {
  95. func(m_arm.m_fma, b.m_arm.m_fma);
  96. func(m_arm.m_cvt, b.m_arm.m_cvt);
  97. func(m_arm.m_sfu, b.m_arm.m_sfu);
  98. func(m_arm.m_loadStore, b.m_arm.m_loadStore);
  99. func(m_arm.m_varying, b.m_arm.m_varying);
  100. func(m_arm.m_texture, b.m_arm.m_texture);
  101. func(m_arm.m_workRegisters, b.m_arm.m_workRegisters);
  102. func(m_arm.m_fp16ArithmeticPercentage, b.m_arm.m_fp16ArithmeticPercentage);
  103. func(m_arm.m_spillingCount, b.m_arm.m_spillingCount);
  104. func(m_amd.m_vgprCount, b.m_amd.m_vgprCount);
  105. func(m_amd.m_sgprCount, b.m_amd.m_sgprCount);
  106. func(m_amd.m_isaSize, b.m_amd.m_isaSize);
  107. }
  108. };
  109. class StageStats
  110. {
  111. public:
  112. Stats m_avgStats{0.0};
  113. Stats m_maxStats{-1.0};
  114. Stats m_minStats{kMaxF64};
  115. U32 m_spillingCount = 0;
  116. U32 m_count = 0;
  117. };
  118. class Ctx
  119. {
  120. public:
  121. DynamicArray<Stats> m_spirvStats;
  122. DynamicArray<Atomic<U32>> m_spirvVisited;
  123. Atomic<U32> m_variantCount = {0};
  124. const ShaderBinary* m_bin = nullptr;
  125. Atomic<I32> m_error = {0};
  126. };
  127. Ctx ctx;
  128. ctx.m_bin = &bin;
  129. ctx.m_spirvStats.resize(bin.m_codeBlocks.getSize());
  130. ctx.m_spirvVisited.resize(bin.m_codeBlocks.getSize(), 0);
  131. memset(ctx.m_spirvVisited.getBegin(), 0, ctx.m_spirvVisited.getSizeInBytes());
  132. ThreadHive hive(getCpuCoresCount());
  133. ThreadHiveTaskCallback callback = [](void* userData, [[maybe_unused]] U32 threadId, [[maybe_unused]] ThreadHive& hive,
  134. [[maybe_unused]] ThreadHiveSemaphore* signalSemaphore) {
  135. Ctx& ctx = *static_cast<Ctx*>(userData);
  136. U32 variantIdx;
  137. while((variantIdx = ctx.m_variantCount.fetchAdd(1)) < ctx.m_bin->m_variants.getSize() && ctx.m_error.load() == 0)
  138. {
  139. const ShaderBinaryVariant& variant = ctx.m_bin->m_variants[variantIdx];
  140. for(U32 t = 0; t < variant.m_techniqueCodeBlocks.getSize(); ++t)
  141. {
  142. for(ShaderType shaderType : EnumBitsIterable<ShaderType, ShaderTypeBit>(ctx.m_bin->m_techniques[t].m_shaderTypes))
  143. {
  144. const U32 codeblockIdx = variant.m_techniqueCodeBlocks[t].m_codeBlockIndices[shaderType];
  145. const Bool visited = ctx.m_spirvVisited[codeblockIdx].fetchAdd(1) != 0;
  146. if(visited)
  147. {
  148. continue;
  149. }
  150. const ShaderBinaryCodeBlock& codeBlock = ctx.m_bin->m_codeBlocks[codeblockIdx];
  151. // Rewrite spir-v because of the decorations we ask DXC to put
  152. Bool bRequiresMeshShaders = false;
  153. DynamicArray<U8> newSpirv;
  154. newSpirv.resize(codeBlock.m_binary.getSize());
  155. memcpy(newSpirv.getBegin(), codeBlock.m_binary.getBegin(), codeBlock.m_binary.getSizeInBytes());
  156. visitSpirv(WeakArray<U32>(reinterpret_cast<U32*>(newSpirv.getBegin()), U32(newSpirv.getSizeInBytes() / sizeof(U32))),
  157. [&](U32 cmd, WeakArray<U32> instructions) {
  158. if(cmd == spv::OpDecorate && instructions[1] == spv::DecorationDescriptorSet
  159. && instructions[2] == kDxcVkBindlessRegisterSpace)
  160. {
  161. // Bindless set, rewrite its set
  162. instructions[2] = kMaxRegisterSpaces;
  163. }
  164. else if(cmd == spv::OpCapability && instructions[0] == spv::CapabilityMeshShadingEXT)
  165. {
  166. bRequiresMeshShaders = true;
  167. }
  168. });
  169. // Arm stats
  170. MaliOfflineCompilerOut maliocOut;
  171. Error err = Error::kNone;
  172. if((shaderType == ShaderType::kVertex || shaderType == ShaderType::kPixel || shaderType == ShaderType::kCompute)
  173. && !bRequiresMeshShaders)
  174. {
  175. err = runMaliOfflineCompiler(newSpirv, shaderType, maliocOut);
  176. if(err)
  177. {
  178. ANKI_LOGE("Mali offline compiler failed");
  179. ctx.m_error.store(1);
  180. break;
  181. }
  182. }
  183. // AMD
  184. RgaOutput rgaOut = {};
  185. #if 0
  186. err = runRadeonGpuAnalyzer(newSpirv, shaderType, rgaOut);
  187. if(err)
  188. {
  189. ANKI_LOGE("Radeon GPU Analyzer compiler failed");
  190. ctx.m_error.store(1);
  191. break;
  192. }
  193. #endif
  194. // Write stats
  195. Stats& stats = ctx.m_spirvStats[codeblockIdx];
  196. stats.m_arm.m_fma = maliocOut.m_fma;
  197. stats.m_arm.m_cvt = maliocOut.m_cvt;
  198. stats.m_arm.m_sfu = maliocOut.m_sfu;
  199. stats.m_arm.m_loadStore = maliocOut.m_loadStore;
  200. stats.m_arm.m_varying = maliocOut.m_varying;
  201. stats.m_arm.m_texture = maliocOut.m_texture;
  202. stats.m_arm.m_workRegisters = maliocOut.m_workRegisters;
  203. stats.m_arm.m_fp16ArithmeticPercentage = maliocOut.m_fp16ArithmeticPercentage;
  204. stats.m_arm.m_spillingCount = (maliocOut.m_spilling) ? 1.0 : 0.0;
  205. stats.m_amd.m_vgprCount = F64(rgaOut.m_vgprCount);
  206. stats.m_amd.m_sgprCount = F64(rgaOut.m_sgprCount);
  207. stats.m_amd.m_isaSize = F64(rgaOut.m_isaSize);
  208. }
  209. if(variantIdx > 0 && ((variantIdx + 1) % 32) == 0)
  210. {
  211. printf("Processed %u out of %u variants\n", variantIdx + 1, ctx.m_bin->m_variants.getSize());
  212. }
  213. }
  214. } // while
  215. };
  216. for(U32 i = 0; i < hive.getThreadCount(); ++i)
  217. {
  218. hive.submitTask(callback, &ctx);
  219. }
  220. hive.waitAllTasks();
  221. if(ctx.m_error.load() != 0)
  222. {
  223. return Error::kFunctionFailed;
  224. }
  225. // Cather the results
  226. Array<StageStats, U32(ShaderType::kCount)> allStageStats;
  227. for(const ShaderBinaryVariant& variant : bin.m_variants)
  228. {
  229. for(U32 t = 0; t < variant.m_techniqueCodeBlocks.getSize(); ++t)
  230. {
  231. for(ShaderType shaderType : EnumBitsIterable<ShaderType, ShaderTypeBit>(ctx.m_bin->m_techniques[t].m_shaderTypes))
  232. {
  233. const U32 codeblockIdx = variant.m_techniqueCodeBlocks[t].m_codeBlockIndices[shaderType];
  234. const Stats& stats = ctx.m_spirvStats[codeblockIdx];
  235. StageStats& allStats = allStageStats[shaderType];
  236. ++allStats.m_count;
  237. allStats.m_avgStats.op(stats, [](F64& a, F64 b) {
  238. a += b;
  239. });
  240. allStats.m_minStats.op(stats, [](F64& a, F64 b) {
  241. a = min(a, b);
  242. });
  243. allStats.m_maxStats.op(stats, [](F64& a, F64 b) {
  244. a = max(a, b);
  245. });
  246. }
  247. }
  248. }
  249. // Print
  250. for(ShaderType shaderType : EnumIterable<ShaderType>())
  251. {
  252. const StageStats& stage = allStageStats[shaderType];
  253. if(stage.m_count == 0)
  254. {
  255. continue;
  256. }
  257. printf("Stage %u\n", U32(shaderType));
  258. printf(" Arm shaders spilling regs %u\n", stage.m_spillingCount);
  259. const F64 countf = F64(stage.m_count);
  260. const Stats& avg = stage.m_avgStats;
  261. printf(" Average:\n");
  262. printf(" Arm: Regs %f FMA %f CVT %f SFU %f LS %f VAR %f TEX %f FP16 %f%%\n", avg.m_arm.m_workRegisters / countf, avg.m_arm.m_fma / countf,
  263. avg.m_arm.m_cvt / countf, avg.m_arm.m_sfu / countf, avg.m_arm.m_loadStore / countf, avg.m_arm.m_varying / countf,
  264. avg.m_arm.m_texture / countf, avg.m_arm.m_fp16ArithmeticPercentage / countf);
  265. printf(" AMD: VGPR %f SGPR %f ISA size %f\n", avg.m_amd.m_vgprCount / countf, avg.m_amd.m_sgprCount / countf,
  266. avg.m_amd.m_isaSize / countf);
  267. const Stats& maxs = stage.m_maxStats;
  268. printf(" Max:\n");
  269. printf(" Arm: Regs %f FMA %f CVT %f SFU %f LS %f VAR %f TEX %f FP16 %f%%\n", maxs.m_arm.m_workRegisters, maxs.m_arm.m_fma,
  270. maxs.m_arm.m_cvt, maxs.m_arm.m_sfu, maxs.m_arm.m_loadStore, maxs.m_arm.m_varying, maxs.m_arm.m_texture,
  271. maxs.m_arm.m_fp16ArithmeticPercentage);
  272. printf(" AMD: VGPR %f SGPR %f ISA size %f\n", maxs.m_amd.m_vgprCount, maxs.m_amd.m_sgprCount, maxs.m_amd.m_isaSize);
  273. }
  274. return Error::kNone;
  275. }
  276. Error dump(CString fname, Bool bDumpStats, Bool dumpBinary, Bool glsl, Bool spirv)
  277. {
  278. ShaderBinary* binary;
  279. ANKI_CHECK(deserializeShaderBinaryFromFile(fname, binary, ShaderCompilerMemoryPool::getSingleton()));
  280. class Dummy
  281. {
  282. public:
  283. ShaderBinary* m_binary;
  284. ~Dummy()
  285. {
  286. ShaderCompilerMemoryPool::getSingleton().free(m_binary);
  287. }
  288. } dummy{binary};
  289. if(dumpBinary)
  290. {
  291. ShaderDumpOptions options;
  292. options.m_writeGlsl = glsl;
  293. options.m_writeSpirv = spirv;
  294. ShaderCompilerString txt;
  295. dumpShaderBinary(options, *binary, txt);
  296. printf("%s\n", txt.cstr());
  297. }
  298. if(bDumpStats)
  299. {
  300. ANKI_CHECK(dumpStats(*binary));
  301. }
  302. return Error::kNone;
  303. }
  304. ANKI_MAIN_FUNCTION(myMain)
  305. int myMain(int argc, char** argv)
  306. {
  307. class Dummy
  308. {
  309. public:
  310. ~Dummy()
  311. {
  312. DefaultMemoryPool::freeSingleton();
  313. ShaderCompilerMemoryPool::freeSingleton();
  314. }
  315. } dummy;
  316. DefaultMemoryPool::allocateSingleton(allocAligned, nullptr);
  317. ShaderCompilerMemoryPool::allocateSingleton(allocAligned, nullptr);
  318. String filename;
  319. Bool dumpStats;
  320. Bool dumpBinary;
  321. Bool glsl;
  322. Bool spirv;
  323. if(parseCommandLineArgs(WeakArray<char*>(argv, argc), dumpStats, dumpBinary, glsl, spirv, filename))
  324. {
  325. ANKI_LOGE(kUsage, argv[0]);
  326. return 1;
  327. }
  328. const Error err = dump(filename, dumpStats, dumpBinary, glsl, spirv);
  329. if(err)
  330. {
  331. ANKI_LOGE("Can't dump due to an error. Bye");
  332. return 1;
  333. }
  334. return 0;
  335. }