// File: ShaderProgramBinaryDumpMain.cpp
  1. // Copyright (C) 2009-2022, Panagiotis Christopoulos Charitos and contributors.
  2. // All rights reserved.
  3. // Code licensed under the BSD License.
  4. // http://www.anki3d.org/LICENSE
#include <AnKi/ShaderCompiler/ShaderProgramCompiler.h>
#include <AnKi/ShaderCompiler/MaliOfflineCompiler.h>
#include <AnKi/ShaderCompiler/RadeonGpuAnalyzer.h>
#include <AnKi/Util/ThreadHive.h>
#include <AnKi/Util/System.h>

using namespace anki;

/// Usage text printed when the command line is malformed. The %s placeholder is filled with argv[0] via ANKI_LOGE.
static const char* kUsage = R"(Dump the shader binary to stdout
Usage: %s [options] input_shader_program_binary
Options:
-stats : Print performance statistics for all shaders. By default it doesn't
)";
  16. static Error parseCommandLineArgs(WeakArray<char*> argv, Bool& dumpStats, StringRaii& filename)
  17. {
  18. // Parse config
  19. if(argv.getSize() < 2)
  20. {
  21. return Error::kUserData;
  22. }
  23. dumpStats = false;
  24. filename = argv[argv.getSize() - 1];
  25. for(U32 i = 1; i < argv.getSize() - 1; i++)
  26. {
  27. if(strcmp(argv[i], "-stats") == 0)
  28. {
  29. dumpStats = true;
  30. }
  31. }
  32. return Error::kNone;
  33. }
/// Gathers and prints per-stage performance statistics for every variant in the binary by running two offline
/// tools over each code block: the Arm Mali offline compiler (malioc) and AMD's Radeon GPU Analyzer (rga).
/// Work is distributed over a ThreadHive; results are aggregated under a mutex and printed per shader stage.
/// @param bin The deserialized shader program binary to analyze.
/// @return Error::kFunctionFailed if any offline tool invocation failed, Error::kNone otherwise.
Error dumpStats(const ShaderProgramBinary& bin)
{
	HeapMemoryPool pool(allocAligned, nullptr);

	printf("\nOffline compilers stats:\n");
	// Flush so the header is visible before worker threads start printing progress.
	fflush(stdout);

	// One sample (or one aggregate: sum/min/max) of the metrics gathered per shader.
	class Stats
	{
	public:
		// Metrics reported by the Mali offline compiler.
		class
		{
		public:
			F64 m_fma;
			F64 m_cvt;
			F64 m_sfu;
			F64 m_loadStore;
			F64 m_varying;
			F64 m_texture;
			F64 m_workRegisters;
			F64 m_fp16ArithmeticPercentage;
		} m_arm;

		// Metrics reported by the Radeon GPU Analyzer.
		class
		{
		public:
			F64 m_vgprCount;
			F64 m_sgprCount;
			F64 m_isaSize;
		} m_amd;

		// Sets every metric to v.
		Stats(F64 v)
		{
			m_arm.m_fma = m_arm.m_cvt = m_arm.m_sfu = m_arm.m_loadStore = m_arm.m_varying = m_arm.m_texture =
				m_arm.m_workRegisters = m_arm.m_fp16ArithmeticPercentage = v;

			m_amd.m_vgprCount = m_amd.m_sgprCount = m_amd.m_isaSize = v;
		}
	};

	// Running aggregates for one shader stage.
	class StageStats
	{
	public:
		Stats m_avgStats{0.0}; // Sum of all samples; divided by m_count when printed.
		Stats m_maxStats{-1.0}; // Seeded below any real value so the first max() always wins.
		Stats m_minStats{kMaxF64}; // Seeded above any real value so the first min() always wins.
		U32 m_spillingCount = 0; // Number of samples where malioc reported register spilling.
		U32 m_count = 0; // Number of samples accumulated into the aggregates.
	};

	// State shared by all worker tasks.
	class Ctx
	{
	public:
		Array<StageStats, U32(ShaderType::kCount)> m_allStats;
		Mutex m_allStatsMtx; // Guards m_allStats.
		Atomic<U32> m_variantCount = {0}; // Shared cursor: each worker fetches the next variant index from here.
		HeapMemoryPool* m_pool = nullptr;
		const ShaderProgramBinary* m_bin = nullptr;
		Atomic<I32> m_error = {0}; // Set to 1 by the first worker that fails; makes the others stop early.
	};

	Ctx ctx;
	ctx.m_pool = &pool;
	ctx.m_bin = &bin;

	// NOTE(review): worker count is hard-coded to 8 — presumably the CPU core count would be preferable; confirm.
	ThreadHive hive(8, &pool);

	// Each task loops, claiming variant indices from the shared atomic counter until all variants are processed
	// or some task recorded an error.
	ThreadHiveTaskCallback callback = [](void* userData, [[maybe_unused]] U32 threadId,
										 [[maybe_unused]] ThreadHive& hive,
										 [[maybe_unused]] ThreadHiveSemaphore* signalSemaphore) {
		Ctx& ctx = *static_cast<Ctx*>(userData);

		U32 variantIdx;
		while((variantIdx = ctx.m_variantCount.fetchAdd(1)) < ctx.m_bin->m_variants.getSize()
			  && ctx.m_error.load() == 0)
		{
			const ShaderProgramBinaryVariant& variant = ctx.m_bin->m_variants[variantIdx];

			for(ShaderType shaderType : EnumIterable<ShaderType>())
			{
				// Skip stages this variant doesn't contain.
				if(variant.m_codeBlockIndices[shaderType] == kMaxU32)
				{
					continue;
				}

				const ShaderProgramBinaryCodeBlock& codeBlock =
					ctx.m_bin->m_codeBlocks[variant.m_codeBlockIndices[shaderType]];

				// Arm stats
				MaliOfflineCompilerOut maliocOut;
				Error err = runMaliOfflineCompiler(
#if ANKI_OS_LINUX
					ANKI_SOURCE_DIRECTORY "/ThirdParty/Bin/Linux64/MaliOfflineCompiler/malioc",
#elif ANKI_OS_WINDOWS
					ANKI_SOURCE_DIRECTORY "/ThirdParty/Bin/Windows64/MaliOfflineCompiler/malioc.exe",
#else
#	error "Not supported"
#endif
					codeBlock.m_binary, shaderType, *ctx.m_pool, maliocOut);
				if(err)
				{
					ANKI_LOGE("Mali offline compiler failed");
					ctx.m_error.store(1);
					break;
				}

				// AMD
				RgaOutput rgaOut;
				err = runRadeonGpuAnalyzer(
#if ANKI_OS_LINUX
					ANKI_SOURCE_DIRECTORY "/ThirdParty/Bin/Linux64/RadeonGpuAnalyzer/rga",
#elif ANKI_OS_WINDOWS
					ANKI_SOURCE_DIRECTORY "/ThirdParty/Bin/Windows64/RadeonGpuAnalyzer/rga.exe",
#else
#	error "Not supported"
#endif
					codeBlock.m_binary, shaderType, *ctx.m_pool, rgaOut);
				if(err)
				{
					ANKI_LOGE("Radeon GPU Analyzer compiler failed");
					ctx.m_error.store(1);
					break;
				}

				// Appends stats. The lock covers all aggregate updates for this sample.
				LockGuard lock(ctx.m_allStatsMtx);
				StageStats& stage = ctx.m_allStats[shaderType];

				if(maliocOut.m_spilling)
				{
					++stage.m_spillingCount;
				}
				++stage.m_count;

				// Accumulate sums for the averages (Arm metrics).
				stage.m_avgStats.m_arm.m_fma += maliocOut.m_fma;
				stage.m_avgStats.m_arm.m_cvt += maliocOut.m_cvt;
				stage.m_avgStats.m_arm.m_sfu += maliocOut.m_sfu;
				stage.m_avgStats.m_arm.m_loadStore += maliocOut.m_loadStore;
				stage.m_avgStats.m_arm.m_varying += maliocOut.m_varying;
				stage.m_avgStats.m_arm.m_texture += maliocOut.m_texture;
				stage.m_avgStats.m_arm.m_workRegisters += maliocOut.m_workRegisters;
				stage.m_avgStats.m_arm.m_fp16ArithmeticPercentage += maliocOut.m_fp16ArithmeticPercentage;

				// Track the per-metric maxima (Arm).
				stage.m_maxStats.m_arm.m_fma = max<F64>(stage.m_maxStats.m_arm.m_fma, maliocOut.m_fma);
				stage.m_maxStats.m_arm.m_cvt = max<F64>(stage.m_maxStats.m_arm.m_cvt, maliocOut.m_cvt);
				stage.m_maxStats.m_arm.m_sfu = max<F64>(stage.m_maxStats.m_arm.m_sfu, maliocOut.m_sfu);
				stage.m_maxStats.m_arm.m_loadStore =
					max<F64>(stage.m_maxStats.m_arm.m_loadStore, maliocOut.m_loadStore);
				stage.m_maxStats.m_arm.m_varying = max<F64>(stage.m_maxStats.m_arm.m_varying, maliocOut.m_varying);
				stage.m_maxStats.m_arm.m_texture = max<F64>(stage.m_maxStats.m_arm.m_texture, maliocOut.m_texture);
				stage.m_maxStats.m_arm.m_workRegisters =
					max<F64>(stage.m_maxStats.m_arm.m_workRegisters, maliocOut.m_workRegisters);
				stage.m_maxStats.m_arm.m_fp16ArithmeticPercentage =
					max<F64>(stage.m_maxStats.m_arm.m_fp16ArithmeticPercentage, maliocOut.m_fp16ArithmeticPercentage);

				// Track the per-metric minima (Arm).
				stage.m_minStats.m_arm.m_fma = min<F64>(stage.m_minStats.m_arm.m_fma, maliocOut.m_fma);
				stage.m_minStats.m_arm.m_cvt = min<F64>(stage.m_minStats.m_arm.m_cvt, maliocOut.m_cvt);
				stage.m_minStats.m_arm.m_sfu = min<F64>(stage.m_minStats.m_arm.m_sfu, maliocOut.m_sfu);
				stage.m_minStats.m_arm.m_loadStore =
					min<F64>(stage.m_minStats.m_arm.m_loadStore, maliocOut.m_loadStore);
				stage.m_minStats.m_arm.m_varying = min<F64>(stage.m_minStats.m_arm.m_varying, maliocOut.m_varying);
				stage.m_minStats.m_arm.m_texture = min<F64>(stage.m_minStats.m_arm.m_texture, maliocOut.m_texture);
				stage.m_minStats.m_arm.m_workRegisters =
					min<F64>(stage.m_minStats.m_arm.m_workRegisters, maliocOut.m_workRegisters);
				stage.m_minStats.m_arm.m_fp16ArithmeticPercentage =
					min<F64>(stage.m_minStats.m_arm.m_fp16ArithmeticPercentage, maliocOut.m_fp16ArithmeticPercentage);

				// AMD metrics: sums, minima and maxima.
				stage.m_avgStats.m_amd.m_vgprCount += F64(rgaOut.m_vgprCount);
				stage.m_avgStats.m_amd.m_sgprCount += F64(rgaOut.m_sgprCount);
				stage.m_avgStats.m_amd.m_isaSize += F64(rgaOut.m_isaSize);

				stage.m_minStats.m_amd.m_vgprCount = min(stage.m_minStats.m_amd.m_vgprCount, F64(rgaOut.m_vgprCount));
				stage.m_minStats.m_amd.m_sgprCount = min(stage.m_minStats.m_amd.m_sgprCount, F64(rgaOut.m_sgprCount));
				stage.m_minStats.m_amd.m_isaSize = min(stage.m_minStats.m_amd.m_isaSize, F64(rgaOut.m_isaSize));

				stage.m_maxStats.m_amd.m_vgprCount = max(stage.m_maxStats.m_amd.m_vgprCount, F64(rgaOut.m_vgprCount));
				stage.m_maxStats.m_amd.m_sgprCount = max(stage.m_maxStats.m_amd.m_sgprCount, F64(rgaOut.m_sgprCount));
				stage.m_maxStats.m_amd.m_isaSize = max(stage.m_maxStats.m_amd.m_isaSize, F64(rgaOut.m_isaSize));
			}

			// Report progress every 32 variants.
			if(variantIdx > 0 && ((variantIdx + 1) % 32) == 0)
			{
				printf("Processed %u out of %u variants\n", variantIdx + 1, ctx.m_bin->m_variants.getSize());
			}
		} // while
	};

	// Submit one copy of the task per hive thread; they cooperate through the shared counter.
	for(U32 i = 0; i < hive.getThreadCount(); ++i)
	{
		hive.submitTask(callback, &ctx);
	}

	hive.waitAllTasks();

	if(ctx.m_error.load() != 0)
	{
		return Error::kFunctionFailed;
	}

	// Print the aggregated results per stage. Stages with no samples are skipped (also avoids divide-by-zero).
	for(ShaderType shaderType : EnumIterable<ShaderType>())
	{
		const StageStats& stage = ctx.m_allStats[shaderType];

		if(stage.m_count == 0)
		{
			continue;
		}

		printf("Stage %u\n", U32(shaderType));
		printf(" Arm shaders spilling regs %u\n", stage.m_spillingCount);

		const F64 countf = F64(stage.m_count);

		const Stats& avg = stage.m_avgStats;
		printf(" Average:\n");
		printf(" Arm: Regs %f FMA %f CVT %f SFU %f LS %f VAR %f TEX %f FP16 %f%%\n",
			   avg.m_arm.m_workRegisters / countf, avg.m_arm.m_fma / countf, avg.m_arm.m_cvt / countf,
			   avg.m_arm.m_sfu / countf, avg.m_arm.m_loadStore / countf, avg.m_arm.m_varying / countf,
			   avg.m_arm.m_texture / countf, avg.m_arm.m_fp16ArithmeticPercentage / countf);
		printf(" AMD: VGPR %f SGPR %f ISA size %f\n", avg.m_amd.m_vgprCount / countf, avg.m_amd.m_sgprCount / countf,
			   avg.m_amd.m_isaSize / countf);

		const Stats& maxs = stage.m_maxStats;
		printf(" Max:\n");
		printf(" Arm: Regs %f FMA %f CVT %f SFU %f LS %f VAR %f TEX %f FP16 %f%%\n", maxs.m_arm.m_workRegisters,
			   maxs.m_arm.m_fma, maxs.m_arm.m_cvt, maxs.m_arm.m_sfu, maxs.m_arm.m_loadStore, maxs.m_arm.m_varying,
			   maxs.m_arm.m_texture, maxs.m_arm.m_fp16ArithmeticPercentage);
		printf(" AMD: VGPR %f SGPR %f ISA size %f\n", maxs.m_amd.m_vgprCount, maxs.m_amd.m_sgprCount,
			   maxs.m_amd.m_isaSize);
	}

	return Error::kNone;
}
  233. Error dump(CString fname, Bool bDumpStats)
  234. {
  235. HeapMemoryPool pool(allocAligned, nullptr);
  236. ShaderProgramBinaryWrapper binw(&pool);
  237. ANKI_CHECK(binw.deserializeFromFile(fname));
  238. StringRaii txt(&pool);
  239. dumpShaderProgramBinary(binw.getBinary(), txt);
  240. printf("%s\n", txt.cstr());
  241. if(bDumpStats)
  242. {
  243. ANKI_CHECK(dumpStats(binw.getBinary()));
  244. }
  245. return Error::kNone;
  246. }
  247. int main(int argc, char** argv)
  248. {
  249. HeapMemoryPool pool(allocAligned, nullptr);
  250. StringRaii filename(&pool);
  251. Bool dumpStats;
  252. if(parseCommandLineArgs(WeakArray<char*>(argv, argc), dumpStats, filename))
  253. {
  254. ANKI_LOGE(kUsage, argv[0]);
  255. return 1;
  256. }
  257. const Error err = dump(filename, dumpStats);
  258. if(err)
  259. {
  260. ANKI_LOGE("Can't dump due to an error. Bye");
  261. return 1;
  262. }
  263. return 0;
  264. }