//===---- MachineCombiner.cpp - Instcombining on SSA form machine code ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The machine combiner pass uses machine trace metrics to ensure the combined
// instructions do not lengthen the critical path or the resource depth.
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "machine-combiner"

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineTraceMetrics.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"

using namespace llvm;

STATISTIC(NumInstCombined, "Number of machine instructions combined");

namespace {
class MachineCombiner : public MachineFunctionPass {
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  MCSchedModel SchedModel;
  MachineRegisterInfo *MRI;
  MachineTraceMetrics *Traces;
  MachineTraceMetrics::Ensemble *MinInstr;

  TargetSchedModel TSchedModel;

  /// True if optimizing for code size.
  bool OptSize;

public:
  static char ID;
  MachineCombiner() : MachineFunctionPass(ID) {
    initializeMachineCombinerPass(*PassRegistry::getPassRegistry());
  }
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  bool runOnMachineFunction(MachineFunction &MF) override;
  const char *getPassName() const override { return "Machine InstCombiner"; }

private:
  bool doSubstitute(unsigned NewSize, unsigned OldSize);
  bool combineInstructions(MachineBasicBlock *);
  MachineInstr *getOperandDef(const MachineOperand &MO);
  unsigned getDepth(SmallVectorImpl<MachineInstr *> &InsInstrs,
                    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
                    MachineTraceMetrics::Trace BlockTrace);
  unsigned getLatency(MachineInstr *Root, MachineInstr *NewRoot,
                      MachineTraceMetrics::Trace BlockTrace);
  bool improvesCriticalPathLen(MachineBasicBlock *MBB, MachineInstr *Root,
                               MachineTraceMetrics::Trace BlockTrace,
                               SmallVectorImpl<MachineInstr *> &InsInstrs,
                               DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
                               bool NewCodeHasLessInsts);
  bool preservesResourceLen(MachineBasicBlock *MBB,
                            MachineTraceMetrics::Trace BlockTrace,
                            SmallVectorImpl<MachineInstr *> &InsInstrs,
                            SmallVectorImpl<MachineInstr *> &DelInstrs);
  void instr2instrSC(SmallVectorImpl<MachineInstr *> &Instrs,
                     SmallVectorImpl<const MCSchedClassDesc *> &InstrsSC);
};
} // end anonymous namespace

char MachineCombiner::ID = 0;
char &llvm::MachineCombinerID = MachineCombiner::ID;

INITIALIZE_PASS_BEGIN(MachineCombiner, "machine-combiner",
                      "Machine InstCombiner", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineTraceMetrics)
INITIALIZE_PASS_END(MachineCombiner, "machine-combiner", "Machine InstCombiner",
                    false, false)

void MachineCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addPreserved<MachineDominatorTree>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addRequired<MachineTraceMetrics>();
  AU.addPreserved<MachineTraceMetrics>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

MachineInstr *MachineCombiner::getOperandDef(const MachineOperand &MO) {
  MachineInstr *DefInstr = nullptr;
  // We need a virtual register definition.
  if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    DefInstr = MRI->getUniqueVRegDef(MO.getReg());
  // PHIs have no depth etc.
  if (DefInstr && DefInstr->isPHI())
    DefInstr = nullptr;
  return DefInstr;
}

/// Computes depth of instructions in vector \p InsInstrs.
///
/// \param InsInstrs is a vector of machine instructions
/// \param InstrIdxForVirtReg is a dense map of virtual register to index
/// of defining machine instruction in \p InsInstrs
/// \param BlockTrace is a trace of machine instructions
///
/// \returns Depth of last instruction in \p InsInstrs ("NewRoot")
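///
/// Illustrative example (cycle counts are made up, not from any real
/// scheduling model): if NewRoot reads two virtual registers whose
/// definitions sit at depths 2 and 4 in the trace, with operand latencies
/// 3 and 1 respectively, then NewRoot's depth is max(2 + 3, 4 + 1) = 5.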
unsigned
MachineCombiner::getDepth(SmallVectorImpl<MachineInstr *> &InsInstrs,
                          DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
                          MachineTraceMetrics::Trace BlockTrace) {
  SmallVector<unsigned, 16> InstrDepth;
  assert(TSchedModel.hasInstrSchedModel() && "Missing machine model\n");

  // For each instruction in the new sequence compute the depth based on the
  // operands. Use the trace information when possible. For new operands that
  // are tracked in the InstrIdxForVirtReg map the depth is looked up in
  // InstrDepth.
  for (auto *InstrPtr : InsInstrs) { // for each Use
    unsigned IDepth = 0;
    DEBUG(dbgs() << "NEW INSTR "; InstrPtr->dump(); dbgs() << "\n";);
    for (const MachineOperand &MO : InstrPtr->operands()) {
      // Check for virtual register operand.
      if (!(MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg())))
        continue;
      if (!MO.isUse())
        continue;
      unsigned DepthOp = 0;
      unsigned LatencyOp = 0;
      DenseMap<unsigned, unsigned>::iterator II =
          InstrIdxForVirtReg.find(MO.getReg());
      if (II != InstrIdxForVirtReg.end()) {
        // Operand is a new virtual register that is not in the trace.
        assert(II->second < InstrDepth.size() && "Bad Index");
        MachineInstr *DefInstr = InsInstrs[II->second];
        assert(DefInstr &&
               "There must be a definition for a new virtual register");
        DepthOp = InstrDepth[II->second];
        LatencyOp = TSchedModel.computeOperandLatency(
            DefInstr, DefInstr->findRegisterDefOperandIdx(MO.getReg()),
            InstrPtr, InstrPtr->findRegisterUseOperandIdx(MO.getReg()));
      } else {
        MachineInstr *DefInstr = getOperandDef(MO);
        if (DefInstr) {
          DepthOp = BlockTrace.getInstrCycles(DefInstr).Depth;
          LatencyOp = TSchedModel.computeOperandLatency(
              DefInstr, DefInstr->findRegisterDefOperandIdx(MO.getReg()),
              InstrPtr, InstrPtr->findRegisterUseOperandIdx(MO.getReg()));
        }
      }
      IDepth = std::max(IDepth, DepthOp + LatencyOp);
    }
    InstrDepth.push_back(IDepth);
  }
  unsigned NewRootIdx = InsInstrs.size() - 1;
  return InstrDepth[NewRootIdx];
}

/// Computes instruction latency as max of latency of defined operands.
///
/// \param Root is a machine instruction that could be replaced by NewRoot.
/// It is used to compute more accurate latency information for NewRoot in
/// case there is a dependent instruction in the same trace (\p BlockTrace).
/// \param NewRoot is the instruction for which the latency is computed
/// \param BlockTrace is a trace of machine instructions
///
/// \returns Latency of \p NewRoot
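///
/// Illustrative example (made-up latencies): if NewRoot's only definition is
/// read by an instruction in the same trace with an operand latency of 4
/// cycles, NewRootLatency is 4; if the first user is not a dependence within
/// the trace, the scheduling model's default latency for NewRoot's opcode is
/// used instead.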
unsigned MachineCombiner::getLatency(MachineInstr *Root, MachineInstr *NewRoot,
                                     MachineTraceMetrics::Trace BlockTrace) {
  assert(TSchedModel.hasInstrSchedModel() && "Missing machine model\n");

  // Check each definition in NewRoot and compute the latency.
  unsigned NewRootLatency = 0;
  for (const MachineOperand &MO : NewRoot->operands()) {
    // Check for virtual register operand.
    if (!(MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg())))
      continue;
    if (!MO.isDef())
      continue;
    // Get the first instruction that uses MO, stepping past the defining
    // operand's own entry on MO's use-def list.
    MachineRegisterInfo::reg_iterator RI = MRI->reg_begin(MO.getReg());
    RI++;
    MachineInstr *UseMO = RI->getParent();
    unsigned LatencyOp = 0;
    if (UseMO && BlockTrace.isDepInTrace(Root, UseMO)) {
      LatencyOp = TSchedModel.computeOperandLatency(
          NewRoot, NewRoot->findRegisterDefOperandIdx(MO.getReg()), UseMO,
          UseMO->findRegisterUseOperandIdx(MO.getReg()));
    } else {
      LatencyOp = TSchedModel.computeInstrLatency(NewRoot->getOpcode());
    }
    NewRootLatency = std::max(NewRootLatency, LatencyOp);
  }
  return NewRootLatency;
}

/// True when the new instruction sequence does not lengthen the critical path
/// and the new sequence has fewer instructions or the new sequence improves
/// the critical path.
/// The DAGCombine code sequence ends in MI (Machine Instruction) Root.
/// The new code sequence ends in MI NewRoot. A necessary condition for the new
/// sequence to replace the old sequence is that it cannot lengthen the critical
/// path. This is decided by the formula:
/// (NewRootDepth + NewRootLatency) <= (RootDepth + RootLatency + RootSlack).
/// If the new sequence has an equal length critical path but does not reduce
/// the number of instructions (NewCodeHasLessInsts is false), then it is not
/// considered an improvement. The slack is the number of cycles Root can be
/// delayed before the critical path becomes longer.
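///
/// Illustrative example (made-up cycle counts): with NewRootDepth = 3,
/// NewRootLatency = 6, RootDepth = 4, RootLatency = 2 and RootSlack = 3, the
/// comparison is 3 + 6 = 9 <= 4 + 2 + 3 = 9; this tie is accepted only when
/// the new sequence also has fewer instructions.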
bool MachineCombiner::improvesCriticalPathLen(
    MachineBasicBlock *MBB, MachineInstr *Root,
    MachineTraceMetrics::Trace BlockTrace,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
    bool NewCodeHasLessInsts) {
  assert(TSchedModel.hasInstrSchedModel() && "Missing machine model\n");

  // NewRoot is the last instruction in the \p InsInstrs vector.
  // Get depth and latency of NewRoot.
  unsigned NewRootIdx = InsInstrs.size() - 1;
  MachineInstr *NewRoot = InsInstrs[NewRootIdx];
  unsigned NewRootDepth = getDepth(InsInstrs, InstrIdxForVirtReg, BlockTrace);
  unsigned NewRootLatency = getLatency(Root, NewRoot, BlockTrace);

  // Get depth, latency and slack of Root.
  unsigned RootDepth = BlockTrace.getInstrCycles(Root).Depth;
  unsigned RootLatency = TSchedModel.computeInstrLatency(Root);
  unsigned RootSlack = BlockTrace.getInstrSlack(Root);

  DEBUG(dbgs() << "DEPENDENCE DATA FOR " << Root << "\n";
        dbgs() << " NewRootDepth: " << NewRootDepth
               << " NewRootLatency: " << NewRootLatency << "\n";
        dbgs() << " RootDepth: " << RootDepth << " RootLatency: " << RootLatency
               << " RootSlack: " << RootSlack << "\n";
        dbgs() << " NewRootDepth + NewRootLatency "
               << NewRootDepth + NewRootLatency << "\n";
        dbgs() << " RootDepth + RootLatency + RootSlack "
               << RootDepth + RootLatency + RootSlack << "\n";);

  unsigned NewCycleCount = NewRootDepth + NewRootLatency;
  unsigned OldCycleCount = RootDepth + RootLatency + RootSlack;
  if (NewCodeHasLessInsts)
    return NewCycleCount <= OldCycleCount;
  else
    return NewCycleCount < OldCycleCount;
}

/// Helper routine to convert instructions into their scheduling classes (SC).
void MachineCombiner::instr2instrSC(
    SmallVectorImpl<MachineInstr *> &Instrs,
    SmallVectorImpl<const MCSchedClassDesc *> &InstrsSC) {
  for (auto *InstrPtr : Instrs) {
    unsigned Opc = InstrPtr->getOpcode();
    unsigned Idx = TII->get(Opc).getSchedClass();
    const MCSchedClassDesc *SC = SchedModel.getSchedClassDesc(Idx);
    InstrsSC.push_back(SC);
  }
}

/// True when the new instructions do not increase resource length.
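///
/// Illustrative example (made-up numbers): if the longest-occupied functional
/// unit bounds the trace at 10 cycles before the substitution, the
/// replacement is allowed only while the inserted instructions add no more
/// cycles to any unit than the deleted instructions release, keeping the
/// resource length at or below 10.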
bool MachineCombiner::preservesResourceLen(
    MachineBasicBlock *MBB, MachineTraceMetrics::Trace BlockTrace,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs) {
  // Compute current resource length.
  SmallVector<const MachineBasicBlock *, 1> MBBarr;
  MBBarr.push_back(MBB);
  unsigned ResLenBeforeCombine = BlockTrace.getResourceLength(MBBarr);

  // Deal with SC rather than Instructions.
  SmallVector<const MCSchedClassDesc *, 16> InsInstrsSC;
  SmallVector<const MCSchedClassDesc *, 16> DelInstrsSC;
  instr2instrSC(InsInstrs, InsInstrsSC);
  instr2instrSC(DelInstrs, DelInstrsSC);
  ArrayRef<const MCSchedClassDesc *> MSCInsArr = makeArrayRef(InsInstrsSC);
  ArrayRef<const MCSchedClassDesc *> MSCDelArr = makeArrayRef(DelInstrsSC);

  // Compute new resource length.
  unsigned ResLenAfterCombine =
      BlockTrace.getResourceLength(MBBarr, MSCInsArr, MSCDelArr);

  DEBUG(dbgs() << "RESOURCE DATA: \n";
        dbgs() << " resource len before: " << ResLenBeforeCombine
               << " after: " << ResLenAfterCombine << "\n";);

  return ResLenAfterCombine <= ResLenBeforeCombine;
}

/// \returns true when the new instruction sequence should be generated
/// independent of whether it lengthens the critical path or not.
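///
/// For example, when optimizing for code size, replacing a MUL + ADD pair
/// (two instructions) by a single MADD yields NewSize < OldSize, so the
/// substitution is taken without consulting the trace metrics.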
bool MachineCombiner::doSubstitute(unsigned NewSize, unsigned OldSize) {
  if (OptSize && (NewSize < OldSize))
    return true;
  if (!TSchedModel.hasInstrSchedModel())
    return true;
  return false;
}

/// Substitute a slow code sequence with a faster one by
/// evaluating instruction combining patterns.
/// The prototype of such a pattern is MUL + ADD -> MADD. Performs instruction
/// combining based on machine trace metrics. Only combine a sequence of
/// instructions when this neither lengthens the critical path nor increases
/// resource pressure. When optimizing for code size always combine when the
/// new sequence is shorter.
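///
/// A target opts in through the TargetInstrInfo hooks used below. A rough
/// sketch (the pattern enumerators and exact override signatures are
/// target-specific; this is illustrative, not a verbatim backend):
///
///   bool MyTargetInstrInfo::getMachineCombinerPatterns(
///       MachineInstr &Root,
///       SmallVectorImpl<MachineCombinerPattern::MC_PATTERN> &Patterns) const {
///     // Push candidate patterns rooted at Root, e.g. a MUL feeding this ADD.
///   }
///
///   void MyTargetInstrInfo::genAlternativeCodeSequence(...) {
///     // Build the replacement (e.g. MADD) into InsInstrs, record the old
///     // sequence in DelInstrs, and map new vregs in InstrIdxForVirtReg.
///   }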
bool MachineCombiner::combineInstructions(MachineBasicBlock *MBB) {
  bool Changed = false;
  DEBUG(dbgs() << "Combining MBB " << MBB->getName() << "\n");

  auto BlockIter = MBB->begin();

  while (BlockIter != MBB->end()) {
    auto &MI = *BlockIter++;

    DEBUG(dbgs() << "INSTR "; MI.dump(); dbgs() << "\n";);
    SmallVector<MachineCombinerPattern::MC_PATTERN, 16> Patterns;
    // The motivating example is:
    //
    //     MUL  Other        MUL_op1 MUL_op2 Other
    //      \    /               \      |     /
    //      ADD/SUB      =>        MADD/MSUB
    //      (=Root)                (=NewRoot)
    //
    // The DAGCombine code always replaced MUL + ADD/SUB by MADD. While this is
    // usually beneficial for code size it unfortunately can hurt performance
    // when the ADD is on the critical path, but the MUL is not. With the
    // substitution the MUL becomes part of the critical path (in form of the
    // MADD) and can lengthen it on architectures where the MADD latency is
    // longer than the ADD latency.
    //
    // For each instruction we check if it can be the root of a combiner
    // pattern. Then for each pattern the new code sequence in form of MI is
    // generated and evaluated. When the efficiency criteria (don't lengthen
    // critical path, don't use more resources) are met the new sequence gets
    // hooked up into the basic block before the old sequence is removed.
    //
    // The algorithm does not try to evaluate all patterns and pick the best.
    // This is only an artificial restriction though. In practice there is
    // mostly one pattern, and getMachineCombinerPatterns() can order patterns
    // based on an internal cost heuristic.
    if (TII->getMachineCombinerPatterns(MI, Patterns)) {
      for (auto P : Patterns) {
        SmallVector<MachineInstr *, 16> InsInstrs;
        SmallVector<MachineInstr *, 16> DelInstrs;
        DenseMap<unsigned, unsigned> InstrIdxForVirtReg;
        if (!MinInstr)
          MinInstr = Traces->getEnsemble(MachineTraceMetrics::TS_MinInstrCount);
        MachineTraceMetrics::Trace BlockTrace = MinInstr->getTrace(MBB);
        Traces->verifyAnalysis();
        TII->genAlternativeCodeSequence(MI, P, InsInstrs, DelInstrs,
                                        InstrIdxForVirtReg);
        unsigned NewInstCount = InsInstrs.size();
        unsigned OldInstCount = DelInstrs.size();
        // Found pattern, but did not generate alternative sequence.
        // This can happen e.g. when an immediate could not be materialized
        // in a single instruction.
        if (!NewInstCount)
          continue;
        // Substitute when we optimize for code size and the new sequence has
        // fewer instructions OR
        // the new sequence neither lengthens the critical path nor increases
        // resource pressure.
        if (doSubstitute(NewInstCount, OldInstCount) ||
            (improvesCriticalPathLen(MBB, &MI, BlockTrace, InsInstrs,
                                     InstrIdxForVirtReg,
                                     NewInstCount < OldInstCount) &&
             preservesResourceLen(MBB, BlockTrace, InsInstrs, DelInstrs))) {
          for (auto *InstrPtr : InsInstrs)
            MBB->insert((MachineBasicBlock::iterator)&MI, InstrPtr);
          for (auto *InstrPtr : DelInstrs)
            InstrPtr->eraseFromParentAndMarkDBGValuesForRemoval();

          Changed = true;
          ++NumInstCombined;

          Traces->invalidate(MBB);
          Traces->verifyAnalysis();
          // Eagerly stop after the first pattern fires.
          break;
        } else {
          // Clean up instructions of the alternative code sequence. There is
          // no use for them.
          MachineFunction *MF = MBB->getParent();
          for (auto *InstrPtr : InsInstrs)
            MF->DeleteMachineInstr(InstrPtr);
        }
        InstrIdxForVirtReg.clear();
      }
    }
  }

  return Changed;
}

bool MachineCombiner::runOnMachineFunction(MachineFunction &MF) {
  const TargetSubtargetInfo &STI = MF.getSubtarget();
  TII = STI.getInstrInfo();
  TRI = STI.getRegisterInfo();
  SchedModel = STI.getSchedModel();
  TSchedModel.init(SchedModel, &STI, TII);
  MRI = &MF.getRegInfo();
  Traces = &getAnalysis<MachineTraceMetrics>();
  MinInstr = nullptr;
  OptSize = MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);

  DEBUG(dbgs() << getPassName() << ": " << MF.getName() << '\n');
  if (!TII->useMachineCombiner()) {
    DEBUG(dbgs() << "  Skipping pass: Target does not support machine combiner\n");
    return false;
  }

  bool Changed = false;

  // Try to combine instructions.
  for (auto &MBB : MF)
    Changed |= combineInstructions(&MBB);

  return Changed;
}