CodeGenSchedule.cpp

  1. //===- CodeGenSchedule.cpp - Scheduling MachineModels ---------------------===//
  2. //
  3. // The LLVM Compiler Infrastructure
  4. //
  5. // This file is distributed under the University of Illinois Open Source
  6. // License. See LICENSE.TXT for details.
  7. //
  8. //===----------------------------------------------------------------------===//
  9. //
  10. // This file defines structures to encapsulate the machine model as described in
  11. // the target description.
  12. //
  13. //===----------------------------------------------------------------------===//
  14. #include "CodeGenSchedule.h"
  15. #include "CodeGenTarget.h"
  16. #include "llvm/ADT/STLExtras.h"
  17. #include "llvm/Support/Debug.h"
  18. #include "llvm/Support/Regex.h"
  19. #include "llvm/TableGen/Error.h"
  20. using namespace llvm;
  21. #define DEBUG_TYPE "subtarget-emitter"
  22. #ifndef NDEBUG
  23. static void dumpIdxVec(const IdxVec &V) {
  24. for (unsigned i = 0, e = V.size(); i < e; ++i) {
  25. dbgs() << V[i] << ", ";
  26. }
  27. }
  28. static void dumpIdxVec(const SmallVectorImpl<unsigned> &V) {
  29. for (unsigned i = 0, e = V.size(); i < e; ++i) {
  30. dbgs() << V[i] << ", ";
  31. }
  32. }
  33. #endif
  34. namespace {
  35. // (instrs a, b, ...) Evaluate and union all arguments. Identical to AddOp.
  36. struct InstrsOp : public SetTheory::Operator {
  37. void apply(SetTheory &ST, DagInit *Expr, SetTheory::RecSet &Elts,
  38. ArrayRef<SMLoc> Loc) override {
  39. ST.evaluate(Expr->arg_begin(), Expr->arg_end(), Elts, Loc);
  40. }
  41. };
  42. // (instregex "OpcPat",...) Find all instructions matching an opcode pattern.
  43. //
  44. // TODO: Since this is a prefix match, perform a binary search over the
  45. // instruction names using lower_bound. Note that the predefined instrs must be
  46. // scanned linearly first. However, this is only safe if the regex pattern has
  47. // no top-level bars. The DAG already has a list of patterns, so there's no
  48. // reason to use top-level bars, but we need a way to verify they don't exist
  49. // before implementing the optimization.
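// For illustration (opcode names hypothetical): (instregex "MUL", "DIV.*rr")
// wraps each pattern as "^(MUL)" and "^(DIV.*rr)", so it selects every
// instruction whose record name merely begins with the pattern (MUL, MULX32rr,
// DIV32rr, and so on), not only exact matches.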
  50. struct InstRegexOp : public SetTheory::Operator {
  51. const CodeGenTarget &Target;
  52. InstRegexOp(const CodeGenTarget &t): Target(t) {}
  53. void apply(SetTheory &ST, DagInit *Expr, SetTheory::RecSet &Elts,
  54. ArrayRef<SMLoc> Loc) override {
  55. SmallVector<Regex, 4> RegexList;
  56. for (DagInit::const_arg_iterator
  57. AI = Expr->arg_begin(), AE = Expr->arg_end(); AI != AE; ++AI) {
  58. StringInit *SI = dyn_cast<StringInit>(*AI);
  59. if (!SI)
  60. PrintFatalError(Loc, "instregex requires pattern string: "
  61. + Expr->getAsString());
  62. std::string pat = SI->getValue();
  63. // Implement a python-style prefix match.
  64. if (pat[0] != '^') {
  65. pat.insert(0, "^(");
  66. pat.insert(pat.end(), ')');
  67. }
  68. RegexList.push_back(Regex(pat));
  69. }
  70. for (const CodeGenInstruction *Inst : Target.instructions()) {
  71. for (auto &R : RegexList) {
  72. if (R.match(Inst->TheDef->getName()))
  73. Elts.insert(Inst->TheDef);
  74. }
  75. }
  76. }
  77. };
  78. } // end anonymous namespace
  79. /// CodeGenSchedModels ctor interprets machine model records and populates maps.
  80. CodeGenSchedModels::CodeGenSchedModels(RecordKeeper &RK,
  81. const CodeGenTarget &TGT):
  82. Records(RK), Target(TGT) {
  83. Sets.addFieldExpander("InstRW", "Instrs");
  84. // Allow Set evaluation to recognize the dags used in InstRW records:
  85. // (instrs Op1, Op2, ...)
  86. Sets.addOperator("instrs", llvm::make_unique<InstrsOp>());
  87. Sets.addOperator("instregex", llvm::make_unique<InstRegexOp>(Target));
  88. // Instantiate a CodeGenProcModel for each SchedMachineModel with the values
  89. // that are explicitly referenced in tablegen records. Resources associated
  90. // with each processor will be derived later. Populate ProcModelMap with the
  91. // CodeGenProcModel instances.
  92. collectProcModels();
  93. // Instantiate a CodeGenSchedRW for each SchedReadWrite record explicitly
  94. // defined, and populate SchedReads and SchedWrites vectors. Implicit
  95. // SchedReadWrites that represent sequences derived from expanded variants will
  96. // be inferred later.
  97. collectSchedRW();
  98. // Instantiate a CodeGenSchedClass for each unique SchedRW signature directly
  99. // required by an instruction definition, and populate SchedClassIdxMap. Set
  100. // NumItineraryClasses to the number of explicit itinerary classes referenced
  101. // by instructions. Set NumInstrSchedClasses to the number of itinerary
  102. // classes plus any classes implied by instructions that derive from class
  103. // Sched and provide SchedRW list. This does not infer any new classes from
  104. // SchedVariant.
  105. collectSchedClasses();
  106. // Find instruction itineraries for each processor. Sort and populate
  107. // CodeGenProcModel::ItinDefList. (Cycle-to-cycle itineraries). This requires
  108. // all itinerary classes to be discovered.
  109. collectProcItins();
  110. // Find ItinRW records for each processor and itinerary class.
  111. // (For per-operand resources mapped to itinerary classes).
  112. collectProcItinRW();
  113. // Infer new SchedClasses from SchedVariant.
  114. inferSchedClasses();
  115. // Populate each CodeGenProcModel's WriteResDefs, ReadAdvanceDefs, and
  116. // ProcResourceDefs.
  117. collectProcResources();
  118. }
  119. /// Gather all processor models.
  120. void CodeGenSchedModels::collectProcModels() {
  121. RecVec ProcRecords = Records.getAllDerivedDefinitions("Processor");
  122. std::sort(ProcRecords.begin(), ProcRecords.end(), LessRecordFieldName());
  123. // Reserve space because we can. Reallocation would be ok.
  124. ProcModels.reserve(ProcRecords.size()+1);
  125. // Use idx=0 for NoModel/NoItineraries.
  126. Record *NoModelDef = Records.getDef("NoSchedModel");
  127. Record *NoItinsDef = Records.getDef("NoItineraries");
  128. ProcModels.emplace_back(0, "NoSchedModel", NoModelDef, NoItinsDef);
  129. ProcModelMap[NoModelDef] = 0;
  130. // For each processor, find a unique machine model.
  131. for (unsigned i = 0, N = ProcRecords.size(); i < N; ++i)
  132. addProcModel(ProcRecords[i]);
  133. }
  134. /// Get a unique processor model based on the defined MachineModel and
  135. /// ProcessorItineraries.
  136. void CodeGenSchedModels::addProcModel(Record *ProcDef) {
  137. Record *ModelKey = getModelOrItinDef(ProcDef);
  138. if (!ProcModelMap.insert(std::make_pair(ModelKey, ProcModels.size())).second)
  139. return;
  140. std::string Name = ModelKey->getName();
  141. if (ModelKey->isSubClassOf("SchedMachineModel")) {
  142. Record *ItinsDef = ModelKey->getValueAsDef("Itineraries");
  143. ProcModels.emplace_back(ProcModels.size(), Name, ModelKey, ItinsDef);
  144. }
  145. else {
  146. // An itinerary is defined without a machine model. Infer a new model.
  147. if (!ModelKey->getValueAsListOfDefs("IID").empty())
  148. Name = Name + "Model";
  149. ProcModels.emplace_back(ProcModels.size(), Name,
  150. ProcDef->getValueAsDef("SchedModel"), ModelKey);
  151. }
  152. DEBUG(ProcModels.back().dump());
  153. }
  154. // Recursively find all reachable SchedReadWrite records.
  155. static void scanSchedRW(Record *RWDef, RecVec &RWDefs,
  156. SmallPtrSet<Record*, 16> &RWSet) {
  157. if (!RWSet.insert(RWDef).second)
  158. return;
  159. RWDefs.push_back(RWDef);
  160. // Reads don't currently have sequence records, but they can be added later.
  161. if (RWDef->isSubClassOf("WriteSequence")) {
  162. RecVec Seq = RWDef->getValueAsListOfDefs("Writes");
  163. for (RecIter I = Seq.begin(), E = Seq.end(); I != E; ++I)
  164. scanSchedRW(*I, RWDefs, RWSet);
  165. }
  166. else if (RWDef->isSubClassOf("SchedVariant")) {
  167. // Visit each variant (guarded by a different predicate).
  168. RecVec Vars = RWDef->getValueAsListOfDefs("Variants");
  169. for (RecIter VI = Vars.begin(), VE = Vars.end(); VI != VE; ++VI) {
  170. // Visit each RW in the sequence selected by the current variant.
  171. RecVec Selected = (*VI)->getValueAsListOfDefs("Selected");
  172. for (RecIter I = Selected.begin(), E = Selected.end(); I != E; ++I)
  173. scanSchedRW(*I, RWDefs, RWSet);
  174. }
  175. }
  176. }
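// For illustration (names hypothetical): scanning
//   def WriteIMulLd : WriteSequence<[WriteLoad, WriteIMul]>;
// also records WriteLoad and WriteIMul, and scanning a SchedWriteVariant
// records every write selected by each of its guarded SchedVars.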
  177. // Collect and sort all SchedReadWrites reachable via tablegen records.
  178. // More may be inferred later when inferring new SchedClasses from variants.
  179. void CodeGenSchedModels::collectSchedRW() {
  180. // Reserve idx=0 for invalid writes/reads.
  181. SchedWrites.resize(1);
  182. SchedReads.resize(1);
  183. SmallPtrSet<Record*, 16> RWSet;
  184. // Find all SchedReadWrites referenced by instruction defs.
  185. RecVec SWDefs, SRDefs;
  186. for (const CodeGenInstruction *Inst : Target.instructions()) {
  187. Record *SchedDef = Inst->TheDef;
  188. if (SchedDef->isValueUnset("SchedRW"))
  189. continue;
  190. RecVec RWs = SchedDef->getValueAsListOfDefs("SchedRW");
  191. for (RecIter RWI = RWs.begin(), RWE = RWs.end(); RWI != RWE; ++RWI) {
  192. if ((*RWI)->isSubClassOf("SchedWrite"))
  193. scanSchedRW(*RWI, SWDefs, RWSet);
  194. else {
  195. assert((*RWI)->isSubClassOf("SchedRead") && "Unknown SchedReadWrite");
  196. scanSchedRW(*RWI, SRDefs, RWSet);
  197. }
  198. }
  199. }
  200. // Find all ReadWrites referenced by InstRW.
  201. RecVec InstRWDefs = Records.getAllDerivedDefinitions("InstRW");
  202. for (RecIter OI = InstRWDefs.begin(), OE = InstRWDefs.end(); OI != OE; ++OI) {
  203. // For all OperandReadWrites.
  204. RecVec RWDefs = (*OI)->getValueAsListOfDefs("OperandReadWrites");
  205. for (RecIter RWI = RWDefs.begin(), RWE = RWDefs.end();
  206. RWI != RWE; ++RWI) {
  207. if ((*RWI)->isSubClassOf("SchedWrite"))
  208. scanSchedRW(*RWI, SWDefs, RWSet);
  209. else {
  210. assert((*RWI)->isSubClassOf("SchedRead") && "Unknown SchedReadWrite");
  211. scanSchedRW(*RWI, SRDefs, RWSet);
  212. }
  213. }
  214. }
  215. // Find all ReadWrites referenced by ItinRW.
  216. RecVec ItinRWDefs = Records.getAllDerivedDefinitions("ItinRW");
  217. for (RecIter II = ItinRWDefs.begin(), IE = ItinRWDefs.end(); II != IE; ++II) {
  218. // For all OperandReadWrites.
  219. RecVec RWDefs = (*II)->getValueAsListOfDefs("OperandReadWrites");
  220. for (RecIter RWI = RWDefs.begin(), RWE = RWDefs.end();
  221. RWI != RWE; ++RWI) {
  222. if ((*RWI)->isSubClassOf("SchedWrite"))
  223. scanSchedRW(*RWI, SWDefs, RWSet);
  224. else {
  225. assert((*RWI)->isSubClassOf("SchedRead") && "Unknown SchedReadWrite");
  226. scanSchedRW(*RWI, SRDefs, RWSet);
  227. }
  228. }
  229. }
  230. // Find all ReadWrites referenced by SchedAlias. AliasDefs needs to be sorted
  231. // for the loop below that initializes Alias vectors.
  232. RecVec AliasDefs = Records.getAllDerivedDefinitions("SchedAlias");
  233. std::sort(AliasDefs.begin(), AliasDefs.end(), LessRecord());
  234. for (RecIter AI = AliasDefs.begin(), AE = AliasDefs.end(); AI != AE; ++AI) {
  235. Record *MatchDef = (*AI)->getValueAsDef("MatchRW");
  236. Record *AliasDef = (*AI)->getValueAsDef("AliasRW");
  237. if (MatchDef->isSubClassOf("SchedWrite")) {
  238. if (!AliasDef->isSubClassOf("SchedWrite"))
  239. PrintFatalError((*AI)->getLoc(), "SchedWrite Alias must be SchedWrite");
  240. scanSchedRW(AliasDef, SWDefs, RWSet);
  241. }
  242. else {
  243. assert(MatchDef->isSubClassOf("SchedRead") && "Unknown SchedReadWrite");
  244. if (!AliasDef->isSubClassOf("SchedRead"))
  245. PrintFatalError((*AI)->getLoc(), "SchedRead Alias must be SchedRead");
  246. scanSchedRW(AliasDef, SRDefs, RWSet);
  247. }
  248. }
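// For illustration (names hypothetical), an alias such as
//   def : SchedAlias<WriteIMul, MyCoreWriteIMul>;
// is scanned above via its AliasRW side (MyCoreWriteIMul); the MatchRW and
// AliasRW kinds must agree, i.e. writes alias writes and reads alias reads.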
  249. // Sort and add the SchedReadWrites directly referenced by instructions or
  250. // itinerary resources. Index reads and writes in separate domains.
  251. std::sort(SWDefs.begin(), SWDefs.end(), LessRecord());
  252. for (RecIter SWI = SWDefs.begin(), SWE = SWDefs.end(); SWI != SWE; ++SWI) {
  253. assert(!getSchedRWIdx(*SWI, /*IsRead=*/false) && "duplicate SchedWrite");
  254. SchedWrites.emplace_back(SchedWrites.size(), *SWI);
  255. }
  256. std::sort(SRDefs.begin(), SRDefs.end(), LessRecord());
  257. for (RecIter SRI = SRDefs.begin(), SRE = SRDefs.end(); SRI != SRE; ++SRI) {
  258. assert(!getSchedRWIdx(*SRI, /*IsRead=*/true) && "duplicate SchedRead");
  259. SchedReads.emplace_back(SchedReads.size(), *SRI);
  260. }
  261. // Initialize WriteSequence vectors.
  262. for (std::vector<CodeGenSchedRW>::iterator WI = SchedWrites.begin(),
  263. WE = SchedWrites.end(); WI != WE; ++WI) {
  264. if (!WI->IsSequence)
  265. continue;
  266. findRWs(WI->TheDef->getValueAsListOfDefs("Writes"), WI->Sequence,
  267. /*IsRead=*/false);
  268. }
  269. // Initialize Aliases vectors.
  270. for (RecIter AI = AliasDefs.begin(), AE = AliasDefs.end(); AI != AE; ++AI) {
  271. Record *AliasDef = (*AI)->getValueAsDef("AliasRW");
  272. getSchedRW(AliasDef).IsAlias = true;
  273. Record *MatchDef = (*AI)->getValueAsDef("MatchRW");
  274. CodeGenSchedRW &RW = getSchedRW(MatchDef);
  275. if (RW.IsAlias)
  276. PrintFatalError((*AI)->getLoc(), "Cannot Alias an Alias");
  277. RW.Aliases.push_back(*AI);
  278. }
  279. DEBUG(
  280. for (unsigned WIdx = 0, WEnd = SchedWrites.size(); WIdx != WEnd; ++WIdx) {
  281. dbgs() << WIdx << ": ";
  282. SchedWrites[WIdx].dump();
  283. dbgs() << '\n';
  284. }
  285. for (unsigned RIdx = 0, REnd = SchedReads.size(); RIdx != REnd; ++RIdx) {
  286. dbgs() << RIdx << ": ";
  287. SchedReads[RIdx].dump();
  288. dbgs() << '\n';
  289. }
  290. RecVec RWDefs = Records.getAllDerivedDefinitions("SchedReadWrite");
  291. for (RecIter RI = RWDefs.begin(), RE = RWDefs.end();
  292. RI != RE; ++RI) {
  293. if (!getSchedRWIdx(*RI, (*RI)->isSubClassOf("SchedRead"))) {
  294. const std::string &Name = (*RI)->getName();
  295. if (Name != "NoWrite" && Name != "ReadDefault")
  296. dbgs() << "Unused SchedReadWrite " << (*RI)->getName() << '\n';
  297. }
  298. });
  299. }
  300. /// Compute a SchedRW name from a sequence of reads or writes.
  301. std::string CodeGenSchedModels::genRWName(const IdxVec& Seq, bool IsRead) {
  302. std::string Name("(");
  303. for (IdxIter I = Seq.begin(), E = Seq.end(); I != E; ++I) {
  304. if (I != Seq.begin())
  305. Name += '_';
  306. Name += getSchedRW(*I, IsRead).Name;
  307. }
  308. Name += ')';
  309. return Name;
  310. }
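// For example, the write sequence [WriteA, WriteB] (hypothetical names) gets
// the synthesized name "(WriteA_WriteB)".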
  311. unsigned CodeGenSchedModels::getSchedRWIdx(Record *Def, bool IsRead,
  312. unsigned After) const {
  313. const std::vector<CodeGenSchedRW> &RWVec = IsRead ? SchedReads : SchedWrites;
  314. assert(After < RWVec.size() && "start position out of bounds");
  315. for (std::vector<CodeGenSchedRW>::const_iterator I = RWVec.begin() + After,
  316. E = RWVec.end(); I != E; ++I) {
  317. if (I->TheDef == Def)
  318. return I - RWVec.begin();
  319. }
  320. return 0;
  321. }
  322. bool CodeGenSchedModels::hasReadOfWrite(Record *WriteDef) const {
  323. for (unsigned i = 0, e = SchedReads.size(); i < e; ++i) {
  324. Record *ReadDef = SchedReads[i].TheDef;
  325. if (!ReadDef || !ReadDef->isSubClassOf("ProcReadAdvance"))
  326. continue;
  327. RecVec ValidWrites = ReadDef->getValueAsListOfDefs("ValidWrites");
  328. if (std::find(ValidWrites.begin(), ValidWrites.end(), WriteDef)
  329. != ValidWrites.end()) {
  330. return true;
  331. }
  332. }
  333. return false;
  334. }
  335. namespace llvm {
  336. void splitSchedReadWrites(const RecVec &RWDefs,
  337. RecVec &WriteDefs, RecVec &ReadDefs) {
  338. for (RecIter RWI = RWDefs.begin(), RWE = RWDefs.end(); RWI != RWE; ++RWI) {
  339. if ((*RWI)->isSubClassOf("SchedWrite"))
  340. WriteDefs.push_back(*RWI);
  341. else {
  342. assert((*RWI)->isSubClassOf("SchedRead") && "unknown SchedReadWrite");
  343. ReadDefs.push_back(*RWI);
  344. }
  345. }
  346. }
  347. } // namespace llvm
  348. // Split the SchedReadWrites defs and call findRWs for each list.
  349. void CodeGenSchedModels::findRWs(const RecVec &RWDefs,
  350. IdxVec &Writes, IdxVec &Reads) const {
  351. RecVec WriteDefs;
  352. RecVec ReadDefs;
  353. splitSchedReadWrites(RWDefs, WriteDefs, ReadDefs);
  354. findRWs(WriteDefs, Writes, false);
  355. findRWs(ReadDefs, Reads, true);
  356. }
  357. // Call getSchedRWIdx for all elements in a sequence of SchedRW defs.
  358. void CodeGenSchedModels::findRWs(const RecVec &RWDefs, IdxVec &RWs,
  359. bool IsRead) const {
  360. for (RecIter RI = RWDefs.begin(), RE = RWDefs.end(); RI != RE; ++RI) {
  361. unsigned Idx = getSchedRWIdx(*RI, IsRead);
  362. assert(Idx && "failed to collect SchedReadWrite");
  363. RWs.push_back(Idx);
  364. }
  365. }
  366. void CodeGenSchedModels::expandRWSequence(unsigned RWIdx, IdxVec &RWSeq,
  367. bool IsRead) const {
  368. const CodeGenSchedRW &SchedRW = getSchedRW(RWIdx, IsRead);
  369. if (!SchedRW.IsSequence) {
  370. RWSeq.push_back(RWIdx);
  371. return;
  372. }
  373. int Repeat =
  374. SchedRW.TheDef ? SchedRW.TheDef->getValueAsInt("Repeat") : 1;
  375. for (int i = 0; i < Repeat; ++i) {
  376. for (IdxIter I = SchedRW.Sequence.begin(), E = SchedRW.Sequence.end();
  377. I != E; ++I) {
  378. expandRWSequence(*I, RWSeq, IsRead);
  379. }
  380. }
  381. }
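// For illustration (hypothetical): WriteSequence<[WriteX], 3> has Repeat = 3
// and expands to the flat sequence WriteX, WriteX, WriteX; nested sequences
// are flattened by the recursion above.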
  382. // Expand a SchedWrite as a sequence following any aliases that coincide with
  383. // the given processor model.
  384. void CodeGenSchedModels::expandRWSeqForProc(
  385. unsigned RWIdx, IdxVec &RWSeq, bool IsRead,
  386. const CodeGenProcModel &ProcModel) const {
  387. const CodeGenSchedRW &SchedWrite = getSchedRW(RWIdx, IsRead);
  388. Record *AliasDef = nullptr;
  389. for (RecIter AI = SchedWrite.Aliases.begin(), AE = SchedWrite.Aliases.end();
  390. AI != AE; ++AI) {
  391. const CodeGenSchedRW &AliasRW = getSchedRW((*AI)->getValueAsDef("AliasRW"));
  392. if ((*AI)->getValueInit("SchedModel")->isComplete()) {
  393. Record *ModelDef = (*AI)->getValueAsDef("SchedModel");
  394. if (&getProcModel(ModelDef) != &ProcModel)
  395. continue;
  396. }
  397. if (AliasDef)
  398. PrintFatalError(AliasRW.TheDef->getLoc(), "Multiple aliases "
  399. "defined for processor " + ProcModel.ModelName +
  400. " Ensure only one SchedAlias exists per RW.");
  401. AliasDef = AliasRW.TheDef;
  402. }
  403. if (AliasDef) {
  404. expandRWSeqForProc(getSchedRWIdx(AliasDef, IsRead),
  405. RWSeq, IsRead, ProcModel);
  406. return;
  407. }
  408. if (!SchedWrite.IsSequence) {
  409. RWSeq.push_back(RWIdx);
  410. return;
  411. }
  412. int Repeat =
  413. SchedWrite.TheDef ? SchedWrite.TheDef->getValueAsInt("Repeat") : 1;
  414. for (int i = 0; i < Repeat; ++i) {
  415. for (IdxIter I = SchedWrite.Sequence.begin(), E = SchedWrite.Sequence.end();
  416. I != E; ++I) {
  417. expandRWSeqForProc(*I, RWSeq, IsRead, ProcModel);
  418. }
  419. }
  420. }
  421. // Find the existing SchedWrite that models this sequence of writes.
  422. unsigned CodeGenSchedModels::findRWForSequence(const IdxVec &Seq,
  423. bool IsRead) {
  424. std::vector<CodeGenSchedRW> &RWVec = IsRead ? SchedReads : SchedWrites;
  425. for (std::vector<CodeGenSchedRW>::iterator I = RWVec.begin(), E = RWVec.end();
  426. I != E; ++I) {
  427. if (I->Sequence == Seq)
  428. return I - RWVec.begin();
  429. }
  430. // Index zero reserved for invalid RW.
  431. return 0;
  432. }
  433. /// Add this ReadWrite if it doesn't already exist.
  434. unsigned CodeGenSchedModels::findOrInsertRW(ArrayRef<unsigned> Seq,
  435. bool IsRead) {
  436. assert(!Seq.empty() && "cannot insert empty sequence");
  437. if (Seq.size() == 1)
  438. return Seq.back();
  439. unsigned Idx = findRWForSequence(Seq, IsRead);
  440. if (Idx)
  441. return Idx;
  442. unsigned RWIdx = IsRead ? SchedReads.size() : SchedWrites.size();
  443. CodeGenSchedRW SchedRW(RWIdx, IsRead, Seq, genRWName(Seq, IsRead));
  444. if (IsRead)
  445. SchedReads.push_back(SchedRW);
  446. else
  447. SchedWrites.push_back(SchedRW);
  448. return RWIdx;
  449. }
  450. /// Visit all the instruction definitions for this target to gather and
  451. /// enumerate the itinerary classes. These are the explicitly specified
  452. /// SchedClasses. More SchedClasses may be inferred.
  453. void CodeGenSchedModels::collectSchedClasses() {
  454. // NoItinerary is always the first class at Idx=0
  455. SchedClasses.resize(1);
  456. SchedClasses.back().Index = 0;
  457. SchedClasses.back().Name = "NoInstrModel";
  458. SchedClasses.back().ItinClassDef = Records.getDef("NoItinerary");
  459. SchedClasses.back().ProcIndices.push_back(0);
  460. // Create a SchedClass for each unique combination of itinerary class and
  461. // SchedRW list.
  462. for (const CodeGenInstruction *Inst : Target.instructions()) {
  463. Record *ItinDef = Inst->TheDef->getValueAsDef("Itinerary");
  464. IdxVec Writes, Reads;
  465. if (!Inst->TheDef->isValueUnset("SchedRW"))
  466. findRWs(Inst->TheDef->getValueAsListOfDefs("SchedRW"), Writes, Reads);
  467. // ProcIdx == 0 indicates the class applies to all processors.
  468. IdxVec ProcIndices(1, 0);
  469. unsigned SCIdx = addSchedClass(ItinDef, Writes, Reads, ProcIndices);
  470. InstrClassMap[Inst->TheDef] = SCIdx;
  471. }
  472. // Create classes for InstRW defs.
  473. RecVec InstRWDefs = Records.getAllDerivedDefinitions("InstRW");
  474. std::sort(InstRWDefs.begin(), InstRWDefs.end(), LessRecord());
  475. for (RecIter OI = InstRWDefs.begin(), OE = InstRWDefs.end(); OI != OE; ++OI)
  476. createInstRWClass(*OI);
  477. NumInstrSchedClasses = SchedClasses.size();
  478. bool EnableDump = false;
  479. DEBUG(EnableDump = true);
  480. if (!EnableDump)
  481. return;
  482. for (const CodeGenInstruction *Inst : Target.instructions()) {
  483. std::string InstName = Inst->TheDef->getName();
  484. unsigned SCIdx = InstrClassMap.lookup(Inst->TheDef);
  485. if (!SCIdx) {
  486. dbgs() << "No machine model for " << Inst->TheDef->getName() << '\n';
  487. continue;
  488. }
  489. CodeGenSchedClass &SC = getSchedClass(SCIdx);
  490. if (SC.ProcIndices[0] != 0)
  491. PrintFatalError(Inst->TheDef->getLoc(), "Instruction's sched class "
  492. "must not be subtarget specific.");
  493. IdxVec ProcIndices;
  494. if (SC.ItinClassDef->getName() != "NoItinerary") {
  495. ProcIndices.push_back(0);
  496. dbgs() << "Itinerary for " << InstName << ": "
  497. << SC.ItinClassDef->getName() << '\n';
  498. }
  499. if (!SC.Writes.empty()) {
  500. ProcIndices.push_back(0);
  501. dbgs() << "SchedRW machine model for " << InstName;
  502. for (IdxIter WI = SC.Writes.begin(), WE = SC.Writes.end(); WI != WE; ++WI)
  503. dbgs() << " " << SchedWrites[*WI].Name;
  504. for (IdxIter RI = SC.Reads.begin(), RE = SC.Reads.end(); RI != RE; ++RI)
  505. dbgs() << " " << SchedReads[*RI].Name;
  506. dbgs() << '\n';
  507. }
  508. const RecVec &RWDefs = SchedClasses[SCIdx].InstRWs;
  509. for (RecIter RWI = RWDefs.begin(), RWE = RWDefs.end();
  510. RWI != RWE; ++RWI) {
  511. const CodeGenProcModel &ProcModel =
  512. getProcModel((*RWI)->getValueAsDef("SchedModel"));
  513. ProcIndices.push_back(ProcModel.Index);
  514. dbgs() << "InstRW on " << ProcModel.ModelName << " for " << InstName;
  515. IdxVec Writes;
  516. IdxVec Reads;
  517. findRWs((*RWI)->getValueAsListOfDefs("OperandReadWrites"),
  518. Writes, Reads);
  519. for (IdxIter WI = Writes.begin(), WE = Writes.end(); WI != WE; ++WI)
  520. dbgs() << " " << SchedWrites[*WI].Name;
  521. for (IdxIter RI = Reads.begin(), RE = Reads.end(); RI != RE; ++RI)
  522. dbgs() << " " << SchedReads[*RI].Name;
  523. dbgs() << '\n';
  524. }
  525. for (std::vector<CodeGenProcModel>::iterator PI = ProcModels.begin(),
  526. PE = ProcModels.end(); PI != PE; ++PI) {
  527. if (!std::count(ProcIndices.begin(), ProcIndices.end(), PI->Index))
  528. dbgs() << "No machine model for " << Inst->TheDef->getName()
  529. << " on processor " << PI->ModelName << '\n';
  530. }
  531. }
  532. }
  533. /// Find a SchedClass that has been inferred from a per-operand list of
  534. /// SchedWrites and SchedReads.
  535. unsigned CodeGenSchedModels::findSchedClassIdx(Record *ItinClassDef,
  536. const IdxVec &Writes,
  537. const IdxVec &Reads) const {
  538. for (SchedClassIter I = schedClassBegin(), E = schedClassEnd(); I != E; ++I) {
  539. if (I->ItinClassDef == ItinClassDef
  540. && I->Writes == Writes && I->Reads == Reads) {
  541. return I - schedClassBegin();
  542. }
  543. }
  544. return 0;
  545. }
  546. // Get the SchedClass index for an instruction.
  547. unsigned CodeGenSchedModels::getSchedClassIdx(
  548. const CodeGenInstruction &Inst) const {
  549. return InstrClassMap.lookup(Inst.TheDef);
  550. }
  551. std::string CodeGenSchedModels::createSchedClassName(
  552. Record *ItinClassDef, const IdxVec &OperWrites, const IdxVec &OperReads) {
  553. std::string Name;
  554. if (ItinClassDef && ItinClassDef->getName() != "NoItinerary")
  555. Name = ItinClassDef->getName();
  556. for (IdxIter WI = OperWrites.begin(), WE = OperWrites.end(); WI != WE; ++WI) {
  557. if (!Name.empty())
  558. Name += '_';
  559. Name += SchedWrites[*WI].Name;
  560. }
  561. for (IdxIter RI = OperReads.begin(), RE = OperReads.end(); RI != RE; ++RI) {
  562. Name += '_';
  563. Name += SchedReads[*RI].Name;
  564. }
  565. return Name;
  566. }
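// For example, writes [WriteIALU] and reads [ReadIALU] (hypothetical names)
// with no itinerary class produce the name "WriteIALU_ReadIALU"; a real
// itinerary class name, when present, is prepended.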
  567. std::string CodeGenSchedModels::createSchedClassName(const RecVec &InstDefs) {
  568. std::string Name;
  569. for (RecIter I = InstDefs.begin(), E = InstDefs.end(); I != E; ++I) {
  570. if (I != InstDefs.begin())
  571. Name += '_';
  572. Name += (*I)->getName();
  573. }
  574. return Name;
  575. }
  576. /// Add an inferred sched class from an itinerary class and per-operand list of
  577. /// SchedWrites and SchedReads. ProcIndices contains the set of IDs of
  578. /// processors that may utilize this class.
  579. unsigned CodeGenSchedModels::addSchedClass(Record *ItinClassDef,
  580. const IdxVec &OperWrites,
  581. const IdxVec &OperReads,
  582. const IdxVec &ProcIndices)
  583. {
  584. assert(!ProcIndices.empty() && "expect at least one ProcIdx");
  585. unsigned Idx = findSchedClassIdx(ItinClassDef, OperWrites, OperReads);
  586. if (Idx || SchedClasses[0].isKeyEqual(ItinClassDef, OperWrites, OperReads)) {
  587. IdxVec PI;
  588. std::set_union(SchedClasses[Idx].ProcIndices.begin(),
  589. SchedClasses[Idx].ProcIndices.end(),
  590. ProcIndices.begin(), ProcIndices.end(),
  591. std::back_inserter(PI));
  592. SchedClasses[Idx].ProcIndices.swap(PI);
  593. return Idx;
  594. }
  595. Idx = SchedClasses.size();
  596. SchedClasses.resize(Idx+1);
  597. CodeGenSchedClass &SC = SchedClasses.back();
  598. SC.Index = Idx;
  599. SC.Name = createSchedClassName(ItinClassDef, OperWrites, OperReads);
  600. SC.ItinClassDef = ItinClassDef;
  601. SC.Writes = OperWrites;
  602. SC.Reads = OperReads;
  603. SC.ProcIndices = ProcIndices;
  604. return Idx;
  605. }
  606. // Create classes for each set of opcodes that are in the same InstReadWrite
  607. // definition across all processors.
  608. void CodeGenSchedModels::createInstRWClass(Record *InstRWDef) {
  609. // ClassInstrs will hold an entry for each subset of Instrs in InstRWDef that
  610. // intersects with an existing class via a previous InstRWDef. Instrs that do
  611. // not intersect with an existing class refer back to their former class as
  612. // determined from ItinDef or SchedRW.
  613. SmallVector<std::pair<unsigned, SmallVector<Record *, 8> >, 4> ClassInstrs;
  614. // Sort Instrs into sets.
  615. const RecVec *InstDefs = Sets.expand(InstRWDef);
  616. if (InstDefs->empty())
  617. PrintFatalError(InstRWDef->getLoc(), "No matching instruction opcodes");
  618. for (RecIter I = InstDefs->begin(), E = InstDefs->end(); I != E; ++I) {
  619. InstClassMapTy::const_iterator Pos = InstrClassMap.find(*I);
  620. if (Pos == InstrClassMap.end())
  621. PrintFatalError((*I)->getLoc(), "No sched class for instruction.");
  622. unsigned SCIdx = Pos->second;
  623. unsigned CIdx = 0, CEnd = ClassInstrs.size();
  624. for (; CIdx != CEnd; ++CIdx) {
  625. if (ClassInstrs[CIdx].first == SCIdx)
  626. break;
  627. }
  628. if (CIdx == CEnd) {
  629. ClassInstrs.resize(CEnd + 1);
  630. ClassInstrs[CIdx].first = SCIdx;
  631. }
  632. ClassInstrs[CIdx].second.push_back(*I);
  633. }
  634. // For each set of Instrs, create a new class if necessary, and map or remap
  635. // the Instrs to it.
  636. unsigned CIdx = 0, CEnd = ClassInstrs.size();
  637. for (; CIdx != CEnd; ++CIdx) {
  638. unsigned OldSCIdx = ClassInstrs[CIdx].first;
  639. ArrayRef<Record*> InstDefs = ClassInstrs[CIdx].second;
  640. // If all instrs in the current class are accounted for, then leave
  641. // them mapped to their old class.
  642. if (OldSCIdx) {
  643. const RecVec &RWDefs = SchedClasses[OldSCIdx].InstRWs;
  644. if (!RWDefs.empty()) {
  645. const RecVec *OrigInstDefs = Sets.expand(RWDefs[0]);
  646. unsigned OrigNumInstrs = 0;
  647. for (RecIter I = OrigInstDefs->begin(), E = OrigInstDefs->end();
  648. I != E; ++I) {
  649. if (InstrClassMap[*I] == OldSCIdx)
  650. ++OrigNumInstrs;
  651. }
  652. if (OrigNumInstrs == InstDefs.size()) {
  653. assert(SchedClasses[OldSCIdx].ProcIndices[0] == 0 &&
  654. "expected a generic SchedClass");
  655. DEBUG(dbgs() << "InstRW: Reuse SC " << OldSCIdx << ":"
  656. << SchedClasses[OldSCIdx].Name << " on "
  657. << InstRWDef->getValueAsDef("SchedModel")->getName() << "\n");
  658. SchedClasses[OldSCIdx].InstRWs.push_back(InstRWDef);
  659. continue;
  660. }
  661. }
  662. }
  663. unsigned SCIdx = SchedClasses.size();
  664. SchedClasses.resize(SCIdx+1);
  665. CodeGenSchedClass &SC = SchedClasses.back();
  666. SC.Index = SCIdx;
  667. SC.Name = createSchedClassName(InstDefs);
  668. DEBUG(dbgs() << "InstRW: New SC " << SCIdx << ":" << SC.Name << " on "
  669. << InstRWDef->getValueAsDef("SchedModel")->getName() << "\n");
  670. // Preserve ItinDef and Writes/Reads for processors without an InstRW entry.
  671. SC.ItinClassDef = SchedClasses[OldSCIdx].ItinClassDef;
  672. SC.Writes = SchedClasses[OldSCIdx].Writes;
  673. SC.Reads = SchedClasses[OldSCIdx].Reads;
  674. SC.ProcIndices.push_back(0);
  675. // Map each Instr to this new class.
  676. // Note that InstDefs may be a smaller list than InstRWDef's "Instrs".
  677. Record *RWModelDef = InstRWDef->getValueAsDef("SchedModel");
  678. SmallSet<unsigned, 4> RemappedClassIDs;
  679. for (ArrayRef<Record*>::const_iterator
  680. II = InstDefs.begin(), IE = InstDefs.end(); II != IE; ++II) {
  681. unsigned OldSCIdx = InstrClassMap[*II];
  682. if (OldSCIdx && RemappedClassIDs.insert(OldSCIdx).second) {
  683. for (RecIter RI = SchedClasses[OldSCIdx].InstRWs.begin(),
  684. RE = SchedClasses[OldSCIdx].InstRWs.end(); RI != RE; ++RI) {
  685. if ((*RI)->getValueAsDef("SchedModel") == RWModelDef) {
  686. PrintFatalError(InstRWDef->getLoc(), "Overlapping InstRW def " +
  687. (*II)->getName() + " also matches " +
  688. (*RI)->getValue("Instrs")->getValue()->getAsString());
  689. }
  690. assert(*RI != InstRWDef && "SchedClass has duplicate InstRW def");
  691. SC.InstRWs.push_back(*RI);
  692. }
  693. }
  694. InstrClassMap[*II] = SCIdx;
  695. }
  696. SC.InstRWs.push_back(InstRWDef);
  697. }
  698. }
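// Worked example (hypothetical): if an InstRW for processor P matches {ADDrr,
// SUBrr} and both opcodes currently map to a generic class C, a new class
// named "ADDrr_SUBrr" is created, it copies C's ItinClassDef and Writes/Reads
// (still used by processors without an InstRW for these opcodes), both opcodes
// are remapped to it, and the InstRW def is recorded in its InstRWs list.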
  699. // True if collectProcItins found anything.
  700. bool CodeGenSchedModels::hasItineraries() const {
  701. for (CodeGenSchedModels::ProcIter PI = procModelBegin(), PE = procModelEnd();
  702. PI != PE; ++PI) {
  703. if (PI->hasItineraries())
  704. return true;
  705. }
  706. return false;
  707. }
  708. // Gather the processor itineraries.
  709. void CodeGenSchedModels::collectProcItins() {
  710. for (CodeGenProcModel &ProcModel : ProcModels) {
  711. if (!ProcModel.hasItineraries())
  712. continue;
  713. RecVec ItinRecords = ProcModel.ItinsDef->getValueAsListOfDefs("IID");
  714. assert(!ItinRecords.empty() && "ProcModel.hasItineraries is incorrect");
  715. // Populate ItinDefList with Itinerary records.
  716. ProcModel.ItinDefList.resize(NumInstrSchedClasses);
  717. // Insert each itinerary data record in the correct position within
  718. // the processor model's ItinDefList.
  719. for (unsigned i = 0, N = ItinRecords.size(); i < N; i++) {
  720. Record *ItinData = ItinRecords[i];
  721. Record *ItinDef = ItinData->getValueAsDef("TheClass");
  722. bool FoundClass = false;
  723. for (SchedClassIter SCI = schedClassBegin(), SCE = schedClassEnd();
  724. SCI != SCE; ++SCI) {
  725. // Multiple SchedClasses may share an itinerary. Update all of them.
  726. if (SCI->ItinClassDef == ItinDef) {
  727. ProcModel.ItinDefList[SCI->Index] = ItinData;
  728. FoundClass = true;
  729. }
  730. }
  731. if (!FoundClass) {
  732. DEBUG(dbgs() << ProcModel.ItinsDef->getName()
  733. << " missing class for itinerary " << ItinDef->getName() << '\n');
  734. }
  735. }
  736. // Check for missing itinerary entries.
  737. assert(!ProcModel.ItinDefList[0] && "NoItinerary class can't have rec");
  738. DEBUG(
  739. for (unsigned i = 1, N = ProcModel.ItinDefList.size(); i < N; ++i) {
  740. if (!ProcModel.ItinDefList[i])
  741. dbgs() << ProcModel.ItinsDef->getName()
  742. << " missing itinerary for class "
  743. << SchedClasses[i].Name << '\n';
  744. });
  745. }
  746. }
  747. // Gather the read/write types for each itinerary class.
  748. void CodeGenSchedModels::collectProcItinRW() {
  749. RecVec ItinRWDefs = Records.getAllDerivedDefinitions("ItinRW");
  750. std::sort(ItinRWDefs.begin(), ItinRWDefs.end(), LessRecord());
  751. for (RecIter II = ItinRWDefs.begin(), IE = ItinRWDefs.end(); II != IE; ++II) {
  752. if (!(*II)->getValueInit("SchedModel")->isComplete())
  753. PrintFatalError((*II)->getLoc(), "SchedModel is undefined");
  754. Record *ModelDef = (*II)->getValueAsDef("SchedModel");
  755. ProcModelMapTy::const_iterator I = ProcModelMap.find(ModelDef);
  756. if (I == ProcModelMap.end()) {
  757. PrintFatalError((*II)->getLoc(), "Undefined SchedMachineModel "
  758. + ModelDef->getName());
  759. }
  760. ProcModels[I->second].ItinRWDefs.push_back(*II);
  761. }
  762. }
  763. /// Infer new classes from existing classes. In the process, this may create new
  764. /// SchedWrites from sequences of existing SchedWrites.
  765. void CodeGenSchedModels::inferSchedClasses() {
  766. DEBUG(dbgs() << NumInstrSchedClasses << " instr sched classes.\n");
  767. // Visit all existing classes and newly created classes.
  768. for (unsigned Idx = 0; Idx != SchedClasses.size(); ++Idx) {
  769. assert(SchedClasses[Idx].Index == Idx && "bad SCIdx");
  770. if (SchedClasses[Idx].ItinClassDef)
  771. inferFromItinClass(SchedClasses[Idx].ItinClassDef, Idx);
  772. if (!SchedClasses[Idx].InstRWs.empty())
  773. inferFromInstRWs(Idx);
  774. if (!SchedClasses[Idx].Writes.empty()) {
  775. inferFromRW(SchedClasses[Idx].Writes, SchedClasses[Idx].Reads,
  776. Idx, SchedClasses[Idx].ProcIndices);
  777. }
  778. assert(SchedClasses.size() < (NumInstrSchedClasses*6) &&
  779. "too many SchedVariants");
  780. }
  781. }
  782. /// Infer classes from per-processor itinerary resources.
  783. void CodeGenSchedModels::inferFromItinClass(Record *ItinClassDef,
  784. unsigned FromClassIdx) {
  785. for (unsigned PIdx = 0, PEnd = ProcModels.size(); PIdx != PEnd; ++PIdx) {
  786. const CodeGenProcModel &PM = ProcModels[PIdx];
  787. // For all ItinRW entries.
  788. bool HasMatch = false;
  789. for (RecIter II = PM.ItinRWDefs.begin(), IE = PM.ItinRWDefs.end();
  790. II != IE; ++II) {
  791. RecVec Matched = (*II)->getValueAsListOfDefs("MatchedItinClasses");
  792. if (!std::count(Matched.begin(), Matched.end(), ItinClassDef))
  793. continue;
  794. if (HasMatch)
  795. PrintFatalError((*II)->getLoc(), "Duplicate itinerary class "
  796. + ItinClassDef->getName()
  797. + " in ItinResources for " + PM.ModelName);
  798. HasMatch = true;
  799. IdxVec Writes, Reads;
  800. findRWs((*II)->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads);
  801. IdxVec ProcIndices(1, PIdx);
  802. inferFromRW(Writes, Reads, FromClassIdx, ProcIndices);
  803. }
  804. }
  805. }
  806. /// Infer classes from per-processor InstReadWrite definitions.
  807. void CodeGenSchedModels::inferFromInstRWs(unsigned SCIdx) {
  808. for (unsigned I = 0, E = SchedClasses[SCIdx].InstRWs.size(); I != E; ++I) {
  809. assert(SchedClasses[SCIdx].InstRWs.size() == E && "InstRWs was mutated!");
  810. Record *Rec = SchedClasses[SCIdx].InstRWs[I];
  811. const RecVec *InstDefs = Sets.expand(Rec);
  812. RecIter II = InstDefs->begin(), IE = InstDefs->end();
  813. for (; II != IE; ++II) {
  814. if (InstrClassMap[*II] == SCIdx)
  815. break;
  816. }
  817. // If this class no longer has any instructions mapped to it, it has become
  818. // irrelevant.
  819. if (II == IE)
  820. continue;
  821. IdxVec Writes, Reads;
  822. findRWs(Rec->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads);
  823. unsigned PIdx = getProcModel(Rec->getValueAsDef("SchedModel")).Index;
  824. IdxVec ProcIndices(1, PIdx);
  825. inferFromRW(Writes, Reads, SCIdx, ProcIndices); // May mutate SchedClasses.
  826. }
  827. }
  828. namespace {
  829. // Helper for substituteVariantOperand.
  830. struct TransVariant {
  831. Record *VarOrSeqDef; // Variant or sequence.
  832. unsigned RWIdx; // Index of this variant or sequence's matched type.
  833. unsigned ProcIdx; // Processor model index or zero for any.
  834. unsigned TransVecIdx; // Index into PredTransitions::TransVec.
  835. TransVariant(Record *def, unsigned rwi, unsigned pi, unsigned ti):
  836. VarOrSeqDef(def), RWIdx(rwi), ProcIdx(pi), TransVecIdx(ti) {}
  837. };
  838. // Associate a predicate with the SchedReadWrite that it guards.
  839. // RWIdx is the index of the read/write variant.
  840. struct PredCheck {
  841. bool IsRead;
  842. unsigned RWIdx;
  843. Record *Predicate;
  844. PredCheck(bool r, unsigned w, Record *p): IsRead(r), RWIdx(w), Predicate(p) {}
  845. };
  846. // A Predicate transition is a list of RW sequences guarded by a PredTerm.
  847. struct PredTransition {
  848. // A predicate term is a conjunction of PredChecks.
  849. SmallVector<PredCheck, 4> PredTerm;
  850. SmallVector<SmallVector<unsigned,4>, 16> WriteSequences;
  851. SmallVector<SmallVector<unsigned,4>, 16> ReadSequences;
  852. SmallVector<unsigned, 4> ProcIndices;
  853. };
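// For illustration (names hypothetical): one PredTransition might hold the
// term {(WriteV, IsPredA)} plus one write sequence per write operand and one
// read sequence per read operand, i.e. "when IsPredA selects this variant of
// WriteV, the class expands to these concrete RW sequences".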
  854. // Encapsulate a set of partially constructed transitions.
  855. // The results are built by repeated calls to substituteVariants.
  856. class PredTransitions {
  857. CodeGenSchedModels &SchedModels;
  858. public:
  859. std::vector<PredTransition> TransVec;
  860. PredTransitions(CodeGenSchedModels &sm): SchedModels(sm) {}
  861. void substituteVariantOperand(const SmallVectorImpl<unsigned> &RWSeq,
  862. bool IsRead, unsigned StartIdx);
  863. void substituteVariants(const PredTransition &Trans);
  864. #ifndef NDEBUG
  865. void dump() const;
  866. #endif
  867. private:
  868. bool mutuallyExclusive(Record *PredDef, ArrayRef<PredCheck> Term);
  869. void getIntersectingVariants(
  870. const CodeGenSchedRW &SchedRW, unsigned TransIdx,
  871. std::vector<TransVariant> &IntersectingVariants);
  872. void pushVariant(const TransVariant &VInfo, bool IsRead);
  873. };
  874. } // end anonymous namespace
  875. // Return true if this predicate is mutually exclusive with a PredTerm. This
  876. // degenerates into checking if the predicate is mutually exclusive with any
  877. // predicate in the Term's conjunction.
  878. //
  879. // All predicates associated with a given SchedRW are considered mutually
  880. // exclusive. This should work even if the conditions expressed by the
  881. // predicates are not exclusive because the predicates for a given SchedWrite
  882. // are always checked in the order they are defined in the .td file. Later
  883. // conditions implicitly negate any prior condition.
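// For example (hypothetical): if WriteV has variants guarded by IsPredA then
// IsPredB, a term that already contains IsPredA treats IsPredB as exclusive,
// since the IsPredB variant is only reached when IsPredA was false.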
  884. bool PredTransitions::mutuallyExclusive(Record *PredDef,
  885. ArrayRef<PredCheck> Term) {
  886. for (ArrayRef<PredCheck>::iterator I = Term.begin(), E = Term.end();
  887. I != E; ++I) {
  888. if (I->Predicate == PredDef)
  889. return false;
  890. const CodeGenSchedRW &SchedRW = SchedModels.getSchedRW(I->RWIdx, I->IsRead);
  891. assert(SchedRW.HasVariants && "PredCheck must refer to a SchedVariant");
  892. RecVec Variants = SchedRW.TheDef->getValueAsListOfDefs("Variants");
  893. for (RecIter VI = Variants.begin(), VE = Variants.end(); VI != VE; ++VI) {
  894. if ((*VI)->getValueAsDef("Predicate") == PredDef)
  895. return true;
  896. }
  897. }
  898. return false;
  899. }
  900. static bool hasAliasedVariants(const CodeGenSchedRW &RW,
  901. CodeGenSchedModels &SchedModels) {
  902. if (RW.HasVariants)
  903. return true;
  904. for (RecIter I = RW.Aliases.begin(), E = RW.Aliases.end(); I != E; ++I) {
  905. const CodeGenSchedRW &AliasRW =
  906. SchedModels.getSchedRW((*I)->getValueAsDef("AliasRW"));
  907. if (AliasRW.HasVariants)
  908. return true;
  909. if (AliasRW.IsSequence) {
  910. IdxVec ExpandedRWs;
  911. SchedModels.expandRWSequence(AliasRW.Index, ExpandedRWs, AliasRW.IsRead);
  912. for (IdxIter SI = ExpandedRWs.begin(), SE = ExpandedRWs.end();
  913. SI != SE; ++SI) {
  914. if (hasAliasedVariants(SchedModels.getSchedRW(*SI, AliasRW.IsRead),
  915. SchedModels)) {
  916. return true;
  917. }
  918. }
  919. }
  920. }
  921. return false;
  922. }
  923. static bool hasVariant(ArrayRef<PredTransition> Transitions,
  924. CodeGenSchedModels &SchedModels) {
  925. for (ArrayRef<PredTransition>::iterator
  926. PTI = Transitions.begin(), PTE = Transitions.end();
  927. PTI != PTE; ++PTI) {
  928. for (SmallVectorImpl<SmallVector<unsigned,4> >::const_iterator
  929. WSI = PTI->WriteSequences.begin(), WSE = PTI->WriteSequences.end();
  930. WSI != WSE; ++WSI) {
  931. for (SmallVectorImpl<unsigned>::const_iterator
  932. WI = WSI->begin(), WE = WSI->end(); WI != WE; ++WI) {
  933. if (hasAliasedVariants(SchedModels.getSchedWrite(*WI), SchedModels))
  934. return true;
  935. }
  936. }
  937. for (SmallVectorImpl<SmallVector<unsigned,4> >::const_iterator
  938. RSI = PTI->ReadSequences.begin(), RSE = PTI->ReadSequences.end();
  939. RSI != RSE; ++RSI) {
  940. for (SmallVectorImpl<unsigned>::const_iterator
  941. RI = RSI->begin(), RE = RSI->end(); RI != RE; ++RI) {
  942. if (hasAliasedVariants(SchedModels.getSchedRead(*RI), SchedModels))
  943. return true;
  944. }
  945. }
  946. }
  947. return false;
  948. }
  949. // Populate IntersectingVariants with any variants or aliased sequences of the
  950. // given SchedRW whose processor indices and predicates are not mutually
  951. // exclusive with the given transition.
  952. void PredTransitions::getIntersectingVariants(
  953. const CodeGenSchedRW &SchedRW, unsigned TransIdx,
  954. std::vector<TransVariant> &IntersectingVariants) {
  955. bool GenericRW = false;
  956. std::vector<TransVariant> Variants;
  957. if (SchedRW.HasVariants) {
  958. unsigned VarProcIdx = 0;
  959. if (SchedRW.TheDef->getValueInit("SchedModel")->isComplete()) {
  960. Record *ModelDef = SchedRW.TheDef->getValueAsDef("SchedModel");
  961. VarProcIdx = SchedModels.getProcModel(ModelDef).Index;
  962. }
  963. // Push each variant. Assign TransVecIdx later.
  964. const RecVec VarDefs = SchedRW.TheDef->getValueAsListOfDefs("Variants");
  965. for (RecIter RI = VarDefs.begin(), RE = VarDefs.end(); RI != RE; ++RI)
  966. Variants.push_back(TransVariant(*RI, SchedRW.Index, VarProcIdx, 0));
  967. if (VarProcIdx == 0)
  968. GenericRW = true;
  969. }
  970. for (RecIter AI = SchedRW.Aliases.begin(), AE = SchedRW.Aliases.end();
  971. AI != AE; ++AI) {
  972. // If either the SchedAlias itself or the SchedReadWrite that it aliases
  973. // to is defined within a processor model, constrain all variants to
  974. // that processor.
  975. unsigned AliasProcIdx = 0;
  976. if ((*AI)->getValueInit("SchedModel")->isComplete()) {
  977. Record *ModelDef = (*AI)->getValueAsDef("SchedModel");
  978. AliasProcIdx = SchedModels.getProcModel(ModelDef).Index;
  979. }
  980. const CodeGenSchedRW &AliasRW =
  981. SchedModels.getSchedRW((*AI)->getValueAsDef("AliasRW"));
  982. if (AliasRW.HasVariants) {
  983. const RecVec VarDefs = AliasRW.TheDef->getValueAsListOfDefs("Variants");
  984. for (RecIter RI = VarDefs.begin(), RE = VarDefs.end(); RI != RE; ++RI)
  985. Variants.push_back(TransVariant(*RI, AliasRW.Index, AliasProcIdx, 0));
  986. }
  987. if (AliasRW.IsSequence) {
  988. Variants.push_back(
  989. TransVariant(AliasRW.TheDef, SchedRW.Index, AliasProcIdx, 0));
  990. }
  991. if (AliasProcIdx == 0)
  992. GenericRW = true;
  993. }
  994. for (unsigned VIdx = 0, VEnd = Variants.size(); VIdx != VEnd; ++VIdx) {
  995. TransVariant &Variant = Variants[VIdx];
  996. // Don't expand variants if the processor models don't intersect.
  997. // A zero processor index means any processor.
  998. SmallVectorImpl<unsigned> &ProcIndices = TransVec[TransIdx].ProcIndices;
  999. if (ProcIndices[0] && Variants[VIdx].ProcIdx) {
  1000. unsigned Cnt = std::count(ProcIndices.begin(), ProcIndices.end(),
  1001. Variant.ProcIdx);
  1002. if (!Cnt)
  1003. continue;
  1004. if (Cnt > 1) {
  1005. const CodeGenProcModel &PM =
  1006. *(SchedModels.procModelBegin() + Variant.ProcIdx);
  1007. PrintFatalError(Variant.VarOrSeqDef->getLoc(),
  1008. "Multiple variants defined for processor " +
  1009. PM.ModelName +
  1010. " Ensure only one SchedAlias exists per RW.");
  1011. }
  1012. }
  1013. if (Variant.VarOrSeqDef->isSubClassOf("SchedVar")) {
  1014. Record *PredDef = Variant.VarOrSeqDef->getValueAsDef("Predicate");
  1015. if (mutuallyExclusive(PredDef, TransVec[TransIdx].PredTerm))
  1016. continue;
  1017. }
  1018. if (IntersectingVariants.empty()) {
  1019. // The first variant builds on the existing transition.
  1020. Variant.TransVecIdx = TransIdx;
  1021. IntersectingVariants.push_back(Variant);
  1022. }
  1023. else {
  1024. // Push another copy of the current transition for more variants.
  1025. Variant.TransVecIdx = TransVec.size();
  1026. IntersectingVariants.push_back(Variant);
  1027. TransVec.push_back(TransVec[TransIdx]);
  1028. }
  1029. }
  1030. if (GenericRW && IntersectingVariants.empty()) {
  1031. PrintFatalError(SchedRW.TheDef->getLoc(), "No variant of this type has "
  1032. "a matching predicate on any processor");
  1033. }
  1034. }
  1035. // Push the Reads/Writes selected by this variant onto the PredTransition
  1036. // specified by VInfo.
  1037. void PredTransitions::
  1038. pushVariant(const TransVariant &VInfo, bool IsRead) {
  1039. PredTransition &Trans = TransVec[VInfo.TransVecIdx];
  1040. // If this operand transition is reached through a processor-specific alias,
  1041. // then the whole transition is specific to this processor.
  1042. if (VInfo.ProcIdx != 0)
  1043. Trans.ProcIndices.assign(1, VInfo.ProcIdx);
  1044. IdxVec SelectedRWs;
  1045. if (VInfo.VarOrSeqDef->isSubClassOf("SchedVar")) {
  1046. Record *PredDef = VInfo.VarOrSeqDef->getValueAsDef("Predicate");
  1047. Trans.PredTerm.push_back(PredCheck(IsRead, VInfo.RWIdx, PredDef));
  1048. RecVec SelectedDefs = VInfo.VarOrSeqDef->getValueAsListOfDefs("Selected");
  1049. SchedModels.findRWs(SelectedDefs, SelectedRWs, IsRead);
  1050. }
  1051. else {
  1052. assert(VInfo.VarOrSeqDef->isSubClassOf("WriteSequence") &&
  1053. "variant must be a SchedVariant or aliased WriteSequence");
  1054. SelectedRWs.push_back(SchedModels.getSchedRWIdx(VInfo.VarOrSeqDef, IsRead));
  1055. }
  1056. const CodeGenSchedRW &SchedRW = SchedModels.getSchedRW(VInfo.RWIdx, IsRead);
  1057. SmallVectorImpl<SmallVector<unsigned,4> > &RWSequences = IsRead
  1058. ? Trans.ReadSequences : Trans.WriteSequences;
  1059. if (SchedRW.IsVariadic) {
  1060. unsigned OperIdx = RWSequences.size()-1;
  1061. // Make N-1 copies of this transition's last sequence.
  1062. for (unsigned i = 1, e = SelectedRWs.size(); i != e; ++i) {
  1063. // Create a temporary copy in case the vector reallocates.
  1064. RWSequences.reserve(RWSequences.size() + 1);
  1065. RWSequences.push_back(RWSequences[OperIdx]);
  1066. }
  1067. // Push each of the N elements of the SelectedRWs onto a copy of the last
  1068. // sequence (split the current operand into N operands).
  1069. // Note that write sequences should be expanded within this loop--the entire
  1070. // sequence belongs to a single operand.
  1071. for (IdxIter RWI = SelectedRWs.begin(), RWE = SelectedRWs.end();
  1072. RWI != RWE; ++RWI, ++OperIdx) {
  1073. IdxVec ExpandedRWs;
  1074. if (IsRead)
  1075. ExpandedRWs.push_back(*RWI);
  1076. else
  1077. SchedModels.expandRWSequence(*RWI, ExpandedRWs, IsRead);
  1078. RWSequences[OperIdx].insert(RWSequences[OperIdx].end(),
  1079. ExpandedRWs.begin(), ExpandedRWs.end());
  1080. }
  1081. assert(OperIdx == RWSequences.size() && "missed a sequence");
  1082. }
  1083. else {
  1084. // Push this transition's expanded sequence onto this transition's last
  1085. // sequence (add to the current operand's sequence).
  1086. SmallVectorImpl<unsigned> &Seq = RWSequences.back();
  1087. IdxVec ExpandedRWs;
  1088. for (IdxIter RWI = SelectedRWs.begin(), RWE = SelectedRWs.end();
  1089. RWI != RWE; ++RWI) {
  1090. if (IsRead)
  1091. ExpandedRWs.push_back(*RWI);
  1092. else
  1093. SchedModels.expandRWSequence(*RWI, ExpandedRWs, IsRead);
  1094. }
  1095. Seq.insert(Seq.end(), ExpandedRWs.begin(), ExpandedRWs.end());
  1096. }
  1097. }
  1098. // RWSeq is a sequence of all Reads or all Writes for the next read or write
  1099. // operand. StartIdx is an index into TransVec where partial results
  1100. // start. RWSeq must be applied to all transitions between StartIdx and the end
  1101. // of TransVec.
  1102. void PredTransitions::substituteVariantOperand(
  1103. const SmallVectorImpl<unsigned> &RWSeq, bool IsRead, unsigned StartIdx) {
  1104. // Visit each original RW within the current sequence.
  1105. for (SmallVectorImpl<unsigned>::const_iterator
  1106. RWI = RWSeq.begin(), RWE = RWSeq.end(); RWI != RWE; ++RWI) {
  1107. const CodeGenSchedRW &SchedRW = SchedModels.getSchedRW(*RWI, IsRead);
  1108. // Push this RW on all partial PredTransitions or distribute variants.
  1109. // New PredTransitions may be pushed within this loop which should not be
  1110. // revisited (TransEnd must be loop invariant).
  1111. for (unsigned TransIdx = StartIdx, TransEnd = TransVec.size();
  1112. TransIdx != TransEnd; ++TransIdx) {
  1113. // In the common case, push RW onto the current operand's sequence.
  1114. if (!hasAliasedVariants(SchedRW, SchedModels)) {
  1115. if (IsRead)
  1116. TransVec[TransIdx].ReadSequences.back().push_back(*RWI);
  1117. else
  1118. TransVec[TransIdx].WriteSequences.back().push_back(*RWI);
  1119. continue;
  1120. }
  1121. // Distribute this partial PredTransition across intersecting variants.
  1122. // This will push a copies of TransVec[TransIdx] on the back of TransVec.
  1123. std::vector<TransVariant> IntersectingVariants;
  1124. getIntersectingVariants(SchedRW, TransIdx, IntersectingVariants);
  1125. // Now expand each variant on top of its copy of the transition.
  1126. for (std::vector<TransVariant>::const_iterator
  1127. IVI = IntersectingVariants.begin(),
  1128. IVE = IntersectingVariants.end();
  1129. IVI != IVE; ++IVI) {
  1130. pushVariant(*IVI, IsRead);
  1131. }
  1132. }
  1133. }
  1134. }
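// Illustrative sketch (hypothetical names): when RWSeq contains a write with
// two intersecting variants, each partial transition in [StartIdx, TransEnd)
// is duplicated by getIntersectingVariants/pushVariant, so one partial
// transition becomes two, one per variant, each extended with the writes that
// its variant selects.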
// For each variant of a Read/Write in Trans, substitute the sequence of
// Read/Writes guarded by the variant. This is exponential in the number of
// variant Read/Writes, but in practice detection of mutually exclusive
// predicates should result in linear growth in the total number of variants.
//
// This is one step in a breadth-first search of nested variants.
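// A sketch of the kind of TableGen input that introduces such variants
// (hypothetical predicate and write names):
//
//   def MyLdVariant : SchedWriteVariant<[
//     SchedVar<MyFastPred,  [MyWriteFastLd]>,
//     SchedVar<NoSchedPred, [MyWriteSlowLd]>
//   ]>;
//
// A transition containing MyLdVariant is split into one transition guarded by
// MyFastPred that uses MyWriteFastLd and a default transition that uses
// MyWriteSlowLd.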
void PredTransitions::substituteVariants(const PredTransition &Trans) {
  // Build up a set of partial results starting at the back of
  // PredTransitions. Remember the first new transition.
  unsigned StartIdx = TransVec.size();
  TransVec.resize(TransVec.size() + 1);
  TransVec.back().PredTerm = Trans.PredTerm;
  TransVec.back().ProcIndices = Trans.ProcIndices;
  // Visit each original write sequence.
  for (SmallVectorImpl<SmallVector<unsigned,4> >::const_iterator
         WSI = Trans.WriteSequences.begin(), WSE = Trans.WriteSequences.end();
       WSI != WSE; ++WSI) {
    // Push a new (empty) write sequence onto all partial Transitions.
    for (std::vector<PredTransition>::iterator I =
           TransVec.begin() + StartIdx, E = TransVec.end(); I != E; ++I) {
      I->WriteSequences.resize(I->WriteSequences.size() + 1);
    }
    substituteVariantOperand(*WSI, /*IsRead=*/false, StartIdx);
  }
  // Visit each original read sequence.
  for (SmallVectorImpl<SmallVector<unsigned,4> >::const_iterator
         RSI = Trans.ReadSequences.begin(), RSE = Trans.ReadSequences.end();
       RSI != RSE; ++RSI) {
    // Push a new (empty) read sequence onto all partial Transitions.
    for (std::vector<PredTransition>::iterator I =
           TransVec.begin() + StartIdx, E = TransVec.end(); I != E; ++I) {
      I->ReadSequences.resize(I->ReadSequences.size() + 1);
    }
    substituteVariantOperand(*RSI, /*IsRead=*/true, StartIdx);
  }
}
// Create a new SchedClass for each variant found by inferFromRW.
static void inferFromTransitions(ArrayRef<PredTransition> LastTransitions,
                                 unsigned FromClassIdx,
                                 CodeGenSchedModels &SchedModels) {
  // For each PredTransition, create a new CodeGenSchedTransition, which usually
  // requires creating a new SchedClass.
  for (ArrayRef<PredTransition>::iterator
         I = LastTransitions.begin(), E = LastTransitions.end(); I != E; ++I) {
    IdxVec OperWritesVariant;
    for (SmallVectorImpl<SmallVector<unsigned,4> >::const_iterator
           WSI = I->WriteSequences.begin(), WSE = I->WriteSequences.end();
         WSI != WSE; ++WSI) {
      // Create a new write representing the expanded sequence.
      OperWritesVariant.push_back(
        SchedModels.findOrInsertRW(*WSI, /*IsRead=*/false));
    }
    IdxVec OperReadsVariant;
    for (SmallVectorImpl<SmallVector<unsigned,4> >::const_iterator
           RSI = I->ReadSequences.begin(), RSE = I->ReadSequences.end();
         RSI != RSE; ++RSI) {
      // Create a new read representing the expanded sequence.
      OperReadsVariant.push_back(
        SchedModels.findOrInsertRW(*RSI, /*IsRead=*/true));
    }
    IdxVec ProcIndices(I->ProcIndices.begin(), I->ProcIndices.end());
    CodeGenSchedTransition SCTrans;
    SCTrans.ToClassIdx =
      SchedModels.addSchedClass(/*ItinClassDef=*/nullptr, OperWritesVariant,
                                OperReadsVariant, ProcIndices);
    SCTrans.ProcIndices = ProcIndices;
    // The final PredTerm is the unique set of predicates guarding the
    // transition.
    RecVec Preds;
    for (SmallVectorImpl<PredCheck>::const_iterator
           PI = I->PredTerm.begin(), PE = I->PredTerm.end(); PI != PE; ++PI) {
      Preds.push_back(PI->Predicate);
    }
    RecIter PredsEnd = std::unique(Preds.begin(), Preds.end());
    Preds.resize(PredsEnd - Preds.begin());
    SCTrans.PredTerm = Preds;
    SchedModels.getSchedClass(FromClassIdx).Transitions.push_back(SCTrans);
  }
}
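// Each transition recorded above encodes: on the processors in ProcIndices,
// when every predicate in PredTerm holds, instructions of the original class
// FromClassIdx resolve to the (possibly newly created) class ToClassIdx.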
// Create new SchedClasses for the given ReadWrite list. If any of the
// ReadWrites refers to a SchedVariant, create a new SchedClass for each variant
// of the ReadWrite list, following Aliases if necessary.
void CodeGenSchedModels::inferFromRW(const IdxVec &OperWrites,
                                     const IdxVec &OperReads,
                                     unsigned FromClassIdx,
                                     const IdxVec &ProcIndices) {
  DEBUG(dbgs() << "INFER RW proc("; dumpIdxVec(ProcIndices); dbgs() << ") ");
  // Create a seed transition with an empty PredTerm and the expanded sequences
  // of SchedWrites for the current SchedClass.
  std::vector<PredTransition> LastTransitions;
  LastTransitions.resize(1);
  LastTransitions.back().ProcIndices.append(ProcIndices.begin(),
                                            ProcIndices.end());
  for (IdxIter I = OperWrites.begin(), E = OperWrites.end(); I != E; ++I) {
    IdxVec WriteSeq;
    expandRWSequence(*I, WriteSeq, /*IsRead=*/false);
    unsigned Idx = LastTransitions[0].WriteSequences.size();
    LastTransitions[0].WriteSequences.resize(Idx + 1);
    SmallVectorImpl<unsigned> &Seq = LastTransitions[0].WriteSequences[Idx];
    for (IdxIter WI = WriteSeq.begin(), WE = WriteSeq.end(); WI != WE; ++WI)
      Seq.push_back(*WI);
    DEBUG(dbgs() << "("; dumpIdxVec(Seq); dbgs() << ") ");
  }
  DEBUG(dbgs() << " Reads: ");
  for (IdxIter I = OperReads.begin(), E = OperReads.end(); I != E; ++I) {
    IdxVec ReadSeq;
    expandRWSequence(*I, ReadSeq, /*IsRead=*/true);
    unsigned Idx = LastTransitions[0].ReadSequences.size();
    LastTransitions[0].ReadSequences.resize(Idx + 1);
    SmallVectorImpl<unsigned> &Seq = LastTransitions[0].ReadSequences[Idx];
    for (IdxIter RI = ReadSeq.begin(), RE = ReadSeq.end(); RI != RE; ++RI)
      Seq.push_back(*RI);
    DEBUG(dbgs() << "("; dumpIdxVec(Seq); dbgs() << ") ");
  }
  DEBUG(dbgs() << '\n');
  // Collect all PredTransitions for individual operands.
  // Iterate until no variant writes remain.
  while (hasVariant(LastTransitions, *this)) {
    PredTransitions Transitions(*this);
    for (std::vector<PredTransition>::const_iterator
           I = LastTransitions.begin(), E = LastTransitions.end();
         I != E; ++I) {
      Transitions.substituteVariants(*I);
    }
    DEBUG(Transitions.dump());
    LastTransitions.swap(Transitions.TransVec);
  }
  // If the first transition has no variants, nothing to do.
  if (LastTransitions[0].PredTerm.empty())
    return;
  // WARNING: We are about to mutate the SchedClasses vector. Do not refer to
  // OperWrites, OperReads, or ProcIndices after calling inferFromTransitions.
  inferFromTransitions(LastTransitions, FromClassIdx, *this);
}
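// Rough worked example (hypothetical): with two operand writes that each have
// two variants guarded by independent predicates, the loop above produces up
// to four transitions, one per predicate combination. When the predicates are
// detected as mutually exclusive, fewer combinations intersect and growth
// stays linear, as noted above substituteVariants.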
// Check if any processor resource group contains all resource records in
// SubUnits.
bool CodeGenSchedModels::hasSuperGroup(RecVec &SubUnits, CodeGenProcModel &PM) {
  for (unsigned i = 0, e = PM.ProcResourceDefs.size(); i < e; ++i) {
    if (!PM.ProcResourceDefs[i]->isSubClassOf("ProcResGroup"))
      continue;
    RecVec SuperUnits =
      PM.ProcResourceDefs[i]->getValueAsListOfDefs("Resources");
    RecIter RI = SubUnits.begin(), RE = SubUnits.end();
    for ( ; RI != RE; ++RI) {
      if (std::find(SuperUnits.begin(), SuperUnits.end(), *RI)
          == SuperUnits.end()) {
        break;
      }
    }
    if (RI == RE)
      return true;
  }
  return false;
}
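// For example (hypothetical names): with SubUnits = [UnitA, UnitB], this
// returns true only if PM defines some ProcResGroup whose Resources list
// contains both UnitA and UnitB (and possibly more).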
// Verify that overlapping groups have a common supergroup.
void CodeGenSchedModels::verifyProcResourceGroups(CodeGenProcModel &PM) {
  for (unsigned i = 0, e = PM.ProcResourceDefs.size(); i < e; ++i) {
    if (!PM.ProcResourceDefs[i]->isSubClassOf("ProcResGroup"))
      continue;
    RecVec CheckUnits =
      PM.ProcResourceDefs[i]->getValueAsListOfDefs("Resources");
    for (unsigned j = i+1; j < e; ++j) {
      if (!PM.ProcResourceDefs[j]->isSubClassOf("ProcResGroup"))
        continue;
      RecVec OtherUnits =
        PM.ProcResourceDefs[j]->getValueAsListOfDefs("Resources");
      if (std::find_first_of(CheckUnits.begin(), CheckUnits.end(),
                             OtherUnits.begin(), OtherUnits.end())
          != CheckUnits.end()) {
        // CheckUnits and OtherUnits overlap
        OtherUnits.insert(OtherUnits.end(), CheckUnits.begin(),
                          CheckUnits.end());
        if (!hasSuperGroup(OtherUnits, PM)) {
          PrintFatalError((PM.ProcResourceDefs[i])->getLoc(),
                          "proc resource group overlaps with "
                          + PM.ProcResourceDefs[j]->getName()
                          + " but no supergroup contains both.");
        }
      }
    }
  }
}
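// A sketch of the constraint being verified (hypothetical resource names):
//
//   def UnitA  : ProcResource<1>;
//   def UnitB  : ProcResource<1>;
//   def UnitC  : ProcResource<1>;
//   def GrpAB  : ProcResGroup<[UnitA, UnitB]>;
//   def GrpBC  : ProcResGroup<[UnitB, UnitC]>;
//   def GrpABC : ProcResGroup<[UnitA, UnitB, UnitC]>; // required supergroup
//
// GrpAB and GrpBC overlap in UnitB, so the model must also define a group such
// as GrpABC that contains every unit of both; otherwise this check is fatal.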
// Collect and sort WriteRes, ReadAdvance, and ProcResources.
void CodeGenSchedModels::collectProcResources() {
  // Add any subtarget-specific SchedReadWrites that are directly associated
  // with processor resources. Refer to the parent SchedClass's ProcIndices to
  // determine which processors they apply to.
  for (SchedClassIter SCI = schedClassBegin(), SCE = schedClassEnd();
       SCI != SCE; ++SCI) {
    if (SCI->ItinClassDef)
      collectItinProcResources(SCI->ItinClassDef);
    else {
      // This class may have a default ReadWrite list which can be overridden
      // by InstRW definitions.
      if (!SCI->InstRWs.empty()) {
        for (RecIter RWI = SCI->InstRWs.begin(), RWE = SCI->InstRWs.end();
             RWI != RWE; ++RWI) {
          Record *RWModelDef = (*RWI)->getValueAsDef("SchedModel");
          IdxVec ProcIndices(1, getProcModel(RWModelDef).Index);
          IdxVec Writes, Reads;
          findRWs((*RWI)->getValueAsListOfDefs("OperandReadWrites"),
                  Writes, Reads);
          collectRWResources(Writes, Reads, ProcIndices);
        }
      }
      collectRWResources(SCI->Writes, SCI->Reads, SCI->ProcIndices);
    }
  }
  // Add resources separately defined by each subtarget.
  RecVec WRDefs = Records.getAllDerivedDefinitions("WriteRes");
  for (RecIter WRI = WRDefs.begin(), WRE = WRDefs.end(); WRI != WRE; ++WRI) {
    Record *ModelDef = (*WRI)->getValueAsDef("SchedModel");
    addWriteRes(*WRI, getProcModel(ModelDef).Index);
  }
  RecVec SWRDefs = Records.getAllDerivedDefinitions("SchedWriteRes");
  for (RecIter WRI = SWRDefs.begin(), WRE = SWRDefs.end(); WRI != WRE; ++WRI) {
    Record *ModelDef = (*WRI)->getValueAsDef("SchedModel");
    addWriteRes(*WRI, getProcModel(ModelDef).Index);
  }
  RecVec RADefs = Records.getAllDerivedDefinitions("ReadAdvance");
  for (RecIter RAI = RADefs.begin(), RAE = RADefs.end(); RAI != RAE; ++RAI) {
    Record *ModelDef = (*RAI)->getValueAsDef("SchedModel");
    addReadAdvance(*RAI, getProcModel(ModelDef).Index);
  }
  RecVec SRADefs = Records.getAllDerivedDefinitions("SchedReadAdvance");
  for (RecIter RAI = SRADefs.begin(), RAE = SRADefs.end(); RAI != RAE; ++RAI) {
    if ((*RAI)->getValueInit("SchedModel")->isComplete()) {
      Record *ModelDef = (*RAI)->getValueAsDef("SchedModel");
      addReadAdvance(*RAI, getProcModel(ModelDef).Index);
    }
  }
  // Add ProcResGroups that are defined within this processor model, which may
  // not be directly referenced but may directly specify a buffer size.
  RecVec ProcResGroups = Records.getAllDerivedDefinitions("ProcResGroup");
  for (RecIter RI = ProcResGroups.begin(), RE = ProcResGroups.end();
       RI != RE; ++RI) {
    if (!(*RI)->getValueInit("SchedModel")->isComplete())
      continue;
    CodeGenProcModel &PM = getProcModel((*RI)->getValueAsDef("SchedModel"));
    RecIter I = std::find(PM.ProcResourceDefs.begin(),
                          PM.ProcResourceDefs.end(), *RI);
    if (I == PM.ProcResourceDefs.end())
      PM.ProcResourceDefs.push_back(*RI);
  }
  // Finalize each ProcModel by sorting the record arrays.
  for (CodeGenProcModel &PM : ProcModels) {
    std::sort(PM.WriteResDefs.begin(), PM.WriteResDefs.end(),
              LessRecord());
    std::sort(PM.ReadAdvanceDefs.begin(), PM.ReadAdvanceDefs.end(),
              LessRecord());
    std::sort(PM.ProcResourceDefs.begin(), PM.ProcResourceDefs.end(),
              LessRecord());
    DEBUG(
      PM.dump();
      dbgs() << "WriteResDefs: ";
      for (RecIter RI = PM.WriteResDefs.begin(),
             RE = PM.WriteResDefs.end(); RI != RE; ++RI) {
        if ((*RI)->isSubClassOf("WriteRes"))
          dbgs() << (*RI)->getValueAsDef("WriteType")->getName() << " ";
        else
          dbgs() << (*RI)->getName() << " ";
      }
      dbgs() << "\nReadAdvanceDefs: ";
      for (RecIter RI = PM.ReadAdvanceDefs.begin(),
             RE = PM.ReadAdvanceDefs.end(); RI != RE; ++RI) {
        if ((*RI)->isSubClassOf("ReadAdvance"))
          dbgs() << (*RI)->getValueAsDef("ReadType")->getName() << " ";
        else
          dbgs() << (*RI)->getName() << " ";
      }
      dbgs() << "\nProcResourceDefs: ";
      for (RecIter RI = PM.ProcResourceDefs.begin(),
             RE = PM.ProcResourceDefs.end(); RI != RE; ++RI) {
        dbgs() << (*RI)->getName() << " ";
      }
      dbgs() << '\n');
    verifyProcResourceGroups(PM);
  }
}
// Collect itinerary class resources for each processor.
void CodeGenSchedModels::collectItinProcResources(Record *ItinClassDef) {
  for (unsigned PIdx = 0, PEnd = ProcModels.size(); PIdx != PEnd; ++PIdx) {
    const CodeGenProcModel &PM = ProcModels[PIdx];
    // For all ItinRW entries.
    bool HasMatch = false;
    for (RecIter II = PM.ItinRWDefs.begin(), IE = PM.ItinRWDefs.end();
         II != IE; ++II) {
      RecVec Matched = (*II)->getValueAsListOfDefs("MatchedItinClasses");
      if (!std::count(Matched.begin(), Matched.end(), ItinClassDef))
        continue;
      if (HasMatch)
        PrintFatalError((*II)->getLoc(), "Duplicate itinerary class "
                        + ItinClassDef->getName()
                        + " in ItinResources for " + PM.ModelName);
      HasMatch = true;
      IdxVec Writes, Reads;
      findRWs((*II)->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads);
      IdxVec ProcIndices(1, PIdx);
      collectRWResources(Writes, Reads, ProcIndices);
    }
  }
}
void CodeGenSchedModels::collectRWResources(unsigned RWIdx, bool IsRead,
                                            const IdxVec &ProcIndices) {
  const CodeGenSchedRW &SchedRW = getSchedRW(RWIdx, IsRead);
  if (SchedRW.TheDef) {
    if (!IsRead && SchedRW.TheDef->isSubClassOf("SchedWriteRes")) {
      for (IdxIter PI = ProcIndices.begin(), PE = ProcIndices.end();
           PI != PE; ++PI) {
        addWriteRes(SchedRW.TheDef, *PI);
      }
    }
    else if (IsRead && SchedRW.TheDef->isSubClassOf("SchedReadAdvance")) {
      for (IdxIter PI = ProcIndices.begin(), PE = ProcIndices.end();
           PI != PE; ++PI) {
        addReadAdvance(SchedRW.TheDef, *PI);
      }
    }
  }
  for (RecIter AI = SchedRW.Aliases.begin(), AE = SchedRW.Aliases.end();
       AI != AE; ++AI) {
    IdxVec AliasProcIndices;
    if ((*AI)->getValueInit("SchedModel")->isComplete()) {
      AliasProcIndices.push_back(
        getProcModel((*AI)->getValueAsDef("SchedModel")).Index);
    }
    else
      AliasProcIndices = ProcIndices;
    const CodeGenSchedRW &AliasRW = getSchedRW((*AI)->getValueAsDef("AliasRW"));
    assert(AliasRW.IsRead == IsRead && "cannot alias reads to writes");
    IdxVec ExpandedRWs;
    expandRWSequence(AliasRW.Index, ExpandedRWs, IsRead);
    for (IdxIter SI = ExpandedRWs.begin(), SE = ExpandedRWs.end();
         SI != SE; ++SI) {
      collectRWResources(*SI, IsRead, AliasProcIndices);
    }
  }
}
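// The alias loop above also follows SchedAlias records, so resources reached
// only through an alias are still registered. A hypothetical example, defined
// inside a processor's "let SchedModel = ..." block:
//
//   def MyWriteLdRes : SchedWriteRes<[MyLdUnit]>;
//   def : SchedAlias<WriteLd, MyWriteLdRes>;
//
// Collecting resources for WriteLd on that model recurses into MyWriteLdRes
// and calls addWriteRes for it.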
// Collect resources for a set of read/write types and processor indices.
void CodeGenSchedModels::collectRWResources(const IdxVec &Writes,
                                            const IdxVec &Reads,
                                            const IdxVec &ProcIndices) {
  for (IdxIter WI = Writes.begin(), WE = Writes.end(); WI != WE; ++WI)
    collectRWResources(*WI, /*IsRead=*/false, ProcIndices);
  for (IdxIter RI = Reads.begin(), RE = Reads.end(); RI != RE; ++RI)
    collectRWResources(*RI, /*IsRead=*/true, ProcIndices);
}
// Find the processor's resource units for this kind of resource.
Record *CodeGenSchedModels::findProcResUnits(Record *ProcResKind,
                                             const CodeGenProcModel &PM) const {
  if (ProcResKind->isSubClassOf("ProcResourceUnits"))
    return ProcResKind;
  Record *ProcUnitDef = nullptr;
  RecVec ProcResourceDefs =
    Records.getAllDerivedDefinitions("ProcResourceUnits");
  for (RecIter RI = ProcResourceDefs.begin(), RE = ProcResourceDefs.end();
       RI != RE; ++RI) {
    if ((*RI)->getValueAsDef("Kind") == ProcResKind
        && (*RI)->getValueAsDef("SchedModel") == PM.ModelDef) {
      if (ProcUnitDef) {
        PrintFatalError((*RI)->getLoc(),
                        "Multiple ProcessorResourceUnits associated with "
                        + ProcResKind->getName());
      }
      ProcUnitDef = *RI;
    }
  }
  RecVec ProcResGroups = Records.getAllDerivedDefinitions("ProcResGroup");
  for (RecIter RI = ProcResGroups.begin(), RE = ProcResGroups.end();
       RI != RE; ++RI) {
    if (*RI == ProcResKind
        && (*RI)->getValueAsDef("SchedModel") == PM.ModelDef) {
      if (ProcUnitDef) {
        PrintFatalError((*RI)->getLoc(),
                        "Multiple ProcessorResourceUnits associated with "
                        + ProcResKind->getName());
      }
      ProcUnitDef = *RI;
    }
  }
  if (!ProcUnitDef) {
    PrintFatalError(ProcResKind->getLoc(),
                    "No ProcessorResources associated with "
                    + ProcResKind->getName());
  }
  return ProcUnitDef;
}
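// In short: a resource kind referenced by a WriteRes is resolved either to
// itself (when it already defines units), to the ProcResourceUnits def whose
// Kind matches it for this model, or to the same ProcResGroup record when that
// group is bound to this model; anything else is a fatal error.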
// Iteratively add a resource and its super resources.
void CodeGenSchedModels::addProcResource(Record *ProcResKind,
                                         CodeGenProcModel &PM) {
  for (;;) {
    Record *ProcResUnits = findProcResUnits(ProcResKind, PM);
    // See if this ProcResource is already associated with this processor.
    RecIter I = std::find(PM.ProcResourceDefs.begin(),
                          PM.ProcResourceDefs.end(), ProcResUnits);
    if (I != PM.ProcResourceDefs.end())
      return;
    PM.ProcResourceDefs.push_back(ProcResUnits);
    if (ProcResUnits->isSubClassOf("ProcResGroup"))
      return;
    if (!ProcResUnits->getValueInit("Super")->isComplete())
      return;
    ProcResKind = ProcResUnits->getValueAsDef("Super");
  }
}
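// Illustrative sketch of the Super chain this loop walks (hypothetical names):
//
//   def MyCluster : ProcResource<2>;
//   def MyUnit    : ProcResource<1> { let Super = MyCluster; }
//
// Adding MyUnit to a model also adds MyCluster, stopping at a resource with no
// Super or at a ProcResGroup.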
// Add resources for a SchedWrite to this processor if they don't exist.
void CodeGenSchedModels::addWriteRes(Record *ProcWriteResDef, unsigned PIdx) {
  assert(PIdx && "don't add resources to an invalid Processor model");
  RecVec &WRDefs = ProcModels[PIdx].WriteResDefs;
  RecIter WRI = std::find(WRDefs.begin(), WRDefs.end(), ProcWriteResDef);
  if (WRI != WRDefs.end())
    return;
  WRDefs.push_back(ProcWriteResDef);
  // Visit ProcResourceKinds referenced by the newly discovered WriteRes.
  RecVec ProcResDefs = ProcWriteResDef->getValueAsListOfDefs("ProcResources");
  for (RecIter WritePRI = ProcResDefs.begin(), WritePRE = ProcResDefs.end();
       WritePRI != WritePRE; ++WritePRI) {
    addProcResource(*WritePRI, ProcModels[PIdx]);
  }
}
// Add resources for a ReadAdvance to this processor if they don't exist.
void CodeGenSchedModels::addReadAdvance(Record *ProcReadAdvanceDef,
                                        unsigned PIdx) {
  RecVec &RADefs = ProcModels[PIdx].ReadAdvanceDefs;
  RecIter I = std::find(RADefs.begin(), RADefs.end(), ProcReadAdvanceDef);
  if (I != RADefs.end())
    return;
  RADefs.push_back(ProcReadAdvanceDef);
}
unsigned CodeGenProcModel::getProcResourceIdx(Record *PRDef) const {
  RecIter PRPos = std::find(ProcResourceDefs.begin(), ProcResourceDefs.end(),
                            PRDef);
  if (PRPos == ProcResourceDefs.end())
    PrintFatalError(PRDef->getLoc(), "ProcResource def is not included in "
                    "the ProcResources list for " + ModelName);
  // Idx=0 is reserved for invalid.
  return 1 + (PRPos - ProcResourceDefs.begin());
}
#ifndef NDEBUG
void CodeGenProcModel::dump() const {
  dbgs() << Index << ": " << ModelName << " "
         << (ModelDef ? ModelDef->getName() : "inferred") << " "
         << (ItinsDef ? ItinsDef->getName() : "no itinerary") << '\n';
}
void CodeGenSchedRW::dump() const {
  dbgs() << Name << (IsVariadic ? " (V) " : " ");
  if (IsSequence) {
    dbgs() << "(";
    dumpIdxVec(Sequence);
    dbgs() << ")";
  }
}
void CodeGenSchedClass::dump(const CodeGenSchedModels* SchedModels) const {
  dbgs() << "SCHEDCLASS " << Index << ":" << Name << '\n'
         << " Writes: ";
  for (unsigned i = 0, N = Writes.size(); i < N; ++i) {
    SchedModels->getSchedWrite(Writes[i]).dump();
    if (i < N-1) {
      dbgs() << '\n';
      dbgs().indent(10);
    }
  }
  dbgs() << "\n Reads: ";
  for (unsigned i = 0, N = Reads.size(); i < N; ++i) {
    SchedModels->getSchedRead(Reads[i]).dump();
    if (i < N-1) {
      dbgs() << '\n';
      dbgs().indent(10);
    }
  }
  dbgs() << "\n ProcIdx: "; dumpIdxVec(ProcIndices); dbgs() << '\n';
  if (!Transitions.empty()) {
    dbgs() << "\n Transitions for Proc ";
    for (std::vector<CodeGenSchedTransition>::const_iterator
           TI = Transitions.begin(), TE = Transitions.end(); TI != TE; ++TI) {
      dumpIdxVec(TI->ProcIndices);
    }
  }
}
void PredTransitions::dump() const {
  dbgs() << "Expanded Variants:\n";
  for (std::vector<PredTransition>::const_iterator
         TI = TransVec.begin(), TE = TransVec.end(); TI != TE; ++TI) {
    dbgs() << "{";
    for (SmallVectorImpl<PredCheck>::const_iterator
           PCI = TI->PredTerm.begin(), PCE = TI->PredTerm.end();
         PCI != PCE; ++PCI) {
      if (PCI != TI->PredTerm.begin())
        dbgs() << ", ";
      dbgs() << SchedModels.getSchedRW(PCI->RWIdx, PCI->IsRead).Name
             << ":" << PCI->Predicate->getName();
    }
    dbgs() << "},\n => {";
    for (SmallVectorImpl<SmallVector<unsigned,4> >::const_iterator
           WSI = TI->WriteSequences.begin(), WSE = TI->WriteSequences.end();
         WSI != WSE; ++WSI) {
      dbgs() << "(";
      for (SmallVectorImpl<unsigned>::const_iterator
             WI = WSI->begin(), WE = WSI->end(); WI != WE; ++WI) {
        if (WI != WSI->begin())
          dbgs() << ", ";
        dbgs() << SchedModels.getSchedWrite(*WI).Name;
      }
      dbgs() << "),";
    }
    dbgs() << "}\n";
  }
}
#endif // NDEBUG