; inline_constprop.ll — verify the inline cost analysis constant-propagates
; through inlined call sites so provably-dead expensive paths are not counted.
; RUN: opt < %s -inline -inline-threshold=20 -S | FileCheck %s
  2. define internal i32 @callee1(i32 %A, i32 %B) {
  3. %C = sdiv i32 %A, %B
  4. ret i32 %C
  5. }
  6. define i32 @caller1() {
  7. ; CHECK-LABEL: define i32 @caller1(
  8. ; CHECK-NEXT: ret i32 3
  9. %X = call i32 @callee1( i32 10, i32 3 )
  10. ret i32 %X
  11. }
  12. define i32 @caller2() {
  13. ; Check that we can constant-prop through instructions after inlining callee21
  14. ; to get constants in the inlined callsite to callee22.
  15. ; FIXME: Currently, the threshold is fixed at 20 because we don't perform
  16. ; *recursive* cost analysis to realize that the nested call site will definitely
  17. ; inline and be cheap. We should eventually do that and lower the threshold here
  18. ; to 1.
  19. ;
  20. ; CHECK-LABEL: @caller2(
  21. ; CHECK-NOT: call void @callee2
  22. ; CHECK: ret
  23. %x = call i32 @callee21(i32 42, i32 48)
  24. ret i32 %x
  25. }
  26. define i32 @callee21(i32 %x, i32 %y) {
  27. %sub = sub i32 %y, %x
  28. %result = call i32 @callee22(i32 %sub)
  29. ret i32 %result
  30. }
  31. declare i8* @getptr()
  32. define i32 @callee22(i32 %x) {
  33. %icmp = icmp ugt i32 %x, 42
  34. br i1 %icmp, label %bb.true, label %bb.false
  35. bb.true:
  36. ; This block musn't be counted in the inline cost.
  37. %x1 = add i32 %x, 1
  38. %x2 = add i32 %x1, 1
  39. %x3 = add i32 %x2, 1
  40. %x4 = add i32 %x3, 1
  41. %x5 = add i32 %x4, 1
  42. %x6 = add i32 %x5, 1
  43. %x7 = add i32 %x6, 1
  44. %x8 = add i32 %x7, 1
  45. ret i32 %x8
  46. bb.false:
  47. ret i32 %x
  48. }
  49. define i32 @caller3() {
  50. ; Check that even if the expensive path is hidden behind several basic blocks,
  51. ; it doesn't count toward the inline cost when constant-prop proves those paths
  52. ; dead.
  53. ;
  54. ; CHECK-LABEL: @caller3(
  55. ; CHECK-NOT: call
  56. ; CHECK: ret i32 6
  57. entry:
  58. %x = call i32 @callee3(i32 42, i32 48)
  59. ret i32 %x
  60. }
  61. define i32 @callee3(i32 %x, i32 %y) {
  62. %sub = sub i32 %y, %x
  63. %icmp = icmp ugt i32 %sub, 42
  64. br i1 %icmp, label %bb.true, label %bb.false
  65. bb.true:
  66. %icmp2 = icmp ult i32 %sub, 64
  67. br i1 %icmp2, label %bb.true.true, label %bb.true.false
  68. bb.true.true:
  69. ; This block musn't be counted in the inline cost.
  70. %x1 = add i32 %x, 1
  71. %x2 = add i32 %x1, 1
  72. %x3 = add i32 %x2, 1
  73. %x4 = add i32 %x3, 1
  74. %x5 = add i32 %x4, 1
  75. %x6 = add i32 %x5, 1
  76. %x7 = add i32 %x6, 1
  77. %x8 = add i32 %x7, 1
  78. br label %bb.merge
  79. bb.true.false:
  80. ; This block musn't be counted in the inline cost.
  81. %y1 = add i32 %y, 1
  82. %y2 = add i32 %y1, 1
  83. %y3 = add i32 %y2, 1
  84. %y4 = add i32 %y3, 1
  85. %y5 = add i32 %y4, 1
  86. %y6 = add i32 %y5, 1
  87. %y7 = add i32 %y6, 1
  88. %y8 = add i32 %y7, 1
  89. br label %bb.merge
  90. bb.merge:
  91. %result = phi i32 [ %x8, %bb.true.true ], [ %y8, %bb.true.false ]
  92. ret i32 %result
  93. bb.false:
  94. ret i32 %sub
  95. }
  96. declare {i8, i1} @llvm.uadd.with.overflow.i8(i8 %a, i8 %b)
  97. define i8 @caller4(i8 %z) {
  98. ; Check that we can constant fold through intrinsics such as the
  99. ; overflow-detecting arithmetic instrinsics. These are particularly important
  100. ; as they are used heavily in standard library code and generic C++ code where
  101. ; the arguments are oftent constant but complete generality is required.
  102. ;
  103. ; CHECK-LABEL: @caller4(
  104. ; CHECK-NOT: call
  105. ; CHECK: ret i8 -1
  106. entry:
  107. %x = call i8 @callee4(i8 254, i8 14, i8 %z)
  108. ret i8 %x
  109. }
  110. define i8 @callee4(i8 %x, i8 %y, i8 %z) {
  111. %uadd = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 %x, i8 %y)
  112. %o = extractvalue {i8, i1} %uadd, 1
  113. br i1 %o, label %bb.true, label %bb.false
  114. bb.true:
  115. ret i8 -1
  116. bb.false:
  117. ; This block musn't be counted in the inline cost.
  118. %z1 = add i8 %z, 1
  119. %z2 = add i8 %z1, 1
  120. %z3 = add i8 %z2, 1
  121. %z4 = add i8 %z3, 1
  122. %z5 = add i8 %z4, 1
  123. %z6 = add i8 %z5, 1
  124. %z7 = add i8 %z6, 1
  125. %z8 = add i8 %z7, 1
  126. ret i8 %z8
  127. }
  128. define i64 @caller5(i64 %y) {
  129. ; Check that we can round trip constants through various kinds of casts etc w/o
  130. ; losing track of the constant prop in the inline cost analysis.
  131. ;
  132. ; CHECK-LABEL: @caller5(
  133. ; CHECK-NOT: call
  134. ; CHECK: ret i64 -1
  135. entry:
  136. %x = call i64 @callee5(i64 42, i64 %y)
  137. ret i64 %x
  138. }
  139. define i64 @callee5(i64 %x, i64 %y) {
  140. %inttoptr = inttoptr i64 %x to i8*
  141. %bitcast = bitcast i8* %inttoptr to i32*
  142. %ptrtoint = ptrtoint i32* %bitcast to i64
  143. %trunc = trunc i64 %ptrtoint to i32
  144. %zext = zext i32 %trunc to i64
  145. %cmp = icmp eq i64 %zext, 42
  146. br i1 %cmp, label %bb.true, label %bb.false
  147. bb.true:
  148. ret i64 -1
  149. bb.false:
  150. ; This block musn't be counted in the inline cost.
  151. %y1 = add i64 %y, 1
  152. %y2 = add i64 %y1, 1
  153. %y3 = add i64 %y2, 1
  154. %y4 = add i64 %y3, 1
  155. %y5 = add i64 %y4, 1
  156. %y6 = add i64 %y5, 1
  157. %y7 = add i64 %y6, 1
  158. %y8 = add i64 %y7, 1
  159. ret i64 %y8
  160. }
  161. define float @caller6() {
  162. ; Check that we can constant-prop through fcmp instructions
  163. ;
  164. ; CHECK-LABEL: @caller6(
  165. ; CHECK-NOT: call
  166. ; CHECK: ret
  167. %x = call float @callee6(float 42.0)
  168. ret float %x
  169. }
  170. define float @callee6(float %x) {
  171. %icmp = fcmp ugt float %x, 42.0
  172. br i1 %icmp, label %bb.true, label %bb.false
  173. bb.true:
  174. ; This block musn't be counted in the inline cost.
  175. %x1 = fadd float %x, 1.0
  176. %x2 = fadd float %x1, 1.0
  177. %x3 = fadd float %x2, 1.0
  178. %x4 = fadd float %x3, 1.0
  179. %x5 = fadd float %x4, 1.0
  180. %x6 = fadd float %x5, 1.0
  181. %x7 = fadd float %x6, 1.0
  182. %x8 = fadd float %x7, 1.0
  183. ret float %x8
  184. bb.false:
  185. ret float %x
  186. }
  187. define i32 @PR13412.main() {
  188. ; This is a somewhat complicated three layer subprogram that was reported to
  189. ; compute the wrong value for a branch due to assuming that an argument
  190. ; mid-inline couldn't be equal to another pointer.
  191. ;
  192. ; After inlining, the branch should point directly to the exit block, not to
  193. ; the intermediate block.
  194. ; CHECK: @PR13412.main
  195. ; CHECK: br i1 true, label %[[TRUE_DEST:.*]], label %[[FALSE_DEST:.*]]
  196. ; CHECK: [[FALSE_DEST]]:
  197. ; CHECK-NEXT: call void @PR13412.fail()
  198. ; CHECK: [[TRUE_DEST]]:
  199. ; CHECK-NEXT: ret i32 0
  200. entry:
  201. %i1 = alloca i64
  202. store i64 0, i64* %i1
  203. %arraydecay = bitcast i64* %i1 to i32*
  204. %call = call i1 @PR13412.first(i32* %arraydecay, i32* %arraydecay)
  205. br i1 %call, label %cond.end, label %cond.false
  206. cond.false:
  207. call void @PR13412.fail()
  208. br label %cond.end
  209. cond.end:
  210. ret i32 0
  211. }
  212. define internal i1 @PR13412.first(i32* %a, i32* %b) {
  213. entry:
  214. %call = call i32* @PR13412.second(i32* %a, i32* %b)
  215. %cmp = icmp eq i32* %call, %b
  216. ret i1 %cmp
  217. }
  218. declare void @PR13412.fail()
  219. define internal i32* @PR13412.second(i32* %a, i32* %b) {
  220. entry:
  221. %sub.ptr.lhs.cast = ptrtoint i32* %b to i64
  222. %sub.ptr.rhs.cast = ptrtoint i32* %a to i64
  223. %sub.ptr.sub = sub i64 %sub.ptr.lhs.cast, %sub.ptr.rhs.cast
  224. %sub.ptr.div = ashr exact i64 %sub.ptr.sub, 2
  225. %cmp = icmp ugt i64 %sub.ptr.div, 1
  226. br i1 %cmp, label %if.then, label %if.end3
  227. if.then:
  228. %0 = load i32, i32* %a
  229. %1 = load i32, i32* %b
  230. %cmp1 = icmp eq i32 %0, %1
  231. br i1 %cmp1, label %return, label %if.end3
  232. if.end3:
  233. br label %return
  234. return:
  235. %retval.0 = phi i32* [ %b, %if.end3 ], [ %a, %if.then ]
  236. ret i32* %retval.0
  237. }