; basictest.ll — SROA (Scalar Replacement of Aggregates) regression test.
; NOTE(review): the original paste carried a "64 KB" size tag and runs of
; concatenated line numbers from a text extraction; restored as a comment.
  1. ; RUN: opt < %s -sroa -S | FileCheck %s
  2. ; RUN: opt < %s -sroa -force-ssa-updater -S | FileCheck %s
  3. target datalayout = "e-p:64:64:64-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n8:16:32:64"
  4. declare void @llvm.lifetime.start(i64, i8* nocapture)
  5. declare void @llvm.lifetime.end(i64, i8* nocapture)
  6. define i32 @test0() {
  7. ; CHECK-LABEL: @test0(
  8. ; CHECK-NOT: alloca
  9. ; CHECK: ret i32
  10. entry:
  11. %a1 = alloca i32
  12. %a2 = alloca float
  13. %a1.i8 = bitcast i32* %a1 to i8*
  14. call void @llvm.lifetime.start(i64 4, i8* %a1.i8)
  15. store i32 0, i32* %a1
  16. %v1 = load i32, i32* %a1
  17. call void @llvm.lifetime.end(i64 4, i8* %a1.i8)
  18. %a2.i8 = bitcast float* %a2 to i8*
  19. call void @llvm.lifetime.start(i64 4, i8* %a2.i8)
  20. store float 0.0, float* %a2
  21. %v2 = load float , float * %a2
  22. %v2.int = bitcast float %v2 to i32
  23. %sum1 = add i32 %v1, %v2.int
  24. call void @llvm.lifetime.end(i64 4, i8* %a2.i8)
  25. ret i32 %sum1
  26. }
  27. define i32 @test1() {
  28. ; CHECK-LABEL: @test1(
  29. ; CHECK-NOT: alloca
  30. ; CHECK: ret i32 0
  31. entry:
  32. %X = alloca { i32, float }
  33. %Y = getelementptr { i32, float }, { i32, float }* %X, i64 0, i32 0
  34. store i32 0, i32* %Y
  35. %Z = load i32, i32* %Y
  36. ret i32 %Z
  37. }
  38. define i64 @test2(i64 %X) {
  39. ; CHECK-LABEL: @test2(
  40. ; CHECK-NOT: alloca
  41. ; CHECK: ret i64 %X
  42. entry:
  43. %A = alloca [8 x i8]
  44. %B = bitcast [8 x i8]* %A to i64*
  45. store i64 %X, i64* %B
  46. br label %L2
  47. L2:
  48. %Z = load i64, i64* %B
  49. ret i64 %Z
  50. }
  51. define void @test3(i8* %dst, i8* %src) {
  52. ; CHECK-LABEL: @test3(
  53. entry:
  54. %a = alloca [300 x i8]
  55. ; CHECK-NOT: alloca
  56. ; CHECK: %[[test3_a1:.*]] = alloca [42 x i8]
  57. ; CHECK-NEXT: %[[test3_a2:.*]] = alloca [99 x i8]
  58. ; CHECK-NEXT: %[[test3_a3:.*]] = alloca [16 x i8]
  59. ; CHECK-NEXT: %[[test3_a4:.*]] = alloca [42 x i8]
  60. ; CHECK-NEXT: %[[test3_a5:.*]] = alloca [7 x i8]
  61. ; CHECK-NEXT: %[[test3_a6:.*]] = alloca [7 x i8]
  62. ; CHECK-NEXT: %[[test3_a7:.*]] = alloca [85 x i8]
  63. %b = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 0
  64. call void @llvm.memcpy.p0i8.p0i8.i32(i8* %b, i8* %src, i32 300, i32 1, i1 false)
  65. ; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [42 x i8], [42 x i8]* %[[test3_a1]], i64 0, i64 0
  66. ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %src, i32 42
  67. ; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %src, i64 42
  68. ; CHECK-NEXT: %[[test3_r1:.*]] = load i8, i8* %[[gep]]
  69. ; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 43
  70. ; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [99 x i8], [99 x i8]* %[[test3_a2]], i64 0, i64 0
  71. ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 99
  72. ; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 142
  73. ; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 0
  74. ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 16
  75. ; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 158
  76. ; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [42 x i8], [42 x i8]* %[[test3_a4]], i64 0, i64 0
  77. ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 42
  78. ; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 200
  79. ; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a5]], i64 0, i64 0
  80. ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
  81. ; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %src, i64 207
  82. ; CHECK-NEXT: %[[test3_r2:.*]] = load i8, i8* %[[gep]]
  83. ; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 208
  84. ; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 0
  85. ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
  86. ; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 215
  87. ; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [85 x i8], [85 x i8]* %[[test3_a7]], i64 0, i64 0
  88. ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 85
  89. ; Clobber a single element of the array, this should be promotable.
  90. %c = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 42
  91. store i8 0, i8* %c
  92. ; Make a sequence of overlapping stores to the array. These overlap both in
  93. ; forward strides and in shrinking accesses.
  94. %overlap.1.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 142
  95. %overlap.2.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 143
  96. %overlap.3.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 144
  97. %overlap.4.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 145
  98. %overlap.5.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 146
  99. %overlap.6.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 147
  100. %overlap.7.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 148
  101. %overlap.8.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 149
  102. %overlap.9.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 150
  103. %overlap.1.i16 = bitcast i8* %overlap.1.i8 to i16*
  104. %overlap.1.i32 = bitcast i8* %overlap.1.i8 to i32*
  105. %overlap.1.i64 = bitcast i8* %overlap.1.i8 to i64*
  106. %overlap.2.i64 = bitcast i8* %overlap.2.i8 to i64*
  107. %overlap.3.i64 = bitcast i8* %overlap.3.i8 to i64*
  108. %overlap.4.i64 = bitcast i8* %overlap.4.i8 to i64*
  109. %overlap.5.i64 = bitcast i8* %overlap.5.i8 to i64*
  110. %overlap.6.i64 = bitcast i8* %overlap.6.i8 to i64*
  111. %overlap.7.i64 = bitcast i8* %overlap.7.i8 to i64*
  112. %overlap.8.i64 = bitcast i8* %overlap.8.i8 to i64*
  113. %overlap.9.i64 = bitcast i8* %overlap.9.i8 to i64*
  114. store i8 1, i8* %overlap.1.i8
  115. ; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 0
  116. ; CHECK-NEXT: store i8 1, i8* %[[gep]]
  117. store i16 1, i16* %overlap.1.i16
  118. ; CHECK-NEXT: %[[bitcast:.*]] = bitcast [16 x i8]* %[[test3_a3]] to i16*
  119. ; CHECK-NEXT: store i16 1, i16* %[[bitcast]]
  120. store i32 1, i32* %overlap.1.i32
  121. ; CHECK-NEXT: %[[bitcast:.*]] = bitcast [16 x i8]* %[[test3_a3]] to i32*
  122. ; CHECK-NEXT: store i32 1, i32* %[[bitcast]]
  123. store i64 1, i64* %overlap.1.i64
  124. ; CHECK-NEXT: %[[bitcast:.*]] = bitcast [16 x i8]* %[[test3_a3]] to i64*
  125. ; CHECK-NEXT: store i64 1, i64* %[[bitcast]]
  126. store i64 2, i64* %overlap.2.i64
  127. ; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 1
  128. ; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i64*
  129. ; CHECK-NEXT: store i64 2, i64* %[[bitcast]]
  130. store i64 3, i64* %overlap.3.i64
  131. ; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 2
  132. ; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i64*
  133. ; CHECK-NEXT: store i64 3, i64* %[[bitcast]]
  134. store i64 4, i64* %overlap.4.i64
  135. ; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 3
  136. ; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i64*
  137. ; CHECK-NEXT: store i64 4, i64* %[[bitcast]]
  138. store i64 5, i64* %overlap.5.i64
  139. ; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 4
  140. ; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i64*
  141. ; CHECK-NEXT: store i64 5, i64* %[[bitcast]]
  142. store i64 6, i64* %overlap.6.i64
  143. ; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 5
  144. ; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i64*
  145. ; CHECK-NEXT: store i64 6, i64* %[[bitcast]]
  146. store i64 7, i64* %overlap.7.i64
  147. ; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 6
  148. ; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i64*
  149. ; CHECK-NEXT: store i64 7, i64* %[[bitcast]]
  150. store i64 8, i64* %overlap.8.i64
  151. ; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 7
  152. ; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i64*
  153. ; CHECK-NEXT: store i64 8, i64* %[[bitcast]]
  154. store i64 9, i64* %overlap.9.i64
  155. ; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 8
  156. ; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i64*
  157. ; CHECK-NEXT: store i64 9, i64* %[[bitcast]]
  158. ; Make two sequences of overlapping stores with more gaps and irregularities.
  159. %overlap2.1.0.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 200
  160. %overlap2.1.1.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 201
  161. %overlap2.1.2.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 202
  162. %overlap2.1.3.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 203
  163. %overlap2.2.0.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 208
  164. %overlap2.2.1.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 209
  165. %overlap2.2.2.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 210
  166. %overlap2.2.3.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 211
  167. %overlap2.1.0.i16 = bitcast i8* %overlap2.1.0.i8 to i16*
  168. %overlap2.1.0.i32 = bitcast i8* %overlap2.1.0.i8 to i32*
  169. %overlap2.1.1.i32 = bitcast i8* %overlap2.1.1.i8 to i32*
  170. %overlap2.1.2.i32 = bitcast i8* %overlap2.1.2.i8 to i32*
  171. %overlap2.1.3.i32 = bitcast i8* %overlap2.1.3.i8 to i32*
  172. store i8 1, i8* %overlap2.1.0.i8
  173. ; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a5]], i64 0, i64 0
  174. ; CHECK-NEXT: store i8 1, i8* %[[gep]]
  175. store i16 1, i16* %overlap2.1.0.i16
  176. ; CHECK-NEXT: %[[bitcast:.*]] = bitcast [7 x i8]* %[[test3_a5]] to i16*
  177. ; CHECK-NEXT: store i16 1, i16* %[[bitcast]]
  178. store i32 1, i32* %overlap2.1.0.i32
  179. ; CHECK-NEXT: %[[bitcast:.*]] = bitcast [7 x i8]* %[[test3_a5]] to i32*
  180. ; CHECK-NEXT: store i32 1, i32* %[[bitcast]]
  181. store i32 2, i32* %overlap2.1.1.i32
  182. ; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a5]], i64 0, i64 1
  183. ; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i32*
  184. ; CHECK-NEXT: store i32 2, i32* %[[bitcast]]
  185. store i32 3, i32* %overlap2.1.2.i32
  186. ; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a5]], i64 0, i64 2
  187. ; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i32*
  188. ; CHECK-NEXT: store i32 3, i32* %[[bitcast]]
  189. store i32 4, i32* %overlap2.1.3.i32
  190. ; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a5]], i64 0, i64 3
  191. ; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i32*
  192. ; CHECK-NEXT: store i32 4, i32* %[[bitcast]]
  193. %overlap2.2.0.i32 = bitcast i8* %overlap2.2.0.i8 to i32*
  194. %overlap2.2.1.i16 = bitcast i8* %overlap2.2.1.i8 to i16*
  195. %overlap2.2.1.i32 = bitcast i8* %overlap2.2.1.i8 to i32*
  196. %overlap2.2.2.i32 = bitcast i8* %overlap2.2.2.i8 to i32*
  197. %overlap2.2.3.i32 = bitcast i8* %overlap2.2.3.i8 to i32*
  198. store i32 1, i32* %overlap2.2.0.i32
  199. ; CHECK-NEXT: %[[bitcast:.*]] = bitcast [7 x i8]* %[[test3_a6]] to i32*
  200. ; CHECK-NEXT: store i32 1, i32* %[[bitcast]]
  201. store i8 1, i8* %overlap2.2.1.i8
  202. ; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 1
  203. ; CHECK-NEXT: store i8 1, i8* %[[gep]]
  204. store i16 1, i16* %overlap2.2.1.i16
  205. ; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 1
  206. ; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i16*
  207. ; CHECK-NEXT: store i16 1, i16* %[[bitcast]]
  208. store i32 1, i32* %overlap2.2.1.i32
  209. ; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 1
  210. ; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i32*
  211. ; CHECK-NEXT: store i32 1, i32* %[[bitcast]]
  212. store i32 3, i32* %overlap2.2.2.i32
  213. ; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 2
  214. ; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i32*
  215. ; CHECK-NEXT: store i32 3, i32* %[[bitcast]]
  216. store i32 4, i32* %overlap2.2.3.i32
  217. ; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 3
  218. ; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i32*
  219. ; CHECK-NEXT: store i32 4, i32* %[[bitcast]]
  220. %overlap2.prefix = getelementptr i8, i8* %overlap2.1.1.i8, i64 -4
  221. call void @llvm.memcpy.p0i8.p0i8.i32(i8* %overlap2.prefix, i8* %src, i32 8, i32 1, i1 false)
  222. ; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [42 x i8], [42 x i8]* %[[test3_a4]], i64 0, i64 39
  223. ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %src, i32 3
  224. ; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 3
  225. ; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a5]], i64 0, i64 0
  226. ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 5
  227. ; Bridge between the overlapping areas
  228. call void @llvm.memset.p0i8.i32(i8* %overlap2.1.2.i8, i8 42, i32 8, i32 1, i1 false)
  229. ; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a5]], i64 0, i64 2
  230. ; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* %[[gep]], i8 42, i32 5
  231. ; ...promoted i8 store...
  232. ; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 0
  233. ; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* %[[gep]], i8 42, i32 2
  234. ; Entirely within the second overlap.
  235. call void @llvm.memcpy.p0i8.p0i8.i32(i8* %overlap2.2.1.i8, i8* %src, i32 5, i32 1, i1 false)
  236. ; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 1
  237. ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep]], i8* %src, i32 5
  238. ; Trailing past the second overlap.
  239. call void @llvm.memcpy.p0i8.p0i8.i32(i8* %overlap2.2.2.i8, i8* %src, i32 8, i32 1, i1 false)
  240. ; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 2
  241. ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep]], i8* %src, i32 5
  242. ; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 5
  243. ; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [85 x i8], [85 x i8]* %[[test3_a7]], i64 0, i64 0
  244. ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 3
  245. call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %b, i32 300, i32 1, i1 false)
  246. ; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [42 x i8], [42 x i8]* %[[test3_a1]], i64 0, i64 0
  247. ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %[[gep]], i32 42
  248. ; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %dst, i64 42
  249. ; CHECK-NEXT: store i8 0, i8* %[[gep]]
  250. ; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 43
  251. ; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [99 x i8], [99 x i8]* %[[test3_a2]], i64 0, i64 0
  252. ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 99
  253. ; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 142
  254. ; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 0
  255. ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 16
  256. ; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 158
  257. ; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [42 x i8], [42 x i8]* %[[test3_a4]], i64 0, i64 0
  258. ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 42
  259. ; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 200
  260. ; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a5]], i64 0, i64 0
  261. ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
  262. ; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %dst, i64 207
  263. ; CHECK-NEXT: store i8 42, i8* %[[gep]]
  264. ; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 208
  265. ; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 0
  266. ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
  267. ; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 215
  268. ; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [85 x i8], [85 x i8]* %[[test3_a7]], i64 0, i64 0
  269. ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 85
  270. ret void
  271. }
  272. define void @test4(i8* %dst, i8* %src) {
  273. ; CHECK-LABEL: @test4(
  274. entry:
  275. %a = alloca [100 x i8]
  276. ; CHECK-NOT: alloca
  277. ; CHECK: %[[test4_a1:.*]] = alloca [20 x i8]
  278. ; CHECK-NEXT: %[[test4_a2:.*]] = alloca [7 x i8]
  279. ; CHECK-NEXT: %[[test4_a3:.*]] = alloca [10 x i8]
  280. ; CHECK-NEXT: %[[test4_a4:.*]] = alloca [7 x i8]
  281. ; CHECK-NEXT: %[[test4_a5:.*]] = alloca [7 x i8]
  282. ; CHECK-NEXT: %[[test4_a6:.*]] = alloca [40 x i8]
  283. %b = getelementptr [100 x i8], [100 x i8]* %a, i64 0, i64 0
  284. call void @llvm.memcpy.p0i8.p0i8.i32(i8* %b, i8* %src, i32 100, i32 1, i1 false)
  285. ; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [20 x i8], [20 x i8]* %[[test4_a1]], i64 0, i64 0
  286. ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep]], i8* %src, i32 20
  287. ; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %src, i64 20
  288. ; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i16*
  289. ; CHECK-NEXT: %[[test4_r1:.*]] = load i16, i16* %[[bitcast]]
  290. ; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %src, i64 22
  291. ; CHECK-NEXT: %[[test4_r2:.*]] = load i8, i8* %[[gep]]
  292. ; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 23
  293. ; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a2]], i64 0, i64 0
  294. ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
  295. ; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 30
  296. ; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [10 x i8], [10 x i8]* %[[test4_a3]], i64 0, i64 0
  297. ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 10
  298. ; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %src, i64 40
  299. ; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i16*
  300. ; CHECK-NEXT: %[[test4_r3:.*]] = load i16, i16* %[[bitcast]]
  301. ; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %src, i64 42
  302. ; CHECK-NEXT: %[[test4_r4:.*]] = load i8, i8* %[[gep]]
  303. ; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 43
  304. ; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a4]], i64 0, i64 0
  305. ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
  306. ; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %src, i64 50
  307. ; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i16*
  308. ; CHECK-NEXT: %[[test4_r5:.*]] = load i16, i16* %[[bitcast]]
  309. ; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %src, i64 52
  310. ; CHECK-NEXT: %[[test4_r6:.*]] = load i8, i8* %[[gep]]
  311. ; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 53
  312. ; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a5]], i64 0, i64 0
  313. ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
  314. ; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 60
  315. ; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [40 x i8], [40 x i8]* %[[test4_a6]], i64 0, i64 0
  316. ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 40
  317. %a.src.1 = getelementptr [100 x i8], [100 x i8]* %a, i64 0, i64 20
  318. %a.dst.1 = getelementptr [100 x i8], [100 x i8]* %a, i64 0, i64 40
  319. call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a.dst.1, i8* %a.src.1, i32 10, i32 1, i1 false)
  320. ; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a4]], i64 0, i64 0
  321. ; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a2]], i64 0, i64 0
  322. ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
  323. ; Clobber a single element of the array, this should be promotable, and be deleted.
  324. %c = getelementptr [100 x i8], [100 x i8]* %a, i64 0, i64 42
  325. store i8 0, i8* %c
  326. %a.src.2 = getelementptr [100 x i8], [100 x i8]* %a, i64 0, i64 50
  327. call void @llvm.memmove.p0i8.p0i8.i32(i8* %a.dst.1, i8* %a.src.2, i32 10, i32 1, i1 false)
  328. ; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a4]], i64 0, i64 0
  329. ; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a5]], i64 0, i64 0
  330. ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
  331. call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %b, i32 100, i32 1, i1 false)
  332. ; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [20 x i8], [20 x i8]* %[[test4_a1]], i64 0, i64 0
  333. ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %[[gep]], i32 20
  334. ; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %dst, i64 20
  335. ; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i16*
  336. ; CHECK-NEXT: store i16 %[[test4_r1]], i16* %[[bitcast]]
  337. ; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %dst, i64 22
  338. ; CHECK-NEXT: store i8 %[[test4_r2]], i8* %[[gep]]
  339. ; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 23
  340. ; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a2]], i64 0, i64 0
  341. ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
  342. ; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 30
  343. ; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [10 x i8], [10 x i8]* %[[test4_a3]], i64 0, i64 0
  344. ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 10
  345. ; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %dst, i64 40
  346. ; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i16*
  347. ; CHECK-NEXT: store i16 %[[test4_r5]], i16* %[[bitcast]]
  348. ; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %dst, i64 42
  349. ; CHECK-NEXT: store i8 %[[test4_r6]], i8* %[[gep]]
  350. ; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 43
  351. ; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a4]], i64 0, i64 0
  352. ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
  353. ; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %dst, i64 50
  354. ; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i16*
  355. ; CHECK-NEXT: store i16 %[[test4_r5]], i16* %[[bitcast]]
  356. ; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %dst, i64 52
  357. ; CHECK-NEXT: store i8 %[[test4_r6]], i8* %[[gep]]
  358. ; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 53
  359. ; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a5]], i64 0, i64 0
  360. ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
  361. ; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 60
  362. ; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [40 x i8], [40 x i8]* %[[test4_a6]], i64 0, i64 0
  363. ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 40
  364. ret void
  365. }
  366. declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
  367. declare void @llvm.memcpy.p1i8.p0i8.i32(i8 addrspace(1)* nocapture, i8* nocapture, i32, i32, i1) nounwind
  368. declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
  369. declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind
  370. define i16 @test5() {
  371. ; CHECK-LABEL: @test5(
  372. ; CHECK-NOT: alloca float
  373. ; CHECK: %[[cast:.*]] = bitcast float 0.0{{.*}} to i32
  374. ; CHECK-NEXT: %[[shr:.*]] = lshr i32 %[[cast]], 16
  375. ; CHECK-NEXT: %[[trunc:.*]] = trunc i32 %[[shr]] to i16
  376. ; CHECK-NEXT: ret i16 %[[trunc]]
  377. entry:
  378. %a = alloca [4 x i8]
  379. %fptr = bitcast [4 x i8]* %a to float*
  380. store float 0.0, float* %fptr
  381. %ptr = getelementptr [4 x i8], [4 x i8]* %a, i32 0, i32 2
  382. %iptr = bitcast i8* %ptr to i16*
  383. %val = load i16, i16* %iptr
  384. ret i16 %val
  385. }
define i32 @test6() {
; A volatile memset cannot be deleted, so the alloca must stay; SROA instead
; rewrites the intrinsic into a volatile i32 store over a single i32 alloca.
; CHECK-LABEL: @test6(
; CHECK: alloca i32
; CHECK-NEXT: store volatile i32
; CHECK-NEXT: load i32, i32*
; CHECK-NEXT: ret i32
entry:
%a = alloca [4 x i8]
%ptr = getelementptr [4 x i8], [4 x i8]* %a, i32 0, i32 0
; Volatile: must be preserved as a real memory operation.
call void @llvm.memset.p0i8.i32(i8* %ptr, i8 42, i32 4, i32 1, i1 true)
%iptr = bitcast i8* %ptr to i32*
%val = load i32, i32* %iptr
ret i32 %val
}
define void @test7(i8* %src, i8* %dst) {
; Volatile memcpys into and out of the alloca are rewritten to volatile i32
; load/store pairs; the alloca itself is retained because the accesses are
; volatile and may not be removed.
; CHECK-LABEL: @test7(
; CHECK: alloca i32
; CHECK-NEXT: bitcast i8* %src to i32*
; CHECK-NEXT: load volatile i32, i32*
; CHECK-NEXT: store volatile i32
; CHECK-NEXT: bitcast i8* %dst to i32*
; CHECK-NEXT: load volatile i32, i32*
; CHECK-NEXT: store volatile i32
; CHECK-NEXT: ret
entry:
%a = alloca [4 x i8]
%ptr = getelementptr [4 x i8], [4 x i8]* %a, i32 0, i32 0
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr, i8* %src, i32 4, i32 1, i1 true)
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %ptr, i32 4, i32 1, i1 true)
ret void
}
  417. %S1 = type { i32, i32, [16 x i8] }
  418. %S2 = type { %S1*, %S2* }
define %S2 @test8(%S2* %s2) {
; The %new alloca is only used to stage two pointer fields before they are
; read back into insertvalues; SROA promotes it so the loaded field values
; flow directly into the returned aggregate with no alloca at all.
; CHECK-LABEL: @test8(
entry:
%new = alloca %S2
; CHECK-NOT: alloca
%s2.next.ptr = getelementptr %S2, %S2* %s2, i64 0, i32 1
%s2.next = load %S2*, %S2** %s2.next.ptr
; CHECK: %[[gep:.*]] = getelementptr %S2, %S2* %s2, i64 0, i32 1
; CHECK-NEXT: %[[next:.*]] = load %S2*, %S2** %[[gep]]
%s2.next.s1.ptr = getelementptr %S2, %S2* %s2.next, i64 0, i32 0
%s2.next.s1 = load %S1*, %S1** %s2.next.s1.ptr
%new.s1.ptr = getelementptr %S2, %S2* %new, i64 0, i32 0
store %S1* %s2.next.s1, %S1** %new.s1.ptr
%s2.next.next.ptr = getelementptr %S2, %S2* %s2.next, i64 0, i32 1
%s2.next.next = load %S2*, %S2** %s2.next.next.ptr
%new.next.ptr = getelementptr %S2, %S2* %new, i64 0, i32 1
store %S2* %s2.next.next, %S2** %new.next.ptr
; CHECK-NEXT: %[[gep:.*]] = getelementptr %S2, %S2* %[[next]], i64 0, i32 0
; CHECK-NEXT: %[[next_s1:.*]] = load %S1*, %S1** %[[gep]]
; CHECK-NEXT: %[[gep:.*]] = getelementptr %S2, %S2* %[[next]], i64 0, i32 1
; CHECK-NEXT: %[[next_next:.*]] = load %S2*, %S2** %[[gep]]
; The reloads from %new are forwarded from the stores above after promotion.
%new.s1 = load %S1*, %S1** %new.s1.ptr
%result1 = insertvalue %S2 undef, %S1* %new.s1, 0
; CHECK-NEXT: %[[result1:.*]] = insertvalue %S2 undef, %S1* %[[next_s1]], 0
%new.next = load %S2*, %S2** %new.next.ptr
%result2 = insertvalue %S2 %result1, %S2* %new.next, 1
; CHECK-NEXT: %[[result2:.*]] = insertvalue %S2 %[[result1]], %S2* %[[next_next]], 1
ret %S2 %result2
; CHECK-NEXT: ret %S2 %[[result2]]
}
define i64 @test9() {
; Ensure we can handle loads off the end of an alloca even when wrapped in
; weird bit casts and types. This is valid IR due to the alignment and masking
; off the bits past the end of the alloca.
;
; CHECK-LABEL: @test9(
; CHECK-NOT: alloca
; CHECK: %[[b2:.*]] = zext i8 26 to i64
; CHECK-NEXT: %[[s2:.*]] = shl i64 %[[b2]], 16
; CHECK-NEXT: %[[m2:.*]] = and i64 undef, -16711681
; CHECK-NEXT: %[[i2:.*]] = or i64 %[[m2]], %[[s2]]
; CHECK-NEXT: %[[b1:.*]] = zext i8 0 to i64
; CHECK-NEXT: %[[s1:.*]] = shl i64 %[[b1]], 8
; CHECK-NEXT: %[[m1:.*]] = and i64 %[[i2]], -65281
; CHECK-NEXT: %[[i1:.*]] = or i64 %[[m1]], %[[s1]]
; CHECK-NEXT: %[[b0:.*]] = zext i8 0 to i64
; CHECK-NEXT: %[[m0:.*]] = and i64 %[[i1]], -256
; CHECK-NEXT: %[[i0:.*]] = or i64 %[[m0]], %[[b0]]
; CHECK-NEXT: %[[result:.*]] = and i64 %[[i0]], 16777215
; CHECK-NEXT: ret i64 %[[result]]
entry:
%a = alloca { [3 x i8] }, align 8
%gep1 = getelementptr inbounds { [3 x i8] }, { [3 x i8] }* %a, i32 0, i32 0, i32 0
store i8 0, i8* %gep1, align 1
%gep2 = getelementptr inbounds { [3 x i8] }, { [3 x i8] }* %a, i32 0, i32 0, i32 1
store i8 0, i8* %gep2, align 1
%gep3 = getelementptr inbounds { [3 x i8] }, { [3 x i8] }* %a, i32 0, i32 0, i32 2
store i8 26, i8* %gep3, align 1
; The i64 load below reads 5 bytes past the 3-byte allocation; those bytes
; are undef and are discarded by the 16777215 (low 24-bit) mask.
%cast = bitcast { [3 x i8] }* %a to { i64 }*
%elt = getelementptr inbounds { i64 }, { i64 }* %cast, i32 0, i32 0
%load = load i64, i64* %elt
%result = and i64 %load, 16777215
ret i64 %result
}
define %S2* @test10() {
; Zero-filling an 8-byte alloca with memset and then reloading it as a
; pointer should fold away completely, yielding a null pointer return.
; CHECK-LABEL: @test10(
; CHECK-NOT: alloca %S2*
; CHECK: ret %S2* null
entry:
%a = alloca [8 x i8]
%ptr = getelementptr [8 x i8], [8 x i8]* %a, i32 0, i32 0
call void @llvm.memset.p0i8.i32(i8* %ptr, i8 0, i32 8, i32 1, i1 false)
; All-zero bytes reinterpreted as a pointer value fold to null.
%s2ptrptr = bitcast i8* %ptr to %S2**
%s2ptr = load %S2*, %S2** %s2ptrptr
ret %S2* %s2ptr
}
define i32 @test11() {
; Store-then-load through the alloca promotes to the constant 0 on both
; branches; note the %bad path indexes one element past the i32 alloca.
; CHECK-LABEL: @test11(
; CHECK-NOT: alloca
; CHECK: ret i32 0
entry:
%X = alloca i32
br i1 undef, label %good, label %bad
good:
%Y = getelementptr i32, i32* %X, i64 0
store i32 0, i32* %Y
%Z = load i32, i32* %Y
ret i32 %Z
bad:
; This GEP points one past the end of %X; store and load are still paired.
%Y2 = getelementptr i32, i32* %X, i64 1
store i32 0, i32* %Y2
%Z2 = load i32, i32* %Y2
ret i32 %Z2
}
define i8 @test12() {
; We fully promote these to the i24 load or store size, resulting in just masks
; and other operations that instcombine will fold, but no alloca.
;
; CHECK-LABEL: @test12(
entry:
%a = alloca [3 x i8]
%b = alloca [3 x i8]
; CHECK-NOT: alloca
%a0ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 0
store i8 0, i8* %a0ptr
%a1ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 1
store i8 0, i8* %a1ptr
%a2ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 2
store i8 0, i8* %a2ptr
; Whole-alloca i24 read of %a: each byte store becomes a zext/shl/mask/or
; insertion into the promoted i24 value, checked below.
%aiptr = bitcast [3 x i8]* %a to i24*
%ai = load i24, i24* %aiptr
; CHECK-NOT: store
; CHECK-NOT: load
; CHECK: %[[ext2:.*]] = zext i8 0 to i24
; CHECK-NEXT: %[[shift2:.*]] = shl i24 %[[ext2]], 16
; CHECK-NEXT: %[[mask2:.*]] = and i24 undef, 65535
; CHECK-NEXT: %[[insert2:.*]] = or i24 %[[mask2]], %[[shift2]]
; CHECK-NEXT: %[[ext1:.*]] = zext i8 0 to i24
; CHECK-NEXT: %[[shift1:.*]] = shl i24 %[[ext1]], 8
; CHECK-NEXT: %[[mask1:.*]] = and i24 %[[insert2]], -65281
; CHECK-NEXT: %[[insert1:.*]] = or i24 %[[mask1]], %[[shift1]]
; CHECK-NEXT: %[[ext0:.*]] = zext i8 0 to i24
; CHECK-NEXT: %[[mask0:.*]] = and i24 %[[insert1]], -256
; CHECK-NEXT: %[[insert0:.*]] = or i24 %[[mask0]], %[[ext0]]
; Whole-alloca i24 store into %b; the per-byte reloads become lshr/trunc
; extractions from the same i24 value.
%biptr = bitcast [3 x i8]* %b to i24*
store i24 %ai, i24* %biptr
%b0ptr = getelementptr [3 x i8], [3 x i8]* %b, i64 0, i32 0
%b0 = load i8, i8* %b0ptr
%b1ptr = getelementptr [3 x i8], [3 x i8]* %b, i64 0, i32 1
%b1 = load i8, i8* %b1ptr
%b2ptr = getelementptr [3 x i8], [3 x i8]* %b, i64 0, i32 2
%b2 = load i8, i8* %b2ptr
; CHECK-NOT: store
; CHECK-NOT: load
; CHECK: %[[trunc0:.*]] = trunc i24 %[[insert0]] to i8
; CHECK-NEXT: %[[shift1:.*]] = lshr i24 %[[insert0]], 8
; CHECK-NEXT: %[[trunc1:.*]] = trunc i24 %[[shift1]] to i8
; CHECK-NEXT: %[[shift2:.*]] = lshr i24 %[[insert0]], 16
; CHECK-NEXT: %[[trunc2:.*]] = trunc i24 %[[shift2]] to i8
%bsum0 = add i8 %b0, %b1
%bsum1 = add i8 %bsum0, %b2
ret i8 %bsum1
; CHECK: %[[sum0:.*]] = add i8 %[[trunc0]], %[[trunc1]]
; CHECK-NEXT: %[[sum1:.*]] = add i8 %[[sum0]], %[[trunc2]]
; CHECK-NEXT: ret i8 %[[sum1]]
}
define i32 @test13() {
; Ensure we don't crash and handle undefined loads that straddle the end of the
; allocation.
; CHECK-LABEL: @test13(
; CHECK: %[[value:.*]] = zext i8 0 to i16
; CHECK-NEXT: %[[ret:.*]] = zext i16 %[[value]] to i32
; CHECK-NEXT: ret i32 %[[ret]]
entry:
%a = alloca [3 x i8], align 2
%b0ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 0
store i8 0, i8* %b0ptr
%b1ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 1
store i8 0, i8* %b1ptr
%b2ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 2
store i8 0, i8* %b2ptr
; The i16 load at element offset 1 covers bytes 2-3: byte 3 lies past the
; 3-byte alloca, so half the load is undefined.
%iptrcast = bitcast [3 x i8]* %a to i16*
%iptrgep = getelementptr i16, i16* %iptrcast, i64 1
%i = load i16, i16* %iptrgep
%ret = zext i16 %i to i32
ret i32 %ret
}
  586. %test14.struct = type { [3 x i32] }
define void @test14(...) nounwind uwtable {
; This is a strange case where we split allocas into promotable partitions, but
; also gain enough data to prove they must be dead allocas due to GEPs that walk
; across two adjacent allocas. Test that we don't try to promote or otherwise
; do bad things to these dead allocas, they should just be removed.
; CHECK-LABEL: @test14(
; CHECK-NEXT: entry:
; CHECK-NEXT: ret void
entry:
%a = alloca %test14.struct
%p = alloca %test14.struct*
%0 = bitcast %test14.struct* %a to i8*
; Offset 12 is exactly the size of %test14.struct, i.e. one past the end of
; %a — the subsequent accesses through %2 are out of bounds of %a.
%1 = getelementptr i8, i8* %0, i64 12
%2 = bitcast i8* %1 to %test14.struct*
%3 = getelementptr inbounds %test14.struct, %test14.struct* %2, i32 0, i32 0
%4 = getelementptr inbounds %test14.struct, %test14.struct* %a, i32 0, i32 0
%5 = bitcast [3 x i32]* %3 to i32*
%6 = bitcast [3 x i32]* %4 to i32*
%7 = load i32, i32* %6, align 4
store i32 %7, i32* %5, align 4
%8 = getelementptr inbounds i32, i32* %5, i32 1
%9 = getelementptr inbounds i32, i32* %6, i32 1
%10 = load i32, i32* %9, align 4
store i32 %10, i32* %8, align 4
%11 = getelementptr inbounds i32, i32* %5, i32 2
%12 = getelementptr inbounds i32, i32* %6, i32 2
%13 = load i32, i32* %12, align 4
store i32 %13, i32* %11, align 4
ret void
}
define i32 @test15(i1 %flag) nounwind uwtable {
; Ensure that when there are dead instructions using an alloca that are not
; loads or stores we still delete them during partitioning and rewriting.
; Otherwise we'll go to promote them while they still have unpromotable uses.
; CHECK-LABEL: @test15(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label %loop
; CHECK: loop:
; CHECK-NEXT: br label %loop
entry:
%l0 = alloca i64
%l1 = alloca i64
%l2 = alloca i64
%l3 = alloca i64
br label %loop
loop:
%dead3 = phi i8* [ %gep3, %loop ], [ null, %entry ]
store i64 1879048192, i64* %l0, align 8
%bc0 = bitcast i64* %l0 to i8*
%gep0 = getelementptr i8, i8* %bc0, i64 3
%dead0 = bitcast i8* %gep0 to i64*
store i64 1879048192, i64* %l1, align 8
%bc1 = bitcast i64* %l1 to i8*
%gep1 = getelementptr i8, i8* %bc1, i64 3
%dead1 = getelementptr i8, i8* %gep1, i64 1
store i64 1879048192, i64* %l2, align 8
%bc2 = bitcast i64* %l2 to i8*
%gep2.1 = getelementptr i8, i8* %bc2, i64 1
%gep2.2 = getelementptr i8, i8* %bc2, i64 3
; Note that this select should get visited multiple times due to using two
; different GEPs off the same alloca. We should only delete it once.
%dead2 = select i1 %flag, i8* %gep2.1, i8* %gep2.2
store i64 1879048192, i64* %l3, align 8
%bc3 = bitcast i64* %l3 to i8*
%gep3 = getelementptr i8, i8* %bc3, i64 3
br label %loop
}
define void @test16(i8* %src, i8* %dst) {
; Ensure that we can promote an alloca of [3 x i8] to an i24 SSA value.
; CHECK-LABEL: @test16(
; CHECK-NOT: alloca
; CHECK: %[[srccast:.*]] = bitcast i8* %src to i24*
; CHECK-NEXT: load i24, i24* %[[srccast]]
; CHECK-NEXT: %[[dstcast:.*]] = bitcast i8* %dst to i24*
; CHECK-NEXT: store i24 0, i24* %[[dstcast]]
; CHECK-NEXT: ret void
entry:
%a = alloca [3 x i8]
%ptr = getelementptr [3 x i8], [3 x i8]* %a, i32 0, i32 0
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr, i8* %src, i32 4, i32 1, i1 false)
; The i24 0 store overwrites the whole alloca, so the copy out to %dst
; becomes a plain i24 store of 0 (see CHECK lines above).
%cast = bitcast i8* %ptr to i24*
store i24 0, i24* %cast
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %ptr, i32 4, i32 1, i1 false)
ret void
}
define void @test17(i8* %src, i8* %dst) {
; Ensure that we can rewrite unpromotable memcpys which extend past the end of
; the alloca.
; CHECK-LABEL: @test17(
; CHECK: %[[a:.*]] = alloca [3 x i8]
; CHECK-NEXT: %[[ptr:.*]] = getelementptr [3 x i8], [3 x i8]* %[[a]], i32 0, i32 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[ptr]], i8* %src,
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %[[ptr]],
; CHECK-NEXT: ret void
entry:
%a = alloca [3 x i8]
%ptr = getelementptr [3 x i8], [3 x i8]* %a, i32 0, i32 0
; Both copies are 4 bytes over a 3-byte alloca and are volatile, so they
; must be kept in place rather than promoted.
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr, i8* %src, i32 4, i32 1, i1 true)
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %ptr, i32 4, i32 1, i1 true)
ret void
}
define void @test18(i8* %src, i8* %dst, i32 %size) {
; Preserve transfer instrinsics with a variable size, even if they overlap with
; fixed size operations. Further, continue to split and promote allocas preceding
; the variable sized intrinsic.
; CHECK-LABEL: @test18(
; CHECK: %[[a:.*]] = alloca [34 x i8]
; CHECK: %[[srcgep1:.*]] = getelementptr inbounds i8, i8* %src, i64 4
; CHECK-NEXT: %[[srccast1:.*]] = bitcast i8* %[[srcgep1]] to i32*
; CHECK-NEXT: %[[srcload:.*]] = load i32, i32* %[[srccast1]]
; CHECK-NEXT: %[[agep1:.*]] = getelementptr inbounds [34 x i8], [34 x i8]* %[[a]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[agep1]], i8* %src, i32 %size,
; CHECK-NEXT: %[[agep2:.*]] = getelementptr inbounds [34 x i8], [34 x i8]* %[[a]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* %[[agep2]], i8 42, i32 %size,
; CHECK-NEXT: %[[dstcast1:.*]] = bitcast i8* %dst to i32*
; CHECK-NEXT: store i32 42, i32* %[[dstcast1]]
; CHECK-NEXT: %[[dstgep1:.*]] = getelementptr inbounds i8, i8* %dst, i64 4
; CHECK-NEXT: %[[dstcast2:.*]] = bitcast i8* %[[dstgep1]] to i32*
; CHECK-NEXT: store i32 %[[srcload]], i32* %[[dstcast2]]
; CHECK-NEXT: %[[agep3:.*]] = getelementptr inbounds [34 x i8], [34 x i8]* %[[a]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %[[agep3]], i32 %size,
; CHECK-NEXT: ret void
entry:
%a = alloca [42 x i8]
%ptr = getelementptr [42 x i8], [42 x i8]* %a, i32 0, i32 0
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr, i8* %src, i32 8, i32 1, i1 false)
; The tail of the alloca from offset 8 is pinned by the variable-sized
; transfers below and survives as the [34 x i8] alloca checked above.
%ptr2 = getelementptr [42 x i8], [42 x i8]* %a, i32 0, i32 8
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr2, i8* %src, i32 %size, i32 1, i1 false)
call void @llvm.memset.p0i8.i32(i8* %ptr2, i8 42, i32 %size, i32 1, i1 false)
%cast = bitcast i8* %ptr to i32*
store i32 42, i32* %cast
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %ptr, i32 8, i32 1, i1 false)
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %ptr2, i32 %size, i32 1, i1 false)
ret void
}
  722. %opaque = type opaque
define i32 @test19(%opaque* %x) {
; This input will cause us to try to compute a natural GEP when rewriting
; pointers in such a way that we try to GEP through the opaque type. Previously,
; a check for an unsized type was missing and this crashed. Ensure it behaves
; reasonably now.
; CHECK-LABEL: @test19(
; CHECK-NOT: alloca
; CHECK: ret i32 undef
entry:
%a = alloca { i64, i8* }
; %cast1 points at an opaque (unsized) type; the rewriter must not attempt
; to compute element offsets through it.
%cast1 = bitcast %opaque* %x to i8*
%cast2 = bitcast { i64, i8* }* %a to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %cast2, i8* %cast1, i32 16, i32 1, i1 false)
%gep = getelementptr inbounds { i64, i8* }, { i64, i8* }* %a, i32 0, i32 0
%val = load i64, i64* %gep
ret i32 undef
}
define i32 @test20() {
; Ensure we can track negative offsets (before the beginning of the alloca) and
; negative relative offsets from offsets starting past the end of the alloca.
; CHECK-LABEL: @test20(
; CHECK-NOT: alloca
; CHECK: %[[sum1:.*]] = add i32 1, 2
; CHECK: %[[sum2:.*]] = add i32 %[[sum1]], 3
; CHECK: ret i32 %[[sum2]]
entry:
%a = alloca [3 x i32]
%gep1 = getelementptr [3 x i32], [3 x i32]* %a, i32 0, i32 0
store i32 1, i32* %gep1
; -2 then +3 nets out to element 1, in bounds despite the negative step.
%gep2.1 = getelementptr [3 x i32], [3 x i32]* %a, i32 0, i32 -2
%gep2.2 = getelementptr i32, i32* %gep2.1, i32 3
store i32 2, i32* %gep2.2
; 14 then -12 nets out to element 2, in bounds despite starting past the end.
%gep3.1 = getelementptr [3 x i32], [3 x i32]* %a, i32 0, i32 14
%gep3.2 = getelementptr i32, i32* %gep3.1, i32 -12
store i32 3, i32* %gep3.2
%load1 = load i32, i32* %gep1
%load2 = load i32, i32* %gep2.2
%load3 = load i32, i32* %gep3.2
%sum1 = add i32 %load1, %load2
%sum2 = add i32 %sum1, %load3
ret i32 %sum2
}
  765. declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
define i8 @test21() {
; Test allocations and offsets which border on overflow of the int64_t used
; internally. This is really awkward to really test as LLVM doesn't really
; support such extreme constructs cleanly.
; CHECK-LABEL: @test21(
; CHECK-NOT: alloca
; CHECK: or i8 -1, -1
entry:
%a = alloca [2305843009213693951 x i8]
%gep0 = getelementptr [2305843009213693951 x i8], [2305843009213693951 x i8]* %a, i64 0, i64 2305843009213693949
store i8 255, i8* %gep0
; These GEP chains use offsets near INT64_MIN/INT64_MAX so the summed
; offsets exercise the signed-overflow handling of the offset tracker.
%gep1 = getelementptr [2305843009213693951 x i8], [2305843009213693951 x i8]* %a, i64 0, i64 -9223372036854775807
%gep2 = getelementptr i8, i8* %gep1, i64 -1
call void @llvm.memset.p0i8.i64(i8* %gep2, i8 0, i64 18446744073709551615, i32 1, i1 false)
%gep3 = getelementptr i8, i8* %gep1, i64 9223372036854775807
%gep4 = getelementptr i8, i8* %gep3, i64 9223372036854775807
%gep5 = getelementptr i8, i8* %gep4, i64 -6917529027641081857
store i8 255, i8* %gep5
%cast1 = bitcast i8* %gep4 to i32*
store i32 0, i32* %cast1
%load = load i8, i8* %gep0
%gep6 = getelementptr i8, i8* %gep0, i32 1
%load2 = load i8, i8* %gep6
%result = or i8 %load, %load2
ret i8 %result
}
  792. %PR13916.struct = type { i8 }
define void @PR13916.1() {
; Ensure that we handle overlapping memcpy intrinsics correctly, especially in
; the case where there is a directly identical value for both source and dest.
; CHECK: @PR13916.1
; CHECK-NOT: alloca
; CHECK: ret void
entry:
%a = alloca i8
; Self-copy: source and destination are the very same pointer value.
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a, i8* %a, i32 1, i32 1, i1 false)
%tmp2 = load i8, i8* %a
ret void
}
define void @PR13916.2() {
; Check whether we continue to handle them correctly when they start off with
; different pointer value chains, but during rewriting we coalesce them into the
; same value.
; CHECK: @PR13916.2
; CHECK-NOT: alloca
; CHECK: ret void
entry:
%a = alloca %PR13916.struct, align 1
br i1 undef, label %if.then, label %if.end
if.then:
; Two distinct bitcasts of the same alloca: source and dest of the memcpy
; only become identical once rewriting canonicalizes them.
%tmp0 = bitcast %PR13916.struct* %a to i8*
%tmp1 = bitcast %PR13916.struct* %a to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp0, i8* %tmp1, i32 1, i32 1, i1 false)
br label %if.end
if.end:
%gep = getelementptr %PR13916.struct, %PR13916.struct* %a, i32 0, i32 0
%tmp2 = load i8, i8* %gep
ret void
}
define void @PR13990() {
; Ensure we can handle cases where processing one alloca causes the other
; alloca to become dead and get deleted. This might crash or fail under
; Valgrind if we regress.
; CHECK-LABEL: @PR13990(
; CHECK-NOT: alloca
; CHECK: unreachable
; CHECK: unreachable
entry:
%tmp1 = alloca i8*
%tmp2 = alloca i8*
br i1 undef, label %bb1, label %bb2
bb1:
store i8* undef, i8** %tmp2
br i1 undef, label %bb2, label %bb3
bb2:
; The select ties the two allocas together; deleting one while rewriting
; the other is the interaction this test guards.
%tmp50 = select i1 undef, i8** %tmp2, i8** %tmp1
br i1 undef, label %bb3, label %bb4
bb3:
unreachable
bb4:
unreachable
}
define double @PR13969(double %x) {
; Check that we detect when promotion will un-escape an alloca and iterate to
; re-try running SROA over that alloca. Without that, the two allocas that are
; stored into a dead alloca don't get rewritten and promoted.
; CHECK-LABEL: @PR13969(
entry:
%a = alloca double
%b = alloca double*
%c = alloca double
; CHECK-NOT: alloca
; %a and %c escape only into %b, which is itself dead; once %b is removed
; they become promotable and the whole function folds to returning %x.
store double %x, double* %a
store double* %c, double** %b
store double* %a, double** %b
store double %x, double* %c
%ret = load double, double* %a
; CHECK-NOT: store
; CHECK-NOT: load
ret double %ret
; CHECK: ret double %x
}
  868. %PR14034.struct = type { { {} }, i32, %PR14034.list }
  869. %PR14034.list = type { %PR14034.list*, %PR14034.list* }
define void @PR14034() {
; This test case tries to form GEPs into the empty leading struct members, and
; subsequently crashed (under valgrind) before we fixed the PR. The important
; thing is to handle empty structs gracefully.
; CHECK-LABEL: @PR14034(
entry:
%a = alloca %PR14034.struct
; The struct's first member is the empty aggregate { {} }; GEPs around it
; must not confuse offset computation.
%list = getelementptr %PR14034.struct, %PR14034.struct* %a, i32 0, i32 2
%prev = getelementptr %PR14034.list, %PR14034.list* %list, i32 0, i32 1
store %PR14034.list* undef, %PR14034.list** %prev
%cast0 = bitcast %PR14034.struct* undef to i8*
%cast1 = bitcast %PR14034.struct* %a to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %cast0, i8* %cast1, i32 12, i32 0, i1 false)
ret void
}
define i32 @test22(i32 %x) {
; Test that SROA and promotion is not confused by a grab bag mixture of pointer
; types involving wrapper aggregates and zero-length aggregate members.
; CHECK-LABEL: @test22(
entry:
%a1 = alloca { { [1 x { i32 }] } }
%a2 = alloca { {}, { float }, [0 x i8] }
%a3 = alloca { [0 x i8], { [0 x double], [1 x [1 x <4 x i8>]], {} }, { { {} } } }
; CHECK-NOT: alloca
%wrap1 = insertvalue [1 x { i32 }] undef, i32 %x, 0, 0
%gep1 = getelementptr { { [1 x { i32 }] } }, { { [1 x { i32 }] } }* %a1, i32 0, i32 0, i32 0
store [1 x { i32 }] %wrap1, [1 x { i32 }]* %gep1
%gep2 = getelementptr { { [1 x { i32 }] } }, { { [1 x { i32 }] } }* %a1, i32 0, i32 0
%ptrcast1 = bitcast { [1 x { i32 }] }* %gep2 to { [1 x { float }] }*
%load1 = load { [1 x { float }] }, { [1 x { float }] }* %ptrcast1
%unwrap1 = extractvalue { [1 x { float }] } %load1, 0, 0
%wrap2 = insertvalue { {}, { float }, [0 x i8] } undef, { float } %unwrap1, 1
store { {}, { float }, [0 x i8] } %wrap2, { {}, { float }, [0 x i8] }* %a2
%gep3 = getelementptr { {}, { float }, [0 x i8] }, { {}, { float }, [0 x i8] }* %a2, i32 0, i32 1, i32 0
%ptrcast2 = bitcast float* %gep3 to <4 x i8>*
%load3 = load <4 x i8>, <4 x i8>* %ptrcast2
%valcast1 = bitcast <4 x i8> %load3 to i32
%wrap3 = insertvalue [1 x [1 x i32]] undef, i32 %valcast1, 0, 0
%wrap4 = insertvalue { [1 x [1 x i32]], {} } undef, [1 x [1 x i32]] %wrap3, 0
%gep4 = getelementptr { [0 x i8], { [0 x double], [1 x [1 x <4 x i8>]], {} }, { { {} } } }, { [0 x i8], { [0 x double], [1 x [1 x <4 x i8>]], {} }, { { {} } } }* %a3, i32 0, i32 1
%ptrcast3 = bitcast { [0 x double], [1 x [1 x <4 x i8>]], {} }* %gep4 to { [1 x [1 x i32]], {} }*
store { [1 x [1 x i32]], {} } %wrap4, { [1 x [1 x i32]], {} }* %ptrcast3
%gep5 = getelementptr { [0 x i8], { [0 x double], [1 x [1 x <4 x i8>]], {} }, { { {} } } }, { [0 x i8], { [0 x double], [1 x [1 x <4 x i8>]], {} }, { { {} } } }* %a3, i32 0, i32 1, i32 1, i32 0
%ptrcast4 = bitcast [1 x <4 x i8>]* %gep5 to { {}, float, {} }*
%load4 = load { {}, float, {} }, { {}, float, {} }* %ptrcast4
%unwrap2 = extractvalue { {}, float, {} } %load4, 1
%valcast2 = bitcast float %unwrap2 to i32
ret i32 %valcast2
; CHECK: ret i32
}
define void @PR14059.1(double* %d) {
; In PR14059 a peculiar construct was identified as something that is used
; pervasively in ARM's ABI-calling-convention lowering: the passing of a struct
; of doubles via an array of i32 in order to place the data into integer
; registers. This in turn was missed as an optimization by SROA due to the
; partial loads and stores of integers to the double alloca we were trying to
; form and promote. The solution is to widen the integer operations to be
; whole-alloca operations, and perform the appropriate bitcasting on the
; *values* rather than the pointers. When this works, partial reads and writes
; via integers can be promoted away.
; CHECK: @PR14059.1
; CHECK-NOT: alloca
; CHECK: ret void
entry:
%X.sroa.0.i = alloca double, align 8
%0 = bitcast double* %X.sroa.0.i to i8*
call void @llvm.lifetime.start(i64 -1, i8* %0)
; Store to the low 32-bits...
%X.sroa.0.0.cast2.i = bitcast double* %X.sroa.0.i to i32*
store i32 0, i32* %X.sroa.0.0.cast2.i, align 8
; Also use a memset to the middle 32-bits for fun.
%X.sroa.0.2.raw_idx2.i = getelementptr inbounds i8, i8* %0, i32 2
call void @llvm.memset.p0i8.i64(i8* %X.sroa.0.2.raw_idx2.i, i8 0, i64 4, i32 1, i1 false)
; Or a memset of the whole thing.
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 8, i32 1, i1 false)
; Write to the high 32-bits with a memcpy.
%X.sroa.0.4.raw_idx4.i = getelementptr inbounds i8, i8* %0, i32 4
%d.raw = bitcast double* %d to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %X.sroa.0.4.raw_idx4.i, i8* %d.raw, i32 4, i32 1, i1 false)
; Store to the high 32-bits...
%X.sroa.0.4.cast5.i = bitcast i8* %X.sroa.0.4.raw_idx4.i to i32*
store i32 1072693248, i32* %X.sroa.0.4.cast5.i, align 4
; Do the actual math: the whole-width double load must see the combined
; effect of all the partial integer writes above.
%X.sroa.0.0.load1.i = load double, double* %X.sroa.0.i, align 8
%accum.real.i = load double, double* %d, align 8
%add.r.i = fadd double %accum.real.i, %X.sroa.0.0.load1.i
store double %add.r.i, double* %d, align 8
call void @llvm.lifetime.end(i64 -1, i8* %0)
ret void
}
define i64 @PR14059.2({ float, float }* %phi) {
; Check that SROA can split up alloca-wide integer loads and stores where the
; underlying alloca has smaller components that are accessed independently. This
; shows up particularly with ABI lowering patterns coming out of Clang that rely
; on the particular register placement of a single large integer return value.
; CHECK: @PR14059.2
entry:
%retval = alloca { float, float }, align 4
; CHECK-NOT: alloca
%0 = bitcast { float, float }* %retval to i64*
store i64 0, i64* %0
; CHECK-NOT: store
%phi.realp = getelementptr inbounds { float, float }, { float, float }* %phi, i32 0, i32 0
%phi.real = load float, float* %phi.realp
%phi.imagp = getelementptr inbounds { float, float }, { float, float }* %phi, i32 0, i32 1
%phi.imag = load float, float* %phi.imagp
; CHECK: %[[realp:.*]] = getelementptr inbounds { float, float }, { float, float }* %phi, i32 0, i32 0
; CHECK-NEXT: %[[real:.*]] = load float, float* %[[realp]]
; CHECK-NEXT: %[[imagp:.*]] = getelementptr inbounds { float, float }, { float, float }* %phi, i32 0, i32 1
; CHECK-NEXT: %[[imag:.*]] = load float, float* %[[imagp]]
%real = getelementptr inbounds { float, float }, { float, float }* %retval, i32 0, i32 0
%imag = getelementptr inbounds { float, float }, { float, float }* %retval, i32 0, i32 1
store float %phi.real, float* %real
store float %phi.imag, float* %imag
; The wide i64 reload is rebuilt from the two float halves: each float is
; bitcast to i32 and inserted into its 32-bit lane of the i64.
; CHECK-NEXT: %[[real_convert:.*]] = bitcast float %[[real]] to i32
; CHECK-NEXT: %[[imag_convert:.*]] = bitcast float %[[imag]] to i32
; CHECK-NEXT: %[[imag_ext:.*]] = zext i32 %[[imag_convert]] to i64
; CHECK-NEXT: %[[imag_shift:.*]] = shl i64 %[[imag_ext]], 32
; CHECK-NEXT: %[[imag_mask:.*]] = and i64 undef, 4294967295
; CHECK-NEXT: %[[imag_insert:.*]] = or i64 %[[imag_mask]], %[[imag_shift]]
; CHECK-NEXT: %[[real_ext:.*]] = zext i32 %[[real_convert]] to i64
; CHECK-NEXT: %[[real_mask:.*]] = and i64 %[[imag_insert]], -4294967296
; CHECK-NEXT: %[[real_insert:.*]] = or i64 %[[real_mask]], %[[real_ext]]
%1 = load i64, i64* %0, align 1
ret i64 %1
; CHECK-NEXT: ret i64 %[[real_insert]]
}
define void @PR14105({ [16 x i8] }* %ptr) {
; Ensure that when rewriting the GEP index '-1' for this alloca we preserve its
; sign as negative. We use a volatile memcpy to ensure promotion never actually
; occurs.
; CHECK-LABEL: @PR14105(
entry:
%a = alloca { [16 x i8] }, align 8
; CHECK: alloca [16 x i8], align 8
%gep = getelementptr inbounds { [16 x i8] }, { [16 x i8] }* %ptr, i64 -1
; CHECK-NEXT: getelementptr inbounds { [16 x i8] }, { [16 x i8] }* %ptr, i64 -1, i32 0, i64 0
%cast1 = bitcast { [16 x i8 ] }* %gep to i8*
%cast2 = bitcast { [16 x i8 ] }* %a to i8*
; Volatile so the alloca cannot be promoted and the rewritten GEP survives.
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %cast1, i8* %cast2, i32 16, i32 8, i1 true)
ret void
; CHECK: ret
}
define void @PR14105_as1({ [16 x i8] } addrspace(1)* %ptr) {
; Make sure the right address space pointer is used for the type check; note
; the rewritten GEP indices use i16, matching addrspace(1)'s pointer size.
; CHECK-LABEL: @PR14105_as1(
entry:
%a = alloca { [16 x i8] }, align 8
; CHECK: alloca [16 x i8], align 8
%gep = getelementptr inbounds { [16 x i8] }, { [16 x i8] } addrspace(1)* %ptr, i64 -1
; CHECK-NEXT: getelementptr inbounds { [16 x i8] }, { [16 x i8] } addrspace(1)* %ptr, i16 -1, i32 0, i16 0
%cast1 = bitcast { [16 x i8 ] } addrspace(1)* %gep to i8 addrspace(1)*
%cast2 = bitcast { [16 x i8 ] }* %a to i8*
; Volatile cross-address-space copy keeps the alloca from being promoted.
call void @llvm.memcpy.p1i8.p0i8.i32(i8 addrspace(1)* %cast1, i8* %cast2, i32 16, i32 8, i1 true)
ret void
; CHECK: ret
}
define void @PR14465() {
; Ensure that we don't crash when analyzing a alloca larger than the maximum
; integer type width (MAX_INT_BITS) supported by llvm (1048576*32 > (1<<23)-1).
; CHECK-LABEL: @PR14465(
%stack = alloca [1048576 x i32], align 16
; CHECK: alloca [1048576 x i32]
%cast = bitcast [1048576 x i32]* %stack to i8*
; 4194304 bytes = the full 1048576 x i32 allocation.
call void @llvm.memset.p0i8.i64(i8* %cast, i8 -2, i64 4194304, i32 16, i1 false)
ret void
; CHECK: ret
}
define void @PR14548(i1 %x) {
; Handle a mixture of i1 and i8 loads and stores to allocas. This particular
; pattern caused crashes and invalid output in the PR, and its nature will
; trigger a mixture in several permutations as we resolve each alloca
; iteratively.
; Note that we don't do a particularly good *job* of handling these mixtures,
; but the hope is that this is very rare.
; CHECK-LABEL: @PR14548(
entry:
%a = alloca <{ i1 }>, align 8
%b = alloca <{ i1 }>, align 8
; CHECK: %[[a:.*]] = alloca i8, align 8
; CHECK-NEXT: %[[b:.*]] = alloca i8, align 8
; %b is written as an i1 but read back as an i8, forcing the mixed-width
; rewriting the PR is about.
%b.i1 = bitcast <{ i1 }>* %b to i1*
store i1 %x, i1* %b.i1, align 8
%b.i8 = bitcast <{ i1 }>* %b to i8*
%foo = load i8, i8* %b.i8, align 1
; CHECK-NEXT: %[[b_cast:.*]] = bitcast i8* %[[b]] to i1*
; CHECK-NEXT: store i1 %x, i1* %[[b_cast]], align 8
; CHECK-NEXT: {{.*}} = load i8, i8* %[[b]], align 8
%a.i8 = bitcast <{ i1 }>* %a to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a.i8, i8* %b.i8, i32 1, i32 1, i1 false) nounwind
%bar = load i8, i8* %a.i8, align 1
%a.i1 = getelementptr inbounds <{ i1 }>, <{ i1 }>* %a, i32 0, i32 0
%baz = load i1, i1* %a.i1, align 1
; CHECK-NEXT: %[[copy:.*]] = load i8, i8* %[[b]], align 8
; CHECK-NEXT: store i8 %[[copy]], i8* %[[a]], align 8
; CHECK-NEXT: {{.*}} = load i8, i8* %[[a]], align 8
; CHECK-NEXT: %[[a_cast:.*]] = bitcast i8* %[[a]] to i1*
; CHECK-NEXT: {{.*}} = load i1, i1* %[[a_cast]], align 8
ret void
}
  1070. define <3 x i8> @PR14572.1(i32 %x) {
  1071. ; Ensure that a split integer store which is wider than the type size of the
  1072. ; alloca (relying on the alloc size padding) doesn't trigger an assert.
  ; NOTE(review): this test uses a bare CHECK line for the function name rather
  ; than the CHECK-LABEL form used by the surrounding tests.
  1073. ; CHECK: @PR14572.1
  1074. entry:
  1075. %a = alloca <3 x i8>, align 4
  1076. ; CHECK-NOT: alloca
  1077. %cast = bitcast <3 x i8>* %a to i32*
  ; The 4-byte i32 store is wider than the 3-byte <3 x i8> type and relies on
  ; the alloca's alloc-size padding.
  1078. store i32 %x, i32* %cast, align 1
  1079. %y = load <3 x i8>, <3 x i8>* %a, align 4
  1080. ret <3 x i8> %y
  1081. ; CHECK: ret <3 x i8>
  1082. }
  1083. define i32 @PR14572.2(<3 x i8> %x) {
  1084. ; Ensure that a split integer load which is wider than the type size of the
  1085. ; alloca (relying on the alloc size padding) doesn't trigger an assert.
  ; NOTE(review): this test uses a bare CHECK line for the function name rather
  ; than the CHECK-LABEL form used by the surrounding tests.
  1086. ; CHECK: @PR14572.2
  1087. entry:
  1088. %a = alloca <3 x i8>, align 4
  1089. ; CHECK-NOT: alloca
  1090. store <3 x i8> %x, <3 x i8>* %a, align 1
  1091. %cast = bitcast <3 x i8>* %a to i32*
  ; The 4-byte i32 load is wider than the 3-byte <3 x i8> type and relies on
  ; the alloca's alloc-size padding.
  1092. %y = load i32, i32* %cast, align 4
  1093. ret i32 %y
  1094. ; CHECK: ret i32
  1095. }
  1096. define i32 @PR14601(i32 %x) {
  1097. ; Don't try to form a promotable integer alloca when there is a variable length
  1098. ; memory intrinsic.
  1099. ; CHECK-LABEL: @PR14601(
  1100. entry:
  1101. %a = alloca i32
  1102. ; CHECK: alloca
  1103. %a.i8 = bitcast i32* %a to i8*
  ; The memset length is the runtime value %x, not a constant, so the alloca
  ; must be left in place.
  1104. call void @llvm.memset.p0i8.i32(i8* %a.i8, i8 0, i32 %x, i32 1, i1 false)
  1105. %v = load i32, i32* %a
  1106. ret i32 %v
  1107. }
  1108. define void @PR15674(i8* %data, i8* %src, i32 %size) {
  1109. ; Arrange (via control flow) to have unmerged stores of a particular width to
  1110. ; an alloca where we incrementally store from the end of the array toward the
  1111. ; beginning of the array. Ensure that the final integer store, despite being
  1112. ; convertable to the integer type that we end up promoting this alloca toward,
  1113. ; doesn't get widened to a full alloca store.
  1114. ; CHECK-LABEL: @PR15674(
  1115. entry:
  1116. %tmp = alloca [4 x i8], align 1
  1117. ; CHECK: alloca i32
  1118. switch i32 %size, label %end [
  1119. i32 4, label %bb4
  1120. i32 3, label %bb3
  1121. i32 2, label %bb2
  1122. i32 1, label %bb1
  1123. ]
  1124. bb4:
  1125. %src.gep3 = getelementptr inbounds i8, i8* %src, i32 3
  1126. %src.3 = load i8, i8* %src.gep3
  1127. %tmp.gep3 = getelementptr inbounds [4 x i8], [4 x i8]* %tmp, i32 0, i32 3
  1128. store i8 %src.3, i8* %tmp.gep3
  1129. ; CHECK: store i8
  1130. br label %bb3
  1131. bb3:
  1132. %src.gep2 = getelementptr inbounds i8, i8* %src, i32 2
  1133. %src.2 = load i8, i8* %src.gep2
  1134. %tmp.gep2 = getelementptr inbounds [4 x i8], [4 x i8]* %tmp, i32 0, i32 2
  1135. store i8 %src.2, i8* %tmp.gep2
  1136. ; CHECK: store i8
  1137. br label %bb2
  1138. bb2:
  1139. %src.gep1 = getelementptr inbounds i8, i8* %src, i32 1
  1140. %src.1 = load i8, i8* %src.gep1
  1141. %tmp.gep1 = getelementptr inbounds [4 x i8], [4 x i8]* %tmp, i32 0, i32 1
  1142. store i8 %src.1, i8* %tmp.gep1
  1143. ; CHECK: store i8
  1144. br label %bb1
  1145. bb1:
  1146. %src.gep0 = getelementptr inbounds i8, i8* %src, i32 0
  1147. %src.0 = load i8, i8* %src.gep0
  1148. %tmp.gep0 = getelementptr inbounds [4 x i8], [4 x i8]* %tmp, i32 0, i32 0
  1149. store i8 %src.0, i8* %tmp.gep0
  1150. ; CHECK: store i8
  1151. br label %end
  1152. end:
  1153. %tmp.raw = bitcast [4 x i8]* %tmp to i8*
  ; The copy out of the alloca has the variable length %size, so the alloca is
  ; rewritten as an i32 alloca (matched above) rather than fully promoted.
  1154. call void @llvm.memcpy.p0i8.p0i8.i32(i8* %data, i8* %tmp.raw, i32 %size, i32 1, i1 false)
  1155. ret void
  1156. ; CHECK: ret void
  1157. }
  1158. define void @PR15805(i1 %a, i1 %b) {
  ; Both selects choose between pointers into the same alloca (in fact %c
  ; appears on both arms), so the load resolves and the alloca is removed.
  1159. ; CHECK-LABEL: @PR15805(
  1160. ; CHECK-NOT: alloca
  1161. ; CHECK: ret void
  1162. %c = alloca i64, align 8
  1163. %p.0.c = select i1 undef, i64* %c, i64* %c
  1164. %cond.in = select i1 undef, i64* %p.0.c, i64* %c
  1165. %cond = load i64, i64* %cond.in, align 8
  1166. ret void
  1167. }
  1168. define void @PR15805.1(i1 %a, i1 %b) {
  1169. ; Same as the normal PR15805, but rigged to place the use before the def inside
  1170. ; of looping unreachable code. This helps ensure that we aren't sensitive to the
  1171. ; order in which the uses of the alloca are visited.
  1172. ;
  1173. ; CHECK-LABEL: @PR15805.1(
  1174. ; CHECK-NOT: alloca
  1175. ; CHECK: ret void
  1176. %c = alloca i64, align 8
  1177. br label %exit
  1178. loop:
  ; %loop is unreachable from entry (entry branches straight to %exit and %loop
  ; is only a successor of itself), which is why the use-before-def is legal.
  1179. %cond.in = select i1 undef, i64* %c, i64* %p.0.c
  1180. %p.0.c = select i1 undef, i64* %c, i64* %c
  1181. %cond = load i64, i64* %cond.in, align 8
  1182. br i1 undef, label %loop, label %exit
  1183. exit:
  1184. ret void
  1185. }
  1186. define void @PR16651.1(i8* %a) {
  1187. ; This test case caused a crash due to the volatile memcpy in combination with
  1188. ; lowering to integer loads and stores of a width other than that of the original
  1189. ; memcpy.
  1190. ;
  1191. ; CHECK-LABEL: @PR16651.1(
  1192. ; CHECK: alloca i16
  1193. ; CHECK: alloca i8
  1194. ; CHECK: alloca i8
  1195. ; CHECK: unreachable
  1196. entry:
  1197. %b = alloca i32, align 4
  1198. %b.cast = bitcast i32* %b to i8*
  ; The memcpy is volatile (final i1 argument is true).
  1199. call void @llvm.memcpy.p0i8.p0i8.i32(i8* %b.cast, i8* %a, i32 4, i32 4, i1 true)
  1200. %b.gep = getelementptr inbounds i8, i8* %b.cast, i32 2
  1201. load i8, i8* %b.gep, align 2
  1202. unreachable
  1203. }
  1204. define void @PR16651.2() {
  1205. ; This test case caused a crash due to failing to promote given a select that
  1206. ; can't be speculated. It shouldn't be promoted, but we missed that fact when
  1207. ; analyzing whether we could form a vector promotion because that code didn't
  1208. ; bail on select instructions.
  1209. ;
  1210. ; CHECK-LABEL: @PR16651.2(
  1211. ; CHECK: alloca <2 x float>
  1212. ; CHECK: ret void
  1213. entry:
  1214. %tv1 = alloca { <2 x float>, <2 x float> }, align 8
  ; %0 addresses the second struct member; %1 addresses that member's first
  ; element.
  1215. %0 = getelementptr { <2 x float>, <2 x float> }, { <2 x float>, <2 x float> }* %tv1, i64 0, i32 1
  1216. store <2 x float> undef, <2 x float>* %0, align 8
  1217. %1 = getelementptr inbounds { <2 x float>, <2 x float> }, { <2 x float>, <2 x float> }* %tv1, i64 0, i32 1, i64 0
  ; Selecting between null and a pointer into the alloca cannot be speculated.
  1218. %cond105.in.i.i = select i1 undef, float* null, float* %1
  1219. %cond105.i.i = load float, float* %cond105.in.i.i, align 8
  1220. ret void
  1221. }
  1222. define void @test23(i32 %x) {
  ; The memcpy destination GEP (%gep1) points one i32 past the end of the
  ; 4-byte alloca while the source (%gep0) is the alloca itself; the alloca is
  ; still expected to be eliminated entirely.
  1223. ; CHECK-LABEL: @test23(
  1224. ; CHECK-NOT: alloca
  1225. ; CHECK: ret void
  1226. entry:
  1227. %a = alloca i32, align 4
  1228. store i32 %x, i32* %a, align 4
  1229. %gep1 = getelementptr inbounds i32, i32* %a, i32 1
  1230. %gep0 = getelementptr inbounds i32, i32* %a, i32 0
  1231. %cast1 = bitcast i32* %gep1 to i8*
  1232. %cast0 = bitcast i32* %gep0 to i8*
  1233. call void @llvm.memcpy.p0i8.p0i8.i32(i8* %cast1, i8* %cast0, i32 4, i32 1, i1 false)
  1234. ret void
  1235. }
  1236. define void @PR18615() {
  ; Regression test: a memcpy whose destination is undef and whose source is a
  ; negative GEP off the alloca (one byte before it); the alloca should still
  ; be deleted.
  1237. ; CHECK-LABEL: @PR18615(
  1238. ; CHECK-NOT: alloca
  1239. ; CHECK: ret void
  1240. entry:
  1241. %f = alloca i8
  1242. %gep = getelementptr i8, i8* %f, i64 -1
  1243. call void @llvm.memcpy.p0i8.p0i8.i32(i8* undef, i8* %gep, i32 1, i32 1, i1 false)
  1244. ret void
  1245. }
  1246. define void @test24(i8* %src, i8* %dst) {
  ; Both memcpys are volatile, so they must not be elided; they are rewritten
  ; into the volatile i64 load/store pairs matched below, preserving the
  ; original 1-byte alignment on the external side and 16 on the alloca side.
  1247. ; CHECK-LABEL: @test24(
  1248. ; CHECK: alloca i64, align 16
  1249. ; CHECK: load volatile i64, i64* %{{[^,]*}}, align 1
  1250. ; CHECK: store volatile i64 %{{[^,]*}}, i64* %{{[^,]*}}, align 16
  1251. ; CHECK: load volatile i64, i64* %{{[^,]*}}, align 16
  1252. ; CHECK: store volatile i64 %{{[^,]*}}, i64* %{{[^,]*}}, align 1
  1253. entry:
  1254. %a = alloca i64, align 16
  1255. %ptr = bitcast i64* %a to i8*
  1256. call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr, i8* %src, i32 8, i32 1, i1 true)
  1257. call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %ptr, i32 8, i32 1, i1 true)
  1258. ret void
  1259. }
  1260. define float @test25() {
  1261. ; Check that we split up stores in order to promote the smaller SSA values. These types
  1262. ; of patterns can arise because LLVM maps small memcpy's to integer load and
  1263. ; stores. If we get a memcpy of an aggregate (such as C and C++ frontends would
  1264. ; produce, but so might any language frontend), this will in many cases turn into
  1265. ; an integer load and store. SROA needs to be extremely powerful to correctly
  1266. ; handle these cases and form splitable and promotable SSA values.
  1267. ;
  1268. ; CHECK-LABEL: @test25(
  1269. ; CHECK-NOT: alloca
  1270. ; CHECK: %[[F1:.*]] = bitcast i32 0 to float
  1271. ; CHECK: %[[F2:.*]] = bitcast i32 1065353216 to float
  1272. ; CHECK: %[[SUM:.*]] = fadd float %[[F1]], %[[F2]]
  1273. ; CHECK: ret float %[[SUM]]
  1274. entry:
  1275. %a = alloca i64
  1276. %b = alloca i64
  1277. %a.cast = bitcast i64* %a to [2 x float]*
  1278. %a.gep1 = getelementptr [2 x float], [2 x float]* %a.cast, i32 0, i32 0
  1279. %a.gep2 = getelementptr [2 x float], [2 x float]* %a.cast, i32 0, i32 1
  1280. %b.cast = bitcast i64* %b to [2 x float]*
  1281. %b.gep1 = getelementptr [2 x float], [2 x float]* %b.cast, i32 0, i32 0
  1282. %b.gep2 = getelementptr [2 x float], [2 x float]* %b.cast, i32 0, i32 1
  1283. store float 0.0, float* %a.gep1
  1284. store float 1.0, float* %a.gep2
  ; The whole-alloca i64 load/store pair transfers both floats between the two
  ; allocas at once and must be split to promote the float halves.
  1285. %v = load i64, i64* %a
  1286. store i64 %v, i64* %b
  1287. %f1 = load float, float* %b.gep1
  1288. %f2 = load float, float* %b.gep2
  1289. %ret = fadd float %f1, %f2
  1290. ret float %ret
  1291. }
  1292. @complex1 = external global [2 x float]
  1293. @complex2 = external global [2 x float]
  1294. define void @test26() {
  1295. ; Test a case of splitting up loads and stores against globals.
  1296. ;
  1297. ; CHECK-LABEL: @test26(
  1298. ; CHECK-NOT: alloca
  1299. ; CHECK: %[[L1:.*]] = load i32, i32* bitcast
  1300. ; CHECK: %[[L2:.*]] = load i32, i32* bitcast
  1301. ; CHECK: %[[F1:.*]] = bitcast i32 %[[L1]] to float
  1302. ; CHECK: %[[F2:.*]] = bitcast i32 %[[L2]] to float
  1303. ; CHECK: %[[SUM:.*]] = fadd float %[[F1]], %[[F2]]
  1304. ; CHECK: %[[C1:.*]] = bitcast float %[[SUM]] to i32
  1305. ; CHECK: %[[C2:.*]] = bitcast float %[[SUM]] to i32
  1306. ; CHECK: store i32 %[[C1]], i32* bitcast
  1307. ; CHECK: store i32 %[[C2]], i32* bitcast
  1308. ; CHECK: ret void
  1309. entry:
  1310. %a = alloca i64
  1311. %a.cast = bitcast i64* %a to [2 x float]*
  1312. %a.gep1 = getelementptr [2 x float], [2 x float]* %a.cast, i32 0, i32 0
  1313. %a.gep2 = getelementptr [2 x float], [2 x float]* %a.cast, i32 0, i32 1
  ; i64-wide transfers in from @complex1 and out to @complex2 must each be
  ; split into two i32 halves so the float elements can be promoted.
  1314. %v1 = load i64, i64* bitcast ([2 x float]* @complex1 to i64*)
  1315. store i64 %v1, i64* %a
  1316. %f1 = load float, float* %a.gep1
  1317. %f2 = load float, float* %a.gep2
  1318. %sum = fadd float %f1, %f2
  1319. store float %sum, float* %a.gep1
  1320. store float %sum, float* %a.gep2
  1321. %v2 = load i64, i64* %a
  1322. store i64 %v2, i64* bitcast ([2 x float]* @complex2 to i64*)
  1323. ret void
  1324. }
  1325. define float @test27() {
  1326. ; Another, more complex case of splittable i64 loads and stores. This example
  1327. ; is a particularly challenging one because the load and store both point into
  1328. ; the alloca SROA is processing, and they overlap but at an offset.
  1329. ;
  1330. ; CHECK-LABEL: @test27(
  1331. ; CHECK-NOT: alloca
  1332. ; CHECK: %[[F1:.*]] = bitcast i32 0 to float
  1333. ; CHECK: %[[F2:.*]] = bitcast i32 1065353216 to float
  1334. ; CHECK: %[[SUM:.*]] = fadd float %[[F1]], %[[F2]]
  1335. ; CHECK: ret float %[[SUM]]
  1336. entry:
  1337. %a = alloca [12 x i8]
  ; Byte offsets into the 12-byte alloca: gep1=0, gep2=4, gep3=8.
  1338. %gep1 = getelementptr [12 x i8], [12 x i8]* %a, i32 0, i32 0
  1339. %gep2 = getelementptr [12 x i8], [12 x i8]* %a, i32 0, i32 4
  1340. %gep3 = getelementptr [12 x i8], [12 x i8]* %a, i32 0, i32 8
  1341. %iptr1 = bitcast i8* %gep1 to i64*
  1342. %iptr2 = bitcast i8* %gep2 to i64*
  1343. %fptr1 = bitcast i8* %gep1 to float*
  1344. %fptr2 = bitcast i8* %gep2 to float*
  1345. %fptr3 = bitcast i8* %gep3 to float*
  1346. store float 0.0, float* %fptr1
  1347. store float 1.0, float* %fptr2
  ; The i64 load covers bytes 0-7 and the i64 store covers bytes 4-11, so the
  ; two accesses overlap at a 4-byte offset within the same alloca.
  1348. %v = load i64, i64* %iptr1
  1349. store i64 %v, i64* %iptr2
  1350. %f1 = load float, float* %fptr2
  1351. %f2 = load float, float* %fptr3
  1352. %ret = fadd float %f1, %f2
  1353. ret float %ret
  1354. }
  1355. define i32 @PR22093() {
  1356. ; Test that we don't try to pre-split a splittable store of a splittable but
  1357. ; not pre-splittable load over the same alloca. We "handle" this case when the
  1358. ; load is unsplittable but unrelated to this alloca by just generating extra
  1359. ; loads without touching the original, but when the original load was out of
  1360. ; this alloca we need to handle it specially to ensure the splits line up
  1361. ; properly for rewriting.
  1362. ;
  1363. ; CHECK-LABEL: @PR22093(
  1364. ; CHECK-NOT: alloca
  1365. ; CHECK: alloca i16
  1366. ; CHECK-NOT: alloca
  1367. ; CHECK: store volatile i16
  1368. entry:
  1369. %a = alloca i32
  1370. %a.cast = bitcast i32* %a to i16*
  ; Volatile i16 store to bytes 0-1 of the i32 alloca; it must be preserved.
  1371. store volatile i16 42, i16* %a.cast
  1372. %load = load i32, i32* %a
  1373. store i32 %load, i32* %a
  1374. ret i32 %load
  1375. }
  1376. define void @PR22093.2() {
  1377. ; Another way that we end up being unable to split a particular set of loads
  1378. ; and stores can even have ordering importance. Here we have a load which is
  1379. ; pre-splittable by itself, and the first store is also compatible. But the
  1380. ; second store of the load makes the load unsplittable because of a mismatch of
  1381. ; splits. Because this makes the load unsplittable, we also have to go back and
  1382. ; remove the first store from the presplit candidates as its load won't be
  1383. ; presplit.
  1384. ;
  1385. ; CHECK-LABEL: @PR22093.2(
  1386. ; CHECK-NOT: alloca
  1387. ; CHECK: alloca i16
  1388. ; CHECK-NEXT: alloca i8
  1389. ; CHECK-NOT: alloca
  1390. ; CHECK: store volatile i16
  1391. ; CHECK: store volatile i8
  1392. entry:
  1393. %a = alloca i64
  1394. %a.cast1 = bitcast i64* %a to i32*
  1395. %a.cast2 = bitcast i64* %a to i16*
  1396. store volatile i16 42, i16* %a.cast2
  1397. %load = load i32, i32* %a.cast1
  1398. store i32 %load, i32* %a.cast1
  ; %a.gep1 addresses bytes 4-7 of the i64 alloca (one i32 past the start).
  1399. %a.gep1 = getelementptr i32, i32* %a.cast1, i32 1
  1400. %a.cast3 = bitcast i32* %a.gep1 to i8*
  1401. store volatile i8 13, i8* %a.cast3
  ; This second store of %load, at a different offset, mismatches the splits.
  1402. store i32 %load, i32* %a.gep1
  1403. ret void
  1404. }
  1405. define void @PR23737() {
  ; Atomic volatile accesses to the alloca must be preserved along with their
  ; seq_cst ordering, as matched by the lines below.
  1406. ; CHECK-LABEL: @PR23737(
  1407. ; CHECK: store atomic volatile {{.*}} seq_cst
  1408. ; CHECK: load atomic volatile {{.*}} seq_cst
  1409. entry:
  1410. %ptr = alloca i64, align 8
  1411. store atomic volatile i64 0, i64* %ptr seq_cst, align 8
  1412. %load = load atomic volatile i64, i64* %ptr seq_cst, align 8
  1413. ret void
  1414. }