; RUN: llvm-dis < %s.bc| FileCheck %s
; RUN: verify-uselistorder < %s.bc
; memOperations.3.2.ll.bc was generated by passing this file to llvm-as-3.2.
; The test checks that LLVM does not misread memory related instructions of
; older bitcode files.
  6. define void @alloca(){
  7. entry:
  8. ; CHECK: %res1 = alloca i8
  9. %res1 = alloca i8
  10. ; CHECK-NEXT: %res2 = alloca i8, i32 2
  11. %res2 = alloca i8, i32 2
  12. ; CHECK-NEXT: %res3 = alloca i8, i32 2, align 4
  13. %res3 = alloca i8, i32 2, align 4
  14. ; CHECK-NEXT: %res4 = alloca i8, align 4
  15. %res4 = alloca i8, align 4
  16. ret void
  17. }
  18. define void @load(){
  19. entry:
  20. %ptr1 = alloca i8
  21. store i8 2, i8* %ptr1
  22. ; CHECK: %res1 = load i8, i8* %ptr1
  23. %res1 = load i8, i8* %ptr1
  24. ; CHECK-NEXT: %res2 = load volatile i8, i8* %ptr1
  25. %res2 = load volatile i8, i8* %ptr1
  26. ; CHECK-NEXT: %res3 = load i8, i8* %ptr1, align 1
  27. %res3 = load i8, i8* %ptr1, align 1
  28. ; CHECK-NEXT: %res4 = load volatile i8, i8* %ptr1, align 1
  29. %res4 = load volatile i8, i8* %ptr1, align 1
  30. ; CHECK-NEXT: %res5 = load i8, i8* %ptr1, !nontemporal !0
  31. %res5 = load i8, i8* %ptr1, !nontemporal !0
  32. ; CHECK-NEXT: %res6 = load volatile i8, i8* %ptr1, !nontemporal !0
  33. %res6 = load volatile i8, i8* %ptr1, !nontemporal !0
  34. ; CHECK-NEXT: %res7 = load i8, i8* %ptr1, align 1, !nontemporal !0
  35. %res7 = load i8, i8* %ptr1, align 1, !nontemporal !0
  36. ; CHECK-NEXT: %res8 = load volatile i8, i8* %ptr1, align 1, !nontemporal !0
  37. %res8 = load volatile i8, i8* %ptr1, align 1, !nontemporal !0
  38. ; CHECK-NEXT: %res9 = load i8, i8* %ptr1, !invariant.load !1
  39. %res9 = load i8, i8* %ptr1, !invariant.load !1
  40. ; CHECK-NEXT: %res10 = load volatile i8, i8* %ptr1, !invariant.load !1
  41. %res10 = load volatile i8, i8* %ptr1, !invariant.load !1
  42. ; CHECK-NEXT: %res11 = load i8, i8* %ptr1, align 1, !invariant.load !1
  43. %res11 = load i8, i8* %ptr1, align 1, !invariant.load !1
  44. ; CHECK-NEXT: %res12 = load volatile i8, i8* %ptr1, align 1, !invariant.load !1
  45. %res12 = load volatile i8, i8* %ptr1, align 1, !invariant.load !1
  46. ; CHECK-NEXT: %res13 = load i8, i8* %ptr1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
  47. %res13 = load i8, i8* %ptr1, !nontemporal !0, !invariant.load !1
  48. ; CHECK-NEXT: %res14 = load volatile i8, i8* %ptr1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
  49. %res14 = load volatile i8, i8* %ptr1, !nontemporal !0, !invariant.load !1
  50. ; CHECK-NEXT: %res15 = load i8, i8* %ptr1, align 1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
  51. %res15 = load i8, i8* %ptr1, align 1, !nontemporal !0, !invariant.load !1
  52. ; CHECK-NEXT: %res16 = load volatile i8, i8* %ptr1, align 1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
  53. %res16 = load volatile i8, i8* %ptr1, align 1, !nontemporal !0, !invariant.load !1
  54. ret void
  55. }
  56. define void @loadAtomic(){
  57. entry:
  58. %ptr1 = alloca i8
  59. store i8 2, i8* %ptr1
  60. ; CHECK: %res1 = load atomic i8, i8* %ptr1 unordered, align 1
  61. %res1 = load atomic i8, i8* %ptr1 unordered, align 1
  62. ; CHECK-NEXT: %res2 = load atomic i8, i8* %ptr1 monotonic, align 1
  63. %res2 = load atomic i8, i8* %ptr1 monotonic, align 1
  64. ; CHECK-NEXT: %res3 = load atomic i8, i8* %ptr1 acquire, align 1
  65. %res3 = load atomic i8, i8* %ptr1 acquire, align 1
  66. ; CHECK-NEXT: %res4 = load atomic i8, i8* %ptr1 seq_cst, align 1
  67. %res4 = load atomic i8, i8* %ptr1 seq_cst, align 1
  68. ; CHECK-NEXT: %res5 = load atomic volatile i8, i8* %ptr1 unordered, align 1
  69. %res5 = load atomic volatile i8, i8* %ptr1 unordered, align 1
  70. ; CHECK-NEXT: %res6 = load atomic volatile i8, i8* %ptr1 monotonic, align 1
  71. %res6 = load atomic volatile i8, i8* %ptr1 monotonic, align 1
  72. ; CHECK-NEXT: %res7 = load atomic volatile i8, i8* %ptr1 acquire, align 1
  73. %res7 = load atomic volatile i8, i8* %ptr1 acquire, align 1
  74. ; CHECK-NEXT: %res8 = load atomic volatile i8, i8* %ptr1 seq_cst, align 1
  75. %res8 = load atomic volatile i8, i8* %ptr1 seq_cst, align 1
  76. ; CHECK-NEXT: %res9 = load atomic i8, i8* %ptr1 singlethread unordered, align 1
  77. %res9 = load atomic i8, i8* %ptr1 singlethread unordered, align 1
  78. ; CHECK-NEXT: %res10 = load atomic i8, i8* %ptr1 singlethread monotonic, align 1
  79. %res10 = load atomic i8, i8* %ptr1 singlethread monotonic, align 1
  80. ; CHECK-NEXT: %res11 = load atomic i8, i8* %ptr1 singlethread acquire, align 1
  81. %res11 = load atomic i8, i8* %ptr1 singlethread acquire, align 1
  82. ; CHECK-NEXT: %res12 = load atomic i8, i8* %ptr1 singlethread seq_cst, align 1
  83. %res12 = load atomic i8, i8* %ptr1 singlethread seq_cst, align 1
  84. ; CHECK-NEXT: %res13 = load atomic volatile i8, i8* %ptr1 singlethread unordered, align 1
  85. %res13 = load atomic volatile i8, i8* %ptr1 singlethread unordered, align 1
  86. ; CHECK-NEXT: %res14 = load atomic volatile i8, i8* %ptr1 singlethread monotonic, align 1
  87. %res14 = load atomic volatile i8, i8* %ptr1 singlethread monotonic, align 1
  88. ; CHECK-NEXT: %res15 = load atomic volatile i8, i8* %ptr1 singlethread acquire, align 1
  89. %res15 = load atomic volatile i8, i8* %ptr1 singlethread acquire, align 1
  90. ; CHECK-NEXT: %res16 = load atomic volatile i8, i8* %ptr1 singlethread seq_cst, align 1
  91. %res16 = load atomic volatile i8, i8* %ptr1 singlethread seq_cst, align 1
  92. ret void
  93. }
  94. define void @store(){
  95. entry:
  96. %ptr1 = alloca i8
  97. ; CHECK: store i8 2, i8* %ptr1
  98. store i8 2, i8* %ptr1
  99. ; CHECK-NEXT: store volatile i8 2, i8* %ptr1
  100. store volatile i8 2, i8* %ptr1
  101. ; CHECK-NEXT: store i8 2, i8* %ptr1, align 1
  102. store i8 2, i8* %ptr1, align 1
  103. ; CHECK-NEXT: store volatile i8 2, i8* %ptr1, align 1
  104. store volatile i8 2, i8* %ptr1, align 1
  105. ; CHECK-NEXT: store i8 2, i8* %ptr1, !nontemporal !0
  106. store i8 2, i8* %ptr1, !nontemporal !0
  107. ; CHECK-NEXT: store volatile i8 2, i8* %ptr1, !nontemporal !0
  108. store volatile i8 2, i8* %ptr1, !nontemporal !0
  109. ; CHECK-NEXT: store i8 2, i8* %ptr1, align 1, !nontemporal !0
  110. store i8 2, i8* %ptr1, align 1, !nontemporal !0
  111. ; CHECK-NEXT: store volatile i8 2, i8* %ptr1, align 1, !nontemporal !0
  112. store volatile i8 2, i8* %ptr1, align 1, !nontemporal !0
  113. ret void
  114. }
  115. define void @storeAtomic(){
  116. entry:
  117. %ptr1 = alloca i8
  118. ; CHECK: store atomic i8 2, i8* %ptr1 unordered, align 1
  119. store atomic i8 2, i8* %ptr1 unordered, align 1
  120. ; CHECK-NEXT: store atomic i8 2, i8* %ptr1 monotonic, align 1
  121. store atomic i8 2, i8* %ptr1 monotonic, align 1
  122. ; CHECK-NEXT: store atomic i8 2, i8* %ptr1 release, align 1
  123. store atomic i8 2, i8* %ptr1 release, align 1
  124. ; CHECK-NEXT: store atomic i8 2, i8* %ptr1 seq_cst, align 1
  125. store atomic i8 2, i8* %ptr1 seq_cst, align 1
  126. ; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 unordered, align 1
  127. store atomic volatile i8 2, i8* %ptr1 unordered, align 1
  128. ; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 monotonic, align 1
  129. store atomic volatile i8 2, i8* %ptr1 monotonic, align 1
  130. ; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 release, align 1
  131. store atomic volatile i8 2, i8* %ptr1 release, align 1
  132. ; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 seq_cst, align 1
  133. store atomic volatile i8 2, i8* %ptr1 seq_cst, align 1
  134. ; CHECK-NEXT: store atomic i8 2, i8* %ptr1 singlethread unordered, align 1
  135. store atomic i8 2, i8* %ptr1 singlethread unordered, align 1
  136. ; CHECK-NEXT: store atomic i8 2, i8* %ptr1 singlethread monotonic, align 1
  137. store atomic i8 2, i8* %ptr1 singlethread monotonic, align 1
  138. ; CHECK-NEXT: store atomic i8 2, i8* %ptr1 singlethread release, align 1
  139. store atomic i8 2, i8* %ptr1 singlethread release, align 1
  140. ; CHECK-NEXT: store atomic i8 2, i8* %ptr1 singlethread seq_cst, align 1
  141. store atomic i8 2, i8* %ptr1 singlethread seq_cst, align 1
  142. ; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 singlethread unordered, align 1
  143. store atomic volatile i8 2, i8* %ptr1 singlethread unordered, align 1
  144. ; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 singlethread monotonic, align 1
  145. store atomic volatile i8 2, i8* %ptr1 singlethread monotonic, align 1
  146. ; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 singlethread release, align 1
  147. store atomic volatile i8 2, i8* %ptr1 singlethread release, align 1
  148. ; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 singlethread seq_cst, align 1
  149. store atomic volatile i8 2, i8* %ptr1 singlethread seq_cst, align 1
  150. ret void
  151. }
  152. define void @cmpxchg(i32* %ptr,i32 %cmp,i32 %new){
  153. entry:
  154. ;cmpxchg [volatile] <ty>* <pointer>, <ty> <cmp>, <ty> <new> [singlethread] <ordering>
  155. ; CHECK: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new monotonic monotonic
  156. ; CHECK-NEXT: %res1 = extractvalue { i32, i1 } [[TMP]], 0
  157. %res1 = cmpxchg i32* %ptr, i32 %cmp, i32 %new monotonic monotonic
  158. ; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new monotonic monotonic
  159. ; CHECK-NEXT: %res2 = extractvalue { i32, i1 } [[TMP]], 0
  160. %res2 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new monotonic monotonic
  161. ; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread monotonic monotonic
  162. ; CHECK-NEXT: %res3 = extractvalue { i32, i1 } [[TMP]], 0
  163. %res3 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread monotonic monotonic
  164. ; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread monotonic monotonic
  165. ; CHECK-NEXT: %res4 = extractvalue { i32, i1 } [[TMP]], 0
  166. %res4 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread monotonic monotonic
  167. ; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new acquire acquire
  168. ; CHECK-NEXT: %res5 = extractvalue { i32, i1 } [[TMP]], 0
  169. %res5 = cmpxchg i32* %ptr, i32 %cmp, i32 %new acquire acquire
  170. ; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acquire acquire
  171. ; CHECK-NEXT: %res6 = extractvalue { i32, i1 } [[TMP]], 0
  172. %res6 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acquire acquire
  173. ; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acquire acquire
  174. ; CHECK-NEXT: %res7 = extractvalue { i32, i1 } [[TMP]], 0
  175. %res7 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acquire acquire
  176. ; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread acquire acquire
  177. ; CHECK-NEXT: %res8 = extractvalue { i32, i1 } [[TMP]], 0
  178. %res8 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread acquire acquire
  179. ; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new release monotonic
  180. ; CHECK-NEXT: %res9 = extractvalue { i32, i1 } [[TMP]], 0
  181. %res9 = cmpxchg i32* %ptr, i32 %cmp, i32 %new release monotonic
  182. ; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new release monotonic
  183. ; CHECK-NEXT: %res10 = extractvalue { i32, i1 } [[TMP]], 0
  184. %res10 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new release monotonic
  185. ; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread release monotonic
  186. ; CHECK-NEXT: %res11 = extractvalue { i32, i1 } [[TMP]], 0
  187. %res11 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread release monotonic
  188. ; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread release monotonic
  189. ; CHECK-NEXT: %res12 = extractvalue { i32, i1 } [[TMP]], 0
  190. %res12 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread release monotonic
  191. ; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new acq_rel acquire
  192. ; CHECK-NEXT: %res13 = extractvalue { i32, i1 } [[TMP]], 0
  193. %res13 = cmpxchg i32* %ptr, i32 %cmp, i32 %new acq_rel acquire
  194. ; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acq_rel acquire
  195. ; CHECK-NEXT: %res14 = extractvalue { i32, i1 } [[TMP]], 0
  196. %res14 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acq_rel acquire
  197. ; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel acquire
  198. ; CHECK-NEXT: %res15 = extractvalue { i32, i1 } [[TMP]], 0
  199. %res15 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel acquire
  200. ; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel acquire
  201. ; CHECK-NEXT: %res16 = extractvalue { i32, i1 } [[TMP]], 0
  202. %res16 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel acquire
  203. ; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new seq_cst seq_cst
  204. ; CHECK-NEXT: %res17 = extractvalue { i32, i1 } [[TMP]], 0
  205. %res17 = cmpxchg i32* %ptr, i32 %cmp, i32 %new seq_cst seq_cst
  206. ; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new seq_cst seq_cst
  207. ; CHECK-NEXT: %res18 = extractvalue { i32, i1 } [[TMP]], 0
  208. %res18 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new seq_cst seq_cst
  209. ; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread seq_cst seq_cst
  210. ; CHECK-NEXT: %res19 = extractvalue { i32, i1 } [[TMP]], 0
  211. %res19 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread seq_cst seq_cst
  212. ; CHECK-NEXT: [[TMP:%[a-z0-9]+]] = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread seq_cst seq_cst
  213. ; CHECK-NEXT: %res20 = extractvalue { i32, i1 } [[TMP]], 0
  214. %res20 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread seq_cst seq_cst
  215. ret void
  216. }
  217. define void @getelementptr({i8, i8}, {i8, i8}* %s, <4 x i8*> %ptrs, <4 x i64> %offsets ){
  218. entry:
  219. ; CHECK: %res1 = getelementptr { i8, i8 }, { i8, i8 }* %s, i32 1, i32 1
  220. %res1 = getelementptr {i8, i8}, {i8, i8}* %s, i32 1, i32 1
  221. ; CHECK-NEXT: %res2 = getelementptr inbounds { i8, i8 }, { i8, i8 }* %s, i32 1, i32 1
  222. %res2 = getelementptr inbounds {i8, i8}, {i8, i8}* %s, i32 1, i32 1
  223. ; CHECK-NEXT: %res3 = getelementptr i8, <4 x i8*> %ptrs, <4 x i64> %offsets
  224. %res3 = getelementptr i8, <4 x i8*> %ptrs, <4 x i64> %offsets
  225. ret void
  226. }
  227. !0 = metadata !{ i32 1 }
  228. !1 = metadata !{}