; atomics.ll — MemorySanitizer instrumentation test for atomic operations.
  1. ; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
  2. ; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=1 -S | FileCheck %s
  3. ; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=2 -S | FileCheck %s
  4. target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
  5. target triple = "x86_64-unknown-linux-gnu"
  6. ; atomicrmw xchg: store clean shadow, return clean shadow
  7. define i32 @AtomicRmwXchg(i32* %p, i32 %x) sanitize_memory {
  8. entry:
  9. %0 = atomicrmw xchg i32* %p, i32 %x seq_cst
  10. ret i32 %0
  11. }
  12. ; CHECK: @AtomicRmwXchg
  13. ; CHECK: store i32 0,
  14. ; CHECK: atomicrmw xchg {{.*}} seq_cst
  15. ; CHECK: store i32 0, {{.*}} @__msan_retval_tls
  16. ; CHECK: ret i32
  17. ; atomicrmw max: exactly the same as above
  18. define i32 @AtomicRmwMax(i32* %p, i32 %x) sanitize_memory {
  19. entry:
  20. %0 = atomicrmw max i32* %p, i32 %x seq_cst
  21. ret i32 %0
  22. }
  23. ; CHECK: @AtomicRmwMax
  24. ; CHECK: store i32 0,
  25. ; CHECK: atomicrmw max {{.*}} seq_cst
  26. ; CHECK: store i32 0, {{.*}} @__msan_retval_tls
  27. ; CHECK: ret i32
  28. ; cmpxchg: the same as above, but also check %a shadow
  29. define i32 @Cmpxchg(i32* %p, i32 %a, i32 %b) sanitize_memory {
  30. entry:
  31. %pair = cmpxchg i32* %p, i32 %a, i32 %b seq_cst seq_cst
  32. %0 = extractvalue { i32, i1 } %pair, 0
  33. ret i32 %0
  34. }
  35. ; CHECK: @Cmpxchg
  36. ; CHECK: store { i32, i1 } zeroinitializer,
  37. ; CHECK: icmp
  38. ; CHECK: br
  39. ; CHECK: @__msan_warning
  40. ; CHECK: cmpxchg {{.*}} seq_cst seq_cst
  41. ; CHECK: store i32 0, {{.*}} @__msan_retval_tls
  42. ; CHECK: ret i32
  43. ; relaxed cmpxchg: bump up to "release monotonic"
  44. define i32 @CmpxchgMonotonic(i32* %p, i32 %a, i32 %b) sanitize_memory {
  45. entry:
  46. %pair = cmpxchg i32* %p, i32 %a, i32 %b monotonic monotonic
  47. %0 = extractvalue { i32, i1 } %pair, 0
  48. ret i32 %0
  49. }
  50. ; CHECK: @CmpxchgMonotonic
  51. ; CHECK: store { i32, i1 } zeroinitializer,
  52. ; CHECK: icmp
  53. ; CHECK: br
  54. ; CHECK: @__msan_warning
  55. ; CHECK: cmpxchg {{.*}} release monotonic
  56. ; CHECK: store i32 0, {{.*}} @__msan_retval_tls
  57. ; CHECK: ret i32
  58. ; atomic load: preserve alignment, load shadow value after app value
  59. define i32 @AtomicLoad(i32* %p) sanitize_memory {
  60. entry:
  61. %0 = load atomic i32, i32* %p seq_cst, align 16
  62. ret i32 %0
  63. }
  64. ; CHECK: @AtomicLoad
  65. ; CHECK: load atomic i32, i32* {{.*}} seq_cst, align 16
  66. ; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32, i32* {{.*}}, align 16
  67. ; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls
  68. ; CHECK: ret i32
  69. ; atomic load: preserve alignment, load shadow value after app value
  70. define i32 @AtomicLoadAcquire(i32* %p) sanitize_memory {
  71. entry:
  72. %0 = load atomic i32, i32* %p acquire, align 16
  73. ret i32 %0
  74. }
  75. ; CHECK: @AtomicLoadAcquire
  76. ; CHECK: load atomic i32, i32* {{.*}} acquire, align 16
  77. ; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32, i32* {{.*}}, align 16
  78. ; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls
  79. ; CHECK: ret i32
  80. ; atomic load monotonic: bump up to load acquire
  81. define i32 @AtomicLoadMonotonic(i32* %p) sanitize_memory {
  82. entry:
  83. %0 = load atomic i32, i32* %p monotonic, align 16
  84. ret i32 %0
  85. }
  86. ; CHECK: @AtomicLoadMonotonic
  87. ; CHECK: load atomic i32, i32* {{.*}} acquire, align 16
  88. ; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32, i32* {{.*}}, align 16
  89. ; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls
  90. ; CHECK: ret i32
  91. ; atomic load unordered: bump up to load acquire
  92. define i32 @AtomicLoadUnordered(i32* %p) sanitize_memory {
  93. entry:
  94. %0 = load atomic i32, i32* %p unordered, align 16
  95. ret i32 %0
  96. }
  97. ; CHECK: @AtomicLoadUnordered
  98. ; CHECK: load atomic i32, i32* {{.*}} acquire, align 16
  99. ; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32, i32* {{.*}}, align 16
  100. ; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls
  101. ; CHECK: ret i32
  102. ; atomic store: preserve alignment, store clean shadow value before app value
  103. define void @AtomicStore(i32* %p, i32 %x) sanitize_memory {
  104. entry:
  105. store atomic i32 %x, i32* %p seq_cst, align 16
  106. ret void
  107. }
  108. ; CHECK: @AtomicStore
  109. ; CHECK-NOT: @__msan_param_tls
  110. ; CHECK: store i32 0, i32* {{.*}}, align 16
  111. ; CHECK: store atomic i32 %x, i32* %p seq_cst, align 16
  112. ; CHECK: ret void
  113. ; atomic store: preserve alignment, store clean shadow value before app value
  114. define void @AtomicStoreRelease(i32* %p, i32 %x) sanitize_memory {
  115. entry:
  116. store atomic i32 %x, i32* %p release, align 16
  117. ret void
  118. }
  119. ; CHECK: @AtomicStoreRelease
  120. ; CHECK-NOT: @__msan_param_tls
  121. ; CHECK: store i32 0, i32* {{.*}}, align 16
  122. ; CHECK: store atomic i32 %x, i32* %p release, align 16
  123. ; CHECK: ret void
  124. ; atomic store monotonic: bumped up to store release
  125. define void @AtomicStoreMonotonic(i32* %p, i32 %x) sanitize_memory {
  126. entry:
  127. store atomic i32 %x, i32* %p monotonic, align 16
  128. ret void
  129. }
  130. ; CHECK: @AtomicStoreMonotonic
  131. ; CHECK-NOT: @__msan_param_tls
  132. ; CHECK: store i32 0, i32* {{.*}}, align 16
  133. ; CHECK: store atomic i32 %x, i32* %p release, align 16
  134. ; CHECK: ret void
  135. ; atomic store unordered: bumped up to store release
  136. define void @AtomicStoreUnordered(i32* %p, i32 %x) sanitize_memory {
  137. entry:
  138. store atomic i32 %x, i32* %p unordered, align 16
  139. ret void
  140. }
  141. ; CHECK: @AtomicStoreUnordered
  142. ; CHECK-NOT: @__msan_param_tls
  143. ; CHECK: store i32 0, i32* {{.*}}, align 16
  144. ; CHECK: store atomic i32 %x, i32* %p release, align 16
  145. ; CHECK: ret void