; alignment.ll — SROA alignment-propagation tests (extraction page header and
; line-number gutter removed; see numbered source lines below for content).
  1. ; RUN: opt < %s -sroa -S | FileCheck %s
  2. target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n8:16:32:64"
  3. declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i32, i1)
  4. define void @test1({ i8, i8 }* %a, { i8, i8 }* %b) {
  5. ; CHECK-LABEL: @test1(
  6. ; CHECK: %[[gep_a0:.*]] = getelementptr inbounds { i8, i8 }, { i8, i8 }* %a, i64 0, i32 0
  7. ; CHECK: %[[a0:.*]] = load i8, i8* %[[gep_a0]], align 16
  8. ; CHECK: %[[gep_a1:.*]] = getelementptr inbounds { i8, i8 }, { i8, i8 }* %a, i64 0, i32 1
  9. ; CHECK: %[[a1:.*]] = load i8, i8* %[[gep_a1]], align 1
  10. ; CHECK: %[[gep_b0:.*]] = getelementptr inbounds { i8, i8 }, { i8, i8 }* %b, i64 0, i32 0
  11. ; CHECK: store i8 %[[a0]], i8* %[[gep_b0]], align 16
  12. ; CHECK: %[[gep_b1:.*]] = getelementptr inbounds { i8, i8 }, { i8, i8 }* %b, i64 0, i32 1
  13. ; CHECK: store i8 %[[a1]], i8* %[[gep_b1]], align 1
  14. ; CHECK: ret void
  15. entry:
  16. %alloca = alloca { i8, i8 }, align 16
  17. %gep_a = getelementptr { i8, i8 }, { i8, i8 }* %a, i32 0, i32 0
  18. %gep_alloca = getelementptr { i8, i8 }, { i8, i8 }* %alloca, i32 0, i32 0
  19. %gep_b = getelementptr { i8, i8 }, { i8, i8 }* %b, i32 0, i32 0
  20. store i8 420, i8* %gep_alloca, align 16
  21. call void @llvm.memcpy.p0i8.p0i8.i32(i8* %gep_alloca, i8* %gep_a, i32 2, i32 16, i1 false)
  22. call void @llvm.memcpy.p0i8.p0i8.i32(i8* %gep_b, i8* %gep_alloca, i32 2, i32 16, i1 false)
  23. ret void
  24. }
  25. define void @test2() {
  26. ; CHECK-LABEL: @test2(
  27. ; CHECK: alloca i16
  28. ; CHECK: load i8, i8* %{{.*}}
  29. ; CHECK: store i8 42, i8* %{{.*}}
  30. ; CHECK: ret void
  31. entry:
  32. %a = alloca { i8, i8, i8, i8 }, align 2
  33. %gep1 = getelementptr { i8, i8, i8, i8 }, { i8, i8, i8, i8 }* %a, i32 0, i32 1
  34. %cast1 = bitcast i8* %gep1 to i16*
  35. store volatile i16 0, i16* %cast1
  36. %gep2 = getelementptr { i8, i8, i8, i8 }, { i8, i8, i8, i8 }* %a, i32 0, i32 2
  37. %result = load i8, i8* %gep2
  38. store i8 42, i8* %gep2
  39. ret void
  40. }
  41. define void @PR13920(<2 x i64>* %a, i16* %b) {
  42. ; Test that alignments on memcpy intrinsics get propagated to loads and stores.
  43. ; CHECK-LABEL: @PR13920(
  44. ; CHECK: load <2 x i64>, <2 x i64>* %a, align 2
  45. ; CHECK: store <2 x i64> {{.*}}, <2 x i64>* {{.*}}, align 2
  46. ; CHECK: ret void
  47. entry:
  48. %aa = alloca <2 x i64>, align 16
  49. %aptr = bitcast <2 x i64>* %a to i8*
  50. %aaptr = bitcast <2 x i64>* %aa to i8*
  51. call void @llvm.memcpy.p0i8.p0i8.i32(i8* %aaptr, i8* %aptr, i32 16, i32 2, i1 false)
  52. %bptr = bitcast i16* %b to i8*
  53. call void @llvm.memcpy.p0i8.p0i8.i32(i8* %bptr, i8* %aaptr, i32 16, i32 2, i1 false)
  54. ret void
  55. }
  56. define void @test3(i8* %x) {
  57. ; Test that when we promote an alloca to a type with lower ABI alignment, we
  58. ; provide the needed explicit alignment that code using the alloca may be
  59. ; expecting. However, also check that any offset within an alloca can in turn
  60. ; reduce the alignment.
  61. ; CHECK-LABEL: @test3(
  62. ; CHECK: alloca [22 x i8], align 8
  63. ; CHECK: alloca [18 x i8], align 2
  64. ; CHECK: ret void
  65. entry:
  66. %a = alloca { i8*, i8*, i8* }
  67. %b = alloca { i8*, i8*, i8* }
  68. %a_raw = bitcast { i8*, i8*, i8* }* %a to i8*
  69. call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a_raw, i8* %x, i32 22, i32 8, i1 false)
  70. %b_raw = bitcast { i8*, i8*, i8* }* %b to i8*
  71. %b_gep = getelementptr i8, i8* %b_raw, i32 6
  72. call void @llvm.memcpy.p0i8.p0i8.i32(i8* %b_gep, i8* %x, i32 18, i32 2, i1 false)
  73. ret void
  74. }
  75. define void @test5() {
  76. ; Test that we preserve underaligned loads and stores when splitting. The use
  77. ; of volatile in this test case is just to force the loads and stores to not be
  78. ; split or promoted out of existence.
  79. ;
  80. ; CHECK-LABEL: @test5(
  81. ; CHECK: alloca [9 x i8]
  82. ; CHECK: alloca [9 x i8]
  83. ; CHECK: store volatile double 0.0{{.*}}, double* %{{.*}}, align 1
  84. ; CHECK: load volatile i16, i16* %{{.*}}, align 1
  85. ; CHECK: load double, double* %{{.*}}, align 1
  86. ; CHECK: store volatile double %{{.*}}, double* %{{.*}}, align 1
  87. ; CHECK: load volatile i16, i16* %{{.*}}, align 1
  88. ; CHECK: ret void
  89. entry:
  90. %a = alloca [18 x i8]
  91. %raw1 = getelementptr inbounds [18 x i8], [18 x i8]* %a, i32 0, i32 0
  92. %ptr1 = bitcast i8* %raw1 to double*
  93. store volatile double 0.0, double* %ptr1, align 1
  94. %weird_gep1 = getelementptr inbounds [18 x i8], [18 x i8]* %a, i32 0, i32 7
  95. %weird_cast1 = bitcast i8* %weird_gep1 to i16*
  96. %weird_load1 = load volatile i16, i16* %weird_cast1, align 1
  97. %raw2 = getelementptr inbounds [18 x i8], [18 x i8]* %a, i32 0, i32 9
  98. %ptr2 = bitcast i8* %raw2 to double*
  99. %d1 = load double, double* %ptr1, align 1
  100. store volatile double %d1, double* %ptr2, align 1
  101. %weird_gep2 = getelementptr inbounds [18 x i8], [18 x i8]* %a, i32 0, i32 16
  102. %weird_cast2 = bitcast i8* %weird_gep2 to i16*
  103. %weird_load2 = load volatile i16, i16* %weird_cast2, align 1
  104. ret void
  105. }
  106. define void @test6() {
  107. ; Test that we promote alignment when the underlying alloca switches to one
  108. ; that innately provides it.
  109. ; CHECK-LABEL: @test6(
  110. ; CHECK: alloca double
  111. ; CHECK: alloca double
  112. ; CHECK-NOT: align
  113. ; CHECK: ret void
  114. entry:
  115. %a = alloca [16 x i8]
  116. %raw1 = getelementptr inbounds [16 x i8], [16 x i8]* %a, i32 0, i32 0
  117. %ptr1 = bitcast i8* %raw1 to double*
  118. store volatile double 0.0, double* %ptr1, align 1
  119. %raw2 = getelementptr inbounds [16 x i8], [16 x i8]* %a, i32 0, i32 8
  120. %ptr2 = bitcast i8* %raw2 to double*
  121. %val = load double, double* %ptr1, align 1
  122. store volatile double %val, double* %ptr2, align 1
  123. ret void
  124. }
  125. define void @test7(i8* %out) {
  126. ; Test that we properly compute the destination alignment when rewriting
  127. ; memcpys as direct loads or stores.
  128. ; CHECK-LABEL: @test7(
  129. ; CHECK-NOT: alloca
  130. entry:
  131. %a = alloca [16 x i8]
  132. %raw1 = getelementptr inbounds [16 x i8], [16 x i8]* %a, i32 0, i32 0
  133. %ptr1 = bitcast i8* %raw1 to double*
  134. %raw2 = getelementptr inbounds [16 x i8], [16 x i8]* %a, i32 0, i32 8
  135. %ptr2 = bitcast i8* %raw2 to double*
  136. call void @llvm.memcpy.p0i8.p0i8.i32(i8* %raw1, i8* %out, i32 16, i32 0, i1 false)
  137. ; CHECK: %[[val2:.*]] = load double, double* %{{.*}}, align 1
  138. ; CHECK: %[[val1:.*]] = load double, double* %{{.*}}, align 1
  139. %val1 = load double, double* %ptr2, align 1
  140. %val2 = load double, double* %ptr1, align 1
  141. store double %val1, double* %ptr1, align 1
  142. store double %val2, double* %ptr2, align 1
  143. call void @llvm.memcpy.p0i8.p0i8.i32(i8* %out, i8* %raw1, i32 16, i32 0, i1 false)
  144. ; CHECK: store double %[[val1]], double* %{{.*}}, align 1
  145. ; CHECK: store double %[[val2]], double* %{{.*}}, align 1
  146. ret void
  147. ; CHECK: ret void
  148. }