; NOTE: Transforms/LoopVectorize/version-mem-access.ll
  1. ; RUN: opt -basicaa -loop-vectorize -enable-mem-access-versioning -force-vector-width=2 -force-vector-interleave=1 < %s -S | FileCheck %s
  2. target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
  3. ; Check that we version this loop with speculating the value 1 for symbolic
  4. ; strides. This also checks that the symbolic stride information is correctly
  5. ; propagated to the memcheck generation. Without this the loop wouldn't
  6. ; vectorize because we couldn't determine the array bounds for the required
  7. ; memchecks.
  8. ; CHECK-LABEL: test
  9. define void @test(i32* %A, i64 %AStride,
  10. i32* %B, i32 %BStride,
  11. i32* %C, i64 %CStride, i32 %N) {
  12. entry:
  13. %cmp13 = icmp eq i32 %N, 0
  14. br i1 %cmp13, label %for.end, label %for.body.preheader
  15. ; CHECK-DAG: icmp ne i64 %AStride, 1
  16. ; CHECK-DAG: icmp ne i32 %BStride, 1
  17. ; CHECK-DAG: icmp ne i64 %CStride, 1
  18. ; CHECK: or
  19. ; CHECK: or
  20. ; CHECK: br
  21. ; CHECK: vector.body
  22. ; CHECK: load <2 x i32>
  23. for.body.preheader:
  24. br label %for.body
  25. for.body:
  26. %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
  27. %iv.trunc = trunc i64 %indvars.iv to i32
  28. %mul = mul i32 %iv.trunc, %BStride
  29. %mul64 = zext i32 %mul to i64
  30. %arrayidx = getelementptr inbounds i32, i32* %B, i64 %mul64
  31. %0 = load i32, i32* %arrayidx, align 4
  32. %mul2 = mul nsw i64 %indvars.iv, %CStride
  33. %arrayidx3 = getelementptr inbounds i32, i32* %C, i64 %mul2
  34. %1 = load i32, i32* %arrayidx3, align 4
  35. %mul4 = mul nsw i32 %1, %0
  36. %mul3 = mul nsw i64 %indvars.iv, %AStride
  37. %arrayidx7 = getelementptr inbounds i32, i32* %A, i64 %mul3
  38. store i32 %mul4, i32* %arrayidx7, align 4
  39. %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  40. %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  41. %exitcond = icmp eq i32 %lftr.wideiv, %N
  42. br i1 %exitcond, label %for.end.loopexit, label %for.body
  43. for.end.loopexit:
  44. br label %for.end
  45. for.end:
  46. ret void
  47. }
  48. ; We used to crash on this function because we removed the fptosi cast when
  49. ; replacing the symbolic stride '%conv'.
  50. ; PR18480
  51. ; CHECK-LABEL: fn1
  52. ; CHECK: load <2 x double>
  53. define void @fn1(double* noalias %x, double* noalias %c, double %a) {
  54. entry:
  55. %conv = fptosi double %a to i32
  56. %cmp8 = icmp sgt i32 %conv, 0
  57. br i1 %cmp8, label %for.body.preheader, label %for.end
  58. for.body.preheader:
  59. br label %for.body
  60. for.body:
  61. %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
  62. %0 = trunc i64 %indvars.iv to i32
  63. %mul = mul nsw i32 %0, %conv
  64. %idxprom = sext i32 %mul to i64
  65. %arrayidx = getelementptr inbounds double, double* %x, i64 %idxprom
  66. %1 = load double, double* %arrayidx, align 8
  67. %arrayidx3 = getelementptr inbounds double, double* %c, i64 %indvars.iv
  68. store double %1, double* %arrayidx3, align 8
  69. %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  70. %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  71. %exitcond = icmp eq i32 %lftr.wideiv, %conv
  72. br i1 %exitcond, label %for.end.loopexit, label %for.body
  73. for.end.loopexit:
  74. br label %for.end
  75. for.end:
  76. ret void
  77. }