; loop1.ll (restored: a scraped copy had a file-size banner and per-line
; number counters fused into the text; those extraction artifacts are removed)
  1. target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
  2. target triple = "x86_64-unknown-linux-gnu"
  3. ; RUN: opt < %s -bb-vectorize -bb-vectorize-req-chain-depth=3 -bb-vectorize-ignore-target-info -instcombine -gvn -S | FileCheck %s
  4. ; RUN: opt < %s -basicaa -loop-unroll -unroll-threshold=45 -unroll-allow-partial -bb-vectorize -bb-vectorize-req-chain-depth=3 -bb-vectorize-ignore-target-info -instcombine -gvn -S | FileCheck %s -check-prefix=CHECK-UNRL
  5. ; The second check covers the use of alias analysis (with loop unrolling).
  6. define void @test1(double* noalias %out, double* noalias %in1, double* noalias %in2) nounwind uwtable {
  7. entry:
  8. br label %for.body
  9. ; CHECK-LABEL: @test1(
  10. ; CHECK-UNRL-LABEL: @test1(
  11. for.body: ; preds = %for.body, %entry
  12. %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  13. %arrayidx = getelementptr inbounds double, double* %in1, i64 %indvars.iv
  14. %0 = load double, double* %arrayidx, align 8
  15. %arrayidx2 = getelementptr inbounds double, double* %in2, i64 %indvars.iv
  16. %1 = load double, double* %arrayidx2, align 8
  17. %mul = fmul double %0, %0
  18. %mul3 = fmul double %0, %1
  19. %add = fadd double %mul, %mul3
  20. %add4 = fadd double %1, %1
  21. %add5 = fadd double %add4, %0
  22. %mul6 = fmul double %0, %add5
  23. %add7 = fadd double %add, %mul6
  24. %mul8 = fmul double %1, %1
  25. %add9 = fadd double %0, %0
  26. %add10 = fadd double %add9, %0
  27. %mul11 = fmul double %mul8, %add10
  28. %add12 = fadd double %add7, %mul11
  29. %arrayidx14 = getelementptr inbounds double, double* %out, i64 %indvars.iv
  30. store double %add12, double* %arrayidx14, align 8
  31. %indvars.iv.next = add i64 %indvars.iv, 1
  32. %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  33. %exitcond = icmp eq i32 %lftr.wideiv, 10
  34. br i1 %exitcond, label %for.end, label %for.body
  35. ; CHECK: %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  36. ; CHECK: %arrayidx = getelementptr inbounds double, double* %in1, i64 %indvars.iv
  37. ; CHECK: %0 = load double, double* %arrayidx, align 8
  38. ; CHECK: %arrayidx2 = getelementptr inbounds double, double* %in2, i64 %indvars.iv
  39. ; CHECK: %1 = load double, double* %arrayidx2, align 8
  40. ; CHECK: %mul = fmul double %0, %0
  41. ; CHECK: %mul3 = fmul double %0, %1
  42. ; CHECK: %add = fadd double %mul, %mul3
  43. ; CHECK: %mul8 = fmul double %1, %1
  44. ; CHECK: %add4.v.i1.1 = insertelement <2 x double> undef, double %1, i32 0
  45. ; CHECK: %add4.v.i1.2 = insertelement <2 x double> %add4.v.i1.1, double %0, i32 1
  46. ; CHECK: %add4 = fadd <2 x double> %add4.v.i1.2, %add4.v.i1.2
  47. ; CHECK: %add5.v.i1.1 = insertelement <2 x double> undef, double %0, i32 0
  48. ; CHECK: %add5.v.i1.2 = insertelement <2 x double> %add5.v.i1.1, double %0, i32 1
  49. ; CHECK: %add5 = fadd <2 x double> %add4, %add5.v.i1.2
  50. ; CHECK: %mul6.v.i0.2 = insertelement <2 x double> %add5.v.i1.1, double %mul8, i32 1
  51. ; CHECK: %mul6 = fmul <2 x double> %mul6.v.i0.2, %add5
  52. ; CHECK: %mul6.v.r1 = extractelement <2 x double> %mul6, i32 0
  53. ; CHECK: %mul6.v.r2 = extractelement <2 x double> %mul6, i32 1
  54. ; CHECK: %add7 = fadd double %add, %mul6.v.r1
  55. ; CHECK: %add12 = fadd double %add7, %mul6.v.r2
  56. ; CHECK: %arrayidx14 = getelementptr inbounds double, double* %out, i64 %indvars.iv
  57. ; CHECK: store double %add12, double* %arrayidx14, align 8
  58. ; CHECK: %indvars.iv.next = add i64 %indvars.iv, 1
  59. ; CHECK: %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  60. ; CHECK: %exitcond = icmp eq i32 %lftr.wideiv, 10
  61. ; CHECK: br i1 %exitcond, label %for.end, label %for.body
  62. ; CHECK-UNRL: %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next.1, %for.body ]
  63. ; CHECK-UNRL: %arrayidx = getelementptr inbounds double, double* %in1, i64 %indvars.iv
  64. ; CHECK-UNRL: %0 = bitcast double* %arrayidx to <2 x double>*
  65. ; CHECK-UNRL: %arrayidx2 = getelementptr inbounds double, double* %in2, i64 %indvars.iv
  66. ; CHECK-UNRL: %1 = bitcast double* %arrayidx2 to <2 x double>*
  67. ; CHECK-UNRL: %arrayidx14 = getelementptr inbounds double, double* %out, i64 %indvars.iv
  68. ; CHECK-UNRL: %2 = load <2 x double>, <2 x double>* %0, align 8
  69. ; CHECK-UNRL: %3 = load <2 x double>, <2 x double>* %1, align 8
  70. ; CHECK-UNRL: %mul = fmul <2 x double> %2, %2
  71. ; CHECK-UNRL: %mul3 = fmul <2 x double> %2, %3
  72. ; CHECK-UNRL: %add = fadd <2 x double> %mul, %mul3
  73. ; CHECK-UNRL: %add4 = fadd <2 x double> %3, %3
  74. ; CHECK-UNRL: %add5 = fadd <2 x double> %add4, %2
  75. ; CHECK-UNRL: %mul6 = fmul <2 x double> %2, %add5
  76. ; CHECK-UNRL: %add7 = fadd <2 x double> %add, %mul6
  77. ; CHECK-UNRL: %mul8 = fmul <2 x double> %3, %3
  78. ; CHECK-UNRL: %add9 = fadd <2 x double> %2, %2
  79. ; CHECK-UNRL: %add10 = fadd <2 x double> %add9, %2
  80. ; CHECK-UNRL: %mul11 = fmul <2 x double> %mul8, %add10
  81. ; CHECK-UNRL: %add12 = fadd <2 x double> %add7, %mul11
  82. ; CHECK-UNRL: %4 = bitcast double* %arrayidx14 to <2 x double>*
  83. ; CHECK-UNRL: store <2 x double> %add12, <2 x double>* %4, align 8
  84. ; CHECK-UNRL: %indvars.iv.next.1 = add nsw i64 %indvars.iv, 2
  85. ; CHECK-UNRL: %lftr.wideiv.1 = trunc i64 %indvars.iv.next.1 to i32
  86. ; CHECK-UNRL: %exitcond.1 = icmp eq i32 %lftr.wideiv.1, 10
  87. ; CHECK-UNRL: br i1 %exitcond.1, label %for.end, label %for.body
  88. for.end: ; preds = %for.body
  89. ret void
  90. }