vectorized-loop.ll

; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
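; This test feeds an already-vectorized loop (plus its scalar remainder loop)
; to the cost-model analysis and uses the CHECK lines below to verify the
; per-instruction cost estimates reported for corei7-avx.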

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"

define i32 @foo(i32* noalias nocapture %A, i32* noalias nocapture %B, i32 %start, i32 %end) nounwind uwtable ssp {
entry:
  ;CHECK: cost of 1 {{.*}} icmp
  %cmp7 = icmp slt i32 %start, %end
  br i1 %cmp7, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry
  ;CHECK: cost of 1 {{.*}} sext
  %0 = sext i32 %start to i64
  %1 = sub i32 %end, %start
  %2 = zext i32 %1 to i64
  %end.idx = add i64 %2, %0
  ;CHECK: cost of 1 {{.*}} add
  %n.vec = and i64 %2, 4294967288
  %end.idx.rnd.down = add i64 %n.vec, %0
  ;CHECK: cost of 1 {{.*}} icmp
  %cmp.zero = icmp eq i64 %n.vec, 0
  br i1 %cmp.zero, label %middle.block, label %vector.body

vector.body:                                      ; preds = %for.body.lr.ph, %vector.body
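  ; Vectorized loop body: each iteration loads, multiplies by 5, adds and
  ; stores eight i32 elements as <8 x i32> vectors.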
  %index = phi i64 [ %index.next, %vector.body ], [ %0, %for.body.lr.ph ]
  %3 = add i64 %index, 2
  %4 = getelementptr inbounds i32, i32* %B, i64 %3
  ;CHECK: cost of 0 {{.*}} bitcast
  %5 = bitcast i32* %4 to <8 x i32>*
  ;CHECK: cost of 2 {{.*}} load
  %6 = load <8 x i32>, <8 x i32>* %5, align 4
  ;CHECK: cost of 4 {{.*}} mul
  %7 = mul nsw <8 x i32> %6, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
  %8 = getelementptr inbounds i32, i32* %A, i64 %index
  %9 = bitcast i32* %8 to <8 x i32>*
  ;CHECK: cost of 2 {{.*}} load
  %10 = load <8 x i32>, <8 x i32>* %9, align 4
  ;CHECK: cost of 4 {{.*}} add
  %11 = add nsw <8 x i32> %10, %7
  ;CHECK: cost of 2 {{.*}} store
  store <8 x i32> %11, <8 x i32>* %9, align 4
  %index.next = add i64 %index, 8
  %12 = icmp eq i64 %index.next, %end.idx.rnd.down
  ;CHECK: cost of 0 {{.*}} br
  br i1 %12, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body, %for.body.lr.ph
  %cmp.n = icmp eq i64 %end.idx, %end.idx.rnd.down
  br i1 %cmp.n, label %for.end, label %for.body

for.body:                                         ; preds = %middle.block, %for.body
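  ; Scalar remainder loop: handles the iterations left over after the
  ; vectorized loop, one element at a time.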
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %end.idx.rnd.down, %middle.block ]
  %13 = add nsw i64 %indvars.iv, 2
  %arrayidx = getelementptr inbounds i32, i32* %B, i64 %13
  ;CHECK: cost of 1 {{.*}} load
  %14 = load i32, i32* %arrayidx, align 4
  ;CHECK: cost of 1 {{.*}} mul
  %mul = mul nsw i32 %14, 5
  %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  ;CHECK: cost of 1 {{.*}} load
  %15 = load i32, i32* %arrayidx2, align 4
  %add3 = add nsw i32 %15, %mul
  store i32 %add3, i32* %arrayidx2, align 4
  %indvars.iv.next = add i64 %indvars.iv, 1
  ;CHECK: cost of 0 {{.*}} trunc
  %16 = trunc i64 %indvars.iv.next to i32
  %cmp = icmp slt i32 %16, %end
  ;CHECK: cost of 0 {{.*}} br
  br i1 %cmp, label %for.body, label %for.end

for.end:                                          ; preds = %middle.block, %for.body, %entry
  ;CHECK: cost of 0 {{.*}} ret
  ret i32 undef
}