; ashr-crash.ll
  1. ; RUN: opt -basicaa -loop-rotate -licm -instcombine -indvars -loop-unroll -S %s | FileCheck %s
  2. ;
  3. ; PR18361: ScalarEvolution::getAddRecExpr():
  4. ; Assertion `isLoopInvariant(Operands[i],...
  5. ;
  6. ; After a series of loop optimizations, SCEV's LoopDispositions grow stale.
  7. ; In particular, LoopSimplify hoists %cmp4, resulting in this SCEV for %add:
  8. ; {(zext i1 %cmp4 to i32),+,1}<nw><%for.cond1.preheader>
  9. ;
  10. ; When recomputing the SCEV for %ashr, we truncate the operands to get:
  11. ; (zext i1 %cmp4 to i16)
  12. ;
  13. ; This SCEV was never mapped to a value so never invalidated. It's
  14. ; loop disposition is still marked as non-loop-invariant, which is
  15. ; inconsistent with the AddRec.
  16. target datalayout = "e-i64:64-f80:128-n8:16:32:64-S128"
  17. target triple = "x86_64-apple-macosx"
  18. @d = common global i32 0, align 4
  19. @a = common global i32 0, align 4
  20. @c = common global i32 0, align 4
  21. @b = common global i32 0, align 4
  22. ; Check that the def-use chain that leads to the bad SCEV is still
  23. ; there.
  24. ;
  25. ; CHECK-LABEL: @foo
  26. ; CHECK-LABEL: entry:
  27. ; CHECK-LABEL: for.cond1.preheader:
  28. ; CHECK-LABEL: for.body3:
  29. ; CHECK: %cmp4.le.le
  30. ; CHECK: %conv.le.le = zext i1 %cmp4.le.le to i32
  31. ; CHECK: %xor.le.le = xor i32 %conv6.le.le, 1
  32. define void @foo() {
  33. entry:
  34. br label %for.cond
  35. for.cond: ; preds = %for.inc7, %entry
  36. %storemerge = phi i32 [ 0, %entry ], [ %inc8, %for.inc7 ]
  37. %f.0 = phi i32 [ undef, %entry ], [ %f.1, %for.inc7 ]
  38. store i32 %storemerge, i32* @d, align 4
  39. %cmp = icmp slt i32 %storemerge, 1
  40. br i1 %cmp, label %for.cond1, label %for.end9
  41. for.cond1: ; preds = %for.cond, %for.body3
  42. %storemerge1 = phi i32 [ %inc, %for.body3 ], [ 0, %for.cond ]
  43. %f.1 = phi i32 [ %xor, %for.body3 ], [ %f.0, %for.cond ]
  44. store i32 %storemerge1, i32* @a, align 4
  45. %cmp2 = icmp slt i32 %storemerge1, 1
  46. br i1 %cmp2, label %for.body3, label %for.inc7
  47. for.body3: ; preds = %for.cond1
  48. %0 = load i32, i32* @c, align 4
  49. %cmp4 = icmp sge i32 %storemerge1, %0
  50. %conv = zext i1 %cmp4 to i32
  51. %1 = load i32, i32* @d, align 4
  52. %add = add nsw i32 %conv, %1
  53. %sext = shl i32 %add, 16
  54. %conv6 = ashr exact i32 %sext, 16
  55. %xor = xor i32 %conv6, 1
  56. %inc = add nsw i32 %storemerge1, 1
  57. br label %for.cond1
  58. for.inc7: ; preds = %for.cond1
  59. %2 = load i32, i32* @d, align 4
  60. %inc8 = add nsw i32 %2, 1
  61. br label %for.cond
  62. for.end9: ; preds = %for.cond
  63. %cmp10 = icmp sgt i32 %f.0, 0
  64. br i1 %cmp10, label %if.then, label %if.end
  65. if.then: ; preds = %for.end9
  66. store i32 0, i32* @b, align 4
  67. br label %if.end
  68. if.end: ; preds = %if.then, %for.end9
  69. ret void
  70. }