; long_chains.ll — SLP vectorizer regression test (long x*x+1 dependency chains)
  1. ; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
  2. target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
  3. target triple = "x86_64-apple-macosx10.8.0"
  4. ; At this point we can't vectorize only parts of the tree.
  5. ; CHECK: test
  6. ; CHECK: insertelement <2 x i8>
  7. ; CHECK: insertelement <2 x i8>
  8. ; CHECK: sitofp <2 x i8>
  9. ; CHECK: fmul <2 x double>
  10. ; CHECK: ret
  11. define i32 @test(double* nocapture %A, i8* nocapture %B) {
  12. entry:
  13. %0 = load i8, i8* %B, align 1
  14. %arrayidx1 = getelementptr inbounds i8, i8* %B, i64 1
  15. %1 = load i8, i8* %arrayidx1, align 1
  16. %add = add i8 %0, 3
  17. %add4 = add i8 %1, 3
  18. %conv6 = sitofp i8 %add to double
  19. %conv7 = sitofp i8 %add4 to double
  20. %mul = fmul double %conv6, %conv6
  21. %add8 = fadd double %mul, 1.000000e+00
  22. %mul9 = fmul double %conv7, %conv7
  23. %add10 = fadd double %mul9, 1.000000e+00
  24. %mul11 = fmul double %add8, %add8
  25. %add12 = fadd double %mul11, 1.000000e+00
  26. %mul13 = fmul double %add10, %add10
  27. %add14 = fadd double %mul13, 1.000000e+00
  28. %mul15 = fmul double %add12, %add12
  29. %add16 = fadd double %mul15, 1.000000e+00
  30. %mul17 = fmul double %add14, %add14
  31. %add18 = fadd double %mul17, 1.000000e+00
  32. %mul19 = fmul double %add16, %add16
  33. %add20 = fadd double %mul19, 1.000000e+00
  34. %mul21 = fmul double %add18, %add18
  35. %add22 = fadd double %mul21, 1.000000e+00
  36. %mul23 = fmul double %add20, %add20
  37. %add24 = fadd double %mul23, 1.000000e+00
  38. %mul25 = fmul double %add22, %add22
  39. %add26 = fadd double %mul25, 1.000000e+00
  40. store double %add24, double* %A, align 8
  41. %arrayidx28 = getelementptr inbounds double, double* %A, i64 1
  42. store double %add26, double* %arrayidx28, align 8
  43. ret i32 undef
  44. }