; sroa.ll — SLP vectorizer regression test
  1. ; RUN: opt -S -mcpu=swift -mtriple=thumbv7-apple-ios -basicaa -slp-vectorizer < %s | FileCheck %s
  2. target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
  3. %class.Complex = type { double, double }
  4. ; Code like this is the result of SROA. Make sure we don't vectorize this
  5. ; because the scalar version of the shl/or are handled by the
  6. ; backend and disappear, the vectorized code stays.
  7. ; CHECK-LABEL: SROAed
  8. ; CHECK-NOT: shl nuw <2 x i64>
  9. ; CHECK-NOT: or <2 x i64>
  10. define void @SROAed(%class.Complex* noalias nocapture sret %agg.result, [4 x i32] %a.coerce, [4 x i32] %b.coerce) {
  11. entry:
  12. %a.coerce.fca.0.extract = extractvalue [4 x i32] %a.coerce, 0
  13. %a.sroa.0.0.insert.ext = zext i32 %a.coerce.fca.0.extract to i64
  14. %a.coerce.fca.1.extract = extractvalue [4 x i32] %a.coerce, 1
  15. %a.sroa.0.4.insert.ext = zext i32 %a.coerce.fca.1.extract to i64
  16. %a.sroa.0.4.insert.shift = shl nuw i64 %a.sroa.0.4.insert.ext, 32
  17. %a.sroa.0.4.insert.insert = or i64 %a.sroa.0.4.insert.shift, %a.sroa.0.0.insert.ext
  18. %0 = bitcast i64 %a.sroa.0.4.insert.insert to double
  19. %a.coerce.fca.2.extract = extractvalue [4 x i32] %a.coerce, 2
  20. %a.sroa.3.8.insert.ext = zext i32 %a.coerce.fca.2.extract to i64
  21. %a.coerce.fca.3.extract = extractvalue [4 x i32] %a.coerce, 3
  22. %a.sroa.3.12.insert.ext = zext i32 %a.coerce.fca.3.extract to i64
  23. %a.sroa.3.12.insert.shift = shl nuw i64 %a.sroa.3.12.insert.ext, 32
  24. %a.sroa.3.12.insert.insert = or i64 %a.sroa.3.12.insert.shift, %a.sroa.3.8.insert.ext
  25. %1 = bitcast i64 %a.sroa.3.12.insert.insert to double
  26. %b.coerce.fca.0.extract = extractvalue [4 x i32] %b.coerce, 0
  27. %b.sroa.0.0.insert.ext = zext i32 %b.coerce.fca.0.extract to i64
  28. %b.coerce.fca.1.extract = extractvalue [4 x i32] %b.coerce, 1
  29. %b.sroa.0.4.insert.ext = zext i32 %b.coerce.fca.1.extract to i64
  30. %b.sroa.0.4.insert.shift = shl nuw i64 %b.sroa.0.4.insert.ext, 32
  31. %b.sroa.0.4.insert.insert = or i64 %b.sroa.0.4.insert.shift, %b.sroa.0.0.insert.ext
  32. %2 = bitcast i64 %b.sroa.0.4.insert.insert to double
  33. %b.coerce.fca.2.extract = extractvalue [4 x i32] %b.coerce, 2
  34. %b.sroa.3.8.insert.ext = zext i32 %b.coerce.fca.2.extract to i64
  35. %b.coerce.fca.3.extract = extractvalue [4 x i32] %b.coerce, 3
  36. %b.sroa.3.12.insert.ext = zext i32 %b.coerce.fca.3.extract to i64
  37. %b.sroa.3.12.insert.shift = shl nuw i64 %b.sroa.3.12.insert.ext, 32
  38. %b.sroa.3.12.insert.insert = or i64 %b.sroa.3.12.insert.shift, %b.sroa.3.8.insert.ext
  39. %3 = bitcast i64 %b.sroa.3.12.insert.insert to double
  40. %add = fadd double %0, %2
  41. %add3 = fadd double %1, %3
  42. %re.i.i = getelementptr inbounds %class.Complex, %class.Complex* %agg.result, i32 0, i32 0
  43. store double %add, double* %re.i.i, align 4
  44. %im.i.i = getelementptr inbounds %class.Complex, %class.Complex* %agg.result, i32 0, i32 1
  45. store double %add3, double* %im.i.i, align 4
  46. ret void
  47. }