; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s --check-prefix PTX
; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s --check-prefix PTX
; RUN: opt < %s -S -nvptx-favor-non-generic -dce | FileCheck %s --check-prefix IR

@array = internal addrspace(3) global [10 x float] zeroinitializer, align 4
@scalar = internal addrspace(3) global float 0.000000e+00, align 4

; Verifies nvptx-favor-non-generic correctly optimizes generic address space
; usage to non-generic address space usage for the patterns we claim to handle:
; 1. load cast
; 2. store cast
; 3. load gep cast
; 4. store gep cast
; gep and cast can be an instruction or a constant expression. This function
; tries all possible combinations.
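;
; For example, for pattern 1 the pass is expected to fold the cast into the
; load (a sketch of the rewrite; the IR checks below only assert that no
; addrspacecast survives):
;   %f = load float, float* addrspacecast (float addrspace(3)* @scalar to float*)
; becomes
;   %f = load float, float addrspace(3)* @scalar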
define float @ld_st_shared_f32(i32 %i, float %v) {
; IR-LABEL: @ld_st_shared_f32
; IR-NOT: addrspacecast
; PTX-LABEL: ld_st_shared_f32(
  ; load cast
  %1 = load float, float* addrspacecast (float addrspace(3)* @scalar to float*), align 4
; PTX: ld.shared.f32 %f{{[0-9]+}}, [scalar];
  ; store cast
  store float %v, float* addrspacecast (float addrspace(3)* @scalar to float*), align 4
; PTX: st.shared.f32 [scalar], %f{{[0-9]+}};
  ; use syncthreads to keep the optimizer from combining accesses across the
  ; separate patterns being checked
  call void @llvm.cuda.syncthreads()
; PTX: bar.sync 0;
  ; cast; load
  %2 = addrspacecast float addrspace(3)* @scalar to float*
  %3 = load float, float* %2, align 4
; PTX: ld.shared.f32 %f{{[0-9]+}}, [scalar];
  ; cast; store
  store float %v, float* %2, align 4
; PTX: st.shared.f32 [scalar], %f{{[0-9]+}};
  call void @llvm.cuda.syncthreads()
; PTX: bar.sync 0;
  ; load gep cast
  %4 = load float, float* getelementptr inbounds ([10 x float], [10 x float]* addrspacecast ([10 x float] addrspace(3)* @array to [10 x float]*), i32 0, i32 5), align 4
; PTX: ld.shared.f32 %f{{[0-9]+}}, [array+20];
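  ; (element 5 of a [10 x float] sits at byte offset 5 * 4 = 20, hence array+20)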
  ; store gep cast
  store float %v, float* getelementptr inbounds ([10 x float], [10 x float]* addrspacecast ([10 x float] addrspace(3)* @array to [10 x float]*), i32 0, i32 5), align 4
; PTX: st.shared.f32 [array+20], %f{{[0-9]+}};
  call void @llvm.cuda.syncthreads()
; PTX: bar.sync 0;
  ; gep cast; load
  %5 = getelementptr inbounds [10 x float], [10 x float]* addrspacecast ([10 x float] addrspace(3)* @array to [10 x float]*), i32 0, i32 5
  %6 = load float, float* %5, align 4
; PTX: ld.shared.f32 %f{{[0-9]+}}, [array+20];
  ; gep cast; store
  store float %v, float* %5, align 4
; PTX: st.shared.f32 [array+20], %f{{[0-9]+}};
  call void @llvm.cuda.syncthreads()
; PTX: bar.sync 0;
  ; cast; gep; load
  %7 = addrspacecast [10 x float] addrspace(3)* @array to [10 x float]*
  %8 = getelementptr inbounds [10 x float], [10 x float]* %7, i32 0, i32 %i
  %9 = load float, float* %8, align 4
; PTX: ld.shared.f32 %f{{[0-9]+}}, [%{{(r|rl|rd)[0-9]+}}];
  ; cast; gep; store
  store float %v, float* %8, align 4
; PTX: st.shared.f32 [%{{(r|rl|rd)[0-9]+}}], %f{{[0-9]+}};
  call void @llvm.cuda.syncthreads()
; PTX: bar.sync 0;
  %sum2 = fadd float %1, %3
  %sum3 = fadd float %sum2, %4
  %sum4 = fadd float %sum3, %6
  %sum5 = fadd float %sum4, %9
  ret float %sum5
}

; When hoisting an addrspacecast between different pointer types, replace the
; addrspacecast with a bitcast.
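; For example (a sketch of the rewrite the IR check below asserts):
;   load i32, i32* addrspacecast (float addrspace(3)* @scalar to i32*)
; becomes
;   load i32, i32 addrspace(3)* bitcast (float addrspace(3)* @scalar to i32 addrspace(3)*)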
define i32 @ld_int_from_float() {
; IR-LABEL: @ld_int_from_float
; IR: load i32, i32 addrspace(3)* bitcast (float addrspace(3)* @scalar to i32 addrspace(3)*)
; PTX-LABEL: ld_int_from_float(
; PTX: ld.shared.u{{(32|64)}}
  %1 = load i32, i32* addrspacecast (float addrspace(3)* @scalar to i32*), align 4
  ret i32 %1
}
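
; Verifies that an addrspacecast is hoisted past geps and bitcasts, so the
; entire access chain stays in the global address space (see the IR-NEXT
; checks below).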
define i32 @ld_int_from_global_float(float addrspace(1)* %input, i32 %i, i32 %j) {
; IR-LABEL: @ld_int_from_global_float(
; PTX-LABEL: ld_int_from_global_float(
  %1 = addrspacecast float addrspace(1)* %input to float*
  %2 = getelementptr float, float* %1, i32 %i
; IR-NEXT: getelementptr float, float addrspace(1)* %input, i32 %i
  %3 = getelementptr float, float* %2, i32 %j
; IR-NEXT: getelementptr float, float addrspace(1)* {{%[^,]+}}, i32 %j
  %4 = bitcast float* %3 to i32*
; IR-NEXT: bitcast float addrspace(1)* {{%[^ ]+}} to i32 addrspace(1)*
  %5 = load i32, i32* %4
; IR-NEXT: load i32, i32 addrspace(1)* {{%.+}}
; PTX: ld.global
  ret i32 %5
}
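
; Verifies that the pass also looks through nested constant expressions:
; bitcast(gep(addrspacecast(@array))) should collapse into a direct access to
; the shared array.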
define void @nested_const_expr() {
; PTX-LABEL: nested_const_expr(
  ; store 1 to bitcast(gep(addrspacecast(array), 0, 1))
  store i32 1, i32* bitcast (float* getelementptr ([10 x float], [10 x float]* addrspacecast ([10 x float] addrspace(3)* @array to [10 x float]*), i64 0, i64 1) to i32*), align 4
; PTX: mov.u32 %r1, 1;
; PTX-NEXT: st.shared.u32 [array+4], %r1;
  ret void
}
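
; Verifies that once every use of the cast is rewritten, the addrspacecast and
; the generic gep are replaced wholesale (RAUW'd), leaving no generic-address-
; space instructions behind (see the IR-NEXT checks below).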
define void @rauw(float addrspace(1)* %input) {
  %generic_input = addrspacecast float addrspace(1)* %input to float*
  %addr = getelementptr float, float* %generic_input, i64 10
  %v = load float, float* %addr
  store float %v, float* %addr
  ret void
; IR-LABEL: @rauw(
; IR-NEXT: %1 = getelementptr float, float addrspace(1)* %input, i64 10
; IR-NEXT: %v = load float, float addrspace(1)* %1
; IR-NEXT: store float %v, float addrspace(1)* %1
; IR-NEXT: ret void
}

declare void @llvm.cuda.syncthreads() #3

attributes #3 = { noduplicate nounwind }