//===--- SemaCUDA.cpp - Semantic Analysis for CUDA constructs -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// \brief This file implements semantic analysis for CUDA constructs.
///
//===----------------------------------------------------------------------===//

#include "clang/Sema/Sema.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"

using namespace clang;
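
// ActOnCUDAExecConfigExpr builds the execution-configuration call for a CUDA
// kernel launch: LLLLoc and GGGLoc are the locations of the '<<<' and '>>>'
// tokens that delimit the configuration arguments in the launch expression.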
ExprResult Sema::ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
                                         MultiExprArg ExecConfig,
                                         SourceLocation GGGLoc) {
  FunctionDecl *ConfigDecl = Context.getcudaConfigureCallDecl();
  if (!ConfigDecl)
    return ExprError(Diag(LLLLoc, diag::err_undeclared_var_use)
                     << "cudaConfigureCall");
  QualType ConfigQTy = ConfigDecl->getType();

  DeclRefExpr *ConfigDR = new (Context)
      DeclRefExpr(ConfigDecl, false, ConfigQTy, VK_LValue, LLLLoc);
  MarkFunctionReferenced(LLLLoc, ConfigDecl);

  return ActOnCallExpr(S, ConfigDR, LLLLoc, ExecConfig, GGGLoc, nullptr,
                       /*IsExecConfig=*/true);
}
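
// For illustration: a launch written as
//   kernel<<<Blocks, Threads>>>(args);
// arrives here with {Blocks, Threads} as ExecConfig and is rewritten into a
// call to the cudaConfigureCall declaration registered with the ASTContext.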

/// IdentifyCUDATarget - Determine the CUDA compilation target for this
/// function.
Sema::CUDAFunctionTarget Sema::IdentifyCUDATarget(const FunctionDecl *D) {
  if (D->hasAttr<CUDAInvalidTargetAttr>())
    return CFT_InvalidTarget;

  if (D->hasAttr<CUDAGlobalAttr>())
    return CFT_Global;

  if (D->hasAttr<CUDADeviceAttr>()) {
    if (D->hasAttr<CUDAHostAttr>())
      return CFT_HostDevice;
    return CFT_Device;
  } else if (D->hasAttr<CUDAHostAttr>()) {
    return CFT_Host;
  } else if (D->isImplicit()) {
    // Some implicit declarations (like intrinsic functions) are not marked.
    // Set the most lenient target on them for maximal flexibility.
    return CFT_HostDevice;
  }

  return CFT_Host;
}
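
/// CheckCUDATarget - Determine whether Caller may call Callee, based on their
/// CUDA compilation targets. Following the convention of the checks below,
/// returns true when the call is *not* allowed and false when it is.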
bool Sema::CheckCUDATarget(const FunctionDecl *Caller,
                           const FunctionDecl *Callee) {
  // The CUDADisableTargetCallChecks option short-circuits this check: we
  // assume all cross-target calls are valid.
  if (getLangOpts().CUDADisableTargetCallChecks)
    return false;

  CUDAFunctionTarget CallerTarget = IdentifyCUDATarget(Caller),
                     CalleeTarget = IdentifyCUDATarget(Callee);

  // If one of the targets is invalid, the check always fails, no matter what
  // the other target is.
  if (CallerTarget == CFT_InvalidTarget || CalleeTarget == CFT_InvalidTarget)
    return true;

  // CUDA B.1.1 "The __device__ qualifier declares a function that is [...]
  // Callable from the device only."
  if (CallerTarget == CFT_Host && CalleeTarget == CFT_Device)
    return true;

  // CUDA B.1.2 "The __global__ qualifier declares a function that is [...]
  // Callable from the host only."
  // CUDA B.1.3 "The __host__ qualifier declares a function that is [...]
  // Callable from the host only."
  if ((CallerTarget == CFT_Device || CallerTarget == CFT_Global) &&
      (CalleeTarget == CFT_Host || CalleeTarget == CFT_Global))
    return true;
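
  // For example (illustrative declarations only), both calls below would be
  // rejected by the two checks above:
  //   __global__ void Kernel() { HostFn(); } // __global__ calling __host__
  //   __device__ void DevFn() { Kernel(); }  // __device__ calling __global__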

  // CUDA B.1.3 "The __device__ and __host__ qualifiers can be used together
  // however, in which case the function is compiled for both the host and the
  // device. The __CUDA_ARCH__ macro [...] can be used to differentiate code
  // paths between host and device."
  if (CallerTarget == CFT_HostDevice && CalleeTarget != CFT_HostDevice) {
    // If the caller is implicit then the check always passes.
    if (Caller->isImplicit()) return false;
    bool InDeviceMode = getLangOpts().CUDAIsDevice;
    if (!InDeviceMode && CalleeTarget != CFT_Host)
      return true;
    if (InDeviceMode && CalleeTarget != CFT_Device) {
      // Allow host device functions to call host functions if explicitly
      // requested.
      if (CalleeTarget == CFT_Host &&
          getLangOpts().CUDAAllowHostCallsFromHostDevice) {
        Diag(Caller->getLocation(),
             diag::warn_host_calls_from_host_device)
            << Callee->getNameAsString() << Caller->getNameAsString();
        return false;
      }
      return true;
    }
  }

  return false;
}
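
// Summary of the rules enforced above, as read from the code (H = __host__,
// D = __device__, G = __global__, HD = __host__ __device__):
//   caller H may call H, G, HD (not D);
//   caller D or G may call D, HD (not H, G);
//   caller HD must call a callee usable on the side currently being compiled,
//   except for implicit callers and, when CUDAAllowHostCallsFromHostDevice is
//   set, HD -> H calls in device mode (which only warn).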

/// When an implicitly-declared special member has to invoke more than one
/// base/field special member, conflicts may occur in the targets of these
/// members. For example, if one base's member is __host__ and another's is
/// __device__, it's a conflict.
/// This function figures out if the given targets \param Target1 and
/// \param Target2 conflict, and if they do not it fills in
/// \param ResolvedTarget with a target that resolves for both calls.
/// \return true if there's a conflict, false otherwise.
static bool
resolveCalleeCUDATargetConflict(Sema::CUDAFunctionTarget Target1,
                                Sema::CUDAFunctionTarget Target2,
                                Sema::CUDAFunctionTarget *ResolvedTarget) {
  if (Target1 == Sema::CFT_Global && Target2 == Sema::CFT_Global) {
    // TODO: this shouldn't happen, really. Methods cannot be marked
    // __global__. Clang should detect this earlier and produce an error. Then
    // this condition can be changed to an assertion.
    return true;
  }
  if (Target1 == Sema::CFT_HostDevice) {
    *ResolvedTarget = Target2;
  } else if (Target2 == Sema::CFT_HostDevice) {
    *ResolvedTarget = Target1;
  } else if (Target1 != Target2) {
    return true;
  } else {
    *ResolvedTarget = Target1;
  }
  return false;
}
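
/// inferCUDATargetForImplicitSpecialMember - Infer a CUDA target for the
/// implicitly-declared special member MemberDecl of ClassDecl from the base
/// and field special members it needs to invoke, attaching the corresponding
/// implicit attributes. Returns true (after marking the member with
/// CUDAInvalidTargetAttr) if those callees have irreconcilable targets.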
bool Sema::inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
                                                   CXXSpecialMember CSM,
                                                   CXXMethodDecl *MemberDecl,
                                                   bool ConstRHS,
                                                   bool Diagnose) {
  llvm::Optional<CUDAFunctionTarget> InferredTarget;

  // We're going to invoke special member lookup; mark that these special
  // members are called from this one, and not from its caller.
  ContextRAII MethodContext(*this, MemberDecl);

  // Look for special members in base classes that should be invoked from here.
  // Infer the target of this member based on the ones it should call.
  // Skip direct and indirect virtual bases for abstract classes.
  llvm::SmallVector<const CXXBaseSpecifier *, 16> Bases;
  for (const auto &B : ClassDecl->bases()) {
    if (!B.isVirtual()) {
      Bases.push_back(&B);
    }
  }

  if (!ClassDecl->isAbstract()) {
    for (const auto &VB : ClassDecl->vbases()) {
      Bases.push_back(&VB);
    }
  }

  for (const auto *B : Bases) {
    const RecordType *BaseType = B->getType()->getAs<RecordType>();
    if (!BaseType) {
      continue;
    }

    CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
    Sema::SpecialMemberOverloadResult *SMOR =
        LookupSpecialMember(BaseClassDecl, CSM,
                            /* ConstArg */ ConstRHS,
                            /* VolatileArg */ false,
                            /* RValueThis */ false,
                            /* ConstThis */ false,
                            /* VolatileThis */ false);

    if (!SMOR || !SMOR->getMethod()) {
      continue;
    }

    CUDAFunctionTarget BaseMethodTarget = IdentifyCUDATarget(SMOR->getMethod());
    if (!InferredTarget.hasValue()) {
      InferredTarget = BaseMethodTarget;
    } else {
      bool ResolutionError = resolveCalleeCUDATargetConflict(
          InferredTarget.getValue(), BaseMethodTarget,
          InferredTarget.getPointer());
      if (ResolutionError) {
        if (Diagnose) {
          Diag(ClassDecl->getLocation(),
               diag::note_implicit_member_target_infer_collision)
              << (unsigned)CSM << InferredTarget.getValue() << BaseMethodTarget;
        }
        MemberDecl->addAttr(CUDAInvalidTargetAttr::CreateImplicit(Context));
        return true;
      }
    }
  }

  // Same as for bases, but now for special members of fields.
  for (const auto *F : ClassDecl->fields()) {
    if (F->isInvalidDecl()) {
      continue;
    }

    const RecordType *FieldType =
        Context.getBaseElementType(F->getType())->getAs<RecordType>();
    if (!FieldType) {
      continue;
    }

    CXXRecordDecl *FieldRecDecl = cast<CXXRecordDecl>(FieldType->getDecl());
    Sema::SpecialMemberOverloadResult *SMOR =
        LookupSpecialMember(FieldRecDecl, CSM,
                            /* ConstArg */ ConstRHS && !F->isMutable(),
                            /* VolatileArg */ false,
                            /* RValueThis */ false,
                            /* ConstThis */ false,
                            /* VolatileThis */ false);

    if (!SMOR || !SMOR->getMethod()) {
      continue;
    }

    CUDAFunctionTarget FieldMethodTarget =
        IdentifyCUDATarget(SMOR->getMethod());
    if (!InferredTarget.hasValue()) {
      InferredTarget = FieldMethodTarget;
    } else {
      bool ResolutionError = resolveCalleeCUDATargetConflict(
          InferredTarget.getValue(), FieldMethodTarget,
          InferredTarget.getPointer());
      if (ResolutionError) {
        if (Diagnose) {
          Diag(ClassDecl->getLocation(),
               diag::note_implicit_member_target_infer_collision)
              << (unsigned)CSM << InferredTarget.getValue()
              << FieldMethodTarget;
        }
        MemberDecl->addAttr(CUDAInvalidTargetAttr::CreateImplicit(Context));
        return true;
      }
    }
  }

  if (InferredTarget.hasValue()) {
    if (InferredTarget.getValue() == CFT_Device) {
      MemberDecl->addAttr(CUDADeviceAttr::CreateImplicit(Context));
    } else if (InferredTarget.getValue() == CFT_Host) {
      MemberDecl->addAttr(CUDAHostAttr::CreateImplicit(Context));
    } else {
      MemberDecl->addAttr(CUDADeviceAttr::CreateImplicit(Context));
      MemberDecl->addAttr(CUDAHostAttr::CreateImplicit(Context));
    }
  } else {
    // If no target was inferred, mark this member as __host__ __device__;
    // it's the least restrictive option that can be invoked from any target.
    MemberDecl->addAttr(CUDADeviceAttr::CreateImplicit(Context));
    MemberDecl->addAttr(CUDAHostAttr::CreateImplicit(Context));
  }

  return false;
}
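
// Illustrative example (hypothetical types): given
//   struct HostOnly   { __host__   HostOnly();   };
//   struct DeviceOnly { __device__ DeviceOnly(); };
//   struct Both : HostOnly, DeviceOnly {};
// the implicit Both::Both() would need to call one __host__ and one
// __device__ constructor, so the inference above marks it with
// CUDAInvalidTargetAttr, and CheckCUDATarget then rejects any call to it.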