//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGCXXABI.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGHLSLRuntime.h" // HLSL Change
#include "TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_X86_64Win64: return llvm::CallingConv::X86_64_Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for __pascal to LLVM.
  case CC_X86Pascal: return llvm::CallingConv::C;
  // TODO: Add support for __vectorcall to LLVM.
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
  case CC_SpirKernel: return llvm::CallingConv::SPIR_KERNEL;
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 /*instanceMethod=*/false,
                                 /*chainCall=*/false, None,
                                 FTNP->getExtInfo(), RequiredArgs(0));
}
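
// Example (illustrative, not part of the original file): for a K&R-style
// declaration such as
//   int f();
// RequiredArgs(0) records that zero arguments are required, so every
// argument at a call site is passed under the variadic rules of the target.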

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP) {
  RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
  // FIXME: Kill copy.
  prefix.append(FTP->param_type_begin(), FTP->param_type_end());
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
  return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
                                     /*chainCall=*/false, prefix,
                                     FTP->getExtInfo(), required);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
                                   FTP);
}

static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;
  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;
  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;
  if (D->hasAttr<VectorCallAttr>())
    return CC_X86VectorCall;
  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;
  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;
  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_X86_64Win64;
  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;
  return CC_C;
}
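
// Example (illustrative): a declaration such as
//   void __attribute__((stdcall)) f(void);
// yields CC_X86StdCall here, which ClangCallConvToLLVMCallConv above then
// lowers to llvm::CallingConv::X86_StdCall. Note that ms_abi and sysv_abi
// are effectively no-ops when they match the target's native convention.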

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (Zero value of RD means we don't have any meaningful "this" argument type,
/// so fall back to a generic pointer type).
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  if (RD)
    argTypes.push_back(GetThisType(Context, RD));
  else
    argTypes.push_back(Context.VoidPtrTy);

  return ::arrangeLLVMFunctionInfo(
      *this, true, argTypes,
      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function. The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQual<FunctionProtoType> prototype = GetFormalType(MD);

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr());
  }

  return arrangeFreeFunctionType(prototype);
}

const CGFunctionInfo &
CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
                                            StructorType Type) {
  SmallVector<CanQualType, 16> argTypes;
  argTypes.push_back(GetThisType(Context, MD->getParent()));

  GlobalDecl GD;
  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    GD = GlobalDecl(CD, toCXXCtorType(Type));
  } else {
    auto *DD = dyn_cast<CXXDestructorDecl>(MD);
    GD = GlobalDecl(DD, toCXXDtorType(Type));
  }

  CanQual<FunctionProtoType> FTP = GetFormalType(MD);

  // Add the formal parameters.
  argTypes.append(FTP->param_type_begin(), FTP->param_type_end());

  TheCXXABI.buildStructorSignature(MD, Type, argTypes);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTypes.size()) : RequiredArgs::All);

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  CanQualType resultType = TheCXXABI.HasThisReturn(GD)
                               ? argTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;
  return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, argTypes, extInfo,
                                 required);
}

/// Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType CtorKind,
                                        unsigned ExtraArgs) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTypes;
  for (const auto &Arg : args)
    ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  CanQual<FunctionProtoType> FPT = GetFormalType(D);
  RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, 1 + ExtraArgs);
  GlobalDecl GD(D, CtorKind);
  CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
                               ? ArgTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;

  FunctionType::ExtInfo Info = FPT->getExtInfo();
  return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTypes, Info,
                                 Required);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (isa<FunctionNoProtoType>(FTy)) {
    CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
    return arrangeLLVMFunctionInfo(
        noProto->getReturnType(), /*instanceMethod=*/false,
        /*chainCall=*/false, None, noProto->getExtInfo(), RequiredArgs::All);
  }

  assert(isa<FunctionProtoType>(FTy));
  return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>());
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type. The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (const auto *I : MD->params()) {
    argTys.push_back(Context.getCanonicalParamType(I->getType()));
  }

  FunctionType::ExtInfo einfo;
  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(
      GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
      /*chainCall=*/false, argTys, einfo, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType()));

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType()));

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a thunk that takes 'this' as the first parameter followed by
/// varargs. Return a void pointer, regardless of the actual return type.
/// The body of the thunk will end in a musttail call to a function of the
/// correct type, and the caller will bitcast the function to the correct
/// prototype.
const CGFunctionInfo &
CodeGenTypes::arrangeMSMemberPointerThunk(const CXXMethodDecl *MD) {
  assert(MD->isVirtual() && "only virtual memptrs have thunks");
  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
  CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) };
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
                                 /*chainCall=*/false, ArgTys,
                                 FTP->getExtInfo(), RequiredArgs(1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
                                   CXXCtorType CT) {
  assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);

  CanQual<FunctionProtoType> FTP = GetFormalType(CD);
  SmallVector<CanQualType, 2> ArgTys;
  const CXXRecordDecl *RD = CD->getParent();
  ArgTys.push_back(GetThisType(Context, RD));
  if (CT == Ctor_CopyingClosure)
    ArgTys.push_back(*FTP->param_type_begin());
  if (RD->getNumVBases() > 0)
    ArgTys.push_back(Context.IntTy);
  CallingConv CC = Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/true);
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTys,
                                 FunctionType::ExtInfo(CC), RequiredArgs::All);
}

/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            CodeGenModule &CGM,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs,
                            bool chainCall) {
  assert(args.size() >= numExtraRequiredArgs);

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);

  // If we don't have a prototype at all, but we're supposed to
  // explicitly use the variadic convention for unprototyped calls,
  // treat all of the arguments as required but preserve the nominal
  // possibility of variadics.
  } else if (CGM.getTargetCodeGenInfo()
                 .isNoProtoCallVariadic(args,
                                        cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &arg : args)
    argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
  return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
                                     /*instanceMethod=*/false, chainCall,
                                     argTypes, fnType->getExtInfo(), required);
}
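
// Example (illustrative): for a call to
//   int printf(const char *, ...);
// with three actual arguments, the prototype is variadic with one formal
// parameter and numExtraRequiredArgs is 0, so required becomes
// RequiredArgs(1): the format string is required and the remaining two
// arguments are passed under the target's variadic rules.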

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments. The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType,
                                      bool chainCall) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
                                     chainCall ? 1 : 0, chainCall);
}

/// A block function call is essentially a free-function call with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
                                     /*chainCall=*/false);
}

const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(QualType resultType,
                                      const CallArgList &args,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, info, required);
}

/// Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *FPT,
                                   RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  FunctionType::ExtInfo info = FPT->getExtInfo();
  return arrangeLLVMFunctionInfo(
      GetReturnType(FPT->getReturnType()), /*instanceMethod=*/true,
      /*chainCall=*/false, argTypes, info, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeFreeFunctionDeclaration(
    QualType resultType, const FunctionArgList &args,
    const FunctionType::ExtInfo &info, bool isVariadic) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (auto Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg->getType()));

  RequiredArgs required =
      (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, info, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(
      getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
      None, FunctionType::ExtInfo(), RequiredArgs::All);
}

/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      bool instanceMethod,
                                      bool chainCall,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
  // HLSL Change Starts
  ASTContext &context = getContext();
  auto isCanonicalAsParam = [&context](const CanQualType &Ty) {
    return Ty.isCanonicalAsParam() ||
           (context.getLangOpts().HLSL && Ty->isArrayType());
  };
  // HLSL Change Ends
  assert(std::all_of(argTypes.begin(), argTypes.end(),
                     isCanonicalAsParam)); // HLSL Change - skip the
                                           // isCanonicalAsParam check for
                                           // array types
  (void)isCanonicalAsParam;

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, required,
                          resultType, argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  // Construct the function info. We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
                              resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;
  (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  getABIInfo().computeInfo(*FI);

  // Loop over all of the computed argument and return value info. If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (auto &I : FI->arguments())
    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
      I.info.setCoerceToType(ConvertType(I.type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}
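
// Note (illustrative): FunctionInfos is a FoldingSet keyed on the full
// signature (CC, instance-method-ness, chain call, ext-info, required args,
// result and argument types), so arranging the same signature twice returns
// the same memoized CGFunctionInfo. FunctionsBeingProcessed is only a
// re-entrancy guard around computeInfo().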

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       bool instanceMethod,
                                       bool chainCall,
                                       const FunctionType::ExtInfo &info,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  void *buffer = operator new(sizeof(CGFunctionInfo) +
                              sizeof(ArgInfo) * (argTypes.size() + 1));
  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->ArgStruct = nullptr;
  FI->NumArgs = argTypes.size();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  return FI;
}
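
// Memory layout (illustrative): the single allocation above is
//
//   [ CGFunctionInfo | ArgInfo(ret) | ArgInfo(arg0) | ... | ArgInfo(argN-1) ]
//
// i.e. getArgsBuffer()[0] holds the return type/info and slots 1..NumArgs
// hold the parameters, which is why the buffer is sized argTypes.size() + 1.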

/***/

namespace {

// ABIArgInfo::Expand implementation.

// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
struct TypeExpansion {
  enum TypeExpansionKind {
    // Elements of constant arrays are expanded recursively.
    TEK_ConstantArray,
    // Record fields are expanded recursively (but if record is a union, only
    // the field with the largest size is expanded).
    TEK_Record,
    // For complex types, real and imaginary parts are expanded recursively.
    TEK_Complex,
    // All other types are not expandable.
    TEK_None
  };

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}
};

struct ConstantArrayExpansion : TypeExpansion {
  QualType EltTy;
  uint64_t NumElts;

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;
  }
};

struct RecordExpansion : TypeExpansion {
  SmallVector<const CXXBaseSpecifier *, 1> Bases;
  SmallVector<const FieldDecl *, 1> Fields;

  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
                  SmallVector<const FieldDecl *, 1> &&Fields)
      : TypeExpansion(TEK_Record), Bases(Bases), Fields(Fields) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;
  }
};

struct ComplexExpansion : TypeExpansion {
  QualType EltTy;

  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Complex;
  }
};

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
  }
};
}  // namespace

static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    return llvm::make_unique<ConstantArrayExpansion>(
        AT->getElementType(), AT->getSize().getZExtValue());
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    SmallVector<const CXXBaseSpecifier *, 1> Bases;
    SmallVector<const FieldDecl *, 1> Fields;
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        // Skip zero length bitfields.
        if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        Fields.push_back(LargestFD);
    } else {
      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        for (const CXXBaseSpecifier &BS : CXXRD->bases())
          Bases.push_back(&BS);
      }
      for (const auto *FD : RD->fields()) {
        // Skip zero length bitfields.
        if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);
      }
    }
    return llvm::make_unique<RecordExpansion>(std::move(Bases),
                                              std::move(Fields));
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    return llvm::make_unique<ComplexExpansion>(CT->getElementType());
  }
  return llvm::make_unique<NoExpansion>();
}

static int getExpansionSize(QualType Ty, const ASTContext &Context) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
  }
  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    int Res = 0;
    for (auto BS : RExp->Bases)
      Res += getExpansionSize(BS->getType(), Context);
    for (auto FD : RExp->Fields)
      Res += getExpansionSize(FD->getType(), Context);
    return Res;
  }
  if (isa<ComplexExpansion>(Exp.get()))
    return 2;
  assert(isa<NoExpansion>(Exp.get()));
  return 1;
}
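
// Worked example (illustrative): for
//   struct S { int A[3]; _Complex float C; };
// getExpansionSize returns 3 (array elements) + 2 (real and imaginary parts)
// = 5, so an Expand argument of type S occupies five IR arguments.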

void
CodeGenTypes::getExpandedTypes(QualType Ty,
                               SmallVectorImpl<llvm::Type *>::iterator &TI) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      getExpandedTypes(CAExp->EltTy, TI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)
      getExpandedTypes(BS->getType(), TI);
    for (auto FD : RExp->Fields)
      getExpandedTypes(FD->getType(), TI);
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Type *EltTy = ConvertType(CExp->EltTy);
    *TI++ = EltTy;
    *TI++ = EltTy;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    *TI++ = ConvertType(Ty);
  }
}

void CodeGenFunction::ExpandTypeFromArgs(
    QualType Ty, LValue LV, SmallVectorImpl<llvm::Argument *>::iterator &AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      llvm::Value *EltAddr =
          Builder.CreateConstGEP2_32(nullptr, LV.getAddress(), 0, i);
      LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
      ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    llvm::Value *This = LV.getAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      llvm::Value *Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      LValue SubLV = MakeAddrLValue(Base, BS->getType());

      // Recurse onto bases.
      ExpandTypeFromArgs(BS->getType(), SubLV, AI);
    }
    for (auto FD : RExp->Fields) {
      // FIXME: What are the right qualifiers here?
      LValue SubLV = EmitLValueForField(LV, FD);
      ExpandTypeFromArgs(FD->getType(), SubLV, AI);
    }
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Value *RealAddr =
        Builder.CreateStructGEP(nullptr, LV.getAddress(), 0, "real");
    EmitStoreThroughLValue(RValue::get(*AI++),
                           MakeAddrLValue(RealAddr, CExp->EltTy));
    llvm::Value *ImagAddr =
        Builder.CreateStructGEP(nullptr, LV.getAddress(), 1, "imag");
    EmitStoreThroughLValue(RValue::get(*AI++),
                           MakeAddrLValue(ImagAddr, CExp->EltTy));
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    EmitStoreThroughLValue(RValue::get(*AI++), LV);
  }
}

void CodeGenFunction::ExpandTypeToArgs(
    QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy,
    SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    llvm::Value *Addr = RV.getAggregateAddr();
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      llvm::Value *EltAddr = Builder.CreateConstGEP2_32(nullptr, Addr, 0, i);
      RValue EltRV =
          convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation());
      ExpandTypeToArgs(CAExp->EltTy, EltRV, IRFuncTy, IRCallArgs, IRCallArgPos);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    llvm::Value *This = RV.getAggregateAddr();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      llvm::Value *Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      RValue BaseRV = RValue::getAggregate(Base);

      // Recurse onto bases.
      ExpandTypeToArgs(BS->getType(), BaseRV, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }

    LValue LV = MakeAddrLValue(This, Ty);
    for (auto FD : RExp->Fields) {
      RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
      ExpandTypeToArgs(FD->getType(), FldRV, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    ComplexPairTy CV = RV.getComplexVal();
    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
  }
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of it, try to gep into the struct to get
/// at its inner goodness. Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static llvm::Value *
EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it. The
  // comparison must be made on the store size and not the alloca size. Using
  // the alloca size may overstate the size of the load.
  uint64_t FirstEltSize =
      CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcSTy, SrcPtr, 0, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy =
      cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}
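
// Example (illustrative): with SrcSTy = { { i32 }, i8 } and DstSize = 4, the
// first element { i32 } has store size 4, so we GEP to it ("coerce.dive") and
// recurse; the inner i32 also has store size 4, so the final result is an
// i32* pointing at the first four bytes of the outer struct.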

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers. This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}
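
// Worked example (illustrative): coercing an i64 holding
// 0xAABBCCDD11223344 down to i32 on a big-endian target emits
//   lshr i64 %v, 32 ; trunc i64 to i32   -> 0xAABBCCDD
// preserving the bytes that sit first in memory, whereas a little-endian
// target simply truncates to 0x11223344, its own first bytes in memory.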

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty, known to be aligned to
/// \arg SrcAlign bytes.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      llvm::Type *Ty, CharUnits SrcAlign,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy =
      cast<llvm::PointerType>(SrcPtr->getType())->getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateAlignedLoad(SrcPtr, SrcAlign.getQuantity());

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
    SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::LoadInst *Load =
        CGF.Builder.CreateAlignedLoad(SrcPtr, SrcAlign.getQuantity());
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Casted =
        CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    return CGF.Builder.CreateAlignedLoad(Casted, SrcAlign.getQuantity());
  }

  // Otherwise do coercion through memory. This is stupid, but
  // simple.
  llvm::AllocaInst *Tmp = CGF.CreateTempAlloca(Ty);
  Tmp->setAlignment(SrcAlign.getQuantity());
  llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
  llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
  llvm::Value *SrcCasted = CGF.Builder.CreateBitCast(SrcPtr, I8PtrTy);
  CGF.Builder.CreateMemCpy(Casted, SrcCasted,
                           llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
                           SrcAlign.getQuantity(), false);
  return CGF.Builder.CreateAlignedLoad(Tmp, SrcAlign.getQuantity());
}
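
// Summary (illustrative): CreateCoercedLoad tries, in order, (1) a plain load
// when the types already match, (2) an integer/pointer coercion via
// CoerceIntOrPtrToIntOrPtr, (3) a bitcast-and-load when the source is at
// least as large as the destination, and (4) as a last resort, a memcpy into
// a correctly typed temporary followed by a load of that temporary.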
  855. // Function to store a first-class aggregate into memory. We prefer to
  856. // store the elements rather than the aggregate to be more friendly to
  857. // fast-isel.
  858. // FIXME: Do we need to recurse here?
  859. static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
  860. llvm::Value *DestPtr, bool DestIsVolatile,
  861. CharUnits DestAlign,
  862. QualType QTy // HLSL Change
  863. ) {
  864. // Prefer scalar stores to first-class aggregate stores.
  865. if (llvm::StructType *STy =
  866. dyn_cast<llvm::StructType>(Val->getType())) {
  867. // HLSL Change Begins
  868. if (CGF.getLangOpts().HLSL) {
  869. CGF.CGM.getHLSLRuntime().EmitHLSLAggregateStore(CGF, Val, DestPtr, QTy);
  870. return;
  871. }
  872. // HLSL Change Ends
  873. const llvm::StructLayout *Layout =
  874. CGF.CGM.getDataLayout().getStructLayout(STy);
  875. for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
  876. llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(STy, DestPtr, 0, i);
  877. llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
  878. uint64_t EltOffset = Layout->getElementOffset(i);
  879. CharUnits EltAlign =
  880. DestAlign.alignmentAtOffset(CharUnits::fromQuantity(EltOffset));
  881. CGF.Builder.CreateAlignedStore(Elt, EltPtr, EltAlign.getQuantity(),
  882. DestIsVolatile);
  883. }
  884. } else {
  885. CGF.Builder.CreateAlignedStore(Val, DestPtr, DestAlign.getQuantity(),
  886. DestIsVolatile);
  887. }
  888. }
/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.  The
/// destination is known to be aligned to \arg DstAlign bytes.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               bool DstIsVolatile,
                               CharUnits DstAlign,
                               CodeGenFunction &CGF,
                               QualType QTy // HLSL Change
                               ) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy =
      cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateAlignedStore(Src, DstPtr, DstAlign.getQuantity(),
                                   DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
    DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateAlignedStore(Src, DstPtr, DstAlign.getQuantity(),
                                   DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    llvm::Value *Casted =
        CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    BuildAggStore(CGF, Src, Casted, DstIsVolatile, DstAlign, QTy); // HLSL Change - Add QTy
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::AllocaInst *Tmp = CGF.CreateTempAlloca(SrcTy);
    Tmp->setAlignment(DstAlign.getQuantity());
    CGF.Builder.CreateAlignedStore(Src, Tmp, DstAlign.getQuantity());
    llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
    llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
    llvm::Value *DstCasted = CGF.Builder.CreateBitCast(DstPtr, I8PtrTy);
    CGF.Builder.CreateMemCpy(DstCasted, Casted,
                             llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
                             DstAlign.getQuantity(), false);
  }
}
namespace {

/// Encapsulates information about the way function arguments from
/// CGFunctionInfo should be passed to the actual LLVM IR function.
class ClangToLLVMArgMapping {
  static const unsigned InvalidIndex = ~0U;
  unsigned InallocaArgNo;
  unsigned SRetArgNo;
  unsigned TotalIRArgs;

  /// Arguments of the LLVM IR function corresponding to a single Clang
  /// argument.
  struct IRArgs {
    unsigned PaddingArgIndex;
    // Argument is expanded to IR arguments at positions
    // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
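    // For example, an argument whose coercion type is the struct
    // { i32, i32 } and which is flattened occupies two consecutive IR
    // argument slots, so NumberOfArgs is 2.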
    unsigned FirstArgIndex;
    unsigned NumberOfArgs;

    IRArgs()
        : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
          NumberOfArgs(0) {}
  };

  SmallVector<IRArgs, 8> ArgInfo;

public:
  ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
                        bool OnlyRequiredArgs = false)
      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
        ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
    construct(Context, FI, OnlyRequiredArgs);
  }

  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
  unsigned getInallocaArgNo() const {
    assert(hasInallocaArg());
    return InallocaArgNo;
  }

  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
  unsigned getSRetArgNo() const {
    assert(hasSRetArg());
    return SRetArgNo;
  }

  unsigned totalIRArgs() const { return TotalIRArgs; }

  bool hasPaddingArg(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
  }
  unsigned getPaddingArgNo(unsigned ArgNo) const {
    assert(hasPaddingArg(ArgNo));
    return ArgInfo[ArgNo].PaddingArgIndex;
  }
  /// Returns the index of the first IR argument corresponding to ArgNo, and
  /// the number of IR arguments it expands to.
  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
                          ArgInfo[ArgNo].NumberOfArgs);
  }

private:
  void construct(const ASTContext &Context, const CGFunctionInfo &FI,
                 bool OnlyRequiredArgs);
};
void ClangToLLVMArgMapping::construct(const ASTContext &Context,
                                      const CGFunctionInfo &FI,
                                      bool OnlyRequiredArgs) {
  unsigned IRArgNo = 0;
  bool SwapThisWithSRet = false;
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  if (RetAI.getKind() == ABIArgInfo::Indirect) {
    SwapThisWithSRet = RetAI.isSRetAfterThis();
    SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
  }
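  // (Note: when the sret parameter is passed after 'this', as in the
  // Microsoft C++ ABI, it is pinned to IR index 1 while 'this' takes index 0;
  // the loop below skips IR index 1 to account for that.)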
  unsigned ArgNo = 0;
  unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
       ++I, ++ArgNo) {
    assert(I != FI.arg_end());
    QualType ArgType = I->type;
    const ABIArgInfo &AI = I->info;
    // Collect data about IR arguments corresponding to Clang argument ArgNo.
    auto &IRArgs = ArgInfo[ArgNo];

    if (AI.getPaddingType())
      IRArgs.PaddingArgIndex = IRArgNo++;

    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // FIXME: handle sseregparm someday...
      llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
      if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
        IRArgs.NumberOfArgs = STy->getNumElements();
      } else {
        IRArgs.NumberOfArgs = 1;
      }
      break;
    }
    case ABIArgInfo::Indirect:
      IRArgs.NumberOfArgs = 1;
      break;
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      // ignore and inalloca don't have matching LLVM parameters.
      IRArgs.NumberOfArgs = 0;
      break;
    case ABIArgInfo::Expand: {
      IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
      break;
    }
    }

    if (IRArgs.NumberOfArgs > 0) {
      IRArgs.FirstArgIndex = IRArgNo;
      IRArgNo += IRArgs.NumberOfArgs;
    }

    // Skip over the sret parameter when it comes second.  We already handled
    // it above.
    if (IRArgNo == 1 && SwapThisWithSRet)
      IRArgNo++;
  }
  assert(ArgNo == ArgInfo.size());

  if (FI.usesInAlloca())
    InallocaArgNo = IRArgNo++;

  TotalIRArgs = IRArgNo;
}
} // namespace

/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
  return ReturnTypeUsesSRet(FI) &&
         getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
    }
  }
  return false;
}

bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::LongDouble)
        return getTarget().useObjCFP2RetForComplexLongDouble();
    }
  }
  return false;
}

llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}

llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
  bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
  (void)Inserted;
  assert(Inserted && "Recursively being processed?");

  llvm::Type *resultType = nullptr;
  const ABIArgInfo &retAI = FI.getReturnInfo();
  switch (retAI.getKind()) {
  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    resultType = retAI.getCoerceToType();
    break;

  case ABIArgInfo::InAlloca:
    if (retAI.getInAllocaSRet()) {
      // sret things on win32 aren't void, they return the sret pointer.
      QualType ret = FI.getReturnType();
      llvm::Type *ty = ConvertType(ret);
      unsigned addressSpace = Context.getTargetAddressSpace(ret);
      resultType = llvm::PointerType::get(ty, addressSpace);
    } else {
      resultType = llvm::Type::getVoidTy(getLLVMContext());
    }
    break;

  case ABIArgInfo::Indirect: {
    assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;
  }

  case ABIArgInfo::Ignore:
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;
  }
  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
  SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());

  // Add type for sret argument.
  if (IRFunctionArgs.hasSRetArg()) {
    QualType Ret = FI.getReturnType();
    llvm::Type *Ty = ConvertType(Ret);
    unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
    ArgTypes[IRFunctionArgs.getSRetArgNo()] =
        llvm::PointerType::get(Ty, AddressSpace);
  }

  // Add type for inalloca argument.
  if (IRFunctionArgs.hasInallocaArg()) {
    auto ArgStruct = FI.getArgStruct();
    assert(ArgStruct);
    ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
  }

  // Add in all of the required arguments.
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
                                     ie = it + FI.getNumRequiredArgs();
  for (; it != ie; ++it, ++ArgNo) {
    const ABIArgInfo &ArgInfo = it->info;

    // Insert a padding type to ensure proper alignment.
    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
          ArgInfo.getPaddingType();

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      assert(NumIRArgs == 0);
      break;

    case ABIArgInfo::Indirect: {
      assert(NumIRArgs == 1);
      // indirect arguments are always on the stack, which is addr space #0.
      llvm::Type *LTy = ConvertTypeForMem(it->type);
      ArgTypes[FirstIRArg] = LTy->getPointerTo();
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
      llvm::Type *argType = ArgInfo.getCoerceToType();
      llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
      if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
        assert(NumIRArgs == st->getNumElements());
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          ArgTypes[FirstIRArg + i] = st->getElementType(i);
      } else {
        assert(NumIRArgs == 1);
        ArgTypes[FirstIRArg] = argType;
      }
      break;
    }

    case ABIArgInfo::Expand:
      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
      getExpandedTypes(it->type, ArgTypesIter);
      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
      break;
    }
  }

  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  assert(Erased && "Not in set?");

  return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
}

llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  if (!isFuncTypeConvertible(FPT))
    return llvm::StructType::get(getLLVMContext());

  const CGFunctionInfo *Info;
  if (isa<CXXDestructorDecl>(MD))
    Info =
        &arrangeCXXStructorDeclaration(MD, getFromDtorType(GD.getDtorType()));
  else
    Info = &arrangeCXXMethodDeclaration(MD);
  return GetFunctionType(*Info);
}
void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                           const Decl *TargetDecl,
                                           AttributeListType &PAL,
                                           unsigned &CallingConv,
                                           bool AttrOnCallSite) {
  llvm::AttrBuilder FuncAttrs;
  llvm::AttrBuilder RetAttrs;
  bool HasOptnone = false;

  CallingConv = FI.getEffectiveCallingConvention();

  if (FI.isNoReturn())
    FuncAttrs.addAttribute(llvm::Attribute::NoReturn);

  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
    if (TargetDecl->hasAttr<NoDuplicateAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);

    if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
      const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
      if (FPT && FPT->isNothrow(getContext()))
        FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
      // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
      // These attributes are not inherited by overloads.
      const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
      if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
        FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
    }

    // 'const' and 'pure' attribute functions are also nounwind.
    if (TargetDecl->hasAttr<ConstAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    } else if (TargetDecl->hasAttr<PureAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    }
    if (TargetDecl->hasAttr<RestrictAttr>())
      RetAttrs.addAttribute(llvm::Attribute::NoAlias);
    if (TargetDecl->hasAttr<ReturnsNonNullAttr>())
      RetAttrs.addAttribute(llvm::Attribute::NonNull);

    HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
  }

  // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
  if (!HasOptnone) {
    if (CodeGenOpts.OptimizeSize)
      FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
    if (CodeGenOpts.OptimizeSize == 2)
      FuncAttrs.addAttribute(llvm::Attribute::MinSize);
  }

  if (CodeGenOpts.DisableRedZone)
    FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
  if (CodeGenOpts.EnableSegmentedStacks &&
      !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
    FuncAttrs.addAttribute("split-stack");

  if (AttrOnCallSite) {
    // Attributes that should go on the call site only.
    if (!CodeGenOpts.SimplifyLibCalls)
      FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
    if (!CodeGenOpts.TrapFuncName.empty())
      FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
  } else {
    // Attributes that should go on the function, but not the call site.
    if (!CodeGenOpts.DisableFPElim) {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
    } else if (CodeGenOpts.OmitLeafFramePointer) {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
    } else {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
    }

    FuncAttrs.addAttribute("disable-tail-calls",
                           llvm::toStringRef(CodeGenOpts.DisableTailCalls));
    FuncAttrs.addAttribute("less-precise-fpmad",
                           llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
    FuncAttrs.addAttribute("no-infs-fp-math",
                           llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
    FuncAttrs.addAttribute("no-nans-fp-math",
                           llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
    FuncAttrs.addAttribute("unsafe-fp-math",
                           llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
    FuncAttrs.addAttribute("use-soft-float",
                           llvm::toStringRef(CodeGenOpts.SoftFloat));
    FuncAttrs.addAttribute("stack-protector-buffer-size",
                           llvm::utostr(CodeGenOpts.SSPBufferSize));

    if (!CodeGenOpts.StackRealignment)
      FuncAttrs.addAttribute("no-realign-stack");

    // Add target-cpu and target-features attributes to functions. If
    // we have a decl for the function and it has a target attribute then
    // parse that and add it to the feature set.
    StringRef TargetCPU = getTarget().getTargetOpts().CPU;

    // TODO: Features gets us the features on the command line including
    // feature dependencies. For canonicalization purposes we might want to
    // avoid putting features in the target-features set if we know it'll be
    // one of the default features in the backend, e.g. corei7-avx and +avx or
    // figure out non-explicit dependencies.
    // Canonicalize the existing features in a new feature map.
    // TODO: Migrate the existing backends to keep the map around rather than
    // the vector.
    llvm::StringMap<bool> FeatureMap;
    for (auto F : getTarget().getTargetOpts().Features) {
      const char *Name = F.c_str();
      bool Enabled = Name[0] == '+';
      getTarget().setFeatureEnabled(FeatureMap, Name + 1, Enabled);
    }
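    // FeatureMap now holds the canonical on/off state for each feature named
    // on the command line; features from a target attribute, handled below,
    // are applied on top and can override these entries.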
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
    if (FD) {
      if (const auto *TD = FD->getAttr<TargetAttr>()) {
        StringRef FeaturesStr = TD->getFeatures();
        SmallVector<StringRef, 1> AttrFeatures;
        FeaturesStr.split(AttrFeatures, ",");

        // Grab the various features, prepend a "+" to turn each feature on
        // for the backend, and add them to our existing set of features.
        for (auto &Feature : AttrFeatures) {
          // Go ahead and trim whitespace rather than either erroring or
          // accepting it weirdly.
          Feature = Feature.trim();

          // While we're iterating, check for a different target CPU.
          if (Feature.startswith("arch="))
            TargetCPU = Feature.split("=").second.trim();
          else if (Feature.startswith("tune="))
            // We don't support cpu tuning this way currently.
            ;
          else if (Feature.startswith("fpmath="))
            // TODO: Support the fpmath option this way. It will require
            // checking overall feature validity for the function with the
            // rest of the attributes on the function.
            ;
          else if (Feature.startswith("mno-"))
            getTarget().setFeatureEnabled(FeatureMap, Feature.split("-").second,
                                          false);
          else
            getTarget().setFeatureEnabled(FeatureMap, Feature, true);
        }
      }
    }
    // Produce the canonical string for this set of features.
    std::vector<std::string> Features;
    for (llvm::StringMap<bool>::const_iterator it = FeatureMap.begin(),
                                               ie = FeatureMap.end();
         it != ie; ++it)
      Features.push_back((it->second ? "+" : "-") + it->first().str());

    // Now add the target-cpu and target-features to the function.
    if (TargetCPU != "")
      FuncAttrs.addAttribute("target-cpu", TargetCPU);
    if (!Features.empty()) {
      std::sort(Features.begin(), Features.end());
      FuncAttrs.addAttribute("target-features",
                             llvm::join(Features.begin(), Features.end(), ","));
    }
  }

  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);

  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
    if (RetTy->hasSignedIntegerRepresentation())
      RetAttrs.addAttribute(llvm::Attribute::SExt);
    else if (RetTy->hasUnsignedIntegerRepresentation())
      RetAttrs.addAttribute(llvm::Attribute::ZExt);
    // FALL THROUGH
  case ABIArgInfo::Direct:
    if (RetAI.getInReg())
      RetAttrs.addAttribute(llvm::Attribute::InReg);
    break;
  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::InAlloca:
  case ABIArgInfo::Indirect: {
    // inalloca and sret disable readnone and readonly
    FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
      .removeAttribute(llvm::Attribute::ReadNone);
    break;
  }

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }
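  // Returns of reference type are dereferenceable up to the size of the
  // pointee when that type is complete and of constant size; otherwise, in
  // address space 0 we can at least mark them nonnull.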
  if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
    QualType PTy = RefTy->getPointeeType();
    if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
      RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
                                        .getQuantity());
    else if (getContext().getTargetAddressSpace(PTy) == 0)
      RetAttrs.addAttribute(llvm::Attribute::NonNull);
  }

  // Attach return attributes.
  if (RetAttrs.hasAttributes()) {
    PAL.push_back(llvm::AttributeSet::get(
        getLLVMContext(), llvm::AttributeSet::ReturnIndex, RetAttrs));
  }

  // Attach attributes to sret.
  if (IRFunctionArgs.hasSRetArg()) {
    llvm::AttrBuilder SRETAttrs;
    SRETAttrs.addAttribute(llvm::Attribute::StructRet);
    if (RetAI.getInReg())
      SRETAttrs.addAttribute(llvm::Attribute::InReg);
    PAL.push_back(llvm::AttributeSet::get(
        getLLVMContext(), IRFunctionArgs.getSRetArgNo() + 1, SRETAttrs));
  }

  // Attach attributes to inalloca argument.
  if (IRFunctionArgs.hasInallocaArg()) {
    llvm::AttrBuilder Attrs;
    Attrs.addAttribute(llvm::Attribute::InAlloca);
    PAL.push_back(llvm::AttributeSet::get(
        getLLVMContext(), IRFunctionArgs.getInallocaArgNo() + 1, Attrs));
  }

  unsigned ArgNo = 0;
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
                                          E = FI.arg_end();
       I != E; ++I, ++ArgNo) {
    QualType ParamType = I->type;
    const ABIArgInfo &AI = I->info;
    llvm::AttrBuilder Attrs;

    // Add attribute for padding argument, if necessary.
    if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
      if (AI.getPaddingInReg())
        PAL.push_back(llvm::AttributeSet::get(
            getLLVMContext(), IRFunctionArgs.getPaddingArgNo(ArgNo) + 1,
            llvm::Attribute::InReg));
    }

    // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
    // have the corresponding parameter variable.  It doesn't make
    // sense to do it here because parameters are so messed up.
    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
      if (ParamType->isSignedIntegerOrEnumerationType())
        Attrs.addAttribute(llvm::Attribute::SExt);
      else if (ParamType->isUnsignedIntegerOrEnumerationType()) {
        if (getTypes().getABIInfo().shouldSignExtUnsignedType(ParamType))
          Attrs.addAttribute(llvm::Attribute::SExt);
        else
          Attrs.addAttribute(llvm::Attribute::ZExt);
      }
      // FALL THROUGH
    case ABIArgInfo::Direct:
      if (ArgNo == 0 && FI.isChainCall())
        Attrs.addAttribute(llvm::Attribute::Nest);
      else if (AI.getInReg())
        Attrs.addAttribute(llvm::Attribute::InReg);
      break;

    case ABIArgInfo::Indirect:
      if (AI.getInReg())
        Attrs.addAttribute(llvm::Attribute::InReg);

      if (AI.getIndirectByVal())
        Attrs.addAttribute(llvm::Attribute::ByVal);

      Attrs.addAlignmentAttr(AI.getIndirectAlign());

      // byval disables readnone and readonly.
      FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
        .removeAttribute(llvm::Attribute::ReadNone);
      break;

    case ABIArgInfo::Ignore:
    case ABIArgInfo::Expand:
      continue;

    case ABIArgInfo::InAlloca:
      // inalloca disables readnone and readonly.
      FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
        .removeAttribute(llvm::Attribute::ReadNone);
      continue;
    }

    if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
      QualType PTy = RefTy->getPointeeType();
      if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
        Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
                                       .getQuantity());
      else if (getContext().getTargetAddressSpace(PTy) == 0)
        Attrs.addAttribute(llvm::Attribute::NonNull);
    }

    if (Attrs.hasAttributes()) {
      unsigned FirstIRArg, NumIRArgs;
      std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
      for (unsigned i = 0; i < NumIRArgs; i++)
        PAL.push_back(llvm::AttributeSet::get(getLLVMContext(),
                                              FirstIRArg + i + 1, Attrs));
    }
  }
  assert(ArgNo == FI.arg_size());

  if (FuncAttrs.hasAttributes())
    PAL.push_back(llvm::AttributeSet::get(getLLVMContext(),
                                          llvm::AttributeSet::FunctionIndex,
                                          FuncAttrs));
}
/// An argument came in as a promoted argument; demote it back to its
/// declared type.
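/// For example, under the default argument promotions an unprototyped
/// 'float' parameter arrives as a 'double' and is truncated back to 'float'
/// here.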
static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
                                         const VarDecl *var,
                                         llvm::Value *value) {
  llvm::Type *varType = CGF.ConvertType(var->getType());

  // This can happen with promotions that actually don't change the
  // underlying type, like the enum promotions.
  if (value->getType() == varType) return value;

  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
         && "unexpected promotion type");

  if (isa<llvm::IntegerType>(varType))
    return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");

  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
}
/// Returns the attribute (either parameter attribute, or function
/// attribute), which declares argument ArgNo to be non-null.
static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
                                         QualType ArgType, unsigned ArgNo) {
  // FIXME: __attribute__((nonnull)) can also be applied to:
  //   - references to pointers, where the pointee is known to be
  //     nonnull (apparently a Clang extension)
  //   - transparent unions containing pointers
  // In the former case, LLVM IR cannot represent the constraint. In
  // the latter case, we have no guarantee that the transparent union
  // is in fact passed as a pointer.
  if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
    return nullptr;
  // First, check attribute on parameter itself.
  if (PVD) {
    if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
      return ParmNNAttr;
  }
  // Check function attributes.
  if (!FD)
    return nullptr;
  for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
    if (NNAttr->isNonNull(ArgNo))
      return NNAttr;
  }
  return nullptr;
}
void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
    // Naked functions don't have prologues.
    return;

  // If this is an implicit-return-zero function, go ahead and
  // initialize the return value.  TODO: it might be nice to have
  // a more general mechanism for this that didn't require synthesized
  // return statements.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
    if (FD->hasImplicitReturnZero()) {
      QualType RetTy = FD->getReturnType().getUnqualifiedType();
      llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
      llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
      Builder.CreateStore(Zero, ReturnValue);
    }
  }

  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
  // Flattened function arguments.
  SmallVector<llvm::Argument *, 16> FnArgs;
  FnArgs.reserve(IRFunctionArgs.totalIRArgs());
  for (auto &Arg : Fn->args()) {
    FnArgs.push_back(&Arg);
  }
  assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());

  // If we're using inalloca, all the memory arguments are GEPs off of the last
  // parameter, which is a pointer to the complete memory area.
  llvm::Value *ArgStruct = nullptr;
  if (IRFunctionArgs.hasInallocaArg()) {
    ArgStruct = FnArgs[IRFunctionArgs.getInallocaArgNo()];
    assert(ArgStruct->getType() == FI.getArgStruct()->getPointerTo());
  }

  // Name the struct return parameter.
  if (IRFunctionArgs.hasSRetArg()) {
    auto AI = FnArgs[IRFunctionArgs.getSRetArgNo()];
    AI->setName("agg.result");
    AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), AI->getArgNo() + 1,
                                        llvm::Attribute::NoAlias));
  }
  // Track if we received the parameter as a pointer (indirect, byval, or
  // inalloca). If we already have a pointer, EmitParmDecl doesn't need to
  // copy it into a local alloca for us.
  enum ValOrPointer { HaveValue = 0, HavePointer = 1 };
  typedef llvm::PointerIntPair<llvm::Value *, 1> ValueAndIsPtr;
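  // (PointerIntPair packs the HaveValue/HavePointer flag into the low bit of
  // the Value*, so no separate flag storage is needed per argument.)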
  SmallVector<ValueAndIsPtr, 16> ArgVals;
  ArgVals.reserve(Args.size());

  // Create a pointer value for every parameter declaration.  This usually
  // entails copying one or more LLVM IR arguments into an alloca.  Don't push
  // any cleanups or do anything that might unwind.  We do that separately, so
  // we can push the cleanups in the correct order for the ABI.
  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it, ++ArgNo) {
    const VarDecl *Arg = *i;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    bool isPromoted = !getLangOpts().HLSL && // HLSL Change - no knr promotion in HLSL
      isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgI.getKind()) {
    case ABIArgInfo::InAlloca: {
      assert(NumIRArgs == 0);
      llvm::Value *V =
          Builder.CreateStructGEP(FI.getArgStruct(), ArgStruct,
                                  ArgI.getInAllocaFieldIndex(), Arg->getName());
      ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
      break;
    }

    case ABIArgInfo::Indirect: {
      assert(NumIRArgs == 1);
      llvm::Value *V = FnArgs[FirstIRArg];

      if (!hasScalarEvaluationKind(Ty)) {
        // Aggregates and complex variables are accessed by reference.  All we
        // need to do is realign the value, if requested.
        if (ArgI.getIndirectRealign()) {
          llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");

          // Copy from the incoming argument pointer to the temporary with the
          // appropriate alignment.
          //
          // FIXME: We should have a common utility for generating an aggregate
          // copy.
          llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
          CharUnits Size = getContext().getTypeSizeInChars(Ty);
          llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
          llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
          Builder.CreateMemCpy(Dst,
                               Src,
                               llvm::ConstantInt::get(IntPtrTy,
                                                      Size.getQuantity()),
                               ArgI.getIndirectAlign(),
                               false);
          V = AlignedTemp;
        }
        ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
      } else {
        // Load scalar value from indirect argument.
        V = EmitLoadOfScalar(V, false, ArgI.getIndirectAlign(), Ty,
                             Arg->getLocStart());

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
        ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
      }
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // HLSL Change Begins
      if (hlsl::IsHLSLMatType(Ty)) {
        assert(NumIRArgs == 1);
        auto AI = FnArgs[FirstIRArg];
        llvm::Value *V = AI;
        ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
        break;
      }
      // HLSL Change Ends

      // If we have the trivial case, handle it with no muss and fuss.
      if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
          ArgI.getCoerceToType() == ConvertType(Ty) &&
          ArgI.getDirectOffset() == 0) {
        assert(NumIRArgs == 1);
        auto AI = FnArgs[FirstIRArg];
        llvm::Value *V = AI;

        if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
          if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
                             PVD->getFunctionScopeIndex()))
            AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                                AI->getArgNo() + 1,
                                                llvm::Attribute::NonNull));

          QualType OTy = PVD->getOriginalType();
          if (const auto *ArrTy =
              getContext().getAsConstantArrayType(OTy)) {
            // A C99 array parameter declaration with the static keyword also
            // indicates dereferenceability, and if the size is constant we can
            // use the dereferenceable attribute (which requires the size in
            // bytes).
            if (ArrTy->getSizeModifier() == ArrayType::Static) {
              QualType ETy = ArrTy->getElementType();
              uint64_t ArrSize = ArrTy->getSize().getZExtValue();
              if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
                  ArrSize) {
                llvm::AttrBuilder Attrs;
                Attrs.addDereferenceableAttr(
                    getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
                AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                                    AI->getArgNo() + 1, Attrs));
              } else if (getContext().getTargetAddressSpace(ETy) == 0) {
                AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                                    AI->getArgNo() + 1,
                                                    llvm::Attribute::NonNull));
              }
            }
          } else if (const auto *ArrTy =
                     getContext().getAsVariableArrayType(OTy)) {
            // For C99 VLAs with the static keyword, we don't know the size so
            // we can't use the dereferenceable attribute, but in addrspace(0)
            // we know that it must be nonnull.
            if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
                !getContext().getTargetAddressSpace(ArrTy->getElementType()))
              AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                                  AI->getArgNo() + 1,
                                                  llvm::Attribute::NonNull));
          }

          const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
          if (!AVAttr)
            if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
              AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
          if (AVAttr) {
            llvm::Value *AlignmentValue =
                EmitScalarExpr(AVAttr->getAlignment());
            llvm::ConstantInt *AlignmentCI =
                cast<llvm::ConstantInt>(AlignmentValue);
            unsigned Alignment =
                std::min((unsigned) AlignmentCI->getZExtValue(),
                         +llvm::Value::MaximumAlignment);

            llvm::AttrBuilder Attrs;
            Attrs.addAlignmentAttr(Alignment);
            AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                                AI->getArgNo() + 1, Attrs));
          }
        }

        if (Arg->getType().isRestrictQualified())
          AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                              AI->getArgNo() + 1,
                                              llvm::Attribute::NoAlias));

        // Ensure the argument is the correct type.
        if (V->getType() != ArgI.getCoerceToType())
          V = Builder.CreateBitCast(V, ArgI.getCoerceToType());

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);

        if (const CXXMethodDecl *MD =
            dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl)) {
          if (MD->isVirtual() && Arg == CXXABIThisDecl)
            V = CGM.getCXXABI().
                adjustThisParameterInVirtualFunctionPrologue(*this, CurGD, V);
        }

        // Because of merging of function types from multiple decls it is
        // possible for the type of an argument to not match the corresponding
        // type in the function type. Since we are codegening the callee
        // in here, add a cast to the argument type.
        llvm::Type *LTy = ConvertType(Arg->getType());
        if (V->getType() != LTy)
          V = Builder.CreateBitCast(V, LTy);

        ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
        break;
      }

      llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName());
      // The alignment we need to use is the max of the requested alignment for
      // the argument and the alignment required by our access code below.
      unsigned AlignmentToUse =
          CGM.getDataLayout().getABITypeAlignment(ArgI.getCoerceToType());
      AlignmentToUse = std::max(AlignmentToUse,
                        (unsigned)getContext().getDeclAlign(Arg).getQuantity());
      Alloca->setAlignment(AlignmentToUse);
      llvm::Value *V = Alloca;
      llvm::Value *Ptr = V;    // Pointer to store into.
      CharUnits PtrAlign = CharUnits::fromQuantity(AlignmentToUse);

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgI.getDirectOffset()) {
        Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
        Ptr = Builder.CreateConstGEP1_32(Builder.getInt8Ty(), Ptr, Offs);
        Ptr = Builder.CreateBitCast(Ptr,
                          llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
        PtrAlign = PtrAlign.alignmentAtOffset(CharUnits::fromQuantity(Offs));
      }

      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
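      // (For example, an argument coerced to { i64, i64 } arrives as two
      // scalar i64 IR arguments, which we store field by field into the
      // argument alloca below.)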
      llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
      if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
          STy->getNumElements() > 1) {
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
        llvm::Type *DstTy =
            cast<llvm::PointerType>(Ptr->getType())->getElementType();
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);

        if (SrcSize <= DstSize) {
          Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));

          assert(STy->getNumElements() == NumIRArgs);
          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            auto AI = FnArgs[FirstIRArg + i];
            AI->setName(Arg->getName() + ".coerce" + Twine(i));
            llvm::Value *EltPtr = Builder.CreateConstGEP2_32(STy, Ptr, 0, i);
            Builder.CreateStore(AI, EltPtr);
          }
        } else {
          llvm::AllocaInst *TempAlloca =
              CreateTempAlloca(ArgI.getCoerceToType(), "coerce");
          TempAlloca->setAlignment(AlignmentToUse);
          llvm::Value *TempV = TempAlloca;

          assert(STy->getNumElements() == NumIRArgs);
          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            auto AI = FnArgs[FirstIRArg + i];
            AI->setName(Arg->getName() + ".coerce" + Twine(i));
            llvm::Value *EltPtr =
                Builder.CreateConstGEP2_32(ArgI.getCoerceToType(), TempV, 0, i);
            Builder.CreateStore(AI, EltPtr);
          }

          Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse);
        }
      } else {
        // Simple case, just do a coerced store of the argument into the alloca.
        assert(NumIRArgs == 1);
        auto AI = FnArgs[FirstIRArg];
        AI->setName(Arg->getName() + ".coerce");
        CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, PtrAlign, *this, Ty); // HLSL Change - Add Ty.
      }

      // Match to what EmitParmDecl is expecting for this type.
      if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
        V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty, Arg->getLocStart());
        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
        ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
      } else {
        ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
      }
      break;
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      llvm::AllocaInst *Alloca = CreateMemTemp(Ty);
      CharUnits Align = getContext().getDeclAlign(Arg);
      Alloca->setAlignment(Align.getQuantity());
      LValue LV = MakeAddrLValue(Alloca, Ty, Align);
      ArgVals.push_back(ValueAndIsPtr(Alloca, HavePointer));

      auto FnArgIter = FnArgs.begin() + FirstIRArg;
      ExpandTypeFromArgs(Ty, LV, FnArgIter);
      assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
      for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
        auto AI = FnArgs[FirstIRArg + i];
        AI->setName(Arg->getName() + "." + Twine(i));
      }
      break;
    }

    case ABIArgInfo::Ignore:
      assert(NumIRArgs == 0);
      // Initialize the local variable appropriately.
      if (!hasScalarEvaluationKind(Ty)) {
        ArgVals.push_back(ValueAndIsPtr(CreateMemTemp(Ty), HavePointer));
      } else {
        llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
        ArgVals.push_back(ValueAndIsPtr(U, HaveValue));
      }
      break;
    }
  }

  if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
    for (int I = Args.size() - 1; I >= 0; --I)
      EmitParmDecl(*Args[I], ArgVals[I].getPointer(), ArgVals[I].getInt(),
                   I + 1);
  } else {
    for (unsigned I = 0, E = Args.size(); I != E; ++I)
      EmitParmDecl(*Args[I], ArgVals[I].getPointer(), ArgVals[I].getInt(),
                   I + 1);
  }

  // HLSL Change Begins.
  if (getLangOpts().HLSL) {
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
      CGM.getHLSLRuntime().EmitHLSLFunctionProlog(Fn, FD);
    }
  }
  // HLSL Change Ends.
}
#if 0 // HLSL Change Start - no ObjC support

static void eraseUnusedBitCasts(llvm::Instruction *insn) {
  while (insn->use_empty()) {
    llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
    if (!bitcast) return;

    // This is "safe" because we would have used a ConstantExpr otherwise.
    insn = cast<llvm::Instruction>(bitcast->getOperand(0));
    bitcast->eraseFromParent();
  }
}

/// Try to emit a fused autorelease of a return result.
static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
                                                    llvm::Value *result) {
  // We must immediately follow the cast.
  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
  if (BB->empty()) return nullptr;
  if (&BB->back() != result) return nullptr;

  llvm::Type *resultType = result->getType();

  // result is in a BasicBlock and is therefore an Instruction.
  llvm::Instruction *generator = cast<llvm::Instruction>(result);

  SmallVector<llvm::Instruction*,4> insnsToKill;

  // Look for:
  //  %generator = bitcast %type1* %generator2 to %type2*
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
    // We would have emitted this as a constant if the operand weren't
    // an Instruction.
    generator = cast<llvm::Instruction>(bitcast->getOperand(0));

    // Require the generator to be immediately followed by the cast.
    if (generator->getNextNode() != bitcast)
      return nullptr;

    insnsToKill.push_back(bitcast);
  }

  // Look for:
  //   %generator = call i8* @objc_retain(i8* %originalResult)
  // or
  //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
  if (!call) return nullptr;

  bool doRetainAutorelease;

  if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
    doRetainAutorelease = true;
  } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints()
                                          .objc_retainAutoreleasedReturnValue) {
    doRetainAutorelease = false;

    // If we emitted an assembly marker for this call (and the
    // ARCEntrypoints field should have been set if so), go looking
    // for that call.  If we can't find it, we can't do this
    // optimization.  But it should always be the immediately previous
    // instruction, unless we needed bitcasts around the call.
    if (CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker) {
      llvm::Instruction *prev = call->getPrevNode();
      assert(prev);
      if (isa<llvm::BitCastInst>(prev)) {
        prev = prev->getPrevNode();
        assert(prev);
      }
      assert(isa<llvm::CallInst>(prev));
      assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
             CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker);
      insnsToKill.push_back(prev);
    }
  } else {
    return nullptr;
  }

  result = call->getArgOperand(0);
  insnsToKill.push_back(call);

  // Keep killing bitcasts, for sanity.  Note that we no longer care
  // about precise ordering as long as there's exactly one use.
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
    if (!bitcast->hasOneUse()) break;
    insnsToKill.push_back(bitcast);
    result = bitcast->getOperand(0);
  }

  // Delete all the unnecessary instructions, from latest to earliest.
  for (SmallVectorImpl<llvm::Instruction*>::iterator
         i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
    (*i)->eraseFromParent();

  // Do the fused retain/autorelease if we were asked to.
  if (doRetainAutorelease)
    result = CGF.EmitARCRetainAutoreleaseReturnValue(result);

  // Cast back to the result type.
  return CGF.Builder.CreateBitCast(result, resultType);
}
/// If this is a +1 of the value of an immutable 'self', remove it.
static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
                                          llvm::Value *result) {
  // This is only applicable to a method with an immutable 'self'.
  const ObjCMethodDecl *method =
    dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
  if (!method) return nullptr;
  const VarDecl *self = method->getSelfDecl();
  if (!self->getType().isConstQualified()) return nullptr;

  // Look for a retain call.
  llvm::CallInst *retainCall =
    dyn_cast<llvm::CallInst>(result->stripPointerCasts());
  if (!retainCall ||
      retainCall->getCalledValue() != CGF.CGM.getARCEntrypoints().objc_retain)
    return nullptr;

  // Look for an ordinary load of 'self'.
  llvm::Value *retainedValue = retainCall->getArgOperand(0);
  llvm::LoadInst *load =
    dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
  if (!load || load->isAtomic() || load->isVolatile() ||
      load->getPointerOperand() != CGF.GetAddrOfLocalVar(self))
    return nullptr;

  // Okay!  Burn it all down.  This relies for correctness on the
  // assumption that the retain is emitted as part of the return and
  // that thereafter everything is used "linearly".
  llvm::Type *resultType = result->getType();
  eraseUnusedBitCasts(cast<llvm::Instruction>(result));
  assert(retainCall->use_empty());
  retainCall->eraseFromParent();
  eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));

  return CGF.Builder.CreateBitCast(load, resultType);
}

/// Emit an ARC autorelease of the result of a function.
///
/// \return the value to actually return from the function
static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
                                            llvm::Value *result) {
  // If we're returning 'self', kill the initial retain.  This is a
  // heuristic attempt to "encourage correctness" in the really unfortunate
  // case where we have a return of self during a dealloc and we desperately
  // need to avoid the possible autorelease.
  if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
    return self;

  // At -O0, try to emit a fused retain/autorelease.
  if (CGF.shouldUseFusedARCCalls())
    if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
      return fused;

  return CGF.EmitARCAutoreleaseReturnValue(result);
}
#endif // HLSL Change Ends - no ObjC support
/// Heuristically search for a dominating store to the return-value slot.
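/// If one is found, EmitFunctionEpilog can elide the load of the return
/// slot, delete the store, and usually delete the alloca as well.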
static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
  // If there are multiple uses of the return-value slot, just check
  // for something immediately preceding the IP.  Sometimes this can
  // happen with how we generate implicit-returns; it can also happen
  // with noreturn cleanups.
  if (!CGF.ReturnValue->hasOneUse()) {
    llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
    if (IP->empty()) return nullptr;
    llvm::Instruction *I = &IP->back();

    // Skip lifetime markers
    for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
                                            IE = IP->rend();
         II != IE; ++II) {
      if (llvm::IntrinsicInst *Intrinsic =
              dyn_cast<llvm::IntrinsicInst>(&*II)) {
        if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
          const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
          ++II;
          if (II == IE)
            break;
          if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
            continue;
        }
      }
      I = &*II;
      break;
    }

    llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(I);
    if (!store) return nullptr;
    if (store->getPointerOperand() != CGF.ReturnValue) return nullptr;
    assert(!store->isAtomic() && !store->isVolatile()); // see below
    return store;
  }

  llvm::StoreInst *store =
      dyn_cast<llvm::StoreInst>(CGF.ReturnValue->user_back());
  if (!store) return nullptr;

  // These aren't actually possible for non-coerced returns, and we
  // only care about non-coerced returns on this code path.
  assert(!store->isAtomic() && !store->isVolatile());
  // Now do a quick-and-dirty dominance check: just walk up the
  // single-predecessor chain from the current insertion point.
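  // (If every block from the insertion point back to StoreBB has exactly one
  // predecessor, control can only have reached the insertion point through
  // StoreBB, so the store dominates the return.)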
  llvm::BasicBlock *StoreBB = store->getParent();
  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
  while (IP != StoreBB) {
    if (!(IP = IP->getSinglePredecessor()))
      return nullptr;
  }

  // Okay, the store's basic block dominates the insertion point; we
  // can do our thing.
  return store;
}

void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
                                         bool EmitRetDbgLoc,
                                         SourceLocation EndLoc) {
  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
    // Naked functions don't have epilogues.
    Builder.CreateUnreachable();
    return;
  }

  // Functions with no result always return void.
  if (!ReturnValue) {
    Builder.CreateRetVoid();
    return;
  }

  llvm::DebugLoc RetDbgLoc;
  llvm::Value *RV = nullptr;
  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  switch (RetAI.getKind()) {
  case ABIArgInfo::InAlloca:
    // Aggregates get evaluated directly into the destination.  Sometimes we
    // need to return the sret value in a register, though.
    assert(hasAggregateEvaluationKind(RetTy));
    if (RetAI.getInAllocaSRet()) {
      llvm::Function::arg_iterator EI = CurFn->arg_end();
      --EI;
      llvm::Value *ArgStruct = EI;
      llvm::Value *SRet = Builder.CreateStructGEP(
          nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
      RV = Builder.CreateLoad(SRet, "sret");
    }
    break;

  case ABIArgInfo::Indirect: {
    auto AI = CurFn->arg_begin();
    if (RetAI.isSRetAfterThis())
      ++AI;
    switch (getEvaluationKind(RetTy)) {
    case TEK_Complex: {
      ComplexPairTy RT =
        EmitLoadOfComplex(MakeNaturalAlignAddrLValue(ReturnValue, RetTy),
                          EndLoc);
      EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(AI, RetTy),
                         /*isInit*/ true);
      break;
    }
    case TEK_Aggregate:
      // Do nothing; aggregates get evaluated directly into the destination.
      break;
    case TEK_Scalar:
      EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
                        MakeNaturalAlignAddrLValue(AI, RetTy),
                        /*isInit*/ true);
      break;
    }
    break;
  }

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
        RetAI.getDirectOffset() == 0) {
      // HLSL Change Begin.
      // If optimization is disabled, just load return value.
      if (CGM.getCodeGenOpts().DisableLLVMOpts) {
        // HLSL Change Begins
        if (hlsl::IsHLSLMatType(RetTy))
          RV = CGM.getHLSLRuntime().EmitHLSLMatrixLoad(*this, ReturnValue,
                  FnRetTy); // FnRetTy retains attributed type
        else
        // HLSL Change Ends
          RV = Builder.CreateLoad(ReturnValue);
      } else {
      // HLSL Change End.
        // The internal return value temp will always have
        // pointer-to-return-type type; just do a load.
  2134. // If there is a dominating store to ReturnValue, we can elide
  2135. // the load, zap the store, and usually zap the alloca.
  2136. if (llvm::StoreInst *SI =
  2137. findDominatingStoreToReturnValue(*this)) {
  2138. // Reuse the debug location from the store unless there is
  2139. // cleanup code to be emitted between the store and return
  2140. // instruction.
  2141. if (EmitRetDbgLoc && !AutoreleaseResult)
  2142. RetDbgLoc = SI->getDebugLoc();
  2143. // Get the stored value and nuke the now-dead store.
  2144. RV = SI->getValueOperand();
  2145. SI->eraseFromParent();
  2146. // If that was the only use of the return value, nuke it as well now.
  2147. if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
  2148. cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
  2149. ReturnValue = nullptr;
  2150. }
  2151. // Otherwise, we have to do a simple load.
  2152. } else {
  2153. // HLSL Change Begins
  2154. if (hlsl::IsHLSLMatType(RetTy))
  2155. RV = CGM.getHLSLRuntime().EmitHLSLMatrixLoad(*this, ReturnValue,
  2156. FnRetTy); // FnRetTy retains attributed type
  2157. else
  2158. // HLSL Change Ends
  2159. RV = Builder.CreateLoad(ReturnValue);
  2160. }
  2161. } // HLSL Change
  2162. } else {
  2163. llvm::Value *V = ReturnValue;
  2164. CharUnits Align = getContext().getTypeAlignInChars(RetTy);
  2165. // If the value is offset in memory, apply the offset now.
  2166. if (unsigned Offs = RetAI.getDirectOffset()) {
  2167. V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
  2168. V = Builder.CreateConstGEP1_32(Builder.getInt8Ty(), V, Offs);
  2169. V = Builder.CreateBitCast(V,
  2170. llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
  2171. Align = Align.alignmentAtOffset(CharUnits::fromQuantity(Offs));
  2172. }
  2173. RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), Align, *this);
  2174. }
  2175. // In ARC, end functions that return a retainable type with a call
  2176. // to objc_autoreleaseReturnValue.
  2177. #if 0 // HLSL Change - no ObjC support
  2178. if (AutoreleaseResult) {
  2179. assert(getLangOpts().ObjCAutoRefCount &&
  2180. !FI.isReturnsRetained() &&
  2181. RetTy->isObjCRetainableType());
  2182. RV = emitAutoreleaseOfResult(*this, RV);
  2183. }
  2184. #else
  2185. assert(!AutoreleaseResult && "autorelease not supported in HLSL");
  2186. #endif // HLSL Change - no ObjC support
  2187. break;
  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  llvm::Instruction *Ret;
  if (RV) {
    if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute)) {
      if (auto RetNNAttr = CurGD.getDecl()->getAttr<ReturnsNonNullAttr>()) {
        SanitizerScope SanScope(this);
        llvm::Value *Cond = Builder.CreateICmpNE(
            RV, llvm::Constant::getNullValue(RV->getType()));
        llvm::Constant *StaticData[] = {
            EmitCheckSourceLocation(EndLoc),
            EmitCheckSourceLocation(RetNNAttr->getLocation()),
        };
        EmitCheck(std::make_pair(Cond, SanitizerKind::ReturnsNonnullAttribute),
                  "nonnull_return", StaticData, None);
      }
    }
    Ret = Builder.CreateRet(RV);
  } else {
    Ret = Builder.CreateRetVoid();
  }

  if (RetDbgLoc)
    Ret->setDebugLoc(std::move(RetDbgLoc));
}

static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
  const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
  return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
}
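
// Note on the placeholder scheme used below: an aggregate passed in the
// inalloca block ultimately lives at a GEP into that block, but the block is
// only allocated once every argument has been evaluated. createPlaceholderSlot
// therefore hands out a unique throwaway instruction (a load of an undef
// pointer) as the slot address; EmitCall later rewrites it to the real
// argument-slot GEP via deferPlaceholderReplacement. (Explanatory summary of
// the surrounding code.)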
static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty) {
  // FIXME: Generate IR in one pass, rather than going back and fixing up these
  // placeholders.
  llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *Placeholder =
      llvm::UndefValue::get(IRTy->getPointerTo()->getPointerTo());
  Placeholder = CGF.Builder.CreateLoad(Placeholder);
  return AggValueSlot::forAddr(Placeholder, CharUnits::Zero(),
                               Ty.getQualifiers(),
                               AggValueSlot::IsNotDestructed,
                               AggValueSlot::DoesNotNeedGCBarriers,
                               AggValueSlot::IsNotAliased);
}

void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
                                          const VarDecl *param,
                                          SourceLocation loc) {
  // StartFunction converted the ABI-lowered parameter(s) into a
  // local alloca. We need to turn that into an r-value suitable
  // for EmitCall.
  llvm::Value *local = GetAddrOfLocalVar(param);

  QualType type = param->getType();

  // For the most part, we just need to load the alloca, except:
  // 1) aggregate r-values are actually pointers to temporaries, and
  // 2) references to non-scalars are pointers directly to the aggregate.
  // I don't know why references to scalars are different here.
  if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
    if (!hasScalarEvaluationKind(ref->getPointeeType()))
      return args.add(RValue::getAggregate(local), type);

    // Locals which are references to scalars are represented
    // with allocas holding the pointer.
    return args.add(RValue::get(Builder.CreateLoad(local)), type);
  }

  assert(!isInAllocaArgument(CGM.getCXXABI(), type) &&
         "cannot emit delegate call arguments for inalloca arguments!");

  args.add(convertTempToRValue(local, type, loc), type);
}

#if 0 // HLSL Change - no ObjC support

static bool isProvablyNull(llvm::Value *addr) {
  return isa<llvm::ConstantPointerNull>(addr);
}

static bool isProvablyNonNull(llvm::Value *addr) {
  return isa<llvm::AllocaInst>(addr);
}

/// Emit the actual writing-back of a writeback.
static void emitWriteback(CodeGenFunction &CGF,
                          const CallArgList::Writeback &writeback) {
  const LValue &srcLV = writeback.Source;
  llvm::Value *srcAddr = srcLV.getAddress();
  assert(!isProvablyNull(srcAddr) &&
         "shouldn't have writeback for provably null argument");

  llvm::BasicBlock *contBB = nullptr;

  // If the argument wasn't provably non-null, we need to null check
  // before doing the store.
  bool provablyNonNull = isProvablyNonNull(srcAddr);
  if (!provablyNonNull) {
    llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
    contBB = CGF.createBasicBlock("icr.done");

    llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
    CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
    CGF.EmitBlock(writebackBB);
  }

  // Load the value to writeback.
  llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);

  // Cast it back, in case we're writing an id to a Foo* or something.
  value = CGF.Builder.CreateBitCast(
      value,
      cast<llvm::PointerType>(srcAddr->getType())->getElementType(),
      "icr.writeback-cast");

  // Perform the writeback.

  // If we have a "to use" value, it's something we need to emit a use
  // of. This has to be carefully threaded in: if it's done after the
  // release it's potentially undefined behavior (and the optimizer
  // will ignore it), and if it happens before the retain then the
  // optimizer could move the release there.
  if (writeback.ToUse) {
    assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);

    // Retain the new value. No need to block-copy here: the block's
    // being passed up the stack.
    value = CGF.EmitARCRetainNonBlock(value);

    // Emit the intrinsic use here.
    CGF.EmitARCIntrinsicUse(writeback.ToUse);

    // Load the old value (primitively).
    llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());

    // Put the new value in place (primitively).
    CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);

    // Release the old value.
    CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());

  // Otherwise, we can just do a normal lvalue store.
  } else {
    CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
  }

  // Jump to the continuation block.
  if (!provablyNonNull)
    CGF.EmitBlock(contBB);
}

static void emitWritebacks(CodeGenFunction &CGF,
                           const CallArgList &args) {
  for (const auto &I : args.writebacks())
    emitWriteback(CGF, I);
}

#endif // HLSL Change - no ObjC support
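
// In the Microsoft C++ ABI the callee destroys by-value arguments, so once an
// argument has actually been passed, the caller-side EH cleanup registered
// for it must not run. The helper below deactivates those cleanups
// immediately before the call; the cleanups only fire if an exception is
// thrown while evaluating a *later* argument, before the call is reached.
// (Explanatory note.)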
static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
                                            const CallArgList &CallArgs) {
  assert(CGF.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee());
  ArrayRef<CallArgList::CallArgCleanup> Cleanups =
      CallArgs.getCleanupsToDeactivate();
  // Iterate in reverse to increase the likelihood of popping the cleanup.
  for (ArrayRef<CallArgList::CallArgCleanup>::reverse_iterator
           I = Cleanups.rbegin(), E = Cleanups.rend(); I != E; ++I) {
    CGF.DeactivateCleanupBlock(I->Cleanup, I->IsActiveIP);
    I->IsActiveIP->eraseFromParent();
  }
}

#if 0 // HLSL Change - no ObjC support

static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
  if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
    if (uop->getOpcode() == UO_AddrOf)
      return uop->getSubExpr();
  return nullptr;
}

/// Emit an argument that's being passed call-by-writeback. That is,
/// we are passing the address of
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
                             const ObjCIndirectCopyRestoreExpr *CRE) {
  LValue srcLV;

  // Make an optimistic effort to emit the address as an l-value.
  // This can fail if the argument expression is more complicated.
  if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
    srcLV = CGF.EmitLValue(lvExpr);

  // Otherwise, just emit it as a scalar.
  } else {
    llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());

    QualType srcAddrType =
        CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
    srcLV = CGF.MakeNaturalAlignAddrLValue(srcAddr, srcAddrType);
  }
  llvm::Value *srcAddr = srcLV.getAddress();

  // The dest and src types don't necessarily match in LLVM terms
  // because of the crazy ObjC compatibility rules.
  llvm::PointerType *destType =
      cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));

  // If the address is a constant null, just pass the appropriate null.
  if (isProvablyNull(srcAddr)) {
    args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
             CRE->getType());
    return;
  }

  // Create the temporary.
  llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
                                           "icr.temp");

  // Loading an l-value can introduce a cleanup if the l-value is __weak,
  // and that cleanup will be conditional if we can't prove that the l-value
  // isn't null, so we need to register a dominating point so that the cleanups
  // system will make valid IR.
  CodeGenFunction::ConditionalEvaluation condEval(CGF);

  // Zero-initialize it if we're not doing a copy-initialization.
  bool shouldCopy = CRE->shouldCopy();
  if (!shouldCopy) {
    llvm::Value *null =
        llvm::ConstantPointerNull::get(
            cast<llvm::PointerType>(destType->getElementType()));
    CGF.Builder.CreateStore(null, temp);
  }

  llvm::BasicBlock *contBB = nullptr;
  llvm::BasicBlock *originBB = nullptr;

  // If the address is *not* known to be non-null, we need to switch.
  llvm::Value *finalArgument;

  bool provablyNonNull = isProvablyNonNull(srcAddr);
  if (provablyNonNull) {
    finalArgument = temp;
  } else {
    llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");

    finalArgument = CGF.Builder.CreateSelect(
        isNull, llvm::ConstantPointerNull::get(destType),
        temp, "icr.argument");

    // If we need to copy, then the load has to be conditional, which
    // means we need control flow.
    if (shouldCopy) {
      originBB = CGF.Builder.GetInsertBlock();
      contBB = CGF.createBasicBlock("icr.cont");
      llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
      CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
      CGF.EmitBlock(copyBB);
      condEval.begin(CGF);
    }
  }

  llvm::Value *valueToUse = nullptr;

  // Perform a copy if necessary.
  if (shouldCopy) {
    RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
    assert(srcRV.isScalar());

    llvm::Value *src = srcRV.getScalarVal();
    src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
                                    "icr.cast");

    // Use an ordinary store, not a store-to-lvalue.
    CGF.Builder.CreateStore(src, temp);

    // If optimization is enabled, and the value was held in a
    // __strong variable, we need to tell the optimizer that this
    // value has to stay alive until we're doing the store back.
    // This is because the temporary is effectively unretained,
    // and so otherwise we can violate the high-level semantics.
    if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
        srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
      valueToUse = src;
    }
  }

  // Finish the control flow if we needed it.
  if (shouldCopy && !provablyNonNull) {
    llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
    CGF.EmitBlock(contBB);

    // Make a phi for the value to intrinsically use.
    if (valueToUse) {
      llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
                                                      "icr.to-use");
      phiToUse->addIncoming(valueToUse, copyBB);
      phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
                            originBB);
      valueToUse = phiToUse;
    }

    condEval.end(CGF);
  }

  args.addWriteback(srcLV, temp, valueToUse);
  args.add(RValue::get(finalArgument), CRE->getType());
}

#endif // HLSL Change - no ObjC support
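
// The inalloca argument block is allocated dynamically so it can be freed
// after the call. A rough sketch of the resulting IR (illustrative, not
// emitted verbatim):
//
//   %inalloca.save = call i8* @llvm.stacksave()
//   %argmem = alloca inalloca <{ ... }>
//   call void @callee(<{ ... }>* inalloca %argmem)
//   call void @llvm.stackrestore(i8* %inalloca.save)
//
// allocateArgumentMemory emits the save plus an EH cleanup that restores the
// stack on unwind; freeArgumentMemory emits the restore on the normal path.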
void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
  assert(!StackBase && !StackCleanup.isValid());

  // Save the stack.
  llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
  StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");

  // Control gets really tied up in landing pads, so we have to spill the
  // stacksave to an alloca to avoid violating SSA form.
  // TODO: This is dead if we never emit the cleanup. We should create the
  // alloca and store lazily on the first cleanup emission.
  StackBaseMem = CGF.CreateTempAlloca(CGF.Int8PtrTy, "inalloca.spmem");
  CGF.Builder.CreateStore(StackBase, StackBaseMem);
  CGF.pushStackRestore(EHCleanup, StackBaseMem);
  StackCleanup = CGF.EHStack.getInnermostEHScope();
  assert(StackCleanup.isValid());
}

void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
  if (StackBase) {
    CGF.DeactivateCleanupBlock(StackCleanup, StackBase);
    llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
    // We could load StackBase from StackBaseMem, but in the non-exceptional
    // case we can skip it.
    CGF.Builder.CreateCall(F, StackBase);
  }
}
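
// EmitNonNullArgCheck implements -fsanitize=nonnull-attribute. For example
// (illustrative):
//
//   void use(int *p) __attribute__((nonnull(1)));
//   use(q); // emits: if (q == null) report via the nonnull_arg handler
//
// The check compares the scalar argument against null and reports the call
// site and the attribute's location on failure.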
void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
                                          SourceLocation ArgLoc,
                                          const FunctionDecl *FD,
                                          unsigned ParmNum) {
  if (!SanOpts.has(SanitizerKind::NonnullAttribute) || !FD)
    return;
  auto PVD = ParmNum < FD->getNumParams() ? FD->getParamDecl(ParmNum) : nullptr;
  unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
  auto NNAttr = getNonNullAttr(FD, PVD, ArgType, ArgNo);
  if (!NNAttr)
    return;
  SanitizerScope SanScope(this);
  assert(RV.isScalar());
  llvm::Value *V = RV.getScalarVal();
  llvm::Value *Cond =
      Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
  llvm::Constant *StaticData[] = {
      EmitCheckSourceLocation(ArgLoc),
      EmitCheckSourceLocation(NNAttr->getLocation()),
      llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
  };
  EmitCheck(std::make_pair(Cond, SanitizerKind::NonnullAttribute),
            "nonnull_arg", StaticData, None);
}
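
// For example (illustrative): under the MS C++ ABI, for f(g(), h()) the loop
// below emits h() first and g() second, then reverses the CallArgList so it
// still lines up left to right with the LLVM IR signature.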
void CodeGenFunction::EmitCallArgs(CallArgList &Args,
                                   ArrayRef<QualType> ArgTypes,
                                   CallExpr::const_arg_iterator ArgBeg,
                                   CallExpr::const_arg_iterator ArgEnd,
                                   const FunctionDecl *CalleeDecl,
                                   unsigned ParamsToSkip) {
  // We *have* to evaluate arguments from right to left in the MS C++ ABI,
  // because arguments are destroyed left to right in the callee.
  if (CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
    // Insert a stack save if we're going to need any inalloca args.
    bool HasInAllocaArgs = false;
    for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
         I != E && !HasInAllocaArgs; ++I)
      HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
    if (HasInAllocaArgs) {
      assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
      Args.allocateArgumentMemory(*this);
    }

    // Evaluate each argument.
    size_t CallArgsStart = Args.size();
    for (int I = ArgTypes.size() - 1; I >= 0; --I) {
      CallExpr::const_arg_iterator Arg = ArgBeg + I;
      EmitCallArg(Args, *Arg, ArgTypes[I]);
      EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], Arg->getExprLoc(),
                          CalleeDecl, ParamsToSkip + I);
    }

    // Un-reverse the arguments we just evaluated so they match up with the
    // LLVM IR function.
    std::reverse(Args.begin() + CallArgsStart, Args.end());
    return;
  }

  for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
    CallExpr::const_arg_iterator Arg = ArgBeg + I;
    assert(Arg != ArgEnd);
    EmitCallArg(Args, *Arg, ArgTypes[I]);
    EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], Arg->getExprLoc(),
                        CalleeDecl, ParamsToSkip + I);
  }
}
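
// If evaluating a later argument throws after an aggregate has already been
// evaluated into its slot but before the call is made, the callee never runs
// and so cannot destroy it; the caller must. DestroyUnpassedArg is the
// EH-only cleanup covering exactly that window, and it is deactivated right
// before the call by deactivateArgCleanupsBeforeCall above. (Explanatory
// note.)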
namespace {

struct DestroyUnpassedArg : EHScopeStack::Cleanup {
  DestroyUnpassedArg(llvm::Value *Addr, QualType Ty)
      : Addr(Addr), Ty(Ty) {}

  llvm::Value *Addr;
  QualType Ty;

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
    assert(!Dtor->isTrivial());
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
                              /*Delegating=*/false, Addr);
  }
};

}
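
// Default-argument expressions are written at the callee's declaration, not
// at the call site, so attaching their locations to call-site IR would make
// debug stepping jump to the declaring header. The RAII helper below
// suppresses debug locations while such an expression is emitted.
// (Explanatory note.)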
struct DisableDebugLocationUpdates {
  CodeGenFunction &CGF;
  bool disabledDebugInfo;
  DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
    if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
      CGF.disableDebugInfo();
  }
  ~DisableDebugLocationUpdates() {
    if (disabledDebugInfo)
      CGF.enableDebugInfo();
  }
};

void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
                                  QualType type) {
  DisableDebugLocationUpdates Dis(*this, E);
#if 0 // HLSL Change - no ObjC support
  if (const ObjCIndirectCopyRestoreExpr *CRE
        = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
    assert(getLangOpts().ObjCAutoRefCount);
    assert(getContext().hasSameType(E->getType(), type));
    return emitWritebackArg(*this, args, CRE);
  }
#endif // HLSL Change - no ObjC support

  assert(type->isReferenceType() == E->isGLValue() &&
         "reference binding to unmaterialized r-value!");

  if (E->isGLValue()) {
    // HLSL Change Begins.
    if (E->getObjectKind() == OK_VectorComponent) {
      if (const HLSLVectorElementExpr *VecElt =
              dyn_cast<HLSLVectorElementExpr>(E)) {
        LValue LV = EmitHLSLVectorElementExpr(VecElt);
        llvm::Value *Ptr = nullptr;
        if (LV.isSimple()) {
          // Handle the special case when the vector component access
          // is done on a scalar using .x or .r.
          //
          // Example 1:
          //   groupshared uint g;
          //   InterlockedAdd(g.x, 1);
          //
          // Example 2:
          //   RWBuffer<uint> buf;
          //   InterlockedAdd(buf[0].r, 1);
          llvm::Value *V = LV.getAddress();
          Ptr = Builder.CreateGEP(V, Builder.getInt32(0));
        } else {
          llvm::Value *V = LV.getExtVectorAddr();
          llvm::Constant *Elts = LV.getExtVectorElts();
          // Only scalars are supported for atomic operations.
          assert(Elts->getType()->getVectorNumElements() == 1);
          llvm::Value *ch = Builder.CreateExtractElement(Elts, (uint64_t)0);
          Ptr = Builder.CreateGEP(V, {Builder.getInt32(0), ch});
        }
        RValue RV = RValue::get(Ptr);
        return args.add(RV, type);
      } else {
        LValue LV = EmitExtMatrixElementExpr(cast<ExtMatrixElementExpr>(E));
        llvm::Value *Ptr = LV.getAddress();
        // Only scalars are supported for atomic operations.
        assert(Ptr->getType()->getPointerElementType() ==
               Ptr->getType()->getPointerElementType()->getScalarType());
        RValue RV = RValue::get(Ptr);
        return args.add(RV, type);
      }
    }
    // HLSL Change Ends.
    assert(E->getObjectKind() == OK_Ordinary);
    return args.add(EmitReferenceBindingToExpr(E), type);
  }

  bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);

  // In the Microsoft C++ ABI, aggregate arguments are destructed by the
  // callee. However, we still have to push an EH-only cleanup in case we
  // unwind before we make it to the call.
  if (HasAggregateEvalKind &&
      !LangOptions().HLSL && // HLSL Change: do not generate agg.tmp for HLSL
      CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
    // If we're using inalloca, use the argument memory. Otherwise, use a
    // temporary.
    AggValueSlot Slot;
    if (args.isUsingInAlloca())
      Slot = createPlaceholderSlot(*this, type);
    else
      Slot = CreateAggTemp(type, "agg.tmp");

    const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
    bool DestroyedInCallee =
        RD && RD->hasNonTrivialDestructor() &&
        CGM.getCXXABI().getRecordArgABI(RD) != CGCXXABI::RAA_Default;
    if (DestroyedInCallee)
      Slot.setExternallyDestructed();

    EmitAggExpr(E, Slot);
    RValue RV = Slot.asRValue();
    args.add(RV, type);

    if (DestroyedInCallee) {
      // Create a no-op GEP between the placeholder and the cleanup so we can
      // RAUW it successfully. It also serves as a marker of the first
      // instruction where the cleanup is active.
      pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddr(), type);
      // This unreachable is a temporary marker which will be removed later.
      llvm::Instruction *IsActive = Builder.CreateUnreachable();
      args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
    }
    return;
  }

  if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
      cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
    LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
    assert(L.isSimple());
    if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) {
      // HLSL Change Begin - don't copy input args.
      // The copy for out params is done in
      // CGMSHLSLRuntime::EmitHLSLOutParamConversion*.
      args.add(L.asAggregateRValue(), type); // /*NeedsCopy*/true);
      // HLSL Change End
    } else {
      // We can't represent a misaligned lvalue in the CallArgList, so copy
      // to an aligned temporary now.
      llvm::Value *tmp = CreateMemTemp(type);
      EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile(),
                        L.getAlignment());
      args.add(RValue::getAggregate(tmp), type);
    }
    return;
  }

  // HLSL Change Begins.
  // For a DeclRefExpr of aggregate type, don't create a temp.
  if (HasAggregateEvalKind && LangOptions().HLSL &&
      isa<DeclRefExpr>(E)) {
    LValue LV = EmitDeclRefLValue(cast<DeclRefExpr>(E));
    RValue RV = RValue::getAggregate(LV.getAddress());
    args.add(RV, type);
    return;
  }
  // HLSL Change Ends.

  args.add(EmitAnyExprToTemp(E), type);
}
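
// For example (illustrative): on Win64, printf("%p", NULL) would otherwise
// pass NULL as a 32-bit int; getVarArgType widens such null pointer constants
// to pointer-sized ints, matching MSVC.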
QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
  // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
  // implicitly widens null pointer constants that are arguments to varargs
  // functions to pointer-sized ints.
  if (!getTarget().getTriple().isOSWindows())
    return Arg->getType();

  if (Arg->getType()->isIntegerType() &&
      getContext().getTypeSize(Arg->getType()) <
          getContext().getTargetInfo().getPointerWidth(0) &&
      Arg->isNullPointerConstant(getContext(),
                                 Expr::NPC_ValueDependentIsNotNull)) {
    return getContext().getIntPtrType();
  }

  return Arg->getType();
}

// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
// optimizer it can aggressively ignore unwind edges.
void
CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
  if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
      !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
    Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
                      CGM.getNoObjCARCExceptionsMetadata());
}

/// Emits a call to the given no-arguments nounwind runtime function.
llvm::CallInst *
CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
                                         const llvm::Twine &name) {
  return EmitNounwindRuntimeCall(callee, None, name);
}

/// Emits a call to the given nounwind runtime function.
llvm::CallInst *
CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
                                         ArrayRef<llvm::Value*> args,
                                         const llvm::Twine &name) {
  llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
  call->setDoesNotThrow();
  return call;
}

/// Emits a simple call (never an invoke) to the given no-arguments
/// runtime function.
llvm::CallInst *
CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
                                 const llvm::Twine &name) {
  return EmitRuntimeCall(callee, None, name);
}

/// Emits a simple call (never an invoke) to the given runtime
/// function.
llvm::CallInst *
CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
                                 ArrayRef<llvm::Value*> args,
                                 const llvm::Twine &name) {
  llvm::CallInst *call = Builder.CreateCall(callee, args, name);
  call->setCallingConv(getRuntimeCC());
  return call;
}

/// Emits a call or invoke to the given noreturn runtime function.
void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(
    llvm::Value *callee, ArrayRef<llvm::Value*> args) {
  if (getInvokeDest()) {
    llvm::InvokeInst *invoke =
        Builder.CreateInvoke(callee,
                             getUnreachableBlock(),
                             getInvokeDest(),
                             args);
    invoke->setDoesNotReturn();
    invoke->setCallingConv(getRuntimeCC());
  } else {
    llvm::CallInst *call = Builder.CreateCall(callee, args);
    call->setDoesNotReturn();
    call->setCallingConv(getRuntimeCC());
    Builder.CreateUnreachable();
  }
}

/// Emits a call or invoke instruction to the given nullary runtime
/// function.
llvm::CallSite
CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
                                         const Twine &name) {
  return EmitRuntimeCallOrInvoke(callee, None, name);
}

/// Emits a call or invoke instruction to the given runtime function.
llvm::CallSite
CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
                                         ArrayRef<llvm::Value*> args,
                                         const Twine &name) {
  llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name);
  callSite.setCallingConv(getRuntimeCC());
  return callSite;
}

llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  const Twine &Name) {
  return EmitCallOrInvoke(Callee, None, Name);
}

/// Emits a call or invoke instruction to the given function, depending
/// on the current state of the EH stack.
llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  ArrayRef<llvm::Value *> Args,
                                  const Twine &Name) {
  llvm::BasicBlock *InvokeDest = getInvokeDest();
  llvm::Instruction *Inst;
  if (!InvokeDest)
    Inst = Builder.CreateCall(Callee, Args, Name);
  else {
    llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
    Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name);
    EmitBlock(ContBB);
  }

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(Inst);

  return llvm::CallSite(Inst);
}

/// \brief Store a non-aggregate value to an address to initialize it. For
/// initialization, a non-atomic store will be used.
static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src,
                                        LValue Dst) {
  if (Src.isScalar())
    CGF.EmitStoreOfScalar(Src.getScalarVal(), Dst, /*init=*/true);
  else
    CGF.EmitStoreOfComplex(Src.getComplexVal(), Dst, /*init=*/true);
}

void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
                                                  llvm::Value *New) {
  DeferredReplacements.push_back(std::make_pair(Old, New));
}

RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 ReturnValueSlot ReturnValue,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl,
                                 llvm::Instruction **callOrInvoke) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();

  llvm::FunctionType *IRFuncTy =
      cast<llvm::FunctionType>(
          cast<llvm::PointerType>(Callee->getType())->getElementType());

  // If we're using inalloca, insert the allocation after the stack save.
  // FIXME: Do this earlier rather than hacking it in here!
  llvm::AllocaInst *ArgMemory = nullptr;
  if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
    llvm::Instruction *IP = CallArgs.getStackBase();
    llvm::AllocaInst *AI;
    if (IP) {
      IP = IP->getNextNode();
      AI = new llvm::AllocaInst(ArgStruct, "argmem", IP);
    } else {
      AI = CreateTempAlloca(ArgStruct, "argmem");
    }
    AI->setUsedWithInAlloca(true);
    assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
    ArgMemory = AI;
  }

  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
  SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());

  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result, unless one is given to us.
  llvm::Value *SRetPtr = nullptr;
  size_t UnusedReturnSize = 0;
  if (RetAI.isIndirect() || RetAI.isInAlloca()) {
    SRetPtr = ReturnValue.getValue();
    if (!SRetPtr) {
      SRetPtr = CreateMemTemp(RetTy);
      if (HaveInsertPoint() && ReturnValue.isUnused()) {
        uint64_t size =
            CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
        if (EmitLifetimeStart(size, SRetPtr))
          UnusedReturnSize = size;
      }
    }
    // HLSL Change begin.
    CGM.getHLSLRuntime().MarkRetTemp(*this, SRetPtr, RetTy);
    // HLSL Change end.
    if (IRFunctionArgs.hasSRetArg()) {
      IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr;
    } else {
      llvm::Value *Addr =
          Builder.CreateStructGEP(ArgMemory->getAllocatedType(), ArgMemory,
                                  RetAI.getInAllocaFieldIndex());
      Builder.CreateStore(SRetPtr, Addr);
    }
  }

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it, ++ArgNo) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->RV;
    CharUnits TypeAlign = getContext().getTypeAlignInChars(I->Ty);

    // Insert a padding argument to ensure proper alignment.
    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
          llvm::UndefValue::get(ArgInfo.getPaddingType());

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgInfo.getKind()) {
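    // InAlloca: the argument lives at a fixed offset inside the inalloca
    // block. Aggregates were evaluated through a placeholder (see
    // createPlaceholderSlot), which is now rewired to the real slot GEP;
    // scalars and complex values are stored into their slot directly.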
    case ABIArgInfo::InAlloca: {
      assert(NumIRArgs == 0);
      assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
      if (RV.isAggregate()) {
        // Replace the placeholder with the appropriate argument slot GEP.
        llvm::Instruction *Placeholder =
            cast<llvm::Instruction>(RV.getAggregateAddr());
        CGBuilderTy::InsertPoint IP = Builder.saveIP();
        Builder.SetInsertPoint(Placeholder);
        llvm::Value *Addr =
            Builder.CreateStructGEP(ArgMemory->getAllocatedType(), ArgMemory,
                                    ArgInfo.getInAllocaFieldIndex());
        Builder.restoreIP(IP);
        deferPlaceholderReplacement(Placeholder, Addr);
      } else {
        // Store the RValue into the argument struct.
        llvm::Value *Addr =
            Builder.CreateStructGEP(ArgMemory->getAllocatedType(), ArgMemory,
                                    ArgInfo.getInAllocaFieldIndex());
        unsigned AS = Addr->getType()->getPointerAddressSpace();
        llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
        // There are some cases where a trivial bitcast is not avoidable. The
        // definition of a type later in a translation unit may change its type
        // from {}* to (%struct.foo*)*.
        if (Addr->getType() != MemType)
          Addr = Builder.CreateBitCast(Addr, MemType);
        LValue argLV = MakeAddrLValue(Addr, I->Ty, TypeAlign);
        EmitInitStoreOfNonAggregate(*this, RV, argLV);
      }
      break;
    }

    case ABIArgInfo::Indirect: {
      assert(NumIRArgs == 1);
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
        if (ArgInfo.getIndirectAlign() > AI->getAlignment())
          AI->setAlignment(ArgInfo.getIndirectAlign());
        IRCallArgs[FirstIRArg] = AI;

        LValue argLV = MakeAddrLValue(AI, I->Ty, TypeAlign);
        EmitInitStoreOfNonAggregate(*this, RV, argLV);
      } else {
        // We want to avoid creating an unnecessary temporary+copy here;
        // however, we need one in three cases:
        // 1. If the argument is not byval, and we are required to copy the
        //    source. (This case doesn't occur on any common architecture.)
        // 2. If the argument is byval, RV is not sufficiently aligned, and
        //    we cannot force it to be sufficiently aligned.
        // 3. If the argument is byval, but RV is located in an address space
        //    different than that of the argument (0).
        llvm::Value *Addr = RV.getAggregateAddr();
        unsigned Align = ArgInfo.getIndirectAlign();
        const llvm::DataLayout *TD = &CGM.getDataLayout();
        const unsigned RVAddrSpace = Addr->getType()->getPointerAddressSpace();
        const unsigned ArgAddrSpace =
            (FirstIRArg < IRFuncTy->getNumParams()
                 ? IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace()
                 : 0);
        if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
            (ArgInfo.getIndirectByVal() && TypeAlign.getQuantity() < Align &&
             llvm::getOrEnforceKnownAlignment(Addr, Align, *TD) < Align) ||
            (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) {
          // Create an aligned temporary, and copy to it.
          llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
          if (Align > AI->getAlignment())
            AI->setAlignment(Align);
          IRCallArgs[FirstIRArg] = AI;
          EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
        } else {
          // Skip the extra memcpy call.
          // HLSL Change Starts
          // Generate an addrspacecast for shared memory.
          if (RVAddrSpace != ArgAddrSpace) {
            Addr = Builder.CreateAddrSpaceCast(
                Addr, IRFuncTy->getParamType(FirstIRArg));
          }
          // HLSL Change Ends
          IRCallArgs[FirstIRArg] = Addr;
        }
      }
      break;
    }

    case ABIArgInfo::Ignore:
      assert(NumIRArgs == 0);
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
          ArgInfo.getDirectOffset() == 0) {
        assert(NumIRArgs == 1);
        llvm::Value *V;
        if (RV.isScalar())
          V = RV.getScalarVal();
        else
          V = Builder.CreateLoad(RV.getAggregateAddr());

        // We might have to widen integers, but we should never truncate.
        if (ArgInfo.getCoerceToType() != V->getType() &&
            V->getType()->isIntegerTy())
          V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());

        // If the argument doesn't match, perform a bitcast to coerce it. This
        // can happen due to trivial type mismatches.
        if (FirstIRArg < IRFuncTy->getNumParams() &&
            V->getType() != IRFuncTy->getParamType(FirstIRArg)) {
          // HLSL Change Starts
          // Generate an addrspacecast for shared memory.
          if (V->getType()->isPointerTy())
            V = Builder.CreatePointerBitCastOrAddrSpaceCast(
                V, IRFuncTy->getParamType(FirstIRArg));
          else
            // HLSL Change Ends
            V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
        }
        IRCallArgs[FirstIRArg] = V;
        break;
      }

      // HLSL Change Begins
      if (hlsl::IsHLSLMatType(I->Ty)) {
        // For matrices, just use the value directly.
        IRCallArgs[FirstIRArg] = RV.getScalarVal();
        continue;
      }
      // HLSL Change Ends

      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      CharUnits SrcAlign;
      if (RV.isScalar() || RV.isComplex()) {
        SrcPtr = CreateMemTemp(I->Ty, "coerce");
        SrcAlign = TypeAlign;
        LValue SrcLV = MakeAddrLValue(SrcPtr, I->Ty, TypeAlign);
        EmitInitStoreOfNonAggregate(*this, RV, SrcLV);
      } else {
        SrcPtr = RV.getAggregateAddr();
        // This alignment is guaranteed by EmitCallArg.
        SrcAlign = TypeAlign;
      }

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgInfo.getDirectOffset()) {
        SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
        SrcPtr = Builder.CreateConstGEP1_32(Builder.getInt8Ty(), SrcPtr, Offs);
        SrcPtr = Builder.CreateBitCast(
            SrcPtr, llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
        SrcAlign = SrcAlign.alignmentAtOffset(CharUnits::fromQuantity(Offs));
      }
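
      // For example (illustrative): on x86-64 SysV, a struct { long x;
      // double y; } argument is typically coerced to { i64, double }; the
      // flattening below then passes the i64 and the double as two separate
      // IR arguments rather than as one first-class aggregate.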
      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
      llvm::StructType *STy =
          dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
      if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
        llvm::Type *SrcTy =
            cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);

        // If the source type is smaller than the destination type of the
        // coerce-to logic, copy the source value into a temp alloca the size
        // of the destination type to allow loading all of it. The bits past
        // the source value are left undef.
        if (SrcSize < DstSize) {
          llvm::AllocaInst *TempAlloca
            = CreateTempAlloca(STy, SrcPtr->getName() + ".coerce");
          Builder.CreateMemCpy(TempAlloca, SrcPtr, SrcSize, 0);
          SrcPtr = TempAlloca;
        } else {
          SrcPtr = Builder.CreateBitCast(SrcPtr,
                                         llvm::PointerType::getUnqual(STy));
        }

        assert(NumIRArgs == STy->getNumElements());
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          llvm::Value *EltPtr = Builder.CreateConstGEP2_32(STy, SrcPtr, 0, i);
          llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
          // We don't know what we're loading from.
          LI->setAlignment(1);
          IRCallArgs[FirstIRArg + i] = LI;
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
        assert(NumIRArgs == 1);
        IRCallArgs[FirstIRArg] =
            CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                              SrcAlign, *this);
      }
      break;
    }

    case ABIArgInfo::Expand:
      unsigned IRArgPos = FirstIRArg;
      ExpandTypeToArgs(I->Ty, RV, IRFuncTy, IRCallArgs, IRArgPos);
      assert(IRArgPos == FirstIRArg + NumIRArgs);
      break;
    }
  }

  if (ArgMemory) {
    llvm::Value *Arg = ArgMemory;
    if (CallInfo.isVariadic()) {
      // When passing non-POD arguments by value to variadic functions, we will
      // end up with a variadic prototype and an inalloca call site. In such
      // cases, we can't do any parameter mismatch checks. Give up and bitcast
      // the callee.
      unsigned CalleeAS =
          cast<llvm::PointerType>(Callee->getType())->getAddressSpace();
      Callee = Builder.CreateBitCast(
          Callee, getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS));
    } else {
      llvm::Type *LastParamTy =
          IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
      if (Arg->getType() != LastParamTy) {
#ifndef NDEBUG
        // Assert that these structs have equivalent element types.
        llvm::StructType *FullTy = CallInfo.getArgStruct();
        llvm::StructType *DeclaredTy = cast<llvm::StructType>(
            cast<llvm::PointerType>(LastParamTy)->getElementType());
        assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
        for (llvm::StructType::element_iterator
                 DI = DeclaredTy->element_begin(),
                 DE = DeclaredTy->element_end(),
                 FI = FullTy->element_begin();
             DI != DE; ++DI, ++FI)
          assert(*DI == *FI);
#endif
        Arg = Builder.CreateBitCast(Arg, LastParamTy);
      }
    }
    assert(IRFunctionArgs.hasInallocaArg());
    IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
  }

  if (!CallArgs.getCleanupsToDeactivate().empty())
    deactivateArgCleanupsBeforeCall(*this, CallArgs);
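
  // For example (illustrative): calling an unprototyped K&R-style function
  // declared as `void f();` with arguments yields a callee of the form
  // `bitcast` of @f to the call's function type; when the underlying
  // function's signature matches the actual call, the cast is stripped below
  // so the call becomes direct (which also lets always_inline fire at -O0).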
  // If the callee is a bitcast of a function to a varargs pointer to function
  // type, check to see if we can remove the bitcast. This handles some cases
  // with unprototyped functions.
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      llvm::PointerType *CurPT = cast<llvm::PointerType>(Callee->getType());
      llvm::FunctionType *CurFT =
          cast<llvm::FunctionType>(CurPT->getElementType());
      llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
          ActualFT->getNumParams() == IRCallArgs.size() &&
          (CurFT->isVarArg() || !ActualFT->isVarArg())) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it. This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }

  assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
    // Inalloca arguments can have a different type.
    if (IRFunctionArgs.hasInallocaArg() &&
        i == IRFunctionArgs.getInallocaArgNo())
      continue;
    if (i < IRFuncTy->getNumParams())
      assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
  }

  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList,
                             CallingConv, true);
  llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(),
                                                     AttributeList);

  llvm::BasicBlock *InvokeDest = nullptr;
  if (!Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex,
                          llvm::Attribute::NoUnwind) ||
      currentFunctionUsesSEHTry())
    InvokeDest = getInvokeDest();

  llvm::CallSite CS;
  if (!InvokeDest) {
    CS = Builder.CreateCall(Callee, IRCallArgs);
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, IRCallArgs);
    EmitBlock(Cont);
  }
  if (callOrInvoke)
    *callOrInvoke = CS.getInstruction();

  if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
      !CS.hasFnAttr(llvm::Attribute::NoInline))
    Attrs =
        Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
                           llvm::Attribute::AlwaysInline);

  // Disable inlining inside SEH __try blocks.
  if (isSEHTryScope())
    Attrs =
        Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
                           llvm::Attribute::NoInline);

  CS.setAttributes(Attrs);
  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(CS.getInstruction());

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    if (UnusedReturnSize)
      EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
                      SRetPtr);

    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters in
    // general are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }
  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
    CI->setName("call");

#if 0 // HLSL Change - no ObjC support
  // Emit any writebacks immediately. Arguably this should happen
  // after any return-value munging.
  if (CallArgs.hasWritebacks())
    emitWritebacks(*this, CallArgs);
#else
  assert(!CallArgs.hasWritebacks() && "writebacks are unavailable in HLSL");
#endif // HLSL Change - no ObjC support

  // The stack cleanup for inalloca arguments has to run out of the normal
  // lexical order, so deactivate it and run it manually here.
  CallArgs.freeArgumentMemory(*this);

  RValue Ret = [&] {
    switch (RetAI.getKind()) {
    case ABIArgInfo::InAlloca:
    case ABIArgInfo::Indirect: {
      RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
      if (UnusedReturnSize)
        EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
                        SRetPtr);
      return ret;
    }

    case ABIArgInfo::Ignore:
      // If we are ignoring an argument that had a result, make sure to
      // construct the appropriate return value for our caller.
      return GetUndefRValue(RetTy);

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      llvm::Type *RetIRTy = ConvertType(RetTy);
      if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
        switch (getEvaluationKind(RetTy)) {
        case TEK_Complex: {
          llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
          llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
          return RValue::getComplex(std::make_pair(Real, Imag));
        }
        case TEK_Aggregate: {
          llvm::Value *DestPtr = ReturnValue.getValue();
          bool DestIsVolatile = ReturnValue.isVolatile();
          CharUnits DestAlign = getContext().getTypeAlignInChars(RetTy);

          if (!DestPtr) {
            DestPtr = CreateMemTemp(RetTy, "agg.tmp");
            DestIsVolatile = false;
          }
          BuildAggStore(*this, CI, DestPtr, DestIsVolatile, DestAlign,
                        RetTy); // HLSL Change - Add QualTy.
          return RValue::getAggregate(DestPtr);
        }
        case TEK_Scalar: {
          // If the argument doesn't match, perform a bitcast to coerce it.
          // This can happen due to trivial type mismatches.
          llvm::Value *V = CI;
          if (V->getType() != RetIRTy)
            V = Builder.CreateBitCast(V, RetIRTy);
          return RValue::get(V);
        }
        }
        llvm_unreachable("bad evaluation kind");
      }

      llvm::Value *DestPtr = ReturnValue.getValue();
      bool DestIsVolatile = ReturnValue.isVolatile();
      CharUnits DestAlign = getContext().getTypeAlignInChars(RetTy);

      if (!DestPtr) {
        DestPtr = CreateMemTemp(RetTy, "coerce");
        DestIsVolatile = false;
      }

      // If the value is offset in memory, apply the offset now.
      llvm::Value *StorePtr = DestPtr;
      CharUnits StoreAlign = DestAlign;
      if (unsigned Offs = RetAI.getDirectOffset()) {
        StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
        StorePtr =
            Builder.CreateConstGEP1_32(Builder.getInt8Ty(), StorePtr, Offs);
        StorePtr = Builder.CreateBitCast(
            StorePtr, llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
        StoreAlign =
            StoreAlign.alignmentAtOffset(CharUnits::fromQuantity(Offs));
      }
      CreateCoercedStore(CI, StorePtr, DestIsVolatile, StoreAlign, *this,
                         RetTy); // HLSL Change - Add QTy.
      return convertTempToRValue(DestPtr, RetTy, SourceLocation());
    }

    case ABIArgInfo::Expand:
      llvm_unreachable("Invalid ABI kind for return argument");
    }

    llvm_unreachable("Unhandled ABIArgInfo::Kind");
  } ();
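
  // assume_aligned lets the optimizer rely on the alignment of a returned
  // pointer. For example (illustrative):
  //
  //   void *my_alloc(size_t n) __attribute__((assume_aligned(64)));
  //
  // causes the block below to emit an alignment assumption on the returned
  // value (with an optional offset operand).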
  if (Ret.isScalar() && TargetDecl) {
    if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
      llvm::Value *OffsetValue = nullptr;
      if (const auto *Offset = AA->getOffset())
        OffsetValue = EmitScalarExpr(Offset);

      llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
      llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
      EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(),
                              OffsetValue);
    }
  }

  return Ret;
}

/* VarArg handling */
llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
}