// llvm_abi.cpp

enum lbArgKind {
	lbArg_Direct,
	lbArg_Indirect,
	lbArg_Ignore,
};

struct lbArgType {
	lbArgKind        kind;
	LLVMTypeRef      type;
	LLVMTypeRef      cast_type;       // Optional
	LLVMTypeRef      pad_type;        // Optional
	LLVMAttributeRef attribute;       // Optional
	LLVMAttributeRef align_attribute; // Optional
	i64              byval_alignment;
	bool             is_byval;
};

i64 lb_sizeof(LLVMTypeRef type);
i64 lb_alignof(LLVMTypeRef type);

lbArgType lb_arg_type_direct(LLVMTypeRef type, LLVMTypeRef cast_type, LLVMTypeRef pad_type, LLVMAttributeRef attr) {
	return lbArgType{lbArg_Direct, type, cast_type, pad_type, attr, nullptr, 0, false};
}
lbArgType lb_arg_type_direct(LLVMTypeRef type) {
	return lb_arg_type_direct(type, nullptr, nullptr, nullptr);
}

lbArgType lb_arg_type_indirect(LLVMTypeRef type, LLVMAttributeRef attr) {
	return lbArgType{lbArg_Indirect, type, nullptr, nullptr, attr, nullptr, 0, false};
}

lbArgType lb_arg_type_indirect_byval(LLVMContextRef c, LLVMTypeRef type) {
	i64 alignment = lb_alignof(type);
	alignment = gb_max(alignment, 8);

	LLVMAttributeRef byval_attr = lb_create_enum_attribute_with_type(c, "byval", type);
	LLVMAttributeRef align_attr = lb_create_enum_attribute(c, "align", alignment);
	return lbArgType{lbArg_Indirect, type, nullptr, nullptr, byval_attr, align_attr, alignment, true};
}

lbArgType lb_arg_type_ignore(LLVMTypeRef type) {
	return lbArgType{lbArg_Ignore, type, nullptr, nullptr, nullptr, nullptr, 0, false};
}

struct lbFunctionType {
	LLVMContextRef        ctx;
	ProcCallingConvention calling_convention;
	Array<lbArgType>      args;
	lbArgType             ret;
};

i64 llvm_align_formula(i64 off, i64 a) {
	return (off + a - 1) / a * a;
}
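
// llvm_align_formula rounds `off` up to the next multiple of `a`,
// e.g. llvm_align_formula(13, 8) == 16 and llvm_align_formula(16, 8) == 16.
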
bool lb_is_type_kind(LLVMTypeRef type, LLVMTypeKind kind) {
	if (type == nullptr) {
		return false;
	}
	return LLVMGetTypeKind(type) == kind;
}

LLVMTypeRef lb_function_type_to_llvm_ptr(lbFunctionType *ft, bool is_var_arg) {
	unsigned arg_count = cast(unsigned)ft->args.count;
	unsigned offset = 0;

	LLVMTypeRef ret = nullptr;
	if (ft->ret.kind == lbArg_Direct) {
		if (ft->ret.cast_type != nullptr) {
			ret = ft->ret.cast_type;
		} else {
			ret = ft->ret.type;
		}
	} else if (ft->ret.kind == lbArg_Indirect) {
		offset += 1;
		ret = LLVMVoidTypeInContext(ft->ctx);
	} else if (ft->ret.kind == lbArg_Ignore) {
		ret = LLVMVoidTypeInContext(ft->ctx);
	}
	GB_ASSERT_MSG(ret != nullptr, "%d", ft->ret.kind);

	unsigned maximum_arg_count = offset+arg_count;
	LLVMTypeRef *args = gb_alloc_array(permanent_allocator(), LLVMTypeRef, maximum_arg_count);
	if (offset == 1) {
		GB_ASSERT(ft->ret.kind == lbArg_Indirect);
		args[0] = LLVMPointerType(ft->ret.type, 0);
	}

	unsigned arg_index = offset;
	for (unsigned i = 0; i < arg_count; i++) {
		lbArgType *arg = &ft->args[i];
		if (arg->kind == lbArg_Direct) {
			LLVMTypeRef arg_type = nullptr;
			if (ft->args[i].cast_type != nullptr) {
				arg_type = arg->cast_type;
			} else {
				arg_type = arg->type;
			}
			args[arg_index++] = arg_type;
		} else if (arg->kind == lbArg_Indirect) {
			GB_ASSERT(!lb_is_type_kind(arg->type, LLVMPointerTypeKind));
			args[arg_index++] = LLVMPointerType(arg->type, 0);
		} else if (arg->kind == lbArg_Ignore) {
			// ignore
		}
	}
	unsigned total_arg_count = arg_index;
	LLVMTypeRef func_type = LLVMFunctionType(ret, args, total_arg_count, is_var_arg);
	return LLVMPointerType(func_type, 0);
}
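
// lb_function_type_to_llvm_ptr mirrors the lbArgType classification:
//   - lbArg_Direct:   passed by value, using `cast_type` instead of `type` when the ABI chose a coercion.
//   - lbArg_Indirect: passed as a pointer; an indirect return becomes a leading pointer
//                     parameter and the LLVM return type is void.
//   - lbArg_Ignore:   dropped entirely (used for zero-sized arguments).
// The result is a pointer-to-function type for the lowered signature.
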
void lb_add_function_type_attributes(LLVMValueRef fn, lbFunctionType *ft, ProcCallingConvention calling_convention) {
	if (ft == nullptr) {
		return;
	}
	unsigned arg_count = cast(unsigned)ft->args.count;
	unsigned offset = 0;
	if (ft->ret.kind == lbArg_Indirect) {
		offset += 1;
	}

	LLVMContextRef c = ft->ctx;
	LLVMAttributeRef noalias_attr   = lb_create_enum_attribute(c, "noalias");
	LLVMAttributeRef nonnull_attr   = lb_create_enum_attribute(c, "nonnull");
	LLVMAttributeRef nocapture_attr = lb_create_enum_attribute(c, "nocapture");

	unsigned arg_index = offset;
	for (unsigned i = 0; i < arg_count; i++) {
		lbArgType *arg = &ft->args[i];
		if (arg->kind == lbArg_Ignore) {
			continue;
		}

		if (arg->attribute) {
			LLVMAddAttributeAtIndex(fn, arg_index+1, arg->attribute);
		}
		if (arg->align_attribute) {
			LLVMAddAttributeAtIndex(fn, arg_index+1, arg->align_attribute);
		}

		arg_index++;
	}

	if (offset != 0 && ft->ret.kind == lbArg_Indirect && ft->ret.attribute != nullptr) {
		LLVMAddAttributeAtIndex(fn, offset, ft->ret.attribute);
		LLVMAddAttributeAtIndex(fn, offset, noalias_attr);
	}

	lbCallingConventionKind cc_kind = lbCallingConvention_C;
	// TODO(bill): Clean up this logic
	if (!is_arch_wasm()) {
		cc_kind = lb_calling_convention_map[calling_convention];
	}
	// if (build_context.metrics.arch == TargetArch_amd64) {
	// 	if (build_context.metrics.os == TargetOs_windows) {
	// 		if (cc_kind == lbCallingConvention_C) {
	// 			cc_kind = lbCallingConvention_Win64;
	// 		}
	// 	} else {
	// 		if (cc_kind == lbCallingConvention_C) {
	// 			cc_kind = lbCallingConvention_X86_64_SysV;
	// 		}
	// 	}
	// }
	LLVMSetFunctionCallConv(fn, cc_kind);

	if (calling_convention == ProcCC_Odin) {
		unsigned context_index = offset+arg_count;
		LLVMAddAttributeAtIndex(fn, context_index, noalias_attr);
		LLVMAddAttributeAtIndex(fn, context_index, nonnull_attr);
		LLVMAddAttributeAtIndex(fn, context_index, nocapture_attr);
	}
}
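
// NOTE: LLVMAddAttributeAtIndex uses index 0 for the return value and 1-based indices for
// parameters, hence `arg_index+1` for regular parameters while the indirect return slot,
// when present, is parameter `offset` (== 1). For the Odin calling convention the implicit
// context pointer is expected to be the final parameter (index `offset+arg_count`) and is
// marked noalias/nonnull/nocapture.
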
i64 lb_sizeof(LLVMTypeRef type) {
	LLVMTypeKind kind = LLVMGetTypeKind(type);
	switch (kind) {
	case LLVMVoidTypeKind:
		return 0;
	case LLVMIntegerTypeKind:
		{
			unsigned w = LLVMGetIntTypeWidth(type);
			return (w + 7)/8;
		}
	case LLVMHalfTypeKind:
		return 2;
	case LLVMFloatTypeKind:
		return 4;
	case LLVMDoubleTypeKind:
		return 8;
	case LLVMPointerTypeKind:
		return build_context.word_size;
	case LLVMStructTypeKind:
		{
			unsigned field_count = LLVMCountStructElementTypes(type);
			i64 offset = 0;
			if (LLVMIsPackedStruct(type)) {
				for (unsigned i = 0; i < field_count; i++) {
					LLVMTypeRef field = LLVMStructGetTypeAtIndex(type, i);
					offset += lb_sizeof(field);
				}
			} else {
				for (unsigned i = 0; i < field_count; i++) {
					LLVMTypeRef field = LLVMStructGetTypeAtIndex(type, i);
					i64 align = lb_alignof(field);
					offset = llvm_align_formula(offset, align);
					offset += lb_sizeof(field);
				}
				offset = llvm_align_formula(offset, lb_alignof(type));
			}
			return offset;
		}
		break;
	case LLVMArrayTypeKind:
		{
			LLVMTypeRef elem = LLVMGetElementType(type);
			i64 elem_size = lb_sizeof(elem);
			i64 count = LLVMGetArrayLength(type);
			i64 size = count * elem_size;
			return size;
		}
		break;
	case LLVMX86_MMXTypeKind:
		return 8;
	case LLVMVectorTypeKind:
		{
			LLVMTypeRef elem = LLVMGetElementType(type);
			i64 elem_size = lb_sizeof(elem);
			i64 count = LLVMGetVectorSize(type);
			i64 size = count * elem_size;
			return gb_clamp(next_pow2(size), 1, build_context.max_align);
		}
	}
	GB_PANIC("Unhandled type for lb_sizeof -> %s", LLVMPrintTypeToString(type));
	return 0;
}
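
// Example: for a non-packed struct {i8, i64}, the loop above places the i8 at offset 0,
// aligns the running offset to 8 for the i64 (bytes 8..16), and then pads the total to the
// struct alignment, giving lb_sizeof == 16 with lb_alignof == 8.
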
i64 lb_alignof(LLVMTypeRef type) {
	LLVMTypeKind kind = LLVMGetTypeKind(type);
	switch (kind) {
	case LLVMVoidTypeKind:
		return 1;
	case LLVMIntegerTypeKind:
		{
			unsigned w = LLVMGetIntTypeWidth(type);
			return gb_clamp((w + 7)/8, 1, build_context.word_size);
		}
	case LLVMHalfTypeKind:
		return 2;
	case LLVMFloatTypeKind:
		return 4;
	case LLVMDoubleTypeKind:
		return 8;
	case LLVMPointerTypeKind:
		return build_context.word_size;
	case LLVMStructTypeKind:
		{
			if (LLVMIsPackedStruct(type)) {
				return 1;
			} else {
				unsigned field_count = LLVMCountStructElementTypes(type);
				i64 max_align = 1;
				for (unsigned i = 0; i < field_count; i++) {
					LLVMTypeRef field = LLVMStructGetTypeAtIndex(type, i);
					i64 field_align = lb_alignof(field);
					max_align = gb_max(max_align, field_align);
				}
				return max_align;
			}
		}
		break;
	case LLVMArrayTypeKind:
		return lb_alignof(LLVMGetElementType(type));
	case LLVMX86_MMXTypeKind:
		return 8;
	case LLVMVectorTypeKind:
		{
			// TODO(bill): This appears to be correct but LLVM isn't necessarily "great" with regards to documentation
			LLVMTypeRef elem = LLVMGetElementType(type);
			i64 elem_size = lb_sizeof(elem);
			i64 count = LLVMGetVectorSize(type);
			i64 size = count * elem_size;
			return gb_clamp(next_pow2(size), 1, build_context.max_align);
		}
	}
	GB_PANIC("Unhandled type for lb_alignof -> %s", LLVMPrintTypeToString(type));
	// LLVMValueRef v = LLVMAlignOf(type);
	// GB_ASSERT(LLVMIsConstant(v));
	// return LLVMConstIntGetSExtValue(v);
	return 1;
}
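
// Each target-specific ABI below implements the LB_ABI_INFO signature: it classifies every
// argument type and the return type into an lbArgType (direct, indirect, or ignored), which
// lb_function_type_to_llvm_ptr and lb_add_function_type_attributes then turn into the final
// LLVM function type and its attributes.
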
#define LB_ABI_INFO(name) lbFunctionType *name(LLVMContextRef c, LLVMTypeRef *arg_types, unsigned arg_count, LLVMTypeRef return_type, bool return_is_defined, ProcCallingConvention calling_convention)
typedef LB_ABI_INFO(lbAbiInfoType);

// NOTE(bill): I hate `namespace` in C++ but this is just because I don't want to prefix everything
namespace lbAbi386 {
	Array<lbArgType> compute_arg_types(LLVMContextRef c, LLVMTypeRef *arg_types, unsigned arg_count);
	lbArgType compute_return_type(LLVMContextRef c, LLVMTypeRef return_type, bool return_is_defined);

	LB_ABI_INFO(abi_info) {
		lbFunctionType *ft = gb_alloc_item(permanent_allocator(), lbFunctionType);
		ft->ctx = c;
		ft->args = compute_arg_types(c, arg_types, arg_count);
		ft->ret = compute_return_type(c, return_type, return_is_defined);
		ft->calling_convention = calling_convention;
		return ft;
	}

	lbArgType non_struct(LLVMContextRef c, LLVMTypeRef type, bool is_return) {
		if (!is_return && lb_sizeof(type) > 8) {
			return lb_arg_type_indirect(type, nullptr);
		}

		if (build_context.metrics.os == TargetOs_windows &&
		    build_context.word_size == 8 &&
		    lb_is_type_kind(type, LLVMIntegerTypeKind) &&
		    type == LLVMIntTypeInContext(c, 128)) {
			// NOTE(bill): Because Windows AMD64 is weird
			// TODO(bill): LLVM is probably bugged here and doesn't correctly generate the right code
			// So even though it is "technically" wrong, no cast might be the best option
			LLVMTypeRef cast_type = nullptr;
			if (true || !is_return) {
				cast_type = LLVMVectorType(LLVMInt64TypeInContext(c), 2);
			}
			return lb_arg_type_direct(type, cast_type, nullptr, nullptr);
		}

		LLVMAttributeRef attr = nullptr;
		LLVMTypeRef i1 = LLVMInt1TypeInContext(c);
		if (type == i1) {
			attr = lb_create_enum_attribute(c, "zeroext");
		}
		return lb_arg_type_direct(type, nullptr, nullptr, attr);
	}

	Array<lbArgType> compute_arg_types(LLVMContextRef c, LLVMTypeRef *arg_types, unsigned arg_count) {
		auto args = array_make<lbArgType>(heap_allocator(), arg_count);

		for (unsigned i = 0; i < arg_count; i++) {
			LLVMTypeRef t = arg_types[i];
			LLVMTypeKind kind = LLVMGetTypeKind(t);
			i64 sz = lb_sizeof(t);
			if (kind == LLVMStructTypeKind || kind == LLVMArrayTypeKind) {
				if (sz == 0) {
					args[i] = lb_arg_type_ignore(t);
				} else {
					args[i] = lb_arg_type_indirect(t, nullptr);
				}
			} else {
				args[i] = non_struct(c, t, false);
			}
		}
		return args;
	}

	lbArgType compute_return_type(LLVMContextRef c, LLVMTypeRef return_type, bool return_is_defined) {
		if (!return_is_defined) {
			return lb_arg_type_direct(LLVMVoidTypeInContext(c));
		} else if (lb_is_type_kind(return_type, LLVMStructTypeKind) || lb_is_type_kind(return_type, LLVMArrayTypeKind)) {
			i64 sz = lb_sizeof(return_type);
			switch (sz) {
			case 1: return lb_arg_type_direct(return_type, LLVMIntTypeInContext(c,  8), nullptr, nullptr);
			case 2: return lb_arg_type_direct(return_type, LLVMIntTypeInContext(c, 16), nullptr, nullptr);
			case 4: return lb_arg_type_direct(return_type, LLVMIntTypeInContext(c, 32), nullptr, nullptr);
			case 8: return lb_arg_type_direct(return_type, LLVMIntTypeInContext(c, 64), nullptr, nullptr);
			}
			LLVMAttributeRef attr = lb_create_enum_attribute_with_type(c, "sret", return_type);
			return lb_arg_type_indirect(return_type, attr);
		}
		return non_struct(c, return_type, true);
	}
};
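
// Win64 (Microsoft x64): aggregates of exactly 1, 2, 4, or 8 bytes are passed directly,
// coerced to a same-sized integer; all other aggregates are passed indirectly by pointer.
// Return-type handling is shared with lbAbi386::compute_return_type above.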
namespace lbAbiAmd64Win64 {
	Array<lbArgType> compute_arg_types(LLVMContextRef c, LLVMTypeRef *arg_types, unsigned arg_count);

	LB_ABI_INFO(abi_info) {
		lbFunctionType *ft = gb_alloc_item(permanent_allocator(), lbFunctionType);
		ft->ctx = c;
		ft->args = compute_arg_types(c, arg_types, arg_count);
		ft->ret = lbAbi386::compute_return_type(c, return_type, return_is_defined);
		ft->calling_convention = calling_convention;
		return ft;
	}

	Array<lbArgType> compute_arg_types(LLVMContextRef c, LLVMTypeRef *arg_types, unsigned arg_count) {
		auto args = array_make<lbArgType>(heap_allocator(), arg_count);

		for (unsigned i = 0; i < arg_count; i++) {
			LLVMTypeRef t = arg_types[i];
			LLVMTypeKind kind = LLVMGetTypeKind(t);
			if (kind == LLVMStructTypeKind || kind == LLVMArrayTypeKind) {
				i64 sz = lb_sizeof(t);
				switch (sz) {
				case 1:
				case 2:
				case 4:
				case 8:
					args[i] = lb_arg_type_direct(t, LLVMIntTypeInContext(c, 8*cast(unsigned)sz), nullptr, nullptr);
					break;
				default:
					args[i] = lb_arg_type_indirect(t, nullptr);
					break;
				}
			} else {
				args[i] = lbAbi386::non_struct(c, t, false);
			}
		}
		return args;
	}
};
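
// System V AMD64: each aggregate is split into 8-byte "eightbytes" and every eightbyte is
// classified into a register class (Int, SSE, X87, Memory, ...), closely following the
// classification algorithm in the System V x86-64 psABI. Aggregates that end up classified
// as Memory are passed indirectly (byval for non-Odin calling conventions); everything else
// is coerced to a small register struct built by llreg().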
// NOTE(bill): I hate `namespace` in C++ but this is just because I don't want to prefix everything
namespace lbAbiAmd64SysV {
	enum RegClass {
		RegClass_NoClass,
		RegClass_Int,
		RegClass_SSEFs,
		RegClass_SSEFv,
		RegClass_SSEDs,
		RegClass_SSEDv,
		RegClass_SSEInt8,
		RegClass_SSEInt16,
		RegClass_SSEInt32,
		RegClass_SSEInt64,
		RegClass_SSEUp,
		RegClass_X87,
		RegClass_X87Up,
		RegClass_ComplexX87,
		RegClass_Memory,
	};

	bool is_sse(RegClass reg_class) {
		switch (reg_class) {
		case RegClass_SSEFs:
		case RegClass_SSEFv:
		case RegClass_SSEDs:
		case RegClass_SSEDv:
			return true;
		case RegClass_SSEInt8:
		case RegClass_SSEInt16:
		case RegClass_SSEInt32:
		case RegClass_SSEInt64:
			return true;
		}
		return false;
	}

	void all_mem(Array<RegClass> *cs) {
		for_array(i, *cs) {
			(*cs)[i] = RegClass_Memory;
		}
	}

	enum Amd64TypeAttributeKind {
		Amd64TypeAttribute_None,
		Amd64TypeAttribute_ByVal,
		Amd64TypeAttribute_StructRect,
	};

	lbArgType compute_return_type(LLVMContextRef c, LLVMTypeRef return_type, bool return_is_defined);
	void classify_with(LLVMTypeRef t, Array<RegClass> *cls, i64 ix, i64 off);
	void fixup(LLVMTypeRef t, Array<RegClass> *cls);
	lbArgType amd64_type(LLVMContextRef c, LLVMTypeRef type, Amd64TypeAttributeKind attribute_kind, ProcCallingConvention calling_convention);
	Array<RegClass> classify(LLVMTypeRef t);
	LLVMTypeRef llreg(LLVMContextRef c, Array<RegClass> const &reg_classes);

	LB_ABI_INFO(abi_info) {
		lbFunctionType *ft = gb_alloc_item(permanent_allocator(), lbFunctionType);
		ft->ctx = c;
		ft->calling_convention = calling_convention;
		ft->args = array_make<lbArgType>(heap_allocator(), arg_count);
		for (unsigned i = 0; i < arg_count; i++) {
			ft->args[i] = amd64_type(c, arg_types[i], Amd64TypeAttribute_ByVal, calling_convention);
		}
		if (return_is_defined) {
			ft->ret = amd64_type(c, return_type, Amd64TypeAttribute_StructRect, calling_convention);
		} else {
			ft->ret = lb_arg_type_direct(LLVMVoidTypeInContext(c));
		}
		return ft;
	}

	bool is_mem_cls(Array<RegClass> const &cls, Amd64TypeAttributeKind attribute_kind) {
		if (attribute_kind == Amd64TypeAttribute_ByVal) {
			if (cls.count == 0) {
				return false;
			}
			auto first = cls[0];
			return first == RegClass_Memory || first == RegClass_X87 || first == RegClass_ComplexX87;
		} else if (attribute_kind == Amd64TypeAttribute_StructRect) {
			if (cls.count == 0) {
				return false;
			}
			return cls[0] == RegClass_Memory;
		}
		return false;
	}

	bool is_register(LLVMTypeRef type) {
		LLVMTypeKind kind = LLVMGetTypeKind(type);
		i64 sz = lb_sizeof(type);
		if (sz == 0) {
			return false;
		}
		switch (kind) {
		case LLVMIntegerTypeKind:
		case LLVMHalfTypeKind:
		case LLVMFloatTypeKind:
		case LLVMDoubleTypeKind:
		case LLVMPointerTypeKind:
			return true;
		}
		return false;
	}

	lbArgType amd64_type(LLVMContextRef c, LLVMTypeRef type, Amd64TypeAttributeKind attribute_kind, ProcCallingConvention calling_convention) {
		if (is_register(type)) {
			LLVMAttributeRef attribute = nullptr;
			if (type == LLVMInt1TypeInContext(c)) {
				attribute = lb_create_enum_attribute(c, "zeroext");
			}
			return lb_arg_type_direct(type, nullptr, nullptr, attribute);
		}

		auto cls = classify(type);
		if (is_mem_cls(cls, attribute_kind)) {
			LLVMAttributeRef attribute = nullptr;
			if (attribute_kind == Amd64TypeAttribute_ByVal) {
				if (!is_calling_convention_odin(calling_convention)) {
					return lb_arg_type_indirect_byval(c, type);
				}
				attribute = nullptr;
			} else if (attribute_kind == Amd64TypeAttribute_StructRect) {
				attribute = lb_create_enum_attribute_with_type(c, "sret", type);
			}
			return lb_arg_type_indirect(type, attribute);
		} else {
			return lb_arg_type_direct(type, llreg(c, cls), nullptr, nullptr);
		}
	}

	lbArgType non_struct(LLVMContextRef c, LLVMTypeRef type) {
		LLVMAttributeRef attr = nullptr;
		LLVMTypeRef i1 = LLVMInt1TypeInContext(c);
		if (type == i1) {
			attr = lb_create_enum_attribute(c, "zeroext");
		}
		return lb_arg_type_direct(type, nullptr, nullptr, attr);
	}

	Array<RegClass> classify(LLVMTypeRef t) {
		i64 sz = lb_sizeof(t);
		i64 words = (sz + 7)/8;
		auto reg_classes = array_make<RegClass>(heap_allocator(), cast(isize)words);
		if (words > 4) {
			all_mem(&reg_classes);
		} else {
			classify_with(t, &reg_classes, 0, 0);
			fixup(t, &reg_classes);
		}
		return reg_classes;
	}

	void unify(Array<RegClass> *cls, i64 i, RegClass const newv) {
		RegClass const oldv = (*cls)[cast(isize)i];
		if (oldv == newv) {
			return;
		}

		RegClass to_write = newv;
		if (oldv == RegClass_NoClass) {
			to_write = newv;
		} else if (newv == RegClass_NoClass) {
			return;
		} else if (oldv == RegClass_Memory || newv == RegClass_Memory) {
			to_write = RegClass_Memory;
		} else if (oldv == RegClass_Int || newv == RegClass_Int) {
			to_write = RegClass_Int;
		} else if (oldv == RegClass_X87 || oldv == RegClass_X87Up || oldv == RegClass_ComplexX87) {
			to_write = RegClass_Memory;
		} else if (newv == RegClass_X87 || newv == RegClass_X87Up || newv == RegClass_ComplexX87) {
			to_write = RegClass_Memory;
		} else if (newv == RegClass_SSEUp) {
			switch (oldv) {
			case RegClass_SSEFv:
			case RegClass_SSEFs:
			case RegClass_SSEDv:
			case RegClass_SSEDs:
			case RegClass_SSEInt8:
			case RegClass_SSEInt16:
			case RegClass_SSEInt32:
			case RegClass_SSEInt64:
				return;
			}
		}

		(*cls)[cast(isize)i] = to_write;
	}
	void fixup(LLVMTypeRef t, Array<RegClass> *cls) {
		i64 i = 0;
		i64 e = cls->count;
		if (e > 2 && (lb_is_type_kind(t, LLVMStructTypeKind) ||
		              lb_is_type_kind(t, LLVMArrayTypeKind) ||
		              lb_is_type_kind(t, LLVMVectorTypeKind))) {
			RegClass &oldv = (*cls)[cast(isize)i];
			if (is_sse(oldv)) {
				for (i++; i < e; i++) {
					if (oldv != RegClass_SSEUp) {
						all_mem(cls);
						return;
					}
				}
			} else {
				all_mem(cls);
				return;
			}
		} else {
			while (i < e) {
				RegClass &oldv = (*cls)[cast(isize)i];
				if (oldv == RegClass_Memory) {
					all_mem(cls);
					return;
				} else if (oldv == RegClass_X87Up) {
					// NOTE(bill): Darwin
					all_mem(cls);
					return;
				} else if (oldv == RegClass_SSEUp) {
					oldv = RegClass_SSEDv;
				} else if (is_sse(oldv)) {
					i++;
					while (i != e && oldv == RegClass_SSEUp) {
						i++;
					}
				} else if (oldv == RegClass_X87) {
					i++;
					while (i != e && oldv == RegClass_X87Up) {
						i++;
					}
				} else {
					i++;
				}
			}
		}
	}

	unsigned llvec_len(Array<RegClass> const &reg_classes, isize offset) {
		unsigned len = 1;
		for (isize i = offset; i < reg_classes.count; i++) {
			if (reg_classes[i] != RegClass_SSEUp) {
				break;
			}
			len++;
		}
		return len;
	}

	LLVMTypeRef llreg(LLVMContextRef c, Array<RegClass> const &reg_classes) {
		auto types = array_make<LLVMTypeRef>(heap_allocator(), 0, reg_classes.count);
		for (isize i = 0; i < reg_classes.count; /**/) {
			RegClass reg_class = reg_classes[i];
			switch (reg_class) {
			case RegClass_Int:
				array_add(&types, LLVMIntTypeInContext(c, 64));
				break;
			case RegClass_SSEFv:
			case RegClass_SSEDv:
			case RegClass_SSEInt8:
			case RegClass_SSEInt16:
			case RegClass_SSEInt32:
			case RegClass_SSEInt64:
				{
					unsigned elems_per_word = 0;
					LLVMTypeRef elem_type = nullptr;
					switch (reg_class) {
					case RegClass_SSEFv:
						elems_per_word = 2;
						elem_type = LLVMFloatTypeInContext(c);
						break;
					case RegClass_SSEDv:
						elems_per_word = 1;
						elem_type = LLVMDoubleTypeInContext(c);
						break;
					case RegClass_SSEInt8:
						elems_per_word = 64/8;
						elem_type = LLVMIntTypeInContext(c, 8);
						break;
					case RegClass_SSEInt16:
						elems_per_word = 64/16;
						elem_type = LLVMIntTypeInContext(c, 16);
						break;
					case RegClass_SSEInt32:
						elems_per_word = 64/32;
						elem_type = LLVMIntTypeInContext(c, 32);
						break;
					case RegClass_SSEInt64:
						elems_per_word = 64/64;
						elem_type = LLVMIntTypeInContext(c, 64);
						break;
					}

					unsigned vec_len = llvec_len(reg_classes, i+1);
					LLVMTypeRef vec_type = LLVMVectorType(elem_type, vec_len * elems_per_word);
					array_add(&types, vec_type);
					i += vec_len;
					continue;
				}
				break;
			case RegClass_SSEFs:
				array_add(&types, LLVMFloatTypeInContext(c));
				break;
			case RegClass_SSEDs:
				array_add(&types, LLVMDoubleTypeInContext(c));
				break;
			default:
				GB_PANIC("Unhandled RegClass");
			}
			i += 1;
		}

		if (types.count == 1) {
			return types[0];
		}
		return LLVMStructTypeInContext(c, types.data, cast(unsigned)types.count, false);
	}
	void classify_with(LLVMTypeRef t, Array<RegClass> *cls, i64 ix, i64 off) {
		i64 t_align = lb_alignof(t);
		i64 t_size = lb_sizeof(t);

		i64 misalign = off % t_align;
		if (misalign != 0) {
			i64 e = (off + t_size + 7) / 8;
			for (i64 i = off / 8; i < e; i++) {
				unify(cls, ix+i, RegClass_Memory);
			}
			return;
		}

		switch (LLVMGetTypeKind(t)) {
		case LLVMIntegerTypeKind:
		case LLVMPointerTypeKind:
		case LLVMHalfTypeKind:
			unify(cls, ix + off/8, RegClass_Int);
			break;
		case LLVMFloatTypeKind:
			unify(cls, ix + off/8, (off%8 == 4) ? RegClass_SSEFv : RegClass_SSEFs);
			break;
		case LLVMDoubleTypeKind:
			unify(cls, ix + off/8, RegClass_SSEDs);
			break;
		case LLVMStructTypeKind:
			{
				LLVMBool packed = LLVMIsPackedStruct(t);
				unsigned field_count = LLVMCountStructElementTypes(t);

				i64 field_off = off;
				for (unsigned field_index = 0; field_index < field_count; field_index++) {
					LLVMTypeRef field_type = LLVMStructGetTypeAtIndex(t, field_index);
					if (!packed) {
						field_off = llvm_align_formula(field_off, lb_alignof(field_type));
					}
					classify_with(field_type, cls, ix, field_off);
					field_off += lb_sizeof(field_type);
				}
			}
			break;
		case LLVMArrayTypeKind:
			{
				i64 len = LLVMGetArrayLength(t);
				LLVMTypeRef elem = LLVMGetElementType(t);
				i64 elem_sz = lb_sizeof(elem);
				for (i64 i = 0; i < len; i++) {
					classify_with(elem, cls, ix, off + i*elem_sz);
				}
			}
			break;
		case LLVMVectorTypeKind:
			{
				i64 len = LLVMGetVectorSize(t);
				LLVMTypeRef elem = LLVMGetElementType(t);
				i64 elem_sz = lb_sizeof(elem);
				LLVMTypeKind elem_kind = LLVMGetTypeKind(elem);
				RegClass reg = RegClass_NoClass;
				switch (elem_kind) {
				case LLVMIntegerTypeKind:
				case LLVMHalfTypeKind:
					switch (LLVMGetIntTypeWidth(elem)) {
					case 8:  reg = RegClass_SSEInt8;  break;
					case 16: reg = RegClass_SSEInt16; break;
					case 32: reg = RegClass_SSEInt32; break;
					case 64: reg = RegClass_SSEInt64; break;
					default:
						GB_PANIC("Unhandled integer width for vector type");
					}
					break;
				case LLVMFloatTypeKind:
					reg = RegClass_SSEFv;
					break;
				case LLVMDoubleTypeKind:
					reg = RegClass_SSEDv;
					break;
				default:
					GB_PANIC("Unhandled vector element type");
				}

				for (i64 i = 0; i < len; i++) {
					unify(cls, ix + (off + i*elem_sz)/8, reg);

					// NOTE(bill): Everything after the first one is the upper
					// half of a register
					reg = RegClass_SSEUp;
				}
			}
			break;
		default:
			GB_PANIC("Unhandled type");
			break;
		}
	}
	lbArgType compute_return_type(LLVMContextRef c, LLVMTypeRef return_type, bool return_is_defined) {
		if (!return_is_defined) {
			return lb_arg_type_direct(LLVMVoidTypeInContext(c));
		} else if (lb_is_type_kind(return_type, LLVMStructTypeKind)) {
			i64 sz = lb_sizeof(return_type);
			switch (sz) {
			case 1: return lb_arg_type_direct(return_type, LLVMIntTypeInContext(c,  8), nullptr, nullptr);
			case 2: return lb_arg_type_direct(return_type, LLVMIntTypeInContext(c, 16), nullptr, nullptr);
			case 4: return lb_arg_type_direct(return_type, LLVMIntTypeInContext(c, 32), nullptr, nullptr);
			case 8: return lb_arg_type_direct(return_type, LLVMIntTypeInContext(c, 64), nullptr, nullptr);
			}
			LLVMAttributeRef attr = lb_create_enum_attribute_with_type(c, "sret", return_type);
			return lb_arg_type_indirect(return_type, attr);
		} else if (build_context.metrics.os == TargetOs_windows && lb_is_type_kind(return_type, LLVMIntegerTypeKind) && lb_sizeof(return_type) == 16) {
			return lb_arg_type_direct(return_type, LLVMIntTypeInContext(c, 128), nullptr, nullptr);
		}
		return non_struct(c, return_type);
	}
};
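
// AArch64 (AAPCS64-style): homogeneous float/double aggregates (HFAs) are passed directly as
// an array of the base type, and returned that way when they have at most 4 members; other
// aggregates of 16 bytes or less are coerced to an integer or an array of i64; anything
// larger is passed indirectly (with sret for returns).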
namespace lbAbiArm64 {
	Array<lbArgType> compute_arg_types(LLVMContextRef c, LLVMTypeRef *arg_types, unsigned arg_count);
	lbArgType compute_return_type(LLVMContextRef c, LLVMTypeRef return_type, bool return_is_defined);
	bool is_homogenous_aggregate(LLVMContextRef c, LLVMTypeRef type, LLVMTypeRef *base_type_, unsigned *member_count_);

	LB_ABI_INFO(abi_info) {
		lbFunctionType *ft = gb_alloc_item(permanent_allocator(), lbFunctionType);
		ft->ctx = c;
		ft->ret = compute_return_type(c, return_type, return_is_defined);
		ft->args = compute_arg_types(c, arg_types, arg_count);
		ft->calling_convention = calling_convention;
		return ft;
	}

	bool is_register(LLVMTypeRef type) {
		LLVMTypeKind kind = LLVMGetTypeKind(type);
		switch (kind) {
		case LLVMIntegerTypeKind:
		case LLVMHalfTypeKind:
		case LLVMFloatTypeKind:
		case LLVMDoubleTypeKind:
		case LLVMPointerTypeKind:
			return true;
		}
		return false;
	}

	lbArgType non_struct(LLVMContextRef c, LLVMTypeRef type) {
		LLVMAttributeRef attr = nullptr;
		LLVMTypeRef i1 = LLVMInt1TypeInContext(c);
		if (type == i1) {
			attr = lb_create_enum_attribute(c, "zeroext");
		}
		return lb_arg_type_direct(type, nullptr, nullptr, attr);
	}

	bool is_homogenous_array(LLVMContextRef c, LLVMTypeRef type, LLVMTypeRef *base_type_, unsigned *member_count_) {
		GB_ASSERT(lb_is_type_kind(type, LLVMArrayTypeKind));
		unsigned len = LLVMGetArrayLength(type);
		if (len == 0) {
			return false;
		}
		LLVMTypeRef elem = LLVMGetElementType(type);
		LLVMTypeRef base_type = nullptr;
		unsigned member_count = 0;
		if (is_homogenous_aggregate(c, elem, &base_type, &member_count)) {
			if (base_type_) *base_type_ = base_type;
			if (member_count_) *member_count_ = member_count * len;
			return true;
		}
		return false;
	}

	bool is_homogenous_struct(LLVMContextRef c, LLVMTypeRef type, LLVMTypeRef *base_type_, unsigned *member_count_) {
		GB_ASSERT(lb_is_type_kind(type, LLVMStructTypeKind));
		unsigned elem_count = LLVMCountStructElementTypes(type);
		if (elem_count == 0) {
			return false;
		}
		LLVMTypeRef base_type = nullptr;
		unsigned member_count = 0;

		for (unsigned i = 0; i < elem_count; i++) {
			LLVMTypeRef field_type = nullptr;
			unsigned field_member_count = 0;

			LLVMTypeRef elem = LLVMStructGetTypeAtIndex(type, i);
			if (!is_homogenous_aggregate(c, elem, &field_type, &field_member_count)) {
				return false;
			}

			if (base_type == nullptr) {
				base_type = field_type;
				member_count = field_member_count;
			} else {
				if (base_type != field_type) {
					return false;
				}
				member_count += field_member_count;
			}
		}

		if (base_type == nullptr) {
			return false;
		}

		if (lb_sizeof(type) == lb_sizeof(base_type) * member_count) {
			if (base_type_) *base_type_ = base_type;
			if (member_count_) *member_count_ = member_count;
			return true;
		}
		return false;
	}

	bool is_homogenous_aggregate(LLVMContextRef c, LLVMTypeRef type, LLVMTypeRef *base_type_, unsigned *member_count_) {
		LLVMTypeKind kind = LLVMGetTypeKind(type);
		switch (kind) {
		case LLVMFloatTypeKind:
		case LLVMDoubleTypeKind:
			if (base_type_) *base_type_ = type;
			if (member_count_) *member_count_ = 1;
			return true;

		case LLVMArrayTypeKind:
			return is_homogenous_array(c, type, base_type_, member_count_);

		case LLVMStructTypeKind:
			return is_homogenous_struct(c, type, base_type_, member_count_);
		}
		return false;
	}

	unsigned is_homogenous_aggregate_small_enough(LLVMTypeRef *base_type_, unsigned member_count_) {
		return (member_count_ <= 4);
	}

	lbArgType compute_return_type(LLVMContextRef c, LLVMTypeRef type, bool return_is_defined) {
		LLVMTypeRef homo_base_type = {};
		unsigned homo_member_count = 0;

		if (!return_is_defined) {
			return lb_arg_type_direct(LLVMVoidTypeInContext(c));
		} else if (is_register(type)) {
			return non_struct(c, type);
		} else if (is_homogenous_aggregate(c, type, &homo_base_type, &homo_member_count)) {
			if (is_homogenous_aggregate_small_enough(&homo_base_type, homo_member_count)) {
				return lb_arg_type_direct(type, LLVMArrayType(homo_base_type, homo_member_count), nullptr, nullptr);
			} else {
				// TODO(Platin): do I need to create stuff that can handle the different return type?
				//               else this needs a fix in llvm_backend_proc as we would need to cast it to the correct array type
				// LLVMTypeRef array_type = LLVMArrayType(homo_base_type, homo_member_count);
				LLVMAttributeRef attr = lb_create_enum_attribute_with_type(c, "sret", type);
				return lb_arg_type_indirect(type, attr);
			}
		} else {
			i64 size = lb_sizeof(type);
			if (size <= 16) {
				LLVMTypeRef cast_type = nullptr;
				if (size <= 1) {
					cast_type = LLVMInt8TypeInContext(c);
				} else if (size <= 2) {
					cast_type = LLVMInt16TypeInContext(c);
				} else if (size <= 4) {
					cast_type = LLVMInt32TypeInContext(c);
				} else if (size <= 8) {
					cast_type = LLVMInt64TypeInContext(c);
				} else {
					unsigned count = cast(unsigned)((size+7)/8);
					cast_type = LLVMArrayType(LLVMInt64TypeInContext(c), count);
				}
				return lb_arg_type_direct(type, cast_type, nullptr, nullptr);
			} else {
				LLVMAttributeRef attr = lb_create_enum_attribute_with_type(c, "sret", type);
				return lb_arg_type_indirect(type, attr);
			}
		}
	}

	Array<lbArgType> compute_arg_types(LLVMContextRef c, LLVMTypeRef *arg_types, unsigned arg_count) {
		auto args = array_make<lbArgType>(heap_allocator(), arg_count);

		for (unsigned i = 0; i < arg_count; i++) {
			LLVMTypeRef type = arg_types[i];

			LLVMTypeRef homo_base_type = {};
			unsigned homo_member_count = 0;

			if (is_register(type)) {
				args[i] = non_struct(c, type);
			} else if (is_homogenous_aggregate(c, type, &homo_base_type, &homo_member_count)) {
				args[i] = lb_arg_type_direct(type, LLVMArrayType(homo_base_type, homo_member_count), nullptr, nullptr);
			} else {
				i64 size = lb_sizeof(type);
				if (size <= 16) {
					LLVMTypeRef cast_type = nullptr;
					if (size <= 1) {
						cast_type = LLVMIntTypeInContext(c, 8);
					} else if (size <= 2) {
						cast_type = LLVMIntTypeInContext(c, 16);
					} else if (size <= 4) {
						cast_type = LLVMIntTypeInContext(c, 32);
					} else if (size <= 8) {
						cast_type = LLVMIntTypeInContext(c, 64);
					} else {
						unsigned count = cast(unsigned)((size+7)/8);
						cast_type = LLVMArrayType(LLVMIntTypeInContext(c, 64), count);
					}
					args[i] = lb_arg_type_direct(type, cast_type, nullptr, nullptr);
				} else {
					args[i] = lb_arg_type_indirect(type, nullptr);
				}
			}
		}
		return args;
	}
}
namespace lbAbiWasm {
	/*
		NOTE(bill): All of this is custom since there is not an "official"
		ABI definition for WASM, especially for Odin.

		The approach taken optimizes for passing things in multiple
		registers/arguments if possible rather than by pointer.
	*/
	Array<lbArgType> compute_arg_types(LLVMContextRef c, LLVMTypeRef *arg_types, unsigned arg_count);
	lbArgType compute_return_type(LLVMContextRef c, LLVMTypeRef return_type, bool return_is_defined);

	enum {MAX_DIRECT_STRUCT_SIZE = 32};

	LB_ABI_INFO(abi_info) {
		lbFunctionType *ft = gb_alloc_item(permanent_allocator(), lbFunctionType);
		ft->ctx = c;
		ft->args = compute_arg_types(c, arg_types, arg_count);
		ft->ret = compute_return_type(c, return_type, return_is_defined);
		ft->calling_convention = calling_convention;
		return ft;
	}

	lbArgType non_struct(LLVMContextRef c, LLVMTypeRef type, bool is_return) {
		if (!is_return && type == LLVMIntTypeInContext(c, 128)) {
			LLVMTypeRef cast_type = LLVMVectorType(LLVMInt64TypeInContext(c), 2);
			return lb_arg_type_direct(type, cast_type, nullptr, nullptr);
		}
		if (!is_return && lb_sizeof(type) > 8) {
			return lb_arg_type_indirect(type, nullptr);
		}
		LLVMAttributeRef attr = nullptr;
		LLVMTypeRef i1 = LLVMInt1TypeInContext(c);
		if (type == i1) {
			attr = lb_create_enum_attribute(c, "zeroext");
		}
		return lb_arg_type_direct(type, nullptr, nullptr, attr);
	}

	bool is_basic_register_type(LLVMTypeRef type) {
		switch (LLVMGetTypeKind(type)) {
		case LLVMHalfTypeKind:
		case LLVMFloatTypeKind:
		case LLVMDoubleTypeKind:
		case LLVMPointerTypeKind:
			return true;
		case LLVMIntegerTypeKind:
			return lb_sizeof(type) <= 8;
		}
		return false;
	}

	bool type_can_be_direct(LLVMTypeRef type) {
		LLVMTypeKind kind = LLVMGetTypeKind(type);
		i64 sz = lb_sizeof(type);
		if (sz == 0) {
			return false;
		}
		if (sz <= MAX_DIRECT_STRUCT_SIZE) {
			if (kind == LLVMArrayTypeKind) {
				if (is_basic_register_type(LLVMGetElementType(type))) {
					return true;
				}
			} else if (kind == LLVMStructTypeKind) {
				unsigned count = LLVMCountStructElementTypes(type);
				for (unsigned i = 0; i < count; i++) {
					LLVMTypeRef elem = LLVMStructGetTypeAtIndex(type, i);
					if (!is_basic_register_type(elem)) {
						return false;
					}
				}
				return true;
			}
		}
		return false;
	}

	lbArgType is_struct(LLVMContextRef c, LLVMTypeRef type) {
		LLVMTypeKind kind = LLVMGetTypeKind(type);
		GB_ASSERT(kind == LLVMArrayTypeKind || kind == LLVMStructTypeKind);

		i64 sz = lb_sizeof(type);
		if (sz == 0) {
			return lb_arg_type_ignore(type);
		}
		if (type_can_be_direct(type)) {
			return lb_arg_type_direct(type);
		}
		return lb_arg_type_indirect(type, nullptr);
	}

	Array<lbArgType> compute_arg_types(LLVMContextRef c, LLVMTypeRef *arg_types, unsigned arg_count) {
		auto args = array_make<lbArgType>(heap_allocator(), arg_count);

		for (unsigned i = 0; i < arg_count; i++) {
			LLVMTypeRef t = arg_types[i];
			LLVMTypeKind kind = LLVMGetTypeKind(t);
			if (kind == LLVMStructTypeKind || kind == LLVMArrayTypeKind) {
				args[i] = is_struct(c, t);
			} else {
				args[i] = non_struct(c, t, false);
			}
		}
		return args;
	}

	lbArgType compute_return_type(LLVMContextRef c, LLVMTypeRef return_type, bool return_is_defined) {
		if (!return_is_defined) {
			return lb_arg_type_direct(LLVMVoidTypeInContext(c));
		} else if (lb_is_type_kind(return_type, LLVMStructTypeKind) || lb_is_type_kind(return_type, LLVMArrayTypeKind)) {
			if (type_can_be_direct(return_type)) {
				return lb_arg_type_direct(return_type);
			}
			i64 sz = lb_sizeof(return_type);
			switch (sz) {
			case 1: return lb_arg_type_direct(return_type, LLVMIntTypeInContext(c,  8), nullptr, nullptr);
			case 2: return lb_arg_type_direct(return_type, LLVMIntTypeInContext(c, 16), nullptr, nullptr);
			case 4: return lb_arg_type_direct(return_type, LLVMIntTypeInContext(c, 32), nullptr, nullptr);
			case 8: return lb_arg_type_direct(return_type, LLVMIntTypeInContext(c, 64), nullptr, nullptr);
			}
			LLVMAttributeRef attr = lb_create_enum_attribute_with_type(c, "sret", return_type);
			return lb_arg_type_indirect(return_type, attr);
		}
		return non_struct(c, return_type, true);
	}
}
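
// 32-bit ARM: register-class types are passed directly; other aggregates are coerced to an
// array of i32 pieces (alignment <= 4) or i64 pieces (alignment > 4), except that the Odin
// calling convention passes aggregates larger than 8 bytes indirectly. Non-register returns
// of 1-4 bytes are widened to an integer, larger ones use sret.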
namespace lbAbiArm32 {
	Array<lbArgType> compute_arg_types(LLVMContextRef c, LLVMTypeRef *arg_types, unsigned arg_count, ProcCallingConvention calling_convention);
	lbArgType compute_return_type(LLVMContextRef c, LLVMTypeRef return_type, bool return_is_defined);

	LB_ABI_INFO(abi_info) {
		lbFunctionType *ft = gb_alloc_item(permanent_allocator(), lbFunctionType);
		ft->ctx = c;
		ft->args = compute_arg_types(c, arg_types, arg_count, calling_convention);
		ft->ret = compute_return_type(c, return_type, return_is_defined);
		ft->calling_convention = calling_convention;
		return ft;
	}

	bool is_register(LLVMTypeRef type, bool is_return) {
		LLVMTypeKind kind = LLVMGetTypeKind(type);
		switch (kind) {
		case LLVMHalfTypeKind:
		case LLVMFloatTypeKind:
		case LLVMDoubleTypeKind:
			return true;
		case LLVMIntegerTypeKind:
			return lb_sizeof(type) <= 8;
		case LLVMFunctionTypeKind:
			return true;
		case LLVMPointerTypeKind:
			return true;
		case LLVMVectorTypeKind:
			return true;
		}
		return false;
	}

	lbArgType non_struct(LLVMContextRef c, LLVMTypeRef type, bool is_return) {
		LLVMAttributeRef attr = nullptr;
		LLVMTypeRef i1 = LLVMInt1TypeInContext(c);
		if (type == i1) {
			attr = lb_create_enum_attribute(c, "zeroext");
		}
		return lb_arg_type_direct(type, nullptr, nullptr, attr);
	}

	Array<lbArgType> compute_arg_types(LLVMContextRef c, LLVMTypeRef *arg_types, unsigned arg_count, ProcCallingConvention calling_convention) {
		auto args = array_make<lbArgType>(heap_allocator(), arg_count);

		for (unsigned i = 0; i < arg_count; i++) {
			LLVMTypeRef t = arg_types[i];
			if (is_register(t, false)) {
				args[i] = non_struct(c, t, false);
			} else {
				i64 sz = lb_sizeof(t);
				i64 a = lb_alignof(t);
				if (is_calling_convention_odin(calling_convention) && sz > 8) {
					// Minor change to improve performance using the Odin calling conventions
					args[i] = lb_arg_type_indirect(t, nullptr);
				} else if (a <= 4) {
					unsigned n = cast(unsigned)((sz + 3) / 4);
					args[i] = lb_arg_type_direct(LLVMArrayType(LLVMIntTypeInContext(c, 32), n));
				} else {
					unsigned n = cast(unsigned)((sz + 7) / 8);
					args[i] = lb_arg_type_direct(LLVMArrayType(LLVMIntTypeInContext(c, 64), n));
				}
			}
		}
		return args;
	}

	lbArgType compute_return_type(LLVMContextRef c, LLVMTypeRef return_type, bool return_is_defined) {
		if (!return_is_defined) {
			return lb_arg_type_direct(LLVMVoidTypeInContext(c));
		} else if (!is_register(return_type, true)) {
			switch (lb_sizeof(return_type)) {
			case 1: return lb_arg_type_direct(LLVMIntTypeInContext(c,  8), return_type, nullptr, nullptr);
			case 2: return lb_arg_type_direct(LLVMIntTypeInContext(c, 16), return_type, nullptr, nullptr);
			case 3:
			case 4: return lb_arg_type_direct(LLVMIntTypeInContext(c, 32), return_type, nullptr, nullptr);
			}
			LLVMAttributeRef attr = lb_create_enum_attribute_with_type(c, "sret", return_type);
			return lb_arg_type_indirect(return_type, attr);
		}
		return non_struct(c, return_type, true);
	}
};
LB_ABI_INFO(lb_get_abi_info) {
	switch (calling_convention) {
	case ProcCC_None:
	case ProcCC_InlineAsm:
		{
			lbFunctionType *ft = gb_alloc_item(permanent_allocator(), lbFunctionType);
			ft->ctx = c;
			ft->args = array_make<lbArgType>(heap_allocator(), arg_count);
			for (unsigned i = 0; i < arg_count; i++) {
				ft->args[i] = lb_arg_type_direct(arg_types[i]);
			}
			if (return_is_defined) {
				ft->ret = lb_arg_type_direct(return_type);
			} else {
				ft->ret = lb_arg_type_direct(LLVMVoidTypeInContext(c));
			}
			ft->calling_convention = calling_convention;
			return ft;
		}
	case ProcCC_Win64:
		GB_ASSERT(build_context.metrics.arch == TargetArch_amd64);
		return lbAbiAmd64Win64::abi_info(c, arg_types, arg_count, return_type, return_is_defined, calling_convention);
	case ProcCC_SysV:
		GB_ASSERT(build_context.metrics.arch == TargetArch_amd64);
		return lbAbiAmd64SysV::abi_info(c, arg_types, arg_count, return_type, return_is_defined, calling_convention);
	}

	switch (build_context.metrics.arch) {
	case TargetArch_amd64:
		if (build_context.metrics.os == TargetOs_windows || build_context.metrics.abi == TargetABI_Win64) {
			return lbAbiAmd64Win64::abi_info(c, arg_types, arg_count, return_type, return_is_defined, calling_convention);
		} else if (build_context.metrics.abi == TargetABI_SysV) {
			return lbAbiAmd64SysV::abi_info(c, arg_types, arg_count, return_type, return_is_defined, calling_convention);
		} else {
			return lbAbiAmd64SysV::abi_info(c, arg_types, arg_count, return_type, return_is_defined, calling_convention);
		}
	case TargetArch_i386:
		return lbAbi386::abi_info(c, arg_types, arg_count, return_type, return_is_defined, calling_convention);
	case TargetArch_arm32:
		return lbAbiArm32::abi_info(c, arg_types, arg_count, return_type, return_is_defined, calling_convention);
	case TargetArch_arm64:
		return lbAbiArm64::abi_info(c, arg_types, arg_count, return_type, return_is_defined, calling_convention);
	case TargetArch_wasm32:
	case TargetArch_wasm64:
		return lbAbiWasm::abi_info(c, arg_types, arg_count, return_type, return_is_defined, calling_convention);
	}

	GB_PANIC("Unsupported ABI");
	return {};
}
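
// A minimal usage sketch (hypothetical caller, not part of this file), assuming the proc's
// LLVM parameter/return types have already been built; `ctx`, `param_types`, `param_count`,
// `ret_type`, and `module` are placeholder names:
//
//	lbFunctionType *ft = lb_get_abi_info(ctx, param_types, param_count,
//	                                     ret_type, /*return_is_defined*/true, ProcCC_Odin);
//	LLVMTypeRef fn_ptr_type = lb_function_type_to_llvm_ptr(ft, /*is_var_arg*/false);
//	LLVMValueRef fn = LLVMAddFunction(module, "example", LLVMGetElementType(fn_ptr_type));
//	lb_add_function_type_attributes(fn, ft, ft->calling_convention);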