// llvm_backend_type.cpp

gb_internal isize lb_type_info_index(CheckerInfo *info, Type *type, bool err_on_not_found=true) {
	auto *set = &info->minimum_dependency_type_info_set;
	isize index = type_info_index(info, type, err_on_not_found);
	if (index >= 0) {
		auto *found = map_get(set, index);
		if (found) {
			GB_ASSERT(*found >= 0);
			return *found + 1;
		}
	}
	if (err_on_not_found) {
		gb_printf_err("NOT FOUND lb_type_info_index:\n\t%s\n\t@ index %td\n\tmax count: %u\nFound:\n", type_to_string(type), index, set->count);
		for (auto const &entry : *set) {
			isize type_info_index = entry.key;
			gb_printf_err("\t%s\n", type_to_string(info->type_info_types[type_info_index]));
		}
		GB_PANIC("NOT FOUND");
	}
	return -1;
}

gb_internal u64 lb_typeid_kind(lbModule *m, Type *type, u64 id=0) {
	GB_ASSERT(!build_context.no_rtti);

	type = default_type(type);
	if (id == 0) {
		id = cast(u64)lb_type_info_index(m->info, type);
	}

	u64 kind = Typeid_Invalid;

	Type *bt = base_type(type);
	TypeKind tk = bt->kind;
	switch (tk) {
	case Type_Basic: {
		u32 flags = bt->Basic.flags;
		if (flags & BasicFlag_Boolean)  kind = Typeid_Boolean;
		if (flags & BasicFlag_Integer)  kind = Typeid_Integer;
		if (flags & BasicFlag_Unsigned) kind = Typeid_Integer;
		if (flags & BasicFlag_Float)    kind = Typeid_Float;
		if (flags & BasicFlag_Complex)  kind = Typeid_Complex;
		if (flags & BasicFlag_Pointer)  kind = Typeid_Pointer;
		if (flags & BasicFlag_String)   kind = Typeid_String;
		if (flags & BasicFlag_Rune)     kind = Typeid_Rune;
	} break;
	case Type_Pointer:              kind = Typeid_Pointer;                break;
	case Type_MultiPointer:         kind = Typeid_Multi_Pointer;          break;
	case Type_Array:                kind = Typeid_Array;                  break;
	case Type_Matrix:               kind = Typeid_Matrix;                 break;
	case Type_EnumeratedArray:      kind = Typeid_Enumerated_Array;       break;
	case Type_Slice:                kind = Typeid_Slice;                  break;
	case Type_DynamicArray:         kind = Typeid_Dynamic_Array;          break;
	case Type_Map:                  kind = Typeid_Map;                    break;
	case Type_Struct:               kind = Typeid_Struct;                 break;
	case Type_Enum:                 kind = Typeid_Enum;                   break;
	case Type_Union:                kind = Typeid_Union;                  break;
	case Type_Tuple:                kind = Typeid_Tuple;                  break;
	case Type_Proc:                 kind = Typeid_Procedure;              break;
	case Type_BitSet:               kind = Typeid_Bit_Set;                break;
	case Type_SimdVector:           kind = Typeid_Simd_Vector;            break;
	case Type_RelativePointer:      kind = Typeid_Relative_Pointer;       break;
	case Type_RelativeMultiPointer: kind = Typeid_Relative_Multi_Pointer; break;
	case Type_SoaPointer:           kind = Typeid_SoaPointer;             break;
	case Type_BitField:             kind = Typeid_Bit_Field;              break;
	}

	return kind;
}
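
// Descriptive note (derived from the shifts used in lb_typeid below, assuming
// the Typeid kind values fit in 5 bits): the resulting `typeid` packs
//   64-bit targets: index in bits 0..55, kind in bits 56..60, named at bit 61,
//                   special at bit 62, reserved at bit 63;
//   32-bit targets: index in bits 0..23, kind in bits 24..28, named at bit 29,
//                   special at bit 30, reserved at bit 31.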
gb_internal lbValue lb_typeid(lbModule *m, Type *type) {
	GB_ASSERT(!build_context.no_rtti);

	type = default_type(type);

	u64 id = cast(u64)lb_type_info_index(m->info, type);
	GB_ASSERT(id >= 0);

	u64 kind = lb_typeid_kind(m, type, id);
	u64 named = is_type_named(type) && type->kind != Type_Basic;
	u64 special = 0;
	u64 reserved = 0;

	if (is_type_cstring(type)) {
		special = 1;
	} else if (is_type_integer(type) && !is_type_unsigned(type)) {
		special = 1;
	}

	u64 data = 0;
	if (build_context.ptr_size == 4) {
		GB_ASSERT(id <= (1u<<24u));
		data |= (id       &~ (1u<<24)) << 0u;  // index
		data |= (kind     &~ (1u<<5))  << 24u; // kind
		data |= (named    &~ (1u<<1))  << 29u; // named
		data |= (special  &~ (1u<<1))  << 30u; // special
		data |= (reserved &~ (1u<<1))  << 31u; // reserved
	} else {
		GB_ASSERT(build_context.ptr_size == 8);
		GB_ASSERT(id <= (1ull<<56u));
		data |= (id       &~ (1ull<<56)) << 0ul;   // index
		data |= (kind     &~ (1ull<<5))  << 56ull; // kind
		data |= (named    &~ (1ull<<1))  << 61ull; // named
		data |= (special  &~ (1ull<<1))  << 62ull; // special
		data |= (reserved &~ (1ull<<1))  << 63ull; // reserved
	}

	lbValue res = {};
	res.value = LLVMConstInt(lb_type(m, t_typeid), data, false);
	res.type = t_typeid;
	return res;
}

gb_internal lbValue lb_type_info(lbProcedure *p, Type *type) {
	GB_ASSERT(!build_context.no_rtti);

	type = default_type(type);

	lbModule *m = p->module;
	isize index = lb_type_info_index(m->info, type);
	GB_ASSERT(index >= 0);

	lbValue global = lb_global_type_info_data_ptr(m);
	lbValue ptr = lb_emit_array_epi(p, global, index);
	return lb_emit_load(p, ptr);
}

gb_internal LLVMTypeRef lb_get_procedure_raw_type(lbModule *m, Type *type) {
	return lb_type_internal_for_procedures_raw(m, type);
}
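
// Descriptive note: lb_const_array_epi builds a constant GEP (`&array[index]`)
// into a constant global array value, yielding a pointer to the element type.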
gb_internal lbValue lb_const_array_epi(lbModule *m, lbValue value, isize index) {
	GB_ASSERT(is_type_pointer(value.type));
	Type *type = type_deref(value.type);
	LLVMValueRef indices[2] = {
		LLVMConstInt(lb_type(m, t_int), 0, false),
		LLVMConstInt(lb_type(m, t_int), cast(unsigned long long)index, false),
	};
	LLVMTypeRef llvm_type = lb_type(m, type);

	lbValue res = {};
	Type *ptr = base_array_type(type);
	res.type = alloc_type_pointer(ptr);
	GB_ASSERT(LLVMIsConstant(value.value));
	res.value = LLVMConstGEP2(llvm_type, value.value, indices, gb_count_of(indices));
	return res;
}

gb_internal lbValue lb_type_info_member_types_offset(lbModule *m, isize count, i64 *offset_=nullptr) {
	GB_ASSERT(m == &m->gen->default_module);
	if (offset_) *offset_ = lb_global_type_info_member_types_index;
	lbValue offset = lb_const_array_epi(m, lb_global_type_info_member_types.addr, lb_global_type_info_member_types_index);
	lb_global_type_info_member_types_index += cast(i32)count;
	return offset;
}

gb_internal lbValue lb_type_info_member_names_offset(lbModule *m, isize count, i64 *offset_=nullptr) {
	GB_ASSERT(m == &m->gen->default_module);
	if (offset_) *offset_ = lb_global_type_info_member_names_index;
	lbValue offset = lb_const_array_epi(m, lb_global_type_info_member_names.addr, lb_global_type_info_member_names_index);
	lb_global_type_info_member_names_index += cast(i32)count;
	return offset;
}

gb_internal lbValue lb_type_info_member_offsets_offset(lbModule *m, isize count, i64 *offset_=nullptr) {
	GB_ASSERT(m == &m->gen->default_module);
	if (offset_) *offset_ = lb_global_type_info_member_offsets_index;
	lbValue offset = lb_const_array_epi(m, lb_global_type_info_member_offsets.addr, lb_global_type_info_member_offsets_index);
	lb_global_type_info_member_offsets_index += cast(i32)count;
	return offset;
}

gb_internal lbValue lb_type_info_member_usings_offset(lbModule *m, isize count, i64 *offset_=nullptr) {
	GB_ASSERT(m == &m->gen->default_module);
	if (offset_) *offset_ = lb_global_type_info_member_usings_index;
	lbValue offset = lb_const_array_epi(m, lb_global_type_info_member_usings.addr, lb_global_type_info_member_usings_index);
	lb_global_type_info_member_usings_index += cast(i32)count;
	return offset;
}

gb_internal lbValue lb_type_info_member_tags_offset(lbModule *m, isize count, i64 *offset_=nullptr) {
	GB_ASSERT(m == &m->gen->default_module);
	if (offset_) *offset_ = lb_global_type_info_member_tags_index;
	lbValue offset = lb_const_array_epi(m, lb_global_type_info_member_tags.addr, lb_global_type_info_member_tags_index);
	lb_global_type_info_member_tags_index += cast(i32)count;
	return offset;
}
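
// Descriptive note: for each Type_Info variant this builds an LLVM struct type
// with the same leading fields as the runtime `Type_Info` struct, but with the
// final union field replaced by a concrete {variant, padding, tag} struct, so
// every type-info entry can later be emitted as a fully constant global.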
gb_internal LLVMTypeRef *lb_setup_modified_types_for_type_info(lbModule *m, isize max_type_info_count) {
	LLVMTypeRef *element_types = gb_alloc_array(heap_allocator(), LLVMTypeRef, max_type_info_count);
	defer (gb_free(heap_allocator(), element_types));

	auto entries_handled = slice_make<bool>(heap_allocator(), max_type_info_count);
	defer (gb_free(heap_allocator(), entries_handled.data));

	entries_handled[0] = true;
	element_types[0] = lb_type(m, t_type_info);

	Type *tibt = base_type(t_type_info);
	GB_ASSERT(tibt->kind == Type_Struct);
	Type *ut = base_type(tibt->Struct.fields[tibt->Struct.fields.count-1]->type);
	GB_ASSERT(ut->kind == Type_Union);

	GB_ASSERT(tibt->Struct.fields.count == 5);
	LLVMTypeRef stypes[6] = {};
	stypes[0] = lb_type(m, tibt->Struct.fields[0]->type);
	stypes[1] = lb_type(m, tibt->Struct.fields[1]->type);
	stypes[2] = lb_type(m, tibt->Struct.fields[2]->type);
	isize variant_index = 0;
	if (build_context.int_size == 8) {
		stypes[3] = lb_type(m, t_i32); // padding
		stypes[4] = lb_type(m, tibt->Struct.fields[3]->type);
		variant_index = 5;
	} else {
		stypes[3] = lb_type(m, tibt->Struct.fields[3]->type);
		variant_index = 4;
	}

	LLVMTypeRef *modified_types = gb_alloc_array(heap_allocator(), LLVMTypeRef, Typeid__COUNT);
	GB_ASSERT(Typeid__COUNT == ut->Union.variants.count);

	modified_types[0] = element_types[0];

	i64 tag_offset = ut->Union.variant_block_size;
	LLVMTypeRef tag = lb_type(m, union_tag_type(ut));

	for_array(i, ut->Union.variants) {
		Type *t = ut->Union.variants[i];
		LLVMTypeRef padding = llvm_array_type(lb_type(m, t_u8), tag_offset-type_size_of(t));

		LLVMTypeRef vtypes[3] = {};
		vtypes[0] = lb_type(m, t);
		vtypes[1] = padding;
		vtypes[2] = tag;
		LLVMTypeRef variant_type = LLVMStructType(vtypes, gb_count_of(vtypes), true);

		stypes[variant_index] = variant_type;
		LLVMTypeRef modified_type = LLVMStructType(stypes, cast(unsigned)(variant_index+1), false);
		modified_types[i] = modified_type;
	}

	for (isize i = 0; i < Typeid__COUNT; i++) {
		GB_ASSERT_MSG(modified_types[i] != nullptr, "%td", ut->Union.variants.count);
	}

	return modified_types;
}
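
// Descriptive note: the "giant array" setup below emits one private constant
// global per reachable type (named `__$ti-N`), each typed with the
// variant-specific layout from lb_setup_modified_types_for_type_info, and then
// initialises the global type-info array with pointers to those globals.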
gb_internal void lb_setup_type_info_data_giant_array(lbModule *m, i64 global_type_info_data_entity_count) { // NOTE(bill): Setup type_info data
	auto const &ADD_GLOBAL_TYPE_INFO_ENTRY = [](lbModule *m, LLVMTypeRef type, isize index) -> LLVMValueRef {
		char name[64] = {};
		gb_snprintf(name, 63, "__$ti-%lld", cast(long long)index);
		LLVMValueRef g = LLVMAddGlobal(m->mod, type, name);
		LLVMSetLinkage(g, LLVMInternalLinkage);
		LLVMSetUnnamedAddress(g, LLVMGlobalUnnamedAddr);
		LLVMSetGlobalConstant(g, true);
		return g;
	};

	CheckerInfo *info = m->info;

	// Useful types
	Entity *type_info_flags_entity = find_core_entity(info->checker, str_lit("Type_Info_Flags"));
	Type *t_type_info_flags = type_info_flags_entity->type;
	gb_unused(t_type_info_flags);

	Type *ut = base_type(t_type_info);
	GB_ASSERT(ut->kind == Type_Struct);
	ut = base_type(ut->Struct.fields[ut->Struct.fields.count-1]->type);
	GB_ASSERT(ut->kind == Type_Union);

	auto entries_handled = slice_make<bool>(heap_allocator(), cast(isize)global_type_info_data_entity_count);
	defer (gb_free(heap_allocator(), entries_handled.data));
	entries_handled[0] = true;

	LLVMValueRef *giant_const_values = gb_alloc_array(heap_allocator(), LLVMValueRef, global_type_info_data_entity_count);
	defer (gb_free(heap_allocator(), giant_const_values));

	// zero value is just zero data
	giant_const_values[0] = ADD_GLOBAL_TYPE_INFO_ENTRY(m, lb_type(m, t_type_info), 0);
	LLVMSetInitializer(giant_const_values[0], LLVMConstNull(lb_type(m, t_type_info)));

	LLVMTypeRef *modified_types = lb_setup_modified_types_for_type_info(m, global_type_info_data_entity_count);
	defer (gb_free(heap_allocator(), modified_types));

	for_array(type_info_type_index, info->type_info_types) {
		Type *t = info->type_info_types[type_info_type_index];
		if (t == nullptr || t == t_invalid) {
			continue;
		}

		isize entry_index = lb_type_info_index(info, t, false);
		if (entry_index <= 0) {
			continue;
		}
		if (entries_handled[entry_index]) {
			continue;
		}
		entries_handled[entry_index] = true;

		LLVMTypeRef stype = nullptr;
		if (t->kind == Type_Named) {
			stype = modified_types[0];
		} else {
			stype = modified_types[lb_typeid_kind(m, t)];
		}
		giant_const_values[entry_index] = ADD_GLOBAL_TYPE_INFO_ENTRY(m, stype, entry_index);
	}

	for (isize i = 1; i < global_type_info_data_entity_count; i++) {
		entries_handled[i] = false;
	}

	LLVMValueRef *small_const_values = gb_alloc_array(heap_allocator(), LLVMValueRef, 6);
	defer (gb_free(heap_allocator(), small_const_values));

	#define type_info_allocate_values(name) \
		LLVMValueRef *name##_values = gb_alloc_array(heap_allocator(), LLVMValueRef, type_deref(name.addr.type)->Array.count); \
		defer (gb_free(heap_allocator(), name##_values)); \
		defer ({ \
			Type *at = type_deref(name.addr.type); \
			LLVMTypeRef elem = lb_type(m, at->Array.elem); \
			for (i64 i = 0; i < at->Array.count; i++) { \
				if ((name##_values)[i] == nullptr) { \
					(name##_values)[i] = LLVMConstNull(elem); \
				} \
			} \
			LLVMSetInitializer(name.addr.value, llvm_const_array(elem, name##_values, at->Array.count)); \
		})

	type_info_allocate_values(lb_global_type_info_member_types);
	type_info_allocate_values(lb_global_type_info_member_names);
	type_info_allocate_values(lb_global_type_info_member_offsets);
	type_info_allocate_values(lb_global_type_info_member_usings);
	type_info_allocate_values(lb_global_type_info_member_tags);

	auto const get_type_info_ptr = [&](lbModule *m, Type *type) -> LLVMValueRef {
		type = default_type(type);
		isize index = lb_type_info_index(m->info, type);
		GB_ASSERT(index >= 0);
		return giant_const_values[index];
	};

	for_array(type_info_type_index, info->type_info_types) {
		Type *t = info->type_info_types[type_info_type_index];
		if (t == nullptr || t == t_invalid) {
			continue;
		}

		isize entry_index = lb_type_info_index(info, t, false);
		if (entry_index <= 0) {
			continue;
		}
		if (entries_handled[entry_index]) {
			continue;
		}
		entries_handled[entry_index] = true;

		LLVMTypeRef stype = nullptr;
		if (t->kind == Type_Named) {
			stype = modified_types[0];
		} else {
			stype = modified_types[lb_typeid_kind(m, t)];
		}

		i64 size  = type_size_of(t);
		i64 align = type_align_of(t);
		u32 flags = type_info_flags_of_type(t);
		lbValue id = lb_typeid(m, t);
		GB_ASSERT_MSG(align != 0, "%lld %s", align, type_to_string(t));

		lbValue type_info_flags = lb_const_int(m, t_type_info_flags, flags);

		for (isize i = 0; i < 6; i++) {
			small_const_values[i] = nullptr;
		}

		small_const_values[0] = LLVMConstInt(lb_type(m, t_int), size, true);
		small_const_values[1] = LLVMConstInt(lb_type(m, t_int), align, true);
		small_const_values[2] = type_info_flags.value;

		unsigned variant_index = 0;
		if (build_context.int_size == 8) {
			small_const_values[3] = LLVMConstNull(LLVMStructGetTypeAtIndex(stype, 3));
			small_const_values[4] = id.value;
			variant_index = 5;
		} else {
			small_const_values[3] = id.value;
			variant_index = 4;
		}

		LLVMTypeRef full_variant_type = LLVMStructGetTypeAtIndex(stype, variant_index);
		unsigned full_variant_elem_count = LLVMCountStructElementTypes(full_variant_type);
		if (full_variant_elem_count != 2) {
			GB_ASSERT_MSG(LLVMCountStructElementTypes(full_variant_type) == 3, "%lld %s", entry_index, type_to_string(t)); // blob, padding, tag
		}

		LLVMValueRef variant_value = nullptr;
		Type *tag_type = nullptr;

		switch (t->kind) {
		case Type_Named: {
			tag_type = t_type_info_named;

			LLVMValueRef pkg_name = nullptr;
			if (t->Named.type_name->pkg) {
				pkg_name = lb_const_string(m, t->Named.type_name->pkg->name).value;
			} else {
				pkg_name = LLVMConstNull(lb_type(m, t_string));
			}

			String proc_name = {};
			if (t->Named.type_name->parent_proc_decl) {
				DeclInfo *decl = t->Named.type_name->parent_proc_decl;
				if (decl->entity && decl->entity->kind == Entity_Procedure) {
					proc_name = decl->entity->token.string;
				}
			}
			TokenPos pos = t->Named.type_name->token.pos;
			lbValue loc = lb_const_source_code_location_const(m, proc_name, pos);

			LLVMValueRef vals[4] = {
				lb_const_string(m, t->Named.type_name->token.string).value,
				get_type_info_ptr(m, t->Named.base),
				pkg_name,
				loc.value
			};

			variant_value = llvm_const_named_struct(m, tag_type, vals, gb_count_of(vals));
			break;
		}
		case Type_Basic:
			switch (t->Basic.kind) {
			case Basic_bool:
			case Basic_b8:
			case Basic_b16:
			case Basic_b32:
			case Basic_b64:
				tag_type = t_type_info_boolean;
				break;

			case Basic_i8:
			case Basic_u8:
			case Basic_i16:
			case Basic_u16:
			case Basic_i32:
			case Basic_u32:
			case Basic_i64:
			case Basic_u64:
			case Basic_i128:
			case Basic_u128:
			case Basic_i16le:
			case Basic_u16le:
			case Basic_i32le:
			case Basic_u32le:
			case Basic_i64le:
			case Basic_u64le:
			case Basic_i128le:
			case Basic_u128le:
			case Basic_i16be:
			case Basic_u16be:
			case Basic_i32be:
			case Basic_u32be:
			case Basic_i64be:
			case Basic_u64be:
			case Basic_i128be:
			case Basic_u128be:
			case Basic_int:
			case Basic_uint:
			case Basic_uintptr: {
				tag_type = t_type_info_integer;

				lbValue is_signed = lb_const_bool(m, t_bool, (t->Basic.flags & BasicFlag_Unsigned) == 0);
				// NOTE(bill): This matches the runtime layout
				u8 endianness_value = 0;
				if (t->Basic.flags & BasicFlag_EndianLittle) {
					endianness_value = 1;
				} else if (t->Basic.flags & BasicFlag_EndianBig) {
					endianness_value = 2;
				}
				lbValue endianness = lb_const_int(m, t_u8, endianness_value);

				LLVMValueRef vals[2] = {
					is_signed.value,
					endianness.value,
				};

				variant_value = llvm_const_named_struct(m, tag_type, vals, gb_count_of(vals));
				break;
			}

			case Basic_rune:
				tag_type = t_type_info_rune;
				break;

			case Basic_f16:
			case Basic_f32:
			case Basic_f64:
			case Basic_f16le:
			case Basic_f32le:
			case Basic_f64le:
			case Basic_f16be:
			case Basic_f32be:
			case Basic_f64be:
				{
					tag_type = t_type_info_float;

					// NOTE(bill): This matches the runtime layout
					u8 endianness_value = 0;
					if (t->Basic.flags & BasicFlag_EndianLittle) {
						endianness_value = 1;
					} else if (t->Basic.flags & BasicFlag_EndianBig) {
						endianness_value = 2;
					}
					lbValue endianness = lb_const_int(m, t_u8, endianness_value);

					LLVMValueRef vals[1] = {
						endianness.value,
					};

					variant_value = llvm_const_named_struct(m, tag_type, vals, gb_count_of(vals));
				}
				break;

			case Basic_complex32:
			case Basic_complex64:
			case Basic_complex128:
				tag_type = t_type_info_complex;
				break;

			case Basic_quaternion64:
			case Basic_quaternion128:
			case Basic_quaternion256:
				tag_type = t_type_info_quaternion;
				break;

			case Basic_rawptr:
				tag_type = t_type_info_pointer;
				break;

			case Basic_string:
				tag_type = t_type_info_string;
				break;

			case Basic_cstring:
				{
					tag_type = t_type_info_string;
					LLVMValueRef vals[1] = {
						lb_const_bool(m, t_bool, true).value,
					};
					variant_value = llvm_const_named_struct(m, tag_type, vals, gb_count_of(vals));
				}
				break;

			case Basic_any:
				tag_type = t_type_info_any;
				break;

			case Basic_typeid:
				tag_type = t_type_info_typeid;
				break;
			}
			break;
		case Type_Pointer: {
			tag_type = t_type_info_pointer;
			LLVMValueRef vals[1] = {
				get_type_info_ptr(m, t->Pointer.elem),
			};
			variant_value = llvm_const_named_struct(m, tag_type, vals, gb_count_of(vals));
			break;
		}
		case Type_MultiPointer: {
			tag_type = t_type_info_multi_pointer;
			LLVMValueRef vals[1] = {
				get_type_info_ptr(m, t->MultiPointer.elem),
			};
			variant_value = llvm_const_named_struct(m, tag_type, vals, gb_count_of(vals));
			break;
		}
		case Type_SoaPointer: {
			tag_type = t_type_info_soa_pointer;
			LLVMValueRef vals[1] = {
				get_type_info_ptr(m, t->SoaPointer.elem),
			};
			variant_value = llvm_const_named_struct(m, tag_type, vals, gb_count_of(vals));
			break;
		}
		case Type_Array: {
			tag_type = t_type_info_array;
			i64 ez = type_size_of(t->Array.elem);
			LLVMValueRef vals[3] = {
				get_type_info_ptr(m, t->Array.elem),
				lb_const_int(m, t_int, ez).value,
				lb_const_int(m, t_int, t->Array.count).value,
			};
			variant_value = llvm_const_named_struct(m, tag_type, vals, gb_count_of(vals));
			break;
		}
		case Type_EnumeratedArray: {
			tag_type = t_type_info_enumerated_array;
			LLVMValueRef vals[7] = {
				get_type_info_ptr(m, t->EnumeratedArray.elem),
				get_type_info_ptr(m, t->EnumeratedArray.index),
				lb_const_int(m, t_int, type_size_of(t->EnumeratedArray.elem)).value,
				lb_const_int(m, t_int, t->EnumeratedArray.count).value,

				// Unions
				lb_const_value(m, t_type_info_enum_value, *t->EnumeratedArray.min_value).value,
				lb_const_value(m, t_type_info_enum_value, *t->EnumeratedArray.max_value).value,

				lb_const_bool(m, t_bool, t->EnumeratedArray.is_sparse).value,
			};
			variant_value = llvm_const_named_struct(m, tag_type, vals, gb_count_of(vals));
			break;
		}
		case Type_DynamicArray: {
			tag_type = t_type_info_dynamic_array;
			LLVMValueRef vals[2] = {
				get_type_info_ptr(m, t->DynamicArray.elem),
				lb_const_int(m, t_int, type_size_of(t->DynamicArray.elem)).value,
			};
			variant_value = llvm_const_named_struct(m, tag_type, vals, gb_count_of(vals));
			break;
		}
		case Type_Slice: {
			tag_type = t_type_info_slice;
			LLVMValueRef vals[2] = {
				get_type_info_ptr(m, t->Slice.elem),
				lb_const_int(m, t_int, type_size_of(t->Slice.elem)).value,
			};
			variant_value = llvm_const_named_struct(m, tag_type, vals, gb_count_of(vals));
			break;
		}
		case Type_Proc: {
			tag_type = t_type_info_procedure;

			LLVMValueRef params = LLVMConstNull(lb_type(m, t_type_info_ptr));
			LLVMValueRef results = LLVMConstNull(lb_type(m, t_type_info_ptr));
			if (t->Proc.params != nullptr) {
				params = get_type_info_ptr(m, t->Proc.params);
			}
			if (t->Proc.results != nullptr) {
				results = get_type_info_ptr(m, t->Proc.results);
			}

			LLVMValueRef vals[4] = {
				params,
				results,
				lb_const_bool(m, t_bool, t->Proc.variadic).value,
				lb_const_int(m, t_u8, t->Proc.calling_convention).value,
			};

			variant_value = llvm_const_named_struct(m, tag_type, vals, gb_count_of(vals));
			break;
		}
		case Type_Tuple: {
			tag_type = t_type_info_parameters;

			i64 type_offset = 0;
			i64 name_offset = 0;
			lbValue memory_types = lb_type_info_member_types_offset(m, t->Tuple.variables.count, &type_offset);
			lbValue memory_names = lb_type_info_member_names_offset(m, t->Tuple.variables.count, &name_offset);

			for_array(i, t->Tuple.variables) {
				// NOTE(bill): offset is not used for tuples
				Entity *f = t->Tuple.variables[i];

				lbValue index = lb_const_int(m, t_int, i);
				lbValue type_info = lb_const_ptr_offset(m, memory_types, index);

				lb_global_type_info_member_types_values[type_offset+i] = get_type_info_ptr(m, f->type);
				if (f->token.string.len > 0) {
					lb_global_type_info_member_names_values[name_offset+i] = lb_const_string(m, f->token.string).value;
				}
			}

			lbValue count = lb_const_int(m, t_int, t->Tuple.variables.count);
			LLVMValueRef types_slice = llvm_const_slice(m, memory_types, count);
			LLVMValueRef names_slice = llvm_const_slice(m, memory_names, count);

			LLVMValueRef vals[2] = {
				types_slice,
				names_slice,
			};

			variant_value = llvm_const_named_struct(m, tag_type, vals, gb_count_of(vals));
			break;
		}
		case Type_Enum:
			tag_type = t_type_info_enum;
			{
				GB_ASSERT(t->Enum.base_type != nullptr);
				// GB_ASSERT_MSG(type_size_of(t_type_info_enum_value) == 16, "%lld == 16", cast(long long)type_size_of(t_type_info_enum_value));
				LLVMValueRef vals[3] = {};
				vals[0] = get_type_info_ptr(m, t->Enum.base_type);
				if (t->Enum.fields.count > 0) {
					auto fields = t->Enum.fields;
					lbValue name_array = lb_generate_global_array(m, t_string, fields.count,
					                                              str_lit("$enum_names"), cast(i64)entry_index);
					lbValue value_array = lb_generate_global_array(m, t_type_info_enum_value, fields.count,
					                                               str_lit("$enum_values"), cast(i64)entry_index);

					LLVMValueRef *name_values = gb_alloc_array(temporary_allocator(), LLVMValueRef, fields.count);
					LLVMValueRef *value_values = gb_alloc_array(temporary_allocator(), LLVMValueRef, fields.count);

					GB_ASSERT(is_type_integer(t->Enum.base_type));

					for_array(i, fields) {
						name_values[i] = lb_const_string(m, fields[i]->token.string).value;
						value_values[i] = lb_const_value(m, t_i64, fields[i]->Constant.value).value;
					}

					LLVMValueRef name_init = llvm_const_array(lb_type(m, t_string), name_values, cast(unsigned)fields.count);
					LLVMValueRef value_init = llvm_const_array(lb_type(m, t_type_info_enum_value), value_values, cast(unsigned)fields.count);
					LLVMSetInitializer(name_array.value, name_init);
					LLVMSetInitializer(value_array.value, value_init);
					LLVMSetGlobalConstant(name_array.value, true);
					LLVMSetGlobalConstant(value_array.value, true);

					lbValue v_count = lb_const_int(m, t_int, fields.count);

					vals[1] = llvm_const_slice(m, lbValue{name_array.value, alloc_type_pointer(t_string)}, v_count);
					vals[2] = llvm_const_slice(m, lbValue{value_array.value, alloc_type_pointer(t_type_info_enum_value)}, v_count);
				} else {
					vals[1] = LLVMConstNull(lb_type(m, base_type(t_type_info_enum)->Struct.fields[1]->type));
					vals[2] = LLVMConstNull(lb_type(m, base_type(t_type_info_enum)->Struct.fields[2]->type));
				}

				variant_value = llvm_const_named_struct(m, tag_type, vals, gb_count_of(vals));
			}
			break;
		case Type_Union: {
			tag_type = t_type_info_union;
			{
				LLVMValueRef vals[7] = {};

				isize variant_count = gb_max(0, t->Union.variants.count);
				i64 variant_offset = 0;
				lbValue memory_types = lb_type_info_member_types_offset(m, variant_count, &variant_offset);

				for (isize variant_index = 0; variant_index < variant_count; variant_index++) {
					Type *vt = t->Union.variants[variant_index];
					lb_global_type_info_member_types_values[variant_offset+variant_index] = get_type_info_ptr(m, vt);
				}

				lbValue count = lb_const_int(m, t_int, variant_count);
				vals[0] = llvm_const_slice(m, memory_types, count);

				i64 tag_size = union_tag_size(t);
				if (tag_size > 0) {
					i64 tag_offset = align_formula(t->Union.variant_block_size, tag_size);
					vals[1] = lb_const_int(m, t_uintptr, tag_offset).value;
					vals[2] = get_type_info_ptr(m, union_tag_type(t));
				} else {
					vals[1] = lb_const_int(m, t_uintptr, 0).value;
					vals[2] = LLVMConstNull(lb_type(m, t_type_info_ptr));
				}

				if (is_type_comparable(t) && !is_type_simple_compare(t)) {
					vals[3] = lb_equal_proc_for_type(m, t).value;
				}

				vals[4] = lb_const_bool(m, t_bool, t->Union.custom_align != 0).value;
				vals[5] = lb_const_bool(m, t_bool, t->Union.kind == UnionType_no_nil).value;
				vals[6] = lb_const_bool(m, t_bool, t->Union.kind == UnionType_shared_nil).value;

				for (isize i = 0; i < gb_count_of(vals); i++) {
					if (vals[i] == nullptr) {
						vals[i] = LLVMConstNull(lb_type(m, get_struct_field_type(tag_type, i)));
					}
				}

				variant_value = llvm_const_named_struct(m, tag_type, vals, gb_count_of(vals));
			}
			break;
		}
		case Type_Struct: {
			tag_type = t_type_info_struct;

			LLVMValueRef vals[13] = {};
			{
				lbValue is_packed       = lb_const_bool(m, t_bool, t->Struct.is_packed);
				lbValue is_raw_union    = lb_const_bool(m, t_bool, t->Struct.is_raw_union);
				lbValue is_no_copy      = lb_const_bool(m, t_bool, t->Struct.is_no_copy);
				lbValue is_custom_align = lb_const_bool(m, t_bool, t->Struct.custom_align != 0);
				vals[5] = is_packed.value;
				vals[6] = is_raw_union.value;
				vals[7] = is_no_copy.value;
				vals[8] = is_custom_align.value;
				if (is_type_comparable(t) && !is_type_simple_compare(t)) {
					vals[9] = lb_equal_proc_for_type(m, t).value;
				}

				if (t->Struct.soa_kind != StructSoa_None) {
					Type *kind_type = get_struct_field_type(tag_type, 10);

					lbValue soa_kind = lb_const_value(m, kind_type, exact_value_i64(t->Struct.soa_kind));
					LLVMValueRef soa_type = get_type_info_ptr(m, t->Struct.soa_elem);
					lbValue soa_len = lb_const_int(m, t_int, t->Struct.soa_count);

					vals[10] = soa_kind.value;
					vals[11] = soa_type;
					vals[12] = soa_len.value;
				}
			}

			isize count = t->Struct.fields.count;
			if (count > 0) {
				i64 types_offset   = 0;
				i64 names_offset   = 0;
				i64 offsets_offset = 0;
				i64 usings_offset  = 0;
				i64 tags_offset    = 0;
				lbValue memory_types   = lb_type_info_member_types_offset  (m, count, &types_offset);
				lbValue memory_names   = lb_type_info_member_names_offset  (m, count, &names_offset);
				lbValue memory_offsets = lb_type_info_member_offsets_offset(m, count, &offsets_offset);
				lbValue memory_usings  = lb_type_info_member_usings_offset (m, count, &usings_offset);
				lbValue memory_tags    = lb_type_info_member_tags_offset   (m, count, &tags_offset);

				type_set_offsets(t); // NOTE(bill): Just in case the offsets have not been set yet
				for (isize source_index = 0; source_index < count; source_index++) {
					Entity *f = t->Struct.fields[source_index];
					i64 foffset = 0;
					if (!t->Struct.is_raw_union) {
						GB_ASSERT(t->Struct.offsets != nullptr);
						GB_ASSERT(0 <= f->Variable.field_index && f->Variable.field_index < count);
						foffset = t->Struct.offsets[source_index];
					}
					GB_ASSERT(f->kind == Entity_Variable && f->flags & EntityFlag_Field);

					lb_global_type_info_member_types_values[types_offset+source_index] = get_type_info_ptr(m, f->type);
					lb_global_type_info_member_offsets_values[offsets_offset+source_index] = lb_const_int(m, t_uintptr, foffset).value;
					lb_global_type_info_member_usings_values[usings_offset+source_index] = lb_const_bool(m, t_bool, (f->flags&EntityFlag_Using) != 0).value;

					if (f->token.string.len > 0) {
						lb_global_type_info_member_names_values[names_offset+source_index] = lb_const_string(m, f->token.string).value;
					}

					if (t->Struct.tags != nullptr) {
						String tag_string = t->Struct.tags[source_index];
						if (tag_string.len > 0) {
							lb_global_type_info_member_tags_values[tags_offset+source_index] = lb_const_string(m, tag_string).value;
						}
					}
				}

				lbValue cv = lb_const_int(m, t_int, count);
				vals[0] = llvm_const_slice(m, memory_types,   cv);
				vals[1] = llvm_const_slice(m, memory_names,   cv);
				vals[2] = llvm_const_slice(m, memory_offsets, cv);
				vals[3] = llvm_const_slice(m, memory_usings,  cv);
				vals[4] = llvm_const_slice(m, memory_tags,    cv);
			}

			for (isize i = 0; i < gb_count_of(vals); i++) {
				if (vals[i] == nullptr) {
					vals[i] = LLVMConstNull(lb_type(m, get_struct_field_type(tag_type, i)));
				}
			}

			variant_value = llvm_const_named_struct(m, tag_type, vals, gb_count_of(vals));
			break;
		}
		case Type_Map: {
			tag_type = t_type_info_map;
			init_map_internal_types(t);

			LLVMValueRef vals[3] = {
				get_type_info_ptr(m, t->Map.key),
				get_type_info_ptr(m, t->Map.value),
				lb_gen_map_info_ptr(m, t).value
			};

			variant_value = llvm_const_named_struct(m, tag_type, vals, gb_count_of(vals));
			break;
		}
		case Type_BitSet:
			{
				tag_type = t_type_info_bit_set;

				GB_ASSERT(is_type_typed(t->BitSet.elem));

				LLVMValueRef vals[4] = {
					get_type_info_ptr(m, t->BitSet.elem),
					LLVMConstNull(lb_type(m, t_type_info_ptr)),
					lb_const_int(m, t_i64, t->BitSet.lower).value,
					lb_const_int(m, t_i64, t->BitSet.upper).value,
				};
				if (t->BitSet.underlying != nullptr) {
					vals[1] = get_type_info_ptr(m, t->BitSet.underlying);
				}

				variant_value = llvm_const_named_struct(m, tag_type, vals, gb_count_of(vals));
			}
			break;
		case Type_SimdVector:
			{
				tag_type = t_type_info_simd_vector;

				LLVMValueRef vals[3] = {};
				vals[0] = get_type_info_ptr(m, t->SimdVector.elem);
				vals[1] = lb_const_int(m, t_int, type_size_of(t->SimdVector.elem)).value;
				vals[2] = lb_const_int(m, t_int, t->SimdVector.count).value;

				variant_value = llvm_const_named_struct(m, tag_type, vals, gb_count_of(vals));
			}
			break;
		case Type_RelativePointer:
			{
				tag_type = t_type_info_relative_pointer;
				LLVMValueRef vals[2] = {
					get_type_info_ptr(m, t->RelativePointer.pointer_type),
					get_type_info_ptr(m, t->RelativePointer.base_integer),
				};

				variant_value = llvm_const_named_struct(m, tag_type, vals, gb_count_of(vals));
			}
			break;
		case Type_RelativeMultiPointer:
			{
				tag_type = t_type_info_relative_multi_pointer;
				LLVMValueRef vals[2] = {
					get_type_info_ptr(m, t->RelativeMultiPointer.pointer_type),
					get_type_info_ptr(m, t->RelativeMultiPointer.base_integer),
				};

				variant_value = llvm_const_named_struct(m, tag_type, vals, gb_count_of(vals));
			}
			break;
		case Type_Matrix:
			{
				tag_type = t_type_info_matrix;
				i64 ez = type_size_of(t->Matrix.elem);

				LLVMValueRef vals[5] = {
					get_type_info_ptr(m, t->Matrix.elem),
					lb_const_int(m, t_int, ez).value,
					lb_const_int(m, t_int, matrix_type_stride_in_elems(t)).value,
					lb_const_int(m, t_int, t->Matrix.row_count).value,
					lb_const_int(m, t_int, t->Matrix.column_count).value,
				};

				variant_value = llvm_const_named_struct(m, tag_type, vals, gb_count_of(vals));
			}
			break;
		case Type_BitField:
			{
				tag_type = t_type_info_bit_field;
				LLVMValueRef vals[6] = {};
				vals[0] = get_type_info_ptr(m, t->BitField.backing_type);

				isize count = t->BitField.fields.count;
				if (count > 0) {
					i64 names_offset       = 0;
					i64 types_offset       = 0;
					i64 bit_sizes_offset   = 0;
					i64 bit_offsets_offset = 0;
					i64 tags_offset        = 0;
					lbValue memory_names       = lb_type_info_member_names_offset  (m, count, &names_offset);
					lbValue memory_types       = lb_type_info_member_types_offset  (m, count, &types_offset);
					lbValue memory_bit_sizes   = lb_type_info_member_offsets_offset(m, count, &bit_sizes_offset);
					lbValue memory_bit_offsets = lb_type_info_member_offsets_offset(m, count, &bit_offsets_offset);
					lbValue memory_tags        = lb_type_info_member_tags_offset   (m, count, &tags_offset);

					u64 bit_offset = 0;
					for (isize source_index = 0; source_index < count; source_index++) {
						Entity *f = t->BitField.fields[source_index];
						u64 bit_size = cast(u64)t->BitField.bit_sizes[source_index];

						lbValue index = lb_const_int(m, t_int, source_index);

						if (f->token.string.len > 0) {
							lb_global_type_info_member_names_values[names_offset+source_index] = lb_const_string(m, f->token.string).value;
						}
						lb_global_type_info_member_types_values[types_offset+source_index] = get_type_info_ptr(m, f->type);
						lb_global_type_info_member_offsets_values[bit_sizes_offset+source_index] = lb_const_int(m, t_uintptr, bit_size).value;
						lb_global_type_info_member_offsets_values[bit_offsets_offset+source_index] = lb_const_int(m, t_uintptr, bit_offset).value;

						if (t->BitField.tags) {
							String tag = t->BitField.tags[source_index];
							if (tag.len > 0) {
								lb_global_type_info_member_tags_values[tags_offset+source_index] = lb_const_string(m, tag).value;
							}
						}

						bit_offset += bit_size;
					}

					lbValue cv = lb_const_int(m, t_int, count);
					vals[1] = llvm_const_slice(m, memory_names,       cv);
					vals[2] = llvm_const_slice(m, memory_types,       cv);
					vals[3] = llvm_const_slice(m, memory_bit_sizes,   cv);
					vals[4] = llvm_const_slice(m, memory_bit_offsets, cv);
					vals[5] = llvm_const_slice(m, memory_tags,        cv);
				}

				for (isize i = 0; i < gb_count_of(vals); i++) {
					if (vals[i] == nullptr) {
						vals[i] = LLVMConstNull(lb_type(m, get_struct_field_type(tag_type, i)));
					}
				}

				variant_value = llvm_const_named_struct(m, tag_type, vals, gb_count_of(vals));
				break;
			}
		}

		i64 tag_index = 0;
		if (tag_type != nullptr) {
			tag_index = union_variant_index(ut, tag_type);
		}
		GB_ASSERT(tag_index <= Typeid__COUNT);

		LLVMValueRef full_variant_values[3] = {};
		if (full_variant_elem_count == 2) {
			if (variant_value == nullptr) {
				full_variant_values[0] = LLVMConstNull(LLVMStructGetTypeAtIndex(full_variant_type, 0));
				full_variant_values[1] = LLVMConstInt(LLVMStructGetTypeAtIndex(full_variant_type, 1), tag_index, false);
			} else {
				full_variant_values[0] = variant_value;
				full_variant_values[1] = LLVMConstInt(LLVMStructGetTypeAtIndex(full_variant_type, 1), tag_index, false);
			}
		} else {
			if (variant_value == nullptr) {
				variant_value = LLVMConstNull(LLVMStructGetTypeAtIndex(full_variant_type, 0));
			} else {
				GB_ASSERT_MSG(LLVMStructGetTypeAtIndex(full_variant_type, 0) == LLVMTypeOf(variant_value),
				              "\n%s -> %s\n%s vs %s\n",
				              type_to_string(t), LLVMPrintValueToString(variant_value),
				              LLVMPrintTypeToString(LLVMStructGetTypeAtIndex(full_variant_type, 0)), LLVMPrintTypeToString(LLVMTypeOf(variant_value))
				);
			}
			full_variant_values[0] = variant_value;
			full_variant_values[1] = LLVMConstNull(LLVMStructGetTypeAtIndex(full_variant_type, 1));
			full_variant_values[2] = LLVMConstInt(LLVMStructGetTypeAtIndex(full_variant_type, 2), tag_index, false);
		}
		LLVMValueRef full_variant_value = LLVMConstNamedStruct(full_variant_type, full_variant_values, full_variant_elem_count);

		small_const_values[variant_index] = full_variant_value;

		LLVMSetInitializer(giant_const_values[entry_index], LLVMConstNamedStruct(stype, small_const_values, variant_index+1));
	}

	for (isize i = 0; i < global_type_info_data_entity_count; i++) {
		giant_const_values[i] = LLVMConstPointerCast(giant_const_values[i], lb_type(m, t_type_info_ptr));
	}

	LLVMValueRef giant_const = LLVMConstArray(lb_type(m, t_type_info_ptr), giant_const_values, cast(unsigned)global_type_info_data_entity_count);
	LLVMValueRef giant_array = lb_global_type_info_data_ptr(m).value;
	LLVMSetInitializer(giant_array, giant_const);
}

gb_internal void lb_setup_type_info_data(lbModule *m) { // NOTE(bill): Setup type_info data
	if (build_context.no_rtti) {
		return;
	}

	i64 global_type_info_data_entity_count = 0;

	// NOTE(bill): Set the type_table slice with the global backing array
	lbValue global_type_table = lb_find_runtime_value(m, str_lit("type_table"));
	Type *type = base_type(lb_global_type_info_data_entity->type);
	GB_ASSERT(type->kind == Type_Array);

	global_type_info_data_entity_count = type->Array.count;

	if (true) {
		lb_setup_type_info_data_giant_array(m, global_type_info_data_entity_count);
	}

	LLVMValueRef data = lb_global_type_info_data_ptr(m).value;
	data = LLVMConstPointerCast(data, lb_type(m, alloc_type_pointer(type->Array.elem)));
	LLVMValueRef len = LLVMConstInt(lb_type(m, t_int), type->Array.count, true);

	Type *t = type_deref(global_type_table.type);
	GB_ASSERT(is_type_slice(t));

	LLVMValueRef slice = llvm_const_slice_internal(m, data, len);
	LLVMSetInitializer(global_type_table.value, slice);
}