types.cpp 66 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485
// Forward declarations; defined elsewhere in the compiler.
struct Scope;
struct AstNode;

// BasicKind enumerates the built-in ("basic") types.
// NOTE: the declaration order is significant — each enumerator doubles as
// an index into the `basic_types` table defined later in this file.
enum BasicKind {
	Basic_Invalid,

	Basic_bool,

	Basic_i8,
	Basic_u8,
	Basic_i16,
	Basic_u16,
	Basic_i32,
	Basic_u32,
	Basic_i64,
	Basic_u64,
	Basic_i128,
	Basic_u128,

	Basic_rune,

	// Basic_f16,
	Basic_f32,
	Basic_f64,

	// Basic_complex32,
	Basic_complex64,
	Basic_complex128,

	Basic_int,
	Basic_uint,
	Basic_uintptr,
	Basic_rawptr,
	Basic_string, // ^u8 + int
	Basic_any,    // rawptr + ^Type_Info

	// Untyped kinds: compile-time constants that have not yet been given
	// a concrete type.
	Basic_UntypedBool,
	Basic_UntypedInteger,
	Basic_UntypedFloat,
	Basic_UntypedComplex,
	Basic_UntypedString,
	Basic_UntypedRune,
	Basic_UntypedNil,
	Basic_UntypedUndef,

	Basic_COUNT,

	Basic_byte = Basic_u8, // alias, not a distinct kind
};
// Classification flags for basic types; stored as a bit set in
// BasicType::flags and tested by the is_type_* predicates below.
enum BasicFlag {
	BasicFlag_Boolean  = GB_BIT(0),
	BasicFlag_Integer  = GB_BIT(1),
	BasicFlag_Unsigned = GB_BIT(2),
	BasicFlag_Float    = GB_BIT(3),
	BasicFlag_Complex  = GB_BIT(4),
	BasicFlag_Pointer  = GB_BIT(5),
	BasicFlag_String   = GB_BIT(6),
	BasicFlag_Rune     = GB_BIT(7),
	BasicFlag_Untyped  = GB_BIT(8),

	// Derived masks built from the flags above.
	BasicFlag_Numeric      = BasicFlag_Integer | BasicFlag_Float | BasicFlag_Complex,
	BasicFlag_Ordered      = BasicFlag_Integer | BasicFlag_Float | BasicFlag_String | BasicFlag_Pointer | BasicFlag_Rune,
	BasicFlag_ConstantType = BasicFlag_Boolean | BasicFlag_Numeric | BasicFlag_String | BasicFlag_Pointer | BasicFlag_Rune,
};
// Metadata describing one built-in type (one entry per BasicKind).
struct BasicType {
	BasicKind kind;
	u32       flags; // bit set of BasicFlag values
	i64       size;  // -1 if arch. dep.
	String    name;
};
// Payload for Type_Struct. Kept as a named struct (rather than inline in
// TYPE_KINDS) because of its size.
struct TypeStruct {
	Array<Entity *> fields;
	Array<Entity *> fields_in_src_order; // fields in source-declaration order
	AstNode *       node;
	Scope *         scope;

	Array<i64> offsets;
	bool       are_offsets_set;
	bool       are_offsets_being_processed; // guards recursive offset computation

	bool is_packed;
	bool is_raw_union;
	bool is_polymorphic;
	bool is_poly_specialized;
	bool has_proc_default_values;

	Type *polymorphic_params; // Type_Tuple
	Type *polymorphic_parent;

	i64 custom_align; // NOTE(bill): Only used in structs at the moment

	Entity *names;
};
// X-macro listing every type kind together with its payload struct.
// Expanded three times below: to generate the TypeKind enum, the
// type_strings table, and the per-kind payload typedefs inside Type.
#define TYPE_KINDS \
	TYPE_KIND(Basic, BasicType) \
	TYPE_KIND(Generic, struct { \
		i64 id; \
		String name; \
		Type * specialized; \
		Scope *scope; \
	}) \
	TYPE_KIND(Pointer, struct { Type *elem; }) \
	TYPE_KIND(Array, struct { \
		Type *elem; \
		i64 count; \
		Type *generic_type; \
	}) \
	TYPE_KIND(DynamicArray, struct { Type *elem; }) \
	TYPE_KIND(Slice, struct { Type *elem; }) \
	TYPE_KIND(Struct, TypeStruct) \
	TYPE_KIND(Enum, struct { \
		Entity **fields; \
		i32 field_count; \
		AstNode *node; \
		Scope * scope; \
		Entity * names; \
		Type * base_type; \
		Entity * count; \
		Entity * min_value; \
		Entity * max_value; \
	}) \
	TYPE_KIND(Union, struct { \
		Array<Type *> variants; \
		AstNode *node; \
		Scope * scope; \
		i64 variant_block_size; \
		i64 custom_align; \
		i64 tag_size; \
	}) \
	TYPE_KIND(Named, struct { \
		String name; \
		Type * base; \
		Entity *type_name; /* Entity_TypeName */ \
	}) \
	TYPE_KIND(Tuple, struct { \
		Array<Entity *> variables; /* Entity_Variable */ \
		Array<i64> offsets; \
		bool are_offsets_set; \
	}) \
	TYPE_KIND(Proc, struct { \
		AstNode *node; \
		Scope * scope; \
		Type * params; /* Type_Tuple */ \
		Type * results; /* Type_Tuple */ \
		i32 param_count; \
		i32 result_count; \
		Type ** abi_compat_params; \
		Type * abi_compat_result_type; \
		bool return_by_pointer; \
		bool variadic; \
		i32 variadic_index; \
		bool require_results; \
		bool c_vararg; \
		bool is_polymorphic; \
		bool is_poly_specialized; \
		bool has_proc_default_values; \
		isize specialization_count; \
		ProcCallingConvention calling_convention; \
	}) \
	TYPE_KIND(Map, struct { \
		Type * key; \
		Type * value; \
		Type * entry_type; \
		Type * generated_struct_type; \
		Type * internal_type; \
		Type * lookup_result_type; \
	}) \
	TYPE_KIND(BitFieldValue, struct { u32 bits; }) \
	TYPE_KIND(BitField, struct { \
		Scope * scope; \
		Entity **fields; \
		i32 field_count; \
		u32 * offsets; \
		u32 * sizes; \
		i64 custom_align; \
	}) \

// One enumerator per TYPE_KIND entry (Type_Basic, Type_Generic, ...).
enum TypeKind {
	Type_Invalid,
#define TYPE_KIND(k, ...) GB_JOIN2(Type_, k),
	TYPE_KINDS
#undef TYPE_KIND
	Type_Count,
};
  168. String const type_strings[] = {
  169. {cast(u8 *)"Invalid", gb_size_of("Invalid")},
  170. #define TYPE_KIND(k, ...) {cast(u8 *)#k, gb_size_of(#k)-1},
  171. TYPE_KINDS
  172. #undef TYPE_KIND
  173. };
// Generate `typedef <payload> TypeBasic;`, `TypeGeneric`, ... for each kind.
#define TYPE_KIND(k, ...) typedef __VA_ARGS__ GB_JOIN2(Type, k);
TYPE_KINDS
#undef TYPE_KIND

// The Type value itself: a tagged union over all per-kind payload structs,
// discriminated by `kind`. Access the union member matching `kind` only.
struct Type {
	TypeKind kind;
	union {
#define TYPE_KIND(k, ...) GB_JOIN2(Type, k) k;
		TYPE_KINDS
#undef TYPE_KIND
	};
	// NOTE(review): presumably set when checking this type produced an
	// error — confirm against the checker code.
	bool failure;
};
// TODO(bill): Should I add extra information here specifying the kind of selection?
// e.g. field, constant, array field, type field, etc.

// The result of resolving a selector expression (e.g. `a.b.c`):
// the selected entity plus the index path taken to reach it.
struct Selection {
	Entity *   entity;
	Array<i32> index;
	bool       indirect; // Set if there was a pointer deref anywhere down the line
};

Selection empty_selection = {0};
  194. Selection make_selection(Entity *entity, Array<i32> index, bool indirect) {
  195. Selection s = {entity, index, indirect};
  196. return s;
  197. }
// Appends one index step to the selection path, lazily creating the
// backing dynamic array on first use.
void selection_add_index(Selection *s, isize index) {
	// IMPORTANT NOTE(bill): this requires a stretchy buffer/dynamic array so it requires some form
	// of heap allocation
	// TODO(bill): Find a way to use a backing buffer for initial use as the general case is probably .count<3
	if (s->index.data == nullptr) {
		array_init(&s->index, heap_allocator());
	}
	array_add(&s->index, cast(i32)index);
}
// The canonical Type value for every basic type, indexed by BasicKind.
// A size of -1 means the size is architecture dependent; untyped kinds
// have size 0 because they never exist at runtime.
gb_global Type basic_types[] = {
	{Type_Basic, {Basic_Invalid,        0,                                       0, STR_LIT("invalid type")}},
	{Type_Basic, {Basic_bool,           BasicFlag_Boolean,                       1, STR_LIT("bool")}},
	{Type_Basic, {Basic_i8,             BasicFlag_Integer,                       1, STR_LIT("i8")}},
	{Type_Basic, {Basic_u8,             BasicFlag_Integer | BasicFlag_Unsigned,  1, STR_LIT("u8")}},
	{Type_Basic, {Basic_i16,            BasicFlag_Integer,                       2, STR_LIT("i16")}},
	{Type_Basic, {Basic_u16,            BasicFlag_Integer | BasicFlag_Unsigned,  2, STR_LIT("u16")}},
	{Type_Basic, {Basic_i32,            BasicFlag_Integer,                       4, STR_LIT("i32")}},
	{Type_Basic, {Basic_u32,            BasicFlag_Integer | BasicFlag_Unsigned,  4, STR_LIT("u32")}},
	{Type_Basic, {Basic_i64,            BasicFlag_Integer,                       8, STR_LIT("i64")}},
	{Type_Basic, {Basic_u64,            BasicFlag_Integer | BasicFlag_Unsigned,  8, STR_LIT("u64")}},
	{Type_Basic, {Basic_i128,           BasicFlag_Integer,                      16, STR_LIT("i128")}},
	{Type_Basic, {Basic_u128,           BasicFlag_Integer | BasicFlag_Unsigned, 16, STR_LIT("u128")}},
	{Type_Basic, {Basic_rune,           BasicFlag_Integer | BasicFlag_Rune,      4, STR_LIT("rune")}},
	// {Type_Basic, {Basic_f16,         BasicFlag_Float,                         2, STR_LIT("f16")}},
	{Type_Basic, {Basic_f32,            BasicFlag_Float,                         4, STR_LIT("f32")}},
	{Type_Basic, {Basic_f64,            BasicFlag_Float,                         8, STR_LIT("f64")}},
	// {Type_Basic, {Basic_complex32,   BasicFlag_Complex,                       4, STR_LIT("complex32")}},
	{Type_Basic, {Basic_complex64,      BasicFlag_Complex,                       8, STR_LIT("complex64")}},
	{Type_Basic, {Basic_complex128,     BasicFlag_Complex,                      16, STR_LIT("complex128")}},
	{Type_Basic, {Basic_int,            BasicFlag_Integer,                      -1, STR_LIT("int")}},
	{Type_Basic, {Basic_uint,           BasicFlag_Integer | BasicFlag_Unsigned, -1, STR_LIT("uint")}},
	{Type_Basic, {Basic_uintptr,        BasicFlag_Integer | BasicFlag_Unsigned, -1, STR_LIT("uintptr")}},
	{Type_Basic, {Basic_rawptr,         BasicFlag_Pointer,                      -1, STR_LIT("rawptr")}},
	{Type_Basic, {Basic_string,         BasicFlag_String,                       -1, STR_LIT("string")}},
	{Type_Basic, {Basic_any,            0,                                      -1, STR_LIT("any")}},
	{Type_Basic, {Basic_UntypedBool,    BasicFlag_Boolean | BasicFlag_Untyped,   0, STR_LIT("untyped bool")}},
	{Type_Basic, {Basic_UntypedInteger, BasicFlag_Integer | BasicFlag_Untyped,   0, STR_LIT("untyped integer")}},
	{Type_Basic, {Basic_UntypedFloat,   BasicFlag_Float | BasicFlag_Untyped,     0, STR_LIT("untyped float")}},
	{Type_Basic, {Basic_UntypedComplex, BasicFlag_Complex | BasicFlag_Untyped,   0, STR_LIT("untyped complex")}},
	{Type_Basic, {Basic_UntypedString,  BasicFlag_String | BasicFlag_Untyped,    0, STR_LIT("untyped string")}},
	{Type_Basic, {Basic_UntypedRune,    BasicFlag_Integer | BasicFlag_Untyped,   0, STR_LIT("untyped rune")}},
	{Type_Basic, {Basic_UntypedNil,     BasicFlag_Untyped,                       0, STR_LIT("untyped nil")}},
	{Type_Basic, {Basic_UntypedUndef,   BasicFlag_Untyped,                       0, STR_LIT("untyped undefined")}},
};
// gb_global Type basic_type_aliases[] = {
// // {Type_Basic, {Basic_byte, BasicFlag_Integer | BasicFlag_Unsigned, 1, STR_LIT("byte")}},
// // {Type_Basic, {Basic_rune, BasicFlag_Integer, 4, STR_LIT("rune")}},
// };

// Convenience pointers into `basic_types`, one per basic kind.
gb_global Type *t_invalid         = &basic_types[Basic_Invalid];
gb_global Type *t_bool            = &basic_types[Basic_bool];
gb_global Type *t_i8              = &basic_types[Basic_i8];
gb_global Type *t_u8              = &basic_types[Basic_u8];
gb_global Type *t_i16             = &basic_types[Basic_i16];
gb_global Type *t_u16             = &basic_types[Basic_u16];
gb_global Type *t_i32             = &basic_types[Basic_i32];
gb_global Type *t_u32             = &basic_types[Basic_u32];
gb_global Type *t_i64             = &basic_types[Basic_i64];
gb_global Type *t_u64             = &basic_types[Basic_u64];
gb_global Type *t_i128            = &basic_types[Basic_i128];
gb_global Type *t_u128            = &basic_types[Basic_u128];
gb_global Type *t_rune            = &basic_types[Basic_rune];
// gb_global Type *t_f16          = &basic_types[Basic_f16];
gb_global Type *t_f32             = &basic_types[Basic_f32];
gb_global Type *t_f64             = &basic_types[Basic_f64];
// gb_global Type *t_complex32    = &basic_types[Basic_complex32];
gb_global Type *t_complex64       = &basic_types[Basic_complex64];
gb_global Type *t_complex128      = &basic_types[Basic_complex128];
gb_global Type *t_int             = &basic_types[Basic_int];
gb_global Type *t_uint            = &basic_types[Basic_uint];
gb_global Type *t_uintptr         = &basic_types[Basic_uintptr];
gb_global Type *t_rawptr          = &basic_types[Basic_rawptr];
gb_global Type *t_string          = &basic_types[Basic_string];
gb_global Type *t_any             = &basic_types[Basic_any];
gb_global Type *t_untyped_bool    = &basic_types[Basic_UntypedBool];
gb_global Type *t_untyped_integer = &basic_types[Basic_UntypedInteger];
gb_global Type *t_untyped_float   = &basic_types[Basic_UntypedFloat];
gb_global Type *t_untyped_complex = &basic_types[Basic_UntypedComplex];
gb_global Type *t_untyped_string  = &basic_types[Basic_UntypedString];
gb_global Type *t_untyped_rune    = &basic_types[Basic_UntypedRune];
gb_global Type *t_untyped_nil     = &basic_types[Basic_UntypedNil];
gb_global Type *t_untyped_undef   = &basic_types[Basic_UntypedUndef];
// Frequently-used derived types, created lazily by the checker
// (nullptr until initialized elsewhere).
gb_global Type *t_u8_ptr       = nullptr;
gb_global Type *t_int_ptr      = nullptr;
gb_global Type *t_i64_ptr      = nullptr;
gb_global Type *t_i128_ptr     = nullptr;
gb_global Type *t_f64_ptr      = nullptr;
gb_global Type *t_u8_slice     = nullptr;
gb_global Type *t_string_slice = nullptr;

// Type generated for the "preload" file
gb_global Type *t_type_info                = nullptr;
gb_global Type *t_type_info_enum_value     = nullptr;
gb_global Type *t_type_info_ptr            = nullptr;
gb_global Type *t_type_info_enum_value_ptr = nullptr;

gb_global Type *t_type_info_named         = nullptr;
gb_global Type *t_type_info_integer       = nullptr;
gb_global Type *t_type_info_rune          = nullptr;
gb_global Type *t_type_info_float         = nullptr;
gb_global Type *t_type_info_complex       = nullptr;
gb_global Type *t_type_info_any           = nullptr;
gb_global Type *t_type_info_string        = nullptr;
gb_global Type *t_type_info_boolean       = nullptr;
gb_global Type *t_type_info_pointer       = nullptr;
gb_global Type *t_type_info_procedure     = nullptr;
gb_global Type *t_type_info_array         = nullptr;
gb_global Type *t_type_info_dynamic_array = nullptr;
gb_global Type *t_type_info_slice         = nullptr;
gb_global Type *t_type_info_tuple         = nullptr;
gb_global Type *t_type_info_struct        = nullptr;
gb_global Type *t_type_info_union         = nullptr;
gb_global Type *t_type_info_enum          = nullptr;
gb_global Type *t_type_info_map           = nullptr;
gb_global Type *t_type_info_bit_field     = nullptr;

gb_global Type *t_type_info_named_ptr         = nullptr;
gb_global Type *t_type_info_integer_ptr       = nullptr;
gb_global Type *t_type_info_rune_ptr          = nullptr;
gb_global Type *t_type_info_float_ptr         = nullptr;
gb_global Type *t_type_info_complex_ptr       = nullptr;
gb_global Type *t_type_info_quaternion_ptr    = nullptr;
gb_global Type *t_type_info_any_ptr           = nullptr;
gb_global Type *t_type_info_string_ptr        = nullptr;
gb_global Type *t_type_info_boolean_ptr       = nullptr;
gb_global Type *t_type_info_pointer_ptr       = nullptr;
gb_global Type *t_type_info_procedure_ptr     = nullptr;
gb_global Type *t_type_info_array_ptr         = nullptr;
gb_global Type *t_type_info_dynamic_array_ptr = nullptr;
gb_global Type *t_type_info_slice_ptr         = nullptr;
gb_global Type *t_type_info_tuple_ptr         = nullptr;
gb_global Type *t_type_info_struct_ptr        = nullptr;
gb_global Type *t_type_info_union_ptr         = nullptr;
gb_global Type *t_type_info_enum_ptr          = nullptr;
gb_global Type *t_type_info_map_ptr           = nullptr;
gb_global Type *t_type_info_bit_field_ptr     = nullptr;

// Runtime-support types from the core library.
gb_global Type *t_allocator                = nullptr;
gb_global Type *t_allocator_ptr            = nullptr;
gb_global Type *t_context                  = nullptr;
gb_global Type *t_context_ptr              = nullptr;
gb_global Type *t_source_code_location     = nullptr;
gb_global Type *t_source_code_location_ptr = nullptr;
gb_global Type *t_map_key    = nullptr;
gb_global Type *t_map_header = nullptr;
// Forward declarations for helpers defined later in the file.
i64      type_size_of  (gbAllocator allocator, Type *t);
i64      type_align_of (gbAllocator allocator, Type *t);
i64      type_offset_of(gbAllocator allocator, Type *t, i32 index);
gbString type_to_string(Type *type);
void     generate_map_internal_types(gbAllocator a, Type *type);
  343. Type *base_type(Type *t) {
  344. for (;;) {
  345. if (t == nullptr) {
  346. break;
  347. }
  348. if (t->kind != Type_Named) {
  349. break;
  350. }
  351. if (t == t->Named.base) {
  352. return t_invalid;
  353. }
  354. t = t->Named.base;
  355. }
  356. return t;
  357. }
  358. Type *base_enum_type(Type *t) {
  359. Type *bt = base_type(t);
  360. if (bt != nullptr &&
  361. bt->kind == Type_Enum) {
  362. return bt->Enum.base_type;
  363. }
  364. return t;
  365. }
  366. Type *core_type(Type *t) {
  367. for (;;) {
  368. if (t == nullptr) {
  369. break;
  370. }
  371. switch (t->kind) {
  372. case Type_Named:
  373. if (t == t->Named.base) {
  374. return t_invalid;
  375. }
  376. t = t->Named.base;
  377. continue;
  378. case Type_Enum:
  379. t = t->Enum.base_type;
  380. continue;
  381. }
  382. break;
  383. }
  384. return t;
  385. }
  386. void set_base_type(Type *t, Type *base) {
  387. if (t && t->kind == Type_Named) {
  388. t->Named.base = base;
  389. }
  390. }
  391. Type *alloc_type(gbAllocator a, TypeKind kind) {
  392. Type *t = gb_alloc_item(a, Type);
  393. gb_zero_item(t);
  394. t->kind = kind;
  395. return t;
  396. }
  397. Type *make_type_basic(gbAllocator a, BasicType basic) {
  398. Type *t = alloc_type(a, Type_Basic);
  399. t->Basic = basic;
  400. return t;
  401. }
  402. Type *make_type_generic(gbAllocator a, Scope *scope, i64 id, String name, Type *specialized) {
  403. Type *t = alloc_type(a, Type_Generic);
  404. t->Generic.id = id;
  405. t->Generic.name = name;
  406. t->Generic.specialized = specialized;
  407. t->Generic.scope = scope;
  408. return t;
  409. }
  410. Type *make_type_pointer(gbAllocator a, Type *elem) {
  411. Type *t = alloc_type(a, Type_Pointer);
  412. t->Pointer.elem = elem;
  413. return t;
  414. }
  415. Type *make_type_array(gbAllocator a, Type *elem, i64 count, Type *generic_type = nullptr) {
  416. Type *t = alloc_type(a, Type_Array);
  417. t->Array.elem = elem;
  418. t->Array.count = count;
  419. t->Array.generic_type = generic_type;
  420. return t;
  421. }
  422. Type *make_type_dynamic_array(gbAllocator a, Type *elem) {
  423. Type *t = alloc_type(a, Type_DynamicArray);
  424. t->DynamicArray.elem = elem;
  425. return t;
  426. }
  427. Type *make_type_slice(gbAllocator a, Type *elem) {
  428. Type *t = alloc_type(a, Type_Slice);
  429. t->Array.elem = elem;
  430. return t;
  431. }
  432. Type *make_type_struct(gbAllocator a) {
  433. Type *t = alloc_type(a, Type_Struct);
  434. return t;
  435. }
  436. Type *make_type_union(gbAllocator a) {
  437. Type *t = alloc_type(a, Type_Union);
  438. return t;
  439. }
  440. Type *make_type_enum(gbAllocator a) {
  441. Type *t = alloc_type(a, Type_Enum);
  442. return t;
  443. }
  444. Type *make_type_named(gbAllocator a, String name, Type *base, Entity *type_name) {
  445. Type *t = alloc_type(a, Type_Named);
  446. t->Named.name = name;
  447. t->Named.base = base;
  448. t->Named.type_name = type_name;
  449. return t;
  450. }
  451. Type *make_type_tuple(gbAllocator a) {
  452. Type *t = alloc_type(a, Type_Tuple);
  453. return t;
  454. }
  455. Type *make_type_proc(gbAllocator a, Scope *scope, Type *params, isize param_count, Type *results, isize result_count, bool variadic, ProcCallingConvention calling_convention) {
  456. Type *t = alloc_type(a, Type_Proc);
  457. if (variadic) {
  458. if (param_count == 0) {
  459. GB_PANIC("variadic procedure must have at least one parameter");
  460. }
  461. GB_ASSERT(params != nullptr && params->kind == Type_Tuple);
  462. Entity *e = params->Tuple.variables[param_count-1];
  463. if (base_type(e->type)->kind != Type_Slice) {
  464. // NOTE(bill): For custom calling convention
  465. GB_PANIC("variadic parameter must be of type slice");
  466. }
  467. }
  468. t->Proc.scope = scope;
  469. t->Proc.params = params;
  470. t->Proc.param_count = cast(i32)param_count;
  471. t->Proc.results = results;
  472. t->Proc.result_count = cast(i32)result_count;
  473. t->Proc.variadic = variadic;
  474. t->Proc.calling_convention = calling_convention;
  475. return t;
  476. }
  477. bool is_type_valid_for_keys(Type *t);
  478. Type *make_type_map(gbAllocator a, i64 count, Type *key, Type *value) {
  479. Type *t = alloc_type(a, Type_Map);
  480. if (key != nullptr) {
  481. GB_ASSERT(is_type_valid_for_keys(key));
  482. }
  483. t->Map.key = key;
  484. t->Map.value = value;
  485. return t;
  486. }
  487. Type *make_type_bit_field_value(gbAllocator a, u32 bits) {
  488. Type *t = alloc_type(a, Type_BitFieldValue);
  489. t->BitFieldValue.bits = bits;
  490. return t;
  491. }
  492. Type *make_type_bit_field(gbAllocator a) {
  493. Type *t = alloc_type(a, Type_BitField);
  494. return t;
  495. }
  496. ////////////////////////////////////////////////////////////////
  497. Type *type_deref(Type *t) {
  498. if (t != nullptr) {
  499. Type *bt = base_type(t);
  500. if (bt == nullptr)
  501. return nullptr;
  502. if (bt != nullptr && bt->kind == Type_Pointer)
  503. return bt->Pointer.elem;
  504. }
  505. return t;
  506. }
  507. bool is_type_named(Type *t) {
  508. if (t->kind == Type_Basic) {
  509. return true;
  510. }
  511. return t->kind == Type_Named;
  512. }
  513. bool is_type_named_alias(Type *t) {
  514. if (!is_type_named(t)) {
  515. return false;
  516. }
  517. Entity *e = t->Named.type_name;
  518. if (e == nullptr) {
  519. return false;
  520. }
  521. if (e->kind != Entity_TypeName) {
  522. return false;
  523. }
  524. return e->TypeName.is_type_alias;
  525. }
// The predicates below strip named/enum wrappers with core_type() and
// then test the BasicFlag bits of the resulting basic type.

bool is_type_boolean(Type *t) {
	t = core_type(t);
	if (t->kind == Type_Basic) {
		return (t->Basic.flags & BasicFlag_Boolean) != 0;
	}
	return false;
}
bool is_type_integer(Type *t) {
	t = core_type(t);
	if (t->kind == Type_Basic) {
		return (t->Basic.flags & BasicFlag_Integer) != 0;
	}
	return false;
}
bool is_type_unsigned(Type *t) {
	t = core_type(t);
	if (t->kind == Type_Basic) {
		return (t->Basic.flags & BasicFlag_Unsigned) != 0;
	}
	return false;
}
bool is_type_rune(Type *t) {
	t = core_type(t);
	if (t->kind == Type_Basic) {
		return (t->Basic.flags & BasicFlag_Rune) != 0;
	}
	return false;
}
// Also true for arrays of numeric elements (checked recursively).
bool is_type_numeric(Type *t) {
	t = core_type(t);
	if (t->kind == Type_Basic) {
		return (t->Basic.flags & BasicFlag_Numeric) != 0;
	}
	// TODO(bill): Should this be here?
	if (t->kind == Type_Array) {
		return is_type_numeric(t->Array.elem);
	}
	return false;
}
// NOTE: uses base_type (not core_type), so enum wrappers are not stripped.
bool is_type_string(Type *t) {
	t = base_type(t);
	if (t->kind == Type_Basic) {
		return (t->Basic.flags & BasicFlag_String) != 0;
	}
	return false;
}
// A type is "typed" unless it is one of the Basic_Untyped* kinds.
// Unlike most predicates here, this one tolerates a nullptr input.
bool is_type_typed(Type *t) {
	t = base_type(t);
	if (t == nullptr) {
		return false;
	}
	if (t->kind == Type_Basic) {
		return (t->Basic.flags & BasicFlag_Untyped) == 0;
	}
	return true;
}
bool is_type_untyped(Type *t) {
	t = base_type(t);
	if (t->kind == Type_Basic) {
		return (t->Basic.flags & BasicFlag_Untyped) != 0;
	}
	return false;
}
// Ordered types support <, <=, >, >= (ordered basics and all pointers).
bool is_type_ordered(Type *t) {
	t = core_type(t);
	switch (t->kind) {
	case Type_Basic:
		return (t->Basic.flags & BasicFlag_Ordered) != 0;
	case Type_Pointer:
		return true;
	}
	return false;
}
// True for types that can hold a compile-time constant value.
bool is_type_constant_type(Type *t) {
	t = core_type(t);
	if (t->kind == Type_Basic) {
		return (t->Basic.flags & BasicFlag_ConstantType) != 0;
	}
	return false;
}
bool is_type_float(Type *t) {
	t = core_type(t);
	if (t->kind == Type_Basic) {
		return (t->Basic.flags & BasicFlag_Float) != 0;
	}
	return false;
}
bool is_type_complex(Type *t) {
	t = core_type(t);
	if (t->kind == Type_Basic) {
		return (t->Basic.flags & BasicFlag_Complex) != 0;
	}
	return false;
}
bool is_type_f32(Type *t) {
	t = core_type(t);
	if (t->kind == Type_Basic) {
		return t->Basic.kind == Basic_f32;
	}
	return false;
}
bool is_type_f64(Type *t) {
	t = core_type(t);
	if (t->kind == Type_Basic) {
		return t->Basic.kind == Basic_f64;
	}
	return false;
}
  634. bool is_type_pointer(Type *t) {
  635. t = base_type(t);
  636. if (t->kind == Type_Basic) {
  637. return (t->Basic.flags & BasicFlag_Pointer) != 0;
  638. }
  639. return t->kind == Type_Pointer;
  640. }
  641. bool is_type_tuple(Type *t) {
  642. t = base_type(t);
  643. return t->kind == Type_Tuple;
  644. }
  645. bool is_type_uintptr(Type *t) {
  646. if (t->kind == Type_Basic) {
  647. return (t->Basic.kind == Basic_uintptr);
  648. }
  649. return false;
  650. }
  651. bool is_type_i128_or_u128(Type *t) {
  652. if (t->kind == Type_Basic) {
  653. return (t->Basic.kind == Basic_i128) || (t->Basic.kind == Basic_u128);
  654. }
  655. return false;
  656. }
// Reports whether `t` is exactly `rawptr` (no named-type stripping).
bool is_type_rawptr(Type *t) {
	if (t->kind == Type_Basic) {
		return t->Basic.kind == Basic_rawptr;
	}
	return false;
}
// Reports whether `t` is exactly `u8` (no named-type stripping).
bool is_type_u8(Type *t) {
	if (t->kind == Type_Basic) {
		return t->Basic.kind == Basic_u8;
	}
	return false;
}
// Reports whether `t` is a fixed-size array type.
bool is_type_array(Type *t) {
	t = base_type(t);
	return t->kind == Type_Array;
}
// Reports whether `t` is a dynamic array type.
bool is_type_dynamic_array(Type *t) {
	t = base_type(t);
	return t->kind == Type_DynamicArray;
}
// Reports whether `t` is a slice type.
bool is_type_slice(Type *t) {
	t = base_type(t);
	return t->kind == Type_Slice;
}
// Reports whether `t` is a slice whose element type is u8 ([]u8).
bool is_type_u8_slice(Type *t) {
	t = base_type(t);
	if (t->kind == Type_Slice) {
		return is_type_u8(t->Slice.elem);
	}
	return false;
}
// Reports whether `t` is a procedure type.
bool is_type_proc(Type *t) {
	t = base_type(t);
	return t->kind == Type_Proc;
}
// Reports whether `t` is a polymorphic (parametric) procedure type.
bool is_type_poly_proc(Type *t) {
	t = base_type(t);
	return t->kind == Type_Proc && t->Proc.is_polymorphic;
}
// Returns the element type of a fixed array, or `t` itself when `t`
// is not an array.
Type *base_array_type(Type *t) {
	if (is_type_array(t)) {
		t = base_type(t);
		return t->Array.elem;
	}
	return t;
}
// Reports whether `t` is a generic ($T) placeholder type.
bool is_type_generic(Type *t) {
	t = base_type(t);
	return t->kind == Type_Generic;
}
  707. Type *core_array_type(Type *t) {
  708. for (;;) {
  709. Type *prev = t;
  710. t = base_array_type(t);
  711. if (prev == t) break;
  712. }
  713. return t;
  714. }
// Returns the floating-point element type of a complex type
// (e.g. complex128 -> f64). Panics on non-complex input.
Type *base_complex_elem_type(Type *t) {
	t = core_type(t);
	if (is_type_complex(t)) {
		switch (t->Basic.kind) {
		// case Basic_complex32: return t_f16;
		case Basic_complex64:      return t_f32;
		case Basic_complex128:     return t_f64;
		case Basic_UntypedComplex: return t_untyped_float;
		}
	}
	GB_PANIC("Invalid complex type");
	return t_invalid;
}
// Reports whether `t` is an ordinary (non-raw-union) struct.
bool is_type_struct(Type *t) {
	t = base_type(t);
	return (t->kind == Type_Struct && !t->Struct.is_raw_union);
}
// Reports whether `t` is a (tagged) union.
bool is_type_union(Type *t) {
	t = base_type(t);
	return t->kind == Type_Union;
}
// Reports whether `t` is a raw union (struct with overlapping fields).
bool is_type_raw_union(Type *t) {
	t = base_type(t);
	return (t->kind == Type_Struct && t->Struct.is_raw_union);
}
// Reports whether `t` is an enum type.
bool is_type_enum(Type *t) {
	t = base_type(t);
	return (t->kind == Type_Enum);
}
// Reports whether `t` is a bit field container type.
bool is_type_bit_field(Type *t) {
	t = base_type(t);
	return (t->kind == Type_BitField);
}
// Reports whether `t` is a single bit field value type.
bool is_type_bit_field_value(Type *t) {
	t = base_type(t);
	return (t->kind == Type_BitFieldValue);
}
// Reports whether `t` is a map type.
bool is_type_map(Type *t) {
	t = base_type(t);
	return t->kind == Type_Map;
}
// Reports whether `t` is the `any` type.
bool is_type_any(Type *t) {
	t = base_type(t);
	return (t->kind == Type_Basic && t->Basic.kind == Basic_any);
}
// Reports whether `t` is the type of an untyped `nil` literal.
bool is_type_untyped_nil(Type *t) {
	t = base_type(t);
	return (t->kind == Type_Basic && t->Basic.kind == Basic_UntypedNil);
}
// Reports whether `t` is the type of an untyped `---` (undef) literal.
bool is_type_untyped_undef(Type *t) {
	t = base_type(t);
	return (t->kind == Type_Basic && t->Basic.kind == Basic_UntypedUndef);
}
// Reports whether `t` is a union with no variants.
bool is_type_empty_union(Type *t) {
	t = base_type(t);
	return t->kind == Type_Union && t->Union.variants.count == 0;
}
// Reports whether `t` is a non-raw-union struct with no fields.
bool is_type_empty_struct(Type *t) {
	t = base_type(t);
	return t->kind == Type_Struct && !t->Struct.is_raw_union && t->Struct.fields.count == 0;
}
  776. bool is_type_valid_for_keys(Type *t) {
  777. t = core_type(t);
  778. if (t->kind == Type_Generic) {
  779. return true;
  780. }
  781. if (is_type_untyped(t)) {
  782. return false;
  783. }
  784. if (is_type_integer(t)) {
  785. return true;
  786. }
  787. if (is_type_float(t)) {
  788. return true;
  789. }
  790. if (is_type_string(t)) {
  791. return true;
  792. }
  793. if (is_type_pointer(t)) {
  794. return true;
  795. }
  796. return false;
  797. }
  798. bool is_type_indexable(Type *t) {
  799. Type *bt = base_type(t);
  800. switch (bt->kind) {
  801. case Type_Basic:
  802. return is_type_string(bt);
  803. case Type_Array:
  804. case Type_Slice:
  805. case Type_DynamicArray:
  806. case Type_Map:
  807. return true;
  808. }
  809. return false;
  810. }
// Reports whether `t` is a polymorphic (parametric) struct.
bool is_type_polymorphic_struct(Type *t) {
	t = base_type(t);
	if (t->kind == Type_Struct) {
		return t->Struct.is_polymorphic;
	}
	return false;
}
// Reports whether `t` is a polymorphic struct that has already been
// specialized with concrete arguments.
bool is_type_polymorphic_struct_specialized(Type *t) {
	t = base_type(t);
	if (t->kind == Type_Struct) {
		return t->Struct.is_polymorphic && t->Struct.is_poly_specialized;
	}
	return false;
}
  825. bool is_type_polymorphic(Type *t) {
  826. switch (t->kind) {
  827. case Type_Generic:
  828. return true;
  829. case Type_Named:
  830. return is_type_polymorphic_struct(t->Named.base);
  831. case Type_Pointer:
  832. return is_type_polymorphic(t->Pointer.elem);
  833. case Type_Array:
  834. if (t->Array.generic_type != nullptr) {
  835. return true;
  836. }
  837. return is_type_polymorphic(t->Array.elem);
  838. case Type_DynamicArray:
  839. return is_type_polymorphic(t->DynamicArray.elem);
  840. case Type_Slice:
  841. return is_type_polymorphic(t->Slice.elem);
  842. case Type_Tuple:
  843. for_array(i, t->Tuple.variables) {
  844. if (is_type_polymorphic(t->Tuple.variables[i]->type)) {
  845. return true;
  846. }
  847. }
  848. break;
  849. case Type_Proc:
  850. if (t->Proc.is_polymorphic) {
  851. return true;
  852. }
  853. #if 1
  854. if (t->Proc.param_count > 0 &&
  855. is_type_polymorphic(t->Proc.params)) {
  856. return true;
  857. }
  858. if (t->Proc.result_count > 0 &&
  859. is_type_polymorphic(t->Proc.results)) {
  860. return true;
  861. }
  862. #endif
  863. break;
  864. case Type_Enum:
  865. if (t->kind == Type_Enum) {
  866. if (t->Enum.base_type != nullptr) {
  867. return is_type_polymorphic(t->Enum.base_type);
  868. }
  869. return false;
  870. }
  871. break;
  872. case Type_Union:
  873. for_array(i, t->Union.variants) {
  874. if (is_type_polymorphic(t->Union.variants[i])) {
  875. return true;
  876. }
  877. }
  878. break;
  879. case Type_Struct:
  880. if (t->Struct.is_polymorphic) {
  881. return true;
  882. }
  883. for_array(i, t->Struct.fields) {
  884. if (is_type_polymorphic(t->Struct.fields[i]->type)) {
  885. return true;
  886. }
  887. }
  888. break;
  889. case Type_Map:
  890. if (is_type_polymorphic(t->Map.key)) {
  891. return true;
  892. }
  893. if (is_type_polymorphic(t->Map.value)) {
  894. return true;
  895. }
  896. break;
  897. }
  898. return false;
  899. }
// Reports whether values of `t` may be explicitly left undefined (`---`).
// Currently every type qualifies.
bool type_has_undef(Type *t) {
	t = base_type(t);
	return true;
}
// Reports whether `nil` is a valid value for type `t`.
bool type_has_nil(Type *t) {
	t = base_type(t);
	switch (t->kind) {
	case Type_Basic: {
		switch (t->Basic.kind) {
		case Basic_rawptr:
		case Basic_any:
			return true;
		}
		return false;
	} break;
	case Type_Slice:
	case Type_Proc:
	case Type_Pointer:
	case Type_DynamicArray:
	case Type_Map:
		return true;
	case Type_Union:
		// The zero tag of a union represents `nil`.
		return true;
	case Type_Struct:
		return false;
	}
	return false;
}
  928. bool elem_type_can_be_constant(Type *t) {
  929. if (is_type_any(t) || is_type_union(t)) {
  930. return false;
  931. }
  932. return true;
  933. }
// Reports whether values of `t` can be compared with == / !=.
bool is_type_comparable(Type *t) {
	t = base_type(t);
	switch (t->kind) {
	case Type_Basic:
		switch (t->Basic.kind) {
		case Basic_UntypedNil:
		case Basic_any:
			return false;
		case Basic_rune:
			return true;
		}
		// All remaining basic kinds are comparable.
		return true;
	case Type_Pointer:
		return true;
	case Type_Enum:
		// An enum is comparable iff its underlying type is.
		return is_type_comparable(core_type(t));
	case Type_Array:
		// Arrays compare element-wise.
		return is_type_comparable(t->Array.elem);
	case Type_Proc:
		return true;
	}
	return false;
}
// Structural type identity: reports whether `x` and `y` denote the same
// type. Named types are identical only when they share a type-name
// entity; enums are always unique (identity only by pointer equality).
bool are_types_identical(Type *x, Type *y) {
	if (x == y) {
		return true;
	}
	if ((x == nullptr && y != nullptr) ||
	    (x != nullptr && y == nullptr)) {
		return false;
	}
	switch (x->kind) {
	case Type_Generic:
		if (y->kind == Type_Generic) {
			return are_types_identical(x->Generic.specialized, y->Generic.specialized);
		}
		break;
	case Type_Basic:
		if (y->kind == Type_Basic) {
			return x->Basic.kind == y->Basic.kind;
		}
		break;
	case Type_Array:
		if (y->kind == Type_Array) {
			// Both the length and the element type must match.
			return (x->Array.count == y->Array.count) && are_types_identical(x->Array.elem, y->Array.elem);
		}
		break;
	case Type_DynamicArray:
		if (y->kind == Type_DynamicArray) {
			return are_types_identical(x->DynamicArray.elem, y->DynamicArray.elem);
		}
		break;
	case Type_Slice:
		if (y->kind == Type_Slice) {
			return are_types_identical(x->Slice.elem, y->Slice.elem);
		}
		break;
	case Type_BitField:
		if (y->kind == Type_BitField) {
			// Bit fields match when every field's offset and size match.
			if (x->BitField.field_count == y->BitField.field_count &&
			    x->BitField.custom_align == y->BitField.custom_align) {
				for (i32 i = 0; i < x->BitField.field_count; i++) {
					if (x->BitField.offsets[i] != y->BitField.offsets[i]) {
						return false;
					}
					if (x->BitField.sizes[i] != y->BitField.sizes[i]) {
						return false;
					}
				}
				return true;
			}
		}
		break;
	case Type_Enum:
		return x == y; // NOTE(bill): All enums are unique
	case Type_Union:
		if (y->kind == Type_Union) {
			if (x->Union.variants.count == y->Union.variants.count &&
			    x->Union.custom_align == y->Union.custom_align) {
				// NOTE(bill): zeroth variant is nullptr
				for_array(i, x->Union.variants) {
					if (!are_types_identical(x->Union.variants[i], y->Union.variants[i])) {
						return false;
					}
				}
				return true;
			}
		}
		break;
	case Type_Struct:
		if (y->kind == Type_Struct) {
			if (x->Struct.is_raw_union == y->Struct.is_raw_union &&
			    x->Struct.fields.count == y->Struct.fields.count &&
			    x->Struct.is_packed == y->Struct.is_packed &&
			    x->Struct.custom_align == y->Struct.custom_align) {
				// TODO(bill); Fix the custom alignment rule
				for_array(i, x->Struct.fields) {
					Entity *xf = x->Struct.fields[i];
					Entity *yf = y->Struct.fields[i];
					if (!are_types_identical(xf->type, yf->type)) {
						return false;
					}
					// Field names are part of struct identity.
					if (xf->token.string != yf->token.string) {
						return false;
					}
					// So is each field's `using` status.
					bool xf_is_using = (xf->flags&EntityFlag_Using) != 0;
					bool yf_is_using = (yf->flags&EntityFlag_Using) != 0;
					if (xf_is_using ^ yf_is_using) {
						return false;
					}
				}
				return true;
			}
		}
		break;
	case Type_Pointer:
		if (y->kind == Type_Pointer) {
			return are_types_identical(x->Pointer.elem, y->Pointer.elem);
		}
		break;
	case Type_Named:
		if (y->kind == Type_Named) {
			return x->Named.type_name == y->Named.type_name;
		}
		break;
	case Type_Tuple:
		if (y->kind == Type_Tuple) {
			if (x->Tuple.variables.count == y->Tuple.variables.count) {
				for_array(i, x->Tuple.variables) {
					Entity *xe = x->Tuple.variables[i];
					Entity *ye = y->Tuple.variables[i];
					if (xe->kind != ye->kind || !are_types_identical(xe->type, ye->type)) {
						return false;
					}
				}
				return true;
			}
		}
		break;
	case Type_Proc:
		if (y->kind == Type_Proc) {
			return x->Proc.calling_convention == y->Proc.calling_convention &&
			       x->Proc.c_vararg == y->Proc.c_vararg &&
			       x->Proc.variadic == y->Proc.variadic &&
			       are_types_identical(x->Proc.params, y->Proc.params) &&
			       are_types_identical(x->Proc.results, y->Proc.results);
		}
		break;
	case Type_Map:
		if (y->kind == Type_Map) {
			return are_types_identical(x->Map.key, y->Map.key) &&
			       are_types_identical(x->Map.value, y->Map.value);
		}
		break;
	}
	return false;
}
// Returns the smallest unsigned integer type able to hold a bit field
// value of `type`'s bit width; non-bit-field-value types are returned
// unchanged. A null type yields t_invalid.
Type *default_bit_field_value_type(Type *type) {
	if (type == nullptr) {
		return t_invalid;
	}
	Type *t = base_type(type);
	if (t->kind == Type_BitFieldValue) {
		i32 bits = t->BitFieldValue.bits;
		// Round the bit count up to a power-of-two number of bits.
		i32 size = 8*next_pow2((bits+7)/8);
		switch (size) {
		case 8: return t_u8;
		case 16: return t_u16;
		case 32: return t_u32;
		case 64: return t_u64;
		case 128: return t_u128;
		default: GB_PANIC("Too big of a bit size!"); break;
		}
	}
	return type;
}
// Maps an untyped (compile-time) type to its default concrete type,
// e.g. an untyped integer defaults to `int`; bit field values default
// to the smallest fitting unsigned integer. Typed types are returned
// unchanged; a null type yields t_invalid.
Type *default_type(Type *type) {
	if (type == nullptr) {
		return t_invalid;
	}
	if (type->kind == Type_Basic) {
		switch (type->Basic.kind) {
		case Basic_UntypedBool: return t_bool;
		case Basic_UntypedInteger: return t_int;
		case Basic_UntypedFloat: return t_f64;
		case Basic_UntypedComplex: return t_complex128;
		case Basic_UntypedString: return t_string;
		case Basic_UntypedRune: return t_rune;
		}
	}
	if (type->kind == Type_BitFieldValue) {
		return default_bit_field_value_type(type);
	}
	return type;
}
// NOTE(bill): Valid Compile time execution #run type
// Reports whether `type` is safe to use at compile-time execution:
// pointer-like, heap-backed, and procedure types are excluded.
bool is_type_cte_safe(Type *type) {
	type = default_type(base_type(type));
	switch (type->kind) {
	case Type_Basic:
		switch (type->Basic.kind) {
		case Basic_rawptr:
		case Basic_any:
			return false;
		}
		return true;
	case Type_Pointer:
		return false;
	case Type_Array:
		return is_type_cte_safe(type->Array.elem);
	case Type_DynamicArray:
		return false;
	case Type_Map:
		return false;
	case Type_Slice:
		return false;
	case Type_Struct: {
		// A struct is CTE-safe only if every field is.
		if (type->Struct.is_raw_union) {
			return false;
		}
		for_array(i, type->Struct.fields) {
			Entity *v = type->Struct.fields[i];
			if (!is_type_cte_safe(v->type)) {
				return false;
			}
		}
		return true;
	}
	case Type_Tuple: {
		for_array(i, type->Tuple.variables) {
			Entity *v = type->Tuple.variables[i];
			if (!is_type_cte_safe(v->type)) {
				return false;
			}
		}
		return true;
	}
	case Type_Proc:
		// TODO(bill): How should I handle procedures in the CTE stage?
		// return type->Proc.calling_convention == ProcCC_Odin;
		return false;
	}
	return false;
}
  1178. i64 union_variant_index(Type *u, Type *v) {
  1179. u = base_type(u);
  1180. GB_ASSERT(u->kind == Type_Union);
  1181. for_array(i, u->Union.variants) {
  1182. Type *vt = u->Union.variants[i];
  1183. if (are_types_identical(v, vt)) {
  1184. return cast(i64)(i+1);
  1185. }
  1186. }
  1187. return 0;
  1188. }
// Computes (and caches on the type) the byte size of a union's tag
// field, derived from the number of variants; always at least 1 byte.
i64 union_tag_size(gbAllocator a, Type *u) {
	u = base_type(u);
	GB_ASSERT(u->kind == Type_Union);
	if (u->Union.tag_size > 0) {
		// Already computed and cached.
		return u->Union.tag_size;
	}
	u64 n = cast(u64)u->Union.variants.count;
	// NOTE(review): presumably assumes n > 0; confirm floor_log2(0) is
	// well-defined or that empty unions never reach this point.
	i64 bytes = next_pow2(cast(i64)(floor_log2(n)/8 + 1));
	i64 tag_size = gb_max(bytes, 1);
	u->Union.tag_size = tag_size;
	return tag_size;
}
// Returns the unsigned integer type used to store a union's tag, based
// on the tag size computed above. Panics on an unexpected size.
Type *union_tag_type(gbAllocator a, Type *u) {
	i64 s = union_tag_size(a, u);
	switch (s) {
	case 1: return t_u8;
	case 2: return t_u16;
	case 4: return t_u32;
	case 8: return t_u64;
	case 16: return t_u128;
	}
	GB_PANIC("Invalid union_tag_size");
	return t_uint;
}
// Result of comparing two procedure types for overload resolution;
// names the first way in which the two signatures differ.
enum ProcTypeOverloadKind {
	ProcOverload_Identical, // The types are identical
	ProcOverload_CallingConvention,
	ProcOverload_ParamCount,
	ProcOverload_ParamVariadic,
	ProcOverload_ParamTypes,
	ProcOverload_ResultCount,
	ProcOverload_ResultTypes,
	ProcOverload_Polymorphic,
	ProcOverload_NotProcedure,
};
  1224. ProcTypeOverloadKind are_proc_types_overload_safe(Type *x, Type *y) {
  1225. if (x == nullptr && y == nullptr) return ProcOverload_NotProcedure;
  1226. if (x == nullptr && y != nullptr) return ProcOverload_NotProcedure;
  1227. if (x != nullptr && y == nullptr) return ProcOverload_NotProcedure;
  1228. if (!is_type_proc(x)) return ProcOverload_NotProcedure;
  1229. if (!is_type_proc(y)) return ProcOverload_NotProcedure;
  1230. TypeProc px = base_type(x)->Proc;
  1231. TypeProc py = base_type(y)->Proc;
  1232. // if (px.calling_convention != py.calling_convention) {
  1233. // return ProcOverload_CallingConvention;
  1234. // }
  1235. // if (px.is_polymorphic != py.is_polymorphic) {
  1236. // return ProcOverload_Polymorphic;
  1237. // }
  1238. if (px.param_count != py.param_count) {
  1239. return ProcOverload_ParamCount;
  1240. }
  1241. for (isize i = 0; i < px.param_count; i++) {
  1242. Entity *ex = px.params->Tuple.variables[i];
  1243. Entity *ey = py.params->Tuple.variables[i];
  1244. if (!are_types_identical(ex->type, ey->type)) {
  1245. return ProcOverload_ParamTypes;
  1246. }
  1247. }
  1248. // IMPORTANT TODO(bill): Determine the rules for overloading procedures with variadic parameters
  1249. if (px.variadic != py.variadic) {
  1250. return ProcOverload_ParamVariadic;
  1251. }
  1252. if (px.is_polymorphic != py.is_polymorphic) {
  1253. return ProcOverload_Polymorphic;
  1254. }
  1255. if (px.result_count != py.result_count) {
  1256. return ProcOverload_ResultCount;
  1257. }
  1258. for (isize i = 0; i < px.result_count; i++) {
  1259. Entity *ex = px.results->Tuple.variables[i];
  1260. Entity *ey = py.results->Tuple.variables[i];
  1261. if (!are_types_identical(ex->type, ey->type)) {
  1262. return ProcOverload_ResultTypes;
  1263. }
  1264. }
  1265. if (px.params != nullptr && py.params != nullptr) {
  1266. Entity *ex = px.params->Tuple.variables[0];
  1267. Entity *ey = py.params->Tuple.variables[0];
  1268. bool ok = are_types_identical(ex->type, ey->type);
  1269. if (ok) {
  1270. }
  1271. }
  1272. return ProcOverload_Identical;
  1273. }
Selection lookup_field_with_selection(gbAllocator a, Type *type_, String field_name, bool is_type, Selection sel);
// Convenience wrapper: field lookup starting from an empty selection path.
Selection lookup_field(gbAllocator a, Type *type_, String field_name, bool is_type) {
	return lookup_field_with_selection(a, type_, field_name, is_type, empty_selection);
}
  1278. Selection lookup_field_from_index(gbAllocator a, Type *type, i64 index) {
  1279. GB_ASSERT(is_type_struct(type) || is_type_union(type) || is_type_tuple(type));
  1280. type = base_type(type);
  1281. isize max_count = 0;
  1282. switch (type->kind) {
  1283. case Type_Struct: max_count = type->Struct.fields.count; break;
  1284. case Type_Tuple: max_count = type->Tuple.variables.count; break;
  1285. case Type_BitField: max_count = type->BitField.field_count; break;
  1286. }
  1287. if (index >= max_count) {
  1288. return empty_selection;
  1289. }
  1290. switch (type->kind) {
  1291. case Type_Struct:
  1292. for (isize i = 0; i < max_count; i++) {
  1293. Entity *f = type->Struct.fields[i];
  1294. if (f->kind == Entity_Variable) {
  1295. if (f->Variable.field_src_index == index) {
  1296. Array<i32> sel_array = {0};
  1297. array_init_count(&sel_array, a, 1);
  1298. sel_array[0] = cast(i32)i;
  1299. return make_selection(f, sel_array, false);
  1300. }
  1301. }
  1302. }
  1303. break;
  1304. case Type_Tuple:
  1305. for (isize i = 0; i < max_count; i++) {
  1306. Entity *f = type->Tuple.variables[i];
  1307. if (i == index) {
  1308. Array<i32> sel_array = {0};
  1309. array_init_count(&sel_array, a, 1);
  1310. sel_array[0] = cast(i32)i;
  1311. return make_selection(f, sel_array, false);
  1312. }
  1313. }
  1314. break;
  1315. case Type_BitField: {
  1316. Array<i32> sel_array = {0};
  1317. array_init_count(&sel_array, a, 1);
  1318. sel_array[0] = cast(i32)index;
  1319. return make_selection(type->BitField.fields[index], sel_array, false);
  1320. } break;
  1321. }
  1322. GB_PANIC("Illegal index");
  1323. return empty_selection;
  1324. }
// Lazily created entities representing the `data` and `type_info`
// pseudo-fields of the `any` type (see lookup_field_with_selection).
gb_global Entity *entity__any_data = nullptr;
gb_global Entity *entity__any_type_info = nullptr;
Entity *current_scope_lookup_entity(Scope *s, String name);
// Resolves `field_name` on `type_`, accumulating the access path in
// `sel`. Handles: the `any` pseudo-fields, swizzle-style names (x/y/z/w)
// on small arrays, type-level lookups (enum members, struct scope
// constants, `names`/`count`/`min_value`/`max_value`), struct fields
// (including `using` recursion), and bit fields. Returns `sel` with a
// null entity when nothing was found.
Selection lookup_field_with_selection(gbAllocator a, Type *type_, String field_name, bool is_type, Selection sel) {
	GB_ASSERT(type_ != nullptr);
	if (is_blank_ident(field_name)) {
		// `_` never names a field.
		return empty_selection;
	}
	Type *type = type_deref(type_);
	bool is_ptr = type != type_;
	sel.indirect = sel.indirect || is_ptr;
	type = base_type(type);
	if (type->kind == Type_Basic) {
		switch (type->Basic.kind) {
		case Basic_any: {
	#if 1
			// IMPORTANT TODO(bill): Should these members be available to should I only allow them with
			// `Raw_Any` type?
			String data_str = str_lit("data");
			String type_info_str = str_lit("type_info");
			// Create the shared pseudo-field entities on first use.
			if (entity__any_data == nullptr) {
				entity__any_data = make_entity_field(a, nullptr, make_token_ident(data_str), t_rawptr, false, 0);
			}
			if (entity__any_type_info == nullptr) {
				entity__any_type_info = make_entity_field(a, nullptr, make_token_ident(type_info_str), t_type_info_ptr, false, 1);
			}
			if (field_name == data_str) {
				selection_add_index(&sel, 0);
				sel.entity = entity__any_data;; // NOTE(review): stray extra `;` kept as-is
				return sel;
			} else if (field_name == type_info_str) {
				selection_add_index(&sel, 1);
				sel.entity = entity__any_type_info;
				return sel;
			}
	#endif
		} break;
		}
		return sel;
	} else if (type->kind == Type_Array) {
		if (type->Array.count <= 4) {
			// x/y/z/w accessors for arrays of length <= 4; each case
			// falls through so e.g. a length-4 array accepts all four.
			// HACK(bill): Memory leak
			switch (type->Array.count) {
			#define _ARRAY_FIELD_CASE(_length, _name) \
			case (_length): \
				if (field_name == _name) { \
					selection_add_index(&sel, (_length)-1); \
					sel.entity = make_entity_array_elem(a, nullptr, make_token_ident(str_lit(_name)), type->Array.elem, (_length)-1); \
					return sel; \
				} \
				/*fallthrough*/
			_ARRAY_FIELD_CASE(4, "w");
			_ARRAY_FIELD_CASE(3, "z");
			_ARRAY_FIELD_CASE(2, "y");
			_ARRAY_FIELD_CASE(1, "x");
			default: break;
			#undef _ARRAY_FIELD_CASE
			}
		}
	}
	if (is_type) {
		// Lookup on the TYPE itself (e.g. Enum.names), not on a value.
		switch (type->kind) {
		case Type_Struct:
			if (type->Struct.names != nullptr &&
			    field_name == "names") {
				sel.entity = type->Struct.names;
				return sel;
			}
			break;
		case Type_Enum:
			if (type->Enum.names != nullptr &&
			    field_name == "names") {
				sel.entity = type->Enum.names;
				return sel;
			}
			break;
		}
		if (is_type_enum(type)) {
			// NOTE(bill): These may not have been added yet, so check in case
			if (type->Enum.count != nullptr) {
				if (field_name == "count") {
					sel.entity = type->Enum.count;
					return sel;
				}
				if (field_name == "min_value") {
					sel.entity = type->Enum.min_value;
					return sel;
				}
				if (field_name == "max_value") {
					sel.entity = type->Enum.max_value;
					return sel;
				}
			}
			// Then try the enum's member constants by name.
			for (isize i = 0; i < type->Enum.field_count; i++) {
				Entity *f = type->Enum.fields[i];
				GB_ASSERT(f->kind == Entity_Constant);
				String str = f->token.string;
				if (field_name == str) {
					sel.entity = f;
					// selection_add_index(&sel, i);
					return sel;
				}
			}
		}
		if (type->kind == Type_Struct) {
			// Non-variable entities declared in the struct's scope
			// (constants, nested types, procedures).
			Scope *s = type->Struct.scope;
			if (s != nullptr) {
				Entity *found = current_scope_lookup_entity(s, field_name);
				if (found != nullptr && found->kind != Entity_Variable) {
					sel.entity = found;
					return sel;
				}
			}
		}
		if (type->kind == Type_Generic && type->Generic.specialized != nullptr) {
			// Defer to the specialization of a generic type.
			Type *specialized = type->Generic.specialized;
			return lookup_field_with_selection(a, specialized, field_name, is_type, sel);
		}
	} else if (type->kind == Type_Union) {
		// Unions have no directly addressable fields.
	} else if (type->kind == Type_Struct) {
		for_array(i, type->Struct.fields) {
			Entity *f = type->Struct.fields[i];
			if (f->kind != Entity_Variable || (f->flags & EntityFlag_Field) == 0) {
				continue;
			}
			String str = f->token.string;
			if (field_name == str) {
				selection_add_index(&sel, i); // HACK(bill): Leaky memory
				sel.entity = f;
				return sel;
			}
			if (f->flags & EntityFlag_Using) {
				// Recurse into `using` fields; roll back the path on miss.
				isize prev_count = sel.index.count;
				selection_add_index(&sel, i); // HACK(bill): Leaky memory
				sel = lookup_field_with_selection(a, f->type, field_name, is_type, sel);
				if (sel.entity != nullptr) {
					if (is_type_pointer(f->type)) {
						sel.indirect = true;
					}
					return sel;
				}
				sel.index.count = prev_count;
			}
		}
	} else if (type->kind == Type_BitField) {
		for (isize i = 0; i < type->BitField.field_count; i++) {
			Entity *f = type->BitField.fields[i];
			if (f->kind != Entity_Variable ||
			    (f->flags & EntityFlag_BitFieldValue) == 0) {
				continue;
			}
			String str = f->token.string;
			if (field_name == str) {
				selection_add_index(&sel, i); // HACK(bill): Leaky memory
				sel.entity = f;
				return sel;
			}
		}
	}
	return sel;
}
// Stack of named-type entities currently being expanded; used to detect
// illegal (cyclic) type declarations during size/align computation.
struct TypePath {
	Array<Entity *> path; // Entity_TypeName;
	bool failure; // set once an illegal cycle has been reported
};
// Initializes the path with a heap-backed array.
void type_path_init(TypePath *tp) {
	// TODO(bill): Use an allocator that uses a backing array if it can and then use alternative allocator when exhausted
	array_init(&tp->path, heap_allocator());
}
// Releases the path's backing storage.
void type_path_free(TypePath *tp) {
	array_free(&tp->path);
}
// Reports an "illegal declaration cycle" error for the entity at
// `start_index` in the path, prints each link of the cycle, and marks
// the offending types as failed so later size/align queries can
// short-circuit with FAILURE_SIZE/FAILURE_ALIGNMENT.
void type_path_print_illegal_cycle(TypePath *tp, isize start_index) {
	GB_ASSERT(tp != nullptr);
	GB_ASSERT(start_index < tp->path.count);
	Entity *e = tp->path[start_index];
	GB_ASSERT(e != nullptr);
	error(e->token, "Illegal declaration cycle of `%.*s`", LIT(e->token.string));
	// NOTE(bill): Print cycle, if it's deep enough
	for (isize j = start_index; j < tp->path.count; j++) {
		// NOTE: this `e` deliberately shadows the outer one while printing.
		Entity *e = tp->path[j];
		error(e->token, "\t%.*s refers to", LIT(e->token.string));
	}
	// NOTE(bill): This will only print if the path count > 1
	error(e->token, "\t%.*s", LIT(e->token.string));
	tp->failure = true;
	e->type->failure = true;
	base_type(e->type)->failure = true;
}
// Pushes the named type `t` onto the cycle-detection path. Returns true
// when something was pushed (callers use this to balance type_path_pop);
// non-named types are ignored. If `t`'s entity is already on the path,
// the cycle is reported as an error (but the push still happens).
bool type_path_push(TypePath *tp, Type *t) {
	GB_ASSERT(tp != nullptr);
	if (t->kind != Type_Named) {
		return false;
	}
	Entity *e = t->Named.type_name;
	for (isize i = 0; i < tp->path.count; i++) {
		Entity *p = tp->path[i];
		if (p == e) {
			type_path_print_illegal_cycle(tp, i);
		}
	}
	array_add(&tp->path, e);
	return true;
}
  1529. void type_path_pop(TypePath *tp) {
  1530. if (tp != nullptr && tp->path.count > 0) {
  1531. array_pop(&tp->path);
  1532. }
  1533. }
// Size/alignment reported for types whose declaration was found cyclic.
#define FAILURE_SIZE 0
#define FAILURE_ALIGNMENT 0
i64 type_size_of_internal (gbAllocator allocator, Type *t, TypePath *path);
i64 type_align_of_internal(gbAllocator allocator, Type *t, TypePath *path);
  1538. i64 align_formula(i64 size, i64 align) {
  1539. if (align > 0) {
  1540. i64 result = size + align-1;
  1541. return result - result%align;
  1542. }
  1543. return size;
  1544. }
// Computes the size in bytes of type `t`, using a fresh TypePath for
// declaration-cycle detection. A null type has size 0.
i64 type_size_of(gbAllocator allocator, Type *t) {
	if (t == nullptr) {
		return 0;
	}
	i64 size;
	TypePath path = {0};
	type_path_init(&path);
	size = type_size_of_internal(allocator, t, &path);
	type_path_free(&path);
	return size;
}
// Computes the alignment in bytes of type `t`, using a fresh TypePath
// for declaration-cycle detection. A null type has alignment 1.
i64 type_align_of(gbAllocator allocator, Type *t) {
	if (t == nullptr) {
		return 1;
	}
	i64 align;
	TypePath path = {0};
	type_path_init(&path);
	align = type_align_of_internal(allocator, t, &path);
	type_path_free(&path);
	return align;
}
// Computes the alignment in bytes of type `t`.
// `path` tracks the chain of types currently being measured so that cyclic
// value-type definitions are detected instead of recursing forever; on such
// a failure the FAILURE_ALIGNMENT sentinel is returned.
i64 type_align_of_internal(gbAllocator allocator, Type *t, TypePath *path) {
	GB_ASSERT(path != nullptr);
	if (t->failure) {
		// Type was already marked invalid; propagate the sentinel.
		return FAILURE_ALIGNMENT;
	}
	t = base_type(t);
	switch (t->kind) {
	case Type_Basic: {
		GB_ASSERT(is_type_typed(t));
		switch (t->Basic.kind) {
		case Basic_string: return build_context.word_size;
		case Basic_any: return build_context.word_size;
		case Basic_int: case Basic_uint: case Basic_uintptr: case Basic_rawptr:
			return build_context.word_size;
		case Basic_complex64: case Basic_complex128:
			// A complex value aligns like one of its two scalar halves.
			return type_size_of_internal(allocator, t, path) / 2;
		}
		// Other basic kinds fall through to the power-of-two default below.
	} break;
	case Type_Array: {
		Type *elem = t->Array.elem;
		bool pop = type_path_push(path, elem);
		if (path->failure) {
			// Cyclic definition detected through the element type.
			return FAILURE_ALIGNMENT;
		}
		i64 align = type_align_of_internal(allocator, t->Array.elem, path);
		if (pop) type_path_pop(path);
		return align;
	}
	case Type_DynamicArray:
		// data, count, capacity, allocator
		return build_context.word_size;
	case Type_Slice:
		return build_context.word_size;
	case Type_Tuple: {
		// Alignment of a tuple is the maximum alignment of its members.
		// NOTE(review): members are not pushed onto `path` here, unlike the
		// Array/Union/Struct cases — presumably tuples cannot be
		// self-referential; confirm.
		i64 max = 1;
		for_array(i, t->Tuple.variables) {
			i64 align = type_align_of_internal(allocator, t->Tuple.variables[i]->type, path);
			if (max < align) {
				max = align;
			}
		}
		return max;
	} break;
	case Type_Map:
		generate_map_internal_types(allocator, t);
		// return type_align_of_internal(allocator, t->Map.generated_struct_type, path);
		return build_context.word_size;
	case Type_Enum:
		// An enum aligns like its underlying base type.
		return type_align_of_internal(allocator, t->Enum.base_type, path);
	case Type_Union: {
		if (t->Union.variants.count == 0) {
			return 1;
		}
		if (t->Union.custom_align > 0) {
			// A user-specified #align overrides the computed alignment.
			return gb_clamp(t->Union.custom_align, 1, build_context.max_align);
		}
		// Maximum alignment over all variants, with cycle detection.
		i64 max = 1;
		for_array(i, t->Union.variants) {
			Type *variant = t->Union.variants[i];
			bool pop = type_path_push(path, variant);
			if (path->failure) {
				return FAILURE_ALIGNMENT;
			}
			i64 align = type_align_of_internal(allocator, variant, path);
			if (pop) type_path_pop(path);
			if (max < align) {
				max = align;
			}
		}
		return max;
	} break;
	case Type_Struct: {
		if (t->Struct.custom_align > 0) {
			// A user-specified #align overrides the computed alignment.
			return gb_clamp(t->Struct.custom_align, 1, build_context.max_align);
		}
		if (t->Struct.is_raw_union) {
			// Raw union: maximum alignment over all fields.
			i64 max = 1;
			for_array(i, t->Struct.fields) {
				Type *field_type = t->Struct.fields[i]->type;
				bool pop = type_path_push(path, field_type);
				if (path->failure) {
					return FAILURE_ALIGNMENT;
				}
				i64 align = type_align_of_internal(allocator, field_type, path);
				if (pop) type_path_pop(path);
				if (max < align) {
					max = align;
				}
			}
			return max;
		} else if (t->Struct.fields.count > 0) {
			i64 max = 1;
			// NOTE(bill): Check the fields to check for cyclic definitions
			for_array(i, t->Struct.fields) {
				Type *field_type = t->Struct.fields[i]->type;
				bool pop = type_path_push(path, field_type);
				if (path->failure) return FAILURE_ALIGNMENT;
				i64 align = type_align_of_internal(allocator, field_type, path);
				if (pop) type_path_pop(path);
				if (max < align) {
					max = align;
				}
			}
			if (t->Struct.is_packed) {
				// Packed structs are byte-aligned, but the fields were still
				// walked above so that cycles are reported.
				return 1;
			}
			return max;
		}
		// Empty, non-custom-aligned struct falls through to the default below.
	} break;
	case Type_BitField: {
		i64 align = 1;
		if (t->BitField.custom_align > 0) {
			align = t->BitField.custom_align;
		}
		return gb_clamp(next_pow2(align), 1, build_context.max_align);
	} break;
	}
	// return gb_clamp(next_pow2(type_size_of(allocator, t)), 1, build_context.max_align);
	// NOTE(bill): Things that are bigger than build_context.word_size, are actually comprised of smaller types
	// TODO(bill): Is this correct for 128-bit types (integers)?
	return gb_clamp(next_pow2(type_size_of_internal(allocator, t, path)), 1, build_context.word_size);
}
  1689. Array<i64> type_set_offsets_of(gbAllocator allocator, Array<Entity *> fields, bool is_packed, bool is_raw_union) {
  1690. Array<i64> offsets = {};
  1691. array_init_count(&offsets, allocator, fields.count);
  1692. i64 curr_offset = 0;
  1693. if (is_raw_union) {
  1694. for_array(i, fields) {
  1695. offsets[i] = 0;
  1696. }
  1697. } else if (is_packed) {
  1698. for_array(i, fields) {
  1699. i64 size = type_size_of(allocator, fields[i]->type);
  1700. offsets[i] = curr_offset;
  1701. curr_offset += size;
  1702. }
  1703. } else {
  1704. for_array(i, fields) {
  1705. Type *t = fields[i]->type;
  1706. i64 align = gb_max(type_align_of(allocator, t), 1);
  1707. i64 size = gb_max(type_size_of(allocator, t), 0);
  1708. curr_offset = align_formula(curr_offset, align);
  1709. offsets[i] = curr_offset;
  1710. curr_offset += size;
  1711. }
  1712. }
  1713. return offsets;
  1714. }
  1715. bool type_set_offsets(gbAllocator allocator, Type *t) {
  1716. t = base_type(t);
  1717. if (t->kind == Type_Struct) {
  1718. if (!t->Struct.are_offsets_set) {
  1719. t->Struct.are_offsets_being_processed = true;
  1720. t->Struct.offsets = type_set_offsets_of(allocator, t->Struct.fields, t->Struct.is_packed, t->Struct.is_raw_union);
  1721. t->Struct.are_offsets_being_processed = false;
  1722. t->Struct.are_offsets_set = true;
  1723. return true;
  1724. }
  1725. } else if (is_type_tuple(t)) {
  1726. if (!t->Tuple.are_offsets_set) {
  1727. t->Struct.are_offsets_being_processed = true;
  1728. t->Tuple.offsets = type_set_offsets_of(allocator, t->Tuple.variables, false, false);
  1729. t->Struct.are_offsets_being_processed = false;
  1730. t->Tuple.are_offsets_set = true;
  1731. return true;
  1732. }
  1733. } else {
  1734. GB_PANIC("Invalid type for setting offsets");
  1735. }
  1736. return false;
  1737. }
  1738. i64 type_size_of_internal(gbAllocator allocator, Type *t, TypePath *path) {
  1739. if (t->failure) {
  1740. return FAILURE_SIZE;
  1741. }
  1742. switch (t->kind) {
  1743. case Type_Named: {
  1744. bool pop = type_path_push(path, t);
  1745. if (path->failure) {
  1746. return FAILURE_ALIGNMENT;
  1747. }
  1748. i64 size = type_size_of_internal(allocator, t->Named.base, path);
  1749. if (pop) type_path_pop(path);
  1750. return size;
  1751. } break;
  1752. case Type_Basic: {
  1753. GB_ASSERT_MSG(is_type_typed(t), "%s", type_to_string(t));
  1754. BasicKind kind = t->Basic.kind;
  1755. i64 size = t->Basic.size;
  1756. if (size > 0) {
  1757. return size;
  1758. }
  1759. switch (kind) {
  1760. case Basic_string: return 2*build_context.word_size;
  1761. case Basic_any: return 2*build_context.word_size;
  1762. case Basic_int: case Basic_uint: case Basic_uintptr: case Basic_rawptr:
  1763. return build_context.word_size;
  1764. }
  1765. } break;
  1766. case Type_Pointer:
  1767. return build_context.word_size;
  1768. case Type_Array: {
  1769. i64 count, align, size, alignment;
  1770. count = t->Array.count;
  1771. if (count == 0) {
  1772. return 0;
  1773. }
  1774. align = type_align_of_internal(allocator, t->Array.elem, path);
  1775. if (path->failure) {
  1776. return FAILURE_SIZE;
  1777. }
  1778. size = type_size_of_internal( allocator, t->Array.elem, path);
  1779. alignment = align_formula(size, align);
  1780. return alignment*(count-1) + size;
  1781. } break;
  1782. case Type_Slice: // ptr + len
  1783. return 2 * build_context.word_size;
  1784. case Type_DynamicArray:
  1785. // data + len + cap + allocator(procedure+data)
  1786. return 3*build_context.word_size + 2*build_context.word_size;
  1787. case Type_Map:
  1788. generate_map_internal_types(allocator, t);
  1789. // return type_size_of_internal(allocator, t->Map.generated_struct_type, path);
  1790. return build_context.word_size;
  1791. case Type_Tuple: {
  1792. i64 count, align, size;
  1793. count = t->Tuple.variables.count;
  1794. if (count == 0) {
  1795. return 0;
  1796. }
  1797. align = type_align_of_internal(allocator, t, path);
  1798. type_set_offsets(allocator, t);
  1799. size = t->Tuple.offsets[count-1] + type_size_of_internal(allocator, t->Tuple.variables[count-1]->type, path);
  1800. return align_formula(size, align);
  1801. } break;
  1802. case Type_Enum:
  1803. return type_size_of_internal(allocator, t->Enum.base_type, path);
  1804. case Type_Union: {
  1805. if (t->Union.variants.count == 0) {
  1806. return 0;
  1807. }
  1808. i64 align = type_align_of_internal(allocator, t, path);
  1809. if (path->failure) {
  1810. return FAILURE_SIZE;
  1811. }
  1812. i64 max = 0;
  1813. i64 field_size = 0;
  1814. for_array(i, t->Union.variants) {
  1815. Type *variant_type = t->Union.variants[i];
  1816. i64 size = type_size_of_internal(allocator, variant_type, path);
  1817. if (max < size) {
  1818. max = size;
  1819. }
  1820. }
  1821. // NOTE(bill): Align to tag
  1822. i64 tag_size = union_tag_size(allocator, t);
  1823. i64 size = align_formula(max, tag_size);
  1824. // NOTE(bill): Calculate the padding between the common fields and the tag
  1825. t->Union.tag_size = tag_size;
  1826. t->Union.variant_block_size = size - field_size;
  1827. return align_formula(size + tag_size, align);
  1828. } break;
  1829. case Type_Struct: {
  1830. if (t->Struct.is_raw_union) {
  1831. i64 count = t->Struct.fields.count;
  1832. i64 align = type_align_of_internal(allocator, t, path);
  1833. if (path->failure) {
  1834. return FAILURE_SIZE;
  1835. }
  1836. i64 max = 0;
  1837. for (isize i = 0; i < count; i++) {
  1838. i64 size = type_size_of_internal(allocator, t->Struct.fields[i]->type, path);
  1839. if (max < size) {
  1840. max = size;
  1841. }
  1842. }
  1843. // TODO(bill): Is this how it should work?
  1844. return align_formula(max, align);
  1845. } else {
  1846. i64 count = 0, size = 0, align = 0;
  1847. count = t->Struct.fields.count;
  1848. if (count == 0) {
  1849. return 0;
  1850. }
  1851. align = type_align_of_internal(allocator, t, path);
  1852. if (path->failure) {
  1853. return FAILURE_SIZE;
  1854. }
  1855. if (t->Struct.are_offsets_being_processed && t->Struct.offsets.data == nullptr) {
  1856. type_path_print_illegal_cycle(path, path->path.count-1);
  1857. return FAILURE_SIZE;
  1858. }
  1859. type_set_offsets(allocator, t);
  1860. size = t->Struct.offsets[count-1] + type_size_of_internal(allocator, t->Struct.fields[count-1]->type, path);
  1861. return align_formula(size, align);
  1862. }
  1863. } break;
  1864. case Type_BitField: {
  1865. i64 align = 8*type_align_of_internal(allocator, t, path);
  1866. i64 end = 0;
  1867. if (t->BitField.field_count > 0) {
  1868. i64 last = t->BitField.field_count-1;
  1869. end = t->BitField.offsets[last] + t->BitField.sizes[last];
  1870. }
  1871. i64 bits = align_formula(end, align);
  1872. GB_ASSERT((bits%8) == 0);
  1873. return bits/8;
  1874. } break;
  1875. }
  1876. // Catch all
  1877. return build_context.word_size;
  1878. }
  1879. i64 type_offset_of(gbAllocator allocator, Type *t, i32 index) {
  1880. t = base_type(t);
  1881. if (t->kind == Type_Struct) {
  1882. type_set_offsets(allocator, t);
  1883. if (gb_is_between(index, 0, t->Struct.fields.count-1)) {
  1884. return t->Struct.offsets[index];
  1885. }
  1886. } else if (t->kind == Type_Tuple) {
  1887. type_set_offsets(allocator, t);
  1888. if (gb_is_between(index, 0, t->Tuple.variables.count-1)) {
  1889. return t->Tuple.offsets[index];
  1890. }
  1891. } else if (t->kind == Type_Basic) {
  1892. if (t->Basic.kind == Basic_string) {
  1893. switch (index) {
  1894. case 0: return 0; // data
  1895. case 1: return build_context.word_size; // len
  1896. }
  1897. } else if (t->Basic.kind == Basic_any) {
  1898. switch (index) {
  1899. case 0: return 0; // type_info
  1900. case 1: return build_context.word_size; // data
  1901. }
  1902. }
  1903. } else if (t->kind == Type_Slice) {
  1904. switch (index) {
  1905. case 0: return 0; // data
  1906. case 1: return 1*build_context.word_size; // len
  1907. case 2: return 2*build_context.word_size; // cap
  1908. }
  1909. } else if (t->kind == Type_DynamicArray) {
  1910. switch (index) {
  1911. case 0: return 0; // data
  1912. case 1: return 1*build_context.word_size; // len
  1913. case 2: return 2*build_context.word_size; // cap
  1914. case 3: return 3*build_context.word_size; // allocator
  1915. }
  1916. } else if (t->kind == Type_Union) {
  1917. i64 s = type_size_of(allocator, t);
  1918. switch (index) {
  1919. case -1: return align_formula(t->Union.variant_block_size, build_context.word_size); // __type_info
  1920. }
  1921. }
  1922. return 0;
  1923. }
  1924. i64 type_offset_of_from_selection(gbAllocator allocator, Type *type, Selection sel) {
  1925. GB_ASSERT(sel.indirect == false);
  1926. Type *t = type;
  1927. i64 offset = 0;
  1928. for_array(i, sel.index) {
  1929. i32 index = sel.index[i];
  1930. t = base_type(t);
  1931. offset += type_offset_of(allocator, t, index);
  1932. if (t->kind == Type_Struct && !t->Struct.is_raw_union) {
  1933. t = t->Struct.fields[index]->type;
  1934. } else {
  1935. // NOTE(bill): No need to worry about custom types, just need the alignment
  1936. switch (t->kind) {
  1937. case Type_Basic:
  1938. if (t->Basic.kind == Basic_string) {
  1939. switch (index) {
  1940. case 0: t = t_rawptr; break;
  1941. case 1: t = t_int; break;
  1942. }
  1943. } else if (t->Basic.kind == Basic_any) {
  1944. switch (index) {
  1945. case 0: t = t_type_info_ptr; break;
  1946. case 1: t = t_rawptr; break;
  1947. }
  1948. }
  1949. break;
  1950. case Type_Slice:
  1951. switch (index) {
  1952. case 0: t = t_rawptr; break;
  1953. case 1: t = t_int; break;
  1954. case 2: t = t_int; break;
  1955. }
  1956. break;
  1957. case Type_DynamicArray:
  1958. switch (index) {
  1959. case 0: t = t_rawptr; break;
  1960. case 1: t = t_int; break;
  1961. case 2: t = t_int; break;
  1962. case 3: t = t_allocator; break;
  1963. }
  1964. break;
  1965. }
  1966. }
  1967. }
  1968. return offset;
  1969. }
  1970. gbString write_type_to_string(gbString str, Type *type) {
  1971. if (type == nullptr) {
  1972. return gb_string_appendc(str, "<no type>");
  1973. }
  1974. switch (type->kind) {
  1975. case Type_Basic:
  1976. str = gb_string_append_length(str, type->Basic.name.text, type->Basic.name.len);
  1977. break;
  1978. case Type_Generic:
  1979. if (type->Generic.name.len == 0) {
  1980. str = gb_string_appendc(str, "type");
  1981. } else {
  1982. String name = type->Generic.name;
  1983. str = gb_string_append_rune(str, '$');
  1984. str = gb_string_append_length(str, name.text, name.len);
  1985. if (type->Generic.specialized != nullptr) {
  1986. str = gb_string_append_rune(str, '/');
  1987. str = write_type_to_string(str, type->Generic.specialized);
  1988. }
  1989. }
  1990. break;
  1991. case Type_Pointer:
  1992. str = gb_string_append_rune(str, '^');
  1993. str = write_type_to_string(str, type->Pointer.elem);
  1994. break;
  1995. case Type_Array:
  1996. str = gb_string_appendc(str, gb_bprintf("[%d]", cast(int)type->Array.count));
  1997. str = write_type_to_string(str, type->Array.elem);
  1998. break;
  1999. case Type_Slice:
  2000. str = gb_string_appendc(str, "[]");
  2001. str = write_type_to_string(str, type->Array.elem);
  2002. break;
  2003. case Type_DynamicArray:
  2004. str = gb_string_appendc(str, "[dynamic]");
  2005. str = write_type_to_string(str, type->DynamicArray.elem);
  2006. break;
  2007. case Type_Enum:
  2008. str = gb_string_appendc(str, "enum");
  2009. if (type->Enum.base_type != nullptr) {
  2010. str = gb_string_appendc(str, " ");
  2011. str = write_type_to_string(str, type->Enum.base_type);
  2012. }
  2013. str = gb_string_appendc(str, " {");
  2014. for (isize i = 0; i < type->Enum.field_count; i++) {
  2015. Entity *f = type->Enum.fields[i];
  2016. GB_ASSERT(f->kind == Entity_Constant);
  2017. if (i > 0) {
  2018. str = gb_string_appendc(str, ", ");
  2019. }
  2020. str = gb_string_append_length(str, f->token.string.text, f->token.string.len);
  2021. // str = gb_string_appendc(str, " = ");
  2022. }
  2023. str = gb_string_append_rune(str, '}');
  2024. break;
  2025. case Type_Union:
  2026. str = gb_string_appendc(str, "union {");
  2027. for_array(i, type->Union.variants) {
  2028. Type *t = type->Union.variants[i];
  2029. if (i > 0) str = gb_string_appendc(str, ", ");
  2030. str = write_type_to_string(str, t);
  2031. }
  2032. str = gb_string_append_rune(str, '}');
  2033. break;
  2034. case Type_Struct: {
  2035. str = gb_string_appendc(str, "struct");
  2036. if (type->Struct.is_packed) str = gb_string_appendc(str, " #packed");
  2037. if (type->Struct.is_raw_union) str = gb_string_appendc(str, " #raw_union");
  2038. str = gb_string_appendc(str, " {");
  2039. for_array(i, type->Struct.fields) {
  2040. Entity *f = type->Struct.fields[i];
  2041. GB_ASSERT(f->kind == Entity_Variable);
  2042. if (i > 0) {
  2043. str = gb_string_appendc(str, ", ");
  2044. }
  2045. str = gb_string_append_length(str, f->token.string.text, f->token.string.len);
  2046. str = gb_string_appendc(str, ": ");
  2047. str = write_type_to_string(str, f->type);
  2048. }
  2049. str = gb_string_append_rune(str, '}');
  2050. } break;
  2051. case Type_Map: {
  2052. str = gb_string_appendc(str, "map[");
  2053. str = write_type_to_string(str, type->Map.key);
  2054. str = gb_string_append_rune(str, ']');
  2055. str = write_type_to_string(str, type->Map.value);
  2056. } break;
  2057. case Type_Named:
  2058. if (type->Named.type_name != nullptr) {
  2059. str = gb_string_append_length(str, type->Named.name.text, type->Named.name.len);
  2060. } else {
  2061. // NOTE(bill): Just in case
  2062. str = gb_string_appendc(str, "<named type>");
  2063. }
  2064. break;
  2065. case Type_Tuple:
  2066. if (type->Tuple.variables.count > 0) {
  2067. isize comma_index = 0;
  2068. for_array(i, type->Tuple.variables) {
  2069. Entity *var = type->Tuple.variables[i];
  2070. if (var != nullptr) {
  2071. if (var->kind == Entity_Constant) {
  2072. // Ignore
  2073. continue;
  2074. }
  2075. if (comma_index++ > 0) {
  2076. str = gb_string_appendc(str, ", ");
  2077. }
  2078. if (var->kind == Entity_Variable) {
  2079. if (var->flags&EntityFlag_CVarArg) {
  2080. str = gb_string_appendc(str, "#c_vararg ");
  2081. }
  2082. if (var->flags&EntityFlag_Ellipsis) {
  2083. Type *slice = base_type(var->type);
  2084. str = gb_string_appendc(str, "...");
  2085. GB_ASSERT(var->type->kind == Type_Slice);
  2086. str = write_type_to_string(str, slice->Slice.elem);
  2087. } else {
  2088. str = write_type_to_string(str, var->type);
  2089. }
  2090. } else {
  2091. GB_ASSERT(var->kind == Entity_TypeName);
  2092. if (var->type->kind == Type_Generic) {
  2093. str = gb_string_appendc(str, "type/");
  2094. str = write_type_to_string(str, var->type);
  2095. } else {
  2096. str = gb_string_appendc(str, "type");
  2097. }
  2098. }
  2099. }
  2100. }
  2101. }
  2102. break;
  2103. case Type_Proc:
  2104. str = gb_string_appendc(str, "proc");
  2105. switch (type->Proc.calling_convention) {
  2106. case ProcCC_Odin:
  2107. break;
  2108. case ProcCC_Contextless:
  2109. str = gb_string_appendc(str, " \"contextless\" ");
  2110. break;
  2111. case ProcCC_CDecl:
  2112. str = gb_string_appendc(str, " \"cdecl\" ");
  2113. break;
  2114. case ProcCC_StdCall:
  2115. str = gb_string_appendc(str, " \"stdcall\" ");
  2116. break;
  2117. case ProcCC_FastCall:
  2118. str = gb_string_appendc(str, " \"fastcall\" ");
  2119. break;
  2120. // case ProcCC_VectorCall:
  2121. // str = gb_string_appendc(str, " \"vectorcall\" ");
  2122. // break;
  2123. // case ProcCC_ClrCall:
  2124. // str = gb_string_appendc(str, " \"clrcall\" ");
  2125. // break;
  2126. }
  2127. str = gb_string_appendc(str, "(");
  2128. if (type->Proc.params) {
  2129. str = write_type_to_string(str, type->Proc.params);
  2130. }
  2131. str = gb_string_appendc(str, ")");
  2132. if (type->Proc.results) {
  2133. str = gb_string_appendc(str, " -> ");
  2134. str = write_type_to_string(str, type->Proc.results);
  2135. }
  2136. break;
  2137. case Type_BitField:
  2138. str = gb_string_appendc(str, "bit_field ");
  2139. if (type->BitField.custom_align != 0) {
  2140. str = gb_string_append_fmt(str, "#align %d ", cast(int)type->BitField.custom_align);
  2141. }
  2142. str = gb_string_append_rune(str, '{');
  2143. for (isize i = 0; i < type->BitField.field_count; i++) {
  2144. Entity *f = type->BitField.fields[i];
  2145. GB_ASSERT(f->kind == Entity_Variable);
  2146. GB_ASSERT(f->type != nullptr && f->type->kind == Type_BitFieldValue);
  2147. str = gb_string_append_rune(str, '{');
  2148. if (i > 0) {
  2149. str = gb_string_appendc(str, ", ");
  2150. }
  2151. str = gb_string_append_length(str, f->token.string.text, f->token.string.len);
  2152. str = gb_string_appendc(str, ": ");
  2153. str = gb_string_append_fmt(str, "%lld", cast(long long)f->type->BitFieldValue.bits);
  2154. }
  2155. str = gb_string_append_rune(str, '}');
  2156. break;
  2157. case Type_BitFieldValue:
  2158. str = gb_string_append_fmt(str, "(bit field value with %d bits)", cast(int)type->BitFieldValue.bits);
  2159. break;
  2160. }
  2161. return str;
  2162. }
  2163. gbString type_to_string(Type *type) {
  2164. return write_type_to_string(gb_string_make(heap_allocator(), ""), type);
  2165. }