core.odin 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015
  1. // This is the runtime code required by the compiler
  2. // IMPORTANT NOTE(bill): Do not change the order of any of this data
  3. // The compiler relies upon this _exact_ order
  4. package runtime
  5. import "core:os"
  6. import "core:mem"
  7. import "core:log"
  8. // Naming Conventions:
  9. // In general, Ada_Case for types and snake_case for values
  10. //
  11. // Package Name: snake_case (but prefer single word)
  12. // Import Name: snake_case (but prefer single word)
  13. // Types: Ada_Case
  14. // Enum Values: Ada_Case
  15. // Procedures: snake_case
  16. // Local Variables: snake_case
  17. // Constant Variables: SCREAMING_SNAKE_CASE
  18. // IMPORTANT NOTE(bill): `type_info_of` cannot be used within a
  19. // #shared_global_scope due to the internals of the compiler.
  20. // This could change at a later date if the all these data structures are
  21. // implemented within the compiler rather than in this "preload" file
  22. // NOTE(bill): This must match the compiler's
// Calling conventions a procedure type may carry.
// The integer values are part of the compiler ABI — do not reorder or renumber.
Calling_Convention :: enum {
	Invalid     = 0,
	Odin        = 1, // default Odin convention (implicit context pointer)
	Contextless = 2, // Odin convention without the implicit context
	C           = 3,
	Std         = 4,
	Fast        = 5,
}
// Storage for a single enum field's constant value (used by Type_Info_Enum).
// Covers every integer-like backing type an enum may declare.
Type_Info_Enum_Value :: union {
	rune,
	i8, i16, i32, i64, int,
	u8, u16, u32, u64, uint, uintptr,
};
// Variant Types
// Payload records for Type_Info.variant below. The compiler emits these
// structures directly, so neither their order nor their fields may change.
Type_Info_Named :: struct {name: string, base: ^Type_Info};
Type_Info_Integer :: struct {signed: bool};
Type_Info_Rune :: struct {};
Type_Info_Float :: struct {};
Type_Info_Complex :: struct {};
Type_Info_String :: struct {is_cstring: bool};
Type_Info_Boolean :: struct {};
Type_Info_Any :: struct {};
Type_Info_Type_Id :: struct {};
Type_Info_Pointer :: struct {
	elem: ^Type_Info // nil -> rawptr
};
Type_Info_Procedure :: struct {
	params: ^Type_Info, // Type_Info_Tuple
	results: ^Type_Info, // Type_Info_Tuple
	variadic: bool,
	convention: Calling_Convention,
};
Type_Info_Array :: struct {
	elem: ^Type_Info,
	elem_size: int,
	count: int,
};
Type_Info_Dynamic_Array :: struct {elem: ^Type_Info, elem_size: int};
Type_Info_Slice :: struct {elem: ^Type_Info, elem_size: int};
Type_Info_Tuple :: struct { // Only really used for procedures
	types: []^Type_Info,
	names: []string,
};
// Parallel slices: types[i]/names[i]/offsets[i]/usings[i] describe field i.
Type_Info_Struct :: struct {
	types: []^Type_Info,
	names: []string,
	offsets: []uintptr, // offsets may not be used in tuples
	usings: []bool, // usings may not be used in tuples
	is_packed: bool,
	is_raw_union: bool,
	custom_align: bool,
};
Type_Info_Union :: struct {
	variants: []^Type_Info,
	tag_offset: uintptr, // byte offset of the discriminant within the union
	tag_type: ^Type_Info,
	custom_align: bool,
};
Type_Info_Enum :: struct {
	base: ^Type_Info, // backing integer-like type
	names: []string,
	values: []Type_Info_Enum_Value,
};
Type_Info_Map :: struct {
	key: ^Type_Info,
	value: ^Type_Info,
	generated_struct: ^Type_Info, // compiler-generated backing struct
};
Type_Info_Bit_Field :: struct {
	names: []string,
	bits: []i32,
	offsets: []i32,
};
Type_Info_Bit_Set :: struct {
	elem: ^Type_Info,
	underlying: ^Type_Info, // Possibly nil
	lower: i64, // smallest representable element value
	upper: i64, // largest representable element value
};
Type_Info_Opaque :: struct {
	elem: ^Type_Info,
}
// Runtime description of one type. The compiler fills `type_table` with one
// record per referenced type; `variant` holds the kind-specific payload.
// Field order is ABI — do not change.
Type_Info :: struct {
	size: int,  // size of the type in bytes
	align: int, // required alignment in bytes
	id: typeid,
	variant: union {
		Type_Info_Named,
		Type_Info_Integer,
		Type_Info_Rune,
		Type_Info_Float,
		Type_Info_Complex,
		Type_Info_String,
		Type_Info_Boolean,
		Type_Info_Any,
		Type_Info_Type_Id,
		Type_Info_Pointer,
		Type_Info_Procedure,
		Type_Info_Array,
		Type_Info_Dynamic_Array,
		Type_Info_Slice,
		Type_Info_Tuple,
		Type_Info_Struct,
		Type_Info_Union,
		Type_Info_Enum,
		Type_Info_Map,
		Type_Info_Bit_Field,
		Type_Info_Bit_Set,
		Type_Info_Opaque,
	},
}
// NOTE(bill): This must match the compiler's
// Discriminator packed into a typeid's `kind` bits (see Typeid_Bit_Field);
// mirrors the Type_Info variant list above, in the same order.
Typeid_Kind :: enum u8 {
	Invalid,
	Integer,
	Rune,
	Float,
	Complex,
	String,
	Boolean,
	Any,
	Type_Id,
	Pointer,
	Procedure,
	Array,
	Dynamic_Array,
	Slice,
	Tuple,
	Struct,
	Union,
	Enum,
	Map,
	Bit_Field,
	Bit_Set,
	Opaque,
}
// Bit layout of a `typeid` value; __type_info_of transmutes a typeid into
// this to recover the type_table index.
// NOTE(review): `8*size_of(align_of(uintptr)) - 8` evaluates size_of on an
// untyped integer constant; presumably `8*size_of(uintptr) - 8` was intended —
// same result where int and uintptr have equal size. TODO confirm.
Typeid_Bit_Field :: bit_field #align align_of(uintptr) {
	index:    8*size_of(align_of(uintptr)) - 8, // index into type_table
	kind:     5, // Typeid_Kind
	named:    1, // named/distinct type?
	special:  1, // signed, cstring, etc
	reserved: 1,
}
// NOTE(bill): only the ones that are needed (not all types)
// This will be set by the compiler
type_table: []Type_Info;

// Raw command-line arguments; presumably populated by the program entry
// point before main runs — not set anywhere in this file. TODO confirm.
args__: []cstring;
// IMPORTANT NOTE(bill): Must be in this order (as the compiler relies upon it)

// Identifies a position in source code; produced by #caller_location.
Source_Code_Location :: struct {
	file_path: string,
	line, column: int,
	procedure: string,
}

// Handler invoked by assert/panic/unimplemented/unreachable on failure.
Assertion_Failure_Proc :: #type proc(prefix, message: string, loc: Source_Code_Location);

// The implicit `context` record threaded through every Odin call.
Context :: struct {
	allocator: mem.Allocator,
	temp_allocator: mem.Allocator,
	assertion_failure_proc: Assertion_Failure_Proc,
	logger: log.Logger,
	thread_id: int,
	user_data: any,
	user_index: int,
	derived: any, // May be used for derived data types
}
// Backing state for the global scratch (temporary) allocator installed by
// __init_context.
global_scratch_allocator_data: mem.Scratch_Allocator;

// Bucket/entry count used when a map first allocates.
INITIAL_MAP_CAP :: 16;

// A map key as stored: the 64-bit hash, plus the original string when the
// key type is a string (needed to resolve hash collisions).
Map_Key :: struct {
	hash: u64,
	str: string,
}

// Result of __dynamic_map_find:
//   hash_index  — bucket slot for the key's hash (-1 if no buckets exist)
//   entry_prev  — predecessor in the bucket's chain (-1 if none)
//   entry_index — matching entry (-1 if not found)
Map_Find_Result :: struct {
	hash_index: int,
	entry_prev: int,
	entry_index: int,
}

// Header common to every map entry; the value bytes are stored immediately
// after it (see the commented-out field and Map_Header.value_offset).
Map_Entry_Header :: struct {
	key: Map_Key,
	next: int, // index of the next entry in this bucket's chain, -1 at the end
	/*
	value: Value_Type,
	*/
}

// Type-erased view of a concrete map[K]V (built by __get_map_header) so the
// untyped __dynamic_map_* procedures can operate on any map type.
Map_Header :: struct {
	m: ^mem.Raw_Map,
	is_key_string: bool,
	entry_size: int,
	entry_align: int,
	value_offset: uintptr, // byte offset of the value within an entry
	value_size: int,
}
  213. type_info_base :: proc "contextless" (info: ^Type_Info) -> ^Type_Info {
  214. if info == nil do return nil;
  215. base := info;
  216. loop: for {
  217. switch i in base.variant {
  218. case Type_Info_Named: base = i.base;
  219. case: break loop;
  220. }
  221. }
  222. return base;
  223. }
  224. type_info_base_without_enum :: proc "contextless" (info: ^Type_Info) -> ^Type_Info {
  225. if info == nil do return nil;
  226. base := info;
  227. loop: for {
  228. switch i in base.variant {
  229. case Type_Info_Named: base = i.base;
  230. case Type_Info_Enum: base = i.base;
  231. case: break loop;
  232. }
  233. }
  234. return base;
  235. }
  236. __type_info_of :: proc "contextless" (id: typeid) -> ^Type_Info {
  237. data := transmute(Typeid_Bit_Field)id;
  238. n := int(data.index);
  239. if n < 0 || n >= len(type_table) {
  240. n = 0;
  241. }
  242. return &type_table[n];
  243. }
  244. typeid_base :: proc "contextless" (id: typeid) -> typeid {
  245. ti := type_info_of(id);
  246. ti = type_info_base(ti);
  247. return ti.id;
  248. }
  249. typeid_base_without_enum :: proc "contextless" (id: typeid) -> typeid {
  250. ti := type_info_base_without_enum(type_info_of(id));
  251. return ti.id;
  252. }
// LLVM intrinsics used by the runtime.
@(default_calling_convention = "c")
foreign {
	@(link_name="llvm.assume")
	assume :: proc(cond: bool) ---;          // optimizer hint: `cond` holds here
	@(link_name="llvm.debugtrap")
	debug_trap :: proc() ---;                // break into an attached debugger
	@(link_name="llvm.trap")
	trap :: proc() -> ! ---;                 // abort execution; never returns
	@(link_name="llvm.readcyclecounter")
	read_cycle_counter :: proc() -> u64 ---; // raw CPU cycle counter
}
// Copy `other` into `c`, then apply default initialization.
// NOTE: __init_context overwrites the allocators, thread id, assertion
// handler and logger afterwards, so effectively only user_data, user_index
// and derived survive from `other`.
__init_context_from_ptr :: proc "contextless" (c: ^Context, other: ^Context) {
	if c == nil do return;
	c^ = other^;
	__init_context(c);
}
// Install default context values: OS heap allocator, global scratch
// temp allocator, current thread id, default assertion handler, and a
// no-op logger.
__init_context :: proc "contextless" (c: ^Context) {
	if c == nil do return;
	c.allocator.procedure = os.heap_allocator_proc;
	c.allocator.data = nil;
	c.temp_allocator.procedure = mem.scratch_allocator_proc;
	c.temp_allocator.data = &global_scratch_allocator_data;
	c.thread_id = os.current_thread_id(); // NOTE(bill): This is "contextless" so it is okay to call
	c.assertion_failure_proc = default_assertion_failure_proc;
	c.logger.procedure = log.nil_logger_proc;
	c.logger.data = nil;
}
// Point the global scratch (temporary) allocator at `data`, falling back to
// `backup_allocator` for requests that do not fit.
@(builtin)
init_global_temporary_allocator :: proc(data: []byte, backup_allocator := context.allocator) {
	mem.scratch_allocator_init(&global_scratch_allocator_data, data, backup_allocator);
}
// Default assertion handler: write "<location> <prefix>: <message>" to
// stderr, then break into the debugger.
default_assertion_failure_proc :: proc(prefix, message: string, loc: Source_Code_Location) {
	fd := os.stderr;
	print_caller_location(fd, loc); // NOTE(review): defined elsewhere in the runtime
	os.write_string(fd, " ");
	os.write_string(fd, prefix);
	if len(message) > 0 {
		os.write_string(fd, ": ");
		os.write_string(fd, message);
	}
	os.write_byte(fd, '\n');
	debug_trap();
}
  296. @(builtin)
  297. copy :: proc "contextless" (dst, src: $T/[]$E) -> int {
  298. n := max(0, min(len(dst), len(src)));
  299. if n > 0 do mem.copy(&dst[0], &src[0], n*size_of(E));
  300. return n;
  301. }
  302. @(builtin)
  303. pop :: proc "contextless" (array: ^$T/[dynamic]$E) -> E {
  304. if array == nil do return E{};
  305. assert(len(array) > 0);
  306. res := array[len(array)-1];
  307. (^mem.Raw_Dynamic_Array)(array).len -= 1;
  308. return res;
  309. }
  310. @(builtin)
  311. unordered_remove :: proc(array: ^$D/[dynamic]$T, index: int, loc := #caller_location) {
  312. bounds_check_error_loc(loc, index, len(array));
  313. n := len(array)-1;
  314. if index != n {
  315. array[index] = array[n];
  316. }
  317. pop(array);
  318. }
// Remove element `index`, shifting everything after it down one slot (O(n))
// so element order is preserved; the duplicated tail element is then popped.
@(builtin)
ordered_remove :: proc(array: ^$D/[dynamic]$T, index: int, loc := #caller_location) {
	bounds_check_error_loc(loc, index, len(array));
	copy(array[index:], array[index+1:]);
	pop(array);
}
// Overload groups exposing container built-ins. `clear`/`reserve`/`resize`
// dispatch on argument type; the allocation helpers forward to core:mem.
@(builtin)
clear :: proc[clear_dynamic_array, clear_map];
@(builtin)
reserve :: proc[reserve_dynamic_array, reserve_map];
@(builtin)
resize :: proc[resize_dynamic_array];
@(builtin)
new :: proc[mem.new];
@(builtin)
new_clone :: proc[mem.new_clone];
@(builtin)
free :: proc[mem.free];
@(builtin)
free_all :: proc[mem.free_all];
@(builtin)
delete :: proc[
	mem.delete_string,
	mem.delete_cstring,
	mem.delete_dynamic_array,
	mem.delete_slice,
	mem.delete_map,
];
@(builtin)
make :: proc[
	mem.make_slice,
	mem.make_dynamic_array,
	mem.make_dynamic_array_len,
	mem.make_dynamic_array_len_cap,
	mem.make_map,
];
  355. @(builtin)
  356. clear_map :: inline proc "contextless" (m: ^$T/map[$K]$V) {
  357. if m == nil do return;
  358. raw_map := (^mem.Raw_Map)(m);
  359. entries := (^mem.Raw_Dynamic_Array)(&raw_map.entries);
  360. entries.len = 0;
  361. for _, i in raw_map.hashes {
  362. raw_map.hashes[i] = -1;
  363. }
  364. }
// Pre-size the map for `capacity` items; no-op on a nil map.
@(builtin)
reserve_map :: proc(m: ^$T/map[$K]$V, capacity: int) {
	if m != nil do __dynamic_map_reserve(__get_map_header(m), capacity);
}
// Remove `key` from the map if present; no-op on a nil map or missing key.
@(builtin)
delete_key :: proc(m: ^$T/map[$K]$V, key: K) {
	if m != nil do __dynamic_map_delete_key(__get_map_header(m), __get_map_key(key));
}
  373. @(builtin)
  374. append_elem :: proc(array: ^$T/[dynamic]$E, arg: E, loc := #caller_location) -> int {
  375. if array == nil do return 0;
  376. arg_len := 1;
  377. if cap(array) <= len(array)+arg_len {
  378. cap := 2 * cap(array) + max(8, arg_len);
  379. _ = reserve(array, cap, loc);
  380. }
  381. arg_len = min(cap(array)-len(array), arg_len);
  382. if arg_len > 0 {
  383. a := (^mem.Raw_Dynamic_Array)(array);
  384. data := (^E)(a.data);
  385. assert(data != nil);
  386. mem.copy(mem.ptr_offset(data, a.len), &arg, size_of(E));
  387. a.len += arg_len;
  388. }
  389. return len(array);
  390. }
  391. @(builtin)
  392. append_elems :: proc(array: ^$T/[dynamic]$E, args: ..E, loc := #caller_location) -> int {
  393. if array == nil do return 0;
  394. arg_len := len(args);
  395. if arg_len <= 0 do return len(array);
  396. if cap(array) <= len(array)+arg_len {
  397. cap := 2 * cap(array) + max(8, arg_len);
  398. _ = reserve(array, cap, loc);
  399. }
  400. arg_len = min(cap(array)-len(array), arg_len);
  401. if arg_len > 0 {
  402. a := (^mem.Raw_Dynamic_Array)(array);
  403. data := (^E)(a.data);
  404. assert(data != nil);
  405. mem.copy(mem.ptr_offset(data, a.len), &args[0], size_of(E) * arg_len);
  406. a.len += arg_len;
  407. }
  408. return len(array);
  409. }
// Overloaded append: a single element or a variadic list of elements.
@(builtin) append :: proc[append_elem, append_elems];
  411. @(builtin)
  412. append_string :: proc(array: ^$T/[dynamic]$E/u8, args: ..string, loc := #caller_location) -> int {
  413. for arg in args {
  414. append(array = array, args = ([]E)(arg), loc = loc);
  415. }
  416. return len(array);
  417. }
  418. @(builtin)
  419. clear_dynamic_array :: inline proc "contextless" (array: ^$T/[dynamic]$E) {
  420. if array != nil do (^mem.Raw_Dynamic_Array)(array).len = 0;
  421. }
  422. @(builtin)
  423. reserve_dynamic_array :: proc(array: ^$T/[dynamic]$E, capacity: int, loc := #caller_location) -> bool {
  424. if array == nil do return false;
  425. a := (^mem.Raw_Dynamic_Array)(array);
  426. if capacity <= a.cap do return true;
  427. if a.allocator.procedure == nil {
  428. a.allocator = context.allocator;
  429. }
  430. assert(a.allocator.procedure != nil);
  431. old_size := a.cap * size_of(E);
  432. new_size := capacity * size_of(E);
  433. allocator := a.allocator;
  434. new_data := allocator.procedure(
  435. allocator.data, mem.Allocator_Mode.Resize, new_size, align_of(E),
  436. a.data, old_size, 0, loc,
  437. );
  438. if new_data == nil do return false;
  439. a.data = new_data;
  440. a.cap = capacity;
  441. return true;
  442. }
  443. @(builtin)
  444. resize_dynamic_array :: proc(array: ^$T/[dynamic]$E, length: int, loc := #caller_location) -> bool {
  445. if array == nil do return false;
  446. a := (^mem.Raw_Dynamic_Array)(array);
  447. if length <= a.cap {
  448. a.len = max(length, 0);
  449. return true;
  450. }
  451. if a.allocator.procedure == nil {
  452. a.allocator = context.allocator;
  453. }
  454. assert(a.allocator.procedure != nil);
  455. old_size := a.cap * size_of(E);
  456. new_size := length * size_of(E);
  457. allocator := a.allocator;
  458. new_data := allocator.procedure(
  459. allocator.data, mem.Allocator_Mode.Resize, new_size, align_of(E),
  460. a.data, old_size, 0, loc,
  461. );
  462. if new_data == nil do return false;
  463. a.data = new_data;
  464. a.len = length;
  465. a.cap = length;
  466. return true;
  467. }
  468. @(builtin)
  469. incl_elem :: inline proc(s: ^$S/bit_set[$E; $U], elem: E) -> S {
  470. s^ |= {elem};
  471. return s^;
  472. }
  473. @(builtin)
  474. incl_elems :: inline proc(s: ^$S/bit_set[$E; $U], elems: ..E) -> S {
  475. for elem in elems do s^ |= {elem};
  476. return s^;
  477. }
  478. @(builtin)
  479. incl_bit_set :: inline proc(s: ^$S/bit_set[$E; $U], other: S) -> S {
  480. s^ |= other;
  481. return s^;
  482. }
  483. @(builtin)
  484. excl_elem :: inline proc(s: ^$S/bit_set[$E; $U], elem: E) -> S {
  485. s^ &~= {elem};
  486. return s^;
  487. }
  488. @(builtin)
  489. excl_elems :: inline proc(s: ^$S/bit_set[$E; $U], elems: ..E) -> S {
  490. for elem in elems do s^ &~= {elem};
  491. return s^;
  492. }
  493. @(builtin)
  494. excl_bit_set :: inline proc(s: ^$S/bit_set[$E; $U], other: S) -> S {
  495. s^ &~= other;
  496. return s^;
  497. }
  498. @(builtin) incl :: proc[incl_elem, incl_elems, incl_bit_set];
  499. @(builtin) excl :: proc[excl_elem, excl_elems, excl_bit_set];
  500. @(builtin)
  501. assert :: proc "contextless" (condition: bool, message := "", loc := #caller_location) -> bool {
  502. if !condition {
  503. p := context.assertion_failure_proc;
  504. if p == nil {
  505. p = default_assertion_failure_proc;
  506. }
  507. p("Runtime assertion", message, loc);
  508. }
  509. return condition;
  510. }
  511. @(builtin)
  512. panic :: proc "contextless" (message: string, loc := #caller_location) -> ! {
  513. p := context.assertion_failure_proc;
  514. if p == nil {
  515. p = default_assertion_failure_proc;
  516. }
  517. p("Panic", message, loc);
  518. }
  519. @(builtin)
  520. unimplemented :: proc "contextless" (message := "", loc := #caller_location) -> ! {
  521. p := context.assertion_failure_proc;
  522. if p == nil {
  523. p = default_assertion_failure_proc;
  524. }
  525. p("not yet implemented", message, loc);
  526. }
  527. @(builtin)
  528. unreachable :: proc "contextless" (message := "", loc := #caller_location) -> ! {
  529. p := context.assertion_failure_proc;
  530. if p == nil {
  531. p = default_assertion_failure_proc;
  532. }
  533. if message != "" {
  534. p("internal error", message, loc);
  535. } else {
  536. p("internal error", "entered unreachable code", loc);
  537. }
  538. }
// Dynamic Array

// Initialize a raw dynamic array in place with the context allocator and
// reserve `cap` elements.
// NOTE(review): when cap <= 0 the requested `len` is never stored, and a
// failed reserve is silently ignored — compiler-generated callers presumably
// guarantee cap >= len >= 0; confirm before relying on other inputs.
__dynamic_array_make :: proc(array_: rawptr, elem_size, elem_align: int, len, cap: int, loc := #caller_location) {
	array := (^mem.Raw_Dynamic_Array)(array_);
	array.allocator = context.allocator;
	assert(array.allocator.procedure != nil);
	if cap > 0 {
		__dynamic_array_reserve(array_, elem_size, elem_align, cap, loc);
		array.len = len;
	}
}
  549. __dynamic_array_reserve :: proc(array_: rawptr, elem_size, elem_align: int, cap: int, loc := #caller_location) -> bool {
  550. array := (^mem.Raw_Dynamic_Array)(array_);
  551. if cap <= array.cap do return true;
  552. if array.allocator.procedure == nil {
  553. array.allocator = context.allocator;
  554. }
  555. assert(array.allocator.procedure != nil);
  556. old_size := array.cap * elem_size;
  557. new_size := cap * elem_size;
  558. allocator := array.allocator;
  559. new_data := allocator.procedure(allocator.data, mem.Allocator_Mode.Resize, new_size, elem_align, array.data, old_size, 0, loc);
  560. if new_data == nil do return false;
  561. array.data = new_data;
  562. array.cap = cap;
  563. return true;
  564. }
  565. __dynamic_array_resize :: proc(array_: rawptr, elem_size, elem_align: int, len: int, loc := #caller_location) -> bool {
  566. array := (^mem.Raw_Dynamic_Array)(array_);
  567. ok := __dynamic_array_reserve(array_, elem_size, elem_align, len, loc);
  568. if ok do array.len = len;
  569. return ok;
  570. }
  571. __dynamic_array_append :: proc(array_: rawptr, elem_size, elem_align: int,
  572. items: rawptr, item_count: int, loc := #caller_location) -> int {
  573. array := (^mem.Raw_Dynamic_Array)(array_);
  574. if items == nil do return 0;
  575. if item_count <= 0 do return 0;
  576. ok := true;
  577. if array.cap <= array.len+item_count {
  578. cap := 2 * array.cap + max(8, item_count);
  579. ok = __dynamic_array_reserve(array, elem_size, elem_align, cap, loc);
  580. }
  581. // TODO(bill): Better error handling for failed reservation
  582. if !ok do return array.len;
  583. assert(array.data != nil);
  584. data := uintptr(array.data) + uintptr(elem_size*array.len);
  585. mem.copy(rawptr(data), items, elem_size * item_count);
  586. array.len += item_count;
  587. return array.len;
  588. }
  589. __dynamic_array_append_nothing :: proc(array_: rawptr, elem_size, elem_align: int, loc := #caller_location) -> int {
  590. array := (^mem.Raw_Dynamic_Array)(array_);
  591. ok := true;
  592. if array.cap <= array.len+1 {
  593. cap := 2 * array.cap + max(8, 1);
  594. ok = __dynamic_array_reserve(array, elem_size, elem_align, cap, loc);
  595. }
  596. // TODO(bill): Better error handling for failed reservation
  597. if !ok do return array.len;
  598. assert(array.data != nil);
  599. data := uintptr(array.data) + uintptr(elem_size*array.len);
  600. mem.zero(rawptr(data), elem_size);
  601. array.len += 1;
  602. return array.len;
  603. }
// Map

// Build the type-erased Map_Header for a concrete map[K]V: the layout of
// one entry (header followed by the value) plus whether keys need string
// comparison to resolve hash collisions.
__get_map_header :: proc "contextless" (m: ^$T/map[$K]$V) -> Map_Header {
	header := Map_Header{m = (^mem.Raw_Map)(m)};
	// Mirror of Map_Entry_Header with the concrete value type appended.
	Entry :: struct {
		key: Map_Key,
		next: int,
		value: V,
	}
	_, is_string := type_info_base(type_info_of(K)).variant.(Type_Info_String);
	header.is_key_string = is_string;
	header.entry_size = int(size_of(Entry));
	header.entry_align = int(align_of(Entry));
	header.value_offset = uintptr(offset_of(Entry, value));
	header.value_size = int(size_of(V));
	return header;
}
// Build a Map_Key for `key`: integer-like keys are hashed from their raw
// bytes at their exact bit width, floats from their bit pattern, strings
// with FNV-1a (keeping the string for collision checks). Any other key
// type panics.
__get_map_key :: proc "contextless" (key: $K) -> Map_Key {
	map_key: Map_Key;
	ti := type_info_base_without_enum(type_info_of(K));
	switch _ in ti.variant {
	case Type_Info_Integer:
		// Reinterpret the key's storage at its declared width.
		switch 8*size_of(key) {
		case 8: map_key.hash = u64(( ^u8)(&key)^);
		case 16: map_key.hash = u64(( ^u16)(&key)^);
		case 32: map_key.hash = u64(( ^u32)(&key)^);
		case 64: map_key.hash = u64(( ^u64)(&key)^);
		case: panic("Unhandled integer size");
		}
	case Type_Info_Rune:
		map_key.hash = u64((^rune)(&key)^);
	case Type_Info_Pointer:
		map_key.hash = u64(uintptr((^rawptr)(&key)^));
	case Type_Info_Float:
		// Hash the raw bit pattern of the float.
		switch 8*size_of(key) {
		case 32: map_key.hash = u64((^u32)(&key)^);
		case 64: map_key.hash = u64((^u64)(&key)^);
		case: panic("Unhandled float size");
		}
	case Type_Info_String:
		str := (^string)(&key)^;
		map_key.hash = default_hash_string(str);
		map_key.str = str;
	case:
		panic("Unhandled map key type");
	}
	return map_key;
}
  651. default_hash :: proc(data: []byte) -> u64 {
  652. fnv64a :: proc(data: []byte) -> u64 {
  653. h: u64 = 0xcbf29ce484222325;
  654. for b in data {
  655. h = (h ~ u64(b)) * 0x100000001b3;
  656. }
  657. return h;
  658. }
  659. return fnv64a(data);
  660. }
  661. default_hash_string :: proc(s: string) -> u64 do return default_hash(([]byte)(s));
  662. __slice_resize :: proc(array_: ^$T/[]$E, new_count: int, allocator: mem.Allocator, loc := #caller_location) -> bool {
  663. array := (^mem.Raw_Slice)(array_);
  664. if new_count < array.len do return true;
  665. assert(allocator.procedure != nil);
  666. old_size := array.len*size_of(T);
  667. new_size := new_count*size_of(T);
  668. new_data := mem.resize(array.data, old_size, new_size, align_of(T), allocator, loc);
  669. if new_data == nil do return false;
  670. array.data = new_data;
  671. array.len = new_count;
  672. return true;
  673. }
// Reserve entry storage and bucket slots for `cap` items; newly created
// bucket slots are marked empty (-1).
__dynamic_map_reserve :: proc(using header: Map_Header, cap: int, loc := #caller_location) {
	__dynamic_array_reserve(&m.entries, entry_size, entry_align, cap, loc);
	old_len := len(m.hashes);
	__slice_resize(&m.hashes, cap, m.entries.allocator, loc);
	// `..` is inclusive, so this covers exactly the appended slots.
	for i in old_len..len(m.hashes)-1 do m.hashes[i] = -1;
}
// Rebuild the map with `new_count` buckets, re-inserting every existing
// entry into a fresh table, then free the old storage and install the new
// table in place.
__dynamic_map_rehash :: proc(using header: Map_Header, new_count: int, loc := #caller_location) #no_bounds_check {
	new_header: Map_Header = header;
	nm := mem.Raw_Map{};
	nm.entries.allocator = m.entries.allocator;
	new_header.m = &nm;
	// Make the allocations below go through the map's own allocator, not
	// whatever allocator the caller's context happens to carry.
	c := context;
	if m.entries.allocator.procedure != nil {
		c.allocator = m.entries.allocator;
	}
	context = c;
	__dynamic_array_reserve(&nm.entries, entry_size, entry_align, m.entries.len, loc);
	__slice_resize(&nm.hashes, new_count, m.entries.allocator, loc);
	for i in 0 .. new_count-1 do nm.hashes[i] = -1; // all buckets start empty
	for i in 0 .. m.entries.len-1 {
		if len(nm.hashes) == 0 do __dynamic_map_grow(new_header, loc);
		entry_header := __dynamic_map_get_entry(header, i);
		data := uintptr(entry_header);
		// Locate the insertion point in the new table, append the entry,
		// and splice it into the head/tail of its bucket chain.
		fr := __dynamic_map_find(new_header, entry_header.key);
		j := __dynamic_map_add_entry(new_header, entry_header.key, loc);
		if fr.entry_prev < 0 {
			nm.hashes[fr.hash_index] = j;
		} else {
			e := __dynamic_map_get_entry(new_header, fr.entry_prev);
			e.next = j;
		}
		e := __dynamic_map_get_entry(new_header, j);
		e.next = fr.entry_index;
		// Copy the value bytes stored just past the entry header.
		ndata := uintptr(e);
		mem.copy(rawptr(ndata+value_offset), rawptr(data+value_offset), value_size);
		if __dynamic_map_full(new_header) do __dynamic_map_grow(new_header, loc);
	}
	// Release the old storage and install the rebuilt map.
	delete(m.hashes, m.entries.allocator, loc);
	free(m.entries.data, m.entries.allocator, loc);
	header.m^ = nm;
}
  715. __dynamic_map_get :: proc(h: Map_Header, key: Map_Key) -> rawptr {
  716. index := __dynamic_map_find(h, key).entry_index;
  717. if index >= 0 {
  718. data := uintptr(__dynamic_map_get_entry(h, index));
  719. return rawptr(data + h.value_offset);
  720. }
  721. return nil;
  722. }
// Insert or update `key` with the h.value_size bytes at `value`. Creates
// the initial table on first use and grows whenever the load factor is
// exceeded.
__dynamic_map_set :: proc(h: Map_Header, key: Map_Key, value: rawptr, loc := #caller_location) #no_bounds_check {
	index: int;
	assert(value != nil);
	// First insertion: create the initial buckets and entry storage.
	if len(h.m.hashes) == 0 {
		__dynamic_map_reserve(h, INITIAL_MAP_CAP, loc);
		__dynamic_map_grow(h, loc);
	}
	fr := __dynamic_map_find(h, key);
	if fr.entry_index >= 0 {
		// Key already present: overwrite its value in place.
		index = fr.entry_index;
	} else {
		// Append a new entry and link it into its bucket chain
		// (either after the chain's tail or as the bucket head).
		index = __dynamic_map_add_entry(h, key, loc);
		if fr.entry_prev >= 0 {
			entry := __dynamic_map_get_entry(h, fr.entry_prev);
			entry.next = index;
		} else {
			h.m.hashes[fr.hash_index] = index;
		}
	}
	{
		e := __dynamic_map_get_entry(h, index);
		e.key = key;
		val := (^byte)(uintptr(e) + h.value_offset);
		mem.copy(val, value, h.value_size);
	}
	if __dynamic_map_full(h) {
		__dynamic_map_grow(h, loc);
	}
}
// Grow the map by rehashing into a larger table.
__dynamic_map_grow :: proc(using h: Map_Header, loc := #caller_location) {
	// TODO(bill): Determine an efficient growing rate
	new_count := max(4*m.entries.cap + 7, INITIAL_MAP_CAP);
	__dynamic_map_rehash(h, new_count, loc);
}
  757. __dynamic_map_full :: inline proc(using h: Map_Header) -> bool {
  758. return int(0.75 * f64(len(m.hashes))) <= m.entries.cap;
  759. }
  760. __dynamic_map_hash_equal :: proc(h: Map_Header, a, b: Map_Key) -> bool {
  761. if a.hash == b.hash {
  762. if h.is_key_string do return a.str == b.str;
  763. return true;
  764. }
  765. return false;
  766. }
  767. __dynamic_map_find :: proc(using h: Map_Header, key: Map_Key) -> Map_Find_Result #no_bounds_check {
  768. fr := Map_Find_Result{-1, -1, -1};
  769. if n := u64(len(m.hashes)); n > 0 {
  770. fr.hash_index = int(key.hash % n);
  771. fr.entry_index = m.hashes[fr.hash_index];
  772. for fr.entry_index >= 0 {
  773. entry := __dynamic_map_get_entry(h, fr.entry_index);
  774. if __dynamic_map_hash_equal(h, entry.key, key) do return fr;
  775. fr.entry_prev = fr.entry_index;
  776. fr.entry_index = entry.next;
  777. }
  778. }
  779. return fr;
  780. }
// Append a fresh entry for `key` and return its index (the previous entry
// count). The entry is not yet linked into any bucket chain.
__dynamic_map_add_entry :: proc(using h: Map_Header, key: Map_Key, loc := #caller_location) -> int {
	prev := m.entries.len;
	c := __dynamic_array_append_nothing(&m.entries, entry_size, entry_align, loc);
	// c != prev means the append actually happened; initialize the slot.
	if c != prev {
		end := __dynamic_map_get_entry(h, c-1);
		end.key = key;
		end.next = -1;
	}
	// NOTE(review): if the append failed, the returned index equals the old
	// length and points past the last valid entry — callers do not check.
	return prev;
}
  791. __dynamic_map_delete_key :: proc(using h: Map_Header, key: Map_Key) {
  792. fr := __dynamic_map_find(h, key);
  793. if fr.entry_index >= 0 {
  794. __dynamic_map_erase(h, fr);
  795. }
  796. }
  797. __dynamic_map_get_entry :: proc(using h: Map_Header, index: int) -> ^Map_Entry_Header {
  798. assert(0 <= index && index < m.entries.len);
  799. return (^Map_Entry_Header)(uintptr(m.entries.data) + uintptr(index*entry_size));
  800. }
// Remove the entry identified by `fr` (as produced by __dynamic_map_find):
// unlink it from its bucket chain, then swap-fill the hole with the last
// entry and re-point whichever link referenced that moved entry.
__dynamic_map_erase :: proc(using h: Map_Header, fr: Map_Find_Result) #no_bounds_check {
	// Unlink the entry from its bucket chain (bucket head or predecessor).
	if fr.entry_prev < 0 {
		m.hashes[fr.hash_index] = __dynamic_map_get_entry(h, fr.entry_index).next;
	} else {
		prev := __dynamic_map_get_entry(h, fr.entry_prev);
		curr := __dynamic_map_get_entry(h, fr.entry_index);
		prev.next = curr.next;
	}
	if (fr.entry_index == m.entries.len-1) {
		// NOTE(bill): No need to do anything else, just pop
	} else {
		// Swap-remove: copy the last entry into the vacated slot, then fix
		// whichever link (bucket head or chain predecessor) pointed at it.
		old := __dynamic_map_get_entry(h, fr.entry_index);
		end := __dynamic_map_get_entry(h, m.entries.len-1);
		mem.copy(old, end, entry_size);
		if last := __dynamic_map_find(h, old.key); last.entry_prev >= 0 {
			last_entry := __dynamic_map_get_entry(h, last.entry_prev);
			last_entry.next = fr.entry_index;
		} else {
			m.hashes[last.hash_index] = fr.entry_index;
		}
	}
	// TODO(bill): Is this correct behaviour?
	m.entries.len -= 1;
}