// dynamic_map_internal.odin
package runtime

import "core:intrinsics"
_ :: intrinsics

// High performance, cache-friendly, open-addressed Robin Hood hashing hash map
// data structure with various optimizations for Odin.
//
// Copyright 2022 (c) Dale Weiler
//
// The core of the hash map data structure is the Raw_Map struct which is a
// type-erased representation of the map. This type-erased representation is
// used in two ways: static and dynamic. When static type information is known,
// the procedures suffixed with _static should be used instead of _dynamic. The
// static procedures are optimized since they have type information. Hashing of
// keys, comparison of keys, and data lookup are all optimized. When type
// information is not known, the procedures suffixed with _dynamic should be
// used. The representation of the map is the same for both static and dynamic,
// and procedures of each can be mixed and matched. The purpose of the dynamic
// representation is to enable reflection and runtime manipulation of the map.
// The dynamic procedures all take an additional Map_Info structure parameter
// which carries runtime values describing the size, alignment, and offset of
// various traits of a given key and value type pair. The Map_Info value can
// be created by calling map_info(K, V) with the key and value typeids.
//
// This map implementation makes extensive use of uintptr for representing
// sizes, lengths, capacities, masks, pointers, offsets, and addresses to avoid
// expensive sign extension and masking that would be generated if types were
// cast all over. The only place regular ints show up is in the cap() and
// len() implementations.
//
// To make this map cache-friendly it uses a novel strategy to ensure keys and
// values of the map are always cache-line aligned and that no single key or
// value of any type ever straddles a cache-line. This cache efficiency makes
// for quick lookups because the linear probe always addresses data in a cache
// friendly way. This is enabled through the use of a special meta-type called
// a Map_Cell which packs as many values of a given type into a local array,
// adding internal padding to round to MAP_CACHE_LINE_SIZE. One other benefit
// to storing the internal data in this manner is that false sharing no longer
// occurs when using a map, enabling efficient concurrent access of the map
// data structure with minimal locking if desired.
// With Robin Hood hashing a maximum load factor of 75% is ideal.
MAP_LOAD_FACTOR :: 75
// Minimum log2 capacity.
MAP_MIN_LOG2_CAPACITY :: 3 // 8 elements
// Has to be less than 100% though.
#assert(MAP_LOAD_FACTOR < 100)
// This is safe to change. The log2 size of a cache-line. At minimum it has to
// be six though. Higher cache line sizes are permitted.
MAP_CACHE_LINE_LOG2 :: 6
// The size of a cache-line.
MAP_CACHE_LINE_SIZE :: 1 << MAP_CACHE_LINE_LOG2
// The minimum cache-line size allowed by this implementation is 64 bytes since
// we need 6 bits in the base pointer to store the integer log2 capacity, which
// at maximum is 63. Odin uses signed integers to represent length and capacity,
// so only 63 bits are needed in the maximum case.
#assert(MAP_CACHE_LINE_SIZE >= 64)
// Map_Cell type that packs multiple T in such a way to ensure that each T stays
// aligned by align_of(T) and such that align_of(Map_Cell(T)) % MAP_CACHE_LINE_SIZE == 0
//
// This means a value of type T will never straddle a cache-line.
//
// When multiple Ts can fit in a single cache-line the data array will have more
// than one element. When it cannot, the data array will have one element and
// an array of Map_Cell(T) will be padded to stay a multiple of MAP_CACHE_LINE_SIZE.
//
// We rely on the type system to do all the arithmetic and padding for us here.
//
// The usual array[index] indexing for []T backed by a []Map_Cell(T) becomes a bit
// more involved as there now may be internal padding. The indexing now becomes
//
//	N :: len(Map_Cell(T){}.data)
//	i := index / N
//	j := index % N
//	cell[i].data[j]
//
// However, since len(Map_Cell(T){}.data) is a compile-time constant, there are some
// optimizations we can do to eliminate the need for any divisions as N will
// be bounded by [1, 64).
//
// In the optimal case, len(Map_Cell(T){}.data) = 1 so the cell array can be treated
// as a regular array of T, which is the case for hashes.
Map_Cell :: struct($T: typeid) #align(MAP_CACHE_LINE_SIZE) {
	// As many Ts as fit in one cache line; one (padded) element otherwise.
	// Zero-sized T also gets a one-element array so the type is well-formed.
	data: [MAP_CACHE_LINE_SIZE / size_of(T) when 0 < size_of(T) && size_of(T) < MAP_CACHE_LINE_SIZE else 1]T,
}
// So we can operate on a cell data structure at runtime without any type
// information, we have a simple table that stores some traits about the cell.
//
// 32-bytes on 64-bit
// 16-bytes on 32-bit
Map_Cell_Info :: struct {
	size_of_type:      uintptr, // 8-bytes on 64-bit, 4-bytes on 32-bits
	align_of_type:     uintptr, // 8-bytes on 64-bit, 4-bytes on 32-bits
	size_of_cell:      uintptr, // 8-bytes on 64-bit, 4-bytes on 32-bits
	elements_per_cell: uintptr, // 8-bytes on 64-bit, 4-bytes on 32-bits
}

// Compiler-provided lookup of the Map_Cell_Info for a type.
// map_cell_info :: proc "contextless" ($T: typeid) -> ^Map_Cell_Info {...}
map_cell_info :: intrinsics.type_map_cell_info
  97. // Same as the above procedure but at runtime with the cell Map_Cell_Info value.
  98. @(require_results)
  99. map_cell_index_dynamic :: #force_inline proc "contextless" (base: uintptr, #no_alias info: ^Map_Cell_Info, index: uintptr) -> uintptr {
  100. // Micro-optimize the common cases to save on integer division.
  101. elements_per_cell := uintptr(info.elements_per_cell)
  102. size_of_cell := uintptr(info.size_of_cell)
  103. switch elements_per_cell {
  104. case 1:
  105. return base + (index * size_of_cell)
  106. case 2:
  107. cell_index := index >> 1
  108. data_index := index & 1
  109. size_of_type := uintptr(info.size_of_type)
  110. return base + (cell_index * size_of_cell) + (data_index * size_of_type)
  111. case:
  112. cell_index := index / elements_per_cell
  113. data_index := index % elements_per_cell
  114. size_of_type := uintptr(info.size_of_type)
  115. return base + (cell_index * size_of_cell) + (data_index * size_of_type)
  116. }
  117. }
  118. // Same as above procedure but with compile-time constant index.
  119. @(require_results)
  120. map_cell_index_dynamic_const :: proc "contextless" (base: uintptr, #no_alias info: ^Map_Cell_Info, $INDEX: uintptr) -> uintptr {
  121. elements_per_cell := uintptr(info.elements_per_cell)
  122. size_of_cell := uintptr(info.size_of_cell)
  123. size_of_type := uintptr(info.size_of_type)
  124. cell_index := INDEX / elements_per_cell
  125. data_index := INDEX % elements_per_cell
  126. return base + (cell_index * size_of_cell) + (data_index * size_of_type)
  127. }
  128. // We always round the capacity to a power of two so this becomes [16]Foo, which
  129. // works out to [4]Cell(Foo).
  130. //
  131. // The following compile-time procedure indexes such a [N]Cell(T) structure as
  132. // if it were a flat array accounting for the internal padding introduced by the
  133. // Cell structure.
  134. @(require_results)
  135. map_cell_index_static :: #force_inline proc "contextless" (cells: [^]Map_Cell($T), index: uintptr) -> ^T #no_bounds_check {
  136. N :: size_of(Map_Cell(T){}.data) / size_of(T) when size_of(T) > 0 else 1
  137. #assert(N <= MAP_CACHE_LINE_SIZE)
  138. when size_of(Map_Cell(T)) == size_of([N]T) {
  139. // No padding case, can treat as a regular array of []T.
  140. return &([^]T)(cells)[index]
  141. } else when (N & (N - 1)) == 0 && N <= 8*size_of(uintptr) {
  142. // Likely case, N is a power of two because T is a power of two.
  143. // Compute the integer log 2 of N, this is the shift amount to index the
  144. // correct cell. Odin's intrinsics.count_leading_zeros does not produce a
  145. // constant, hence this approach. We only need to check up to N = 64.
  146. SHIFT :: 1 when N < 2 else
  147. 2 when N < 4 else
  148. 3 when N < 8 else
  149. 4 when N < 16 else
  150. 5 when N < 32 else 6
  151. #assert(SHIFT <= MAP_CACHE_LINE_LOG2)
  152. // Unique case, no need to index data here since only one element.
  153. when N == 1 {
  154. return &cells[index >> SHIFT].data[0]
  155. } else {
  156. return &cells[index >> SHIFT].data[index & (N - 1)]
  157. }
  158. } else {
  159. // Least likely (and worst case), we pay for a division operation but we
  160. // assume the compiler does not actually generate a division. N will be in the
  161. // range [1, CACHE_LINE_SIZE) and not a power of two.
  162. return &cells[index / N].data[index % N]
  163. }
  164. }
  165. // len() for map
  166. @(require_results)
  167. map_len :: #force_inline proc "contextless" (m: Raw_Map) -> int {
  168. return int(m.len)
  169. }
  170. // cap() for map
  171. @(require_results)
  172. map_cap :: #force_inline proc "contextless" (m: Raw_Map) -> int {
  173. // The data uintptr stores the capacity in the lower six bits which gives the
  174. // a maximum value of 2^6-1, or 63. We store the integer log2 of capacity
  175. // since our capacity is always a power of two. We only need 63 bits as Odin
  176. // represents length and capacity as a signed integer.
  177. return 0 if m.data == 0 else 1 << map_log2_cap(m)
  178. }
  179. // Query the load factor of the map. This is not actually configurable, but
  180. // some math is needed to compute it. Compute it as a fixed point percentage to
  181. // avoid floating point operations. This division can be optimized out by
  182. // multiplying by the multiplicative inverse of 100.
  183. @(require_results)
  184. map_load_factor :: #force_inline proc "contextless" (log2_capacity: uintptr) -> uintptr {
  185. return ((uintptr(1) << log2_capacity) * MAP_LOAD_FACTOR) / 100
  186. }
  187. @(require_results)
  188. map_resize_threshold :: #force_inline proc "contextless" (m: Raw_Map) -> uintptr {
  189. return map_load_factor(map_log2_cap(m))
  190. }
  191. // The data stores the log2 capacity in the lower six bits. This is primarily
  192. // used in the implementation rather than map_cap since the check for data = 0
  193. // isn't necessary in the implementation. cap() on the otherhand needs to work
  194. // when called on an empty map.
  195. @(require_results)
  196. map_log2_cap :: #force_inline proc "contextless" (m: Raw_Map) -> uintptr {
  197. return m.data & (64 - 1)
  198. }
  199. // Canonicalize the data by removing the tagged capacity stored in the lower six
  200. // bits of the data uintptr.
  201. @(require_results)
  202. map_data :: #force_inline proc "contextless" (m: Raw_Map) -> uintptr {
  203. return m.data &~ uintptr(64 - 1)
  204. }
  205. Map_Hash :: uintptr
  206. TOMBSTONE_MASK :: 1<<(size_of(Map_Hash)*8 - 1)
  207. // Procedure to check if a slot is empty for a given hash. This is represented
  208. // by the zero value to make the zero value useful. This is a procedure just
  209. // for prose reasons.
  210. @(require_results)
  211. map_hash_is_empty :: #force_inline proc "contextless" (hash: Map_Hash) -> bool {
  212. return hash == 0
  213. }
  214. @(require_results)
  215. map_hash_is_deleted :: #force_no_inline proc "contextless" (hash: Map_Hash) -> bool {
  216. // The MSB indicates a tombstone
  217. return hash & TOMBSTONE_MASK != 0
  218. }
  219. @(require_results)
  220. map_hash_is_valid :: #force_inline proc "contextless" (hash: Map_Hash) -> bool {
  221. // The MSB indicates a tombstone
  222. return (hash != 0) & (hash & TOMBSTONE_MASK == 0)
  223. }
// The per-map hash seed, derived from the canonical data pointer so each
// allocation of a map hashes keys differently.
@(require_results)
map_seed :: #force_inline proc "contextless" (m: Raw_Map) -> uintptr {
	return map_seed_from_map_data(map_data(m))
}

// splitmix for uintptr: mixes the data pointer into a well-distributed seed.
// The 64-bit branch is the splitmix64 finalizer; the 32-bit branch is an
// analogous 32-bit mixer. The constants are fixed and must not be altered.
@(require_results)
map_seed_from_map_data :: #force_inline proc "contextless" (data: uintptr) -> uintptr {
	when size_of(uintptr) == size_of(u64) {
		mix := data + 0x9e3779b97f4a7c15
		mix = (mix ~ (mix >> 30)) * 0xbf58476d1ce4e5b9
		mix = (mix ~ (mix >> 27)) * 0x94d049bb133111eb
		return mix ~ (mix >> 31)
	} else {
		mix := data + 0x9e3779b9
		mix = (mix ~ (mix >> 16)) * 0x21f0aaad
		mix = (mix ~ (mix >> 15)) * 0x735a2d97
		return mix ~ (mix >> 15)
	}
}
  243. // Computes the desired position in the array. This is just index % capacity,
  244. // but a procedure as there's some math involved here to recover the capacity.
  245. @(require_results)
  246. map_desired_position :: #force_inline proc "contextless" (m: Raw_Map, hash: Map_Hash) -> uintptr {
  247. // We do not use map_cap since we know the capacity will not be zero here.
  248. capacity := uintptr(1) << map_log2_cap(m)
  249. return uintptr(hash & Map_Hash(capacity - 1))
  250. }
  251. @(require_results)
  252. map_probe_distance :: #force_inline proc "contextless" (m: Raw_Map, hash: Map_Hash, slot: uintptr) -> uintptr {
  253. // We do not use map_cap since we know the capacity will not be zero here.
  254. capacity := uintptr(1) << map_log2_cap(m)
  255. return (slot + capacity - map_desired_position(m, hash)) & (capacity - 1)
  256. }
// When working with the type-erased structure at runtime we need information
// about the map to make working with it possible. This info structure stores
// that.
//
// `Map_Info` and `Map_Cell_Info` are read only data structures and cannot be
// modified after creation
//
// 32-bytes on 64-bit
// 16-bytes on 32-bit
Map_Info :: struct {
	ks:         ^Map_Cell_Info, // Key cell traits.   8-bytes on 64-bit, 4-bytes on 32-bit
	vs:         ^Map_Cell_Info, // Value cell traits. 8-bytes on 64-bit, 4-bytes on 32-bit
	key_hasher: proc "contextless" (key: rawptr, seed: Map_Hash) -> Map_Hash, // 8-bytes on 64-bit, 4-bytes on 32-bit
	key_equal:  proc "contextless" (lhs, rhs: rawptr) -> bool,                // 8-bytes on 64-bit, 4-bytes on 32-bit
}

// The Map_Info structure is basically a pseudo-table of information for a given K and V pair.
// map_info :: proc "contextless" ($T: typeid/map[$K]$V) -> ^Map_Info {...}
map_info :: intrinsics.type_map_info
// Recovers the sub-region addresses of the map's single backing allocation.
// Layout (in order): keys (ks) | values (vs) | hashes (hs) | 2 scratch keys
// (sk) | 2 scratch values (sv). Each boundary is found by indexing one past
// the previous region, so the order of these computations matters.
@(require_results)
map_kvh_data_dynamic :: proc "contextless" (m: Raw_Map, #no_alias info: ^Map_Info) -> (ks: uintptr, vs: uintptr, hs: [^]Map_Hash, sk: uintptr, sv: uintptr) {
	INFO_HS := intrinsics.type_map_cell_info(Map_Hash)
	capacity := uintptr(1) << map_log2_cap(m)
	ks  = map_data(m)
	vs  = map_cell_index_dynamic(ks, info.ks, capacity)  // Skip past ks to get start of vs
	hs_ := map_cell_index_dynamic(vs, info.vs, capacity) // Skip past vs to get start of hs
	sk  = map_cell_index_dynamic(hs_, INFO_HS, capacity) // Skip past hs to get start of sk
	// Need to skip past two elements in the scratch key space to get to the start
	// of the scratch value space, of which there's only two elements as well.
	sv = map_cell_index_dynamic_const(sk, info.ks, 2)
	hs = ([^]Map_Hash)(hs_)
	return
}
  289. @(require_results)
  290. map_kvh_data_values_dynamic :: proc "contextless" (m: Raw_Map, #no_alias info: ^Map_Info) -> (vs: uintptr) {
  291. capacity := uintptr(1) << map_log2_cap(m)
  292. return map_cell_index_dynamic(map_data(m), info.ks, capacity) // Skip past ks to get start of vs
  293. }
  294. @(private, require_results)
  295. map_total_allocation_size :: #force_inline proc "contextless" (capacity: uintptr, info: ^Map_Info) -> uintptr {
  296. round :: #force_inline proc "contextless" (value: uintptr) -> uintptr {
  297. CACHE_MASK :: MAP_CACHE_LINE_SIZE - 1
  298. return (value + CACHE_MASK) &~ CACHE_MASK
  299. }
  300. INFO_HS := intrinsics.type_map_cell_info(Map_Hash)
  301. size := uintptr(0)
  302. size = round(map_cell_index_dynamic(size, info.ks, capacity))
  303. size = round(map_cell_index_dynamic(size, info.vs, capacity))
  304. size = round(map_cell_index_dynamic(size, INFO_HS, capacity))
  305. size = round(map_cell_index_dynamic(size, info.ks, 2)) // Two additional ks for scratch storage
  306. size = round(map_cell_index_dynamic(size, info.vs, 2)) // Two additional vs for scratch storage
  307. return size
  308. }
// The only procedure which needs access to the context is the one which allocates the map.
//
// Allocates (but does not insert into) a map with 1 << log2_capacity slots,
// returning the initialized Raw_Map or an allocator error. A log2_capacity of
// zero yields a valid empty map whose data pointer is nil.
@(require_results)
map_alloc_dynamic :: proc "odin" (info: ^Map_Info, log2_capacity: uintptr, allocator := context.allocator, loc := #caller_location) -> (result: Raw_Map, err: Allocator_Error) {
	result.allocator = allocator // set the allocator always
	if log2_capacity == 0 {
		// Nothing to allocate; the zero value is a usable empty map.
		return
	}
	if log2_capacity >= 64 {
		// Overflowed, would be caused by log2_capacity > 64
		return {}, .Out_Of_Memory
	}
	capacity := uintptr(1) << max(log2_capacity, MAP_MIN_LOG2_CAPACITY)
	CACHE_MASK :: MAP_CACHE_LINE_SIZE - 1
	size := map_total_allocation_size(capacity, info)
	data := mem_alloc_non_zeroed(int(size), MAP_CACHE_LINE_SIZE, allocator, loc) or_return
	data_ptr := uintptr(raw_data(data))
	if data_ptr == 0 {
		err = .Out_Of_Memory
		return
	}
	// The tagged-pointer scheme requires cache-line alignment: the low six
	// bits must be free to hold the log2 capacity.
	if intrinsics.expect(data_ptr & CACHE_MASK != 0, false) {
		panic("allocation not aligned to a cache line", loc)
	} else {
		result.data = data_ptr | log2_capacity // Tagged pointer representation for capacity.
		result.len = 0
		// Zero the hash region so every slot reads as empty.
		map_clear_dynamic(&result, info)
	}
	return
}
// This procedure has to stack allocate storage to store local keys during the
// Robin Hood hashing technique where elements are swapped in the backing
// arrays to reduce variance. This swapping can only be done with memcpy since
// there is no type information.
//
// This procedure returns the address of the just inserted value.
//
// `h` is the precomputed hash for the key at address `ik`; `iv` is the address
// of the value to insert. The caller is responsible for growth and for
// ensuring the key is not already present.
@(require_results)
map_insert_hash_dynamic :: proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info, h: Map_Hash, ik: uintptr, iv: uintptr) -> (result: uintptr) {
	h := h
	pos := map_desired_position(m^, h)
	distance := uintptr(0)
	mask := (uintptr(1) << map_log2_cap(m^)) - 1
	ks, vs, hs, sk, sv := map_kvh_data_dynamic(m^, info)
	// Avoid redundant loads of these values
	size_of_k := info.ks.size_of_type
	size_of_v := info.vs.size_of_type
	// Scratch slot 0 carries the key/value currently being placed.
	k := map_cell_index_dynamic(sk, info.ks, 0)
	v := map_cell_index_dynamic(sv, info.vs, 0)
	intrinsics.mem_copy_non_overlapping(rawptr(k), rawptr(ik), size_of_k)
	intrinsics.mem_copy_non_overlapping(rawptr(v), rawptr(iv), size_of_v)
	// Temporary k and v dynamic storage for swap below
	tk := map_cell_index_dynamic(sk, info.ks, 1)
	tv := map_cell_index_dynamic(sv, info.vs, 1)
	// Classic Robin Hood probe: walk forward, swapping the carried pair with
	// any resident whose probe distance is shorter than ours.
	swap_loop: for {
		element_hash := hs[pos]
		if map_hash_is_empty(element_hash) {
			// Empty slot: place the carried pair here and finish.
			k_dst := map_cell_index_dynamic(ks, info.ks, pos)
			v_dst := map_cell_index_dynamic(vs, info.vs, pos)
			intrinsics.mem_copy_non_overlapping(rawptr(k_dst), rawptr(k), size_of_k)
			intrinsics.mem_copy_non_overlapping(rawptr(v_dst), rawptr(v), size_of_v)
			hs[pos] = h
			// `result` holds the address of the originally-inserted value if it
			// was already placed during an earlier swap.
			return result if result != 0 else v_dst
		}
		if map_hash_is_deleted(element_hash) {
			// Hit a tombstone: switch to the backward-shift strategy below,
			// which compacts the chain while placing the carried pair.
			break swap_loop
		}
		if probe_distance := map_probe_distance(m^, element_hash, pos); distance > probe_distance {
			// The resident is "richer" (closer to home) than us: swap, carry
			// the resident onward. Record where the original value landed.
			if result == 0 {
				result = map_cell_index_dynamic(vs, info.vs, pos)
			}
			kp := map_cell_index_dynamic(ks, info.ks, pos)
			vp := map_cell_index_dynamic(vs, info.vs, pos)
			// Three-copy swap through the scratch slot (no type info, so memcpy).
			intrinsics.mem_copy_non_overlapping(rawptr(tk), rawptr(k), size_of_k)
			intrinsics.mem_copy_non_overlapping(rawptr(k), rawptr(kp), size_of_k)
			intrinsics.mem_copy_non_overlapping(rawptr(kp), rawptr(tk), size_of_k)
			intrinsics.mem_copy_non_overlapping(rawptr(tv), rawptr(v), size_of_v)
			intrinsics.mem_copy_non_overlapping(rawptr(v), rawptr(vp), size_of_v)
			intrinsics.mem_copy_non_overlapping(rawptr(vp), rawptr(tv), size_of_v)
			th := h
			h = hs[pos]
			hs[pos] = th
			distance = probe_distance
		}
		pos = (pos + 1) & mask
		distance += 1
	}
	// backward shift loop: reclaim the tombstone run starting at pos while
	// finding a home for the carried pair.
	hs[pos] = 0
	look_ahead: uintptr = 1
	for {
		la_pos := (pos + look_ahead) & mask
		element_hash := hs[la_pos]
		if map_hash_is_deleted(element_hash) {
			// Absorb consecutive tombstones into the gap.
			look_ahead += 1
			hs[la_pos] = 0
			continue
		}
		k_dst := map_cell_index_dynamic(ks, info.ks, pos)
		v_dst := map_cell_index_dynamic(vs, info.vs, pos)
		if map_hash_is_empty(element_hash) {
			// Chain ends: place the carried pair in the gap.
			intrinsics.mem_copy_non_overlapping(rawptr(k_dst), rawptr(k), size_of_k)
			intrinsics.mem_copy_non_overlapping(rawptr(v_dst), rawptr(v), size_of_v)
			hs[pos] = h
			return result if result != 0 else v_dst
		}
		k_src := map_cell_index_dynamic(ks, info.ks, la_pos)
		v_src := map_cell_index_dynamic(vs, info.vs, la_pos)
		probe_distance := map_probe_distance(m^, element_hash, la_pos)
		if probe_distance < look_ahead {
			// probed can be made ideal while placing saved (ending condition)
			if result == 0 {
				result = v_dst
			}
			intrinsics.mem_copy_non_overlapping(rawptr(k_dst), rawptr(k), size_of_k)
			intrinsics.mem_copy_non_overlapping(rawptr(v_dst), rawptr(v), size_of_v)
			hs[pos] = h
			// This will be an ideal move
			pos = (la_pos - probe_distance) & mask
			look_ahead -= probe_distance
			// shift until we hit ideal/empty
			for probe_distance != 0 {
				k_dst = map_cell_index_dynamic(ks, info.ks, pos)
				v_dst = map_cell_index_dynamic(vs, info.vs, pos)
				intrinsics.mem_copy_non_overlapping(rawptr(k_dst), rawptr(k_src), size_of_k)
				intrinsics.mem_copy_non_overlapping(rawptr(v_dst), rawptr(v_src), size_of_v)
				hs[pos] = element_hash
				hs[la_pos] = 0
				pos = (pos + 1) & mask
				la_pos = (la_pos + 1) & mask
				look_ahead = (la_pos - pos) & mask
				element_hash = hs[la_pos]
				if map_hash_is_empty(element_hash) {
					return
				}
				probe_distance = map_probe_distance(m^, element_hash, la_pos)
				if probe_distance == 0 {
					return
				}
				// can be ideal?
				if probe_distance < look_ahead {
					pos = (la_pos - probe_distance) & mask
				}
				k_src = map_cell_index_dynamic(ks, info.ks, la_pos)
				v_src = map_cell_index_dynamic(vs, info.vs, la_pos)
			}
			return
		} else if distance < probe_distance - look_ahead {
			// shift back probed
			intrinsics.mem_copy_non_overlapping(rawptr(k_dst), rawptr(k_src), size_of_k)
			intrinsics.mem_copy_non_overlapping(rawptr(v_dst), rawptr(v_src), size_of_v)
			hs[pos] = element_hash
			hs[la_pos] = 0
		} else {
			// place saved, save probed
			if result == 0 {
				result = v_dst
			}
			intrinsics.mem_copy_non_overlapping(rawptr(k_dst), rawptr(k), size_of_k)
			intrinsics.mem_copy_non_overlapping(rawptr(v_dst), rawptr(v), size_of_v)
			hs[pos] = h
			intrinsics.mem_copy_non_overlapping(rawptr(k), rawptr(k_src), size_of_k)
			intrinsics.mem_copy_non_overlapping(rawptr(v), rawptr(v_src), size_of_v)
			h = hs[la_pos]
			hs[la_pos] = 0
			distance = probe_distance - look_ahead
		}
		pos = (pos + 1) & mask
		distance += 1
	}
}
  478. @(require_results)
  479. map_grow_dynamic :: proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info, loc := #caller_location) -> Allocator_Error {
  480. log2_capacity := map_log2_cap(m^)
  481. new_capacity := uintptr(1) << max(log2_capacity + 1, MAP_MIN_LOG2_CAPACITY)
  482. return map_reserve_dynamic(m, info, new_capacity, loc)
  483. }
// Ensures the map can hold at least new_capacity slots (rounded up to a power
// of two), allocating a new backing store and rehashing every live entry into
// it. A no-op when the current capacity already suffices.
@(require_results)
map_reserve_dynamic :: proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info, new_capacity: uintptr, loc := #caller_location) -> Allocator_Error {
	// Ceiling integer log2: smallest l with (1 << l) >= x.
	@(require_results)
	ceil_log2 :: #force_inline proc "contextless" (x: uintptr) -> uintptr {
		z := intrinsics.count_leading_zeros(x)
		if z > 0 && x & (x-1) != 0 {
			// Not a power of two: round up by one position.
			z -= 1
		}
		return size_of(uintptr)*8 - 1 - z
	}
	if m.allocator.procedure == nil {
		m.allocator = context.allocator
	}
	new_capacity := new_capacity
	old_capacity := uintptr(map_cap(m^))
	if old_capacity >= new_capacity {
		return nil
	}
	// ceiling nearest power of two
	log2_new_capacity := ceil_log2(new_capacity)
	log2_min_cap := max(MAP_MIN_LOG2_CAPACITY, log2_new_capacity)
	if m.data == 0 {
		// Empty map: nothing to rehash, just allocate.
		m^ = map_alloc_dynamic(info, log2_min_cap, m.allocator, loc) or_return
		return nil
	}
	resized := map_alloc_dynamic(info, log2_min_cap, m.allocator, loc) or_return
	ks, vs, hs, _, _ := map_kvh_data_dynamic(m^, info)
	// Cache these loads to avoid hitting them in the for loop.
	n := m.len
	for i in 0..<old_capacity {
		hash := hs[i]
		if map_hash_is_empty(hash) {
			continue
		}
		if map_hash_is_deleted(hash) {
			continue
		}
		k := map_cell_index_dynamic(ks, info.ks, i)
		v := map_cell_index_dynamic(vs, info.vs, i)
		// Re-hash with the new map's seed (the seed is derived from the data
		// pointer, which differs per allocation).
		hash = info.key_hasher(rawptr(k), map_seed(resized))
		_ = map_insert_hash_dynamic(&resized, info, hash, k, v)
		// Only need to do this comparison on each actually added pair, so do not
		// fold it into the for loop comparator as a micro-optimization.
		n -= 1
		if n == 0 {
			break
		}
	}
	map_free_dynamic(m^, info, loc) or_return
	// Adopt the new backing store; len and allocator are unchanged.
	m.data = resized.data
	return nil
}
// Halves the map's capacity when its entry count fits within the smaller
// table's load-factor threshold, rehashing every live entry. Returns whether
// a shrink actually happened.
@(require_results)
map_shrink_dynamic :: proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info, loc := #caller_location) -> (did_shrink: bool, err: Allocator_Error) {
	if m.allocator.procedure == nil {
		m.allocator = context.allocator
	}
	// Cannot shrink the capacity if the number of items in the map would exceed
	// one minus the current log2 capacity's resize threshold. That is the shrunk
	// map needs to be within the max load factor.
	log2_capacity := map_log2_cap(m^)
	if uintptr(m.len) >= map_load_factor(log2_capacity - 1) {
		return false, nil
	}
	shrunk := map_alloc_dynamic(info, log2_capacity - 1, m.allocator) or_return
	capacity := uintptr(1) << log2_capacity
	ks, vs, hs, _, _ := map_kvh_data_dynamic(m^, info)
	// Cache the length to stop iterating once all live entries are moved.
	n := m.len
	for i in 0..<capacity {
		hash := hs[i]
		if map_hash_is_empty(hash) {
			continue
		}
		if map_hash_is_deleted(hash) {
			continue
		}
		k := map_cell_index_dynamic(ks, info.ks, i)
		v := map_cell_index_dynamic(vs, info.vs, i)
		// Re-hash with the new map's seed (derived from its data pointer).
		hash = info.key_hasher(rawptr(k), map_seed(shrunk))
		_ = map_insert_hash_dynamic(&shrunk, info, hash, k, v)
		// Only need to do this comparison on each actually added pair, so do not
		// fold it into the for loop comparator as a micro-optimization.
		n -= 1
		if n == 0 {
			break
		}
	}
	map_free_dynamic(m^, info, loc) or_return
	// Adopt the smaller backing store; len and allocator are unchanged.
	m.data = shrunk.data
	return true, nil
}
  575. @(require_results)
  576. map_free_dynamic :: proc "odin" (m: Raw_Map, info: ^Map_Info, loc := #caller_location) -> Allocator_Error {
  577. ptr := rawptr(map_data(m))
  578. size := int(map_total_allocation_size(uintptr(map_cap(m)), info))
  579. err := mem_free_with_size(ptr, size, m.allocator, loc)
  580. #partial switch err {
  581. case .None, .Mode_Not_Implemented:
  582. return nil
  583. }
  584. return err
  585. }
  586. @(require_results)
  587. map_lookup_dynamic :: proc "contextless" (m: Raw_Map, #no_alias info: ^Map_Info, k: uintptr) -> (index: uintptr, ok: bool) {
  588. if map_len(m) == 0 {
  589. return 0, false
  590. }
  591. h := info.key_hasher(rawptr(k), map_seed(m))
  592. p := map_desired_position(m, h)
  593. d := uintptr(0)
  594. c := (uintptr(1) << map_log2_cap(m)) - 1
  595. ks, _, hs, _, _ := map_kvh_data_dynamic(m, info)
  596. for {
  597. element_hash := hs[p]
  598. if map_hash_is_empty(element_hash) {
  599. return 0, false
  600. } else if d > map_probe_distance(m, element_hash, p) {
  601. return 0, false
  602. } else if element_hash == h && info.key_equal(rawptr(k), rawptr(map_cell_index_dynamic(ks, info.ks, p))) {
  603. return p, true
  604. }
  605. p = (p + 1) & c
  606. d += 1
  607. }
  608. }
  609. @(require_results)
  610. map_exists_dynamic :: proc "contextless" (m: Raw_Map, #no_alias info: ^Map_Info, k: uintptr) -> (ok: bool) {
  611. if map_len(m) == 0 {
  612. return false
  613. }
  614. h := info.key_hasher(rawptr(k), map_seed(m))
  615. p := map_desired_position(m, h)
  616. d := uintptr(0)
  617. c := (uintptr(1) << map_log2_cap(m)) - 1
  618. ks, _, hs, _, _ := map_kvh_data_dynamic(m, info)
  619. for {
  620. element_hash := hs[p]
  621. if map_hash_is_empty(element_hash) {
  622. return false
  623. } else if d > map_probe_distance(m, element_hash, p) {
  624. return false
  625. } else if element_hash == h && info.key_equal(rawptr(k), rawptr(map_cell_index_dynamic(ks, info.ks, p))) {
  626. return true
  627. }
  628. p = (p + 1) & c
  629. d += 1
  630. }
  631. }
// Erases the entry for the key pointed to by `k`, if present.
//
// On success returns the addresses (as uintptrs) of the stored key and value
// cells — so the caller can destroy or move their contents, which are NOT
// cleared here — and ok = true. Fails via `or_return` when the key is absent.
@(require_results)
map_erase_dynamic :: #force_inline proc "contextless" (#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info, k: uintptr) -> (old_k, old_v: uintptr, ok: bool) {
	index := map_lookup_dynamic(m^, info, k) or_return
	ks, vs, hs, _, _ := map_kvh_data_dynamic(m^, info)
	// Provisionally mark the slot deleted; it may be downgraded to fully
	// empty below once the following slot has been inspected.
	hs[index] |= TOMBSTONE_MASK
	old_k = map_cell_index_dynamic(ks, info.ks, index)
	old_v = map_cell_index_dynamic(vs, info.vs, index)
	m.len -= 1
	ok = true
	mask := (uintptr(1)<<map_log2_cap(m^)) - 1
	curr_index := uintptr(index)
	next_index := (curr_index + 1) & mask
	// if the next element is empty or has zero probe distance, then any lookup
	// will always fail on the next, so we can clear both of them
	hash := hs[next_index]
	if map_hash_is_empty(hash) || map_probe_distance(m^, hash, next_index) == 0 {
		// No probe chain continues through this slot: a plain empty marker
		// keeps future probe sequences short.
		hs[curr_index] = 0
	} else {
		// A chain passes through: a tombstone forces lookups to keep probing.
		hs[curr_index] |= TOMBSTONE_MASK
	}
	return
}
  654. map_clear_dynamic :: #force_inline proc "contextless" (#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info) {
  655. if m.data == 0 {
  656. return
  657. }
  658. _, _, hs, _, _ := map_kvh_data_dynamic(m^, info)
  659. intrinsics.mem_zero(rawptr(hs), map_cap(m^) * size_of(Map_Hash))
  660. m.len = 0
  661. }
  662. @(require_results)
  663. map_kvh_data_static :: #force_inline proc "contextless" (m: $T/map[$K]$V) -> (ks: [^]Map_Cell(K), vs: [^]Map_Cell(V), hs: [^]Map_Hash) {
  664. capacity := uintptr(cap(m))
  665. ks = ([^]Map_Cell(K))(map_data(transmute(Raw_Map)m))
  666. vs = ([^]Map_Cell(V))(map_cell_index_static(ks, capacity))
  667. hs = ([^]Map_Hash)(map_cell_index_static(vs, capacity))
  668. return
  669. }
  670. @(require_results)
  671. map_get :: proc "contextless" (m: $T/map[$K]$V, key: K) -> (stored_key: K, stored_value: V, ok: bool) {
  672. rm := transmute(Raw_Map)m
  673. if rm.len == 0 {
  674. return
  675. }
  676. info := intrinsics.type_map_info(T)
  677. key := key
  678. h := info.key_hasher(&key, map_seed(rm))
  679. pos := map_desired_position(rm, h)
  680. distance := uintptr(0)
  681. mask := (uintptr(1) << map_log2_cap(rm)) - 1
  682. ks, vs, hs := map_kvh_data_static(m)
  683. for {
  684. element_hash := hs[pos]
  685. if map_hash_is_empty(element_hash) {
  686. return
  687. } else if distance > map_probe_distance(rm, element_hash, pos) {
  688. return
  689. } else if element_hash == h {
  690. element_key := map_cell_index_static(ks, pos)
  691. if info.key_equal(&key, rawptr(element_key)) {
  692. element_value := map_cell_index_static(vs, pos)
  693. stored_key = (^K)(element_key)^
  694. stored_value = (^V)(element_value)^
  695. ok = true
  696. return
  697. }
  698. }
  699. pos = (pos + 1) & mask
  700. distance += 1
  701. }
  702. }
  703. // IMPORTANT: USED WITHIN THE COMPILER
  704. __dynamic_map_get :: proc "contextless" (#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info, h: Map_Hash, key: rawptr) -> (ptr: rawptr) {
  705. if m.len == 0 {
  706. return nil
  707. }
  708. pos := map_desired_position(m^, h)
  709. distance := uintptr(0)
  710. mask := (uintptr(1) << map_log2_cap(m^)) - 1
  711. ks, vs, hs, _, _ := map_kvh_data_dynamic(m^, info)
  712. for {
  713. element_hash := hs[pos]
  714. if map_hash_is_empty(element_hash) {
  715. return nil
  716. } else if distance > map_probe_distance(m^, element_hash, pos) {
  717. return nil
  718. } else if element_hash == h && info.key_equal(key, rawptr(map_cell_index_dynamic(ks, info.ks, pos))) {
  719. return rawptr(map_cell_index_dynamic(vs, info.vs, pos))
  720. }
  721. pos = (pos + 1) & mask
  722. distance += 1
  723. }
  724. }
  725. // IMPORTANT: USED WITHIN THE COMPILER
  726. __dynamic_map_check_grow :: proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info, loc := #caller_location) -> (err: Allocator_Error, has_grown: bool) {
  727. if m.len >= map_resize_threshold(m^) {
  728. return map_grow_dynamic(m, info, loc), true
  729. }
  730. return nil, false
  731. }
  732. __dynamic_map_set_without_hash :: proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info, key, value: rawptr, loc := #caller_location) -> rawptr {
  733. return __dynamic_map_set(m, info, info.key_hasher(key, map_seed(m^)), key, value, loc)
  734. }
// IMPORTANT: USED WITHIN THE COMPILER
//
// Inserts `value` under `key` (pre-hashed as `hash`), overwriting the value
// in place when the key already exists. Returns a pointer to the stored
// value cell, or nil if growing the map failed.
__dynamic_map_set :: proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info, hash: Map_Hash, key, value: rawptr, loc := #caller_location) -> rawptr {
	// Fast path: key already present — just copy the new value over the old.
	if found := __dynamic_map_get(m, info, hash, key); found != nil {
		intrinsics.mem_copy_non_overlapping(found, value, info.vs.size_of_type)
		return found
	}
	hash := hash
	err, has_grown := __dynamic_map_check_grow(m, info, loc)
	if err != nil {
		return nil
	}
	if has_grown {
		// The seed passed to the hasher comes from the map itself
		// (map_seed) and can differ after the storage was reallocated, so
		// the caller-supplied hash is stale and must be recomputed.
		hash = info.key_hasher(key, map_seed(m^))
	}
	result := map_insert_hash_dynamic(m, info, hash, uintptr(key), uintptr(value))
	m.len += 1
	return rawptr(result)
}
  753. // IMPORTANT: USED WITHIN THE COMPILER
  754. @(private)
  755. __dynamic_map_reserve :: proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info, new_capacity: uint, loc := #caller_location) -> Allocator_Error {
  756. return map_reserve_dynamic(m, info, uintptr(new_capacity), loc)
  757. }
  758. // NOTE: the default hashing algorithm derives from fnv64a, with some minor modifications to work for `map` type:
  759. //
  760. // * Convert a `0` result to `1`
  761. // * "empty entry"
  762. // * Prevent the top bit from being set
  763. // * "deleted entry"
  764. //
// Both of these modifications are necessary for the implementation of the `map` type
// FNV-64 offset basis; base seed for the default FNV-1a style hashers below.
INITIAL_HASH_SEED :: 0xcbf29ce484222325
// All bits of a uintptr except the top one; masking with this keeps the top
// bit clear, which the NOTE above reserves to mark a deleted entry.
HASH_MASK :: 1 << (8*size_of(uintptr) - 1) -1
  768. default_hasher :: #force_inline proc "contextless" (data: rawptr, seed: uintptr, N: int) -> uintptr {
  769. h := u64(seed) + INITIAL_HASH_SEED
  770. p := ([^]byte)(data)
  771. for _ in 0..<N {
  772. h = (h ~ u64(p[0])) * 0x100000001b3
  773. p = p[1:]
  774. }
  775. h &= HASH_MASK
  776. return uintptr(h) | uintptr(uintptr(h) == 0)
  777. }
  778. default_hasher_string :: proc "contextless" (data: rawptr, seed: uintptr) -> uintptr {
  779. str := (^[]byte)(data)
  780. return default_hasher(raw_data(str^), seed, len(str))
  781. }
  782. default_hasher_cstring :: proc "contextless" (data: rawptr, seed: uintptr) -> uintptr {
  783. h := u64(seed) + INITIAL_HASH_SEED
  784. if ptr := (^[^]byte)(data)^; ptr != nil {
  785. for ptr[0] != 0 {
  786. h = (h ~ u64(ptr[0])) * 0x100000001b3
  787. ptr = ptr[1:]
  788. }
  789. }
  790. h &= HASH_MASK
  791. return uintptr(h) | uintptr(uintptr(h) == 0)
  792. }