dynamic_map_internal.odin 36 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063
  1. package runtime
  2. import "base:intrinsics"
  3. _ :: intrinsics
  4. // High performance, cache-friendly, open-addressed Robin Hood hashing hash map
  5. // data structure with various optimizations for Odin.
  6. //
  7. // Copyright 2022 (c) Dale Weiler
  8. //
  9. // The core of the hash map data structure is the Raw_Map struct which is a
  10. // type-erased representation of the map. This type-erased representation is
  11. // used in two ways: static and dynamic. When static type information is known,
  12. // the procedures suffixed with _static should be used instead of _dynamic. The
  13. // static procedures are optimized since they have type information. Hashing of
  14. // keys, comparison of keys, and data lookup are all optimized. When type
  15. // information is not known, the procedures suffixed with _dynamic should be
  16. // used. The representation of the map is the same for both static and dynamic,
  17. // and procedures of each can be mixed and matched. The purpose of the dynamic
  18. // representation is to enable reflection and runtime manipulation of the map.
  19. // The dynamic procedures all take an additional Map_Info structure parameter
  20. // which carries runtime values describing the size, alignment, and offset of
  21. // various traits of a given key and value type pair. The Map_Info value can
  22. // be created by calling map_info(K, V) with the key and value typeids.
  23. //
  24. // This map implementation makes extensive use of uintptr for representing
  25. // sizes, lengths, capacities, masks, pointers, offsets, and addresses to avoid
  26. // expensive sign extension and masking that would be generated if types were
  27. // casted all over. The only place regular ints show up is in the cap() and
  28. // len() implementations.
  29. //
  30. // To make this map cache-friendly it uses a novel strategy to ensure keys and
  31. // values of the map are always cache-line aligned and that no single key or
  32. // value of any type ever straddles a cache-line. This cache efficiency makes
  33. // for quick lookups because the linear-probe always addresses data in a cache
  34. // friendly way. This is enabled through the use of a special meta-type called
  35. // a Map_Cell which packs as many values of a given type into a local array adding
  36. // internal padding to round to MAP_CACHE_LINE_SIZE. One other benefit to storing
  37. // the internal data in this manner is false sharing no longer occurs when using
  38. // a map, enabling efficient concurrent access of the map data structure with
  39. // minimal locking if desired.
// With Robin Hood hashing a maximum load factor of 75% is ideal.
MAP_LOAD_FACTOR :: 75
// Minimum log2 capacity.
MAP_MIN_LOG2_CAPACITY :: 3 // 8 elements
// Has to be less than 100% though.
#assert(MAP_LOAD_FACTOR < 100)
// This is safe to change. The log2 size of a cache-line. At minimum it has to
// be six though. Higher cache line sizes are permitted.
MAP_CACHE_LINE_LOG2 :: 6
// The size of a cache-line in bytes (64 with the default log2 of 6).
MAP_CACHE_LINE_SIZE :: 1 << MAP_CACHE_LINE_LOG2
// The minimum cache-line size allowed by this implementation is 64 bytes since
// we need 6 bits in the base pointer to store the integer log2 capacity, which
// at maximum is 63. Odin uses signed integers to represent length and capacity,
// so only 63 bits are needed in the maximum case.
#assert(MAP_CACHE_LINE_SIZE >= 64)
  56. // Map_Cell type that packs multiple T in such a way to ensure that each T stays
  57. // aligned by align_of(T) and such that align_of(Map_Cell(T)) % MAP_CACHE_LINE_SIZE == 0
  58. //
  59. // This means a value of type T will never straddle a cache-line.
  60. //
  61. // When multiple Ts can fit in a single cache-line the data array will have more
  62. // than one element. When it cannot, the data array will have one element and
  63. // an array of Map_Cell(T) will be padded to stay a multiple of MAP_CACHE_LINE_SIZE.
  64. //
  65. // We rely on the type system to do all the arithmetic and padding for us here.
  66. //
  67. // The usual array[index] indexing for []T backed by a []Map_Cell(T) becomes a bit
  68. // more involved as there now may be internal padding. The indexing now becomes
  69. //
  70. // N :: len(Map_Cell(T){}.data)
  71. // i := index / N
  72. // j := index % N
  73. // cell[i].data[j]
  74. //
  75. // However, since len(Map_Cell(T){}.data) is a compile-time constant, there are some
  76. // optimizations we can do to eliminate the need for any divisions as N will
  77. // be bounded by [1, 64).
  78. //
  79. // In the optimal case, len(Map_Cell(T){}.data) = 1 so the cell array can be treated
  80. // as a regular array of T, which is the case for hashes.
// Packs as many T into one cache line as fit; #align pads the struct out to a
// multiple of MAP_CACHE_LINE_SIZE so no element ever straddles a line.
Map_Cell :: struct($T: typeid) #align(MAP_CACHE_LINE_SIZE) {
	// One element when T is zero-sized or at least a full cache line,
	// otherwise MAP_CACHE_LINE_SIZE / size_of(T) elements.
	data: [MAP_CACHE_LINE_SIZE / size_of(T) when 0 < size_of(T) && size_of(T) < MAP_CACHE_LINE_SIZE else 1]T,
}
// So we can operate on a cell data structure at runtime without any type
// information, we have a simple table that stores some traits about the cell.
//
// 32-bytes on 64-bit
// 16-bytes on 32-bit
Map_Cell_Info :: struct {
	size_of_type:      uintptr, // 8-bytes on 64-bit, 4-bytes on 32-bit
	align_of_type:     uintptr, // 8-bytes on 64-bit, 4-bytes on 32-bit
	size_of_cell:      uintptr, // 8-bytes on 64-bit, 4-bytes on 32-bit
	elements_per_cell: uintptr, // 8-bytes on 64-bit, 4-bytes on 32-bit
}
// Compiler intrinsic yielding the ^Map_Cell_Info for a given type.
// map_cell_info :: proc "contextless" ($T: typeid) -> ^Map_Cell_Info {...}
map_cell_info :: intrinsics.type_map_cell_info
  97. // Same as the above procedure but at runtime with the cell Map_Cell_Info value.
  98. @(require_results)
  99. map_cell_index_dynamic :: #force_inline proc "contextless" (base: uintptr, #no_alias info: ^Map_Cell_Info, index: uintptr) -> uintptr {
  100. // Micro-optimize the common cases to save on integer division.
  101. elements_per_cell := uintptr(info.elements_per_cell)
  102. size_of_cell := uintptr(info.size_of_cell)
  103. switch elements_per_cell {
  104. case 1:
  105. return base + (index * size_of_cell)
  106. case 2:
  107. cell_index := index >> 1
  108. data_index := index & 1
  109. size_of_type := uintptr(info.size_of_type)
  110. return base + (cell_index * size_of_cell) + (data_index * size_of_type)
  111. case:
  112. cell_index := index / elements_per_cell
  113. data_index := index % elements_per_cell
  114. size_of_type := uintptr(info.size_of_type)
  115. return base + (cell_index * size_of_cell) + (data_index * size_of_type)
  116. }
  117. }
  118. // Same as above procedure but with compile-time constant index.
  119. @(require_results)
  120. map_cell_index_dynamic_const :: proc "contextless" (base: uintptr, #no_alias info: ^Map_Cell_Info, $INDEX: uintptr) -> uintptr {
  121. elements_per_cell := uintptr(info.elements_per_cell)
  122. size_of_cell := uintptr(info.size_of_cell)
  123. size_of_type := uintptr(info.size_of_type)
  124. cell_index := INDEX / elements_per_cell
  125. data_index := INDEX % elements_per_cell
  126. return base + (cell_index * size_of_cell) + (data_index * size_of_type)
  127. }
// We always round the capacity to a power of two so this becomes [16]Foo, which
// works out to [4]Cell(Foo).
//
// The following compile-time procedure indexes such a [N]Cell(T) structure as
// if it were a flat array accounting for the internal padding introduced by the
// Cell structure.
@(require_results)
map_cell_index_static :: #force_inline proc "contextless" (cells: [^]Map_Cell($T), index: uintptr) -> ^T #no_bounds_check {
	// Elements per cell; 1 for zero-sized T to avoid division by zero.
	N :: size_of(Map_Cell(T){}.data) / size_of(T) when size_of(T) > 0 else 1
	#assert(N <= MAP_CACHE_LINE_SIZE)
	when size_of(Map_Cell(T)) == size_of([N]T) {
		// No padding case, can treat as a regular array of []T.
		return &([^]T)(cells)[index]
	} else when (N & (N - 1)) == 0 && N <= 8*size_of(uintptr) {
		// Likely case, N is a power of two because T is a power of two.
		// Unique case, no need to index data here since only one element.
		when N == 1 {
			return &cells[index].data[0]
		}
		// Compute the integer log 2 of N, this is the shift amount to index the
		// correct cell. Odin's intrinsics.count_leading_zeros does not produce a
		// constant, hence this approach. We only need to check up to N = 64.
		SHIFT :: 1 when N == 2  else
		         2 when N == 4  else
		         3 when N == 8  else
		         4 when N == 16 else
		         5 when N == 32 else 6
		#assert(SHIFT <= MAP_CACHE_LINE_LOG2)
		return &cells[index >> SHIFT].data[index & (N - 1)]
	} else {
		// Least likely (and worst case), we pay for a division operation but we
		// assume the compiler does not actually generate a division. N will be in the
		// range [1, CACHE_LINE_SIZE) and not a power of two.
		return &cells[index / N].data[index % N]
	}
}
// len() for map: the number of live entries currently stored.
@(require_results)
map_len :: #force_inline proc "contextless" (m: Raw_Map) -> int {
	return int(m.len)
}
// cap() for map
@(require_results)
map_cap :: #force_inline proc "contextless" (m: Raw_Map) -> int {
	// The data uintptr stores the capacity in the lower six bits which gives a
	// maximum value of 2^6-1, or 63. We store the integer log2 of capacity
	// since our capacity is always a power of two. We only need 63 bits as Odin
	// represents length and capacity as a signed integer.
	return 0 if m.data == 0 else 1 << map_log2_cap(m)
}
// Query the load factor of the map. This is not actually configurable, but
// some math is needed to compute it. Compute it as a fixed point percentage to
// avoid floating point operations. This division can be optimized out by
// multiplying by the multiplicative inverse of 100.
//
// Returns the maximum number of live entries for the given log2 capacity.
@(require_results)
map_load_factor :: #force_inline proc "contextless" (log2_capacity: uintptr) -> uintptr {
	return ((uintptr(1) << log2_capacity) * MAP_LOAD_FACTOR) / 100
}
// Entry count at which the map must grow to remain within MAP_LOAD_FACTOR.
@(require_results)
map_resize_threshold :: #force_inline proc "contextless" (m: Raw_Map) -> uintptr {
	return map_load_factor(map_log2_cap(m))
}
// The data stores the log2 capacity in the lower six bits. This is primarily
// used in the implementation rather than map_cap since the check for data = 0
// isn't necessary in the implementation. cap() on the otherhand needs to work
// when called on an empty map.
@(require_results)
map_log2_cap :: #force_inline proc "contextless" (m: Raw_Map) -> uintptr {
	// Mask off everything but the 6 tag bits.
	return m.data & (64 - 1)
}
// Canonicalize the data by removing the tagged capacity stored in the lower six
// bits of the data uintptr. Valid because the allocation is cache-line aligned
// (at least 64 bytes), so the low 6 bits of the true pointer are zero.
@(require_results)
map_data :: #force_inline proc "contextless" (m: Raw_Map) -> uintptr {
	return m.data &~ uintptr(64 - 1)
}
// Hashes are stored pointer-sized, alongside the keys and values.
Map_Hash :: uintptr
// The most significant bit of a stored hash marks the slot as a tombstone
// (deleted); a stored hash of zero marks the slot as empty.
TOMBSTONE_MASK :: 1<<(size_of(Map_Hash)*8 - 1)
// Procedure to check if a slot is empty for a given hash. This is represented
// by the zero value to make the zero value useful. This is a procedure just
// for prose reasons.
@(require_results)
map_hash_is_empty :: #force_inline proc "contextless" (hash: Map_Hash) -> bool {
	return hash == 0
}
// Reports whether a stored hash marks a tombstone (a deleted slot).
// NOTE(review): declared #force_no_inline unlike the sibling predicates here —
// presumably a deliberate code-size/branch choice; confirm before changing.
@(require_results)
map_hash_is_deleted :: #force_no_inline proc "contextless" (hash: Map_Hash) -> bool {
	// The MSB indicates a tombstone
	return hash & TOMBSTONE_MASK != 0
}
// Reports whether a stored hash denotes a live entry: not empty (zero) and
// not a tombstone.
@(require_results)
map_hash_is_valid :: #force_inline proc "contextless" (hash: Map_Hash) -> bool {
	// The MSB indicates a tombstone. Bitwise & on the two boolean conditions
	// keeps this branchless.
	return (hash != 0) & (hash & TOMBSTONE_MASK == 0)
}
// Per-map hash seed, derived from the (canonicalized) data pointer so each
// allocation hashes keys differently.
@(require_results)
map_seed :: #force_inline proc "contextless" (m: Raw_Map) -> uintptr {
	return map_seed_from_map_data(map_data(m))
}
// splitmix for uintptr: mixes the data pointer into a well-distributed seed.
// The 64-bit branch uses the splitmix64 finalizer constants; the else branch
// is a 32-bit analogue.
@(require_results)
map_seed_from_map_data :: #force_inline proc "contextless" (data: uintptr) -> uintptr {
	when size_of(uintptr) == size_of(u64) {
		mix := data + 0x9e3779b97f4a7c15
		mix = (mix ~ (mix >> 30)) * 0xbf58476d1ce4e5b9
		mix = (mix ~ (mix >> 27)) * 0x94d049bb133111eb
		return mix ~ (mix >> 31)
	} else {
		mix := data + 0x9e3779b9
		mix = (mix ~ (mix >> 16)) * 0x21f0aaad
		mix = (mix ~ (mix >> 15)) * 0x735a2d97
		return mix ~ (mix >> 15)
	}
}
// Computes the desired position in the array. This is just index % capacity,
// but a procedure as there's some math involved here to recover the capacity.
@(require_results)
map_desired_position :: #force_inline proc "contextless" (m: Raw_Map, hash: Map_Hash) -> uintptr {
	// We do not use map_cap since we know the capacity will not be zero here.
	capacity := uintptr(1) << map_log2_cap(m)
	return uintptr(hash & Map_Hash(capacity - 1))
}
// How far `slot` is from the hash's desired position, wrapping around the
// power-of-two table. Robin Hood invariants are expressed in this distance.
@(require_results)
map_probe_distance :: #force_inline proc "contextless" (m: Raw_Map, hash: Map_Hash, slot: uintptr) -> uintptr {
	// We do not use map_cap since we know the capacity will not be zero here.
	capacity := uintptr(1) << map_log2_cap(m)
	return (slot + capacity - map_desired_position(m, hash)) & (capacity - 1)
}
// When working with the type-erased structure at runtime we need information
// about the map to make working with it possible. This info structure stores
// that.
//
// `Map_Info` and `Map_Cell_Info` are read only data structures and cannot be
// modified after creation
//
// 32-bytes on 64-bit
// 16-bytes on 32-bit
Map_Info :: struct {
	ks:         ^Map_Cell_Info, // key cell traits; 8-bytes on 64-bit, 4-bytes on 32-bit
	vs:         ^Map_Cell_Info, // value cell traits; 8-bytes on 64-bit, 4-bytes on 32-bit
	key_hasher: proc "contextless" (key: rawptr, seed: Map_Hash) -> Map_Hash, // 8-bytes on 64-bit, 4-bytes on 32-bit
	key_equal:  proc "contextless" (lhs, rhs: rawptr) -> bool, // 8-bytes on 64-bit, 4-bytes on 32-bit
}
// The Map_Info structure is basically a pseudo-table of information for a given K and V pair.
// Compiler intrinsic yielding the ^Map_Info for a given map type.
// map_info :: proc "contextless" ($T: typeid/map[$K]$V) -> ^Map_Info {...}
map_info :: intrinsics.type_map_info
// Resolves the base addresses of the regions inside the map's single
// allocation: key array (ks), value array (vs), hash array (hs), and the
// two-element scratch key (sk) and scratch value (sv) regions used by insert.
@(require_results)
map_kvh_data_dynamic :: proc "contextless" (m: Raw_Map, #no_alias info: ^Map_Info) -> (ks: uintptr, vs: uintptr, hs: [^]Map_Hash, sk: uintptr, sv: uintptr) {
	INFO_HS := intrinsics.type_map_cell_info(Map_Hash)
	capacity := uintptr(1) << map_log2_cap(m)
	ks = map_data(m)
	vs = map_cell_index_dynamic(ks, info.ks, capacity)   // Skip past ks to get start of vs
	hs_ := map_cell_index_dynamic(vs, info.vs, capacity) // Skip past vs to get start of hs
	sk = map_cell_index_dynamic(hs_, INFO_HS, capacity)  // Skip past hs to get start of sk
	// Need to skip past two elements in the scratch key space to get to the start
	// of the scratch value space, of which there's only two elements as well.
	sv = map_cell_index_dynamic_const(sk, info.ks, 2)
	hs = ([^]Map_Hash)(hs_)
	return
}
// Resolves just the base address of the value array within the allocation.
@(require_results)
map_kvh_data_values_dynamic :: proc "contextless" (m: Raw_Map, #no_alias info: ^Map_Info) -> (vs: uintptr) {
	capacity := uintptr(1) << map_log2_cap(m)
	return map_cell_index_dynamic(map_data(m), info.ks, capacity) // Skip past ks to get start of vs
}
  293. @(require_results)
  294. map_total_allocation_size :: #force_inline proc "contextless" (capacity: uintptr, info: ^Map_Info) -> uintptr {
  295. round :: #force_inline proc "contextless" (value: uintptr) -> uintptr {
  296. CACHE_MASK :: MAP_CACHE_LINE_SIZE - 1
  297. return (value + CACHE_MASK) &~ CACHE_MASK
  298. }
  299. INFO_HS := intrinsics.type_map_cell_info(Map_Hash)
  300. size := uintptr(0)
  301. size = round(map_cell_index_dynamic(size, info.ks, capacity))
  302. size = round(map_cell_index_dynamic(size, info.vs, capacity))
  303. size = round(map_cell_index_dynamic(size, INFO_HS, capacity))
  304. size = round(map_cell_index_dynamic(size, info.ks, 2)) // Two additional ks for scratch storage
  305. size = round(map_cell_index_dynamic(size, info.vs, 2)) // Two additional vs for scratch storage
  306. return size
  307. }
// Typed convenience wrapper: total allocation size for a concrete map value.
@(require_results)
map_total_allocation_size_from_value :: #force_inline proc "contextless" (m: $M/map[$K]$V) -> uintptr {
	return map_total_allocation_size(uintptr(cap(m)), map_info(M))
}
  312. // The only procedure which needs access to the context is the one which allocates the map.
  313. @(require_results)
  314. map_alloc_dynamic :: proc "odin" (info: ^Map_Info, log2_capacity: uintptr, allocator := context.allocator, loc := #caller_location) -> (result: Raw_Map, err: Allocator_Error) {
  315. result.allocator = allocator // set the allocator always
  316. if log2_capacity == 0 {
  317. return
  318. }
  319. if log2_capacity >= 64 {
  320. // Overflowed, would be caused by log2_capacity > 64
  321. return {}, .Out_Of_Memory
  322. }
  323. capacity := uintptr(1) << max(log2_capacity, MAP_MIN_LOG2_CAPACITY)
  324. CACHE_MASK :: MAP_CACHE_LINE_SIZE - 1
  325. size := map_total_allocation_size(capacity, info)
  326. data := mem_alloc_non_zeroed(int(size), MAP_CACHE_LINE_SIZE, allocator, loc) or_return
  327. data_ptr := uintptr(raw_data(data))
  328. if data_ptr == 0 {
  329. err = .Out_Of_Memory
  330. return
  331. }
  332. if intrinsics.expect(data_ptr & CACHE_MASK != 0, false) {
  333. panic("allocation not aligned to a cache line", loc)
  334. } else {
  335. result.data = data_ptr | log2_capacity // Tagged pointer representation for capacity.
  336. result.len = 0
  337. map_clear_dynamic(&result, info)
  338. }
  339. return
  340. }
// This procedure has to stack allocate storage to store local keys during the
// Robin Hood hashing technique where elements are swapped in the backing
// arrays to reduce variance. This swapping can only be done with memcpy since
// there is no type information.
//
// This procedure returns the address of the just inserted value, and will
// return 'nil' if there was no room to insert the entry
@(require_results)
map_insert_hash_dynamic_with_key :: proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info, h: Map_Hash, ik: uintptr, iv: uintptr) -> (key: uintptr, result: uintptr) {
	h := h
	pos := map_desired_position(m^, h)
	distance := uintptr(0)
	mask := (uintptr(1) << map_log2_cap(m^)) - 1
	ks, vs, hs, sk, sv := map_kvh_data_dynamic(m^, info)
	// Avoid redundant loads of these values
	size_of_k := info.ks.size_of_type
	size_of_v := info.vs.size_of_type
	// Copy the incoming key/value into the first scratch slot; this is the
	// "saved" element being carried through the probe below.
	k := map_cell_index_dynamic(sk, info.ks, 0)
	v := map_cell_index_dynamic(sv, info.vs, 0)
	intrinsics.mem_copy_non_overlapping(rawptr(k), rawptr(ik), size_of_k)
	intrinsics.mem_copy_non_overlapping(rawptr(v), rawptr(iv), size_of_v)
	// Temporary k and v dynamic storage for swap below
	tk := map_cell_index_dynamic(sk, info.ks, 1)
	tv := map_cell_index_dynamic(sv, info.vs, 1)
	swap_loop: for {
		if distance > mask {
			// Failed to find an empty slot and prevent infinite loop
			panic("unable to insert into a map")
		}
		element_hash := hs[pos]
		if map_hash_is_empty(element_hash) {
			// Empty slot: place the saved element here and finish.
			k_dst := map_cell_index_dynamic(ks, info.ks, pos)
			v_dst := map_cell_index_dynamic(vs, info.vs, pos)
			intrinsics.mem_copy_non_overlapping(rawptr(k_dst), rawptr(k), size_of_k)
			intrinsics.mem_copy_non_overlapping(rawptr(v_dst), rawptr(v), size_of_v)
			hs[pos] = h
			// result == 0 means the originally inserted pair has not yet been
			// placed; record where it ended up.
			if result == 0 {
				key = k_dst
				result = v_dst
			}
			return
		}
		if map_hash_is_deleted(element_hash) {
			// Tombstone: reuse this slot via the backward-shift loop below,
			// which also compacts the probe chain that follows it.
			break swap_loop
		}
		if probe_distance := map_probe_distance(m^, element_hash, pos); distance > probe_distance {
			// Robin Hood swap: the resident is closer to its ideal slot than
			// the saved element, so the saved element takes this slot and the
			// resident becomes the new saved element.
			kp := map_cell_index_dynamic(ks, info.ks, pos)
			vp := map_cell_index_dynamic(vs, info.vs, pos)
			if result == 0 {
				key = kp
				result = vp
			}
			// Three-way memcpy swap of keys through the second scratch slot.
			intrinsics.mem_copy_non_overlapping(rawptr(tk), rawptr(k), size_of_k)
			intrinsics.mem_copy_non_overlapping(rawptr(k), rawptr(kp), size_of_k)
			intrinsics.mem_copy_non_overlapping(rawptr(kp), rawptr(tk), size_of_k)
			// ...and of values.
			intrinsics.mem_copy_non_overlapping(rawptr(tv), rawptr(v), size_of_v)
			intrinsics.mem_copy_non_overlapping(rawptr(v), rawptr(vp), size_of_v)
			intrinsics.mem_copy_non_overlapping(rawptr(vp), rawptr(tv), size_of_v)
			th := h
			h = hs[pos]
			hs[pos] = th
			distance = probe_distance
		}
		pos = (pos + 1) & mask
		distance += 1
	}
	// backward shift loop
	hs[pos] = 0
	look_ahead: uintptr = 1
	for {
		la_pos := (pos + look_ahead) & mask
		element_hash := hs[la_pos]
		if map_hash_is_deleted(element_hash) {
			// Clear consecutive tombstones as we pass over them.
			look_ahead += 1
			hs[la_pos] = 0
			continue
		}
		k_dst := map_cell_index_dynamic(ks, info.ks, pos)
		v_dst := map_cell_index_dynamic(vs, info.vs, pos)
		if map_hash_is_empty(element_hash) {
			// Chain ends: place the saved element at the hole.
			intrinsics.mem_copy_non_overlapping(rawptr(k_dst), rawptr(k), size_of_k)
			intrinsics.mem_copy_non_overlapping(rawptr(v_dst), rawptr(v), size_of_v)
			hs[pos] = h
			if result == 0 {
				key = k_dst
				result = v_dst
			}
			return
		}
		k_src := map_cell_index_dynamic(ks, info.ks, la_pos)
		v_src := map_cell_index_dynamic(vs, info.vs, la_pos)
		probe_distance := map_probe_distance(m^, element_hash, la_pos)
		if probe_distance < look_ahead {
			// probed can be made ideal while placing saved (ending condition)
			if result == 0 {
				key = k_dst
				result = v_dst
			}
			intrinsics.mem_copy_non_overlapping(rawptr(k_dst), rawptr(k), size_of_k)
			intrinsics.mem_copy_non_overlapping(rawptr(v_dst), rawptr(v), size_of_v)
			hs[pos] = h
			// This will be an ideal move
			pos = (la_pos - probe_distance) & mask
			look_ahead -= probe_distance
			// shift until we hit ideal/empty
			for probe_distance != 0 {
				k_dst = map_cell_index_dynamic(ks, info.ks, pos)
				v_dst = map_cell_index_dynamic(vs, info.vs, pos)
				intrinsics.mem_copy_non_overlapping(rawptr(k_dst), rawptr(k_src), size_of_k)
				intrinsics.mem_copy_non_overlapping(rawptr(v_dst), rawptr(v_src), size_of_v)
				hs[pos] = element_hash
				hs[la_pos] = 0
				pos = (pos + 1) & mask
				la_pos = (la_pos + 1) & mask
				look_ahead = (la_pos - pos) & mask
				element_hash = hs[la_pos]
				if map_hash_is_empty(element_hash) {
					return
				}
				probe_distance = map_probe_distance(m^, element_hash, la_pos)
				if probe_distance == 0 {
					return
				}
				// can be ideal?
				if probe_distance < look_ahead {
					pos = (la_pos - probe_distance) & mask
				}
				k_src = map_cell_index_dynamic(ks, info.ks, la_pos)
				v_src = map_cell_index_dynamic(vs, info.vs, la_pos)
			}
			return
		} else if distance < probe_distance - look_ahead {
			// shift back probed
			intrinsics.mem_copy_non_overlapping(rawptr(k_dst), rawptr(k_src), size_of_k)
			intrinsics.mem_copy_non_overlapping(rawptr(v_dst), rawptr(v_src), size_of_v)
			hs[pos] = element_hash
			hs[la_pos] = 0
		} else {
			// place saved, save probed
			if result == 0 {
				key = k_dst
				result = v_dst
			}
			intrinsics.mem_copy_non_overlapping(rawptr(k_dst), rawptr(k), size_of_k)
			intrinsics.mem_copy_non_overlapping(rawptr(v_dst), rawptr(v), size_of_v)
			hs[pos] = h
			intrinsics.mem_copy_non_overlapping(rawptr(k), rawptr(k_src), size_of_k)
			intrinsics.mem_copy_non_overlapping(rawptr(v), rawptr(v_src), size_of_v)
			h = hs[la_pos]
			hs[la_pos] = 0
			distance = probe_distance - look_ahead
		}
		pos = (pos + 1) & mask
		distance += 1
	}
}
// Wrapper over map_insert_hash_dynamic_with_key that discards the key address
// and returns only the address of the inserted value.
@(require_results)
map_insert_hash_dynamic :: #force_inline proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info, h: Map_Hash, ik: uintptr, iv: uintptr) -> (result: uintptr) {
	_, result = map_insert_hash_dynamic_with_key(m, info, h, ik, iv)
	return
}
// Doubles the map's capacity (or reserves the minimum capacity for an
// unallocated map) by delegating to map_reserve_dynamic.
@(require_results)
map_grow_dynamic :: proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info, loc := #caller_location) -> Allocator_Error {
	log2_capacity := map_log2_cap(m^)
	new_capacity := uintptr(1) << max(log2_capacity + 1, MAP_MIN_LOG2_CAPACITY)
	return map_reserve_dynamic(m, info, new_capacity, loc)
}
// Ensures the map can hold at least new_capacity slots, reallocating and
// rehashing every live entry when it cannot. No-op when the current capacity
// already suffices.
@(require_results)
map_reserve_dynamic :: #force_no_inline proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info, new_capacity: uintptr, loc := #caller_location) -> Allocator_Error {
	// Integer ceiling of log2(x): floor log2 for exact powers of two,
	// one more otherwise. Assumes x > 0 (guaranteed by the early-out below).
	@(require_results)
	ceil_log2 :: #force_inline proc "contextless" (x: uintptr) -> uintptr {
		z := intrinsics.count_leading_zeros(x)
		if z > 0 && x & (x-1) != 0 {
			z -= 1
		}
		return size_of(uintptr)*8 - 1 - z
	}
	if m.allocator.procedure == nil {
		m.allocator = context.allocator
	}
	new_capacity := new_capacity
	old_capacity := uintptr(map_cap(m^))
	if old_capacity >= new_capacity {
		return nil
	}
	// ceiling nearest power of two
	log2_new_capacity := ceil_log2(new_capacity)
	log2_min_cap := max(MAP_MIN_LOG2_CAPACITY, log2_new_capacity)
	if m.data == 0 {
		// Nothing allocated yet; a fresh allocation is all that is needed.
		m^ = map_alloc_dynamic(info, log2_min_cap, m.allocator, loc) or_return
		return nil
	}
	resized := map_alloc_dynamic(info, log2_min_cap, m.allocator, loc) or_return
	ks, vs, hs, _, _ := map_kvh_data_dynamic(m^, info)
	// Cache these loads to avoid hitting them in the for loop.
	n := m.len
	for i in 0..<old_capacity {
		hash := hs[i]
		if map_hash_is_empty(hash) {
			continue
		}
		if map_hash_is_deleted(hash) {
			continue
		}
		// Rehash with the new allocation's seed, since the seed is derived
		// from the data pointer (see map_seed).
		k := map_cell_index_dynamic(ks, info.ks, i)
		v := map_cell_index_dynamic(vs, info.vs, i)
		hash = info.key_hasher(rawptr(k), map_seed(resized))
		_ = map_insert_hash_dynamic(&resized, info, hash, k, v)
		// Only need to do this comparison on each actually added pair, so do not
		// fold it into the for loop comparator as a micro-optimization.
		n -= 1
		if n == 0 {
			break
		}
	}
	map_free_dynamic(m^, info, loc) or_return
	// Keep m.len; only the backing data (and its capacity tag) changes.
	m.data = resized.data
	return nil
}
  560. @(require_results)
  561. map_shrink_dynamic :: #force_no_inline proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info, loc := #caller_location) -> (did_shrink: bool, err: Allocator_Error) {
  562. if m.allocator.procedure == nil {
  563. m.allocator = context.allocator
  564. }
  565. // Cannot shrink the capacity if the number of items in the map would exceed
  566. // one minus the current log2 capacity's resize threshold. That is the shrunk
  567. // map needs to be within the max load factor.
  568. log2_capacity := map_log2_cap(m^)
  569. if uintptr(m.len) >= map_load_factor(log2_capacity - 1) {
  570. return false, nil
  571. }
  572. shrunk := map_alloc_dynamic(info, log2_capacity - 1, m.allocator) or_return
  573. capacity := uintptr(1) << log2_capacity
  574. ks, vs, hs, _, _ := map_kvh_data_dynamic(m^, info)
  575. n := m.len
  576. for i in 0..<capacity {
  577. hash := hs[i]
  578. if map_hash_is_empty(hash) {
  579. continue
  580. }
  581. if map_hash_is_deleted(hash) {
  582. continue
  583. }
  584. k := map_cell_index_dynamic(ks, info.ks, i)
  585. v := map_cell_index_dynamic(vs, info.vs, i)
  586. hash = info.key_hasher(rawptr(k), map_seed(shrunk))
  587. _ = map_insert_hash_dynamic(&shrunk, info, hash, k, v)
  588. // Only need to do this comparison on each actually added pair, so do not
  589. // fold it into the for loop comparator as a micro-optimization.
  590. n -= 1
  591. if n == 0 {
  592. break
  593. }
  594. }
  595. map_free_dynamic(m^, info, loc) or_return
  596. m.data = shrunk.data
  597. return true, nil
  598. }
// Frees the map's single backing allocation. .Mode_Not_Implemented from the
// allocator is treated as success so arena-style allocators that cannot free
// individual blocks do not surface an error.
@(require_results)
map_free_dynamic :: #force_no_inline proc "odin" (m: Raw_Map, info: ^Map_Info, loc := #caller_location) -> Allocator_Error {
	ptr := rawptr(map_data(m))
	size := int(map_total_allocation_size(uintptr(map_cap(m)), info))
	err := mem_free_with_size(ptr, size, m.allocator, loc)
	#partial switch err {
	case .None, .Mode_Not_Implemented:
		return nil
	}
	return err
}
// Searches the map for key `k` (passed as a pointer value) and returns the
// slot index where it is stored.
//
// Uses Robin Hood open addressing: the probe stops as soon as the current
// probe distance exceeds the stored element's own distance from its desired
// slot, since the key being looked up could never have been placed further.
@(require_results)
map_lookup_dynamic :: #force_no_inline proc "contextless" (m: Raw_Map, #no_alias info: ^Map_Info, k: uintptr) -> (index: uintptr, ok: bool) {
	if map_len(m) == 0 {
		return 0, false
	}
	h := info.key_hasher(rawptr(k), map_seed(m))
	p := map_desired_position(m, h)           // first slot to probe
	d := uintptr(0)                           // current probe distance
	c := (uintptr(1) << map_log2_cap(m)) - 1  // index mask; capacity is a power of two
	ks, _, hs, _, _ := map_kvh_data_dynamic(m, info)
	for {
		element_hash := hs[p]
		if map_hash_is_empty(element_hash) {
			// An empty slot terminates every probe sequence: key absent.
			return 0, false
		} else if d > map_probe_distance(m, element_hash, p) {
			// Robin Hood invariant violated for this key: it is absent.
			return 0, false
		} else if element_hash == h && info.key_equal(rawptr(k), rawptr(map_cell_index_dynamic(ks, info.ks, p))) {
			return p, true
		}
		p = (p + 1) & c
		d += 1
	}
}
  633. @(require_results)
  634. map_exists_dynamic :: #force_no_inline proc "contextless" (m: Raw_Map, #no_alias info: ^Map_Info, k: uintptr) -> (ok: bool) {
  635. if map_len(m) == 0 {
  636. return false
  637. }
  638. h := info.key_hasher(rawptr(k), map_seed(m))
  639. p := map_desired_position(m, h)
  640. d := uintptr(0)
  641. c := (uintptr(1) << map_log2_cap(m)) - 1
  642. ks, _, hs, _, _ := map_kvh_data_dynamic(m, info)
  643. for {
  644. element_hash := hs[p]
  645. if map_hash_is_empty(element_hash) {
  646. return false
  647. } else if d > map_probe_distance(m, element_hash, p) {
  648. return false
  649. } else if element_hash == h && info.key_equal(rawptr(k), rawptr(map_cell_index_dynamic(ks, info.ks, p))) {
  650. return true
  651. }
  652. p = (p + 1) & c
  653. d += 1
  654. }
  655. }
// Removes key `k` from the map. On success, returns pointers (as uintptrs)
// to the old key and value cells — still allocated — so the caller can
// destroy their contents; ok = false when the key is not present.
//
// The erased slot becomes either fully empty or a tombstone depending on
// whether any probe sequence could still pass through it.
@(require_results)
map_erase_dynamic :: #force_no_inline proc "contextless" (#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info, k: uintptr) -> (old_k, old_v: uintptr, ok: bool) {
	index := map_lookup_dynamic(m^, info, k) or_return
	ks, vs, hs, _, _ := map_kvh_data_dynamic(m^, info)
	// Mark the slot as deleted so later probes continue past it.
	hs[index] |= TOMBSTONE_MASK
	old_k = map_cell_index_dynamic(ks, info.ks, index)
	old_v = map_cell_index_dynamic(vs, info.vs, index)
	m.len -= 1
	ok = true
	mask := (uintptr(1)<<map_log2_cap(m^)) - 1
	curr_index := uintptr(index)
	next_index := (curr_index + 1) & mask
	// if the next element is empty or has zero probe distance, then any lookup
	// will always fail on the next, so we can clear both of them
	hash := hs[next_index]
	if map_hash_is_empty(hash) || map_probe_distance(m^, hash, next_index) == 0 {
		// No probe sequence continues through this slot, so it can be made
		// fully empty rather than left as a tombstone.
		hs[curr_index] = 0
	} else {
		// NOTE(review): effectively a no-op — curr_index == index and the
		// tombstone bit was already set above; kept for branch symmetry.
		hs[curr_index] |= TOMBSTONE_MASK
	}
	return
}
  678. map_clear_dynamic :: #force_inline proc "contextless" (#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info) {
  679. if m.data == 0 {
  680. return
  681. }
  682. _, _, hs, _, _ := map_kvh_data_dynamic(m^, info)
  683. intrinsics.mem_zero(rawptr(hs), map_cap(m^) * size_of(Map_Hash))
  684. m.len = 0
  685. }
// Returns typed pointers into a map's single backing allocation for the
// statically-typed (generic) code path. The layout is: key cells, then
// value cells, then the hash array, each region sized by the capacity.
@(require_results)
map_kvh_data_static :: #force_inline proc "contextless" (m: $T/map[$K]$V) -> (ks: [^]Map_Cell(K), vs: [^]Map_Cell(V), hs: [^]Map_Hash) {
	capacity := uintptr(cap(m))
	ks = ([^]Map_Cell(K))(map_data(transmute(Raw_Map)m))
	vs = ([^]Map_Cell(V))(map_cell_index_static(ks, capacity)) // values start after `capacity` key cells
	hs = ([^]Map_Hash)(map_cell_index_static(vs, capacity))    // hashes start after `capacity` value cells
	return
}
// Statically-typed lookup: returns the stored key and value for `key`,
// copied out by value, with ok = true on a hit. All zero values with
// ok = false when the key is absent or the map is empty.
//
// Same Robin Hood probe loop as the dynamic lookups, but using the
// compile-time known K/V cell layout via map_kvh_data_static.
@(require_results)
map_get :: proc "contextless" (m: $T/map[$K]$V, key: K) -> (stored_key: K, stored_value: V, ok: bool) {
	rm := transmute(Raw_Map)m
	if rm.len == 0 {
		return
	}
	info := intrinsics.type_map_info(T)
	key := key // local copy so its address can be taken for hashing/comparison
	h := info.key_hasher(&key, map_seed(rm))
	pos := map_desired_position(rm, h)
	distance := uintptr(0)
	mask := (uintptr(1) << map_log2_cap(rm)) - 1 // capacity is a power of two
	ks, vs, hs := map_kvh_data_static(m)
	for {
		element_hash := hs[pos]
		if map_hash_is_empty(element_hash) {
			// Empty slot ends the probe sequence: key absent.
			return
		} else if distance > map_probe_distance(rm, element_hash, pos) {
			// Robin Hood invariant: key cannot be stored further away.
			return
		} else if element_hash == h {
			element_key := map_cell_index_static(ks, pos)
			if info.key_equal(&key, rawptr(element_key)) {
				element_value := map_cell_index_static(vs, pos)
				stored_key = (^K)(element_key)^
				stored_value = (^V)(element_value)^
				ok = true
				return
			}
		}
		pos = (pos + 1) & mask
		distance += 1
	}
}
// IMPORTANT: USED WITHIN THE COMPILER
//
// Looks up `key` (with precomputed hash `h`) and returns a pointer to its
// stored value cell, or nil when the key is absent or the map is empty.
__dynamic_map_get :: proc "contextless" (#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info, h: Map_Hash, key: rawptr) -> (ptr: rawptr) {
	if m.len == 0 {
		return nil
	}
	pos := map_desired_position(m^, h)
	distance := uintptr(0)
	mask := (uintptr(1) << map_log2_cap(m^)) - 1 // capacity is a power of two
	ks, vs, hs, _, _ := map_kvh_data_dynamic(m^, info)
	for {
		element_hash := hs[pos]
		if map_hash_is_empty(element_hash) {
			// Empty slot ends the probe sequence: key absent.
			return nil
		} else if distance > map_probe_distance(m^, element_hash, pos) {
			// Robin Hood invariant: key cannot be stored further away.
			return nil
		} else if element_hash == h && info.key_equal(key, rawptr(map_cell_index_dynamic(ks, info.ks, pos))) {
			return rawptr(map_cell_index_dynamic(vs, info.vs, pos))
		}
		pos = (pos + 1) & mask
		distance += 1
	}
}
// Like __dynamic_map_get, but returns pointers to both the stored key cell
// and the stored value cell on a hit; (nil, nil) when the key is absent.
__dynamic_map_get_key_and_value :: proc "contextless" (#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info, h: Map_Hash, key: rawptr) -> (key_ptr, value_ptr: rawptr) {
	if m.len == 0 {
		return nil, nil
	}
	pos := map_desired_position(m^, h)
	distance := uintptr(0)
	mask := (uintptr(1) << map_log2_cap(m^)) - 1 // capacity is a power of two
	ks, vs, hs, _, _ := map_kvh_data_dynamic(m^, info)
	for {
		element_hash := hs[pos]
		if map_hash_is_empty(element_hash) {
			// Empty slot ends the probe sequence: key absent.
			return nil, nil
		} else if distance > map_probe_distance(m^, element_hash, pos) {
			// Robin Hood invariant: key cannot be stored further away.
			return nil, nil
		} else if element_hash == h {
			other_key := rawptr(map_cell_index_dynamic(ks, info.ks, pos))
			if info.key_equal(key, other_key) {
				key_ptr = other_key
				value_ptr = rawptr(map_cell_index_dynamic(vs, info.vs, pos))
				return
			}
		}
		pos = (pos + 1) & mask
		distance += 1
	}
}
  775. // IMPORTANT: USED WITHIN THE COMPILER
  776. __dynamic_map_check_grow :: proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info, loc := #caller_location) -> (err: Allocator_Error, has_grown: bool) {
  777. if m.len >= map_resize_threshold(m^) {
  778. return map_grow_dynamic(m, info, loc), true
  779. }
  780. return nil, false
  781. }
  782. __dynamic_map_set_without_hash :: proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info, key, value: rawptr, loc := #caller_location) -> rawptr {
  783. return __dynamic_map_set(m, info, info.key_hasher(key, map_seed(m^)), key, value, loc)
  784. }
// IMPORTANT: USED WITHIN THE COMPILER
//
// Sets `key` to `value`: overwrites the existing value in place when the key
// is already present, otherwise grows the map if needed and inserts a new
// entry. Returns a pointer to the stored value, or nil on allocation failure.
__dynamic_map_set :: proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info, hash: Map_Hash, key, value: rawptr, loc := #caller_location) -> rawptr {
	if found := __dynamic_map_get(m, info, hash, key); found != nil {
		// Existing entry: copy the new value over the old one.
		intrinsics.mem_copy_non_overlapping(found, value, info.vs.size_of_type)
		return found
	}
	hash := hash
	err, has_grown := __dynamic_map_check_grow(m, info, loc)
	if err != nil {
		return nil
	}
	if has_grown {
		// The map was reallocated; re-hash with the (re-read) seed since
		// hashes are seed-dependent — presumably the seed changes with the
		// allocation.
		hash = info.key_hasher(key, map_seed(m^))
	}
	result := map_insert_hash_dynamic(m, info, hash, uintptr(key), uintptr(value))
	if result != 0 {
		m.len += 1
	}
	return rawptr(result)
}
  805. __dynamic_map_set_extra_without_hash :: proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info, key, value: rawptr, loc := #caller_location) -> (prev_key_ptr, value_ptr: rawptr) {
  806. return __dynamic_map_set_extra(m, info, info.key_hasher(key, map_seed(m^)), key, value, loc)
  807. }
// Like __dynamic_map_set, but additionally returns a pointer to the
// previously stored key when the key already existed; prev_key_ptr is nil
// for a fresh insertion and on allocation failure.
__dynamic_map_set_extra :: proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info, hash: Map_Hash, key, value: rawptr, loc := #caller_location) -> (prev_key_ptr, value_ptr: rawptr) {
	if prev_key_ptr, value_ptr = __dynamic_map_get_key_and_value(m, info, hash, key); value_ptr != nil {
		// Existing entry: overwrite the value in place.
		intrinsics.mem_copy_non_overlapping(value_ptr, value, info.vs.size_of_type)
		return
	}
	hash := hash
	err, has_grown := __dynamic_map_check_grow(m, info, loc)
	if err != nil {
		return nil, nil
	}
	if has_grown {
		// The map was reallocated; re-hash with the re-read seed since hashes
		// are seed-dependent.
		hash = info.key_hasher(key, map_seed(m^))
	}
	result := map_insert_hash_dynamic(m, info, hash, uintptr(key), uintptr(value))
	if result != 0 {
		m.len += 1
	}
	return nil, rawptr(result)
}
// Finds or inserts an entry for `key`: on a hit, returns pointers to the
// existing key/value cells; otherwise inserts `key` with the value pointed
// to by `zero` and returns pointers to the freshly inserted cells with
// just_inserted = true. err is set on allocation failure.
__dynamic_map_entry :: proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info, key: rawptr, zero: rawptr, loc := #caller_location) -> (key_ptr: rawptr, value_ptr: rawptr, just_inserted: bool, err: Allocator_Error) {
	hash := info.key_hasher(key, map_seed(m^))
	if key_ptr, value_ptr = __dynamic_map_get_key_and_value(m, info, hash, key); value_ptr != nil {
		return
	}
	has_grown: bool
	if err, has_grown = __dynamic_map_check_grow(m, info, loc); err != nil {
		return
	} else if has_grown {
		// The map was reallocated; re-hash with the re-read seed since hashes
		// are seed-dependent.
		hash = info.key_hasher(key, map_seed(m^))
	}
	// NOTE(review): unlike __dynamic_map_set, the insert result is not checked
	// before incrementing len — presumably insertion cannot fail here once
	// growth has succeeded; confirm against map_insert_hash_dynamic_with_key.
	kp, vp := map_insert_hash_dynamic_with_key(m, info, hash, uintptr(key), uintptr(zero))
	key_ptr = rawptr(kp)
	value_ptr = rawptr(vp)
	m.len += 1
	just_inserted = true
	return
}
  845. // IMPORTANT: USED WITHIN THE COMPILER
  846. @(private)
  847. __dynamic_map_reserve :: proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info, new_capacity: uint, loc := #caller_location) -> Allocator_Error {
  848. if m == nil {
  849. return nil
  850. }
  851. return map_reserve_dynamic(m, info, uintptr(new_capacity), loc)
  852. }
// NOTE: the default hashing algorithm derives from fnv64a, with some minor modifications to work for `map` type:
//
// * Convert a `0` result to `1`
//   * `0` is reserved to mean "empty entry"
// * Prevent the top bit from being set
//   * the top bit is reserved to mean "deleted entry" (tombstone)
//
// Both of these modifications are necessary for the implementation of the `map`
INITIAL_HASH_SEED :: 0xcbf29ce484222325 // FNV-1a 64-bit offset basis
HASH_MASK :: 1 << (8*size_of(uintptr) - 1) - 1 // every bit set except the top (tombstone) bit
  863. default_hasher :: #force_inline proc "contextless" (data: rawptr, seed: uintptr, N: int) -> uintptr {
  864. h := u64(seed) + INITIAL_HASH_SEED
  865. p := ([^]byte)(data)
  866. for _ in 0..<N {
  867. h = (h ~ u64(p[0])) * 0x100000001b3
  868. p = p[1:]
  869. }
  870. h &= HASH_MASK
  871. return uintptr(h) | uintptr(uintptr(h) == 0)
  872. }
  873. default_hasher_string :: proc "contextless" (data: rawptr, seed: uintptr) -> uintptr {
  874. str := (^[]byte)(data)
  875. return default_hasher(raw_data(str^), seed, len(str))
  876. }
  877. default_hasher_cstring :: proc "contextless" (data: rawptr, seed: uintptr) -> uintptr {
  878. h := u64(seed) + INITIAL_HASH_SEED
  879. if ptr := (^[^]byte)(data)^; ptr != nil {
  880. for ptr[0] != 0 {
  881. h = (h ~ u64(ptr[0])) * 0x100000001b3
  882. ptr = ptr[1:]
  883. }
  884. }
  885. h &= HASH_MASK
  886. return uintptr(h) | uintptr(uintptr(h) == 0)
  887. }
  888. default_hasher_f64 :: proc "contextless" (f: f64, seed: uintptr) -> uintptr {
  889. f := f
  890. buf: [size_of(f)]u8
  891. if f == 0 {
  892. return default_hasher(&buf, seed, size_of(buf))
  893. }
  894. if f != f {
  895. // TODO(bill): What should the logic be for NaNs?
  896. return default_hasher(&f, seed, size_of(f))
  897. }
  898. return default_hasher(&f, seed, size_of(f))
  899. }
  900. default_hasher_complex128 :: proc "contextless" (x, y: f64, seed: uintptr) -> uintptr {
  901. seed := seed
  902. seed = default_hasher_f64(x, seed)
  903. seed = default_hasher_f64(y, seed)
  904. return seed
  905. }
  906. default_hasher_quaternion256 :: proc "contextless" (x, y, z, w: f64, seed: uintptr) -> uintptr {
  907. seed := seed
  908. seed = default_hasher_f64(x, seed)
  909. seed = default_hasher_f64(y, seed)
  910. seed = default_hasher_f64(z, seed)
  911. seed = default_hasher_f64(w, seed)
  912. return seed
  913. }