@@ -3,150 +3,727 @@ package runtime

import "core:intrinsics"

_ :: intrinsics

-INITIAL_MAP_CAP :: 16
+// High performance, cache-friendly, open-addressed Robin Hood hashing hash map
+// data structure with various optimizations for Odin.
+//
+// Copyright 2022 (c) Dale Weiler
+//
+// The core of the hash map data structure is the Raw_Map struct, which is a
+// type-erased representation of the map. This type-erased representation is
+// used in two ways: static and dynamic. When static type information is known,
+// the procedures suffixed with _static should be used instead of _dynamic. The
+// static procedures are optimized since they have type information. Hashing of
+// keys, comparison of keys, and data lookup are all optimized. When type
+// information is not known, the procedures suffixed with _dynamic should be
+// used. The representation of the map is the same for both static and dynamic,
+// and procedures of each can be mixed and matched. The purpose of the dynamic
+// representation is to enable reflection and runtime manipulation of the map.
+// The dynamic procedures all take an additional Map_Info structure parameter
+// which carries runtime values describing the size, alignment, and offset of
+// various traits of a given key and value type pair. The Map_Info value can
+// be created by calling map_info(K, V) with the key and value typeids.
+//
+// This map implementation makes extensive use of uintptr for representing
+// sizes, lengths, capacities, masks, pointers, offsets, and addresses to avoid
+// expensive sign extension and masking that would be generated if types were
+// casted all over. The only place regular ints show up is in the cap() and
+// len() implementations.
+//
+// To make this map cache-friendly it uses a novel strategy to ensure keys and
+// values of the map are always cache-line aligned and that no single key or
+// value of any type ever straddles a cache-line. This cache efficiency makes
+// for quick lookups because the linear probe always addresses data in a
+// cache-friendly way. This is enabled through the use of a special meta-type
+// called a Map_Cell, which packs as many values of a given type as will fit
+// into a local array, adding internal padding to round to MAP_CACHE_LINE_SIZE.
+// One other benefit to storing the internal data in this manner is that false
+// sharing no longer occurs when using a map, enabling efficient concurrent
+// access of the map data structure with minimal locking if desired.
+
+// With Robin Hood hashing a maximum load factor of 75% is ideal.
+MAP_LOAD_FACTOR :: 75
+
+// Minimum log2 capacity.
+MAP_MIN_LOG2_CAPACITY :: 6 // 64 elements
+
+// Has to be less than 100% though.
+#assert(MAP_LOAD_FACTOR < 100)
+
+// The log2 size of a cache-line. This is safe to change as long as it is at
+// least six; higher cache-line sizes are permitted.
+MAP_CACHE_LINE_LOG2 :: 6
+
+// The size of a cache-line.
+MAP_CACHE_LINE_SIZE :: 1 << MAP_CACHE_LINE_LOG2
+
+// The minimum cache-line size allowed by this implementation is 64 bytes since
+// we need 6 bits in the base pointer to store the integer log2 capacity, which
+// at maximum is 63. Odin uses signed integers to represent length and capacity,
+// so only 63 bits are needed in the maximum case.
+#assert(MAP_CACHE_LINE_SIZE >= 64)
+
+// Map_Cell type that packs multiple T in such a way to ensure that each T stays
+// aligned by align_of(T) and such that align_of(Map_Cell(T)) % MAP_CACHE_LINE_SIZE == 0
+//
+// This means a value of type T will never straddle a cache-line.
+//
+// When multiple Ts can fit in a single cache-line the data array will have more
+// than one element. When it cannot, the data array will have one element and
+// an array of Map_Cell(T) will be padded to stay a multiple of MAP_CACHE_LINE_SIZE.
+//
+// We rely on the type system to do all the arithmetic and padding for us here.
+//
+// The usual array[index] indexing for []T backed by a []Map_Cell(T) becomes a bit
+// more involved as there now may be internal padding. The indexing now becomes
+//
+//	N :: len(Map_Cell(T){}.data)
+//	i := index / N
+//	j := index % N
+//	cell[i].data[j]
+//
+// However, since len(Map_Cell(T){}.data) is a compile-time constant, there are some
+// optimizations we can do to eliminate the need for any divisions as N will
+// be bounded by [1, 64).
+//
+// In the optimal case, len(Map_Cell(T){}.data) = 1 so the cell array can be treated
+// as a regular array of T, which is the case for hashes.
+Map_Cell :: struct($T: typeid) #align MAP_CACHE_LINE_SIZE {
+	data: [MAP_CACHE_LINE_SIZE / size_of(T) when size_of(T) < MAP_CACHE_LINE_SIZE else 1]T,
+}
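
// An illustration (not part of the patch): on a 64-byte cache-line the packing
// works out as below, which could be checked at compile time.
//
//	#assert(len(Map_Cell(u8){}.data)     == 64) // 64 one-byte elements per cell
//	#assert(len(Map_Cell(u64){}.data)    == 8)  // 8 eight-byte elements per cell
//	#assert(len(Map_Cell([40]u8){}.data) == 1)  // one element, padded from 40 up to 64 bytes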
+
+// So we can operate on a cell data structure at runtime without any type
+// information, we have a simple table that stores some traits about the cell.
+//
+// 32-bytes on 64-bit
+// 16-bytes on 32-bit
+Map_Cell_Info :: struct {
+	size_of_type:      uintptr, // 8-bytes on 64-bit, 4-bytes on 32-bit
+	align_of_type:     uintptr, // 8-bytes on 64-bit, 4-bytes on 32-bit
+	size_of_cell:      uintptr, // 8-bytes on 64-bit, 4-bytes on 32-bit
+	elements_per_cell: uintptr, // 8-bytes on 64-bit, 4-bytes on 32-bit
+}
+
+// Computes the address of the element at index within a cell-packed array at
+// runtime, using the traits stored in the given Map_Cell_Info value.
+map_cell_index_dynamic :: #force_inline proc "contextless" (base: uintptr, info: ^Map_Cell_Info, index: uintptr) -> uintptr {
+	// Micro-optimize the cases where the number of elements per cell is a small
+	// power of two to save on expensive integer division.
+	switch elements_per_cell := info.elements_per_cell; elements_per_cell {
+	case 1:
+		return base + (index * info.size_of_cell)
+	case 2:
+		cell_index := index >> 1
+		data_index := index & 1
+		return base + (cell_index * info.size_of_cell) + (data_index * info.size_of_type)
+	case 4:
+		cell_index := index >> 2
+		data_index := index & 3
+		return base + (cell_index * info.size_of_cell) + (data_index * info.size_of_type)
+	case 8:
+		cell_index := index >> 3
+		data_index := index & 7
+		return base + (cell_index * info.size_of_cell) + (data_index * info.size_of_type)
+	case 16:
+		cell_index := index >> 4
+		data_index := index & 15
+		return base + (cell_index * info.size_of_cell) + (data_index * info.size_of_type)
+	case 32:
+		cell_index := index >> 5
+		data_index := index & 31
+		return base + (cell_index * info.size_of_cell) + (data_index * info.size_of_type)
+	case 64:
+		cell_index := index >> 6
+		data_index := index & 63
+		return base + (cell_index * info.size_of_cell) + (data_index * info.size_of_type)
+	case:
+		cell_index := index / elements_per_cell
+		data_index := index % elements_per_cell
+		return base + (cell_index * info.size_of_cell) + (data_index * info.size_of_type)
+	}
+}
+
+// Same as the above procedure but with a compile-time constant index.
+map_cell_index_dynamic_const :: proc "contextless" (base: uintptr, #no_alias info: ^Map_Cell_Info, $INDEX: uintptr) -> uintptr {
+	elements_per_cell := uintptr(info.elements_per_cell)
+	size_of_cell := uintptr(info.size_of_cell)
+	size_of_type := uintptr(info.size_of_type)
+	cell_index := INDEX / elements_per_cell
+	data_index := INDEX % elements_per_cell
+	return base + (cell_index * size_of_cell) + (data_index * size_of_type)
+}
+
+// len() for map
+map_len :: #force_inline proc "contextless" (m: Raw_Map) -> int {
+	return int(m.len)
+}
+
+// cap() for map
+map_cap :: #force_inline proc "contextless" (m: Raw_Map) -> int {
+	// The data uintptr stores the capacity in the lower six bits, which gives
+	// a maximum value of 2^6-1, or 63. We store the integer log2 of capacity
+	// since our capacity is always a power of two. We only need 63 bits as Odin
+	// represents length and capacity as a signed integer.
+	return 0 if m.data == 0 else 1 << map_log2_cap(m)
+}
+
+// Query the load factor of the map. This is not actually configurable, but
+// some math is needed to compute it. Compute it as a fixed point percentage to
+// avoid floating point operations. This division can be optimized out by
+// multiplying by the multiplicative inverse of 100.
+map_load_factor :: #force_inline proc "contextless" (log2_capacity: uintptr) -> uintptr {
+	return ((uintptr(1) << log2_capacity) * MAP_LOAD_FACTOR) / 100
+}
+
+map_resize_threshold :: #force_inline proc "contextless" (m: Raw_Map) -> int {
+	return int(map_load_factor(map_log2_cap(m)))
+}
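
// A worked example (illustrative): with the minimum capacity of 64 and the 75%
// load factor, map_resize_threshold is (64 * 75) / 100 = 48, so an insert that
// would bring the map to 48 elements already triggers a grow to capacity 128
// (new threshold 96).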

-// Temporary data structure for comparing hashes and keys
-Map_Hash :: struct {
-	hash:    uintptr,
-	key_ptr: rawptr, // address of Map_Entry_Header.key
+// The data field stores the log2 capacity in its lower six bits. This is
+// primarily used in the implementation rather than map_cap, since the check
+// for data == 0 isn't necessary in the implementation. cap(), on the other
+// hand, needs to work when called on an empty map.
+map_log2_cap :: #force_inline proc "contextless" (m: Raw_Map) -> uintptr {
+	return m.data & (64 - 1)
}

-__get_map_key_hash :: #force_inline proc "contextless" (k: ^$K) -> uintptr {
-	hasher := intrinsics.type_hasher_proc(K)
-	return hasher(k, 0)
+// Canonicalize the data by removing the tagged capacity stored in the lower six
+// bits of the data uintptr.
+map_data :: #force_inline proc "contextless" (m: Raw_Map) -> uintptr {
+	return m.data & ~uintptr(64 - 1)
}
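
// A sketch of the tagged pointer (illustrative values): the backing allocation
// is always MAP_CACHE_LINE_SIZE-aligned, so its low six bits are free to hold
// the log2 capacity.
//
//	m.data   = base_ptr | log2_capacity  // e.g. base | 6 for a 64-element map
//	log2_cap = m.data & 63               // recovered by map_log2_cap
//	base_ptr = m.data & ~uintptr(63)     // recovered by map_data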

-__get_map_entry_key_ptr :: #force_inline proc "contextless" (h: Map_Header_Table, entry: ^Map_Entry_Header) -> rawptr {
-	return rawptr(uintptr(entry) + h.key_offset)
+
+
+Map_Hash :: uintptr
+
+// __get_map_key_hash :: #force_inline proc "contextless" (k: ^$K) -> uintptr {
+// 	hasher := intrinsics.type_hasher_proc(K)
+// 	return hasher(k, 0)
+// }
+
+// __get_map_entry_key_ptr :: #force_inline proc "contextless" (h: Map_Header_Table, entry: ^Map_Entry_Header) -> rawptr {
+// 	return rawptr(uintptr(entry) + h.key_offset)
+// }
+
+
+// Procedure to check if a slot is empty for a given hash. An empty slot is
+// represented by the zero value, which keeps the zero value useful. This is a
+// procedure just for prose reasons.
+map_hash_is_empty :: #force_inline proc "contextless" (hash: Map_Hash) -> bool {
+	return hash == 0
}

-Map_Index :: distinct uint
-MAP_SENTINEL :: ~Map_Index(0)
+map_hash_is_deleted :: #force_inline proc "contextless" (hash: Map_Hash) -> bool {
+	// The MSB indicates a tombstone
+	return (hash >> ((size_of(Map_Hash) * 8) - 1)) != 0
+}
+
+// Computes the desired position in the array. This is just hash % capacity,
+// but written as a procedure since some math is needed to recover the capacity.
+map_desired_position :: #force_inline proc "contextless" (m: Raw_Map, hash: Map_Hash) -> uintptr {
+	// We do not use map_cap since we know the capacity will not be zero here.
+	capacity := uintptr(1) << map_log2_cap(m)
+	return uintptr(hash & Map_Hash(capacity - 1))
+}

-Map_Find_Result :: struct {
-	hash_index:  Map_Index,
-	entry_prev:  Map_Index,
-	entry_index: Map_Index,
+map_probe_distance :: #force_inline proc "contextless" (m: Raw_Map, hash: Map_Hash, slot: uintptr) -> uintptr {
+	// We do not use map_cap since we know the capacity will not be zero here.
+	capacity := uintptr(1) << map_log2_cap(m)
+	return (slot + capacity - map_desired_position(m, hash)) & (capacity - 1)
}
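
// A worked example (illustrative): with capacity 64, an element whose desired
// position is 62 but which resides in slot 1 has probe distance
// (1 + 64 - 62) & 63 = 3; the mask handles the wrap past slot 63.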

-Map_Entry_Header :: struct {
-	hash: uintptr,
-	next: Map_Index,
-/*
-	key:   Key_Value,
-	value: Value_Type,
-*/
+// When working with the type-erased structure at runtime we need information
+// about the map to make working with it possible. This info structure stores
+// that.
+//
+// The Odin compiler should generate this for __get_map_header.
+//
+// 80-bytes on 64-bit
+// 40-bytes on 32-bit
+Map_Info :: struct {
+	ks:   Map_Cell_Info,                                                // 32-bytes on 64-bit, 16-bytes on 32-bit
+	vs:   Map_Cell_Info,                                                // 32-bytes on 64-bit, 16-bytes on 32-bit
+	hash: proc "contextless" (key: rawptr, seed: Map_Hash) -> Map_Hash, // 8-bytes on 64-bit, 4-bytes on 32-bit
+	cmp:  proc "contextless" (lhs, rhs: rawptr) -> bool,                // 8-bytes on 64-bit, 4-bytes on 32-bit
+}
+
+
+// The Map_Info structure is basically a pseudo-table of information for a given K and V pair.
+map_info :: #force_inline proc "contextless" ($K: typeid, $V: typeid) -> ^Map_Info where intrinsics.type_is_comparable(K) {
+	@static INFO := Map_Info {
+		Map_Cell_Info {
+			size_of(K),
+			align_of(K),
+			size_of(Map_Cell(K)),
+			len(Map_Cell(K){}.data),
+		},
+		Map_Cell_Info {
+			size_of(V),
+			align_of(V),
+			size_of(Map_Cell(V)),
+			len(Map_Cell(V){}.data),
+		},
+		proc "contextless" (ptr: rawptr, seed: uintptr) -> Map_Hash { return intrinsics.type_hasher_proc(K)(ptr, seed) },
+		proc "contextless" (a, b: rawptr) -> bool { return intrinsics.type_equal_proc(K)(a, b) },
+	}
+	return &INFO
}
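
// Example use (illustrative): one shared Map_Info drives every _dynamic
// procedure for a given key/value pair.
//
//	info := map_info(string, int)
//	m, err := map_alloc_dynamic(info, MAP_MIN_LOG2_CAPACITY)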

-Map_Header_Table :: struct {
-	equal: Equal_Proc,

-	entry_size:  int,
-	entry_align: int,
+map_kvh_data_dynamic :: proc "contextless" (m: Raw_Map, #no_alias info: ^Map_Info) -> (ks: uintptr, vs: uintptr, hs: [^]Map_Hash, sk: uintptr, sv: uintptr) {
+	@static INFO_HS := Map_Cell_Info {
+		size_of(Map_Hash),
+		align_of(Map_Hash),
+		size_of(Map_Cell(Map_Hash)),
+		len(Map_Cell(Map_Hash){}.data),
+	}

-	key_offset: uintptr,
-	key_size:   int,
+	capacity := uintptr(1) << map_log2_cap(m)
+	ks = map_data(m)
+	vs = map_cell_index_dynamic(ks, &info.ks, capacity)    // Skip past ks to get the start of vs
+	hs_ := map_cell_index_dynamic(vs, &info.vs, capacity)  // Skip past vs to get the start of hs
+	sk = map_cell_index_dynamic(hs_, &INFO_HS, capacity)   // Skip past hs to get the start of sk
+	// Need to skip past two elements in the scratch key space to get to the
+	// start of the scratch value space, of which there are only two elements
+	// as well.
+	sv = map_cell_index_dynamic_const(sk, &info.ks, 2)

-	value_offset: uintptr,
-	value_size:   int,
+	hs = ([^]Map_Hash)(hs_)
+	return
}
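
// The resulting single-allocation layout (illustrative):
//
//	[ ks: capacity keys ][ vs: capacity values ][ hs: capacity hashes ][ sk: 2 scratch keys ][ sv: 2 scratch values ]
//
// Every region starts cache-line aligned because each preceding region is a
// whole number of cache-line-sized cells.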

-Map_Header :: struct {
-	m: ^Raw_Map,
-	using table: Map_Header_Table,
+
+// The only procedure which needs access to the context is the one which allocates the map.
+map_alloc_dynamic :: proc(info: ^Map_Info, log2_capacity: uintptr, allocator := context.allocator) -> (result: Raw_Map, err: Allocator_Error) {
+	if log2_capacity == 0 {
+		// Empty map, but set the allocator.
+		return { 0, 0, allocator }, nil
+	}
+
+	if log2_capacity >= 64 {
+		// Overflow: the log2 capacity has to fit in the six-bit tag (maximum 63).
+		return {}, .Out_Of_Memory
+	}
+
+	capacity := uintptr(1) << log2_capacity
+
+	@static INFO_HS := Map_Cell_Info {
+		size_of(Map_Hash),
+		align_of(Map_Hash),
+		size_of(Map_Cell(Map_Hash)),
+		len(Map_Cell(Map_Hash){}.data),
+	}
+
+	round :: #force_inline proc "contextless" (value: uintptr) -> uintptr {
+		return (value + MAP_CACHE_LINE_SIZE - 1) & ~uintptr(MAP_CACHE_LINE_SIZE - 1)
+	}
+
+	size := uintptr(0)
+	size = round(map_cell_index_dynamic(size, &info.ks, capacity))
+	size = round(map_cell_index_dynamic(size, &info.vs, capacity))
+	size = round(map_cell_index_dynamic(size, &INFO_HS, capacity))
+	// Include the two scratch keys and two scratch values that follow the
+	// hashes, so the sk and sv regions of map_kvh_data_dynamic stay in bounds.
+	size = round(map_cell_index_dynamic(size, &info.ks, 2))
+	size = round(map_cell_index_dynamic(size, &info.vs, 2))
+
+	data := mem_alloc(int(size), MAP_CACHE_LINE_SIZE, allocator) or_return
+	data_ptr := uintptr(raw_data(data))
+
+	result = {
+		// Tagged pointer representation for capacity.
+		data_ptr | log2_capacity,
+		0,
+		allocator,
+	}
+
+	map_clear_dynamic(&result, info)
+
+	return
}
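
// A size sketch (illustrative, 64-bit): for map[u64]u64 at capacity 64, each
// of ks, vs, and hs is 8 cells * 64 bytes = 512 bytes, plus one 64-byte cell
// each for the two scratch keys and two scratch values: 1664 bytes in total,
// all cache-line aligned.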

-// USED INTERNALLY BY THE COMPILER
-__dynamic_map_get :: proc "contextless" (m: rawptr, table: Map_Header_Table, key_hash: uintptr, key_ptr: rawptr) -> rawptr {
-	if m != nil {
-		h := Map_Header{(^Raw_Map)(m), table}
-		index := __dynamic_map_find(h, key_hash, key_ptr).entry_index
-		if index != MAP_SENTINEL {
-			data := uintptr(__dynamic_map_get_entry(h, index))
-			return rawptr(data + h.value_offset)
+// When the type information is known we should use map_insert_hash_static for
+// better performance. This procedure has to stack allocate storage to store
+// local keys during the Robin Hood hashing technique where elements are swapped
+// in the backing arrays to reduce variance. This swapping can only be done with
+// memcpy since there is no type information.
+//
+// This procedure returns the address of the just-inserted value.
+@(optimization_mode="size")
+map_insert_hash_dynamic :: proc(m: Raw_Map, info: ^Map_Info, h: Map_Hash, k, v: uintptr) -> (result: uintptr) {
+	info_ks := &info.ks
+	info_vs := &info.vs
+
+	// Storage to exchange when reducing variance.
+	k_storage := intrinsics.alloca(info_ks.size_of_type, MAP_CACHE_LINE_SIZE)
+	v_storage := intrinsics.alloca(info_vs.size_of_type, MAP_CACHE_LINE_SIZE)
+	intrinsics.mem_copy_non_overlapping(rawptr(k_storage), rawptr(k), info_ks.size_of_type)
+	intrinsics.mem_copy_non_overlapping(rawptr(v_storage), rawptr(v), info_vs.size_of_type)
+	h := h
+
+	p := map_desired_position(m, h)
+	d := uintptr(0)
+	c := (uintptr(1) << map_log2_cap(m)) - 1 // Power-of-two capacity mask for wrapping the probe position
+
+	ks, vs, hs, _, _ := map_kvh_data_dynamic(m, info)
+
+	for {
+		hp := &hs[p]
+		element_hash := hp^
+
+		if map_hash_is_empty(element_hash) {
+			k_dst := map_cell_index_dynamic(ks, info_ks, p)
+			v_dst := map_cell_index_dynamic(vs, info_vs, p)
+			intrinsics.mem_copy_non_overlapping(rawptr(k_dst), k_storage, info_ks.size_of_type)
+			intrinsics.mem_copy_non_overlapping(rawptr(v_dst), v_storage, info_vs.size_of_type)
+			hp^ = h
+			return result if result != 0 else v_dst
+		}
+
+		if pd := map_probe_distance(m, element_hash, p); pd < d {
+			if map_hash_is_deleted(element_hash) {
+				k_dst := map_cell_index_dynamic(ks, info_ks, p)
+				v_dst := map_cell_index_dynamic(vs, info_vs, p)
+				intrinsics.mem_copy_non_overlapping(rawptr(k_dst), k_storage, info_ks.size_of_type)
+				intrinsics.mem_copy_non_overlapping(rawptr(v_dst), v_storage, info_vs.size_of_type)
+				hp^ = h
+				return result if result != 0 else v_dst
+			}
+
+			if result == 0 {
+				result = map_cell_index_dynamic(vs, info_vs, p)
+			}
+
+			swap :: #force_inline proc "contextless" (lhs, rhs, size: uintptr) {
+				tmp := intrinsics.alloca(size, MAP_CACHE_LINE_SIZE)
+				intrinsics.mem_copy_non_overlapping(&tmp[0], rawptr(lhs), size)
+				intrinsics.mem_copy_non_overlapping(rawptr(lhs), rawptr(rhs), size)
+				intrinsics.mem_copy_non_overlapping(rawptr(rhs), &tmp[0], size)
+			}
+
+			// Exchange to reduce variance.
+			swap(uintptr(k_storage), map_cell_index_dynamic(ks, info_ks, p), info_ks.size_of_type)
+			swap(uintptr(v_storage), map_cell_index_dynamic(vs, info_vs, p), info_vs.size_of_type)
+			hp^, h = h, hp^
+
+			d = pd
		}
+
+		p = (p + 1) & c
+		d += 1
	}
-	return nil
}

-// USED INTERNALLY BY THE COMPILER
-__dynamic_map_set :: proc "odin" (m: rawptr, table: Map_Header_Table, key_hash: uintptr, key_ptr: rawptr, value: rawptr, loc := #caller_location) -> ^Map_Entry_Header #no_bounds_check {
-	add_entry :: proc "odin" (h: Map_Header, key_hash: uintptr, key_ptr: rawptr, loc := #caller_location) -> Map_Index {
-		prev := Map_Index(h.m.entries.len)
-		c := Map_Index(__dynamic_array_append_nothing(&h.m.entries, h.entry_size, h.entry_align, loc))
-		if c != prev {
-			end := __dynamic_map_get_entry(h, c-1)
-			end.hash = key_hash
-			mem_copy(rawptr(uintptr(end) + h.key_offset), key_ptr, h.key_size)
-			end.next = MAP_SENTINEL
+@(optimization_mode="speed")
+map_add_hash_dynamic :: proc(m: Raw_Map, #no_alias info: ^Map_Info, h: Map_Hash, ik: uintptr, iv: uintptr) {
+	info_ks := &info.ks
+	info_vs := &info.vs
+
+	capacity := uintptr(1) << map_log2_cap(m)
+	p := map_desired_position(m, h)
+	d := uintptr(0)
+	c := capacity - 1 // Power-of-two capacity mask for wrapping the probe position
+
+	ks, vs, hs, sk, sv := map_kvh_data_dynamic(m, info)
+
+	// Avoid redundant loads of these values
+	size_of_k := info_ks.size_of_type
+	size_of_v := info_vs.size_of_type
+
+	// Use the sk and sv scratch storage space for dynamic k and v storage here.
+	//
+	// Simulate the following at runtime
+	//	k = ik
+	//	v = iv
+	//	h = h
+	k := map_cell_index_dynamic_const(sk, info_ks, 0)
+	v := map_cell_index_dynamic_const(sv, info_vs, 0)
+	intrinsics.mem_copy_non_overlapping(rawptr(k), rawptr(ik), size_of_k)
+	intrinsics.mem_copy_non_overlapping(rawptr(v), rawptr(iv), size_of_v)
+	h := h
+
+	// Temporary k and v dynamic storage for the swap below
+	tk := map_cell_index_dynamic_const(sk, info_ks, 1)
+	tv := map_cell_index_dynamic_const(sv, info_vs, 1)
+
+	for {
+		hp := &hs[p]
+		element_hash := hp^
+
+		if map_hash_is_empty(element_hash) {
+			k_dst := map_cell_index_dynamic(ks, info_ks, p)
+			v_dst := map_cell_index_dynamic(vs, info_vs, p)
+			intrinsics.mem_copy_non_overlapping(rawptr(k_dst), rawptr(k), size_of_k)
+			intrinsics.mem_copy_non_overlapping(rawptr(v_dst), rawptr(v), size_of_v)
+			hp^ = h
+			return
		}
-		return prev
+
+		if pd := map_probe_distance(m, element_hash, p); pd < d {
+			if map_hash_is_deleted(element_hash) {
+				k_dst := map_cell_index_dynamic(ks, info_ks, p)
+				v_dst := map_cell_index_dynamic(vs, info_vs, p)
+				intrinsics.mem_copy_non_overlapping(rawptr(k_dst), rawptr(k), size_of_k)
+				intrinsics.mem_copy_non_overlapping(rawptr(v_dst), rawptr(v), size_of_v)
+				hp^ = h
+				return
+			}
+
+			kp := map_cell_index_dynamic(ks, info_ks, p)
+			vp := map_cell_index_dynamic(vs, info_vs, p)
+
+			// Simulate the following at runtime with dynamic storage
+			//
+			//	kp^, k = k, kp^
+			//	vp^, v = v, vp^
+			//	hp^, h = h, hp^
+			intrinsics.mem_copy_non_overlapping(rawptr(tk), rawptr(kp), size_of_k)
+			intrinsics.mem_copy_non_overlapping(rawptr(tv), rawptr(vp), size_of_v)
+			intrinsics.mem_copy_non_overlapping(rawptr(kp), rawptr(k), size_of_k)
+			intrinsics.mem_copy_non_overlapping(rawptr(vp), rawptr(v), size_of_v)
+			intrinsics.mem_copy_non_overlapping(rawptr(k), rawptr(tk), size_of_k)
+			intrinsics.mem_copy_non_overlapping(rawptr(v), rawptr(tv), size_of_v)
+			hp^, h = h, hp^
+
+			d = pd
+		}
+
+		p = (p + 1) & c
+		d += 1
+	}
+}
+
+@(optimization_mode="size")
+map_grow_dynamic :: proc(#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info) -> Allocator_Error {
+	allocator := m.allocator
+
+	log2_capacity := map_log2_cap(m^)
+
+	if m.data == 0 {
+		n := map_alloc_dynamic(info, MAP_MIN_LOG2_CAPACITY, allocator) or_return
+		m.data = n.data
+		return nil
	}

-	h := Map_Header{(^Raw_Map)(m), table}
+	resized := map_alloc_dynamic(info, log2_capacity + 1, allocator) or_return
+
+	capacity := uintptr(1) << log2_capacity
+
+	ks, vs, hs, _, _ := map_kvh_data_dynamic(m^, info)

-	index := MAP_SENTINEL
+	// Cache these loads to avoid hitting them in the for loop.
+	info_ks := &info.ks
+	info_vs := &info.vs

-	if len(h.m.hashes) == 0 {
-		__dynamic_map_reserve(m, table, INITIAL_MAP_CAP, loc)
-		__dynamic_map_grow(h, loc)
+	n := map_len(m^)
+	for i := uintptr(0); i < capacity; i += 1 {
+		hash := hs[i]
+		if map_hash_is_empty(hash) do continue
+		if map_hash_is_deleted(hash) do continue
+		k := map_cell_index_dynamic(ks, info_ks, i)
+		v := map_cell_index_dynamic(vs, info_vs, i)
+		map_insert_hash_dynamic(resized, info, hash, k, v)
+		// This comparison only needs to happen for each actually added pair, so
+		// do not fold it into the for loop condition as a micro-optimization.
+		n -= 1
+		if n == 0 do break
	}

-	fr := __dynamic_map_find(h, key_hash, key_ptr)
-	if fr.entry_index != MAP_SENTINEL {
-		index = fr.entry_index
-	} else {
-		index = add_entry(h, key_hash, key_ptr, loc)
-		if fr.entry_prev != MAP_SENTINEL {
-			entry := __dynamic_map_get_entry(h, fr.entry_prev)
-			entry.next = index
-		} else if fr.hash_index != MAP_SENTINEL {
-			h.m.hashes[fr.hash_index] = index
-		} else {
-			return nil
-		}
+	mem_free(rawptr(ks), allocator)
+
+	m.data = resized.data // This copies the new capacity too, via the tag bits
+
+	return nil
+}
+
+
+@(optimization_mode="size")
+map_reserve_dynamic :: proc(#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info, new_capacity: uintptr) -> Allocator_Error {
+	allocator := m.allocator
+
+	log2_capacity := map_log2_cap(m^)
+	capacity := uintptr(1) << log2_capacity
+
+	if capacity >= new_capacity {
+		return nil
	}
+	// Ceiling to the nearest power of two: ceil(log2(new_capacity)).
+	log2_new_capacity := (8 * size_of(uintptr)) - intrinsics.count_leading_zeros(new_capacity - 1)
+
+	if m.data == 0 {
+		n := map_alloc_dynamic(info, max(log2_new_capacity, MAP_MIN_LOG2_CAPACITY), allocator) or_return
+		m.data = n.data
+		return nil
+	}
+
+	resized := map_alloc_dynamic(info, log2_new_capacity, allocator) or_return

-	e := __dynamic_map_get_entry(h, index)
-	e.hash = key_hash

-	key := rawptr(uintptr(e) + h.key_offset)
-	val := rawptr(uintptr(e) + h.value_offset)
+	ks, vs, hs, _, _ := map_kvh_data_dynamic(m^, info)

-	mem_copy(key, key_ptr, h.key_size)
-	mem_copy(val, value, h.value_size)
+	// Cache these loads to avoid hitting them in the for loop.
+	info_ks := &info.ks
+	info_vs := &info.vs

-	if __dynamic_map_full(h) {
-		__dynamic_map_grow(h, loc)
+	n := map_len(m^)
+	for i := uintptr(0); i < capacity; i += 1 {
+		hash := hs[i]
+		if map_hash_is_empty(hash) do continue
+		if map_hash_is_deleted(hash) do continue
+		k := map_cell_index_dynamic(ks, info_ks, i)
+		v := map_cell_index_dynamic(vs, info_vs, i)
+		map_insert_hash_dynamic(resized, info, hash, k, v)
+		// This comparison only needs to happen for each actually added pair, so
+		// do not fold it into the for loop condition as a micro-optimization.
+		n -= 1
+		if n == 0 do break
	}

-	return __dynamic_map_get_entry(h, index)
+	mem_free(rawptr(ks), allocator)
+
+	m.data = resized.data // This copies the new capacity too, via the tag bits
+
+	return nil
}

-// USED INTERNALLY BY THE COMPILER
-__dynamic_map_reserve :: proc "odin" (m: rawptr, table: Map_Header_Table, cap: uint, loc := #caller_location) {
-	h := Map_Header{(^Raw_Map)(m), table}

-	c := context
-	if h.m.entries.allocator.procedure != nil {
-		c.allocator = h.m.entries.allocator
+@(optimization_mode="size")
+map_shrink_dynamic :: proc(#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info) -> Allocator_Error {
+	allocator := m.allocator
+
+	// The capacity cannot shrink if the number of items in the map exceeds the
+	// resize threshold of the next smaller size (log2 capacity - 1). That is,
+	// the shrunk map still needs to be within the maximum load factor.
+	log2_capacity := map_log2_cap(m^)
+	if m.len >= map_load_factor(log2_capacity - 1) do return nil
+
+	shrunk := map_alloc_dynamic(info, log2_capacity - 1, allocator) or_return
+
+	capacity := uintptr(1) << log2_capacity
+
+	ks, vs, hs, _, _ := map_kvh_data_dynamic(m^, info)
+
+	info_ks := &info.ks
+	info_vs := &info.vs
+
+	n := map_len(m^)
+	for i := uintptr(0); i < capacity; i += 1 {
+		hash := hs[i]
+		if map_hash_is_empty(hash) do continue
+		if map_hash_is_deleted(hash) do continue
+
+		k := map_cell_index_dynamic(ks, info_ks, i)
+		v := map_cell_index_dynamic(vs, info_vs, i)
+
+		map_insert_hash_dynamic(shrunk, info, hash, k, v)
+
+		// This comparison only needs to happen for each actually added pair, so
+		// do not fold it into the for loop condition as a micro-optimization.
+		n -= 1
+		if n == 0 do break
+	}
+
+	mem_free(rawptr(ks), allocator)
+
+	m.data = shrunk.data // This copies the new capacity too, via the tag bits
+
+	return nil
+}
+
+// Single procedure for static and dynamic paths.
+@(require_results)
+map_free :: proc(m: Raw_Map, loc := #caller_location) -> Allocator_Error {
+	return mem_free(rawptr(map_data(m)), m.allocator, loc)
+}
+
+@(optimization_mode="speed")
+map_lookup_dynamic :: proc "contextless" (m: Raw_Map, #no_alias info: ^Map_Info, k: uintptr) -> (index: uintptr, ok: bool) {
+	if map_len(m) == 0 do return 0, false
+	h := info.hash(rawptr(k), 0)
+	p := map_desired_position(m, h)
+	d := uintptr(0)
+	c := (uintptr(1) << map_log2_cap(m)) - 1
+	ks, _, hs, _, _ := map_kvh_data_dynamic(m, info)
+	info_ks := &info.ks
+	for {
+		element_hash := hs[p]
+		if map_hash_is_empty(element_hash) {
+			return 0, false
+		} else if d > map_probe_distance(m, element_hash, p) {
+			return 0, false
+		} else if element_hash == h && info.cmp(rawptr(k), rawptr(map_cell_index_dynamic(ks, info_ks, p))) {
+			return p, true
+		}
+		p = (p + 1) & c
+		d += 1
	}
-	context = c
+}
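
// A lookup sketch (illustrative): the probe stops at an empty slot, or as soon
// as the probe distance exceeds that of the resident element, since the Robin
// Hood invariant guarantees the key cannot sit any further from its desired
// position.
//
//	key := "hello"
//	info := map_info(string, int)
//	if index, ok := map_lookup_dynamic(m, info, uintptr(&key)); ok {
//		_, vs, _, _, _ := map_kvh_data_dynamic(m, info)
//		value_ptr := rawptr(map_cell_index_dynamic(vs, &info.vs, index))
//	}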
+

-	cap := cap
-	cap = ceil_to_pow2(cap)

-	__dynamic_array_reserve(&h.m.entries, h.entry_size, h.entry_align, int(cap), loc)
+@(optimization_mode="speed")
+map_insert_dynamic :: proc(#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info, k, v: uintptr) -> (value: uintptr, err: Allocator_Error) {
+	if map_len(m^) + 1 >= map_resize_threshold(m^) {
+		map_grow_dynamic(m, info) or_return
+	}
+	hashed := info.hash(rawptr(k), 0)
+	result := map_insert_hash_dynamic(m^, info, hashed, k, v)
+	m.len += 1
+	return result, nil
+}
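
// An insert sketch (illustrative): keys and values are passed as addresses
// cast to uintptr, and the returned uintptr is the address of the value now
// stored inside the map.
//
//	k := "answer"
//	v := 42
//	value_ptr, err := map_insert_dynamic(&m, map_info(string, int), uintptr(&k), uintptr(&v))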

-	if h.m.entries.len*2 < len(h.m.hashes) {
-		return
+// Same as map_insert_dynamic but does not return the address of the inserted element.
+@(optimization_mode="speed")
+map_add_dynamic :: proc(#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info, k, v: uintptr) -> Allocator_Error {
+	if map_len(m^) + 1 >= map_resize_threshold(m^) {
+		map_grow_dynamic(m, info) or_return
	}
-	if __slice_resize(&h.m.hashes, int(cap*2), h.m.entries.allocator, loc) {
-		__dynamic_map_reset_entries(h, loc)
+	map_add_hash_dynamic(m^, info, info.hash(rawptr(k), 0), k, v)
+	m.len += 1
+	return nil
+}
+
+map_erase_dynamic :: #force_inline proc "contextless" (#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info, k: uintptr) -> bool {
+	// Setting the MSB of the stored hash marks the slot as a tombstone (see
+	// map_hash_is_deleted).
+	when size_of(Map_Hash) == 4  do MASK :: Map_Hash(0x8000_0000)
+	when size_of(Map_Hash) == 8  do MASK :: Map_Hash(0x8000_0000_0000_0000)
+	when size_of(Map_Hash) == 16 do MASK :: Map_Hash(0x8000_0000_0000_0000_0000_0000_0000_0000)
+	index := map_lookup_dynamic(m^, info, k) or_return
+	_, _, hs, _, _ := map_kvh_data_dynamic(m^, info)
+	hs[index] |= MASK
+	m.len -= 1
+	return true
+}
+
+map_clear_dynamic :: #force_inline proc "contextless" (#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info) {
+	if m.data == 0 do return
+	_, _, hs, _, _ := map_kvh_data_dynamic(m^, info)
+	intrinsics.mem_zero(rawptr(hs), map_cap(m^) * size_of(Map_Hash))
+	m.len = 0
+}
+
+
+// TODO(bill): Change signature to not be a rawptr
+__dynamic_map_get :: proc "contextless" (m: rawptr, #no_alias info: ^Map_Info, key: rawptr) -> rawptr {
+	rm := (^Raw_Map)(m)^
+	index, ok := map_lookup_dynamic(rm, info, uintptr(key))
+	if !ok {
+		return nil
	}
+	_, vs, _, _, _ := map_kvh_data_dynamic(rm, info)
+	return rawptr(map_cell_index_dynamic(vs, &info.vs, index))
+}
+
+__dynamic_map_set :: proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info, key, value: rawptr, loc := #caller_location) -> rawptr {
+	v, err := map_insert_dynamic(m, info, uintptr(key), uintptr(value))
+	if err != nil {
+		return nil
+	}
+	return rawptr(v)
+}
+
+__dynamic_map_reserve :: proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info, new_capacity: uint, loc := #caller_location) {
+	map_reserve_dynamic(m, info, uintptr(new_capacity))
}

+
+
INITIAL_HASH_SEED :: 0xcbf29ce484222325

_fnv64a :: proc "contextless" (data: []byte, seed: u64 = INITIAL_HASH_SEED) -> u64 {
@@ -154,7 +731,7 @@ _fnv64a :: proc "contextless" (data: []byte, seed: u64 = INITIAL_HASH_SEED) -> u
	for b in data {
		h = (h ~ u64(b)) * 0x100000001b3
	}
-	return h
+	return h | u64(h == 0)
}
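
// The `| u64(h == 0)` nudges an all-zero FNV-1a result to 1: hash value 0 is
// reserved to mean "empty slot" (see map_hash_is_empty), so no hasher may ever
// produce it. The same guard is applied to the other default hashers below.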

default_hash :: #force_inline proc "contextless" (data: []byte) -> uintptr {
@@ -177,7 +754,7 @@ _default_hasher_const :: #force_inline proc "contextless" (data: rawptr, seed: u
		h = (h ~ b) * 0x100000001b3
		p += 1
	}
-	return uintptr(h)
+	return uintptr(h) | uintptr(h == 0)
}

default_hasher_n :: #force_inline proc "contextless" (data: rawptr, seed: uintptr, N: int) -> uintptr {
@@ -188,7 +765,7 @@ default_hasher_n :: #force_inline proc "contextless" (data: rawptr, seed: uintpt
		h = (h ~ b) * 0x100000001b3
		p += 1
	}
-	return uintptr(h)
+	return uintptr(h) | uintptr(h == 0)
}

// NOTE(bill): There are loads of predefined ones to improve optimizations for small types
@@ -216,7 +793,7 @@ default_hasher_string :: proc "contextless" (data: rawptr, seed: uintptr) -> uin
	for b in str {
		h = (h ~ u64(b)) * 0x100000001b3
	}
-	return uintptr(h)
+	return uintptr(h) | uintptr(h == 0)
}
default_hasher_cstring :: proc "contextless" (data: rawptr, seed: uintptr) -> uintptr {
	h := u64(seed) + 0xcbf29ce484222325
@@ -226,203 +803,5 @@ default_hasher_cstring :: proc "contextless" (data: rawptr, seed: uintptr) -> ui
		h = (h ~ u64(b)) * 0x100000001b3
		ptr += 1
	}
-	return uintptr(h)
-}
-
-
-__get_map_header :: proc "contextless" (m: ^$T/map[$K]$V) -> (header: Map_Header) {
-	header.m = (^Raw_Map)(m)
-	header.table = #force_inline __get_map_header_table(T)
-	return
-}
-
-__get_map_header_runtime :: proc "contextless" (m: ^Raw_Map, ti: Type_Info_Map) -> (header: Map_Header) {
-	header.m = m
-	header.table = #force_inline __get_map_header_table_runtime(ti)
-	return
-}
-
-__get_map_header_table :: proc "contextless" ($T: typeid/map[$K]$V) -> (header: Map_Header_Table) {
-	Entry :: struct {
-		hash:  uintptr,
-		next:  Map_Index,
-		key:   K,
-		value: V,
-	}
-
-	header.equal = intrinsics.type_equal_proc(K)
-
-	header.entry_size = size_of(Entry)
-	header.entry_align = align_of(Entry)
-
-	header.key_offset = offset_of(Entry, key)
-	header.key_size = size_of(K)
-
-	header.value_offset = offset_of(Entry, value)
-	header.value_size = size_of(V)
-
-	return
-}
-
-__get_map_header_table_runtime :: proc "contextless" (ti: Type_Info_Map) -> (header: Map_Header) {
-	header.equal = ti.key_equal
-
-	entries := ti.generated_struct.variant.(Type_Info_Struct).types[1]
-	entry := entries.variant.(Type_Info_Dynamic_Array).elem
-	e := entry.variant.(Type_Info_Struct)
-
-	header.entry_size = entry.size
-	header.entry_align = entry.align
-
-	header.key_offset = e.offsets[2]
-	header.key_size = e.types[2].size
-
-	header.value_offset = e.offsets[3]
-	header.value_size = e.types[3].size
-
-	return
-}
-
-
-
-__slice_resize :: proc "odin" (array_: ^$T/[]$E, new_count: int, allocator: Allocator, loc := #caller_location) -> bool {
-	array := (^Raw_Slice)(array_)
-
-	if new_count < array.len {
-		return true
-	}
-
-	old_size := array.len*size_of(T)
-	new_size := new_count*size_of(T)
-
-	new_data, err := mem_resize(array.data, old_size, new_size, align_of(T), allocator, loc)
-	if err != nil {
-		return false
-	}
-	if new_data != nil || size_of(E) == 0 {
-		array.data = raw_data(new_data)
-		array.len = new_count
-		return true
-	}
-	return false
-}
-
-__dynamic_map_reset_entries :: proc "contextless" (h: Map_Header, loc := #caller_location) {
-	for i in 0..<len(h.m.hashes) {
-		h.m.hashes[i] = MAP_SENTINEL
-	}
-
-	for i in 0..<Map_Index(h.m.entries.len) {
-		entry_header := __dynamic_map_get_entry(h, i)
-		entry_header.next = MAP_SENTINEL
-
-		fr := __dynamic_map_find_from_entry(h, entry_header)
-		if fr.entry_prev != MAP_SENTINEL {
-			e := __dynamic_map_get_entry(h, fr.entry_prev)
-			e.next = i
-		} else {
-			h.m.hashes[fr.hash_index] = i
-		}
-	}
-}
-
-__dynamic_map_shrink :: proc "odin" (h: Map_Header, cap: int, loc := #caller_location) -> (did_shrink: bool) {
-	c := context
-	if h.m.entries.allocator.procedure != nil {
-		c.allocator = h.m.entries.allocator
-	}
-	context = c
-
-	return __dynamic_array_shrink(&h.m.entries, h.entry_size, h.entry_align, cap, loc)
-}
-
-
-@(private="file")
-ceil_to_pow2 :: proc "contextless" (n: uint) -> uint {
-	if n <= 2 {
-		return n
-	}
-	n := n
-	n -= 1
-	n |= n >> 1
-	n |= n >> 2
-	n |= n >> 4
-	n |= n >> 8
-	n |= n >> 16
-	when size_of(int) == 8 {
-		n |= n >> 32
-	}
-	n += 1
-	return n
-}
-
-__dynamic_map_grow :: proc "odin" (h: Map_Header, loc := #caller_location) {
-	new_count := max(uint(h.m.entries.cap) * 2, INITIAL_MAP_CAP)
-	// Rehash through Reserve
-	__dynamic_map_reserve(h.m, h.table, new_count, loc)
-}
-
-__dynamic_map_full :: #force_inline proc "contextless" (h: Map_Header) -> bool {
-	return int(0.75 * f64(len(h.m.hashes))) <= h.m.entries.len
-}
-
-__dynamic_map_find_from_entry :: proc "contextless" (h: Map_Header, e: ^Map_Entry_Header) -> Map_Find_Result #no_bounds_check {
-	key_ptr := __get_map_entry_key_ptr(h, e)
-	return __dynamic_map_find(h, e.hash, key_ptr)
-
-}
-
-__dynamic_map_find :: proc "contextless" (h: Map_Header, key_hash: uintptr, key_ptr: rawptr) -> Map_Find_Result #no_bounds_check {
-	fr := Map_Find_Result{MAP_SENTINEL, MAP_SENTINEL, MAP_SENTINEL}
-	if n := uintptr(len(h.m.hashes)); n != 0 {
-		fr.hash_index = Map_Index(key_hash & (n-1))
-		fr.entry_index = h.m.hashes[fr.hash_index]
-		for fr.entry_index != MAP_SENTINEL {
-			entry := __dynamic_map_get_entry(h, fr.entry_index)
-			entry_key_ptr := __get_map_entry_key_ptr(h, entry)
-			if entry.hash == key_hash && h.equal(entry_key_ptr, key_ptr) {
-				return fr
-			}
-
-			fr.entry_prev = fr.entry_index
-			fr.entry_index = entry.next
-		}
-	}
-	return fr
-}
-
-// Utility procedure used by other runtime procedures
-__map_find :: proc "contextless" (h: Map_Header, key_ptr: ^$K) -> Map_Find_Result #no_bounds_check {
-	hash := __get_map_key_hash(key_ptr)
-	return #force_inline __dynamic_map_find(h, hash, key_ptr)
-}
-
-__dynamic_map_get_entry :: #force_inline proc "contextless" (h: Map_Header, index: Map_Index) -> ^Map_Entry_Header {
-	return (^Map_Entry_Header)(uintptr(h.m.entries.data) + uintptr(index*Map_Index(h.entry_size)))
-}
-
-__dynamic_map_erase :: proc "contextless" (h: Map_Header, fr: Map_Find_Result) #no_bounds_check {
-	if fr.entry_prev != MAP_SENTINEL {
-		prev := __dynamic_map_get_entry(h, fr.entry_prev)
-		curr := __dynamic_map_get_entry(h, fr.entry_index)
-		prev.next = curr.next
-	} else {
-		h.m.hashes[fr.hash_index] = __dynamic_map_get_entry(h, fr.entry_index).next
-	}
-	last_index := Map_Index(h.m.entries.len-1)
-	if fr.entry_index != last_index {
-		old := __dynamic_map_get_entry(h, fr.entry_index)
-		end := __dynamic_map_get_entry(h, last_index)
-		mem_copy(old, end, h.entry_size)
-
-		last := __dynamic_map_find_from_entry(h, old)
-		if last.entry_prev != MAP_SENTINEL {
-			e := __dynamic_map_get_entry(h, last.entry_prev)
-			e.next = fr.entry_index
-		} else {
-			h.m.hashes[last.hash_index] = fr.entry_index
-		}
-	}
-
-	h.m.entries.len -= 1
+	return uintptr(h) | uintptr(h == 0)
}