package mem

// The Rollback Stack Allocator was designed for the test runner to be fast,
// able to grow, and to respect the Tracking Allocator's requirement for
// individual frees. It is not, however, overly concerned with fragmentation.
//
// It has support for expansion when configured with a block allocator and
// limited support for out-of-order frees.
//
// Allocation has constant-time best and usual case performance.
// At worst, it is linear according to the number of memory blocks.
//
// Allocation follows a first-fit strategy when there are multiple memory
// blocks.
//
// Freeing has constant-time best and usual case performance.
// At worst, it is linear according to the number of memory blocks and the
// number of freed items preceding the last item in a block.
//
// Resizing has constant-time performance if the pointer is the last item in
// a block or the new size is smaller. Naturally, this becomes linear-time if
// there are multiple blocks to search for the pointer's owning block.
// Otherwise, the allocator falls back to a combined alloc & free operation
// internally.
//
// Out-of-order freeing is accomplished by collapsing a run of freed items,
// starting from the last allocation and walking backwards.
//
// Each allocation has an overhead of 8 bytes, plus any extra bytes needed to
// satisfy the requested alignment.
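//
// A minimal usage sketch, using only the public procedures declared in this
// file (error handling elided for brevity):
//
//	stack: Rollback_Stack
//	_ = rollback_stack_init_dynamic(&stack)
//	defer rollback_stack_destroy(&stack)
//
//	context.allocator = rollback_stack_allocator(&stack)
//	buf := make([]byte, 256) // Served from the head block.
//	delete(buf)              // Individual frees are supported.
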
import "base:runtime"

ROLLBACK_STACK_DEFAULT_BLOCK_SIZE :: 4 * Megabyte

// This limitation is due to the size of `prev_ptr`, but it is only for the
// head block; any allocation in excess of the allocator's `block_size` is
// valid, so long as the block allocator can handle it.
//
// This is because allocations over the block size are not split up if the item
// within is freed; they are immediately returned to the block allocator.
ROLLBACK_STACK_MAX_HEAD_BLOCK_SIZE :: 2 * Gigabyte
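
// Each allocation is immediately preceded by one of these 8-byte headers
// (the `bit_field` is backed by a `u64`).
//
// - `prev_offset` is the block's `offset` as it was before the allocation
//   was made, i.e. where the block would roll back to.
// - `prev_ptr` is the position of the previous allocation, relative to the
//   start of the block's buffer.
// - `is_free` marks an item freed out of order, pending reclamation by a
//   later rollback.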
Rollback_Stack_Header :: bit_field u64 {
	prev_offset: uintptr | 32,
	is_free:     bool    | 1,
	prev_ptr:    uintptr | 31,
}

Rollback_Stack_Block :: struct {
	next_block: ^Rollback_Stack_Block,
	last_alloc: rawptr,
	offset:     uintptr,
	buffer:     []byte,
}

Rollback_Stack :: struct {
	head:            ^Rollback_Stack_Block,
	block_size:      int,
	block_allocator: Allocator,
}

@(private="file", require_results)
rb_ptr_in_bounds :: proc(block: ^Rollback_Stack_Block, ptr: rawptr) -> bool {
	start := raw_data(block.buffer)
	end   := start[block.offset:]
	return start < ptr && ptr <= end
}

@(private="file", require_results)
rb_find_ptr :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> (
	parent: ^Rollback_Stack_Block,
	block:  ^Rollback_Stack_Block,
	header: ^Rollback_Stack_Header,
	err:    Allocator_Error,
) {
	for block = stack.head; block != nil; block = block.next_block {
		if rb_ptr_in_bounds(block, ptr) {
			header = cast(^Rollback_Stack_Header)(cast(uintptr)ptr - size_of(Rollback_Stack_Header))
			return
		}
		parent = block
	}
	return nil, nil, nil, .Invalid_Pointer
}

@(private="file", require_results)
rb_find_last_alloc :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> (
	block:  ^Rollback_Stack_Block,
	header: ^Rollback_Stack_Header,
	ok:     bool,
) {
	for block = stack.head; block != nil; block = block.next_block {
		if block.last_alloc == ptr {
			header = cast(^Rollback_Stack_Header)(cast(uintptr)ptr - size_of(Rollback_Stack_Header))
			return block, header, true
		}
	}
	return nil, nil, false
}
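
// Walk backwards from the given header, reclaiming every item already marked
// `is_free`, until a live item or the start of the block is reached.
//
// For example: allocate A, B, and C, then free B. B is only marked free,
// since it is not the last allocation. Freeing C afterwards rolls the block
// back over both C and B in one pass.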
@(private="file")
rb_rollback_block :: proc(block: ^Rollback_Stack_Block, header: ^Rollback_Stack_Header) {
	header := header
	for block.offset > 0 && header.is_free {
		block.offset = header.prev_offset
		block.last_alloc = raw_data(block.buffer)[header.prev_ptr:]
		header = cast(^Rollback_Stack_Header)(raw_data(block.buffer)[header.prev_ptr - size_of(Rollback_Stack_Header):])
	}
}

@(private="file", require_results)
rb_free :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> Allocator_Error {
	parent, block, header := rb_find_ptr(stack, ptr) or_return
	if header.is_free {
		return .Invalid_Pointer
	}
	header.is_free = true
	if block.last_alloc == ptr {
		block.offset = header.prev_offset
		rb_rollback_block(block, header)
	}
	if parent != nil && block.offset == 0 {
		parent.next_block = block.next_block
		runtime.mem_free_with_size(block, size_of(Rollback_Stack_Block) + len(block.buffer), stack.block_allocator)
	}
	return nil
}

@(private="file")
rb_free_all :: proc(stack: ^Rollback_Stack) {
	for block := stack.head.next_block; block != nil; /**/ {
		next_block := block.next_block
		runtime.mem_free_with_size(block, size_of(Rollback_Stack_Block) + len(block.buffer), stack.block_allocator)
		block = next_block
	}
	stack.head.next_block = nil
	stack.head.last_alloc = nil
	stack.head.offset = 0
}
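
// Resize in place when `ptr` is the last allocation in its block and the new
// size still fits (or the item shrinks); otherwise fall back to a combined
// alloc, copy, and free.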
@(private="file", require_results)
rb_resize :: proc(stack: ^Rollback_Stack, ptr: rawptr, old_size, size, alignment: int) -> (result: []byte, err: Allocator_Error) {
	if ptr != nil {
		if block, _, ok := rb_find_last_alloc(stack, ptr); ok {
			// `block.offset` should never underflow because it is contingent
			// on `old_size` in the first place, assuming sane arguments.
			assert(block.offset >= cast(uintptr)old_size, "Rollback Stack Allocator received invalid `old_size`.")
			if block.offset + cast(uintptr)size - cast(uintptr)old_size < cast(uintptr)len(block.buffer) {
				// Prevent singleton allocations from fragmenting by forbidding
				// them to shrink, removing the possibility of overflow bugs.
				if len(block.buffer) <= stack.block_size {
					block.offset += cast(uintptr)size - cast(uintptr)old_size
				}
				#no_bounds_check return (cast([^]byte)ptr)[:size], nil
			}
		}
	}
	result = rb_alloc(stack, size, alignment) or_return
	runtime.mem_copy_non_overlapping(raw_data(result), ptr, old_size)
	err = rb_free(stack, ptr)
	return
}
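
// Allocate from the first block that can satisfy the request, chaining a new
// block from the block allocator when none can.
//
// A worked example of the per-allocation overhead, assuming
// `calc_padding_with_header` returns the smallest padding that both fits the
// header and aligns the user pointer: with the 8-byte header and a requested
// alignment of 16, a position that is already 16-byte aligned consumes 16
// bytes before the user pointer (8 for the header, 8 to preserve alignment).
// The worst case is `size_of(Rollback_Stack_Header) + alignment - 1` extra
// bytes, which is exactly how `minimum_size_required` sizes a new block below.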
@(private="file", require_results)
rb_alloc :: proc(stack: ^Rollback_Stack, size, alignment: int) -> (result: []byte, err: Allocator_Error) {
	parent: ^Rollback_Stack_Block
	for block := stack.head; /**/; block = block.next_block {
		when !ODIN_DISABLE_ASSERT {
			allocated_new_block: bool
		}
		if block == nil {
			if stack.block_allocator.procedure == nil {
				return nil, .Out_Of_Memory
			}
			minimum_size_required := size_of(Rollback_Stack_Header) + size + alignment - 1
			new_block_size := max(minimum_size_required, stack.block_size)
			block = rb_make_block(new_block_size, stack.block_allocator) or_return
			parent.next_block = block
			when !ODIN_DISABLE_ASSERT {
				allocated_new_block = true
			}
		}
		start := raw_data(block.buffer)[block.offset:]
		padding := cast(uintptr)calc_padding_with_header(cast(uintptr)start, cast(uintptr)alignment, size_of(Rollback_Stack_Header))
		if block.offset + padding + cast(uintptr)size > cast(uintptr)len(block.buffer) {
			when !ODIN_DISABLE_ASSERT {
				if allocated_new_block {
					panic("Rollback Stack Allocator allocated a new block but did not use it.")
				}
			}
			parent = block
			continue
		}
		header := cast(^Rollback_Stack_Header)(start[padding - size_of(Rollback_Stack_Header):])
		ptr := start[padding:]
		header^ = {
			prev_offset = block.offset,
			prev_ptr    = uintptr(0) if block.last_alloc == nil else cast(uintptr)block.last_alloc - cast(uintptr)raw_data(block.buffer),
			is_free     = false,
		}
		block.last_alloc = ptr
		block.offset += padding + cast(uintptr)size
		if len(block.buffer) > stack.block_size {
			// This block exceeds the allocator's standard block size and is
			// considered a singleton. Prevent any further allocations on it.
			block.offset = cast(uintptr)len(block.buffer)
		}
		#no_bounds_check return ptr[:size], nil
	}
	return nil, .Out_Of_Memory
}
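
// Allocate the block metadata and its buffer as one contiguous allocation;
// the usable buffer begins immediately after the `Rollback_Stack_Block`
// bookkeeping fields.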
@(private="file", require_results)
rb_make_block :: proc(size: int, allocator: Allocator) -> (block: ^Rollback_Stack_Block, err: Allocator_Error) {
	buffer := runtime.mem_alloc(size_of(Rollback_Stack_Block) + size, align_of(Rollback_Stack_Block), allocator) or_return
	block = cast(^Rollback_Stack_Block)raw_data(buffer)
	#no_bounds_check block.buffer = buffer[size_of(Rollback_Stack_Block):]
	return
}
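
// Initialize the allocator with a user-provided, fixed-size buffer.
// The stack cannot grow in this configuration; when the buffer is exhausted,
// allocation fails with `.Out_Of_Memory`.
//
// For example, with a stack-allocated backing buffer:
//
//	backing: [4096]byte
//	stack: Rollback_Stack
//	rollback_stack_init_buffered(&stack, backing[:])
//	context.allocator = rollback_stack_allocator(&stack)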
rollback_stack_init_buffered :: proc(stack: ^Rollback_Stack, buffer: []byte, location := #caller_location) {
	MIN_SIZE :: size_of(Rollback_Stack_Block) + size_of(Rollback_Stack_Header) + size_of(rawptr)
	assert(len(buffer) >= MIN_SIZE, "User-provided buffer to Rollback Stack Allocator is too small.", location)

	block := cast(^Rollback_Stack_Block)raw_data(buffer)
	block^ = {}
	#no_bounds_check block.buffer = buffer[size_of(Rollback_Stack_Block):]

	stack^ = {}
	stack.head = block
	stack.block_size = len(block.buffer)
}
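
// Initialize the allocator with a growable chain of blocks, each provided by
// `block_allocator`. Blocks beyond the head are created on demand and are
// returned to the block allocator once every item inside them has been freed.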
rollback_stack_init_dynamic :: proc(
	stack: ^Rollback_Stack,
	block_size: int = ROLLBACK_STACK_DEFAULT_BLOCK_SIZE,
	block_allocator := context.allocator,
	location := #caller_location,
) -> Allocator_Error {
	assert(block_size >= size_of(Rollback_Stack_Header) + size_of(rawptr), "Rollback Stack Allocator block size is too small.", location)
	when size_of(int) > 4 {
		// It's impossible to specify an argument in excess when your integer
		// size is insufficient; check only on platforms with big enough ints.
		assert(block_size <= ROLLBACK_STACK_MAX_HEAD_BLOCK_SIZE, "Rollback Stack Allocators cannot support head blocks larger than 2 gigabytes.", location)
	}

	block := rb_make_block(block_size, block_allocator) or_return

	stack^ = {}
	stack.head = block
	stack.block_size = block_size
	stack.block_allocator = block_allocator

	return nil
}

rollback_stack_init :: proc {
	rollback_stack_init_buffered,
	rollback_stack_init_dynamic,
}

rollback_stack_destroy :: proc(stack: ^Rollback_Stack) {
	if stack.block_allocator.procedure != nil {
		rb_free_all(stack)
		free(stack.head, stack.block_allocator)
	}
	stack^ = {}
}
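
// Return an `Allocator` backed by the given stack, suitable for use as
// `context.allocator`; see the usage sketch at the top of this file.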
@(require_results)
rollback_stack_allocator :: proc(stack: ^Rollback_Stack) -> Allocator {
	return Allocator {
		data = stack,
		procedure = rollback_stack_allocator_proc,
	}
}

@(require_results)
rollback_stack_allocator_proc :: proc(
	allocator_data: rawptr,
	mode: Allocator_Mode,
	size, alignment: int,
	old_memory: rawptr,
	old_size: int,
	location := #caller_location,
) -> (result: []byte, err: Allocator_Error) {
	stack := cast(^Rollback_Stack)allocator_data

	switch mode {
	case .Alloc, .Alloc_Non_Zeroed:
		assert(size >= 0, "Size must be positive or zero.", location)
		assert(is_power_of_two(cast(uintptr)alignment), "Alignment must be a power of two.", location)
		result = rb_alloc(stack, size, alignment) or_return
		if mode == .Alloc {
			zero_slice(result)
		}

	case .Free:
		err = rb_free(stack, old_memory)

	case .Free_All:
		rb_free_all(stack)

	case .Resize, .Resize_Non_Zeroed:
		assert(size >= 0, "Size must be positive or zero.", location)
		assert(old_size >= 0, "Old size must be positive or zero.", location)
		assert(is_power_of_two(cast(uintptr)alignment), "Alignment must be a power of two.", location)
		result = rb_resize(stack, old_memory, old_size, size, alignment) or_return
		#no_bounds_check if mode == .Resize && size > old_size {
			zero_slice(result[old_size:])
		}

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Resize_Non_Zeroed}
		}
		return nil, nil

	case .Query_Info:
		return nil, .Mode_Not_Implemented
	}

	return
}