package mem

import "base:runtime"
// import "base:sanitizer"

/*
Rollback stack default block size.
*/
ROLLBACK_STACK_DEFAULT_BLOCK_SIZE :: 4 * Megabyte

/*
Rollback stack max head block size.

This limitation is due to the size of `prev_ptr`, but it is only for the
head block; any allocation in excess of the allocator's `block_size` is
valid, so long as the block allocator can handle it.

This is because allocations over the block size are not split up if the item
within is freed; they are immediately returned to the block allocator.
*/
ROLLBACK_STACK_MAX_HEAD_BLOCK_SIZE :: 2 * Gigabyte

/*
Allocation header of the rollback stack allocator.
*/
Rollback_Stack_Header :: bit_field u64 {
	prev_offset: uintptr | 32,
	is_free:     bool    | 1,
	prev_ptr:    uintptr | 31,
}
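
// The header packs into a single u64: 32 bits of `prev_offset`, one `is_free`
// bit, and 31 bits of `prev_ptr`. The 31-bit `prev_ptr` is the source of the
// head block size cap: 2^31 bytes (2 Gigabytes) is the largest intra-block
// offset it can store.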

/*
Block header of the rollback stack allocator.
*/
Rollback_Stack_Block :: struct {
	next_block: ^Rollback_Stack_Block,
	last_alloc: rawptr,
	offset:     uintptr,
	buffer:     []byte,
}

/*
Rollback stack allocator data.
*/
Rollback_Stack :: struct {
	head:            ^Rollback_Stack_Block,
	block_size:      int,
	block_allocator: Allocator,
}
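
// A `block_allocator` with a nil `procedure` marks a stack backed by a fixed
// user-supplied buffer (see `rollback_stack_init_buffered` below): such a
// stack cannot grow and reports `.Out_Of_Memory` once its head block is full.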

@(private="file", require_results, no_sanitize_address)
rb_ptr_in_bounds :: proc(block: ^Rollback_Stack_Block, ptr: rawptr) -> bool {
	start := raw_data(block.buffer)
	end   := start[block.offset:]
	return start < ptr && ptr <= end
}

@(private="file", require_results, no_sanitize_address)
rb_find_ptr :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> (
	parent: ^Rollback_Stack_Block,
	block:  ^Rollback_Stack_Block,
	header: ^Rollback_Stack_Header,
	err:    Allocator_Error,
) {
	for block = stack.head; block != nil; block = block.next_block {
		if rb_ptr_in_bounds(block, ptr) {
			header = cast(^Rollback_Stack_Header)(cast(uintptr)ptr - size_of(Rollback_Stack_Header))
			return
		}
		parent = block
	}
	return nil, nil, nil, .Invalid_Pointer
}

@(private="file", require_results, no_sanitize_address)
rb_find_last_alloc :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> (
	block:  ^Rollback_Stack_Block,
	header: ^Rollback_Stack_Header,
	ok:     bool,
) {
	for block = stack.head; block != nil; block = block.next_block {
		if block.last_alloc == ptr {
			header = cast(^Rollback_Stack_Header)(cast(uintptr)ptr - size_of(Rollback_Stack_Header))
			return block, header, true
		}
	}
	return nil, nil, false
}
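
/*
Collapse a trailing run of freed allocations within a block.

Starting from the given header, walk backwards while the most recent
allocation is marked free, rewinding `offset` and `last_alloc` as it goes.
This is how the allocator provides its limited support for out-of-order
frees: an item freed out of order is only marked `is_free`, and its space is
reclaimed later, once everything allocated after it has been freed as well.
*/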
@(private="file", no_sanitize_address)
rb_rollback_block :: proc(block: ^Rollback_Stack_Block, header: ^Rollback_Stack_Header) {
	header := header
	for block.offset > 0 && header.is_free {
		block.offset     = header.prev_offset
		block.last_alloc = raw_data(block.buffer)[header.prev_ptr:]
		header = cast(^Rollback_Stack_Header)(raw_data(block.buffer)[header.prev_ptr - size_of(Rollback_Stack_Header):])
	}
}

/*
Free memory to a rollback stack allocator.
*/
@(private="file", require_results, no_sanitize_address)
rb_free :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> Allocator_Error {
	parent, block, header := rb_find_ptr(stack, ptr) or_return
	if header.is_free {
		return .Invalid_Pointer
	}
	header.is_free = true
	if block.last_alloc == ptr {
		block.offset = header.prev_offset
		rb_rollback_block(block, header)
	}
	if parent != nil && block.offset == 0 {
		parent.next_block = block.next_block
		runtime.mem_free_with_size(block, size_of(Rollback_Stack_Block) + len(block.buffer), stack.block_allocator)
	}
	return nil
}

/*
Free all memory owned by the rollback stack allocator.
*/
@(private="file", no_sanitize_address)
rb_free_all :: proc(stack: ^Rollback_Stack) {
	for block := stack.head.next_block; block != nil; /**/ {
		next_block := block.next_block
		runtime.mem_free_with_size(block, size_of(Rollback_Stack_Block) + len(block.buffer), stack.block_allocator)
		block = next_block
	}

	stack.head.next_block = nil
	stack.head.last_alloc = nil
	stack.head.offset     = 0
	// sanitizer.address_poison(stack.head.buffer)
}

/*
Allocate memory using the rollback stack allocator.
*/
@(require_results, no_sanitize_address)
rb_alloc :: proc(
	stack: ^Rollback_Stack,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> (rawptr, Allocator_Error) {
	bytes, err := rb_alloc_bytes_non_zeroed(stack, size, alignment, loc)
	if bytes != nil {
		zero_slice(bytes)
	}
	return raw_data(bytes), err
}

/*
Allocate memory using the rollback stack allocator.
*/
@(require_results, no_sanitize_address)
rb_alloc_bytes :: proc(
	stack: ^Rollback_Stack,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> ([]byte, Allocator_Error) {
	bytes, err := rb_alloc_bytes_non_zeroed(stack, size, alignment, loc)
	if bytes != nil {
		zero_slice(bytes)
	}
	return bytes, err
}

/*
Allocate uninitialized memory using the rollback stack allocator.
*/
@(require_results, no_sanitize_address)
rb_alloc_non_zeroed :: proc(
	stack: ^Rollback_Stack,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> (rawptr, Allocator_Error) {
	bytes, err := rb_alloc_bytes_non_zeroed(stack, size, alignment, loc)
	return raw_data(bytes), err
}

/*
Allocate uninitialized memory using the rollback stack allocator.
*/
@(require_results, no_sanitize_address)
rb_alloc_bytes_non_zeroed :: proc(
	stack: ^Rollback_Stack,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> (result: []byte, err: Allocator_Error) {
	assert(size >= 0, "Size must be positive or zero.", loc)
	assert(is_power_of_two(cast(uintptr)alignment), "Alignment must be a power of two.", loc)

	parent: ^Rollback_Stack_Block
	for block := stack.head; /**/; block = block.next_block {
		when !ODIN_DISABLE_ASSERT {
			allocated_new_block: bool
		}
		if block == nil {
			if stack.block_allocator.procedure == nil {
				return nil, .Out_Of_Memory
			}
			minimum_size_required := size_of(Rollback_Stack_Header) + size + alignment - 1
			new_block_size := max(minimum_size_required, stack.block_size)
			block = rb_make_block(new_block_size, stack.block_allocator) or_return
			parent.next_block = block
			when !ODIN_DISABLE_ASSERT {
				allocated_new_block = true
			}
		}

		start := raw_data(block.buffer)[block.offset:]
		padding := cast(uintptr)calc_padding_with_header(cast(uintptr)start, cast(uintptr)alignment, size_of(Rollback_Stack_Header))

		if block.offset + padding + cast(uintptr)size > cast(uintptr)len(block.buffer) {
			when !ODIN_DISABLE_ASSERT {
				if allocated_new_block {
					panic("Rollback Stack Allocator allocated a new block but did not use it.")
				}
			}
			parent = block
			continue
		}

		header := cast(^Rollback_Stack_Header)(start[padding - size_of(Rollback_Stack_Header):])
		ptr := start[padding:]

		header^ = {
			prev_offset = block.offset,
			prev_ptr    = uintptr(0) if block.last_alloc == nil else cast(uintptr)block.last_alloc - cast(uintptr)raw_data(block.buffer),
			is_free     = false,
		}

		block.last_alloc = ptr
		block.offset    += padding + cast(uintptr)size

		if len(block.buffer) > stack.block_size {
			// This block exceeds the allocator's standard block size and is
			// considered a singleton. Prevent any further allocations on it.
			block.offset = cast(uintptr)len(block.buffer)
		}

		res := ptr[:size]
		// sanitizer.address_unpoison(res)
		return res, nil
	}

	return nil, .Out_Of_Memory
}
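
// Layout of a single allocation within a block, as produced above
// (widths not to scale):
//
//	| ...used... | alignment padding | header (8 bytes) | user data ... |
//	                                 ^ `header`         ^ returned `ptr`
//
// `calc_padding_with_header` returns, in effect, the distance from the
// current offset to the returned pointer: the smallest value that both
// aligns the pointer and leaves room for the 8-byte `Rollback_Stack_Header`
// directly before it.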

/*
Resize an allocation owned by the rollback stack allocator.
*/
@(require_results, no_sanitize_address)
rb_resize :: proc(
	stack: ^Rollback_Stack,
	old_ptr: rawptr,
	old_size: int,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> (rawptr, Allocator_Error) {
	bytes, err := rb_resize_bytes_non_zeroed(stack, byte_slice(old_ptr, old_size), size, alignment, loc)
	if bytes != nil {
		if old_ptr == nil {
			zero_slice(bytes)
		} else if size > old_size {
			zero_slice(bytes[old_size:])
		}
	}
	return raw_data(bytes), err
}

/*
Resize an allocation owned by the rollback stack allocator.
*/
@(require_results, no_sanitize_address)
rb_resize_bytes :: proc(
	stack: ^Rollback_Stack,
	old_memory: []byte,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> ([]u8, Allocator_Error) {
	bytes, err := rb_resize_bytes_non_zeroed(stack, old_memory, size, alignment, loc)
	if bytes != nil {
		if old_memory == nil {
			zero_slice(bytes)
		} else if size > len(old_memory) {
			zero_slice(bytes[len(old_memory):])
		}
	}
	return bytes, err
}

/*
Resize an allocation owned by the rollback stack allocator without explicit
zero-initialization.
*/
@(require_results, no_sanitize_address)
rb_resize_non_zeroed :: proc(
	stack: ^Rollback_Stack,
	old_ptr: rawptr,
	old_size: int,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> (rawptr, Allocator_Error) {
	bytes, err := rb_resize_bytes_non_zeroed(stack, byte_slice(old_ptr, old_size), size, alignment, loc)
	return raw_data(bytes), err
}

/*
Resize an allocation owned by the rollback stack allocator without explicit
zero-initialization.
*/
@(require_results, no_sanitize_address)
rb_resize_bytes_non_zeroed :: proc(
	stack: ^Rollback_Stack,
	old_memory: []byte,
	size: int,
	alignment := DEFAULT_ALIGNMENT,
	loc := #caller_location,
) -> (result: []byte, err: Allocator_Error) {
	old_size := len(old_memory)
	ptr := raw_data(old_memory)

	assert(size >= 0, "Size must be positive or zero.", loc)
	assert(old_size >= 0, "Old size must be positive or zero.", loc)
	assert(is_power_of_two(cast(uintptr)alignment), "Alignment must be a power of two.", loc)

	if ptr != nil {
		if block, _, ok := rb_find_last_alloc(stack, ptr); ok {
			// `block.offset` should never underflow because it is contingent
			// on `old_size` in the first place, assuming sane arguments.
			assert(block.offset >= cast(uintptr)old_size, "Rollback Stack Allocator received invalid `old_size`.")
			if block.offset + cast(uintptr)size - cast(uintptr)old_size < cast(uintptr)len(block.buffer) {
				// Prevent singleton allocations from fragmenting by forbidding
				// them to shrink, removing the possibility of overflow bugs.
				if len(block.buffer) <= stack.block_size {
					block.offset += cast(uintptr)size - cast(uintptr)old_size
				}
				res := ptr[:size]
				// sanitizer.address_unpoison(res)
				#no_bounds_check return res, nil
			}
		}
	}

	result = rb_alloc_bytes_non_zeroed(stack, size, alignment) or_return
	runtime.mem_copy_non_overlapping(raw_data(result), ptr, old_size)
	err = rb_free(stack, ptr)
	return
}

@(private="file", require_results, no_sanitize_address)
rb_make_block :: proc(size: int, allocator: Allocator) -> (block: ^Rollback_Stack_Block, err: Allocator_Error) {
	buffer := runtime.mem_alloc(size_of(Rollback_Stack_Block) + size, align_of(Rollback_Stack_Block), allocator) or_return

	block = cast(^Rollback_Stack_Block)raw_data(buffer)
	#no_bounds_check block.buffer = buffer[size_of(Rollback_Stack_Block):]
	return
}

/*
Initialize the rollback stack allocator using a fixed backing buffer.
*/
@(no_sanitize_address)
rollback_stack_init_buffered :: proc(stack: ^Rollback_Stack, buffer: []byte, location := #caller_location) {
	MIN_SIZE :: size_of(Rollback_Stack_Block) + size_of(Rollback_Stack_Header) + size_of(rawptr)
	assert(len(buffer) >= MIN_SIZE, "User-provided buffer to Rollback Stack Allocator is too small.", location)

	block := cast(^Rollback_Stack_Block)raw_data(buffer)
	block^ = {}
	#no_bounds_check block.buffer = buffer[size_of(Rollback_Stack_Block):]

	stack^ = {}
	stack.head       = block
	stack.block_size = len(block.buffer)
}
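
/*
A minimal usage sketch for the buffered form (hypothetical caller code,
assuming this package is imported as `core:mem`):

	backing: [4096]byte
	stack: mem.Rollback_Stack
	mem.rollback_stack_init_buffered(&stack, backing[:])
	context.allocator = mem.rollback_stack_allocator(&stack)

The first `size_of(Rollback_Stack_Block)` bytes of the buffer are claimed for
the block header itself, so the usable capacity is slightly smaller than the
buffer.
*/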

/*
Initialize the rollback stack allocator using a backing block allocator.
*/
@(no_sanitize_address)
rollback_stack_init_dynamic :: proc(
	stack: ^Rollback_Stack,
	block_size: int = ROLLBACK_STACK_DEFAULT_BLOCK_SIZE,
	block_allocator := context.allocator,
	location := #caller_location,
) -> Allocator_Error {
	assert(block_size >= size_of(Rollback_Stack_Header) + size_of(rawptr), "Rollback Stack Allocator block size is too small.", location)
	when size_of(int) > 4 {
		// It's impossible to specify an argument in excess when your integer
		// size is insufficient; check only on platforms with big enough ints.
		assert(block_size <= ROLLBACK_STACK_MAX_HEAD_BLOCK_SIZE, "Rollback Stack Allocators cannot support head blocks larger than 2 gigabytes.", location)
	}

	block := rb_make_block(block_size, block_allocator) or_return

	stack^ = {}
	stack.head            = block
	stack.block_size      = block_size
	stack.block_allocator = block_allocator

	return nil
}

/*
Initialize the rollback stack.
*/
rollback_stack_init :: proc {
	rollback_stack_init_buffered,
	rollback_stack_init_dynamic,
}

/*
Destroy a rollback stack.
*/
@(no_sanitize_address)
rollback_stack_destroy :: proc(stack: ^Rollback_Stack) {
	if stack.block_allocator.procedure != nil {
		rb_free_all(stack)
		free(stack.head, stack.block_allocator)
	}
	stack^ = {}
}

/*
Rollback stack allocator.

The Rollback Stack Allocator was designed for the test runner to be fast,
able to grow, and respect the Tracking Allocator's requirement for
individual frees. It is not overly concerned with fragmentation, however.

It has support for expansion when configured with a block allocator and
limited support for out-of-order frees.

Allocation has constant-time best and usual case performance.
At worst, it is linear according to the number of memory blocks.

Allocation follows a first-fit strategy when there are multiple memory
blocks.

Freeing has constant-time best and usual case performance.
At worst, it is linear according to the number of memory blocks and the
number of freed items preceding the last item in a block.

Resizing has constant-time performance, if it's the last item in a block, or
the new size is smaller. Naturally, this becomes linear-time if there are
multiple blocks to search for the pointer's owning block. Otherwise, the
allocator defaults to a combined alloc & free operation internally.

Out-of-order freeing is accomplished by collapsing a run of freed items
from the last allocation backwards.

Each allocation has an overhead of 8 bytes, plus any extra bytes needed to
satisfy the requested alignment.
*/
@(require_results, no_sanitize_address)
rollback_stack_allocator :: proc(stack: ^Rollback_Stack) -> Allocator {
	return Allocator{
		data      = stack,
		procedure = rollback_stack_allocator_proc,
	}
}
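
/*
A minimal end-to-end sketch of the dynamic form (hypothetical caller code,
assuming this package is imported as `core:mem`):

	stack: mem.Rollback_Stack
	if err := mem.rollback_stack_init_dynamic(&stack); err != nil {
		// Could not allocate the head block.
	}
	defer mem.rollback_stack_destroy(&stack)

	context.allocator = mem.rollback_stack_allocator(&stack)
	data := make([]int, 128) // Served from the head block.
	delete(data)             // Individual frees are supported.

An allocation that does not fit in any existing block causes a new block to
be requested from `block_allocator`, per `rb_alloc_bytes_non_zeroed` above.
*/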

@(require_results, no_sanitize_address)
rollback_stack_allocator_proc :: proc(
	allocator_data: rawptr,
	mode: Allocator_Mode,
	size, alignment: int,
	old_memory: rawptr,
	old_size: int,
	loc := #caller_location,
) -> (result: []byte, err: Allocator_Error) {
	stack := cast(^Rollback_Stack)allocator_data

	switch mode {
	case .Alloc:
		return rb_alloc_bytes(stack, size, alignment, loc)
	case .Alloc_Non_Zeroed:
		return rb_alloc_bytes_non_zeroed(stack, size, alignment, loc)
	case .Free:
		return nil, rb_free(stack, old_memory)
	case .Free_All:
		rb_free_all(stack)
		return nil, nil
	case .Resize:
		return rb_resize_bytes(stack, byte_slice(old_memory, old_size), size, alignment, loc)
	case .Resize_Non_Zeroed:
		return rb_resize_bytes_non_zeroed(stack, byte_slice(old_memory, old_size), size, alignment, loc)
	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Resize_Non_Zeroed}
		}
		return nil, nil
	case .Query_Info:
		return nil, .Mode_Not_Implemented
	}
	return
}