tlsf_internal.odin

/*
	Copyright 2024 Jeroen van Rijn <[email protected]>.
	Made available under Odin's BSD-3 license.

	List of contributors:
		Matt Conte:      Original C implementation, see LICENSE file in this package
		Jeroen van Rijn: Source port
*/
package mem_tlsf

import "base:intrinsics"
import "base:runtime"
// import "core:fmt"

// log2 of number of linear subdivisions of block sizes.
// Larger values require more memory in the control structure.
// Values of 4 or 5 are typical.
TLSF_SL_INDEX_COUNT_LOG2 :: #config(TLSF_SL_INDEX_COUNT_LOG2, 5)

// All allocation sizes and addresses are aligned to 4/8 bytes.
ALIGN_SIZE_LOG2 :: 3 when size_of(uintptr) == 8 else 2

// We can increase this to support larger allocation sizes,
// at the expense of more overhead in the TLSF structure.
FL_INDEX_MAX :: 32 when size_of(uintptr) == 8 else 30
#assert(FL_INDEX_MAX < 36)

ALIGN_SIZE       :: 1 << ALIGN_SIZE_LOG2
SL_INDEX_COUNT   :: 1 << TLSF_SL_INDEX_COUNT_LOG2
FL_INDEX_SHIFT   :: TLSF_SL_INDEX_COUNT_LOG2 + ALIGN_SIZE_LOG2
FL_INDEX_COUNT   :: FL_INDEX_MAX - FL_INDEX_SHIFT + 1
SMALL_BLOCK_SIZE :: 1 << FL_INDEX_SHIFT

/*
	We support allocations of sizes up to (1 << `FL_INDEX_MAX`) bits.
	However, because we linearly subdivide the second-level lists, and
	our minimum size granularity is 4 bytes, it doesn't make sense to
	create first-level lists for sizes smaller than `SL_INDEX_COUNT` * 4,
	or (1 << (`TLSF_SL_INDEX_COUNT_LOG2` + 2)) bytes, as we would be
	trying to split size ranges into more slots than we have available.
	Instead, we calculate the minimum threshold size, and place all
	blocks below that size into the 0th first-level list.
*/

// SL_INDEX_COUNT must be <= number of bits in sl_bitmap's storage type.
#assert(size_of(uint) * 8 >= SL_INDEX_COUNT)

// Ensure we've properly tuned our sizes.
#assert(ALIGN_SIZE == SMALL_BLOCK_SIZE / SL_INDEX_COUNT)
#assert(size_of(Allocator) % ALIGN_SIZE == 0)
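
/*
	Worked example (illustrative only, assuming the defaults above on a 64-bit
	target, i.e. TLSF_SL_INDEX_COUNT_LOG2 = 5 and ALIGN_SIZE_LOG2 = 3):

		ALIGN_SIZE       = 1 << 3     = 8 bytes
		SL_INDEX_COUNT   = 1 << 5     = 32 second-level lists per first-level list
		FL_INDEX_SHIFT   = 5 + 3      = 8
		FL_INDEX_COUNT   = 32 - 8 + 1 = 25 first-level lists
		SMALL_BLOCK_SIZE = 1 << 8     = 256 bytes

	So every block smaller than 256 bytes lands in first-level list 0, and
	256 / 32 = 8 = ALIGN_SIZE, which is exactly what the asserts above verify.
*/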

Pool :: struct {
	data:      []u8 `fmt:"-"`,
	allocator: runtime.Allocator,
	next:      ^Pool,
}

/*
	Block header structure.

	There are several implementation subtleties involved:
	- The `prev_phys_block` field is only valid if the previous block is free.
	- The `prev_phys_block` field is actually stored at the end of the
	  previous block. It appears at the beginning of this structure only to
	  simplify the implementation.
	- The `next_free` / `prev_free` fields are only valid if the block is free.
*/
Block_Header :: struct {
	prev_phys_block: ^Block_Header,
	size:            uint, // The size of this block, excluding the block header.

	// Next and previous free blocks.
	next_free: ^Block_Header,
	prev_free: ^Block_Header,
}
#assert(offset_of(Block_Header, prev_phys_block) == 0)

/*
	Since block sizes are always at least a multiple of 4, the two least
	significant bits of the size field are used to store the block status:
	- bit 0: whether block is busy or free
	- bit 1: whether previous block is busy or free
*/
BLOCK_HEADER_FREE      :: uint(1 << 0)
BLOCK_HEADER_PREV_FREE :: uint(1 << 1)
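
/*
	Example of the encoding (illustrative only): a `size` field holding 0x63 (99)
	decodes as a 96-byte block (0x63 &~ 3) that is itself free (bit 0 set) and
	whose physical predecessor is also free (bit 1 set). The `block_size` and
	`block_set_*` helpers below exist so the rest of the code never has to touch
	these bits directly.
*/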

/*
	The size of the block header exposed to used blocks is the `size` field.
	The `prev_phys_block` field is stored *inside* the previous free block.
*/
BLOCK_HEADER_OVERHEAD :: uint(size_of(uint))
POOL_OVERHEAD         :: 2 * BLOCK_HEADER_OVERHEAD

// User data starts directly after the size field in a used block.
BLOCK_START_OFFSET :: offset_of(Block_Header, size) + size_of(Block_Header{}.size)

/*
	A free block must be large enough to store its header minus the size of
	the `prev_phys_block` field, and no larger than the number of addressable
	bits for `FL_INDEX`.
*/
BLOCK_SIZE_MIN :: uint(size_of(Block_Header) - size_of(^Block_Header))
BLOCK_SIZE_MAX :: uint(1) << FL_INDEX_MAX

/*
	TLSF achieves O(1) cost for `alloc` and `free` operations by limiting
	the search for a free block to a free list of guaranteed size
	adequate to fulfill the request, combined with efficient free list
	queries using bitmasks and architecture-specific bit-manipulation
	routines.

	NOTE: The TLSF spec relies on ffs/fls returning a value in 0..31.
*/
@(require_results)
ffs :: proc "contextless" (word: u32) -> (bit: i32) {
	return -1 if word == 0 else i32(intrinsics.count_trailing_zeros(word))
}

@(require_results)
fls :: proc "contextless" (word: u32) -> (bit: i32) {
	N :: (size_of(u32) * 8) - 1
	return i32(N - intrinsics.count_leading_zeros(word))
}

@(require_results)
fls_uint :: proc "contextless" (size: uint) -> (bit: i32) {
	N :: (size_of(uint) * 8) - 1
	return i32(N - intrinsics.count_leading_zeros(size))
}
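
/*
	Illustration: `ffs` returns the index of the lowest set bit, `fls` the index
	of the highest set bit, and both return -1 for a zero input:

		ffs(0x10)        ==  4    fls(0x10)        ==  4
		ffs(0x8000_0008) ==  3    fls(0x8000_0008) == 31
		ffs(0)           == -1    fls(0)           == -1
*/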

@(require_results)
block_size :: proc "contextless" (block: ^Block_Header) -> (size: uint) {
	return block.size &~ (BLOCK_HEADER_FREE | BLOCK_HEADER_PREV_FREE)
}

block_set_size :: proc "contextless" (block: ^Block_Header, size: uint) {
	old_size := block.size
	block.size = size | (old_size & (BLOCK_HEADER_FREE | BLOCK_HEADER_PREV_FREE))
}

@(require_results)
block_is_last :: proc "contextless" (block: ^Block_Header) -> (is_last: bool) {
	return block_size(block) == 0
}

@(require_results)
block_is_free :: proc "contextless" (block: ^Block_Header) -> (is_free: bool) {
	return (block.size & BLOCK_HEADER_FREE) == BLOCK_HEADER_FREE
}

block_set_free :: proc "contextless" (block: ^Block_Header) {
	block.size |= BLOCK_HEADER_FREE
}

block_set_used :: proc "contextless" (block: ^Block_Header) {
	block.size &~= BLOCK_HEADER_FREE
}

@(require_results)
block_is_prev_free :: proc "contextless" (block: ^Block_Header) -> (is_prev_free: bool) {
	return (block.size & BLOCK_HEADER_PREV_FREE) == BLOCK_HEADER_PREV_FREE
}

block_set_prev_free :: proc "contextless" (block: ^Block_Header) {
	block.size |= BLOCK_HEADER_PREV_FREE
}

block_set_prev_used :: proc "contextless" (block: ^Block_Header) {
	block.size &~= BLOCK_HEADER_PREV_FREE
}

@(require_results)
block_from_ptr :: proc(ptr: rawptr) -> (block_ptr: ^Block_Header) {
	return (^Block_Header)(uintptr(ptr) - BLOCK_START_OFFSET)
}

@(require_results)
block_to_ptr :: proc(block: ^Block_Header) -> (ptr: rawptr) {
	return rawptr(uintptr(block) + BLOCK_START_OFFSET)
}

// Return location of next block after block of given size.
@(require_results)
offset_to_block :: proc(ptr: rawptr, size: uint) -> (block: ^Block_Header) {
	return (^Block_Header)(uintptr(ptr) + uintptr(size))
}

@(require_results)
offset_to_block_backwards :: proc(ptr: rawptr, size: uint) -> (block: ^Block_Header) {
	return (^Block_Header)(uintptr(ptr) - uintptr(size))
}

// Return location of previous block.
@(require_results)
block_prev :: proc(block: ^Block_Header) -> (prev: ^Block_Header) {
	assert(block_is_prev_free(block), "previous block must be free")
	return block.prev_phys_block
}

// Return location of next existing block.
@(require_results)
block_next :: proc(block: ^Block_Header) -> (next: ^Block_Header) {
	return offset_to_block(block_to_ptr(block), block_size(block) - BLOCK_HEADER_OVERHEAD)
}

// Link a new block with its physical neighbor, return the neighbor.
@(require_results)
block_link_next :: proc(block: ^Block_Header) -> (next: ^Block_Header) {
	next = block_next(block)
	next.prev_phys_block = block
	return
}

block_mark_as_free :: proc(block: ^Block_Header) {
	// Link the block to the next block, first.
	next := block_link_next(block)
	block_set_prev_free(next)
	block_set_free(block)
}

block_mark_as_used :: proc(block: ^Block_Header) {
	next := block_next(block)
	block_set_prev_used(next)
	block_set_used(block)
}

@(require_results)
align_up :: proc(x, align: uint) -> (aligned: uint) {
	assert(0 == (align & (align - 1)), "must align to a power of two")
	return (x + (align - 1)) &~ (align - 1)
}

@(require_results)
align_down :: proc(x, align: uint) -> (aligned: uint) {
	assert(0 == (align & (align - 1)), "must align to a power of two")
	return x - (x & (align - 1))
}

@(require_results)
align_ptr :: proc(ptr: rawptr, align: uint) -> (aligned: rawptr) {
	assert(0 == (align & (align - 1)), "must align to a power of two")
	align_mask := uintptr(align) - 1
	_ptr := uintptr(ptr)
	_aligned := (_ptr + align_mask) &~ align_mask
	return rawptr(_aligned)
}

// Adjust an allocation size to be aligned to word size, and no smaller than internal minimum.
@(require_results)
adjust_request_size :: proc(size, align: uint) -> (adjusted: uint) {
	if size == 0 {
		return 0
	}

	// Aligned size must not exceed `BLOCK_SIZE_MAX`, or we'll go out of bounds on `sl_bitmap`.
	if aligned := align_up(size, align); aligned < BLOCK_SIZE_MAX {
		adjusted = min(aligned, BLOCK_SIZE_MAX)
	}
	return
}

// As `adjust_request_size`, but reports `.Out_Of_Memory` when the aligned size reaches `BLOCK_SIZE_MAX`.
@(require_results)
adjust_request_size_with_err :: proc(size, align: uint) -> (adjusted: uint, err: runtime.Allocator_Error) {
	if size == 0 {
		return 0, nil
	}

	// Aligned size must not exceed `BLOCK_SIZE_MAX`, or we'll go out of bounds on `sl_bitmap`.
	if aligned := align_up(size, align); aligned < BLOCK_SIZE_MAX {
		adjusted = min(aligned, BLOCK_SIZE_MAX)
	} else {
		err = .Out_Of_Memory
	}
	return
}
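
/*
	Example (illustrative): with ALIGN_SIZE = 8, `adjust_request_size(13, 8)`
	returns 16. A request whose aligned size reaches `BLOCK_SIZE_MAX` yields 0
	from the variant above (or `.Out_Of_Memory` from the `_with_err` variant),
	which callers treat as "cannot satisfy".
*/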

// TLSF utility functions. In most cases these are direct translations of
// the documentation in the research paper.
@(optimization_mode="favor_size", require_results)
mapping_insert :: proc(size: uint) -> (fl, sl: i32) {
	if size < SMALL_BLOCK_SIZE {
		// Store small blocks in first list.
		sl = i32(size) / (SMALL_BLOCK_SIZE / SL_INDEX_COUNT)
	} else {
		fl = fls_uint(size)
		sl = i32(size >> (uint(fl) - TLSF_SL_INDEX_COUNT_LOG2)) ~ (1 << TLSF_SL_INDEX_COUNT_LOG2)
		fl -= (FL_INDEX_SHIFT - 1)
	}
	return
}
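
/*
	Worked example (illustrative, using the 64-bit defaults): `mapping_insert(1000)`

		fls_uint(1000) = 9                        (1000 lies in [512, 1024))
		sl = (1000 >> (9 - 5)) ~ (1 << 5) = 62 ~ 32 = 30
		fl = 9 - (8 - 1) = 2

	so a free block of 1000 bytes is filed under first-level index 2,
	second-level index 30.
*/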

@(optimization_mode="favor_size", require_results)
mapping_round :: #force_inline proc(size: uint) -> (rounded: uint) {
	rounded = size
	if size >= SMALL_BLOCK_SIZE {
		round := uint(1 << (uint(fls_uint(size) - TLSF_SL_INDEX_COUNT_LOG2))) - 1
		rounded += round
	}
	return
}

// This version rounds up to the next block size (for allocations).
@(optimization_mode="favor_size", require_results)
mapping_search :: proc(size: uint) -> (fl, sl: i32) {
	return mapping_insert(mapping_round(size))
}
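
/*
	Continuing the example above (illustrative): `mapping_search(1000)` first
	rounds the request up by (1 << (9 - 5)) - 1 = 15 to 1015, then maps that to
	fl = 2, sl = 31. Searching from the rounded-up class guarantees that any
	block found there is large enough to satisfy the original request.
*/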

@(require_results)
search_suitable_block :: proc(control: ^Allocator, fli, sli: ^i32) -> (block: ^Block_Header) {
	// First, search for a block in the list associated with the given fl/sl index.
	fl := fli^; sl := sli^

	sl_map := control.sl_bitmap[fli^] & (~u32(0) << uint(sl))
	if sl_map == 0 {
		// No block exists. Search in the next largest first-level list.
		fl_map := control.fl_bitmap & (~u32(0) << uint(fl + 1))
		if fl_map == 0 {
			// No free blocks available, memory has been exhausted.
			return {}
		}

		fl = ffs(fl_map)
		fli^ = fl
		sl_map = control.sl_bitmap[fl]
	}
	assert(sl_map != 0, "internal error - second level bitmap is null")
	sl = ffs(sl_map)
	sli^ = sl

	// Return the first block in the free list.
	return control.blocks[fl][sl]
}

// Remove a free block from the free list.
remove_free_block :: proc(control: ^Allocator, block: ^Block_Header, fl: i32, sl: i32) {
	prev := block.prev_free
	next := block.next_free
	assert(prev != nil, "prev_free can not be nil")
	assert(next != nil, "next_free can not be nil")

	next.prev_free = prev
	prev.next_free = next

	// If this block is the head of the free list, set new head.
	if control.blocks[fl][sl] == block {
		control.blocks[fl][sl] = next

		// If the new head is nil, clear the bitmap.
		if next == &control.block_null {
			control.sl_bitmap[fl] &~= (u32(1) << uint(sl))

			// If the second bitmap is now empty, clear the fl bitmap.
			if control.sl_bitmap[fl] == 0 {
				control.fl_bitmap &~= (u32(1) << uint(fl))
			}
		}
	}
}

// Insert a free block into the free block list.
insert_free_block :: proc(control: ^Allocator, block: ^Block_Header, fl: i32, sl: i32) {
	current := control.blocks[fl][sl]
	assert(current != nil, "free lists cannot have a nil entry")
	assert(block != nil, "cannot insert a nil entry into the free list")

	block.next_free = current
	block.prev_free = &control.block_null
	current.prev_free = block

	assert(block_to_ptr(block) == align_ptr(block_to_ptr(block), ALIGN_SIZE), "block not properly aligned")

	// Insert the new block at the head of the list, and mark the first- and second-level bitmaps appropriately.
	control.blocks[fl][sl] = block
	control.fl_bitmap     |= (u32(1) << uint(fl))
	control.sl_bitmap[fl] |= (u32(1) << uint(sl))
}

// Remove a given block from the free list.
block_remove :: proc(control: ^Allocator, block: ^Block_Header) {
	fl, sl := mapping_insert(block_size(block))
	remove_free_block(control, block, fl, sl)
}

// Insert a given block into the free list.
block_insert :: proc(control: ^Allocator, block: ^Block_Header) {
	fl, sl := mapping_insert(block_size(block))
	insert_free_block(control, block, fl, sl)
}

@(require_results)
block_can_split :: proc(block: ^Block_Header, size: uint) -> (can_split: bool) {
	return block_size(block) >= size_of(Block_Header) + size
}

// Split a block into two, the second of which is free.
@(require_results)
block_split :: proc(block: ^Block_Header, size: uint) -> (remaining: ^Block_Header) {
	// Calculate the amount of space left in the remaining block.
	remaining = offset_to_block(block_to_ptr(block), size - BLOCK_HEADER_OVERHEAD)

	remain_size := block_size(block) - (size + BLOCK_HEADER_OVERHEAD)
	assert(block_to_ptr(remaining) == align_ptr(block_to_ptr(remaining), ALIGN_SIZE),
		"remaining block not aligned properly")
	assert(block_size(block) == remain_size + size + BLOCK_HEADER_OVERHEAD)

	block_set_size(remaining, remain_size)
	assert(block_size(remaining) >= BLOCK_SIZE_MIN, "block split with invalid size")

	block_set_size(block, size)
	block_mark_as_free(remaining)
	return remaining
}
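
/*
	Example (illustrative, 64-bit, so BLOCK_HEADER_OVERHEAD = 8): splitting a
	1024-byte free block at size = 256 places the remaining block's header 248
	bytes past the user pointer and gives it 1024 - (256 + 8) = 760 bytes,
	satisfying the assert that 760 + 256 + 8 == 1024.
*/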

// Absorb a free block's storage into an adjacent previous free block.
@(require_results)
block_absorb :: proc(prev: ^Block_Header, block: ^Block_Header) -> (absorbed: ^Block_Header) {
	assert(!block_is_last(prev), "previous block can't be last")

	// Note: Leaves flags untouched.
	prev.size += block_size(block) + BLOCK_HEADER_OVERHEAD
	_ = block_link_next(prev)
	return prev
}

// Merge a just-freed block with an adjacent previous free block.
@(require_results)
block_merge_prev :: proc(control: ^Allocator, block: ^Block_Header) -> (merged: ^Block_Header) {
	merged = block
	if block_is_prev_free(block) {
		prev := block_prev(block)
		assert(prev != nil, "prev physical block can't be nil")
		assert(block_is_free(prev), "prev block is not free though marked as such")
		block_remove(control, prev)
		merged = block_absorb(prev, block)
	}
	return merged
}

// Merge a just-freed block with an adjacent free block.
@(require_results)
block_merge_next :: proc(control: ^Allocator, block: ^Block_Header) -> (merged: ^Block_Header) {
	merged = block
	next := block_next(block)
	assert(next != nil, "next physical block can't be nil")

	if block_is_free(next) {
		assert(!block_is_last(block), "previous block can't be last")
		block_remove(control, next)
		merged = block_absorb(block, next)
	}
	return merged
}

// Trim any trailing block space off the end of a free block, return to pool.
block_trim_free :: proc(control: ^Allocator, block: ^Block_Header, size: uint) {
	assert(block_is_free(block), "block must be free")
	if block_can_split(block, size) {
		remaining_block := block_split(block, size)
		_ = block_link_next(block)
		block_set_prev_free(remaining_block)
		block_insert(control, remaining_block)
	}
}

// Trim any trailing block space off the end of a used block, return to pool.
block_trim_used :: proc(control: ^Allocator, block: ^Block_Header, size: uint) {
	assert(!block_is_free(block), "Block must be used")
	if block_can_split(block, size) {
		// If the next block is free, we must coalesce.
		remaining_block := block_split(block, size)
		block_set_prev_used(remaining_block)

		remaining_block = block_merge_next(control, remaining_block)
		block_insert(control, remaining_block)
	}
}

// Trim leading block space, return to pool.
@(require_results)
block_trim_free_leading :: proc(control: ^Allocator, block: ^Block_Header, size: uint) -> (remaining: ^Block_Header) {
	remaining = block
	if block_can_split(block, size) {
		// We want the 2nd block.
		remaining = block_split(block, size - BLOCK_HEADER_OVERHEAD)
		block_set_prev_free(remaining)

		_ = block_link_next(block)
		block_insert(control, block)
	}
	return remaining
}

@(require_results)
block_locate_free :: proc(control: ^Allocator, size: uint) -> (block: ^Block_Header) {
	fl, sl: i32
	if size != 0 {
		fl, sl = mapping_search(size)

		/*
			`mapping_search` can futz with the size, so for excessively large sizes it can sometimes wind up
			with indices that are off the end of the block array. So, we protect against that here,
			since this is the only call site of `mapping_search`. Note that we don't need to check `sl`,
			as it comes from a modulo operation that guarantees it's always in range.
		*/
		if fl < FL_INDEX_COUNT {
			block = search_suitable_block(control, &fl, &sl)
		}
	}

	if block != nil {
		assert(block_size(block) >= size)
		remove_free_block(control, block, fl, sl)
	}
	return block
}

@(require_results)
block_prepare_used :: proc(control: ^Allocator, block: ^Block_Header, size: uint) -> (res: []byte, err: runtime.Allocator_Error) {
	if block != nil {
		assert(size != 0, "Size must be non-zero")
		block_trim_free(control, block, size)
		block_mark_as_used(block)
		res = ([^]byte)(block_to_ptr(block))[:size]
	}
	return
}

// Clear control structure and point all empty lists at the null block.
clear :: proc(control: ^Allocator) {
	control.block_null.next_free = &control.block_null
	control.block_null.prev_free = &control.block_null

	control.fl_bitmap = 0
	for i in 0..<FL_INDEX_COUNT {
		control.sl_bitmap[i] = 0
		for j in 0..<SL_INDEX_COUNT {
			control.blocks[i][j] = &control.block_null
		}
	}
}

@(require_results)
pool_add :: proc(control: ^Allocator, pool: []u8) -> (err: Error) {
	assert(uintptr(raw_data(pool)) % ALIGN_SIZE == 0, "Added memory must be aligned")

	pool_overhead := POOL_OVERHEAD
	pool_bytes := align_down(len(pool) - pool_overhead, ALIGN_SIZE)

	if pool_bytes < BLOCK_SIZE_MIN {
		return .Backing_Buffer_Too_Small
	} else if pool_bytes > BLOCK_SIZE_MAX {
		return .Backing_Buffer_Too_Large
	}

	// Create the main free block. Offset the start of the block slightly,
	// so that the `prev_phys_block` field falls outside of the pool -
	// it will never be used.
	block := offset_to_block_backwards(raw_data(pool), BLOCK_HEADER_OVERHEAD)

	block_set_size(block, pool_bytes)
	block_set_free(block)
	block_set_prev_used(block)
	block_insert(control, block)

	// Split the block to create a zero-size sentinel block.
	next := block_link_next(block)
	block_set_size(next, 0)
	block_set_used(next)
	block_set_prev_free(next)

	return
}
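
/*
	Example (illustrative, 64-bit): adding a 64 KiB backing buffer reserves
	POOL_OVERHEAD = 16 bytes of bookkeeping, leaving an initial free block of
	align_down(65536 - 16, 8) = 65520 bytes, terminated by the zero-size used
	sentinel block created above.
*/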

pool_remove :: proc(control: ^Allocator, pool: []u8) {
	block := offset_to_block_backwards(raw_data(pool), BLOCK_HEADER_OVERHEAD)

	assert(block_is_free(block), "Block should be free")
	assert(!block_is_free(block_next(block)), "Next block should not be free")
	assert(block_size(block_next(block)) == 0, "Next block size should be zero")

	fl, sl := mapping_insert(block_size(block))
	remove_free_block(control, block, fl, sl)
}

@(require_results)
alloc_bytes_non_zeroed :: proc(control: ^Allocator, size: uint, align: uint) -> (res: []byte, err: runtime.Allocator_Error) {
	assert(control != nil)
	adjust := adjust_request_size(size, ALIGN_SIZE)

	GAP_MINIMUM :: size_of(Block_Header)
	size_with_gap := adjust_request_size(adjust + align + GAP_MINIMUM, align)

	aligned_size := size_with_gap if adjust != 0 && align > ALIGN_SIZE else adjust
	if aligned_size == 0 && size > 0 {
		return nil, .Out_Of_Memory
	}

	block := block_locate_free(control, aligned_size)
	if block == nil {
		return nil, .Out_Of_Memory
	}

	ptr := block_to_ptr(block)
	aligned := align_ptr(ptr, align)
	gap := uint(int(uintptr(aligned)) - int(uintptr(ptr)))

	if gap != 0 && gap < GAP_MINIMUM {
		gap_remain := GAP_MINIMUM - gap
		offset := uintptr(max(gap_remain, align))
		next_aligned := rawptr(uintptr(aligned) + offset)

		aligned = align_ptr(next_aligned, align)
		gap = uint(int(uintptr(aligned)) - int(uintptr(ptr)))
	}

	if gap != 0 {
		assert(gap >= GAP_MINIMUM, "gap size too small")
		block = block_trim_free_leading(control, block, gap)
	}

	return block_prepare_used(control, block, adjust)
}

@(require_results)
alloc_bytes :: proc(control: ^Allocator, size: uint, align: uint) -> (res: []byte, err: runtime.Allocator_Error) {
	res, err = alloc_bytes_non_zeroed(control, size, align)
	if err == nil {
		intrinsics.mem_zero(raw_data(res), len(res))
	}
	return
}

free_with_size :: proc(control: ^Allocator, ptr: rawptr, size: uint) {
	assert(control != nil)
	// `size` is currently ignored.
	if ptr == nil {
		return
	}

	block := block_from_ptr(ptr)
	assert(!block_is_free(block), "block already marked as free") // double free
	block_mark_as_free(block)
	block = block_merge_prev(control, block)
	block = block_merge_next(control, block)
	block_insert(control, block)
}

@(require_results)
resize :: proc(control: ^Allocator, ptr: rawptr, old_size, new_size: uint, alignment: uint) -> (res: []byte, err: runtime.Allocator_Error) {
	assert(control != nil)

	if ptr != nil && new_size == 0 {
		free_with_size(control, ptr, old_size)
		return
	} else if ptr == nil {
		return alloc_bytes(control, new_size, alignment)
	}

	block := block_from_ptr(ptr)
	next := block_next(block)

	curr_size := block_size(block)
	combined := curr_size + block_size(next) + BLOCK_HEADER_OVERHEAD
	adjust := adjust_request_size(new_size, max(ALIGN_SIZE, alignment))

	assert(!block_is_free(block), "block already marked as free") // double free

	min_size := min(curr_size, new_size, old_size)

	if adjust > curr_size && (!block_is_free(next) || adjust > combined) {
		res = alloc_bytes(control, new_size, alignment) or_return
		if res != nil {
			copy(res, ([^]byte)(ptr)[:min_size])
			free_with_size(control, ptr, curr_size)
		}
		return
	}

	if adjust > curr_size {
		_ = block_merge_next(control, block)
		block_mark_as_used(block)
	}

	block_trim_used(control, block, adjust)
	res = ([^]byte)(ptr)[:new_size]

	if min_size < new_size {
		to_zero := ([^]byte)(ptr)[min_size:new_size]
		runtime.mem_zero(raw_data(to_zero), len(to_zero))
	}
	return
}

@(require_results)
resize_non_zeroed :: proc(control: ^Allocator, ptr: rawptr, old_size, new_size: uint, alignment: uint) -> (res: []byte, err: runtime.Allocator_Error) {
	assert(control != nil)

	if ptr != nil && new_size == 0 {
		free_with_size(control, ptr, old_size)
		return
	} else if ptr == nil {
		return alloc_bytes_non_zeroed(control, new_size, alignment)
	}

	block := block_from_ptr(ptr)
	next := block_next(block)

	curr_size := block_size(block)
	combined := curr_size + block_size(next) + BLOCK_HEADER_OVERHEAD
	adjust := adjust_request_size(new_size, max(ALIGN_SIZE, alignment))

	assert(!block_is_free(block), "block already marked as free") // double free

	min_size := min(curr_size, new_size, old_size)

	if adjust > curr_size && (!block_is_free(next) || adjust > combined) {
		res = alloc_bytes_non_zeroed(control, new_size, alignment) or_return
		if res != nil {
			copy(res, ([^]byte)(ptr)[:min_size])
			free_with_size(control, ptr, old_size)
		}
		return
	}

	if adjust > curr_size {
		_ = block_merge_next(control, block)
		block_mark_as_used(block)
	}

	block_trim_used(control, block, adjust)
	res = ([^]byte)(ptr)[:new_size]
	return
}