default_allocators.odin

package runtime

import "core:os"

default_allocator_proc :: os.heap_allocator_proc;

default_allocator :: proc() -> Allocator {
	return os.heap_allocator();
}
Default_Temp_Allocator :: struct {
	data:               []byte,
	curr_offset:        int,
	prev_offset:        int,
	backup_allocator:   Allocator,
	leaked_allocations: [dynamic]rawptr,
}
default_temp_allocator_init :: proc(allocator: ^Default_Temp_Allocator, data: []byte, backup_allocator := context.allocator) {
	allocator.data = data;
	allocator.curr_offset = 0;
	allocator.prev_offset = 0;
	allocator.backup_allocator = backup_allocator;
	allocator.leaked_allocations.allocator = backup_allocator;
}
default_temp_allocator_destroy :: proc(using allocator: ^Default_Temp_Allocator) {
	if allocator == nil {
		return;
	}
	for ptr in leaked_allocations {
		free(ptr, backup_allocator);
	}
	delete(leaked_allocations);
	delete(data, backup_allocator);
	allocator^ = {};
}
default_temp_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
                                    size, alignment: int,
                                    old_memory: rawptr, old_size: int, flags: u64 = 0, loc := #caller_location) -> rawptr {
	allocator := (^Default_Temp_Allocator)(allocator_data);

	if allocator.data == nil {
		// Lazily set up the backing buffer on first use. If the context allocator
		// looks like this temporary allocator itself (same procedure or same data),
		// fall back to the default heap allocator so the backing buffer is not
		// allocated out of the allocator being initialized.
		DEFAULT_SCRATCH_BACKING_SIZE :: 1<<22;
		a := context.allocator;
		if !(context.allocator.procedure != default_temp_allocator_proc &&
		     context.allocator.data != allocator_data) {
			a = default_allocator();
		}
		default_temp_allocator_init(allocator, make([]byte, DEFAULT_SCRATCH_BACKING_SIZE, a), a);
	}

	switch mode {
	case .Alloc:
		switch {
		case allocator.curr_offset+size <= len(allocator.data):
			// Fits after the current offset: bump-allocate from the backing buffer.
			offset := align_forward_uintptr(uintptr(allocator.curr_offset), uintptr(alignment));
			ptr := &allocator.data[offset];
			mem_zero(ptr, size);
			allocator.prev_offset = int(offset);
			allocator.curr_offset = int(offset) + size;
			return ptr;

		case size <= len(allocator.data):
			// Does not fit at the current offset but fits in the buffer: wrap
			// around to the start, overwriting older temporary allocations.
			offset := align_forward_uintptr(uintptr(0), uintptr(alignment));
			ptr := &allocator.data[offset];
			mem_zero(ptr, size);
			allocator.prev_offset = int(offset);
			allocator.curr_offset = int(offset) + size;
			return ptr;
		}

		// Too large for the backing buffer: allocate from the backup allocator
		// and remember the pointer so Free_All or destroy can release it.
		// TODO(bill): Should leaks be notified about? Should probably use a logging system that is built into the context system
		a := allocator.backup_allocator;
		if a.procedure == nil {
			a = context.allocator;
			allocator.backup_allocator = a;
		}

		ptr := mem_alloc(size, alignment, a, loc);
		if allocator.leaked_allocations == nil {
			allocator.leaked_allocations = make([dynamic]rawptr, a);
		}
		append(&allocator.leaked_allocations, ptr);
		return ptr;

	case .Free:
		if len(allocator.data) == 0 {
			return nil;
		}

		last_ptr := rawptr(&allocator.data[allocator.prev_offset]);
		if old_memory == last_ptr {
			// Only the most recent allocation can be handed back to the buffer.
			full_size := allocator.curr_offset - allocator.prev_offset;
			allocator.curr_offset = allocator.prev_offset;
			mem_zero(last_ptr, full_size);
			return nil;
		} else {
			#no_bounds_check start, end := &allocator.data[0], &allocator.data[allocator.curr_offset];
			if start <= old_memory && old_memory < end {
				// NOTE(bill): Cannot free this pointer
				return nil;
			}

			if len(allocator.leaked_allocations) != 0 {
				for ptr, i in allocator.leaked_allocations {
					if ptr == old_memory {
						free(ptr, allocator.backup_allocator);
						ordered_remove(&allocator.leaked_allocations, i);
						return nil;
					}
				}
			}
		}
		// NOTE(bill): It's a temporary memory, don't worry about freeing

	case .Free_All:
		allocator.curr_offset = 0;
		allocator.prev_offset = 0;
		for ptr in allocator.leaked_allocations {
			free(ptr, allocator.backup_allocator);
		}
		clear(&allocator.leaked_allocations);

	case .Resize:
		// The most recent allocation can be resized in place by moving the current
		// offset; anything else becomes a fresh allocation plus a copy.
		last_ptr := #no_bounds_check rawptr(&allocator.data[allocator.prev_offset]);
		if old_memory == last_ptr && len(allocator.data)-allocator.prev_offset >= size {
			allocator.curr_offset = allocator.prev_offset+size;
			return old_memory;
		}
		ptr := default_temp_allocator_proc(allocator_data, Allocator_Mode.Alloc, size, alignment, old_memory, old_size, flags, loc);
		mem_copy(ptr, old_memory, old_size);
		return ptr;

	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory);
		if set != nil {
			set^ = {.Alloc, .Free, .Free_All, .Resize, .Query_Features};
		}
		return set;

	case .Query_Info:
		return nil;
	}

	return nil;
}
default_temp_allocator :: proc(allocator: ^Default_Temp_Allocator) -> Allocator {
	return Allocator{
		procedure = default_temp_allocator_proc,
		data = allocator,
	};
}
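
// --- Illustrative usage (not part of the original file) ---
// A minimal sketch of how a caller might drive Default_Temp_Allocator directly.
// The procedure name and the 1 MiB backing size below are arbitrary choices for
// the example, not part of the runtime API; only identifiers defined or used in
// this file are assumed to exist.
example_default_temp_allocator_usage :: proc() {
	scratch: Default_Temp_Allocator;
	default_temp_allocator_init(&scratch, make([]byte, 1<<20));

	// Wrap the state in an Allocator value. (Typically this would be installed
	// as the context's temporary allocator.)
	temp := default_temp_allocator(&scratch);

	// Allocations come out of the fixed backing block; anything that does not
	// fit falls back to the backup allocator and is tracked in leaked_allocations.
	ptr := mem_alloc(128, 16, temp);
	_ = ptr;

	// Frees the backing block and any backup-allocated ("leaked") pointers.
	default_temp_allocator_destroy(&scratch);
}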