// memory.cpp — heap and scratch allocators plus the global allocator accessors.
/*
 * Copyright (c) 2012-2016 Daniele Bartolini and individual contributors.
 * License: https://github.com/taylor001/crown/blob/master/LICENSE
 */
#include "memory.h"
#include "allocator.h"
#include "mutex.h"
#include <new>      // placement new (memory_globals::init())
#include <stdlib.h> // malloc
// Disabled overloads of the global new/delete operators; enable them to
// forbid raw new/delete and force all allocations through an Allocator.
// void* operator new(size_t) throw (std::bad_alloc)
// {
// 	CE_ASSERT(false, "operator new forbidden");
// 	return NULL;
// }
// void* operator new[](size_t) throw (std::bad_alloc)
// {
// 	CE_ASSERT(false, "operator new[] forbidden");
// 	return NULL;
// }
// void operator delete(void*) throw ()
// {
// 	CE_ASSERT(false, "operator delete forbidden");
// }
// void operator delete[](void*) throw ()
// {
// 	CE_ASSERT(false, "operator delete[] forbidden");
// }
namespace crown
{
namespace memory
{
  31. // Header stored at the beginning of a memory allocation to indicate the
  32. // size of the allocated data.
  33. struct Header
  34. {
  35. u32 size;
  36. };
  37. // If we need to align the memory allocation we pad the header with this
  38. // value after storing the size. That way we can
  39. const u32 HEADER_PAD_VALUE = 0xffffffffu;
  40. // Given a pointer to the header, returns a pointer to the data that follows it.
  41. inline void *data_pointer(Header* header, u32 align) {
  42. void *p = header + 1;
  43. return memory::align_top(p, align);
  44. }
  45. // Given a pointer to the data, returns a pointer to the header before it.
  46. inline Header*header(const void* data)
  47. {
  48. u32 *p = (u32*)data;
  49. while (p[-1] == HEADER_PAD_VALUE)
  50. --p;
  51. return (Header*)p - 1;
  52. }
  53. // Stores the size in the header and pads with HEADER_PAD_VALUE up to the
  54. // data pointer.
  55. inline void fill(Header*header, void *data, u32 size)
  56. {
  57. header->size = size;
  58. u32 *p = (u32*)(header + 1);
  59. while (p < data)
  60. *p++ = HEADER_PAD_VALUE;
  61. }
  62. inline u32 actual_allocation_size(u32 size, u32 align)
  63. {
  64. return size + align + sizeof(Header);
  65. }
  66. inline void pad(Header* header, void* data)
  67. {
  68. u32* p = (u32*)(header + 1);
  69. while (p != data)
  70. {
  71. *p = HEADER_PAD_VALUE;
  72. p++;
  73. }
  74. }
  75. /// Allocator based on C malloc().
  76. class HeapAllocator : public Allocator
  77. {
  78. Mutex _mutex;
  79. u32 _allocated_size;
  80. u32 _allocation_count;
  81. public:
  82. HeapAllocator()
  83. : _allocated_size(0)
  84. , _allocation_count(0)
  85. {
  86. }
  87. ~HeapAllocator()
  88. {
  89. CE_ASSERT(_allocation_count == 0 && total_allocated() == 0
  90. , "Missing %d deallocations causing a leak of %d bytes"
  91. , _allocation_count
  92. , total_allocated()
  93. );
  94. }
  95. /// @copydoc Allocator::allocate()
  96. void* allocate(u32 size, u32 align = Allocator::DEFAULT_ALIGN)
  97. {
  98. ScopedMutex sm(_mutex);
  99. u32 actual_size = actual_allocation_size(size, align);
  100. Header* h = (Header*)malloc(actual_size);
  101. h->size = actual_size;
  102. void* data = memory::align_top(h + 1, align);
  103. pad(h, data);
  104. _allocated_size += actual_size;
  105. _allocation_count++;
  106. return data;
  107. }
  108. /// @copydoc Allocator::deallocate()
  109. void deallocate(void* data)
  110. {
  111. ScopedMutex sm(_mutex);
  112. if (!data)
  113. return;
  114. Header* h = header(data);
  115. _allocated_size -= h->size;
  116. _allocation_count--;
  117. free(h);
  118. }
  119. /// @copydoc Allocator::allocated_size()
  120. u32 allocated_size(const void* ptr)
  121. {
  122. return get_size(ptr);
  123. }
  124. /// @copydoc Allocator::total_allocated()
  125. u32 total_allocated()
  126. {
  127. ScopedMutex sm(_mutex);
  128. return _allocated_size;
  129. }
  130. /// Returns the size in bytes of the block of memory pointed by @a data
  131. u32 get_size(const void* data)
  132. {
  133. ScopedMutex sm(_mutex);
  134. Header* h = header(data);
  135. return h->size;
  136. }
  137. };
// Copyright (C) 2012 Bitsquid AB
// License: https://bitbucket.org/bitsquid/foundation/src/default/LICENSE
//
// An allocator used to allocate temporary "scratch" memory. The allocator
// uses a fixed size ring buffer to service the requests.
//
// Memory is always allocated linearly. An allocation pointer is
// advanced through the buffer as memory is allocated and wraps around at
// the end of the buffer. Similarly, a free pointer is advanced as memory
// is freed.
//
// It is important that the scratch allocator is only used for short-lived
// memory allocations. A long-lived allocation will lock the "free" pointer
// and prevent the "allocate" pointer from proceeding past it, which means
// the ring buffer can't be used.
//
// If the ring buffer is exhausted, the scratch allocator will use its backing
// allocator to allocate memory instead.
  156. class ScratchAllocator : public Allocator
  157. {
  158. Mutex _mutex;
  159. Allocator &_backing;
  160. // Start and end of the ring buffer.
  161. char*_begin, *_end;
  162. // Pointers to where to allocate memory and where to free memory.
  163. char*_allocate, *_free;
  164. public:
  165. /// Creates a ScratchAllocator. The allocator will use the backing
  166. /// allocator to create the ring buffer and to service any requests
  167. /// that don't fit in the ring buffer.
  168. ///
  169. /// size specifies the size of the ring buffer.
  170. ScratchAllocator(Allocator &backing, u32 size) : _backing(backing)
  171. {
  172. _begin = (char*)_backing.allocate(size);
  173. _end = _begin + size;
  174. _allocate = _begin;
  175. _free = _begin;
  176. }
  177. ~ScratchAllocator()
  178. {
  179. CE_ASSERT(_free == _allocate, "Memory leak");
  180. _backing.deallocate(_begin);
  181. }
  182. bool in_use(void *p)
  183. {
  184. if (_free == _allocate)
  185. return false;
  186. if (_allocate > _free)
  187. return p >= _free && p < _allocate;
  188. return p >= _free || p < _allocate;
  189. }
  190. void *allocate(u32 size, u32 align)
  191. {
  192. ScopedMutex sm(_mutex);
  193. CE_ASSERT(align % 4 == 0, "Must be 4-byte aligned");
  194. size = ((size + 3)/4)*4;
  195. char* p = _allocate;
  196. Header* h = (Header*)p;
  197. char* data = (char*)data_pointer(h, align);
  198. p = data + size;
  199. // Reached the end of the buffer, wrap around to the beginning.
  200. if (p > _end) {
  201. h->size = u32(_end - (char*)h) | 0x80000000u;
  202. p = _begin;
  203. h = (Header*)p;
  204. data = (char*)data_pointer(h, align);
  205. p = data + size;
  206. }
  207. // If the buffer is exhausted use the backing allocator instead.
  208. if (in_use(p))
  209. return _backing.allocate(size, align);
  210. fill(h, data, u32(p - (char*)h));
  211. _allocate = p;
  212. return data;
  213. }
  214. void deallocate(void *p)
  215. {
  216. ScopedMutex sm(_mutex);
  217. if (!p)
  218. return;
  219. if (p < _begin || p >= _end) {
  220. _backing.deallocate(p);
  221. return;
  222. }
  223. // Mark this slot as free
  224. Header*h = header(p);
  225. CE_ASSERT((h->size & 0x80000000u) == 0, "Not free");
  226. h->size = h->size | 0x80000000u;
  227. // Advance the free pointer past all free slots.
  228. while (_free != _allocate) {
  229. Header*h = (Header*)_free;
  230. if ((h->size & 0x80000000u) == 0)
  231. break;
  232. _free += h->size & 0x7fffffffu;
  233. if (_free == _end)
  234. _free = _begin;
  235. }
  236. }
  237. u32 allocated_size(const void *p)
  238. {
  239. ScopedMutex sm(_mutex);
  240. Header* h = header(p);
  241. return h->size - u32((char*)p - (char*)h);
  242. }
  243. u32 total_allocated()
  244. {
  245. ScopedMutex sm(_mutex);
  246. return u32(_end - _begin);
  247. }
  248. };
} // namespace memory
  250. namespace memory_globals
  251. {
  252. using namespace memory;
  253. static const u32 SIZE = sizeof(HeapAllocator)
  254. + sizeof(ScratchAllocator)
  255. ;
  256. char _buffer[SIZE];
  257. HeapAllocator* _default_allocator = NULL;
  258. ScratchAllocator* _default_scratch_allocator = NULL;
  259. void init()
  260. {
  261. _default_allocator = new (_buffer) HeapAllocator();
  262. _default_scratch_allocator = new (_buffer + sizeof(HeapAllocator)) ScratchAllocator(*_default_allocator, 1024*1024);
  263. }
  264. void shutdown()
  265. {
  266. _default_scratch_allocator->~ScratchAllocator();
  267. _default_allocator->~HeapAllocator();
  268. }
  269. } // namespace memory_globals
  270. Allocator& default_allocator()
  271. {
  272. return *memory_globals::_default_allocator;
  273. }
  274. Allocator& default_scratch_allocator()
  275. {
  276. return *memory_globals::_default_scratch_allocator;
  277. }
} // namespace crown