// PoolAlloc.h
  1. //
  2. // Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
  3. // Copyright (C) 2012-2013 LunarG, Inc.
  4. //
  5. // All rights reserved.
  6. //
  7. // Redistribution and use in source and binary forms, with or without
  8. // modification, are permitted provided that the following conditions
  9. // are met:
  10. //
  11. // Redistributions of source code must retain the above copyright
  12. // notice, this list of conditions and the following disclaimer.
  13. //
  14. // Redistributions in binary form must reproduce the above
  15. // copyright notice, this list of conditions and the following
  16. // disclaimer in the documentation and/or other materials provided
  17. // with the distribution.
  18. //
  19. // Neither the name of 3Dlabs Inc. Ltd. nor the names of its
  20. // contributors may be used to endorse or promote products derived
  21. // from this software without specific prior written permission.
  22. //
  23. // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  24. // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  25. // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
  26. // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
  27. // COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  28. // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
  29. // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  30. // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
  31. // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  32. // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
  33. // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  34. // POSSIBILITY OF SUCH DAMAGE.
  35. //
  36. #ifndef _POOLALLOC_INCLUDED_
  37. #define _POOLALLOC_INCLUDED_
  38. #ifdef _DEBUG
  39. # define GUARD_BLOCKS // define to enable guard block sanity checking
  40. #endif
  41. //
  42. // This header defines an allocator that can be used to efficiently
  43. // allocate a large number of small requests for heap memory, with the
  44. // intention that they are not individually deallocated, but rather
  45. // collectively deallocated at one time.
  46. //
  47. // This simultaneously
  48. //
  49. // * Makes each individual allocation much more efficient; the
  50. // typical allocation is trivial.
  51. // * Completely avoids the cost of doing individual deallocation.
  52. // * Saves the trouble of tracking down and plugging a large class of leaks.
  53. //
  54. // Individual classes can use this allocator by supplying their own
  55. // new and delete methods.
  56. //
  57. // STL containers can use this allocator by using the pool_allocator
  58. // class as the allocator (second) template argument.
  59. //
  60. #include <cstddef>
  61. #include <cstring>
  62. #include <vector>
  63. namespace glslang {
  64. // If we are using guard blocks, we must track each individual
  65. // allocation. If we aren't using guard blocks, these
  66. // never get instantiated, so won't have any impact.
  67. //
// Header placed in front of each individual allocation when guard-block
// checking is enabled.  Links allocations into a chain (via prevAlloc) so
// they can all be sanity-checked when a page is destroyed.
class TAllocation {
public:
    TAllocation(size_t size, unsigned char* mem, TAllocation* prev = 0) :
        size(size), mem(mem), prevAlloc(prev) {
        // Allocations are bracketed:
        //    [allocationHeader][initialGuardBlock][userData][finalGuardBlock]
        // This would be cleaner with if (guardBlockSize)..., but that
        // makes the compiler print warnings about 0 length memsets,
        // even with the if() protecting them.
#       ifdef GUARD_BLOCKS
        memset(preGuard(), guardBlockBeginVal, guardBlockSize);
        memset(data(), userDataFill, size);
        memset(postGuard(), guardBlockEndVal, guardBlockSize);
#       endif
    }

    // Verify that neither guard block bracketing this allocation has been
    // overwritten.  checkGuardBlock() is defined out-of-line.
    void check() const {
        checkGuardBlock(preGuard(), guardBlockBeginVal, "before");
        checkGuardBlock(postGuard(), guardBlockEndVal, "after");
    }

    // Walk the prevAlloc chain, checking every allocation in it.
    // Defined out-of-line.
    void checkAllocList() const;

    // Return total size needed to accommodate user buffer of 'size',
    // plus our tracking data (header and the two guard blocks).
    inline static size_t allocationSize(size_t size) {
        return size + 2 * guardBlockSize + headerSize();
    }

    // Offset from surrounding buffer to get to user data buffer.
    inline static unsigned char* offsetAllocation(unsigned char* m) {
        return m + guardBlockSize + headerSize();
    }

private:
    // Report (out-of-line) if the guard block at 'blockMem' no longer holds
    // the expected fill value 'val'; 'locText' labels which side it was on.
    void checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const;

    // Find offsets to pre and post guard blocks, and user data buffer
    unsigned char* preGuard()  const { return mem + headerSize(); }
    unsigned char* data()      const { return preGuard() + guardBlockSize; }
    unsigned char* postGuard() const { return data() + size; }

    size_t size;            // size of the user data area
    unsigned char* mem;     // beginning of our allocation (pts to header)
    TAllocation* prevAlloc; // prior allocation in the chain

    // Fill patterns and guard-block size; the values are defined in the
    // implementation file, not visible here.
    const static unsigned char guardBlockBeginVal;
    const static unsigned char guardBlockEndVal;
    const static unsigned char userDataFill;
    const static size_t guardBlockSize;

    // Without guard blocks no header is stored at all, so the per-allocation
    // overhead compiles down to zero.
#   ifdef GUARD_BLOCKS
    inline static size_t headerSize() { return sizeof(TAllocation); }
#   else
    inline static size_t headerSize() { return 0; }
#   endif
};
  116. //
// There are several stacks.  One is to track the pushing and popping
// of the user, and not yet implemented.  The others are simply
// repositories of free pages or used pages.
  120. //
  121. // Page stacks are linked together with a simple header at the beginning
  122. // of each allocation obtained from the underlying OS. Multi-page allocations
  123. // are returned to the OS. Individual page allocations are kept for future
  124. // re-use.
  125. //
  126. // The "page size" used is not, nor must it match, the underlying OS
  127. // page size. But, having it be about that size or equal to a set of
  128. // pages is likely most optimal.
  129. //
class TPoolAllocator {
public:
    // growthIncrement: page size used when requesting memory from the OS.
    // allocationAlignment: alignment of every pointer returned by allocate();
    // per the comment below it is expected to be a power of 2.
    // Both defined out-of-line.
    TPoolAllocator(int growthIncrement = 8*1024, int allocationAlignment = 16);

    //
    // Don't call the destructor just to free up the memory, call pop()
    //
    ~TPoolAllocator();

    //
    // Call push() to establish a new place to pop memory to.  Does not
    // have to be called to get things started.
    //
    void push();

    //
    // Call pop() to free all memory allocated since the last call to push(),
    // or if no last call to push, frees all memory since first allocation.
    //
    void pop();

    //
    // Call popAll() to free all memory allocated.
    //
    void popAll();

    //
    // Call allocate() to actually acquire memory.  Returns 0 if no memory
    // available, otherwise a properly aligned pointer to 'numBytes' of memory.
    //
    void* allocate(size_t numBytes);

    //
    // There is no deallocate.  The point of this class is that
    // deallocation can be skipped by the user of it, as the model
    // of use is to simultaneously deallocate everything at once
    // by calling pop(), and to not have to solve memory leak problems.
    //

protected:
    friend struct tHeader;

    // Header at the front of each page (or multi-page run) obtained from
    // the OS; links pages into freeList/inUseList.
    struct tHeader {
        tHeader(tHeader* nextPage, size_t pageCount) :
#ifdef GUARD_BLOCKS
            lastAllocation(0),
#endif
            nextPage(nextPage), pageCount(pageCount) { }

        ~tHeader() {
#ifdef GUARD_BLOCKS
            // Check every allocation that was made on this page before it
            // is released or recycled.
            if (lastAllocation)
                lastAllocation->checkAllocList();
#endif
        }

#ifdef GUARD_BLOCKS
        TAllocation* lastAllocation; // head of this page's allocation chain
#endif
        tHeader* nextPage;           // next page in the free/in-use list
        size_t pageCount;            // number of OS pages in this run
    };

    // Snapshot of the allocation point, saved by push() and restored by pop().
    struct tAllocState {
        size_t offset;  // currentPageOffset at the time of the push
        tHeader* page;  // top of inUseList at the time of the push
    };
    typedef std::vector<tAllocState> tAllocStack;

    // Track allocations if and only if we're using guard blocks.
    // NOTE: the function body is assembled across the preprocessor branches
    // below — only the guarded variant records a TAllocation header.
#ifndef GUARD_BLOCKS
    void* initializeAllocation(tHeader*, unsigned char* memory, size_t) {
#else
    void* initializeAllocation(tHeader* block, unsigned char* memory, size_t numBytes) {
        new(memory) TAllocation(numBytes, memory, block->lastAllocation);
        block->lastAllocation = reinterpret_cast<TAllocation*>(memory);
#endif
        // This is optimized entirely away if GUARD_BLOCKS is not defined.
        return TAllocation::offsetAllocation(memory);
    }

    size_t pageSize;           // granularity of allocation from the OS
    size_t alignment;          // all returned allocations will be aligned at
                               // this granularity, which will be a power of 2
    size_t alignmentMask;      // alignment - 1, presumably — set in the .cpp; verify there
    size_t headerSkip;         // amount of memory to skip to make room for the
                               // header (basically, size of header, rounded
                               // up to make it aligned)
    size_t currentPageOffset;  // next offset in top of inUseList to allocate from
    tHeader* freeList;         // list of popped memory
    tHeader* inUseList;        // list of all memory currently being used
    tAllocStack stack;         // stack of where to allocate from, to partition pool
    int numCalls;              // just an interesting statistic
    size_t totalBytes;         // just an interesting statistic

private:
    TPoolAllocator& operator=(const TPoolAllocator&);  // don't allow assignment operator
    TPoolAllocator(const TPoolAllocator&);             // don't allow default copy constructor
};
//
// There could potentially be many pools with pops happening at
// different times.  But a simple use is to have a global pop
// with everyone using the same global allocator.
//
// Returns the pool currently bound for use; the name suggests a per-thread
// binding, but the mechanism lives in the implementation file — confirm there.
extern TPoolAllocator& GetThreadPoolAllocator();
// Installs 'poolAllocator' as the pool returned by GetThreadPoolAllocator().
// Defined out-of-line; null-pointer handling is not visible here.
void SetThreadPoolAllocator(TPoolAllocator* poolAllocator);
  222. //
  223. // This STL compatible allocator is intended to be used as the allocator
  224. // parameter to templatized STL containers, like vector and map.
  225. //
  226. // It will use the pools for allocation, and not
  227. // do any deallocation, but will still do destruction.
  228. //
  229. template<class T>
  230. class pool_allocator {
  231. public:
  232. typedef size_t size_type;
  233. typedef ptrdiff_t difference_type;
  234. typedef T *pointer;
  235. typedef const T *const_pointer;
  236. typedef T& reference;
  237. typedef const T& const_reference;
  238. typedef T value_type;
  239. template<class Other>
  240. struct rebind {
  241. typedef pool_allocator<Other> other;
  242. };
  243. pointer address(reference x) const { return &x; }
  244. const_pointer address(const_reference x) const { return &x; }
  245. pool_allocator() : allocator(GetThreadPoolAllocator()) { }
  246. pool_allocator(TPoolAllocator& a) : allocator(a) { }
  247. pool_allocator(const pool_allocator<T>& p) : allocator(p.allocator) { }
  248. template<class Other>
  249. pool_allocator(const pool_allocator<Other>& p) : allocator(p.getAllocator()) { }
  250. pointer allocate(size_type n) {
  251. return reinterpret_cast<pointer>(getAllocator().allocate(n * sizeof(T))); }
  252. pointer allocate(size_type n, const void*) {
  253. return reinterpret_cast<pointer>(getAllocator().allocate(n * sizeof(T))); }
  254. void deallocate(void*, size_type) { }
  255. void deallocate(pointer, size_type) { }
  256. pointer _Charalloc(size_t n) {
  257. return reinterpret_cast<pointer>(getAllocator().allocate(n)); }
  258. void construct(pointer p, const T& val) { new ((void *)p) T(val); }
  259. void destroy(pointer p) { p->T::~T(); }
  260. bool operator==(const pool_allocator& rhs) const { return &getAllocator() == &rhs.getAllocator(); }
  261. bool operator!=(const pool_allocator& rhs) const { return &getAllocator() != &rhs.getAllocator(); }
  262. size_type max_size() const { return static_cast<size_type>(-1) / sizeof(T); }
  263. size_type max_size(int size) const { return static_cast<size_type>(-1) / size; }
  264. TPoolAllocator& getAllocator() const { return allocator; }
  265. protected:
  266. pool_allocator& operator=(const pool_allocator&) { return *this; }
  267. TPoolAllocator& allocator;
  268. };
  269. } // end namespace glslang
  270. #endif // _POOLALLOC_INCLUDED_