
/************************************************************************************

PublicHeader:   OVR_Kernel.h
Filename    :   OVR_Allocator.h
Content     :   Installable memory allocator
Created     :   September 19, 2012
Notes       :

Copyright   :   Copyright 2014 Oculus VR, LLC All Rights reserved.

Licensed under the Oculus VR Rift SDK License Version 3.2 (the "License");
you may not use the Oculus VR Rift SDK except in compliance with the License,
which is provided at the time of installation or download, or which
otherwise accompanies this software in either electronic or hard copy form.

You may obtain a copy of the License at

http://www.oculusvr.com/licenses/LICENSE-3.2

Unless required by applicable law or agreed to in writing, the Oculus VR SDK
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

************************************************************************************/
#ifndef OVR_Allocator_h
#define OVR_Allocator_h

#include "OVR_Types.h"
#include "OVR_Atomic.h"
#include "OVR_Std.h"
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <exception>
//-----------------------------------------------------------------------------------
// ***** Disable template-unfriendly MS VC++ warnings
#if defined(OVR_CC_MSVC)
#pragma warning(push)
// Pragma to prevent long name warnings in VC++
#pragma warning(disable : 4503)
#pragma warning(disable : 4786)
// In MSVC 7.1, warning about placement new POD default initializer
#pragma warning(disable : 4345)
#endif

// Un-define new so that placement constructors work
#undef new
//-----------------------------------------------------------------------------------
// ***** Placement new overrides

// Calls constructor on own memory created with "new(ptr) type"
#ifndef __PLACEMENT_NEW_INLINE
#define __PLACEMENT_NEW_INLINE
#   if defined(OVR_CC_MWERKS) || defined(OVR_CC_BORLAND) || defined(OVR_CC_GNU)
#   include <new>
#   else
    // Useful on MSVC
    OVR_FORCE_INLINE void* operator new    (size_t n, void* ptr) { OVR_UNUSED(n); return ptr; }
    OVR_FORCE_INLINE void  operator delete (void*, void*)        { }
#   endif
#endif // __PLACEMENT_NEW_INLINE
//------------------------------------------------------------------------
// ***** Macros to redefine class new/delete operators

// Types specifically declared to allow disambiguation of address in
// class member operator new.

#define OVR_MEMORY_REDEFINE_NEW_IMPL(class_name, check_delete)                        \
    void* operator new(size_t sz)                                                     \
    { void* p = OVR_ALLOC_DEBUG(sz, __FILE__, __LINE__); return p; }                  \
    void* operator new(size_t sz, const char* file, int line)                         \
    { OVR_UNUSED2(file, line); void* p = OVR_ALLOC_DEBUG(sz, file, line); return p; } \
    void operator delete(void* p)                                                     \
    { check_delete(class_name, p); OVR_FREE(p); }                                     \
    void operator delete(void* p, const char*, int)                                   \
    { check_delete(class_name, p); OVR_FREE(p); }

#define OVR_MEMORY_DEFINE_PLACEMENT_NEW                                       \
    void* operator new (size_t n, void* ptr)      { OVR_UNUSED(n); return ptr; } \
    void  operator delete (void* ptr, void* ptr2) { OVR_UNUSED2(ptr, ptr2); }

#define OVR_MEMORY_CHECK_DELETE_NONE(class_name, p)

// Redefines all new/delete operators in a class without custom memory initialization.
#define OVR_MEMORY_REDEFINE_NEW(class_name) \
    OVR_MEMORY_REDEFINE_NEW_IMPL(class_name, OVR_MEMORY_CHECK_DELETE_NONE)
namespace OVR {

// We subclass std::bad_alloc for the purpose of overriding the 'what' function
// to provide additional information about the exception, such as context about
// how or where the exception occurred in our code. We subclass std::bad_alloc
// instead of creating a new type because it's intended to override std::bad_alloc
// and be caught by code that uses catch(std::bad_alloc&){}. Also, the std::bad_alloc
// constructor actually attempts to allocate memory!

struct bad_alloc : public std::bad_alloc
{
    bad_alloc(const char* description = "OVR::bad_alloc") OVR_NOEXCEPT;

    bad_alloc(const bad_alloc& oba) OVR_NOEXCEPT
    {
        OVR_strlcpy(Description, oba.Description, sizeof(Description));
    }

    bad_alloc& operator=(const bad_alloc& oba) OVR_NOEXCEPT
    {
        OVR_strlcpy(Description, oba.Description, sizeof(Description));
        return *this;
    }

    virtual const char* what() const OVR_NOEXCEPT
    {
        return Description;
    }

    char Description[256]; // Fixed size because we cannot allocate memory.
};
//-----------------------------------------------------------------------------------
// ***** Construct / Destruct

// Construct/Destruct functions are useful when new is redefined, as they can
// be called instead of placement new constructors.

template <class T>
OVR_FORCE_INLINE T* Construct(void* p)
{
    return ::new(p) T();
}

template <class T>
OVR_FORCE_INLINE T* Construct(void* p, const T& source)
{
    return ::new(p) T(source);
}

// Same as above, but allows for a different type of constructor.
template <class T, class S>
OVR_FORCE_INLINE T* ConstructAlt(void* p, const S& source)
{
    return ::new(p) T(source);
}

template <class T, class S1, class S2>
OVR_FORCE_INLINE T* ConstructAlt(void* p, const S1& src1, const S2& src2)
{
    return ::new(p) T(src1, src2);
}
// Note: These ConstructArray functions don't properly support the case of a C++
// exception occurring midway through construction, as they don't destruct the
// successfully constructed array elements before returning.

template <class T>
OVR_FORCE_INLINE void ConstructArray(void* p, size_t count)
{
    uint8_t* pdata = (uint8_t*)p;
    for (size_t i = 0; i < count; ++i, pdata += sizeof(T))
    {
        Construct<T>(pdata);
    }
}

template <class T>
OVR_FORCE_INLINE void ConstructArray(void* p, size_t count, const T& source)
{
    uint8_t* pdata = (uint8_t*)p;
    for (size_t i = 0; i < count; ++i, pdata += sizeof(T))
    {
        Construct<T>(pdata, source);
    }
}

template <class T>
OVR_FORCE_INLINE void Destruct(T* pobj)
{
    pobj->~T();
    OVR_UNUSED1(pobj); // Fix incorrect 'unused variable' MSVC warning.
}

template <class T>
OVR_FORCE_INLINE void DestructArray(T* pobj, size_t count)
{
    for (size_t i = 0; i < count; ++i, ++pobj)
        pobj->~T();
}
//-----------------------------------------------------------------------------------
// ***** Allocator

// Allocator defines a memory allocation interface that developers can override
// to provide memory for OVR; an instance of this class is typically created on
// application startup and passed into the System or OVR::System constructor.
//
// Users implementing this interface must provide three functions: Alloc, Free,
// and Realloc. Implementations of these functions must honor the requested alignment.
// Although arbitrary alignment requests are possible, requested alignment will
// typically be small, such as 16 bytes or less.

class Allocator
{
    friend class System;

public:
    virtual ~Allocator()
    {
    }

    // Returns the pointer to the current globally installed Allocator instance.
    // This pointer is used for most of the memory allocations.
    static Allocator* GetInstance();

    // *** Standard Alignment Alloc/Free

    // Allocate memory of specified size with default alignment.
    // Alloc of size == 0 will allocate a tiny block & return a valid pointer;
    // this makes it suitable for the new operator.
    virtual void* Alloc(size_t size) = 0;

    // Same as Alloc, but provides an option of passing debug data.
    virtual void* AllocDebug(size_t size, const char* /*file*/, unsigned /*line*/)
    { return Alloc(size); }

    // Reallocate memory block to a new size, copying data if necessary. Returns the pointer to
    // the new memory block, which may be the same as the original pointer. Will return 0 if
    // reallocation failed, in which case the previous memory is still valid.
    // Realloc to decrease size will never fail.
    // Realloc of pointer == 0 is equivalent to Alloc.
    // Realloc to size == 0 shrinks to the minimal size; the pointer remains valid and requires Free().
    virtual void* Realloc(void* p, size_t newSize) = 0;

    // Frees memory allocated by Alloc/Realloc.
    // Free of a null pointer is valid and will do nothing.
    virtual void Free(void* p) = 0;

    // *** Custom Alignment Alloc/Free

    // Allocate memory of specified alignment.
    // Memory allocated with AllocAligned MUST be freed with FreeAligned.
    // Default implementation will delegate to Alloc/Free after doing rounding.
    virtual void* AllocAligned(size_t size, size_t align);

    // Frees memory allocated with AllocAligned.
    virtual void FreeAligned(void* p);

protected:
    // *** Tracking of allocations with callstacks for debug builds.

    // Add the allocation & the callstack to the tracking database.
    void trackAlloc(void* p, size_t size);

    // Remove the allocation from the tracking database.
    void untrackAlloc(void* p);

    // Lock used during LibOVR execution to guard the tracked allocation list.
    Lock TrackLock;

protected:
    Allocator() {}

public:
    //------------------------------------------------------------------------
    // ***** DumpMemory

    // Enable/disable leak tracking mode and check if currently tracking.
    static void SetLeakTracking(bool enabled);
    static bool IsTrackingLeaks();

    // Displays information about outstanding allocations, typically for the
    // purpose of reporting leaked memory on application or module shutdown.
    // This should be used instead of, for example, VC++ _CrtDumpMemoryLeaks,
    // because it allows us to dump additional information about our allocations.
    // Returns the number of currently outstanding heap allocations.
    static int DumpMemory();
};
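
// Illustrative sketch (not part of the original header): a minimal custom
// Allocator satisfying the three required pure virtuals by delegating to the
// C runtime while counting Alloc calls. 'CountingAllocator' is a hypothetical
// example type; a production implementation would also honor the alignment
// notes above.
//
//     class CountingAllocator : public OVR::Allocator
//     {
//     public:
//         virtual void* Alloc(size_t size)            { ++AllocCalls; return malloc(size ? size : 1); } // size==0 still returns a valid pointer.
//         virtual void* Realloc(void* p, size_t size) { return realloc(p, size); }
//         virtual void  Free(void* p)                 { free(p); }
//
//         size_t AllocCalls = 0; // Not thread-safe; for illustration only.
//     };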
//------------------------------------------------------------------------
// ***** DefaultAllocator

// This allocator is created and used if no other allocator is installed.
// The default allocator delegates to system malloc.

class DefaultAllocator : public Allocator
{
public:
    virtual void* Alloc(size_t size);
    virtual void* AllocDebug(size_t size, const char* file, unsigned line);
    virtual void* Realloc(void* p, size_t newSize);
    virtual void  Free(void* p);
};
//------------------------------------------------------------------------
// ***** DebugPageAllocator
//
// Implements a page-protected allocator:
// Detects use-after-free and memory overrun bugs immediately at the time of usage via an exception.
// Can detect a memory read or write beyond the valid memory immediately at the
// time of usage via an exception (if EnableOverrunDetection is enabled).
// This doesn't replace valgrind but implements a subset of its functionality
// in a way that performs well enough to avoid interfering with app execution.
// The point of this is that it immediately detects these two classes of errors while
// being much faster than external tools such as valgrind, etc. This is at a cost of
// as much as a page of extra bytes per allocation (two if EnableOverrunDetection is enabled).
// On Windows the Alloc and Free functions average about 12000 cycles each. This isn't small but
// it should be low enough for many testing circumstances with apps that are prudent with
// memory allocation volume.
// The amount of system memory needed for this isn't as high as one might initially guess, as it
// takes hundreds of thousands of memory allocations in order to make a dent in the gigabytes of
// memory most computers have.
//
//
// Technical design for the Windows platform:
// Every Alloc is satisfied via a VirtualAlloc return of a memory block of one or more pages:
// the minimum needed to satisfy the user's size and alignment requirements.
// Upon Free the memory block (which is one or more pages) is not passed to VirtualFree but rather
// is converted to PAGE_NOACCESS and put into a delayed-free list (FreedBlockArray) to be passed
// to VirtualFree later. The result of this is that any further attempts to read or write the
// memory will result in an exception.
// The delayed-free list grows each time Free is called until it reaches maximum capacity,
// at which point the oldest memory block in the list is passed to VirtualFree and its
// entry in the list is filled with this newly freed (PAGE_NOACCESS) memory block.
// Once the delayed-free list reaches maximum capacity it thus acts as a ring buffer of blocks.
// The maximum size of this list is currently determined at compile time as a constant.
// EnableOverrunDetection is an additional feature which allows reads or writes beyond valid
// memory to be detected as they occur. This is implemented by allocating an additional
// page of memory at the end of the usual pages and leaving it uncommitted (MEM_RESERVE).
// When this option is used, we return a pointer to the user that's at the end of the valid
// memory block as opposed to at the beginning. This is so that the space right after the
// user space is invalid. If there are some odd bytes remaining between the end of the user's
// space and the page (due to alignment requirements), we optionally fill these with guard bytes.
// We do not currently support memory underrun detection, which could be implemented via an
// extra inaccessible page before the user page(s). In practice this is rarely needed.
// Currently the choice to use EnableOverrunDetection must be made before any calls to Alloc, etc.,
// as the logic is simpler and faster if we don't have to dynamically handle either case at runtime.
// We store within the memory block the size of the block and the size of the original user Alloc
// request. This is done as two size_t values written before the memory returned to the user.
// Thus the pointer returned to the user will never be at the very beginning of the memory block,
// because there will be two size_t's before it.
// This class itself allocates no memory, as that could interfere with its ability to supply
// memory, especially if the global malloc and new functions are replaced with this class.
// We could in fact support this class allocating memory as long as it used a system allocator
// and not malloc, new, etc.
// As of this writing we don't do debug fill patterns in the returned memory, because we mostly
// don't need them: memory exceptions take the place of unexpected fill value validation.
// However, there is some value in doing a small debug fill of the last few bytes after the
// user's bytes but before the next page, which will happen for odd sizes passed to Alloc.
//
// Technical design for Mac and Linux platforms:
// Apple's XCode malloc functionality includes something called MallocGuardEdges which is similar
// to DebugPageAllocator, though it protects only larger sized allocations and not smaller ones.
// Our approach for this on Mac and Linux is to use mmap and mprotect in a way similar to VirtualAlloc
// and VirtualProtect. Unix doesn't have the concept of Windows MEM_RESERVE vs. MEM_COMMIT, but we can
// simulate MEM_RESERVE by having an extra page that's PROT_NONE instead of MEM_RESERVE. Since Unix
// platforms don't commit pages to physical memory until they are first accessed, this extra
// page will in practice act similarly to Windows MEM_RESERVE at runtime.
//
// Allocation interface:
// Alloc sizes can be any size_t >= 0.
// An alloc size of 0 returns a non-nullptr.
// Alloc functions may fail (usually due to insufficient memory), in which case they return nullptr.
// All returned allocations are aligned on a power-of-two boundary of at least DebugPageAllocator::DefaultAlignment.
// AllocAligned supports any power-of-two alignment value from 1 to 256. Other values result in undefined behavior.
// AllocAligned may return a pointer that's aligned greater than the requested alignment.
// Realloc acts as per the C99 Standard realloc.
// Free requires the supplied pointer to be a valid pointer returned by this allocator's Alloc functions, else the behavior is undefined.
// You may not Free a pointer a second time, else the behavior is undefined.
// Free otherwise always succeeds.
// Allocations made with AllocAligned or ReallocAligned must be Freed via FreeAligned, as per the base class requirement.
//
class DebugPageAllocator : public Allocator
{
public:
    DebugPageAllocator();
    virtual ~DebugPageAllocator();

    void   Init();
    void   Shutdown();

    void   SetMaxDelayedFreeCount(size_t delayedFreeCount); // Sets how many freed blocks we should save before purging the oldest of them.
    size_t GetMaxDelayedFreeCount() const;                  // Returns the max number of delayed free allocations before the oldest ones are purged (finally freed).
    void   EnableOverrunDetection(bool enableOverrunDetection, bool enableOverrunGuardBytes); // enableOverrunDetection is enabled by default. enableOverrunGuardBytes is enabled by default in debug builds.

    void*  Alloc(size_t size);
    void*  AllocAligned(size_t size, size_t align);
    void*  Realloc(void* p, size_t newSize);
    void*  ReallocAligned(void* p, size_t newSize, size_t newAlign);
    void   Free(void* p);
    void   FreeAligned(void* p);
    size_t GetAllocSize(const void* p) const { return GetUserSize(p); }
    size_t GetPageSize() const { return PageSize; }

protected:
    struct Block
    {
        void*  BlockPtr;  // The pointer to the first page of the contiguous set of pages that make up this block.
        size_t BlockSize; // (page size) * (page count). Will be >= (SizeStorageSize + UserSize).

        void Clear() { BlockPtr = nullptr; BlockSize = 0; }
    };

    Block* FreedBlockArray;          // Currently a very simple array-like container that acts as a ring buffer of delay-freed (but inaccessible) blocks.
    size_t FreedBlockArrayMaxSize;   // The max number of freed blocks to put into FreedBlockArray before they start getting purged. Must be <= kFreedBlockArrayCapacity.
    size_t FreedBlockArraySize;      // The number of valid elements within FreedBlockArray. Increases as elements are added until it reaches kFreedBlockArrayCapacity. Then stays that way until Shutdown.
    size_t FreedBlockArrayOldest;    // The oldest entry in the FreedBlockArray ring buffer.
    size_t AllocationCount;          // Number of currently live allocations. Incremented by successful calls to Alloc (etc.), decremented by successful calls to Free.
    bool   OverrunPageEnabled;       // If true then we implement memory overrun detection, at the cost of an extra page per user allocation.
    bool   OverrunGuardBytesEnabled; // If true then any remaining bytes between the end of the user's allocation and the end of the page are filled with guard bytes and verified upon Free. Valid only if OverrunPageEnabled is true.
    size_t PageSize;                 // The current default platform memory page size (e.g. 4096). We allocate blocks in multiples of pages.
    OVR::Lock Lock;                  // Mutex which allows an instance of this class to be used by multiple threads simultaneously.

public:
    #if defined(_WIN64) || defined(_M_IA64) || defined(__LP64__) || defined(__arch64__) || defined(__APPLE__)
    static const size_t DefaultAlignment = 16; // 64 bit platforms and all Apple platforms.
    #else
    static const size_t DefaultAlignment = 8;  // 32 bit platforms. We want DefaultAlignment as low as possible because that means fewer unused bytes between a user allocation and the end of the page.
    #endif

    #if defined(_WIN32)
    static const size_t MaxAlignment = 2048;   // Half a page size.
    #else
    static const size_t MaxAlignment = DefaultAlignment; // Currently a low limit because we don't have full page allocator support yet.
    #endif

protected:
    static const size_t  SizeStorageSize = DefaultAlignment; // Where the user size and block size are stored. Needs to be at least 2 * sizeof(size_t).
    static const size_t  UserSizeIndex   = 0;    // We store block sizes within the memory itself, and this serves to identify it.
    static const size_t  BlockSizeIndex  = 1;
    static const uint8_t GuardFillByte   = 0xfd; // Same value VC++ uses for heap guard bytes.

    static size_t  GetUserSize(const void* p);     // Returns the size that the user requested in Alloc, etc.
    static size_t  GetBlockSize(const void* p);    // Returns the actual number of bytes in the returned block. Will be a multiple of PageSize.
    static size_t* GetSizePosition(const void* p); // We store the user and block sizes as two size_t values within the memory returned to the user, before the user pointer. This gets that location.

    void* GetBlockPtr(void* p);
    void* GetUserPosition(void* pPageMemory, size_t blockSize, size_t userSize, size_t userAlignment);
    void* AllocCommittedPageMemory(size_t blockSize);
    void* EnablePageMemory(void* pPageMemory, size_t blockSize);
    void  DisablePageMemory(void* pPageMemory, size_t blockSize);
    void  FreePageMemory(void* pPageMemory, size_t blockSize);
};
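
// Illustrative usage sketch (not part of the original header), assuming
// Init/Shutdown bracket the allocator's lifetime as their names suggest:
//
//     OVR::DebugPageAllocator debugAllocator;
//     debugAllocator.Init();
//     debugAllocator.EnableOverrunDetection(true, true); // Must precede any Alloc calls (see notes above).
//
//     void* p = debugAllocator.Alloc(100);
//     // A read or write past the valid bytes lands in the uncommitted guard
//     // page (possibly after a few guard-filled padding bytes) and faults immediately.
//     debugAllocator.Free(p);
//     // Touching p now also faults: the freed block stays PAGE_NOACCESS until purged.
//
//     debugAllocator.Shutdown();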
///------------------------------------------------------------------------
/// ***** OVR_malloca / OVR_freea
///
/// Implements a safer version of alloca. However, see the notes below.
///
/// Allocates memory from the stack via alloca (or similar) for smaller
/// allocation sizes, else falls back to operator new. This is very similar
/// to the Microsoft _malloca and _freea functions, and the implementation
/// is nearly the same aside from using operator new instead of malloc.
///
/// Unlike alloca, calls to OVR_malloca must be matched by calls to OVR_freea,
/// and the OVR_freea call must be in the same function scope as the original
/// call to OVR_malloca.
///
/// Note:
/// While this function reduces the likelihood of a stack overflow exception,
/// it cannot guarantee one won't occur, as even small allocation sizes done
/// by alloca can exhaust the stack when it is nearly full. However, the
/// majority of stack overflows due to alloca usage are due to large
/// allocation size requests.
///
/// Declarations:
///     void* OVR_malloca(size_t size);
///     void  OVR_freea(void* p);
///
/// Example usage:
///     void TestMalloca()
///     {
///         char* charArray = (char*)OVR_malloca(37000);
///
///         if(charArray)
///         {
///             // <use charArray>
///             OVR_freea(charArray);
///         }
///     }
///
#if !defined(OVR_malloca)
    #define OVR_MALLOCA_ALLOCA_ID  UINT32_C(0xcccccccc)
    #define OVR_MALLOCA_MALLOC_ID  UINT32_C(0xdddddddd)
    #define OVR_MALLOCA_ID_SIZE    16 // Needs to be at least 2 * sizeof(uint32_t) and at least the minimum alignment for malloc on the platform. 16 works for all platforms.

    #if defined(_MSC_VER)
        #define OVR_MALLOCA_SIZE_THRESHOLD  8192
    #else
        #define OVR_MALLOCA_SIZE_THRESHOLD  1024 // Non-Microsoft platforms tend to exhaust stack space sooner due to non-automatic stack expansion.
    #endif

    #define OVR_malloca(size)                                                                                         \
        ((((size) + OVR_MALLOCA_ID_SIZE) < OVR_MALLOCA_SIZE_THRESHOLD) ?                                              \
            OVR::malloca_SetId(static_cast<char*>(alloca((size) + OVR_MALLOCA_ID_SIZE)), OVR_MALLOCA_ALLOCA_ID) :     \
            OVR::malloca_SetId(static_cast<char*>(new char[(size) + OVR_MALLOCA_ID_SIZE]), OVR_MALLOCA_MALLOC_ID))

    inline void* malloca_SetId(char* p, uint32_t id)
    {
        if(p)
        {
            *reinterpret_cast<uint32_t*>(p) = id;
            p = reinterpret_cast<char*>(p) + OVR_MALLOCA_ID_SIZE;
        }

        return p;
    }
#endif
#if !defined(OVR_freea)
    #define OVR_freea(p) OVR::freea_Impl(reinterpret_cast<char*>(p))

    inline void freea_Impl(char* p)
    {
        if(p)
        {
            // We store the allocation type id at the first uint32_t in the returned memory.
            static_assert(OVR_MALLOCA_ID_SIZE >= sizeof(uint32_t), "Insufficient OVR_MALLOCA_ID_SIZE size.");

            p -= OVR_MALLOCA_ID_SIZE;
            uint32_t id = *reinterpret_cast<uint32_t*>(p);

            if(id == OVR_MALLOCA_MALLOC_ID)
                delete[] p;
            #if defined(OVR_BUILD_DEBUG)
            else if(id != OVR_MALLOCA_ALLOCA_ID)
                OVR_FAIL_M("OVR_freea memory corrupt or not allocated by OVR_malloca.");
            #endif
        }
    }
#endif
///------------------------------------------------------------------------
/// ***** OVR_newa / OVR_deletea
///
/// Implements a C++ array version of OVR_malloca/OVR_freea.
/// Expresses failure via a nullptr return value and not via a C++ exception.
/// If a handled C++ exception occurs midway through construction in OVR_newa,
/// there is no automatic destruction of the successfully constructed elements.
///
/// Declarations:
///     T*   OVR_newa(T, size_t count);
///     void OVR_deletea(T, T* pTArray);
///
/// Example usage:
///     void TestNewa()
///     {
///         Widget* pWidgetArray = OVR_newa(Widget, 37000);
///
///         if(pWidgetArray)
///         {
///             // <use pWidgetArray>
///             OVR_deletea(Widget, pWidgetArray);
///         }
///     }
///
#if !defined(OVR_newa)
    #define OVR_newa(T, count) OVR::newa_Impl<T>(static_cast<char*>(OVR_malloca((count) * sizeof(T))), (count))
#endif

template<class T>
T* newa_Impl(char* pTArray, size_t count)
{
    if(pTArray)
    {
        OVR::ConstructArray<T>(pTArray, count);

        // We store the count at the second uint32_t in the returned memory.
        static_assert(OVR_MALLOCA_ID_SIZE >= (2 * sizeof(uint32_t)), "Insufficient OVR_MALLOCA_ID_SIZE size.");
        reinterpret_cast<uint32_t*>(reinterpret_cast<char*>(pTArray) - OVR_MALLOCA_ID_SIZE)[1] = (uint32_t)count;
    }

    return reinterpret_cast<T*>(pTArray);
}

#if !defined(OVR_deletea)
    #define OVR_deletea(T, pTArray) OVR::deletea_Impl<T>(pTArray)
#endif

template<class T>
void deletea_Impl(T* pTArray)
{
    if(pTArray)
    {
        uint32_t count = reinterpret_cast<uint32_t*>(reinterpret_cast<char*>(pTArray) - OVR_MALLOCA_ID_SIZE)[1];
        OVR::DestructArray<T>(pTArray, count);
        OVR_freea(pTArray);
    }
}
//------------------------------------------------------------------------
// ***** Memory Allocation Macros

// These macros should be used for global allocation. In the future, these
// macros will allow allocation to be extended with debug file/line information
// if necessary.

#define OVR_REALLOC(p,s)        OVR::Allocator::GetInstance()->Realloc((p),(s))
#define OVR_FREE(p)             OVR::Allocator::GetInstance()->Free((p))
#define OVR_ALLOC_ALIGNED(s,a)  OVR::Allocator::GetInstance()->AllocAligned((s),(a))
#define OVR_FREE_ALIGNED(p)     OVR::Allocator::GetInstance()->FreeAligned((p))

#ifdef OVR_BUILD_DEBUG
#define OVR_ALLOC(s)            OVR::Allocator::GetInstance()->AllocDebug((s), __FILE__, __LINE__)
#define OVR_ALLOC_DEBUG(s,f,l)  OVR::Allocator::GetInstance()->AllocDebug((s), f, l)
#else
#define OVR_ALLOC(s)            OVR::Allocator::GetInstance()->Alloc((s))
#define OVR_ALLOC_DEBUG(s,f,l)  OVR::Allocator::GetInstance()->Alloc((s))
#endif
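
// Illustrative sketch (not part of the original header): pairing the global
// allocation macros. In debug builds, OVR_ALLOC records __FILE__/__LINE__ automatically.
//
//     void* buffer = OVR_ALLOC(256);
//     buffer = OVR_REALLOC(buffer, 512);   // Error handling elided; a 0 return means the old block is still valid.
//     OVR_FREE(buffer);
//
//     void* aligned = OVR_ALLOC_ALIGNED(1024, 64);
//     OVR_FREE_ALIGNED(aligned);           // Aligned allocations must use the aligned free.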
//------------------------------------------------------------------------
// Base class that overrides the new and delete operators.
// Deriving from this class, even as a multiple base, incurs no space overhead.
class NewOverrideBase
{
public:
    // Redefine all new & delete operators.
    OVR_MEMORY_REDEFINE_NEW(NewOverrideBase)
};
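
// Illustrative sketch (not part of the original header): any class derived from
// NewOverrideBase has its new/delete routed through the installed OVR::Allocator.
// 'Widget' is a hypothetical example type.
//
//     class Widget : public OVR::NewOverrideBase
//     {
//     public:
//         int Value;
//     };
//
//     // Widget* w = new Widget;  // Allocates via OVR_ALLOC / OVR_ALLOC_DEBUG.
//     // delete w;                // Frees via OVR_FREE.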
//------------------------------------------------------------------------
// ***** Mapped memory allocation
//
// Equates to VirtualAlloc/VirtualFree on Windows, mmap/munmap on Unix.
// These are useful when you need system-supplied memory pages.
// They are also useful when you need to allocate memory in a way
// that doesn't affect the application heap.
void* SafeMMapAlloc(size_t size);
void  SafeMMapFree (const void* memory, size_t size);
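
// Illustrative sketch (not part of the original header): unlike Free(),
// SafeMMapFree requires the original allocation size.
//
//     void* pages = SafeMMapAlloc(2 * 4096);    // Two pages, assuming 4 KiB pages.
//     if(pages)
//     {
//         // <use pages>
//         SafeMMapFree(pages, 2 * 4096);        // Size must match the SafeMMapAlloc request.
//     }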
} // OVR


// Redefine operator 'new' if necessary.
#if defined(OVR_DEFINE_NEW)
#define new OVR_DEFINE_NEW
#endif

#if defined(OVR_CC_MSVC)
#pragma warning(pop)
#endif

#endif // OVR_Allocator_h