@@ -54,7 +54,7 @@

#else

#define RMEMAPI // We are building or using library as a static library (or Linux shared library)

#endif

-
+

//----------------------------------------------------------------------------------
// Types and Structures Definition
//----------------------------------------------------------------------------------
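For reviewers skimming this hunk: the `#else` branch shown is the static-library default; the Windows DLL branch sits just above it in the file and is not part of this patch. A sketch of the usual guard, with the `BUILD_LIBTYPE_SHARED`/`USE_LIBTYPE_SHARED` macro names assumed from raylib's convention rather than confirmed by this diff:

```c
// Hypothetical reconstruction of the guard around the hunk above;
// only the #else branch is visible in this patch.
#if defined(_WIN32) && defined(BUILD_LIBTYPE_SHARED)
    #define RMEMAPI __declspec(dllexport)   // Building the library as a Win32 DLL
#elif defined(_WIN32) && defined(USE_LIBTYPE_SHARED)
    #define RMEMAPI __declspec(dllimport)   // Using the library as a Win32 DLL
#else
    #define RMEMAPI // Static library (or Linux shared library)
#endif
```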
@@ -139,9 +139,9 @@ RMEMAPI void ObjPoolCleanUp(ObjPool *objpool, void **ptrref);

#if defined(RMEM_IMPLEMENTATION)

-#include <stdio.h> // Required for:
-#include <stdlib.h> // Required for:
-#include <string.h> // Required for:
+#include <stdio.h> // Required for:
+#include <stdlib.h> // Required for:
+#include <string.h> // Required for:

//----------------------------------------------------------------------------------
// Defines and Macros
@@ -163,8 +163,8 @@ RMEMAPI void ObjPoolCleanUp(ObjPool *objpool, void **ptrref);

//----------------------------------------------------------------------------------
// Module specific Functions Declaration
//----------------------------------------------------------------------------------
-static inline size_t __AlignSize(const size_t size, const size_t align)
-{
+static inline size_t __AlignSize(const size_t size, const size_t align)
+{
    return (size + (align - 1)) & -align;
}
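A note on `__AlignSize`, since the hunk only re-indents it: `(size + (align - 1)) & -align` rounds `size` up to the next multiple of `align`, and is only valid when `align` is a power of two (then `-align == ~(align - 1)` in two's complement). A minimal standalone check:

```c
#include <assert.h>
#include <stdint.h>
#include <stddef.h>

// Same formula as rmem's __AlignSize(): round size up to a multiple
// of align, where align must be a power of two.
static size_t align_size(const size_t size, const size_t align)
{
    return (size + (align - 1)) & -align;
}

int main(void)
{
    assert(align_size(13, 8) == 16);    // 13 -> next 8-byte boundary
    assert(align_size(16, 8) == 16);    // already aligned: unchanged
    assert(align_size(1, sizeof(intptr_t)) == sizeof(intptr_t));
    return 0;
}
```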
@@ -175,9 +175,9 @@ static inline size_t __AlignSize(const size_t size, const size_t align)
MemPool CreateMemPool(const size_t size)
{
    MemPool mempool = { 0 };
-
+
    if (size == 0UL) return mempool;
-    else
+    else
    {
        // Align the mempool size to at least the size of an alloc node.
        mempool.stack.size = size;
@@ -188,7 +188,7 @@ MemPool CreateMemPool(const size_t size)
            mempool.stack.size = 0UL;
            return mempool;
        }
-        else
+        else
        {
            mempool.stack.base = mempool.stack.mem + mempool.stack.size;
            return mempool;
@@ -199,9 +199,9 @@ MemPool CreateMemPool(const size_t size)
MemPool CreateMemPoolFromBuffer(void *buf, const size_t size)
{
    MemPool mempool = { 0 };
-
+
    if ((size == 0UL) || (buf == NULL) || (size <= sizeof(MemNode))) return mempool;
-    else
+    else
    {
        mempool.stack.size = size;
        mempool.stack.mem = buf;
@@ -213,7 +213,7 @@ MemPool CreateMemPoolFromBuffer(void *buf, const size_t size)
void DestroyMemPool(MemPool *const mempool)
{
    if ((mempool == NULL) || (mempool->stack.mem == NULL)) return;
-    else
+    else
    {
        free(mempool->stack.mem);
        *mempool = (MemPool){ 0 };
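The three hunks above all touch the pool lifecycle functions, so for orientation, a minimal usage sketch (the 1024-byte size is an arbitrary example):

```c
#define RMEM_IMPLEMENTATION
#include "rmem.h"

int main(void)
{
    MemPool pool = CreateMemPool(1024);     // returns a zeroed pool on failure
    if (pool.stack.mem == NULL) return 1;

    int *nums = MemPoolAlloc(&pool, 32*sizeof *nums);
    if (nums != NULL)
    {
        for (int i = 0; i < 32; i++) nums[i] = i;
        MemPoolFree(&pool, nums);           // return the block to the pool
    }

    DestroyMemPool(&pool);                  // frees the malloc'd backing buffer
    return 0;
}
```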
@@ -228,7 +228,7 @@ void *MemPoolAlloc(MemPool *const mempool, const size_t size)
    MemNode *new_mem = NULL;
    const size_t ALLOC_SIZE = __AlignSize(size + sizeof *new_mem, sizeof(intptr_t));
    const size_t BUCKET_INDEX = (ALLOC_SIZE >> MEMPOOL_BUCKET_BITS) - 1;
-
+
    // If the size is small enough, let's check if our buckets has a fitting memory block.
    if (BUCKET_INDEX < MEMPOOL_BUCKET_SIZE && mempool->buckets[BUCKET_INDEX] != NULL && mempool->buckets[BUCKET_INDEX]->size >= ALLOC_SIZE)
    {
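The bucket arithmetic above shifts the header-inclusive `ALLOC_SIZE` down by `MEMPOOL_BUCKET_BITS` and subtracts one so the smallest size class lands in slot 0. The real constants are defined earlier in rmem.h and not shown in this patch, so the values below are illustrative stand-ins only:

```c
#include <stdio.h>

// Stand-in values; the real ones live in rmem.h's "Defines and Macros"
// section, which this patch does not show.
#define MEMPOOL_BUCKET_BITS 5
#define MEMPOOL_BUCKET_SIZE 8

int main(void)
{
    // Mirrors MemPoolAlloc's BUCKET_INDEX computation for a few sizes.
    for (size_t alloc_size = 32; alloc_size <= 320; alloc_size += 32)
    {
        const size_t bucket = (alloc_size >> MEMPOOL_BUCKET_BITS) - 1;
        printf("ALLOC_SIZE %3zu -> bucket %zu (%s)\n", alloc_size, bucket,
               (bucket < MEMPOOL_BUCKET_SIZE)? "bucketed" : "freelist/stack");
    }
    return 0;
}
```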
@@ -240,7 +240,7 @@ void *MemPoolAlloc(MemPool *const mempool, const size_t size)
    else if (mempool->freeList.head != NULL)
    {
        const size_t MEM_SPLIT_THRESHOLD = 16;
-
+
        // If the freelist is valid, let's allocate FROM the freelist then!
        for (MemNode *inode = mempool->freeList.head; inode != NULL; inode = inode->next)
        {
@@ -251,10 +251,10 @@ void *MemPoolAlloc(MemPool *const mempool, const size_t size)
                new_mem = inode;
                (inode->prev != NULL)? (inode->prev->next = inode->next) : (mempool->freeList.head = inode->next);
                (inode->next != NULL)? (inode->next->prev = inode->prev) : (mempool->freeList.tail = inode->prev);
-
+
                if (mempool->freeList.head != NULL) mempool->freeList.head->prev = NULL;
                else mempool->freeList.tail = NULL;
-
+
                if (mempool->freeList.tail != NULL) mempool->freeList.tail->next = NULL;
                mempool->freeList.len--;
                break;
@@ -269,7 +269,7 @@ void *MemPoolAlloc(MemPool *const mempool, const size_t size)
            }
        }
    }
-
+
    if (new_mem == NULL)
    {
        // not enough memory to support the size!
@@ -279,13 +279,13 @@ void *MemPoolAlloc(MemPool *const mempool, const size_t size)
            // Couldn't allocate from a freelist, allocate from available mempool.
            // Subtract allocation size from the mempool.
            mempool->stack.base -= ALLOC_SIZE;
-
+
            // Use the available mempool space as the new node.
            new_mem = (MemNode *)mempool->stack.base;
            new_mem->size = ALLOC_SIZE;
        }
    }
-
+
    // Visual of the allocation block.
    // --------------
    // | mem size | lowest addr of block
@@ -314,7 +314,7 @@ void *MemPoolRealloc(MemPool *const restrict mempool, void *ptr, const size_t si
    MemNode *const node = (MemNode *)((uint8_t *)ptr - sizeof *node);
    const size_t NODE_SIZE = sizeof *node;
    uint8_t *const resized_block = MemPoolAlloc(mempool, size);
-
+
    if (resized_block == NULL) return NULL;
    else
    {
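`MemPoolRealloc` recovers the allocation header by stepping back `sizeof(MemNode)` bytes from the user pointer, matching the block diagram in the previous hunk. A self-contained model of that layout, with a hypothetical `Header` standing in for `MemNode`:

```c
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

typedef struct Header { size_t size; } Header;   // hypothetical stand-in for MemNode

int main(void)
{
    // Reserve room for the header plus user data; hand out the address
    // just past the header, keeping the bookkeeping at a lower address.
    uint8_t *block = malloc(sizeof(Header) + 64);
    assert(block != NULL);
    ((Header *)block)->size = sizeof(Header) + 64;
    void *user_ptr = block + sizeof(Header);

    // Recover the header from the user pointer, same arithmetic as
    // (MemNode *)((uint8_t *)ptr - sizeof *node) above.
    Header *const hdr = (Header *)((uint8_t *)user_ptr - sizeof *hdr);
    assert(hdr->size == sizeof(Header) + 64);

    free(block);
    return 0;
}
```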
@@ -329,16 +329,16 @@
void MemPoolFree(MemPool *const restrict mempool, void *ptr)
{
    if ((mempool == NULL) || (ptr == NULL) || ((uintptr_t)ptr - sizeof(MemNode) < (uintptr_t)mempool->stack.mem)) return;
-    else
+    else
    {
        // Behind the actual pointer data is the allocation info.
        MemNode *const mem_node = (MemNode *)((uint8_t *)ptr - sizeof *mem_node);
        const size_t BUCKET_INDEX = (mem_node->size >> MEMPOOL_BUCKET_BITS) - 1;
-
+
        // Make sure the pointer data is valid.
-        if (((uintptr_t)mem_node < (uintptr_t)mempool->stack.base) ||
-            (((uintptr_t)mem_node - (uintptr_t)mempool->stack.mem) > mempool->stack.size) ||
-            (mem_node->size == 0UL) ||
+        if (((uintptr_t)mem_node < (uintptr_t)mempool->stack.base) ||
+            (((uintptr_t)mem_node - (uintptr_t)mempool->stack.mem) > mempool->stack.size) ||
+            (mem_node->size == 0UL) ||
            (mem_node->size > mempool->stack.size)) return;
        // If the mem_node is right at the stack base ptr, then add it to the stack.
        else if ((uintptr_t)mem_node == (uintptr_t)mempool->stack.base)
@@ -362,13 +362,13 @@ void MemPoolFree(MemPool *const restrict mempool, void *ptr)
        else /*if ((mempool->freeList.len == 0UL) || ((uintptr_t)mempool->freeList.head >= (uintptr_t)mempool->stack.mem && (uintptr_t)mempool->freeList.head - (uintptr_t)mempool->stack.mem < mempool->stack.size))*/
        {
            for (MemNode *n = mempool->freeList.head; n != NULL; n = n->next) if (n == mem_node) return;
-
+
            // This code insertion sorts where largest size is last.
            if (mempool->freeList.head == NULL)
            {
                mempool->freeList.head = mempool->freeList.tail = mem_node;
                mempool->freeList.len++;
-            }
+            }
            else if (mempool->freeList.head->size >= mem_node->size)
            {
                mem_node->next = mempool->freeList.head;
@@ -383,7 +383,7 @@ void MemPoolFree(MemPool *const restrict mempool, void *ptr)
                mempool->freeList.tail = mem_node;
                mempool->freeList.len++;
            }
-
+
            if (mempool->freeList.autoDefrag && (mempool->freeList.maxNodes != 0UL) && (mempool->freeList.len > mempool->freeList.maxNodes)) MemPoolDefrag(mempool);
        }
    }
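The last line of this hunk is the auto-defrag trigger: it fires only when `autoDefrag` is set and the freelist has grown past `maxNodes`. Both field names come straight from the condition above; a short sketch of opting in:

```c
#define RMEM_IMPLEMENTATION
#include "rmem.h"

int main(void)
{
    MemPool pool = CreateMemPool(2048);   // arbitrary example size

    // Field names taken from the condition in the hunk above: once the
    // freelist exceeds maxNodes, MemPoolFree calls MemPoolDefrag itself.
    pool.freeList.maxNodes = 64;
    pool.freeList.autoDefrag = true;

    // ... allocate and free as usual; defragging is now automatic ...

    DestroyMemPool(&pool);
    return 0;
}
```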
@@ -392,7 +392,7 @@ void MemPoolFree(MemPool *const restrict mempool, void *ptr)
void MemPoolCleanUp(MemPool *const restrict mempool, void **ptrref)
{
    if ((mempool == NULL) || (ptrref == NULL) || (*ptrref == NULL)) return;
-    else
+    else
    {
        MemPoolFree(mempool, *ptrref);
        *ptrref = NULL;
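`MemPoolCleanUp` is a thin wrapper over `MemPoolFree` that also nulls the caller's pointer through the double pointer, so the freed pointer cannot dangle:

```c
#define RMEM_IMPLEMENTATION
#include "rmem.h"

int main(void)
{
    MemPool pool = CreateMemPool(1024);   // arbitrary example size
    int *data = MemPoolAlloc(&pool, 16*sizeof *data);

    // Frees the block AND sets data to NULL in one call.
    MemPoolCleanUp(&pool, (void **)&data);

    DestroyMemPool(&pool);
    return 0;
}
```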
@@ -402,11 +402,11 @@ void MemPoolCleanUp(MemPool *const restrict mempool, void **ptrref)
size_t GetMemPoolFreeMemory(const MemPool mempool)
{
    size_t total_remaining = (uintptr_t)mempool.stack.base - (uintptr_t)mempool.stack.mem;
-
+
    for (MemNode *n=mempool.freeList.head; n != NULL; n = n->next) total_remaining += n->size;
-
+
    for (size_t i=0; i<MEMPOOL_BUCKET_SIZE; i++) for (MemNode *n = mempool.buckets[i]; n != NULL; n = n->next) total_remaining += n->size;
-
+
    return total_remaining;
}
@@ -423,12 +423,12 @@ bool MemPoolDefrag(MemPool *const mempool)
        for (size_t i = 0; i < MEMPOOL_BUCKET_SIZE; i++) mempool->buckets[i] = NULL;
        mempool->stack.base = mempool->stack.mem + mempool->stack.size;
        return true;
-    }
+    }
    else
    {
        for (size_t i=0; i<MEMPOOL_BUCKET_SIZE; i++)
        {
-            while (mempool->buckets[i] != NULL)
+            while (mempool->buckets[i] != NULL)
            {
                if ((uintptr_t)mempool->buckets[i] == (uintptr_t)mempool->stack.base)
                {
@@ -440,42 +440,42 @@ bool MemPoolDefrag(MemPool *const mempool)
                else break;
            }
        }
-
+
        const size_t PRE_DEFRAG_LEN = mempool->freeList.len;
        MemNode **node = &mempool->freeList.head;
-
+
        while (*node != NULL)
        {
-            if ((uintptr_t)*node == (uintptr_t)mempool->stack.base)
+            if ((uintptr_t)*node == (uintptr_t)mempool->stack.base)
            {
                // If node is right at the stack, merge it back into the stack.
                mempool->stack.base += (*node)->size;
                (*node)->size = 0UL;
                ((*node)->prev != NULL)? ((*node)->prev->next = (*node)->next) : (mempool->freeList.head = (*node)->next);
                ((*node)->next != NULL)? ((*node)->next->prev = (*node)->prev) : (mempool->freeList.tail = (*node)->prev);
-
+
                if (mempool->freeList.head != NULL) mempool->freeList.head->prev = NULL;
                else mempool->freeList.tail = NULL;
-
+
                if (mempool->freeList.tail != NULL) mempool->freeList.tail->next = NULL;
                mempool->freeList.len--;
                node = &mempool->freeList.head;
-            }
+            }
            else if (((uintptr_t)*node + (*node)->size) == (uintptr_t)(*node)->next)
            {
                // Next node is at a higher address.
                (*node)->size += (*node)->next->size;
                (*node)->next->size = 0UL;
-
+
                // <-[P Curr N]-> <-[P Next N]-> <-[P NextNext N]->
-                //
+                //
                // |--------------------|
                // <-[P Curr N]-> <-[P Next N]-> [P NextNext N]->
                if ((*node)->next->next != NULL) (*node)->next->next->prev = *node;
-
+
                // <-[P Curr N]-> <-[P NextNext N]->
                (*node)->next = (*node)->next->next;
-
+
                mempool->freeList.len--;
                node = &mempool->freeList.head;
            }
@@ -484,16 +484,16 @@ bool MemPoolDefrag(MemPool *const mempool)
                // Prev node is at a higher address.
                (*node)->size += (*node)->prev->size;
                (*node)->prev->size = 0UL;
-
+
                // <-[P PrevPrev N]-> <-[P Prev N]-> <-[P Curr N]->
                //
                // |--------------------|
                // <-[P PrevPrev N] <-[P Prev N]-> <-[P Curr N]->
                (*node)->prev->prev->next = *node;
-
+
                // <-[P PrevPrev N]-> <-[P Curr N]->
                (*node)->prev = (*node)->prev->prev;
-
+
                mempool->freeList.len--;
                node = &mempool->freeList.head;
            }
@@ -501,12 +501,12 @@ bool MemPoolDefrag(MemPool *const mempool)
            {
                // Next node is at a lower address.
                (*node)->next->size += (*node)->size;
-
+
                (*node)->size = 0UL;
                (*node)->next->prev = (*node)->prev;
                (*node)->prev->next = (*node)->next;
                *node = (*node)->next;
-
+
                mempool->freeList.len--;
                node = &mempool->freeList.head;
            }
@@ -514,21 +514,21 @@ bool MemPoolDefrag(MemPool *const mempool)
            {
                // Prev node is at a lower address.
                (*node)->prev->size += (*node)->size;
-
+
                (*node)->size = 0UL;
                (*node)->next->prev = (*node)->prev;
                (*node)->prev->next = (*node)->next;
                *node = (*node)->prev;
-
+
                mempool->freeList.len--;
                node = &mempool->freeList.head;
-            }
+            }
            else
            {
                node = &(*node)->next;
            }
        }
-
+
        return PRE_DEFRAG_LEN > mempool->freeList.len;
    }
}
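One detail worth flagging in this function's tail: the return value compares only the freelist length before and after (`PRE_DEFRAG_LEN > mempool->freeList.len`), so blocks merged out of the size buckets alone do not produce `true`. A hedged driver sketch:

```c
#define RMEM_IMPLEMENTATION
#include "rmem.h"
#include <stdio.h>

int main(void)
{
    MemPool pool = CreateMemPool(4096);       // arbitrary example size
    void *a = MemPoolAlloc(&pool, 500);
    void *b = MemPoolAlloc(&pool, 500);

    MemPoolFree(&pool, a);   // interior block: parked on a bucket or the freelist
    MemPoolFree(&pool, b);   // block at stack.base: merged straight back in

    // True only if defrag shortened the freelist itself.
    printf("freelist nodes merged: %s\n", MemPoolDefrag(&pool)? "yes" : "no");
    printf("free bytes: %zu\n", GetMemPoolFreeMemory(pool));

    DestroyMemPool(&pool);
    return 0;
}
```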
@@ -551,19 +551,19 @@ union ObjInfo {
ObjPool CreateObjPool(const size_t objsize, const size_t len)
{
    ObjPool objpool = { 0 };
-
+
    if ((len == 0UL) || (objsize == 0UL)) return objpool;
    else
    {
        objpool.objSize = __AlignSize(objsize, sizeof(size_t));
        objpool.stack.size = objpool.freeBlocks = len;
        objpool.stack.mem = calloc(objpool.stack.size, objpool.objSize);
-
+
        if (objpool.stack.mem == NULL)
        {
            objpool.stack.size = 0UL;
            return objpool;
-        }
+        }
        else
        {
            for (size_t i=0; i<objpool.freeBlocks; i++)
@@ -571,7 +571,7 @@ ObjPool CreateObjPool(const size_t objsize, const size_t len)
                union ObjInfo block = { .byte = &objpool.stack.mem[i*objpool.objSize] };
                *block.index = i + 1;
            }
-
+
            objpool.stack.base = objpool.stack.mem;
            return objpool;
        }
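The loop touched by this hunk threads an implicit free list through the buffer itself: free slot `i` stores the integer `i + 1`, the index of the next free slot (written through `union ObjInfo`). A standalone model of that encoding, using plain `size_t` slots:

```c
#include <stdio.h>

#define SLOTS 4

int main(void)
{
    // Each free slot holds the index of the next free slot, mirroring
    // *block.index = i + 1 in CreateObjPool().
    size_t pool[SLOTS];
    for (size_t i = 0; i < SLOTS; i++) pool[i] = i + 1;

    // Walk the implicit chain from slot 0: 0 -> 1 -> 2 -> 3 -> end.
    size_t head = 0;
    while (head < SLOTS)
    {
        printf("free slot %zu, next index %zu\n", head, pool[head]);
        head = pool[head];
    }
    return 0;
}
```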
@@ -581,7 +581,7 @@ ObjPool CreateObjPool(const size_t objsize, const size_t len)
ObjPool CreateObjPoolFromBuffer(void *const buf, const size_t objsize, const size_t len)
{
    ObjPool objpool = { 0 };
-
+
    // If the object size isn't large enough to align to a size_t, then we can't use it.
    if ((buf == NULL) || (len == 0UL) || (objsize < sizeof(size_t)) || (objsize*len != __AlignSize(objsize, sizeof(size_t))*len)) return objpool;
    else
@@ -589,13 +589,13 @@ ObjPool CreateObjPoolFromBuffer(void *const buf, const size_t objsize, const siz
        objpool.objSize = __AlignSize(objsize, sizeof(size_t));
        objpool.stack.size = objpool.freeBlocks = len;
        objpool.stack.mem = buf;
-
+
        for (size_t i=0; i<objpool.freeBlocks; i++)
        {
            union ObjInfo block = { .byte = &objpool.stack.mem[i*objpool.objSize] };
            *block.index = i + 1;
        }
-
+
        objpool.stack.base = objpool.stack.mem;
        return objpool;
    }
@@ -623,7 +623,7 @@ void *ObjPoolAlloc(ObjPool *const objpool)
        // ret = Head == ret = &pool[0];
        union ObjInfo ret = { .byte = objpool->stack.base };
        objpool->freeBlocks--;
-
+
        // after allocating, we set head to the address of the index that *Head holds.
        // Head = &pool[*Head * pool.objsize];
        objpool->stack.base = (objpool->freeBlocks != 0UL)? objpool->stack.mem + (*ret.index*objpool->objSize) : NULL;
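To close the loop on the object-pool changes: `ObjPoolAlloc` pops the head of that index chain in O(1), as the hunk above shows. A hedged end-to-end sketch; `ObjPoolCleanUp` is the prototype visible earlier in this patch, while `DestroyObjPool` is assumed from the library's create/destroy pairing and is not shown here:

```c
#define RMEM_IMPLEMENTATION
#include "rmem.h"

typedef struct { float x, y; } Vec2;   // arbitrary example object type

int main(void)
{
    ObjPool pool = CreateObjPool(sizeof(Vec2), 128);   // 128 fixed-size slots
    if (pool.stack.mem == NULL) return 1;

    Vec2 *v = ObjPoolAlloc(&pool);     // O(1) pop of the free-index chain
    if (v != NULL) { v->x = 1.0f; v->y = 2.0f; }

    ObjPoolCleanUp(&pool, (void **)&v);   // returns the slot and NULLs v
    DestroyObjPool(&pool);                // assumed counterpart to CreateObjPool
    return 0;
}
```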