mirror of https://github.com/adtools/clib2.git synced 2025-12-08 14:59:05 +00:00

Added a slab allocator

Added a slab allocator which replaces the use of memory pools or plain AllocMem() calls. To activate the slab allocator, choose a slab size (e.g. 2048 bytes or 4096 bytes) and declare a global variable like this:

   ULONG __slab_max_size = 2048;

Memory allocations no larger than the slab size will be made from "slabs", i.e. large chunks of memory of the given size. Larger allocations will be managed separately.
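
For illustration, a complete program that opts in might look like the sketch below. This is a minimal, hypothetical example assuming AmigaOS headers and this c.lib revision; the chunk sizes mentioned in the comments follow from the power-of-two size classes used by the new allocator.

   /* Minimal sketch: enabling the clib2 slab allocator. */
   #include <exec/types.h>
   #include <stdlib.h>

   /* Defining this global enables the slab allocator; allocations of up
    * to 2048 bytes will be served from slabs.
    */
   ULONG __slab_max_size = 2048;

   int main(void)
   {
       char * small = malloc(100);  /* from a slab, in a 128-byte chunk */
       char * large = malloc(5000); /* exceeds the slab size; tracked separately */

       free(large);
       free(small);

       return EXIT_SUCCESS;
   }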
obarthel committed 2016-11-18 17:22:21 +01:00
parent eb223f269d
commit fbc8694c49
22 changed files with 786 additions and 495 deletions

View File

@@ -326,6 +326,7 @@ C_LIB = \
 stdlib_dosbase.o \
 stdlib_exit.o \
 stdlib_free.o \
+stdlib_free_unused_slabs.o \
 stdlib_getdefstacksize.o \
 stdlib_getenv.o \
 stdlib_getmemstats.o \
@@ -368,6 +369,7 @@ C_LIB = \
 stdlib_set_process_window.o \
 stdlib_shell_escape.o \
 stdlib_showerror.o \
+stdlib_slab.o \
 stdlib_slab_max_size.o \
 stdlib_srand.o \
 stdlib_stacksize.o \
@@ -1128,6 +1130,10 @@ $(LIBC_OBJS)/stdlib_free.o : stdlib_free.c stdlib_memory.h
 $(LIBC_OBJS)/stdlib_malloc.o : stdlib_malloc.c stdlib_memory.h
+$(LIBC_OBJS)/stdlib_slab.o : stdlib_slab.c stdlib_memory.h
+$(LIBC_OBJS)/stdlib_free_unused_slabs.o : stdlib_free_unused_slabs.c stdlib_memory.h
 $(LIBC_OBJS)/stdlib_realloc.o : stdlib_realloc.c stdlib_memory.h
 $(LIBC_OBJS)/stdlib_red_black.o : stdlib_red_black.c stdlib_memory.h

View File

@@ -1,6 +1,6 @@
 #define VERSION 1
-#define REVISION 206
-#define DATE "24.4.2015"
-#define VERS "amiga.lib 1.206"
-#define VSTRING "amiga.lib 1.206 (24.4.2015)\r\n"
-#define VERSTAG "\0$VER: amiga.lib 1.206 (24.4.2015)"
+#define REVISION 207
+#define DATE "18.11.2016"
+#define VERS "amiga.lib 1.207"
+#define VSTRING "amiga.lib 1.207 (18.11.2016)\r\n"
+#define VERSTAG "\0$VER: amiga.lib 1.207 (18.11.2016)"

View File

@@ -1 +1 @@
-206
+207

View File

@@ -1,6 +1,6 @@
 #define VERSION 1
-#define REVISION 206
-#define DATE "24.4.2015"
-#define VERS "c.lib 1.206"
-#define VSTRING "c.lib 1.206 (24.4.2015)\r\n"
-#define VERSTAG "\0$VER: c.lib 1.206 (24.4.2015)"
+#define REVISION 207
+#define DATE "18.11.2016"
+#define VERS "c.lib 1.207"
+#define VSTRING "c.lib 1.207 (18.11.2016)\r\n"
+#define VERSTAG "\0$VER: c.lib 1.207 (18.11.2016)"

View File

@@ -1 +1 @@
-206
+207

View File

@@ -1,3 +1,17 @@
+c.lib 1.207 (18.11.2016)
+- Added a slab allocator which replaces the use of memory pools or the
+  plain AllocMem() operations, respectively. In order to activate the
+  slab allocator, choose a slab size (e.g. 2048 bytes or 4096 bytes)
+  and declare a global variable like this:
+     ULONG __slab_max_size = 2048;
+  Memory allocations smaller than the slab size will be made from
+  "slabs", i.e. large chunks of memory of the given size. Larger
+  allocations will be managed separately.
 m.lib 1.206 (24.4.2015)
 - The fscanf() family failed to parse and convert %f parameters correctly

View File

@@ -1,6 +1,6 @@
 #define VERSION 1
-#define REVISION 206
-#define DATE "24.4.2015"
-#define VERS "debug.lib 1.206"
-#define VSTRING "debug.lib 1.206 (24.4.2015)\r\n"
-#define VERSTAG "\0$VER: debug.lib 1.206 (24.4.2015)"
+#define REVISION 207
+#define DATE "18.11.2016"
+#define VERS "debug.lib 1.207"
+#define VSTRING "debug.lib 1.207 (18.11.2016)\r\n"
+#define VERSTAG "\0$VER: debug.lib 1.207 (18.11.2016)"

View File

@@ -1 +1 @@
-206
+207

View File

@@ -212,6 +212,7 @@ C_LIB := \
 stdlib_dosbase.o \
 stdlib_exit.o \
 stdlib_free.o \
+stdlib_free_unused_slabs.o \
 stdlib_getdefstacksize.o \
 stdlib_getenv.o \
 stdlib_getmemstats.o \
@@ -255,6 +256,7 @@ C_LIB := \
 stdlib_shared_objs.o \
 stdlib_shell_escape.o \
 stdlib_showerror.o \
+stdlib_slab.o \
 stdlib_slab_max_size.o \
 stdlib_srand.o \
 stdlib_stacksize.o \

View File

@@ -1,6 +1,6 @@
 #define VERSION 1
-#define REVISION 206
-#define DATE "24.4.2015"
-#define VERS "m.lib 1.206"
-#define VSTRING "m.lib 1.206 (24.4.2015)\r\n"
-#define VERSTAG "\0$VER: m.lib 1.206 (24.4.2015)"
+#define REVISION 207
+#define DATE "18.11.2016"
+#define VERS "m.lib 1.207"
+#define VSTRING "m.lib 1.207 (18.11.2016)\r\n"
+#define VERSTAG "\0$VER: m.lib 1.207 (18.11.2016)"

View File

@@ -1 +1 @@
-206
+207

View File

@@ -1,6 +1,6 @@
 #define VERSION 1
-#define REVISION 206
-#define DATE "24.4.2015"
-#define VERS "m881.lib 1.206"
-#define VSTRING "m881.lib 1.206 (24.4.2015)\r\n"
-#define VERSTAG "\0$VER: m881.lib 1.206 (24.4.2015)"
+#define REVISION 207
+#define DATE "18.11.2016"
+#define VERS "m881.lib 1.207"
+#define VSTRING "m881.lib 1.207 (18.11.2016)\r\n"
+#define VERSTAG "\0$VER: m881.lib 1.207 (18.11.2016)"

View File

@@ -1 +1 @@
-206
+207

View File

@@ -1,6 +1,6 @@
 #define VERSION 1
-#define REVISION 206
-#define DATE "24.4.2015"
-#define VERS "net.lib 1.206"
-#define VSTRING "net.lib 1.206 (24.4.2015)\r\n"
-#define VERSTAG "\0$VER: net.lib 1.206 (24.4.2015)"
+#define REVISION 207
+#define DATE "18.11.2016"
+#define VERS "net.lib 1.207"
+#define VSTRING "net.lib 1.207 (18.11.2016)\r\n"
+#define VERSTAG "\0$VER: net.lib 1.207 (18.11.2016)"

View File

@@ -1 +1 @@
-206
+207

View File

@@ -513,6 +513,7 @@ STDLIB_OBJ = \
 stdlib_div.o \
 stdlib_exit.o \
 stdlib_free.o \
+stdlib_free_unused_slabs.o \
 stdlib_getdefstacksize.o \
 stdlib_getenv.o \
 stdlib_getmemstats.o \
@@ -546,6 +547,7 @@ STDLIB_OBJ = \
 stdlib_set_process_window.o \
 stdlib_shell_escape.o \
 stdlib_showerror.o \
+stdlib_slab.o \
 stdlib_slab_max_size.o \
 stdlib_srand.o \
 stdlib_arg.o \
@@ -795,6 +797,10 @@ stdlib_free.o : stdlib_free.c stdlib_memory.h
 stdlib_malloc.o : stdlib_malloc.c stdlib_memory.h
+stdlib_slab.o : stdlib_slab.c stdlib_memory.h
+stdlib_free_unused_slabs.o : stdlib_free_unused_slabs.c stdlib_memory.h
 stdlib_realloc.o : stdlib_realloc.c stdlib_memory.h
 stdlib_red_black.o : stdlib_red_black.c stdlib_memory.h

View File

@@ -367,111 +367,24 @@ remove_and_free_memory_node(struct MemoryNode * mn)
}
#endif /* __MEM_DEBUG */
/* Are we using the slab allocator? */
if (__slab_data.sd_InUse)
#if defined(__USE_SLAB_ALLOCATOR)
{
assert( __slab_data.sd_MaxSlabSize > 0 );
/* Number of bytes to allocate exceeds the slab size?
* Then the chunk was allocated separately.
*/
if(allocation_size > __slab_data.sd_MaxSlabSize)
{
Remove((struct Node *)&mn[-1]);
__slab_data.sd_NumSingleAllocations--;
}
/* Otherwise the allocation should have come from a slab. */
/* Are we using the slab allocator? */
if (__slab_data.sd_InUse)
__slab_free(mn,allocation_size);
else if (__memory_pool != NULL)
FreePooled(__memory_pool,mn,allocation_size);
else
{
struct MinList * slab_list = NULL;
size_t entry_size;
ULONG chunk_size;
int slab_index;
/* Chunks must be at least as small as a MinNode, because
* that's what we use for keeping track of the chunks which
* are available for allocation within each slab.
*/
entry_size = allocation_size;
if(entry_size < sizeof(struct MinNode))
entry_size = sizeof(struct MinNode);
/* Find a slab which keeps track of chunks that are no
* larger than the amount of memory which needs to be
* released. We end up picking the smallest chunk
* size that still works.
*/
for(slab_index = 0, chunk_size = (1UL << slab_index) ;
slab_index < 31 ;
slab_index++, chunk_size += chunk_size)
{
assert( (chunk_size % sizeof(LONG)) == 0);
if(entry_size <= chunk_size)
{
slab_list = &__slab_data.sd_Slabs[slab_index];
break;
}
}
/* Find the slab which contains the memory chunk. */
if(slab_list != NULL)
{
struct SlabNode * sn;
BYTE * first_byte;
BYTE * last_byte;
for(sn = (struct SlabNode *)slab_list->mlh_Head ;
sn->sn_MinNode.mln_Succ != NULL ;
sn = (struct SlabNode *)sn->sn_MinNode.mln_Succ)
{
assert( sn->sn_ChunkSize == chunk_size );
first_byte = (BYTE *)&sn[1];
last_byte = &first_byte[__slab_data.sd_MaxSlabSize - chunk_size];
/* Is this where the chunk belongs? */
if(first_byte <= (BYTE *)mn && (BYTE *)mn <= last_byte)
{
AddTail((struct List *)&sn->sn_FreeList, (struct Node *)mn);
assert( sn->sn_UseCount > 0 );
sn->sn_UseCount--;
/* If this slab is empty, mark it as unused and
* allow it to be purged.
*/
if(sn->sn_UseCount == 0)
{
AddTail((struct List *)&__slab_data.sd_EmptySlabs,(struct Node *)&sn->sn_EmptyLink);
sn->sn_EmptyDecay = 1;
}
/* This slab now has room. Move it to front of the list
* so that searching for a free chunk will pick it
* first.
*/
if(sn != (struct SlabNode *)slab_list->mlh_Head)
{
Remove((struct Node *)sn);
AddHead((struct List *)slab_list, (struct Node *)sn);
}
break;
}
}
}
}
FreeMem(mn,allocation_size);
}
else if (__memory_pool != NULL)
#else
{
FreePooled(__memory_pool,mn,allocation_size);
}
else
{
FreeMem(mn,allocation_size);
if (__memory_pool != NULL)
FreePooled(__memory_pool,mn,allocation_size);
else
FreeMem(mn,allocation_size);
}
#endif /* __USE_SLAB_ALLOCATOR */
__current_memory_allocated -= allocation_size;
__current_num_memory_chunks_allocated--;

View File

@@ -72,47 +72,6 @@ struct MinList NOCOMMON __memory_list;
/****************************************************************************/
struct SlabData NOCOMMON __slab_data;
/****************************************************************************/
/* Free all currently unused slabs, regardless of whether they
* are ready to be purged (SlabNode.sn_EmptyDecay == 0).
*/
void
__free_unused_slabs(void)
{
struct MinNode * free_node;
struct MinNode * free_node_next;
struct SlabNode * sn;
__memory_lock();
for(free_node = (struct MinNode *)__slab_data.sd_EmptySlabs.mlh_Head ;
free_node->mln_Succ != NULL ;
free_node = free_node_next)
{
free_node_next = (struct MinNode *)free_node->mln_Succ;
/* free_node points to SlabNode.sn_EmptyLink, which
* directly follows the SlabNode.sn_MinNode.
*/
sn = (struct SlabNode *)&free_node[-1];
/* Unlink from list of empty slabs. */
Remove((struct Node *)free_node);
/* Unlink from list of slabs of the same size. */
Remove((struct Node *)sn);
FreeVec(sn);
}
__memory_unlock();
}
/****************************************************************************/
size_t
__get_allocation_size(size_t size)
{
@@ -183,255 +142,50 @@ __allocate_memory(size_t size,BOOL never_free,const char * UNUSED unused_file,in
}
#endif /* __MEM_DEBUG */
/* Are we using the slab allocator? */
if (__slab_data.sd_InUse)
#if defined(__USE_SLAB_ALLOCATOR)
{
mn = NULL;
assert( __slab_data.sd_MaxSlabSize > 0 );
/* Number of bytes to allocate exceeds the slab size?
* If so, allocate this memory chunk separately and
* keep track of it.
*/
if(allocation_size > __slab_data.sd_MaxSlabSize)
/* Are we using the slab allocator? */
if (__slab_data.sd_InUse)
{
mn = __slab_allocate(allocation_size);
}
else if (__memory_pool != NULL)
{
mn = AllocPooled(__memory_pool,allocation_size);
}
else
{
struct MinNode * single_allocation;
#if defined(__amigaos4__)
{
single_allocation = AllocVec(sizeof(*single_allocation) + allocation_size,MEMF_PRIVATE);
mn = AllocMem(allocation_size,MEMF_PRIVATE);
}
#else
{
single_allocation = AllocVec(sizeof(*single_allocation) + allocation_size,MEMF_ANY);
mn = AllocMem(allocation_size,MEMF_ANY);
}
#endif /* __amigaos4__ */
if(single_allocation != NULL)
{
AddTail((struct List *)&__slab_data.sd_SingleAllocations,(struct Node *)single_allocation);
__slab_data.sd_NumSingleAllocations++;
mn = (struct MemoryNode *)&single_allocation[1];
}
}
/* Otherwise allocate a chunk from a slab. */
}
#else
{
if(__memory_pool != NULL)
{
mn = AllocPooled(__memory_pool,allocation_size);
}
else
{
struct MinList * slab_list = NULL;
ULONG entry_size;
ULONG chunk_size;
int slab_index;
/* Chunks must be at least as small as a MinNode, because
* that's what we use for keeping track of the chunks which
* are available for allocation within each slab.
*/
entry_size = allocation_size;
if(entry_size < sizeof(struct MinNode))
entry_size = sizeof(struct MinNode);
/* Find a slab which keeps track of chunks that are no
* larger than the amount of memory which needs to be
* allocated. We end up picking the smallest chunk
* size that still works.
*/
for(slab_index = 0, chunk_size = (1UL << slab_index) ;
slab_index < 31 ;
slab_index++, chunk_size += chunk_size)
#if defined(__amigaos4__)
{
assert( (chunk_size % sizeof(LONG)) == 0);
if(entry_size <= chunk_size)
{
slab_list = &__slab_data.sd_Slabs[slab_index];
break;
}
mn = AllocMem(allocation_size,MEMF_PRIVATE);
}
if(slab_list != NULL)
#else
{
struct SlabNode * sn;
/* Find the first slab which has a free chunk and use it. */
for(sn = (struct SlabNode *)slab_list->mlh_Head ;
sn->sn_MinNode.mln_Succ != NULL ;
sn = (struct SlabNode *)sn->sn_MinNode.mln_Succ)
{
assert( sn->sn_ChunkSize == chunk_size );
mn = (struct MemoryNode *)RemHead((struct List *)&sn->sn_FreeList);
if(mn != NULL)
{
/* Was this slab empty before we began using it again? */
if(sn->sn_UseCount == 0)
{
/* Mark it as no longer empty. */
Remove((struct Node *)&sn->sn_EmptyLink);
sn->sn_EmptyDecay = 0;
}
sn->sn_UseCount++;
/* Is this slab now fully utilized? Move it to the
* end of the queue so that it will not be checked
* before other slabs of the same size have been
* tested. Those at the front of the queue should
* still have room left.
*/
if(sn->sn_UseCount == sn->sn_Count && sn != (struct SlabNode *)slab_list->mlh_TailPred)
{
Remove((struct Node *)sn);
AddTail((struct List *)slab_list, (struct Node *)sn);
}
break;
}
}
/* There is no slab with a free chunk? Then we might have to
* allocate a new one.
*/
if(mn == NULL)
{
struct MinNode * free_node;
struct MinNode * free_node_next;
struct SlabNode * new_sn = NULL;
/* Try to recycle an empty (unused) slab, if possible. */
for(free_node = (struct MinNode *)__slab_data.sd_EmptySlabs.mlh_Head ;
free_node->mln_Succ != NULL ;
free_node = free_node_next)
{
free_node_next = (struct MinNode *)free_node->mln_Succ;
/* free_node points to SlabNode.sn_EmptyLink, which
* directly follows the SlabNode.sn_MinNode.
*/
sn = (struct SlabNode *)&free_node[-1];
/* Is this empty slab ready to be reused? */
if(sn->sn_EmptyDecay == 0)
{
/* Unlink from list of empty slabs. */
Remove((struct Node *)free_node);
/* Unlink from list of slabs which keep chunks
* of the same size.
*/
Remove((struct Node *)sn);
new_sn = sn;
break;
}
}
/* We couldn't reuse an empty slab? Then we'll have to allocate
* memory for another one.
*/
if(new_sn == NULL)
{
#if defined(__amigaos4__)
{
new_sn = (struct SlabNode *)AllocVec(sizeof(*sn) + __slab_data.sd_MaxSlabSize,MEMF_PRIVATE);
}
#else
{
new_sn = (struct SlabNode *)AllocVec(sizeof(*sn) + __slab_data.sd_MaxSlabSize,MEMF_ANY);
}
#endif /* __amigaos4__ */
}
if(new_sn != NULL)
{
struct MinNode * free_chunk;
ULONG num_free_chunks = 0;
BYTE * first_byte;
BYTE * last_byte;
/* Split up the slab memory into individual chunks
* of the same size and keep track of them
* in the free list. The memory managed by
* this slab immediately follows the
* SlabNode header.
*/
first_byte = (BYTE *)&new_sn[1];
last_byte = &first_byte[__slab_data.sd_MaxSlabSize - chunk_size];
for(free_chunk = (struct MinNode *)first_byte ;
free_chunk <= (struct MinNode *)last_byte;
free_chunk = (struct MinNode *)(((BYTE *)free_chunk) + chunk_size))
{
AddTail((struct List *)&new_sn->sn_FreeList, (struct Node *)free_chunk);
num_free_chunks++;
}
/* Grab the first free chunk (there has to be one). */
mn = (struct MemoryNode *)RemHead((struct List *)&new_sn->sn_FreeList);
assert( mn != NULL );
/* Set up the new slab and put it where it belongs. */
new_sn->sn_EmptyDecay = 0;
new_sn->sn_UseCount = 1;
new_sn->sn_Count = num_free_chunks;
new_sn->sn_ChunkSize = chunk_size;
AddHead((struct List *)slab_list,(struct Node *)&new_sn);
}
/* Mark unused slabs for purging, and purge those which
* are ready to be purged.
*/
for(free_node = (struct MinNode *)__slab_data.sd_EmptySlabs.mlh_Head ;
free_node->mln_Succ != NULL ;
free_node = free_node_next)
{
free_node_next = (struct MinNode *)free_node->mln_Succ;
/* free_node points to SlabNode.sn_EmptyLink, which
* directly follows the SlabNode.sn_MinNode.
*/
sn = (struct SlabNode *)&free_node[-1];
/* Is this empty slab ready to be purged? */
if(sn->sn_EmptyDecay == 0)
{
/* Unlink from list of empty slabs. */
Remove((struct Node *)free_node);
/* Unlink from list of slabs of the same size. */
Remove((struct Node *)sn);
FreeVec(sn);
}
/* Give it another chance. */
else
{
sn->sn_EmptyDecay--;
}
}
}
mn = AllocMem(allocation_size,MEMF_ANY);
}
#endif /* __amigaos4__ */
}
}
else if (__memory_pool != NULL)
{
mn = AllocPooled(__memory_pool,allocation_size);
}
else
{
#if defined(__amigaos4__)
{
mn = AllocMem(allocation_size,MEMF_PRIVATE);
}
#else
{
mn = AllocMem(allocation_size,MEMF_ANY);
}
#endif /* __amigaos4__ */
}
#endif /* __USE_SLAB_ALLOCATOR */
if(mn == NULL)
{
@@ -626,67 +380,63 @@ STDLIB_DESTRUCTOR(stdlib_memory_exit)
}
#endif /* __MEM_DEBUG */
/* Is the slab memory allocator enabled? */
if (__slab_data.sd_InUse)
#if defined(__USE_SLAB_ALLOCATOR)
{
struct SlabNode * sn;
struct SlabNode * sn_next;
struct MinNode * mn;
struct MinNode * mn_next;
int i;
/* Free the memory allocated for each slab. */
for(i = 0 ; i < 31 ; i++)
/* Is the slab memory allocator enabled? */
if (__slab_data.sd_InUse)
{
for(sn = (struct SlabNode *)__slab_data.sd_Slabs[i].mlh_Head ;
sn->sn_MinNode.mln_Succ != NULL ;
sn = sn_next)
__slab_exit();
}
else
{
if (__memory_pool != NULL)
{
sn_next = (struct SlabNode *)sn->sn_MinNode.mln_Succ;
NewList((struct List *)&__memory_list);
FreeVec(sn);
DeletePool(__memory_pool);
__memory_pool = NULL;
}
else if (__memory_list.mlh_Head != NULL)
{
#ifdef __MEM_DEBUG
{
while(NOT IsListEmpty((struct List *)&__memory_list))
__free_memory_node((struct MemoryNode *)__memory_list.mlh_Head,__FILE__,__LINE__);
}
#else
{
while(NOT IsListEmpty((struct List *)&__memory_list))
__free_memory_node((struct MemoryNode *)__memory_list.mlh_Head,NULL,0);
}
#endif /* __MEM_DEBUG */
}
NewList((struct List *)&__slab_data.sd_Slabs[i]);
}
/* Free the memory allocated for each allocation which did not
* go into a slab.
*/
for(mn = __slab_data.sd_SingleAllocations.mlh_Head ; mn->mln_Succ != NULL ; mn = mn_next)
{
mn_next = mn->mln_Succ;
FreeVec(mn);
}
NewList((struct List *)&__slab_data.sd_SingleAllocations);
NewList((struct List *)&__slab_data.sd_EmptySlabs);
__slab_data.sd_InUse = FALSE;
}
else if (__memory_pool != NULL)
#else
{
NewList((struct List *)&__memory_list);
if (__memory_pool != NULL)
{
NewList((struct List *)&__memory_list);
DeletePool(__memory_pool);
__memory_pool = NULL;
}
else if (__memory_list.mlh_Head != NULL)
{
#ifdef __MEM_DEBUG
{
while(NOT IsListEmpty((struct List *)&__memory_list))
__free_memory_node((struct MemoryNode *)__memory_list.mlh_Head,__FILE__,__LINE__);
DeletePool(__memory_pool);
__memory_pool = NULL;
}
#else
else if (__memory_list.mlh_Head != NULL)
{
while(NOT IsListEmpty((struct List *)&__memory_list))
__free_memory_node((struct MemoryNode *)__memory_list.mlh_Head,NULL,0);
#ifdef __MEM_DEBUG
{
while(NOT IsListEmpty((struct List *)&__memory_list))
__free_memory_node((struct MemoryNode *)__memory_list.mlh_Head,__FILE__,__LINE__);
}
#else
{
while(NOT IsListEmpty((struct List *)&__memory_list))
__free_memory_node((struct MemoryNode *)__memory_list.mlh_Head,NULL,0);
}
#endif /* __MEM_DEBUG */
}
#endif /* __MEM_DEBUG */
}
#endif /* __USE_SLAB_ALLOCATOR */
#if defined(__THREAD_SAFE)
{
@@ -722,42 +472,46 @@ STDLIB_CONSTRUCTOR(stdlib_memory_init)
NewList((struct List *)&__memory_list);
/* Enable the slab memory allocator? */
if(__slab_max_size > 0)
#if defined(__USE_SLAB_ALLOCATOR)
{
size_t size;
/* If the maximum allocation size to be made from the slab
* is not already a power of 2, round it up. We do not
* support allocations larger than 2^31, and the maximum
* allocation size should be much smaller.
*
* Note that the maximum allocation size also defines the
* amount of memory which each slab manages.
*/
size = sizeof(struct MinNode);
while(size < __slab_max_size && (size & 0x80000000) == 0)
size += size;
/* If the slab size looks sound, enable the slab memory allocator. */
if((size & 0x80000000) == 0)
/* ZZZ this is just for the purpose of testing */
#if 0
{
int i;
TEXT slab_size_var[20];
assert( size <= __slab_max_size );
if(GetVar("SLAB_SIZE", slab_size_var, sizeof(slab_size_var), 0) > 0)
{
LONG value;
/* Start with an empty slab list. */
for(i = 0 ; i < 31 ; i++)
NewList((struct List *)&__slab_data.sd_Slabs[i]);
if(StrToLong(slab_size_var,&value) > 0 && value > 0)
__slab_max_size = (size_t)value;
}
}
#endif
NewList((struct List *)&__slab_data.sd_SingleAllocations);
NewList((struct List *)&__slab_data.sd_EmptySlabs);
__slab_data.sd_MaxSlabSize = size;
__slab_data.sd_InUse = TRUE;
/* Enable the slab memory allocator? */
if(__slab_max_size > 0)
{
__slab_init(__slab_max_size);
}
else
{
#if defined(__amigaos4__)
{
__memory_pool = CreatePool(MEMF_PRIVATE,(ULONG)__default_pool_size,(ULONG)__default_puddle_size);
}
#else
{
/* There is no support for memory pools in the operating system
* prior to Kickstart 3.0 (V39).
*/
if(((struct Library *)SysBase)->lib_Version >= 39)
__memory_pool = CreatePool(MEMF_ANY,(ULONG)__default_pool_size,(ULONG)__default_puddle_size);
}
#endif /* __amigaos4__ */
}
}
else
#else
{
#if defined(__amigaos4__)
{
@@ -773,6 +527,7 @@ STDLIB_CONSTRUCTOR(stdlib_memory_init)
}
#endif /* __amigaos4__ */
}
#endif /* __USE_SLAB_ALLOCATOR */
success = TRUE;

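The restructured stdlib_memory.c above boils down to a three-tier dispatch: the slab allocator when it is enabled, otherwise the memory pool when one was created, otherwise a plain system allocation. The stand-alone sketch below illustrates that fallback order; the names and stub bodies are stand-ins for __slab_data.sd_InUse, __memory_pool, __slab_allocate(), AllocPooled() and AllocMem(), not the real calls.

   #include <stdio.h>
   #include <stdlib.h>

   static int    slab_in_use = 1;     /* stand-in for __slab_data.sd_InUse */
   static void * memory_pool = NULL;  /* stand-in for __memory_pool */

   static void * slab_allocate(size_t n)   { return malloc(n); } /* stub */
   static void * pool_allocate(size_t n)   { return malloc(n); } /* stub */
   static void * system_allocate(size_t n) { return malloc(n); } /* stub */

   /* Mirrors the fallback order used by __allocate_memory() and
    * __free_memory_node() after this commit: slab first, then pool,
    * then plain allocation.
    */
   static void * allocate_memory(size_t allocation_size)
   {
       if(slab_in_use)
           return slab_allocate(allocation_size);
       else if(memory_pool != NULL)
           return pool_allocate(allocation_size);
       else
           return system_allocate(allocation_size);
   }

   int main(void)
   {
       void * p = allocate_memory(100);

       printf("allocated at %p\n",p);
       free(p);

       return 0;
   }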
View File

@@ -58,6 +58,14 @@
*/
/*#define __USE_MEM_TREES*/
/****************************************************************************/
/*
* Uncomment this to enable the slab allocator.
*/
#define __USE_SLAB_ALLOCATOR
/****************************************************************************/
/*
@@ -144,10 +152,6 @@ extern char * __getcwd(char * buffer,size_t buffer_size,const char *file,int lin
/****************************************************************************/
extern void __free_unused_slabs(void);
/****************************************************************************/
struct MemoryNode
{
struct MinNode mn_MinNode;
@@ -285,6 +289,14 @@ extern ULONG NOCOMMON __slab_max_size;
/****************************************************************************/
extern void __free_unused_slabs(void);
extern void * __slab_allocate(size_t allocation_size);
extern void __slab_free(void * address,size_t allocation_size);
extern void __slab_init(size_t slab_size);
extern void __slab_exit(void);
/****************************************************************************/
extern struct MemoryTree NOCOMMON __memory_tree;
extern struct MinList NOCOMMON __memory_list;
extern APTR NOCOMMON __memory_pool;

library/stdlib_slab.c (new file, 583 lines)
View File

@@ -0,0 +1,583 @@
/*
* :ts=4
*
* Portable ISO 'C' (1994) runtime library for the Amiga computer
* Copyright (c) 2002-2015 by Olaf Barthel <obarthel (at) gmx.net>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* - Neither the name of Olaf Barthel nor the names of contributors
* may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*#define DEBUG*/
#ifndef _STDLIB_HEADERS_H
#include "stdlib_headers.h"
#endif /* _STDLIB_HEADERS_H */
/****************************************************************************/
#ifndef _STDLIB_MEMORY_H
#include "stdlib_memory.h"
#endif /* _STDLIB_MEMORY_H */
/****************************************************************************/
struct SlabData NOCOMMON __slab_data;
/****************************************************************************/
void *
__slab_allocate(size_t allocation_size)
{
void * allocation = NULL;
D(("allocating %lu bytes of memory",allocation_size));
assert( __slab_data.sd_MaxSlabSize > 0 );
/* Number of bytes to allocate exceeds the slab size?
* If so, allocate this memory chunk separately and
* keep track of it.
*/
if(allocation_size > __slab_data.sd_MaxSlabSize)
{
struct MinNode * single_allocation;
D(("allocation size is > %ld; this will be stored separately",__slab_data.sd_MaxSlabSize));
D(("allocating %ld (MinNode) + %ld = %ld bytes",sizeof(*single_allocation),allocation_size,sizeof(*single_allocation) + allocation_size));
#if defined(__amigaos4__)
{
single_allocation = AllocVec(sizeof(*single_allocation) + allocation_size,MEMF_PRIVATE);
}
#else
{
single_allocation = AllocVec(sizeof(*single_allocation) + allocation_size,MEMF_ANY);
}
#endif /* __amigaos4__ */
if(single_allocation != NULL)
{
D(("single allocation = 0x%08lx",allocation));
AddTail((struct List *)&__slab_data.sd_SingleAllocations,(struct Node *)single_allocation);
__slab_data.sd_NumSingleAllocations++;
allocation = &single_allocation[1];
D(("single allocation succeeded at 0x%08lx (number of single allocations = %lu)", allocation, __slab_data.sd_NumSingleAllocations));
}
else
{
D(("single allocation failed"));
}
}
/* Otherwise allocate a chunk from a slab. */
else
{
struct MinList * slab_list = NULL;
ULONG entry_size;
ULONG chunk_size;
int slab_index;
D(("allocation size is <= %ld; this will be allocated from a slab",__slab_data.sd_MaxSlabSize));
/* Chunks must be at least as small as a MinNode, because
* that's what we use for keeping track of the chunks which
* are available for allocation within each slab.
*/
entry_size = allocation_size;
if(entry_size < sizeof(struct MinNode))
entry_size = sizeof(struct MinNode);
/* Find a slab which keeps track of chunks that are no
* larger than the amount of memory which needs to be
* allocated. We end up picking the smallest chunk
* size that still works.
*/
for(slab_index = 2, chunk_size = (1UL << slab_index) ;
slab_index < 31 ;
slab_index++, chunk_size += chunk_size)
{
if(entry_size <= chunk_size)
{
D(("using slab #%ld (%lu bytes per chunk)", slab_index, chunk_size));
assert( (chunk_size % sizeof(LONG)) == 0 );
slab_list = &__slab_data.sd_Slabs[slab_index];
break;
}
}
if(slab_list != NULL)
{
struct SlabNode * sn;
SHOWVALUE(chunk_size);
/* Find the first slab which has a free chunk and use it. */
for(sn = (struct SlabNode *)slab_list->mlh_Head ;
sn->sn_MinNode.mln_Succ != NULL ;
sn = (struct SlabNode *)sn->sn_MinNode.mln_Succ)
{
D(("slab = 0x%08lx, chunk size = %ld", sn, sn->sn_ChunkSize));
assert( sn->sn_ChunkSize == chunk_size );
allocation = (struct MemoryNode *)RemHead((struct List *)&sn->sn_FreeList);
if(allocation != NULL)
{
D(("allocation succeeded at 0x%08lx in slab 0x%08lx (slab use count = %lu)",allocation,sn,sn->sn_UseCount));
/* Was this slab empty before we began using it again? */
if(sn->sn_UseCount == 0)
{
D(("slab is no longer empty"));
/* Mark it as no longer empty. */
Remove((struct Node *)&sn->sn_EmptyLink);
sn->sn_EmptyDecay = 0;
}
sn->sn_UseCount++;
/* Is this slab now fully utilized? Move it to the
* end of the queue so that it will not be checked
* before other slabs of the same size have been
* tested. Those at the front of the queue should
* still have room left.
*/
if(sn->sn_UseCount == sn->sn_Count && sn != (struct SlabNode *)slab_list->mlh_TailPred)
{
D(("slab is full"));
Remove((struct Node *)sn);
AddTail((struct List *)slab_list, (struct Node *)sn);
}
break;
}
}
/* There is no slab with a free chunk? Then we might have to
* allocate a new one.
*/
if(allocation == NULL)
{
struct MinNode * free_node;
struct MinNode * free_node_next;
struct SlabNode * new_sn = NULL;
BOOL purge = FALSE;
D(("no slab is available which still has free room"));
/* Try to recycle an empty (unused) slab, if possible. */
for(free_node = (struct MinNode *)__slab_data.sd_EmptySlabs.mlh_Head ;
free_node->mln_Succ != NULL ;
free_node = free_node_next)
{
free_node_next = (struct MinNode *)free_node->mln_Succ;
/* free_node points to SlabNode.sn_EmptyLink, which
* directly follows the SlabNode.sn_MinNode.
*/
sn = (struct SlabNode *)&free_node[-1];
/* Is this empty slab ready to be reused? */
if(sn->sn_EmptyDecay == 0)
{
/* Unlink from list of empty slabs. */
Remove((struct Node *)free_node);
/* Unlink from list of slabs which keep chunks
* of the same size.
*/
Remove((struct Node *)sn);
D(("reusing a slab"));
new_sn = sn;
break;
}
}
/* We couldn't reuse an empty slab? Then we'll have to allocate
* memory for another one.
*/
if(new_sn == NULL)
{
D(("no slab is available for reuse; allocating a new slab (%lu bytes)",sizeof(*sn) + __slab_data.sd_MaxSlabSize));
#if defined(__amigaos4__)
{
new_sn = (struct SlabNode *)AllocVec(sizeof(*sn) + __slab_data.sd_MaxSlabSize,MEMF_PRIVATE);
}
#else
{
new_sn = (struct SlabNode *)AllocVec(sizeof(*sn) + __slab_data.sd_MaxSlabSize,MEMF_ANY);
}
#endif /* __amigaos4__ */
if(new_sn == NULL)
D(("slab allocation failed"));
purge = TRUE;
}
if(new_sn != NULL)
{
struct MinNode * free_chunk;
ULONG num_free_chunks = 0;
BYTE * first_byte;
BYTE * last_byte;
D(("setting up slab 0x%08lx", new_sn));
assert( chunk_size <= __slab_data.sd_MaxSlabSize );
memset(new_sn,0,sizeof(*new_sn));
NewList((struct List *)&new_sn->sn_FreeList);
/* Split up the slab memory into individual chunks
* of the same size and keep track of them
* in the free list. The memory managed by
* this slab immediately follows the
* SlabNode header.
*/
first_byte = (BYTE *)&new_sn[1];
last_byte = &first_byte[__slab_data.sd_MaxSlabSize - chunk_size];
for(free_chunk = (struct MinNode *)first_byte ;
free_chunk <= (struct MinNode *)last_byte;
free_chunk = (struct MinNode *)(((BYTE *)free_chunk) + chunk_size))
{
AddTail((struct List *)&new_sn->sn_FreeList, (struct Node *)free_chunk);
num_free_chunks++;
}
D(("slab contains %lu chunks, %lu bytes each",num_free_chunks,chunk_size));
/* Grab the first free chunk (there has to be one). */
allocation = (struct MemoryNode *)RemHead((struct List *)&new_sn->sn_FreeList);
D(("allocation succeeded at 0x%08lx in slab 0x%08lx (slab use count = %lu)",allocation,new_sn,new_sn->sn_UseCount+1));
assert( allocation != NULL );
/* Set up the new slab and put it where it belongs. */
new_sn->sn_EmptyDecay = 0;
new_sn->sn_UseCount = 1;
new_sn->sn_Count = num_free_chunks;
new_sn->sn_ChunkSize = chunk_size;
SHOWVALUE(new_sn->sn_ChunkSize);
AddHead((struct List *)slab_list,(struct Node *)new_sn);
}
/* Mark unused slabs for purging, and purge those which
* are ready to be purged.
*/
if(purge)
{
D(("purging empty slabs"));
for(free_node = (struct MinNode *)__slab_data.sd_EmptySlabs.mlh_Head ;
free_node->mln_Succ != NULL ;
free_node = free_node_next)
{
free_node_next = (struct MinNode *)free_node->mln_Succ;
/* free_node points to SlabNode.sn_EmptyLink, which
* directly follows the SlabNode.sn_MinNode.
*/
sn = (struct SlabNode *)&free_node[-1];
/* Is this empty slab ready to be purged? */
if(sn->sn_EmptyDecay == 0)
{
D(("freeing empty slab"));
/* Unlink from list of empty slabs. */
Remove((struct Node *)free_node);
/* Unlink from list of slabs of the same size. */
Remove((struct Node *)sn);
FreeVec(sn);
}
/* Give it another chance. */
else
{
sn->sn_EmptyDecay--;
}
}
}
}
}
else
{
D(("no matching slab found"));
}
}
return(allocation);
}
/****************************************************************************/
void
__slab_free(void * address,size_t allocation_size)
{
D(("freeing allocation at 0x%08lx, %lu bytes",address,allocation_size));
assert( __slab_data.sd_MaxSlabSize > 0 );
/* Number of bytes allocated exceeds the slab size?
* Then the chunk was allocated separately.
*/
if(allocation_size > __slab_data.sd_MaxSlabSize)
{
struct MinNode * mn = address;
D(("allocation size is > %ld; this was stored separately",__slab_data.sd_MaxSlabSize));
Remove((struct Node *)&mn[-1]);
FreeVec(&mn[-1]);
assert( __slab_data.sd_NumSingleAllocations > 0 );
__slab_data.sd_NumSingleAllocations--;
D(("number of single allocations = %ld", __slab_data.sd_NumSingleAllocations));
}
/* Otherwise the allocation should have come from a slab. */
else
{
struct MinList * slab_list = NULL;
size_t entry_size;
ULONG chunk_size;
int slab_index;
D(("allocation size is <= %ld; this was allocated from a slab",__slab_data.sd_MaxSlabSize));
/* Chunks must be at least as small as a MinNode, because
* that's what we use for keeping track of the chunks which
* are available for allocation within each slab.
*/
entry_size = allocation_size;
if(entry_size < sizeof(struct MinNode))
entry_size = sizeof(struct MinNode);
/* Find a slab which keeps track of chunks that are no
* larger than the amount of memory which needs to be
* released. We end up picking the smallest chunk
* size that still works.
*/
for(slab_index = 2, chunk_size = (1UL << slab_index) ;
slab_index < 31 ;
slab_index++, chunk_size += chunk_size)
{
if(entry_size <= chunk_size)
{
D(("using slab #%ld (%ld bytes per chunk)", slab_index, chunk_size));
assert( (chunk_size % sizeof(LONG)) == 0 );
slab_list = &__slab_data.sd_Slabs[slab_index];
break;
}
}
/* Find the slab which contains the memory chunk. */
if(slab_list != NULL)
{
const size_t usable_range = __slab_data.sd_MaxSlabSize - chunk_size;
struct SlabNode * sn;
BYTE * first_byte;
BYTE * last_byte;
BOOL freed = FALSE;
assert( chunk_size <= __slab_data.sd_MaxSlabSize );
for(sn = (struct SlabNode *)slab_list->mlh_Head ;
sn->sn_MinNode.mln_Succ != NULL ;
sn = (struct SlabNode *)sn->sn_MinNode.mln_Succ)
{
SHOWVALUE(sn->sn_ChunkSize);
assert( sn->sn_ChunkSize == chunk_size );
first_byte = (BYTE *)&sn[1];
last_byte = &first_byte[usable_range];
/* Is this where the chunk belongs? */
if(first_byte <= (BYTE *)address && (BYTE *)address <= last_byte)
{
D(("allocation is part of slab 0x%08lx (slab use count = %ld)",sn,sn->sn_UseCount));
AddTail((struct List *)&sn->sn_FreeList, (struct Node *)address);
assert( sn->sn_UseCount > 0 );
sn->sn_UseCount--;
/* If this slab is empty, mark it as unused and
* allow it to be purged.
*/
if(sn->sn_UseCount == 0)
{
D(("slab is now empty"));
AddTail((struct List *)&__slab_data.sd_EmptySlabs,(struct Node *)&sn->sn_EmptyLink);
sn->sn_EmptyDecay = 1;
}
/* This slab now has room. Move it to front of the list
* so that searching for a free chunk will pick it
* first.
*/
if(sn != (struct SlabNode *)slab_list->mlh_Head)
{
D(("moving slab to the head of the list"));
Remove((struct Node *)sn);
AddHead((struct List *)slab_list, (struct Node *)sn);
}
freed = TRUE;
break;
}
}
if(!freed)
D(("allocation at 0x%08lx could not be freed",address));
}
else
{
D(("no matching slab found"));
}
}
}
/****************************************************************************/
void
__slab_init(size_t slab_size)
{
size_t size;
SETDEBUGLEVEL(2);
D(("slab_size = %ld",slab_size));
/* If the maximum allocation size to be made from the slab
* is not already a power of 2, round it up. We do not
* support allocations larger than 2^31, and the maximum
* allocation size should be much smaller.
*
* Note that the maximum allocation size also defines the
* amount of memory which each slab manages.
*/
size = sizeof(struct MinNode);
while(size < slab_size && (size & 0x80000000) == 0)
size += size;
D(("size = %lu",size));
/* If the slab size looks sound, enable the slab memory allocator. */
if((size & 0x80000000) == 0)
{
int i;
D(("activating slab allocator"));
assert( size <= slab_size );
/* Start with an empty list of slabs for each chunk size. */
for(i = 0 ; i < 31 ; i++)
NewList((struct List *)&__slab_data.sd_Slabs[i]);
NewList((struct List *)&__slab_data.sd_SingleAllocations);
NewList((struct List *)&__slab_data.sd_EmptySlabs);
__slab_data.sd_MaxSlabSize = size;
__slab_data.sd_InUse = TRUE;
}
}
/****************************************************************************/
void
__slab_exit(void)
{
struct SlabNode * sn;
struct SlabNode * sn_next;
struct MinNode * mn;
struct MinNode * mn_next;
int i;
ENTER();
D(("freeing slabs"));
/* Free the memory allocated for each slab. */
for(i = 0 ; i < 31 ; i++)
{
if(__slab_data.sd_Slabs[i].mlh_Head->mln_Succ != NULL)
D(("freeing slab #%ld (%lu bytes per chunk)", i, (1UL << i)));
for(sn = (struct SlabNode *)__slab_data.sd_Slabs[i].mlh_Head ;
sn->sn_MinNode.mln_Succ != NULL ;
sn = sn_next)
{
sn_next = (struct SlabNode *)sn->sn_MinNode.mln_Succ;
FreeVec(sn);
}
}
if(__slab_data.sd_SingleAllocations.mlh_Head->mln_Succ != NULL)
D(("freeing single allocations"));
/* Free the memory allocated for each allocation which did not
* go into a slab.
*/
for(mn = __slab_data.sd_SingleAllocations.mlh_Head ;
mn->mln_Succ != NULL ;
mn = mn_next)
{
mn_next = mn->mln_Succ;
FreeVec(mn);
}
__slab_data.sd_InUse = FALSE;
LEAVE();
}

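A note on the chunk-size selection in __slab_allocate() and __slab_free() above: a request is first rounded up to at least sizeof(struct MinNode), because free chunks are threaded onto each slab's free list through an embedded MinNode, and is then matched to the smallest power-of-two size class that holds it. The stand-alone sketch below mirrors that loop with plain C types; the 8-byte MinNode size is an assumption (two pointers on a 32-bit system).

   #include <stdio.h>

   /* Stand-in for sizeof(struct MinNode): assumed 8 bytes, i.e. two
    * pointers on a 32-bit system.
    */
   #define MIN_NODE_SIZE 8UL

   /* Mirrors the size-class loop in __slab_allocate()/__slab_free():
    * pick the smallest power-of-two chunk size (starting at 1UL << 2)
    * that holds the rounded-up request.
    */
   static unsigned long chunk_size_for(unsigned long allocation_size)
   {
       unsigned long entry_size = allocation_size;
       unsigned long chunk_size;
       int slab_index;

       if(entry_size < MIN_NODE_SIZE)
           entry_size = MIN_NODE_SIZE;

       for(slab_index = 2, chunk_size = (1UL << slab_index) ;
           slab_index < 31 ;
           slab_index++, chunk_size += chunk_size)
       {
           if(entry_size <= chunk_size)
               return chunk_size;
       }

       return 0; /* request too large for any size class */
   }

   int main(void)
   {
       printf("%lu -> %lu\n",1UL,chunk_size_for(1));       /* 1 -> 8 */
       printf("%lu -> %lu\n",100UL,chunk_size_for(100));   /* 100 -> 128 */
       printf("%lu -> %lu\n",2048UL,chunk_size_for(2048)); /* 2048 -> 2048 */

       return 0;
   }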
View File

@@ -1,6 +1,6 @@
 #define VERSION 1
-#define REVISION 206
-#define DATE "24.4.2015"
-#define VERS "unix.lib 1.206"
-#define VSTRING "unix.lib 1.206 (24.4.2015)\r\n"
-#define VERSTAG "\0$VER: unix.lib 1.206 (24.4.2015)"
+#define REVISION 207
+#define DATE "18.11.2016"
+#define VERS "unix.lib 1.207"
+#define VSTRING "unix.lib 1.207 (18.11.2016)\r\n"
+#define VERSTAG "\0$VER: unix.lib 1.207 (18.11.2016)"

View File

@@ -1 +1 @@
-206
+207