1
0
mirror of https://github.com/adtools/clib2.git synced 2025-12-08 14:59:05 +00:00

Slab allocator changes

If the first slab in the list of slabs which share the same chunk size has no more room, it means that all other slabs following it have no room either. This speeds up the test to find a slab with free space, which can now abort and directly proceed to allocate memory for a new slab.

If an empty slab's decay count hits zero, it is moved to the front of the empty slab list to be reclaimed more quickly.

Allocations made from the slab now carry a pointer back to the slab which they are a part of. This speeds up deallocation but has the downside of making the smallest usable slab chunk size 64 bytes, which is double what used to be the minimum before.
This commit is contained in:
Olaf Barthel
2016-11-22 11:07:38 +01:00
parent 1ea8953bd3
commit 0c5b88d2d3
23 changed files with 287 additions and 128 deletions

View File

@ -333,6 +333,7 @@ C_LIB = \
stdlib_getsp.o \
stdlib_get_errno.o \
stdlib_get_slab_usage.o \
stdlib_get_slab_allocations.o \
stdlib_isresident.o \
stdlib_labs.o \
stdlib_llabs.o \
@ -1137,6 +1138,8 @@ $(LIBC_OBJS)/stdlib_free_unused_slabs.o : stdlib_free_unused_slabs.c stdlib_memo
$(LIBC_OBJS)/stdlib_get_slab_usage.o : stdlib_get_slab_usage.c stdlib_memory.h
$(LIBC_OBJS)/stdlib_get_slab_allocations.o : stdlib_get_slab_allocations.c stdlib_memory.h
$(LIBC_OBJS)/stdlib_realloc.o : stdlib_realloc.c stdlib_memory.h
$(LIBC_OBJS)/stdlib_red_black.o : stdlib_red_black.c stdlib_memory.h

View File

@ -1,6 +1,6 @@
#define VERSION 1
#define REVISION 209
#define DATE "21.11.2016"
#define VERS "amiga.lib 1.209"
#define VSTRING "amiga.lib 1.209 (21.11.2016)\r\n"
#define VERSTAG "\0$VER: amiga.lib 1.209 (21.11.2016)"
#define REVISION 210
#define DATE "22.11.2016"
#define VERS "amiga.lib 1.210"
#define VSTRING "amiga.lib 1.210 (22.11.2016)\r\n"
#define VERSTAG "\0$VER: amiga.lib 1.210 (22.11.2016)"

View File

@ -1 +1 @@
209
210

View File

@ -1,6 +1,6 @@
#define VERSION 1
#define REVISION 209
#define DATE "21.11.2016"
#define VERS "c.lib 1.209"
#define VSTRING "c.lib 1.209 (21.11.2016)\r\n"
#define VERSTAG "\0$VER: c.lib 1.209 (21.11.2016)"
#define REVISION 210
#define DATE "22.11.2016"
#define VERS "c.lib 1.210"
#define VSTRING "c.lib 1.210 (22.11.2016)\r\n"
#define VERSTAG "\0$VER: c.lib 1.210 (22.11.2016)"

View File

@ -1 +1 @@
209
210

View File

@ -1,3 +1,24 @@
c.lib 1.210 (22.11.2016)
- Added __get_slab_allocations() function which will report information
about each memory allocation made by the slab allocator which does
not come from a slab.
- If the first slab in the list of slabs which share the same chunk
size has no more room, it means that all other slabs following
it have no room either. This speeds up the test to find a slab with
free space, which can now abort and directly proceed to allocate
memory for a new slab.
- If an empty slab's decay count hits zero, it is moved to the front
of the empty slab list to be reclaimed more quickly.
- Allocations made from the slab now carry a pointer back to the
slab which they are a part of. This speeds up deallocation but
has the downside of making the smallest usable slab chunk size
64 bytes, which is double what used to be the minimum before.
c.lib 1.209 (21.11.2016)
- The maximum slab size is now 2^17 bytes (= 131072). If you request

View File

@ -1,6 +1,6 @@
#define VERSION 1
#define REVISION 209
#define DATE "21.11.2016"
#define VERS "debug.lib 1.209"
#define VSTRING "debug.lib 1.209 (21.11.2016)\r\n"
#define VERSTAG "\0$VER: debug.lib 1.209 (21.11.2016)"
#define REVISION 210
#define DATE "22.11.2016"
#define VERS "debug.lib 1.210"
#define VSTRING "debug.lib 1.210 (22.11.2016)\r\n"
#define VERSTAG "\0$VER: debug.lib 1.210 (22.11.2016)"

View File

@ -1 +1 @@
209
210

View File

@ -181,7 +181,8 @@ extern unsigned long __slab_max_size;
* following function to do so.
*
* Please note that this function works within the context of the memory
* allocation system and may not be safe to call from interrupt code.
* allocation system and is not safe to call from interrupt code. It may
* break a Forbid() or Disable() condition.
*/
extern void __free_unused_slabs(void);
@ -198,7 +199,8 @@ extern void __free_unused_slabs(void);
* not operational.
*
* Please note that this function works within the context of the memory
* allocation system and may not be safe to call from interrupt code.
* allocation system and is not safe to call from interrupt code. It may
* break a Forbid() or Disable() condition.
*/
/****************************************************************************/
@ -215,7 +217,8 @@ struct __slab_usage_information
size_t sui_num_single_allocations;
/* Total number of bytes allocated for memory not managed
* by slabs.
* by slabs. This includes the management overhead for
* each allocation.
*/
size_t sui_total_single_allocation_size;
@ -259,6 +262,68 @@ void __get_slab_usage(__slab_usage_callback callback);
/****************************************************************************/
/*
* You can obtain runtime statistics about the memory allocations
* which the slab allocator did not fit into slabs. This works
* just like __get_slab_usage() in that the callback function
* you provide will be called for each single allocation that
* is not part of a slab.
*
* Your callback function must return 0 if it wants to be called again,
for the next allocation, or return -1 to stop. Note that your callback
* function may not be called if the slab allocator did not
* allocate memory outside of slabs.
*
* Please note that this function works within the context of the memory
* allocation system and is not safe to call from interrupt code. It may
* break a Forbid() or Disable() condition.
*/
/* This is what your callback function will see when it is invoked. */
struct __slab_allocation_information
{
/* Number of allocations which are not managed by slabs, but
* are handled separately.
*/
size_t sai_num_single_allocations;
/* Total number of bytes allocated for memory not managed
* by slabs. This includes the management overhead for
* each allocation.
*/
size_t sai_total_single_allocation_size;
/*
* The following data is updated for each slab which
* your callback function sees.
*/
/* Index number of the allocation being reported (0 = no allocations
* outside of slabs are in use).
*/
int sai_allocation_index;
/* Size of this allocation, as requested by the program which
* called malloc(), realloc() or alloca().
*/
size_t sai_allocation_size;
/* Total size of this allocation, including management data
* structure overhead.
*/
size_t sai_total_allocation_size;
};
/****************************************************************************/
typedef int (*__slab_allocation_callback)(const struct __slab_allocation_information * sui);
/****************************************************************************/
void __get_slab_allocations(__slab_allocation_callback callback);
/****************************************************************************/
/*
* You can request to use the alloca() variant that actually does allocate
* memory from the system rather than the current stack frame, which will

View File

@ -219,6 +219,7 @@ C_LIB := \
stdlib_getsp.o \
stdlib_get_errno.o \
stdlib_get_slab_usage.o \
stdlib_get_slab_allocations.o \
stdlib_isresident.o \
stdlib_labs.o \
stdlib_llabs.o \

View File

@ -1,6 +1,6 @@
#define VERSION 1
#define REVISION 209
#define DATE "21.11.2016"
#define VERS "m.lib 1.209"
#define VSTRING "m.lib 1.209 (21.11.2016)\r\n"
#define VERSTAG "\0$VER: m.lib 1.209 (21.11.2016)"
#define REVISION 210
#define DATE "22.11.2016"
#define VERS "m.lib 1.210"
#define VSTRING "m.lib 1.210 (22.11.2016)\r\n"
#define VERSTAG "\0$VER: m.lib 1.210 (22.11.2016)"

View File

@ -1 +1 @@
209
210

View File

@ -1,6 +1,6 @@
#define VERSION 1
#define REVISION 209
#define DATE "21.11.2016"
#define VERS "m881.lib 1.209"
#define VSTRING "m881.lib 1.209 (21.11.2016)\r\n"
#define VERSTAG "\0$VER: m881.lib 1.209 (21.11.2016)"
#define REVISION 210
#define DATE "22.11.2016"
#define VERS "m881.lib 1.210"
#define VSTRING "m881.lib 1.210 (22.11.2016)\r\n"
#define VERSTAG "\0$VER: m881.lib 1.210 (22.11.2016)"

View File

@ -1 +1 @@
209
210

View File

@ -1,6 +1,6 @@
#define VERSION 1
#define REVISION 209
#define DATE "21.11.2016"
#define VERS "net.lib 1.209"
#define VSTRING "net.lib 1.209 (21.11.2016)\r\n"
#define VERSTAG "\0$VER: net.lib 1.209 (21.11.2016)"
#define REVISION 210
#define DATE "22.11.2016"
#define VERS "net.lib 1.210"
#define VSTRING "net.lib 1.210 (22.11.2016)\r\n"
#define VERSTAG "\0$VER: net.lib 1.210 (22.11.2016)"

View File

@ -1 +1 @@
209
210

View File

@ -520,6 +520,7 @@ STDLIB_OBJ = \
stdlib_getsp.o \
stdlib_get_errno.o \
stdlib_get_slab_usage.o \
stdlib_get_slab_allocations.o \
stdlib_isresident.o \
stdlib_labs.o \
stdlib_ldiv.o \

View File

@ -47,7 +47,7 @@ __get_slab_usage(__slab_usage_callback callback)
if(__slab_data.sd_InUse)
{
struct __slab_usage_information sui;
struct SlabNode * sn;
const struct SlabNode * sn;
BOOL stop;
int i;
@ -98,6 +98,10 @@ __get_slab_usage(__slab_usage_callback callback)
}
}
}
else
{
(*callback)(&sui);
}
__memory_unlock();
}

View File

@ -234,6 +234,15 @@ struct SlabNode
struct MinList sn_FreeList;
};
/* Memory allocations which are not part of a slab are
* tracked using this data structure.
*/
struct SlabSingleAllocation
{
struct MinNode ssa_MinNode;
ULONG ssa_Size;
};
/* This is the global bookkeeping information for managing
* memory allocations from the slab data structure.
*/

View File

@ -47,9 +47,17 @@ struct SlabData NOCOMMON __slab_data;
/****************************************************************************/
struct SlabChunk
{
struct SlabNode * sc_Parent;
};
/****************************************************************************/
void *
__slab_allocate(size_t allocation_size)
{
struct SlabChunk * chunk;
void * allocation = NULL;
D(("allocating %lu bytes of memory",allocation_size));
@ -60,33 +68,36 @@ __slab_allocate(size_t allocation_size)
* If so, allocate this memory chunk separately and
* keep track of it.
*/
if(allocation_size > __slab_data.sd_StandardSlabSize)
if(allocation_size + sizeof(*chunk) > __slab_data.sd_StandardSlabSize)
{
struct MinNode * single_allocation;
struct SlabSingleAllocation * single_allocation;
ULONG total_single_allocation_size = sizeof(*single_allocation) + allocation_size;
D(("allocation size is > %ld; this will be stored separately",__slab_data.sd_StandardSlabSize));
D(("allocating %ld (MinNode) + %ld = %ld bytes",sizeof(*single_allocation),allocation_size,sizeof(*single_allocation) + allocation_size));
D(("allocating %ld (MinNode+Size) + %ld = %ld bytes",sizeof(*single_allocation),allocation_size,total_single_allocation_size));
#if defined(__amigaos4__)
{
single_allocation = AllocVec(sizeof(*single_allocation) + allocation_size,MEMF_PRIVATE);
single_allocation = AllocMem(total_single_allocation_size,MEMF_PRIVATE);
}
#else
{
single_allocation = AllocVec(sizeof(*single_allocation) + allocation_size,MEMF_ANY);
single_allocation = AllocMem(total_single_allocation_size,MEMF_ANY);
}
#endif /* __amigaos4__ */
if(single_allocation != NULL)
{
single_allocation->ssa_Size = total_single_allocation_size;
allocation = &single_allocation[1];
D(("single allocation = 0x%08lx",allocation));
AddTail((struct List *)&__slab_data.sd_SingleAllocations,(struct Node *)single_allocation);
__slab_data.sd_NumSingleAllocations++;
__slab_data.sd_TotalSingleAllocationSize += sizeof(*single_allocation) + allocation_size;
allocation = &single_allocation[1];
__slab_data.sd_TotalSingleAllocationSize += total_single_allocation_size;
D(("single allocation succeeded at 0x%08lx (number of single allocations = %lu)", allocation, __slab_data.sd_NumSingleAllocations));
}
@ -113,6 +124,11 @@ __slab_allocate(size_t allocation_size)
if(entry_size < sizeof(struct MinNode))
entry_size = sizeof(struct MinNode);
/* Add room for a pointer back to the slab which
* the chunk belongs to.
*/
entry_size += sizeof(*chunk);
/* Find a slab which keeps track of chunks that are no
* larger than the amount of memory which needs to be
* allocated. We end up picking the smallest chunk
@ -139,18 +155,28 @@ __slab_allocate(size_t allocation_size)
SHOWVALUE(chunk_size);
/* Find the first slab which has a free chunk and use it. */
for(sn = (struct SlabNode *)slab_list->mlh_Head ;
sn->sn_MinNode.mln_Succ != NULL ;
sn = (struct SlabNode *)sn->sn_MinNode.mln_Succ)
/* The slab list is organized in such a way that the first
* entry always has a free chunk ready for allocation. If
* there is no such free chunk, it means that no other
* slab nodes in this list have any free chunks.
*/
sn = (struct SlabNode *)slab_list->mlh_Head;
/* Make sure that the slab list is not empty. */
if(sn->sn_MinNode.mln_Succ != NULL)
{
D(("slab = 0x%08lx, chunk size = %ld", sn, sn->sn_ChunkSize));
assert( sn->sn_ChunkSize == chunk_size );
allocation = (struct MemoryNode *)RemHead((struct List *)&sn->sn_FreeList);
if(allocation != NULL)
chunk = (struct SlabChunk *)RemHead((struct List *)&sn->sn_FreeList);
if(chunk != NULL)
{
/* Keep track of this chunk's parent slab. */
chunk->sc_Parent = sn;
allocation = &chunk[1];
D(("allocation succeeded at 0x%08lx in slab 0x%08lx (slab use count = %lu)",allocation,sn,sn->sn_UseCount));
/* Was this slab empty before we began using it again? */
@ -178,8 +204,6 @@ __slab_allocate(size_t allocation_size)
Remove((struct Node *)sn);
AddTail((struct List *)slab_list, (struct Node *)sn);
}
break;
}
}
@ -283,12 +307,17 @@ __slab_allocate(size_t allocation_size)
D(("slab contains %lu chunks, %lu bytes each",num_free_chunks,chunk_size));
/* Grab the first free chunk (there has to be one). */
allocation = (struct MemoryNode *)RemHead((struct List *)&new_sn->sn_FreeList);
chunk = (struct SlabChunk *)RemHead((struct List *)&new_sn->sn_FreeList);
assert( chunk != NULL );
/* Keep track of this chunk's parent slab. */
chunk->sc_Parent = new_sn;
allocation = &chunk[1];
D(("allocation succeeded at 0x%08lx in slab 0x%08lx (slab use count = %lu)",allocation,new_sn,new_sn->sn_UseCount+1));
assert( allocation != NULL );
/* Set up the new slab and put it where it belongs. */
new_sn->sn_EmptyDecay = 0;
new_sn->sn_UseCount = 1;
@ -335,6 +364,19 @@ __slab_allocate(size_t allocation_size)
else
{
sn->sn_EmptyDecay--;
/* Is this slab ready for reuse now? */
if(sn->sn_EmptyDecay == 0)
{
/* Move it to the front of the list, so that
* it will be collected as soon as possible.
*/
if(free_node != (struct MinNode *)__slab_data.sd_EmptySlabs.mlh_Head)
{
Remove((struct Node *)free_node);
AddHead((struct List *)&__slab_data.sd_EmptySlabs,(struct Node *)free_node);
}
}
}
}
}
@ -354,6 +396,8 @@ __slab_allocate(size_t allocation_size)
void
__slab_free(void * address,size_t allocation_size)
{
struct SlabChunk * chunk;
D(("freeing allocation at 0x%08lx, %lu bytes",address,allocation_size));
assert( __slab_data.sd_StandardSlabSize > 0 );
@ -361,23 +405,33 @@ __slab_free(void * address,size_t allocation_size)
/* Number of bytes allocated exceeds the slab size?
* Then the chunk was allocated separately.
*/
if(allocation_size > __slab_data.sd_StandardSlabSize)
if(allocation_size + sizeof(*chunk) > __slab_data.sd_StandardSlabSize)
{
struct MinNode * mn = address;
struct SlabSingleAllocation * single_allocation = address;
ULONG size;
D(("allocation size is > %ld; this was stored separately",__slab_data.sd_StandardSlabSize));
Remove((struct Node *)&mn[-1]);
/* Management information (MinNode linkage, size in bytes) precedes
* the address returned by malloc(), etc.
*/
single_allocation--;
FreeVec(&mn[-1]);
size = single_allocation->ssa_Size;
assert( sizeof(*single_allocation) + allocation_size == size );
Remove((struct Node *)single_allocation);
FreeMem(single_allocation, size);
assert( __slab_data.sd_NumSingleAllocations > 0 );
__slab_data.sd_NumSingleAllocations--;
assert( __slab_data.sd_TotalSingleAllocationSize <= sizeof(*mn) + allocation_size );
assert( size <= __slab_data.sd_TotalSingleAllocationSize );
__slab_data.sd_TotalSingleAllocationSize -= sizeof(*mn) + allocation_size;
__slab_data.sd_TotalSingleAllocationSize -= size;
D(("number of single allocations = %ld", __slab_data.sd_NumSingleAllocations));
}
@ -399,6 +453,11 @@ __slab_free(void * address,size_t allocation_size)
if(entry_size < sizeof(struct MinNode))
entry_size = sizeof(struct MinNode);
/* Add room for a pointer back to the slab which
* the chunk belongs to.
*/
entry_size += sizeof(*chunk);
/* Find a slab which keeps track of chunks that are no
* larger than the amount of memory which needs to be
* released. We end up picking the smallest chunk
@ -419,69 +478,56 @@ __slab_free(void * address,size_t allocation_size)
}
}
/* Find the slab which contains the memory chunk. */
/* Pick the slab which contains the memory chunk. */
if(slab_list != NULL)
{
const size_t usable_range = __slab_data.sd_StandardSlabSize - chunk_size;
struct SlabNode * sn;
BYTE * first_byte;
BYTE * last_byte;
BOOL freed = FALSE;
assert( chunk_size <= __slab_data.sd_StandardSlabSize );
for(sn = (struct SlabNode *)slab_list->mlh_Head ;
sn->sn_MinNode.mln_Succ != NULL ;
sn = (struct SlabNode *)sn->sn_MinNode.mln_Succ)
/* The pointer back to the slab which this chunk belongs
* to precedes the address which __slab_allocate()
* returned.
*/
chunk = address;
chunk--;
sn = chunk->sc_Parent;
SHOWVALUE(sn->sn_ChunkSize);
assert( sn->sn_ChunkSize == chunk_size );
D(("allocation is part of slab 0x%08lx (slab use count = %ld)",sn,sn->sn_UseCount));
AddTail((struct List *)&sn->sn_FreeList, (struct Node *)address);
assert( sn->sn_UseCount > 0 );
sn->sn_UseCount--;
/* If this slab is empty, mark it as unused and
* allow it to be purged.
*/
if(sn->sn_UseCount == 0)
{
SHOWVALUE(sn->sn_ChunkSize);
D(("slab is now empty"));
assert( sn->sn_ChunkSize == chunk_size );
first_byte = (BYTE *)&sn[1];
last_byte = &first_byte[usable_range];
/* Is this where the chunk belongs? */
if(first_byte <= (BYTE *)address && (BYTE *)address <= last_byte)
{
D(("allocation is part of slab 0x%08lx (slab use count = %ld)",sn,sn->sn_UseCount));
AddTail((struct List *)&sn->sn_FreeList, (struct Node *)address);
assert( sn->sn_UseCount > 0 );
sn->sn_UseCount--;
/* If this slab is empty, mark it as unused and
* allow it to be purged.
*/
if(sn->sn_UseCount == 0)
{
D(("slab is now empty"));
AddTail((struct List *)&__slab_data.sd_EmptySlabs,(struct Node *)&sn->sn_EmptyLink);
sn->sn_EmptyDecay = 1;
}
/* This slab now has room. Move it to front of the list
* so that searching for a free chunk will pick it
* first.
*/
if(sn != (struct SlabNode *)slab_list->mlh_Head)
{
D(("moving slab to the head of the list"));
Remove((struct Node *)sn);
AddHead((struct List *)slab_list, (struct Node *)sn);
}
freed = TRUE;
break;
}
AddTail((struct List *)&__slab_data.sd_EmptySlabs,(struct Node *)&sn->sn_EmptyLink);
sn->sn_EmptyDecay = 1;
}
if(!freed)
D(("allocation at 0x%08lx could not be freed",address));
/* This slab now has room. Move it to front of the list
* so that searching for a free chunk will pick it
* first.
*/
if(sn != (struct SlabNode *)slab_list->mlh_Head)
{
D(("moving slab to the head of the list"));
Remove((struct Node *)sn);
AddHead((struct List *)slab_list, (struct Node *)sn);
}
}
else
{
@ -554,6 +600,7 @@ __slab_exit(void)
if(__slab_data.sd_InUse)
{
struct SlabSingleAllocation * single_allocation;
struct SlabNode * sn;
struct SlabNode * sn_next;
struct MinNode * mn;
@ -590,7 +637,9 @@ __slab_exit(void)
{
mn_next = mn->mln_Succ;
FreeVec(mn);
single_allocation = (struct SlabSingleAllocation *)mn;
FreeMem(single_allocation, single_allocation->ssa_Size);
}
__slab_data.sd_InUse = FALSE;

View File

@ -1,6 +1,6 @@
#define VERSION 1
#define REVISION 209
#define DATE "21.11.2016"
#define VERS "unix.lib 1.209"
#define VSTRING "unix.lib 1.209 (21.11.2016)\r\n"
#define VERSTAG "\0$VER: unix.lib 1.209 (21.11.2016)"
#define REVISION 210
#define DATE "22.11.2016"
#define VERS "unix.lib 1.210"
#define VSTRING "unix.lib 1.210 (22.11.2016)\r\n"
#define VERSTAG "\0$VER: unix.lib 1.210 (22.11.2016)"

View File

@ -1 +1 @@
209
210

View File

@ -31,7 +31,7 @@ WARNINGS = \
INCLUDE = -I../library/include
LIB = -L../library/lib
OPTIONS = -DNDEBUG -fno-builtin -fwritable-strings -DNO_INLINE_STDARG -DIEEE_FLOATING_POINT_SUPPORT
OPTIONS = -DNDEBUG -fno-builtin -fwritable-strings -DNO_INLINE_STDARG -DIEEE_FLOATING_POINT_SUPPORT -DVERBOSE
#OPTIONS = -D__MEM_DEBUG -fno-builtin
#OPTIONS = -DDEBUG -D__MEM_DEBUG -DNO_INLINE_STDARG -fno-builtin
OPTIMIZE = -O
@ -49,13 +49,15 @@ LIBS = -lm -lc -lgcc
all: test fgets_test iotest sscanf_test printf_test sprintf_test \
stack_size_test translate_test strtok_test uname simple \
fstat_stdout_test simple_sprintf date_test sscanf_64 factorial \
execvp_test setlocale rand fstat_test base_dir_nametest
execvp_test setlocale rand fstat_test base_dir_nametest \
malloc-test
clean:
$(DELETE) #?.o #?.map test fgets_test iotest sscanf_test printf_test \
sprintf_test stack_size_test translate_test strtok_test uname \
simple fstat_stdout_test fstat_test simple_sprintf date_test sscanf_64 \
factorial execvp_test setlocale rand base_dir_nametest
factorial execvp_test setlocale rand base_dir_nametest \
malloc-test
##############################################################################
@ -143,6 +145,10 @@ rand : rand.o
@echo "Linking $@"
$(CC) $(CFLAGS) -o $@ rand.o $(LIBS) -Wl,--cref,-M,-Map=$@.map
malloc-test: malloc-test.o
@echo "Linking $@"
$(CC) $(CFLAGS) -o $@ malloc-test.o $(LIBS) -Wl,--cref,-M,-Map=$@.map
##############################################################################
mkid: