1
0
mirror of https://github.com/adtools/clib2.git synced 2025-12-08 14:59:05 +00:00

Slab allocator update

Added more consistency checking to the slab allocator, which is built if DEBUG is defined in "stdlib_slab.c".

Memory allocations are no longer guaranteed to be aligned to 64 bit word boundaries. In fact, this has not even worked reliably in the past 10 years.

Memory allocation request sizes are now rounded to multiples of 32 bit words (the size of an address pointer) instead of the size of a 64 bit word.

Reduced the memory footprint of the memory allocation management data structures by reusing the most significant bit of the memory allocation size. This allows many more allocations to fit into the 32 byte chunk slabs, but limits the maximum memory allocation size to a little less than 2 GBytes.

Added integer overflow checks to the memory management code.

Reduced the memory management overhead further. This cuts an additional 8 bytes per allocation, unless neither the slab allocator nor memory pools are available. With this reduction the slab allocator is able to use 16 byte chunks, which cover memory allocation requests of 1..8 bytes.

Fixed a bug in which returning an allocation back to a slab passed the wrong pointer.
This commit is contained in:
Olaf Barthel
2016-11-23 16:37:46 +01:00
parent f8cf752e6a
commit d2acae7cd7
19 changed files with 375 additions and 155 deletions

View File

@ -1,6 +1,6 @@
#define VERSION 1
#define REVISION 210
#define DATE "22.11.2016"
#define VERS "amiga.lib 1.210"
#define VSTRING "amiga.lib 1.210 (22.11.2016)\r\n"
#define VERSTAG "\0$VER: amiga.lib 1.210 (22.11.2016)"
#define REVISION 211
#define DATE "23.11.2016"
#define VERS "amiga.lib 1.211"
#define VSTRING "amiga.lib 1.211 (23.11.2016)\r\n"
#define VERSTAG "\0$VER: amiga.lib 1.211 (23.11.2016)"

View File

@ -1 +1 @@
210
211

View File

@ -1,6 +1,6 @@
#define VERSION 1
#define REVISION 210
#define DATE "22.11.2016"
#define VERS "c.lib 1.210"
#define VSTRING "c.lib 1.210 (22.11.2016)\r\n"
#define VERSTAG "\0$VER: c.lib 1.210 (22.11.2016)"
#define REVISION 211
#define DATE "23.11.2016"
#define VERS "c.lib 1.211"
#define VSTRING "c.lib 1.211 (23.11.2016)\r\n"
#define VERSTAG "\0$VER: c.lib 1.211 (23.11.2016)"

View File

@ -1 +1 @@
210
211

View File

@ -1,3 +1,34 @@
c.lib 1.211 (23.11.2016)
- Added more consistency checking to the slab allocator, which is
built if DEBUG is defined in "stdlib_slab.c".
- Memory allocations are no longer guaranteed to be aligned to
64 bit word boundaries. In fact, this has not even worked
reliably in the past 10 years.
- Memory allocation request sizes are now rounded to multiples of
32 bit words (the size of an address pointer) instead of the
size of a 64 bit word.
- Reduced the memory footprint of the memory allocation management
data structures by reusing the most significant bit of the
memory allocation size. This allows many more allocations to fit
into the 32 byte chunk slabs, but limits the maximum memory
allocation size to a little less than 2 GBytes.
- Added integer overflow checks to the memory management code.
- Reduced the memory management overhead further. This cuts an
additional 8 bytes per allocation, unless neither the slab
allocator nor memory pools are available. With this reduction
the slab allocator is able to use 16 byte chunks, which cover
memory allocation requests of 1..8 bytes.
- Fixed a bug in which returning an allocation back to a slab
passed the wrong pointer.
c.lib 1.210 (22.11.2016)
- Added __get_slab_allocations() function which will report information

View File

@ -1,6 +1,6 @@
#define VERSION 1
#define REVISION 210
#define DATE "22.11.2016"
#define VERS "debug.lib 1.210"
#define VSTRING "debug.lib 1.210 (22.11.2016)\r\n"
#define VERSTAG "\0$VER: debug.lib 1.210 (22.11.2016)"
#define REVISION 211
#define DATE "23.11.2016"
#define VERS "debug.lib 1.211"
#define VSTRING "debug.lib 1.211 (23.11.2016)\r\n"
#define VERSTAG "\0$VER: debug.lib 1.211 (23.11.2016)"

View File

@ -1 +1 @@
210
211

View File

@ -1,6 +1,6 @@
#define VERSION 1
#define REVISION 210
#define DATE "22.11.2016"
#define VERS "m.lib 1.210"
#define VSTRING "m.lib 1.210 (22.11.2016)\r\n"
#define VERSTAG "\0$VER: m.lib 1.210 (22.11.2016)"
#define REVISION 211
#define DATE "23.11.2016"
#define VERS "m.lib 1.211"
#define VSTRING "m.lib 1.211 (23.11.2016)\r\n"
#define VERSTAG "\0$VER: m.lib 1.211 (23.11.2016)"

View File

@ -1 +1 @@
210
211

View File

@ -1,6 +1,6 @@
#define VERSION 1
#define REVISION 210
#define DATE "22.11.2016"
#define VERS "m881.lib 1.210"
#define VSTRING "m881.lib 1.210 (22.11.2016)\r\n"
#define VERSTAG "\0$VER: m881.lib 1.210 (22.11.2016)"
#define REVISION 211
#define DATE "23.11.2016"
#define VERS "m881.lib 1.211"
#define VSTRING "m881.lib 1.211 (23.11.2016)\r\n"
#define VERSTAG "\0$VER: m881.lib 1.211 (23.11.2016)"

View File

@ -1 +1 @@
210
211

View File

@ -1,6 +1,6 @@
#define VERSION 1
#define REVISION 210
#define DATE "22.11.2016"
#define VERS "net.lib 1.210"
#define VSTRING "net.lib 1.210 (22.11.2016)\r\n"
#define VERSTAG "\0$VER: net.lib 1.210 (22.11.2016)"
#define REVISION 211
#define DATE "23.11.2016"
#define VERS "net.lib 1.211"
#define VSTRING "net.lib 1.211 (23.11.2016)\r\n"
#define VERSTAG "\0$VER: net.lib 1.211 (23.11.2016)"

View File

@ -1 +1 @@
210
211

View File

@ -31,6 +31,8 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
/*#define DEBUG*/
#ifndef _STDLIB_HEADERS_H
#include "stdlib_headers.h"
#endif /* _STDLIB_HEADERS_H */
@ -165,7 +167,7 @@ dump_memory(unsigned char * m,int size,int ignore)
STATIC VOID
check_memory_node(struct MemoryNode * mn,const char * file,int line)
{
size_t size = mn->mn_Size;
ULONG size = GET_MN_SIZE(mn);
unsigned char * head = (unsigned char *)(mn + 1);
unsigned char * body = head + MALLOC_HEAD_SIZE;
unsigned char * tail = body + size;
@ -227,10 +229,12 @@ check_memory_node(struct MemoryNode * mn,const char * file,int line)
if(mn->mn_AlreadyFree)
{
for(i = 0 ; i < size ; i++)
ULONG j;
for(j = 0 ; j < size ; j++)
{
if(body[i] != MALLOC_FREE_FILL)
max_body_damage = i+1;
if(body[j] != MALLOC_FREE_FILL)
max_body_damage = j+1;
}
if(max_body_damage > 0)
@ -345,17 +349,17 @@ remove_and_free_memory_node(struct MemoryNode * mn)
__memory_lock();
#if defined(__MEM_DEBUG)
{
Remove((struct Node *)mn);
#if defined(__USE_MEM_TREES) && defined(__MEM_DEBUG)
#if defined(__USE_MEM_TREES)
{
__red_black_tree_remove(&__memory_tree,mn);
}
#endif /* __USE_MEM_TREES && __MEM_DEBUG */
#endif /* __USE_MEM_TREES */
#ifdef __MEM_DEBUG
{
allocation_size = sizeof(*mn) + MALLOC_HEAD_SIZE + mn->mn_Size + MALLOC_TAIL_SIZE;
allocation_size = sizeof(*mn) + MALLOC_HEAD_SIZE + GET_MN_SIZE(mn) + MALLOC_TAIL_SIZE;
assert( allocation_size == mn->mn_AllocationSize );
@ -363,7 +367,7 @@ remove_and_free_memory_node(struct MemoryNode * mn)
}
#else
{
allocation_size = sizeof(*mn) + mn->mn_Size;
allocation_size = sizeof(*mn) + GET_MN_SIZE(mn);
}
#endif /* __MEM_DEBUG */
@ -371,19 +375,57 @@ remove_and_free_memory_node(struct MemoryNode * mn)
{
/* Are we using the slab allocator? */
if (__slab_data.sd_InUse)
{
__slab_free(mn,allocation_size);
}
else if (__memory_pool != NULL)
{
FreePooled(__memory_pool,mn,allocation_size);
}
else
{
#if defined(__MEM_DEBUG)
{
FreeMem(mn,allocation_size);
}
#else
{
struct MinNode * mln = (struct MinNode *)mn;
mln--;
Remove((struct Node *)mln);
FreeMem(mln,sizeof(*mln) + allocation_size);
}
#endif /* __MEM_DEBUG */
}
}
#else
{
if (__memory_pool != NULL)
{
FreePooled(__memory_pool,mn,allocation_size);
}
else
{
#if defined(__MEM_DEBUG)
{
FreeMem(mn,allocation_size);
}
#else
{
struct MinNode * mln = (struct MinNode *)mn;
mln--;
Remove((struct Node *)mln);
FreeMem(mln,sizeof(*mln) + allocation_size);
}
#endif /* __MEM_DEBUG */
}
}
#endif /* __USE_SLAB_ALLOCATOR */
__current_memory_allocated -= allocation_size;
@ -401,7 +443,7 @@ __free_memory_node(struct MemoryNode * mn,const char * UNUSED file,int UNUSED li
#ifdef __MEM_DEBUG
{
size_t size = mn->mn_Size;
ULONG size = GET_MN_SIZE(mn);
check_memory_node(mn,file,line);
@ -409,7 +451,7 @@ __free_memory_node(struct MemoryNode * mn,const char * UNUSED file,int UNUSED li
{
#ifdef __MEM_DEBUG_LOG
{
kprintf("[%s] - %10ld 0x%08lx [",__program_name,mn->mn_Size,mn->mn_Allocation);
kprintf("[%s] - %10ld 0x%08lx [",__program_name,size,mn->mn_Allocation);
if(mn->mn_File != NULL)
kprintf("allocated at %s:%ld, ",mn->mn_File,mn->mn_Line);
@ -436,14 +478,14 @@ __free_memory_node(struct MemoryNode * mn,const char * UNUSED file,int UNUSED li
{
#ifdef __MEM_DEBUG_LOG
{
kprintf("[%s] - %10ld 0x%08lx [",__program_name,mn->mn_Size,mn->mn_Allocation);
kprintf("[%s] - %10ld 0x%08lx [",__program_name,size,mn->mn_Allocation);
kprintf("FAILED]\n");
}
#endif /* __MEM_DEBUG_LOG */
kprintf("[%s] %s:%ld:Allocation at address 0x%08lx, size %ld",
__program_name,file,line,mn->mn_Allocation,mn->mn_Size);
__program_name,file,line,mn->mn_Allocation,size);
if(mn->mn_File != NULL)
kprintf(", allocated at %s:%ld",mn->mn_File,mn->mn_Line);
@ -467,6 +509,9 @@ __free_memory(void * ptr,BOOL force,const char * file,int line)
assert(ptr != NULL);
SHOWPOINTER(ptr);
SHOWVALUE(force);
#ifdef __MEM_DEBUG
{
/*if((rand() % 16) == 0)
@ -480,7 +525,7 @@ __free_memory(void * ptr,BOOL force,const char * file,int line)
{
if(mn != NULL)
{
if(force || (NOT mn->mn_NeverFree))
if(force || FLAG_IS_CLEAR(mn->mn_Size, MN_SIZE_NEVERFREE))
__free_memory_node(mn,file,line);
}
else
@ -502,7 +547,9 @@ __free_memory(void * ptr,BOOL force,const char * file,int line)
{
assert( mn != NULL );
if(mn != NULL && (force || (NOT mn->mn_NeverFree)))
SHOWVALUE(mn->mn_Size);
if(mn != NULL && (force || FLAG_IS_CLEAR(mn->mn_Size, MN_SIZE_NEVERFREE)))
__free_memory_node(mn,file,line);
}
#endif /* __MEM_DEBUG */

View File

@ -31,6 +31,8 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
/*#define DEBUG*/
#ifndef _STDLIB_HEADERS_H
#include "stdlib_headers.h"
#endif /* _STDLIB_HEADERS_H */
@ -72,25 +74,6 @@ struct MinList NOCOMMON __memory_list;
/****************************************************************************/
size_t
__get_allocation_size(size_t size)
{
#ifndef __MEM_DEBUG
{
size_t total_allocation_size;
total_allocation_size = sizeof(struct MemoryNode) + size;
/* Round up the allocation size to the physical allocation granularity. */
size += ((total_allocation_size + MEM_BLOCKMASK) & ~((ULONG)MEM_BLOCKMASK)) - total_allocation_size;
}
#endif /* __MEM_DEBUG */
return(size);
}
/****************************************************************************/
void *
__allocate_memory(size_t size,BOOL never_free,const char * UNUSED debug_file_name,int UNUSED debug_line_number)
{
@ -135,13 +118,32 @@ __allocate_memory(size_t size,BOOL never_free,const char * UNUSED debug_file_nam
}
#else
{
/* Round up the allocation size to the physical allocation granularity. */
size = __get_allocation_size(size);
/* Round up allocation to a multiple of 32 bits. */
if((size & 3) != 0)
size += 4 - (size & 3);
allocation_size = sizeof(*mn) + size;
}
#endif /* __MEM_DEBUG */
/* Integer overflow has occurred? */
if(size == 0 || allocation_size < size)
{
__set_errno(ENOMEM);
goto out;
}
/* We reuse the MemoryNode.mn_Size field to mark
* allocations are not suitable for use with
* free() and realloc(). This limits allocation
* sizes to a little less than 2 GBytes.
*/
if(allocation_size & MN_SIZE_NEVERFREE)
{
__set_errno(ENOMEM);
goto out;
}
#if defined(__USE_SLAB_ALLOCATOR)
{
/* Are we using the slab allocator? */
@ -155,15 +157,27 @@ __allocate_memory(size_t size,BOOL never_free,const char * UNUSED debug_file_nam
}
else
{
#if defined(__amigaos4__)
{
mn = AllocMem(allocation_size,MEMF_PRIVATE);
}
#else
#ifdef __MEM_DEBUG
{
mn = AllocMem(allocation_size,MEMF_ANY);
}
#endif /* __amigaos4__ */
#else
{
struct MinNode * mln;
mln = AllocMem(sizeof(*mln) + allocation_size,MEMF_ANY);
if(mln != NULL)
{
AddTail((struct List *)&__memory_list,(struct Node *)mln);
mn = (struct MemoryNode *)&mln[1];
}
else
{
mn = NULL;
}
}
#endif /* __MEM_DEBUG */
}
}
#else
@ -174,15 +188,27 @@ __allocate_memory(size_t size,BOOL never_free,const char * UNUSED debug_file_nam
}
else
{
#if defined(__amigaos4__)
{
mn = AllocMem(allocation_size,MEMF_PRIVATE);
}
#else
#ifdef __MEM_DEBUG
{
mn = AllocMem(allocation_size,MEMF_ANY);
}
#endif /* __amigaos4__ */
#else
{
struct MinNode * mln;
mln = AllocMem(sizeof(*mln) + allocation_size,MEMF_ANY);
if(mln != NULL)
{
AddTail((struct List *)&__memory_list,(struct Node *)mln);
mn = (struct MemoryNode *)&mln[1];
}
else
{
mn = NULL;
}
}
#endif /* __MEM_DEBUG */
}
}
#endif /* __USE_SLAB_ALLOCATOR */
@ -194,9 +220,9 @@ __allocate_memory(size_t size,BOOL never_free,const char * UNUSED debug_file_nam
}
mn->mn_Size = size;
mn->mn_NeverFree = never_free;
AddTail((struct List *)&__memory_list,(struct Node *)mn);
if(never_free)
SET_FLAG(mn->mn_Size, MN_SIZE_NEVERFREE);
__current_memory_allocated += allocation_size;
if(__maximum_memory_allocated < __current_memory_allocated)
@ -212,6 +238,8 @@ __allocate_memory(size_t size,BOOL never_free,const char * UNUSED debug_file_nam
char * body = head + MALLOC_HEAD_SIZE;
char * tail = body + size;
AddTail((struct List *)&__memory_list,(struct Node *)mn);
mn->mn_AlreadyFree = FALSE;
mn->mn_Allocation = body;
mn->mn_AllocationSize = allocation_size;

View File

@ -124,7 +124,6 @@
#define __find_memory_node __find_memory_node_debug
#define __free_memory_node __free_memory_node_debug
#define __get_allocation_size __get_allocation_size_debug
#define __allocate_memory __allocate_memory_debug
#define __memory_pool __memory_pool_debug
@ -151,16 +150,24 @@ extern char * __getcwd(char * buffer,size_t buffer_size,const char *file,int lin
/****************************************************************************/
/* If this flag is set in mn_Size, then this memory allocation
* cannot be released with free() or used with realloc(). This
* flag is set by alloca().
*/
#define MN_SIZE_NEVERFREE (0x80000000UL)
/* This obtains the allocation size from a memory node, ignoring
* the "never free" flag altogether.
*/
#define GET_MN_SIZE(mn) ((mn)->mn_Size & ~MN_SIZE_NEVERFREE)
struct MemoryNode
{
struct MinNode mn_MinNode;
size_t mn_Size;
UBYTE mn_NeverFree;
#ifdef __MEM_DEBUG
struct MinNode mn_MinNode;
UBYTE mn_AlreadyFree;
UBYTE mn_Pad0[2];
UBYTE mn_Pad0[3];
void * mn_Allocation;
size_t mn_AllocationSize;
@ -179,9 +186,9 @@ struct MemoryNode
UBYTE mn_Pad1[3];
#endif /* __USE_MEM_TREES */
#else
UBYTE mn_Pad0[3];
#endif /* __MEM_DEBUG */
ULONG mn_Size;
};
#ifdef __USE_MEM_TREES

View File

@ -59,42 +59,57 @@ __slab_allocate(size_t allocation_size)
{
struct SlabChunk * chunk;
void * allocation = NULL;
size_t allocation_size_with_chunk_header;
D(("allocating %lu bytes of memory",allocation_size));
assert( __slab_data.sd_StandardSlabSize > 0 );
/* Check for integer overflow. */
allocation_size_with_chunk_header = sizeof(*chunk) + allocation_size;
if(allocation_size_with_chunk_header < allocation_size)
return(NULL);
/* Number of bytes to allocate exceeds the slab size?
* If so, allocate this memory chunk separately and
* keep track of it.
*/
if(allocation_size + sizeof(*chunk) > __slab_data.sd_StandardSlabSize)
if(allocation_size_with_chunk_header > __slab_data.sd_StandardSlabSize)
{
struct SlabSingleAllocation * single_allocation;
ULONG total_single_allocation_size = sizeof(*single_allocation) + allocation_size;
struct SlabSingleAllocation * ssa;
ULONG total_single_allocation_size = sizeof(*ssa) + allocation_size;
D(("allocation size is > %ld; this will be stored separately",__slab_data.sd_StandardSlabSize));
D(("allocating %ld (MinNode+Size) + %ld = %ld bytes",sizeof(*single_allocation),allocation_size,total_single_allocation_size));
D(("allocating %ld (MinNode+Size) + %ld = %ld bytes",sizeof(*ssa),allocation_size,total_single_allocation_size));
/* No integer overflow? */
if(allocation_size < total_single_allocation_size)
{
#if defined(__amigaos4__)
{
single_allocation = AllocMem(total_single_allocation_size,MEMF_PRIVATE);
ssa = AllocMem(total_single_allocation_size,MEMF_PRIVATE);
}
#else
{
single_allocation = AllocMem(total_single_allocation_size,MEMF_ANY);
ssa = AllocMem(total_single_allocation_size,MEMF_ANY);
}
#endif /* __amigaos4__ */
if(single_allocation != NULL)
}
/* Integer overflow has occurred. */
else
{
single_allocation->ssa_Size = total_single_allocation_size;
ssa = NULL;
}
allocation = &single_allocation[1];
if(ssa != NULL)
{
ssa->ssa_Size = total_single_allocation_size;
allocation = &ssa[1];
D(("single allocation = 0x%08lx",allocation));
AddTail((struct List *)&__slab_data.sd_SingleAllocations,(struct Node *)single_allocation);
AddTail((struct List *)&__slab_data.sd_SingleAllocations,(struct Node *)ssa);
__slab_data.sd_NumSingleAllocations++;
__slab_data.sd_TotalSingleAllocationSize += total_single_allocation_size;
@ -116,18 +131,19 @@ __slab_allocate(size_t allocation_size)
D(("allocation size is <= %ld; this will be allocated from a slab",__slab_data.sd_StandardSlabSize));
/* Add room for a pointer back to the slab which
* the chunk belongs to.
*/
entry_size = sizeof(*chunk) + allocation_size;
/* Chunks must be at least as small as a MinNode, because
* that's what we use for keeping track of the chunks which
* are available for allocation within each slab.
*/
entry_size = allocation_size;
if(entry_size < sizeof(struct MinNode))
entry_size = sizeof(struct MinNode);
/* Add room for a pointer back to the slab which
* the chunk belongs to.
*/
entry_size += sizeof(*chunk);
D(("final entry size prior to picking slab size = %ld bytes",entry_size));
/* Find a slab which keeps track of chunks that are no
* larger than the amount of memory which needs to be
@ -184,9 +200,8 @@ __slab_allocate(size_t allocation_size)
{
D(("slab is no longer empty"));
/* Mark it as no longer empty. */
/* Pull it out of the list of slabs available for reuse. */
Remove((struct Node *)&sn->sn_EmptyLink);
sn->sn_EmptyDecay = 0;
}
sn->sn_UseCount++;
@ -238,7 +253,8 @@ __slab_allocate(size_t allocation_size)
Remove((struct Node *)free_node);
/* Unlink from list of slabs which keep chunks
* of the same size.
* of the same size. It will be added there
* again, at a different position.
*/
Remove((struct Node *)sn);
@ -254,21 +270,26 @@ __slab_allocate(size_t allocation_size)
*/
if(new_sn == NULL)
{
D(("no slab is available for reuse; allocating a new slab (%lu bytes)",sizeof(*sn) + __slab_data.sd_StandardSlabSize));
D(("no slab is available for reuse; allocating a new slab (%lu bytes)",sizeof(*new_sn) + __slab_data.sd_StandardSlabSize));
#if defined(__amigaos4__)
{
new_sn = (struct SlabNode *)AllocVec(sizeof(*sn) + __slab_data.sd_StandardSlabSize,MEMF_PRIVATE);
new_sn = (struct SlabNode *)AllocVec(sizeof(*new_sn) + __slab_data.sd_StandardSlabSize,MEMF_PRIVATE);
}
#else
{
new_sn = (struct SlabNode *)AllocVec(sizeof(*sn) + __slab_data.sd_StandardSlabSize,MEMF_ANY);
new_sn = (struct SlabNode *)AllocVec(sizeof(*new_sn) + __slab_data.sd_StandardSlabSize,MEMF_ANY);
}
#endif /* __amigaos4__ */
if(new_sn == NULL)
D(("slab allocation failed"));
/* If this allocation went well, try to free all currently unused
* slabs which are ready for purging. This is done so that we don't
* keep allocating new memory all the time without cutting back on
* unused slabs.
*/
purge = TRUE;
}
@ -369,7 +390,7 @@ __slab_allocate(size_t allocation_size)
if(sn->sn_EmptyDecay == 0)
{
/* Move it to the front of the list, so that
* will be collected as soon as possible.
* it will be collected as soon as possible.
*/
if(free_node != (struct MinNode *)__slab_data.sd_EmptySlabs.mlh_Head)
{
@ -405,32 +426,53 @@ __slab_free(void * address,size_t allocation_size)
/* Number of bytes allocated exceeds the slab size?
* Then the chunk was allocated separately.
*/
if(allocation_size + sizeof(*chunk) > __slab_data.sd_StandardSlabSize)
if(sizeof(*chunk) + allocation_size > __slab_data.sd_StandardSlabSize)
{
struct SlabSingleAllocation * single_allocation = address;
struct SlabSingleAllocation * ssa = address;
ULONG size;
D(("allocation size is > %ld; this was stored separately",__slab_data.sd_StandardSlabSize));
assert( __slab_data.sd_NumSingleAllocations > 0 );
/* Management information (MinNode linkage, size in bytes) precedes
* the address returned by malloc(), etc.
*/
single_allocation--;
ssa--;
size = single_allocation->ssa_Size;
/* Verify that the allocation is really on the list we
* will remove it from.
*/
#if DEBUG
{
struct MinNode * mln;
BOOL found_allocation_in_list = FALSE;
assert( sizeof(*single_allocation) + allocation_size == size );
for(mln = __slab_data.sd_SingleAllocations.mlh_Head ;
mln->mln_Succ != NULL ;
mln = mln->mln_Succ)
{
if(mln == (struct MinNode *)ssa)
{
found_allocation_in_list = TRUE;
break;
}
}
Remove((struct Node *)single_allocation);
assert( found_allocation_in_list );
}
#endif /* DEBUG */
FreeMem(single_allocation, size);
assert( __slab_data.sd_NumSingleAllocations > 0 );
__slab_data.sd_NumSingleAllocations--;
size = ssa->ssa_Size;
assert( size > 0 );
assert( sizeof(*ssa) + allocation_size == size );
assert( size <= __slab_data.sd_TotalSingleAllocationSize );
Remove((struct Node *)ssa);
FreeMem(ssa, size);
__slab_data.sd_NumSingleAllocations--;
__slab_data.sd_TotalSingleAllocationSize -= size;
D(("number of single allocations = %ld", __slab_data.sd_NumSingleAllocations));
@ -445,19 +487,18 @@ __slab_free(void * address,size_t allocation_size)
D(("allocation size is <= %ld; this was allocated from a slab",__slab_data.sd_StandardSlabSize));
/* Add room for a pointer back to the slab which
* the chunk belongs to.
*/
entry_size = sizeof(*chunk) + allocation_size;
/* Chunks must be at least as small as a MinNode, because
* that's what we use for keeping track of the chunks which
* are available for allocation within each slab.
*/
entry_size = allocation_size;
if(entry_size < sizeof(struct MinNode))
entry_size = sizeof(struct MinNode);
/* Add room for a pointer back to the slab which
* the chunk belongs to.
*/
entry_size += sizeof(*chunk);
/* Find a slab which keeps track of chunks that are no
* larger than the amount of memory which needs to be
* released. We end up picking the smallest chunk
@ -494,13 +535,79 @@ __slab_free(void * address,size_t allocation_size)
sn = chunk->sc_Parent;
#if DEBUG
{
struct SlabNode * other_sn;
BOOL slab_found = FALSE;
BOOL chunk_found = FALSE;
for(other_sn = (struct SlabNode *)slab_list->mlh_Head ;
other_sn->sn_MinNode.mln_Succ != NULL ;
other_sn = (struct SlabNode *)other_sn->sn_MinNode.mln_Succ)
{
if(other_sn == sn)
{
slab_found = TRUE;
break;
}
}
assert( slab_found );
if(slab_found)
{
struct MinNode * free_chunk;
BYTE * first_byte;
BYTE * last_byte;
first_byte = (BYTE *)&sn[1];
last_byte = &first_byte[__slab_data.sd_StandardSlabSize - chunk_size];
for(free_chunk = (struct MinNode *)first_byte ;
free_chunk <= (struct MinNode *)last_byte;
free_chunk = (struct MinNode *)(((BYTE *)free_chunk) + chunk_size))
{
if(free_chunk == (struct MinNode *)chunk)
{
chunk_found = TRUE;
break;
}
}
}
assert( chunk_found );
}
#endif /* DEBUG */
SHOWVALUE(sn->sn_ChunkSize);
assert( sn->sn_ChunkSize != 0 );
assert( sn->sn_ChunkSize == chunk_size );
D(("allocation is part of slab 0x%08lx (slab use count = %ld)",sn,sn->sn_UseCount));
AddTail((struct List *)&sn->sn_FreeList, (struct Node *)address);
#if DEBUG
{
struct MinNode * mln;
BOOL chunk_already_free = FALSE;
for(mln = sn->sn_FreeList.mlh_Head ;
mln->mln_Succ != NULL ;
mln = mln->mln_Succ)
{
if(mln == (struct MinNode *)chunk)
{
chunk_already_free = TRUE;
break;
}
}
assert( NOT chunk_already_free );
}
#endif /* DEBUG */
AddHead((struct List *)&sn->sn_FreeList, (struct Node *)chunk);
assert( sn->sn_UseCount > 0 );
@ -600,7 +707,7 @@ __slab_exit(void)
if(__slab_data.sd_InUse)
{
struct SlabSingleAllocation * single_allocation;
struct SlabSingleAllocation * ssa;
struct SlabNode * sn;
struct SlabNode * sn_next;
struct MinNode * mn;
@ -637,9 +744,9 @@ __slab_exit(void)
{
mn_next = mn->mln_Succ;
single_allocation = (struct SlabSingleAllocation *)mn;
ssa = (struct SlabSingleAllocation *)mn;
FreeMem(single_allocation, single_allocation->ssa_Size);
FreeMem(ssa, ssa->ssa_Size);
}
__slab_data.sd_InUse = FALSE;

View File

@ -1,6 +1,6 @@
#define VERSION 1
#define REVISION 210
#define DATE "22.11.2016"
#define VERS "unix.lib 1.210"
#define VSTRING "unix.lib 1.210 (22.11.2016)\r\n"
#define VERSTAG "\0$VER: unix.lib 1.210 (22.11.2016)"
#define REVISION 211
#define DATE "23.11.2016"
#define VERS "unix.lib 1.211"
#define VSTRING "unix.lib 1.211 (23.11.2016)\r\n"
#define VERSTAG "\0$VER: unix.lib 1.211 (23.11.2016)"

View File

@ -1 +1 @@
210
211