If the first slab in the list of slabs which share the same chunk size has no more room, then none of the slabs following it have room either. This speeds up the search for a slab with free space, which can now stop early and proceed directly to allocating memory for a new slab. If an empty slab's decay count hits zero, it is moved to the front of the empty slab list so that it can be reclaimed more quickly. Allocations made from a slab now carry a pointer back to the slab they belong to. This speeds up deallocation, but has the downside of making the smallest usable slab chunk size 64 bytes, double the previous minimum.
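
The back pointer works as in this minimal, self-contained sketch. The names chunk_header, chunk_to_allocation and allocation_to_slab are illustrative only; the actual code below uses struct SlabChunk and struct SlabNode together with exec lists:

struct slab;                      /* stands in for struct SlabNode */

struct chunk_header               /* stands in for struct SlabChunk */
{
    struct slab * parent;         /* back pointer to the owning slab */
};

/* Allocation: the header precedes the address handed to the caller,
 * which is why every chunk carries sizeof(struct chunk_header) bytes
 * of overhead on top of the requested size.
 */
static void * chunk_to_allocation(struct chunk_header * chunk, struct slab * owner)
{
    chunk->parent = owner;

    return(&chunk[1]);
}

/* Deallocation: step back over the header to find the owning slab in
 * constant time, without searching any slab lists.
 */
static struct slab * allocation_to_slab(void * address)
{
    struct chunk_header * chunk = address;

    chunk--;

    return(chunk->parent);
}
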
/*
 * :ts=4
 *
 * Portable ISO 'C' (1994) runtime library for the Amiga computer
 * Copyright (c) 2002-2015 by Olaf Barthel <obarthel (at) gmx.net>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   - Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *
 *   - Neither the name of Olaf Barthel nor the names of contributors
 *     may be used to endorse or promote products derived from this
 *     software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*#define DEBUG*/

#ifndef _STDLIB_HEADERS_H
#include "stdlib_headers.h"
#endif /* _STDLIB_HEADERS_H */

/****************************************************************************/

#ifndef _STDLIB_MEMORY_H
#include "stdlib_memory.h"
#endif /* _STDLIB_MEMORY_H */

/****************************************************************************/

struct SlabData NOCOMMON __slab_data;

/****************************************************************************/

struct SlabChunk
{
    struct SlabNode * sc_Parent;
};
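
/* Every chunk handed out by __slab_allocate() is prefixed by this header;
 * the address returned to the caller is &chunk[1], and __slab_free() uses
 * the header to find the owning slab without searching any lists.
 */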

/****************************************************************************/

void *
__slab_allocate(size_t allocation_size)
{
    struct SlabChunk * chunk;
    void * allocation = NULL;

    D(("allocating %lu bytes of memory",allocation_size));

    assert( __slab_data.sd_StandardSlabSize > 0 );

    /* Number of bytes to allocate exceeds the slab size?
     * If so, allocate this memory chunk separately and
     * keep track of it.
     */
    if(allocation_size + sizeof(*chunk) > __slab_data.sd_StandardSlabSize)
    {
        struct SlabSingleAllocation * single_allocation;
        ULONG total_single_allocation_size = sizeof(*single_allocation) + allocation_size;

        D(("allocation size is > %ld; this will be stored separately",__slab_data.sd_StandardSlabSize));
        D(("allocating %ld (MinNode+Size) + %ld = %ld bytes",sizeof(*single_allocation),allocation_size,total_single_allocation_size));

#if defined(__amigaos4__)
        {
            single_allocation = AllocMem(total_single_allocation_size,MEMF_PRIVATE);
        }
#else
        {
            single_allocation = AllocMem(total_single_allocation_size,MEMF_ANY);
        }
#endif /* __amigaos4__ */

        if(single_allocation != NULL)
        {
            single_allocation->ssa_Size = total_single_allocation_size;

            allocation = &single_allocation[1];

            D(("single allocation = 0x%08lx",allocation));

            AddTail((struct List *)&__slab_data.sd_SingleAllocations,(struct Node *)single_allocation);

            __slab_data.sd_NumSingleAllocations++;
            __slab_data.sd_TotalSingleAllocationSize += total_single_allocation_size;

            D(("single allocation succeeded at 0x%08lx (number of single allocations = %lu)", allocation, __slab_data.sd_NumSingleAllocations));
        }
        else
        {
            D(("single allocation failed"));
        }
    }
    /* Otherwise allocate a chunk from a slab. */
    else
    {
        struct MinList * slab_list = NULL;
        ULONG entry_size;
        ULONG chunk_size;
        int slab_index;

        D(("allocation size is <= %ld; this will be allocated from a slab",__slab_data.sd_StandardSlabSize));

        /* Chunks must be at least as large as a MinNode, because
         * that's what we use for keeping track of the chunks which
         * are available for allocation within each slab.
         */
        entry_size = allocation_size;
        if(entry_size < sizeof(struct MinNode))
            entry_size = sizeof(struct MinNode);

        /* Add room for a pointer back to the slab which
         * the chunk belongs to.
         */
        entry_size += sizeof(*chunk);

        /* Find a slab which keeps track of chunks that are large
         * enough to hold the amount of memory which needs to be
         * allocated. We end up picking the smallest chunk
         * size that still works.
         */
        for(slab_index = 2, chunk_size = (1UL << slab_index) ;
            slab_index < (int)NUM_ENTRIES(__slab_data.sd_Slabs) ;
            slab_index++, chunk_size += chunk_size)
        {
            if(entry_size <= chunk_size)
            {
                D(("using slab #%ld (%lu bytes per chunk)", slab_index, chunk_size));

                assert( (chunk_size % sizeof(LONG)) == 0 );

                slab_list = &__slab_data.sd_Slabs[slab_index];
                break;
            }
        }
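
        /* For example, assuming 32 bit pointers (sizeof(*chunk) == 4), a
         * request for 20 bytes yields entry_size = 24, and the loop above
         * settles on slab #5, whose 32 byte chunks provide the smallest
         * power of two which still fits.
         */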

        if(slab_list != NULL)
        {
            struct SlabNode * sn;

            SHOWVALUE(chunk_size);

            /* The slab list is organized in such a way that the first
             * entry always has a free chunk ready for allocation. If
             * there is no such free chunk, it means that no other
             * slab nodes in this list have any free chunks.
             */
            sn = (struct SlabNode *)slab_list->mlh_Head;

            /* Make sure that the slab list is not empty. */
            if(sn->sn_MinNode.mln_Succ != NULL)
            {
                D(("slab = 0x%08lx, chunk size = %ld", sn, sn->sn_ChunkSize));

                assert( sn->sn_ChunkSize == chunk_size );

                chunk = (struct SlabChunk *)RemHead((struct List *)&sn->sn_FreeList);
                if(chunk != NULL)
                {
                    /* Keep track of this chunk's parent slab. */
                    chunk->sc_Parent = sn;

                    allocation = &chunk[1];

                    D(("allocation succeeded at 0x%08lx in slab 0x%08lx (slab use count = %lu)",allocation,sn,sn->sn_UseCount));

                    /* Was this slab empty before we began using it again? */
                    if(sn->sn_UseCount == 0)
                    {
                        D(("slab is no longer empty"));

                        /* Mark it as no longer empty. */
                        Remove((struct Node *)&sn->sn_EmptyLink);
                        sn->sn_EmptyDecay = 0;
                    }

                    sn->sn_UseCount++;

                    /* Is this slab now fully utilized? Move it to the
                     * end of the queue so that it will not be checked
                     * before other slabs of the same size have been
                     * tested. Those at the front of the queue should
                     * still have room left.
                     */
                    if(sn->sn_UseCount == sn->sn_Count && sn != (struct SlabNode *)slab_list->mlh_TailPred)
                    {
                        D(("slab is full"));

                        Remove((struct Node *)sn);
                        AddTail((struct List *)slab_list, (struct Node *)sn);
                    }
                }
            }

            /* There is no slab with a free chunk? Then we might have to
             * allocate a new one.
             */
            if(allocation == NULL)
            {
                struct MinNode * free_node;
                struct MinNode * free_node_next;
                struct SlabNode * new_sn = NULL;
                BOOL purge = FALSE;

                D(("no slab is available which still has free room"));

                /* Try to recycle an empty (unused) slab, if possible. */
                for(free_node = (struct MinNode *)__slab_data.sd_EmptySlabs.mlh_Head ;
                    free_node->mln_Succ != NULL ;
                    free_node = free_node_next)
                {
                    free_node_next = (struct MinNode *)free_node->mln_Succ;

                    /* free_node points to SlabNode.sn_EmptyLink, which
                     * directly follows the SlabNode.sn_MinNode.
                     */
                    sn = (struct SlabNode *)&free_node[-1];

                    /* Is this empty slab ready to be reused? */
                    if(sn->sn_EmptyDecay == 0)
                    {
                        /* Unlink from list of empty slabs. */
                        Remove((struct Node *)free_node);

                        /* Unlink from list of slabs which keep chunks
                         * of the same size.
                         */
                        Remove((struct Node *)sn);

                        D(("reusing a slab"));

                        new_sn = sn;
                        break;
                    }
                }
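
                /* No empty slab is recycled unless its decay counter has
                 * already reached zero. The counter starts out at 1 when
                 * __slab_free() empties a slab and is counted down by the
                 * purge pass below, which also moves decayed slabs to the
                 * front of the empty list so they are found quickly here.
                 */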

                /* We couldn't reuse an empty slab? Then we'll have to allocate
                 * memory for another one.
                 */
                if(new_sn == NULL)
                {
                    D(("no slab is available for reuse; allocating a new slab (%lu bytes)",sizeof(*sn) + __slab_data.sd_StandardSlabSize));

#if defined(__amigaos4__)
                    {
                        new_sn = (struct SlabNode *)AllocVec(sizeof(*sn) + __slab_data.sd_StandardSlabSize,MEMF_PRIVATE);
                    }
#else
                    {
                        new_sn = (struct SlabNode *)AllocVec(sizeof(*sn) + __slab_data.sd_StandardSlabSize,MEMF_ANY);
                    }
#endif /* __amigaos4__ */

                    if(new_sn == NULL)
                        D(("slab allocation failed"));

                    purge = TRUE;
                }

                if(new_sn != NULL)
                {
                    struct MinNode * free_chunk;
                    ULONG num_free_chunks = 0;
                    BYTE * first_byte;
                    BYTE * last_byte;

                    D(("setting up slab 0x%08lx", new_sn));

                    assert( chunk_size <= __slab_data.sd_StandardSlabSize );

                    memset(new_sn,0,sizeof(*new_sn));

                    NewList((struct List *)&new_sn->sn_FreeList);

                    /* Split up the slab memory into individual chunks
                     * of the same size and keep track of them
                     * in the free list. The memory managed by
                     * this slab immediately follows the
                     * SlabNode header.
                     */
                    first_byte = (BYTE *)&new_sn[1];
                    last_byte = &first_byte[__slab_data.sd_StandardSlabSize - chunk_size];

                    for(free_chunk = (struct MinNode *)first_byte ;
                        free_chunk <= (struct MinNode *)last_byte ;
                        free_chunk = (struct MinNode *)(((BYTE *)free_chunk) + chunk_size))
                    {
                        AddTail((struct List *)&new_sn->sn_FreeList, (struct Node *)free_chunk);
                        num_free_chunks++;
                    }
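
                    /* Each slab therefore holds exactly
                     * sd_StandardSlabSize / chunk_size chunks; both are
                     * powers of two, and the only overhead is one SlabNode
                     * header per slab plus one sc_Parent pointer per chunk.
                     */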

                    D(("slab contains %lu chunks, %lu bytes each",num_free_chunks,chunk_size));

                    /* Grab the first free chunk (there has to be one). */
                    chunk = (struct SlabChunk *)RemHead((struct List *)&new_sn->sn_FreeList);

                    assert( chunk != NULL );

                    /* Keep track of this chunk's parent slab. */
                    chunk->sc_Parent = new_sn;

                    allocation = &chunk[1];

                    D(("allocation succeeded at 0x%08lx in slab 0x%08lx (slab use count = %lu)",allocation,new_sn,new_sn->sn_UseCount+1));

                    /* Set up the new slab and put it where it belongs. */
                    new_sn->sn_EmptyDecay = 0;
                    new_sn->sn_UseCount = 1;
                    new_sn->sn_Count = num_free_chunks;
                    new_sn->sn_ChunkSize = chunk_size;

                    SHOWVALUE(new_sn->sn_ChunkSize);

                    AddHead((struct List *)slab_list,(struct Node *)new_sn);
                }
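
                /* Adding the new slab at the head preserves the invariant
                 * that if any slab in the list has a free chunk, the first
                 * one does.
                 */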

                /* Mark unused slabs for purging, and purge those which
                 * are ready to be purged.
                 */
                if(purge)
                {
                    D(("purging empty slabs"));

                    for(free_node = (struct MinNode *)__slab_data.sd_EmptySlabs.mlh_Head ;
                        free_node->mln_Succ != NULL ;
                        free_node = free_node_next)
                    {
                        free_node_next = (struct MinNode *)free_node->mln_Succ;

                        /* free_node points to SlabNode.sn_EmptyLink, which
                         * directly follows the SlabNode.sn_MinNode.
                         */
                        sn = (struct SlabNode *)&free_node[-1];

                        /* Is this empty slab ready to be purged? */
                        if(sn->sn_EmptyDecay == 0)
                        {
                            D(("freeing empty slab"));

                            /* Unlink from list of empty slabs. */
                            Remove((struct Node *)free_node);

                            /* Unlink from list of slabs of the same size. */
                            Remove((struct Node *)sn);

                            FreeVec(sn);
                        }
                        /* Give it another chance. */
                        else
                        {
                            sn->sn_EmptyDecay--;

                            /* Is this slab ready for reuse now? */
                            if(sn->sn_EmptyDecay == 0)
                            {
                                /* Move it to the front of the list, so that
                                 * it will be picked up as soon as possible.
                                 */
                                if(free_node != (struct MinNode *)__slab_data.sd_EmptySlabs.mlh_Head)
                                {
                                    Remove((struct Node *)free_node);
                                    AddHead((struct List *)&__slab_data.sd_EmptySlabs,(struct Node *)free_node);
                                }
                            }
                        }
                    }
                }
            }
        }
        else
        {
            D(("no matching slab found"));
        }
    }

    return(allocation);
}
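
/* Sketch of a round trip through the allocator: the caller has to
 * remember the original allocation size, because __slab_free() takes
 * it as an argument to find the matching slab list again.
 *
 *     void * p = __slab_allocate(100);
 *     if(p != NULL)
 *         __slab_free(p, 100);
 */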

/****************************************************************************/

void
__slab_free(void * address,size_t allocation_size)
{
    struct SlabChunk * chunk;

    D(("freeing allocation at 0x%08lx, %lu bytes",address,allocation_size));

    assert( __slab_data.sd_StandardSlabSize > 0 );

    /* Number of bytes allocated exceeds the slab size?
     * Then the chunk was allocated separately.
     */
    if(allocation_size + sizeof(*chunk) > __slab_data.sd_StandardSlabSize)
    {
        struct SlabSingleAllocation * single_allocation = address;
        ULONG size;

        D(("allocation size is > %ld; this was stored separately",__slab_data.sd_StandardSlabSize));

        /* Management information (MinNode linkage, size in bytes) precedes
         * the address returned by malloc(), etc.
         */
        single_allocation--;

        size = single_allocation->ssa_Size;

        assert( sizeof(*single_allocation) + allocation_size == size );

        Remove((struct Node *)single_allocation);

        FreeMem(single_allocation, size);

        assert( __slab_data.sd_NumSingleAllocations > 0 );

        __slab_data.sd_NumSingleAllocations--;

        assert( size <= __slab_data.sd_TotalSingleAllocationSize );

        __slab_data.sd_TotalSingleAllocationSize -= size;

        D(("number of single allocations = %ld", __slab_data.sd_NumSingleAllocations));
    }
    /* Otherwise the allocation should have come from a slab. */
    else
    {
        struct MinList * slab_list = NULL;
        size_t entry_size;
        ULONG chunk_size;
        int slab_index;

        D(("allocation size is <= %ld; this was allocated from a slab",__slab_data.sd_StandardSlabSize));

        /* Chunks must be at least as large as a MinNode, because
         * that's what we use for keeping track of the chunks which
         * are available for allocation within each slab.
         */
        entry_size = allocation_size;
        if(entry_size < sizeof(struct MinNode))
            entry_size = sizeof(struct MinNode);

        /* Add room for a pointer back to the slab which
         * the chunk belongs to.
         */
        entry_size += sizeof(*chunk);

        /* Find a slab which keeps track of chunks that are large
         * enough to hold the amount of memory which needs to be
         * released. We end up picking the smallest chunk
         * size that still works.
         */
        for(slab_index = 2, chunk_size = (1UL << slab_index) ;
            slab_index < (int)NUM_ENTRIES(__slab_data.sd_Slabs) ;
            slab_index++, chunk_size += chunk_size)
        {
            if(entry_size <= chunk_size)
            {
                D(("using slab #%ld (%ld bytes per chunk)", slab_index, chunk_size));

                assert( (chunk_size % sizeof(LONG)) == 0 );

                slab_list = &__slab_data.sd_Slabs[slab_index];
                break;
            }
        }

        /* Pick the slab which contains the memory chunk. */
        if(slab_list != NULL)
        {
            struct SlabNode * sn;

            assert( chunk_size <= __slab_data.sd_StandardSlabSize );

            /* The pointer back to the slab which this chunk belongs
             * to precedes the address which __slab_allocate()
             * returned.
             */
            chunk = address;
            chunk--;

            sn = chunk->sc_Parent;
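
            /* This back pointer is what makes deallocation fast: no slab
             * list has to be searched for the slab which contains the
             * address being freed.
             */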

            SHOWVALUE(sn->sn_ChunkSize);

            assert( sn->sn_ChunkSize == chunk_size );

            D(("allocation is part of slab 0x%08lx (slab use count = %ld)",sn,sn->sn_UseCount));

            /* Return the chunk, header included, to the free list of its
             * parent slab; the free list links the chunk base addresses,
             * just as when the slab was first split up.
             */
            AddTail((struct List *)&sn->sn_FreeList, (struct Node *)chunk);

            assert( sn->sn_UseCount > 0 );

            sn->sn_UseCount--;

            /* If this slab is empty, mark it as unused and
             * allow it to be purged.
             */
            if(sn->sn_UseCount == 0)
            {
                D(("slab is now empty"));

                AddTail((struct List *)&__slab_data.sd_EmptySlabs,(struct Node *)&sn->sn_EmptyLink);
                sn->sn_EmptyDecay = 1;
            }

            /* This slab now has room. Move it to the front of the list
             * so that searching for a free chunk will pick it
             * first.
             */
            if(sn != (struct SlabNode *)slab_list->mlh_Head)
            {
                D(("moving slab to the head of the list"));

                Remove((struct Node *)sn);
                AddHead((struct List *)slab_list, (struct Node *)sn);
            }
        }
        else
        {
            D(("no matching slab found"));
        }
    }
}

/****************************************************************************/

void
__slab_init(size_t slab_size)
{
    const size_t max_slab_size = (1UL << (NUM_ENTRIES(__slab_data.sd_Slabs)));
    size_t size;

    SETDEBUGLEVEL(2);

    D(("slab_size = %ld",slab_size));

    /* Do not allow for a slab size that is larger than
     * what we support.
     */
    if(slab_size > max_slab_size)
        slab_size = max_slab_size;

    /* If the maximum allocation size to be made from the slab
     * is not already a power of 2, round it up. We do not
     * support allocations larger than 2^17, and the maximum
     * allocation size should be much smaller.
     *
     * Note that the maximum allocation size also defines the
     * amount of memory which each slab manages.
     */
    size = sizeof(struct MinNode);
    while(size < slab_size && (size & 0x80000000) == 0)
        size += size;
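
    /* For example, a slab_size of 4096 stays at 4096, while 3000 would
     * be rounded up to 4096. The loop stops early if doubling reaches
     * bit 31, which the sanity check below treats as an unusable size.
     */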

    D(("size = %lu",size));

    /* If the slab size looks sound, enable the slab memory allocator. */
    if((size & 0x80000000) == 0)
    {
        int i;

        D(("activating slab allocator"));

        memset(&__slab_data,0,sizeof(__slab_data));

        /* Rounding can only have increased the size. */
        assert( slab_size <= size );

        /* Start with an empty list of slabs for each chunk size. */
        for(i = 0 ; i < (int)NUM_ENTRIES(__slab_data.sd_Slabs) ; i++)
            NewList((struct List *)&__slab_data.sd_Slabs[i]);

        NewList((struct List *)&__slab_data.sd_SingleAllocations);
        NewList((struct List *)&__slab_data.sd_EmptySlabs);

        __slab_data.sd_StandardSlabSize = size;
        __slab_data.sd_InUse = TRUE;
    }
}
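
/* Sketch of the allocator's life cycle: __slab_init() must run (and
 * succeed in setting sd_StandardSlabSize) before the first call to
 * __slab_allocate(), and __slab_exit() releases all slabs and single
 * allocations in one sweep.
 *
 *     __slab_init(4096);
 *     ...
 *     __slab_exit();
 */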

/****************************************************************************/

void
__slab_exit(void)
{
    ENTER();

    if(__slab_data.sd_InUse)
    {
        struct SlabSingleAllocation * single_allocation;
        struct SlabNode * sn;
        struct SlabNode * sn_next;
        struct MinNode * mn;
        struct MinNode * mn_next;
        int i;

        D(("freeing slabs"));

        /* Free the memory allocated for each slab. */
        for(i = 0 ; i < (int)NUM_ENTRIES(__slab_data.sd_Slabs) ; i++)
        {
            if(__slab_data.sd_Slabs[i].mlh_Head->mln_Succ != NULL)
                D(("freeing slab #%ld (%lu bytes per chunk)", i, (1UL << i)));

            for(sn = (struct SlabNode *)__slab_data.sd_Slabs[i].mlh_Head ;
                sn->sn_MinNode.mln_Succ != NULL ;
                sn = sn_next)
            {
                sn_next = (struct SlabNode *)sn->sn_MinNode.mln_Succ;

                FreeVec(sn);
            }
        }

        if(__slab_data.sd_SingleAllocations.mlh_Head->mln_Succ != NULL)
            D(("freeing single allocations"));

        /* Free the memory allocated for each allocation which did not
         * go into a slab.
         */
        for(mn = __slab_data.sd_SingleAllocations.mlh_Head ;
            mn->mln_Succ != NULL ;
            mn = mn_next)
        {
            mn_next = mn->mln_Succ;

            single_allocation = (struct SlabSingleAllocation *)mn;

            FreeMem(single_allocation, single_allocation->ssa_Size);
        }

        __slab_data.sd_InUse = FALSE;
    }

    LEAVE();
}