1
0
mirror of https://github.com/adtools/clib2.git synced 2025-12-08 14:59:05 +00:00

4 Commits

Author SHA1 Message Date
7e201fea06 Maximum slab size limited, debug mode errors fixed
The maximum slab size is now 2^17 bytes (= 131072). If you request a slab size larger than this, you will get slab sizes of 131072 bytes instead.

Enabling the memory management debugging code no longer produces compiler errors.
2016-11-21 12:27:40 +01:00
799ee705e8 New monitoring function for slab allocator
Added __get_slab_usage() function which can be used to query the slab allocator memory usage at runtime.
2016-11-19 15:49:21 +01:00
3425e33cf9 New functions and data structures for slab allocator 2016-11-19 15:48:51 +01:00
ef66e530b7 This was missing from the previous commit :-( 2016-11-19 13:08:27 +01:00
25 changed files with 441 additions and 114 deletions

View File

@ -332,6 +332,7 @@ C_LIB = \
stdlib_getmemstats.o \
stdlib_getsp.o \
stdlib_get_errno.o \
stdlib_get_slab_usage.o \
stdlib_isresident.o \
stdlib_labs.o \
stdlib_llabs.o \
@ -1134,6 +1135,8 @@ $(LIBC_OBJS)/stdlib_slab.o : stdlib_slab.c stdlib_memory.h
$(LIBC_OBJS)/stdlib_free_unused_slabs.o : stdlib_free_unused_slabs.c stdlib_memory.h
$(LIBC_OBJS)/stdlib_get_slab_usage.o : stdlib_get_slab_usage.c stdlib_memory.h
$(LIBC_OBJS)/stdlib_realloc.o : stdlib_realloc.c stdlib_memory.h
$(LIBC_OBJS)/stdlib_red_black.o : stdlib_red_black.c stdlib_memory.h

View File

@ -1,6 +1,6 @@
#define VERSION 1
#define REVISION 207
#define DATE "18.11.2016"
#define VERS "amiga.lib 1.207"
#define VSTRING "amiga.lib 1.207 (18.11.2016)\r\n"
#define VERSTAG "\0$VER: amiga.lib 1.207 (18.11.2016)"
#define REVISION 209
#define DATE "21.11.2016"
#define VERS "amiga.lib 1.209"
#define VSTRING "amiga.lib 1.209 (21.11.2016)\r\n"
#define VERSTAG "\0$VER: amiga.lib 1.209 (21.11.2016)"

View File

@ -1 +1 @@
207
209

View File

@ -1,6 +1,6 @@
#define VERSION 1
#define REVISION 207
#define DATE "18.11.2016"
#define VERS "c.lib 1.207"
#define VSTRING "c.lib 1.207 (18.11.2016)\r\n"
#define VERSTAG "\0$VER: c.lib 1.207 (18.11.2016)"
#define REVISION 209
#define DATE "21.11.2016"
#define VERS "c.lib 1.209"
#define VSTRING "c.lib 1.209 (21.11.2016)\r\n"
#define VERSTAG "\0$VER: c.lib 1.209 (21.11.2016)"

View File

@ -1 +1 @@
207
209

View File

@ -1,3 +1,22 @@
c.lib 1.209 (21.11.2016)
- The maximum slab size is now 2^17 bytes (= 131072). If you request
a slab size larger than this, you will get slab sizes of 131072
bytes instead.
- Enabling the memory management debugging code no longer produces
compiler errors.
c.lib 1.208 (19.11.2016)
- Updated <stdlib.h> with new functions and data structures for
use with the slab allocator.
- Added __get_slab_usage() function which can be used to query
the slab allocator memory usage at runtime.
c.lib 1.207 (18.11.2016)
- Added a slab allocator which replaces the use of memory pools or the

View File

@ -1,6 +1,6 @@
#define VERSION 1
#define REVISION 207
#define DATE "18.11.2016"
#define VERS "debug.lib 1.207"
#define VSTRING "debug.lib 1.207 (18.11.2016)\r\n"
#define VERSTAG "\0$VER: debug.lib 1.207 (18.11.2016)"
#define REVISION 209
#define DATE "21.11.2016"
#define VERS "debug.lib 1.209"
#define VSTRING "debug.lib 1.209 (21.11.2016)\r\n"
#define VERSTAG "\0$VER: debug.lib 1.209 (21.11.2016)"

View File

@ -1 +1 @@
207
209

View File

@ -161,6 +161,104 @@ extern int rand_r(unsigned int * seed);
/****************************************************************************/
/*
* You can switch the built-in memory allocator, which is a thin wrapper
* around the AmigaOS built-in memory management system, to use a slab
* allocator. For this to work, you need to declare a global variable
* and set it to the size of the slabs to be used. This variable must
* be initialized at load time when the clib2 startup code runs:
*
* unsigned long __slab_max_size = 4096;
*/
extern unsigned long __slab_max_size;
/****************************************************************************/
/*
* If you are using the slab allocator and need to quickly release the
* memory of all slabs which are currently unused, you can call the
* following function to do so.
*
* Please note that this function works within the context of the memory
* allocation system and may not be safe to call from interrupt code.
*/
extern void __free_unused_slabs(void);
/****************************************************************************/
/*
* You can obtain runtime statistics about the slab allocator by
* invoking the __get_slab_usage() function which in turn invokes
* your callback function for each single slab currently in play.
*
* Your callback function must return 0 if it wants to be called again,
* for the next slab, or return -1 to stop. Note that your callback
* function may not be called if the slab allocator is currently
* not operational.
*
* Please note that this function works within the context of the memory
* allocation system and may not be safe to call from interrupt code.
*/
/****************************************************************************/
/* This is what your callback function will see when it is invoked. */
struct __slab_usage_information
{
/* The size of all slabs, in bytes. */
size_t sui_slab_size;
/* Number of allocations which are not managed by slabs, but
* are handled separately.
*/
size_t sui_num_single_allocations;
/* Total number of bytes allocated for memory not managed
* by slabs.
*/
size_t sui_total_single_allocation_size;
/* Number of slabs currently in play. This can be 0. */
size_t sui_num_slabs;
/* Number of currently unused slabs which contain no data. */
size_t sui_num_empty_slabs;
/* Number of slabs in use which are completely filled with data. */
size_t sui_num_full_slabs;
/* Total number of bytes allocated for all slabs. */
size_t sui_total_slab_allocation_size;
/*
* The following data is updated for each slab which
* your callback function sees.
*/
/* Index number of the slab being reported (0 = no slabs are in use). */
int sui_slab_index;
/* How large are the memory chunks managed by this slab? */
size_t sui_chunk_size;
/* How many memory chunks fit into this slab? */
size_t sui_num_chunks;
/* How many memory chunks in this slab are being used? */
size_t sui_num_chunks_used;
};
/****************************************************************************/
typedef int (*__slab_usage_callback)(const struct __slab_usage_information * sui);
/****************************************************************************/
void __get_slab_usage(__slab_usage_callback callback);
/****************************************************************************/
/*
* You can request to use the alloca() variant that actually does allocate
* memory from the system rather than the current stack frame, which will

View File

@ -218,6 +218,7 @@ C_LIB := \
stdlib_getmemstats.o \
stdlib_getsp.o \
stdlib_get_errno.o \
stdlib_get_slab_usage.o \
stdlib_isresident.o \
stdlib_labs.o \
stdlib_llabs.o \

View File

@ -1,6 +1,6 @@
#define VERSION 1
#define REVISION 207
#define DATE "18.11.2016"
#define VERS "m.lib 1.207"
#define VSTRING "m.lib 1.207 (18.11.2016)\r\n"
#define VERSTAG "\0$VER: m.lib 1.207 (18.11.2016)"
#define REVISION 209
#define DATE "21.11.2016"
#define VERS "m.lib 1.209"
#define VSTRING "m.lib 1.209 (21.11.2016)\r\n"
#define VERSTAG "\0$VER: m.lib 1.209 (21.11.2016)"

View File

@ -1 +1 @@
207
209

View File

@ -1,6 +1,6 @@
#define VERSION 1
#define REVISION 207
#define DATE "18.11.2016"
#define VERS "m881.lib 1.207"
#define VSTRING "m881.lib 1.207 (18.11.2016)\r\n"
#define VERSTAG "\0$VER: m881.lib 1.207 (18.11.2016)"
#define REVISION 209
#define DATE "21.11.2016"
#define VERS "m881.lib 1.209"
#define VSTRING "m881.lib 1.209 (21.11.2016)\r\n"
#define VERSTAG "\0$VER: m881.lib 1.209 (21.11.2016)"

View File

@ -1 +1 @@
207
209

View File

@ -1,6 +1,6 @@
#define VERSION 1
#define REVISION 207
#define DATE "18.11.2016"
#define VERS "net.lib 1.207"
#define VSTRING "net.lib 1.207 (18.11.2016)\r\n"
#define VERSTAG "\0$VER: net.lib 1.207 (18.11.2016)"
#define REVISION 209
#define DATE "21.11.2016"
#define VERS "net.lib 1.209"
#define VSTRING "net.lib 1.209 (21.11.2016)\r\n"
#define VERSTAG "\0$VER: net.lib 1.209 (21.11.2016)"

View File

@ -1 +1 @@
207
209

View File

@ -518,6 +518,8 @@ STDLIB_OBJ = \
stdlib_getenv.o \
stdlib_getmemstats.o \
stdlib_getsp.o \
stdlib_get_errno.o \
stdlib_get_slab_usage.o \
stdlib_isresident.o \
stdlib_labs.o \
stdlib_ldiv.o \
@ -801,6 +803,8 @@ stdlib_slab.o : stdlib_slab.c stdlib_memory.h
stdlib_free_unused_slabs.o : stdlib_free_unused_slabs.c stdlib_memory.h
stdlib_get_slab_usage.o : stdlib_get_slab_usage.c stdlib_memory.h
stdlib_realloc.o : stdlib_realloc.c stdlib_memory.h
stdlib_red_black.o : stdlib_red_black.c stdlib_memory.h

View File

@ -0,0 +1,80 @@
/*
* :ts=4
*
* Portable ISO 'C' (1994) runtime library for the Amiga computer
* Copyright (c) 2002-2015 by Olaf Barthel <obarthel (at) gmx.net>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* - Neither the name of Olaf Barthel nor the names of contributors
* may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _STDLIB_HEADERS_H
#include "stdlib_headers.h"
#endif /* _STDLIB_HEADERS_H */
/****************************************************************************/
#ifndef _STDLIB_MEMORY_H
#include "stdlib_memory.h"
#endif /* _STDLIB_MEMORY_H */
/****************************************************************************/
/* Free all currently unused slabs, regardless of whether they
 * are ready to be purged (SlabNode.sn_EmptyDecay == 0).
 *
 * Walks the list of empty slabs under the memory lock, unlinks each
 * slab from both lists it belongs to, and releases its memory with
 * FreeVec(). Does nothing unless the slab allocator is active
 * (__slab_data.sd_InUse).
 *
 * NOTE(review): this runs within the memory allocation system's
 * locking context and is presumably not safe to call from interrupt
 * code -- confirm against the matching note in <stdlib.h>.
 */
void
__free_unused_slabs(void)
{
	if(__slab_data.sd_InUse)
	{
		struct MinNode * free_node;
		struct MinNode * free_node_next;
		struct SlabNode * sn;

		__memory_lock();

		/* The successor must be remembered before each node is
		 * freed, because freeing invalidates the node's links.
		 */
		for(free_node = (struct MinNode *)__slab_data.sd_EmptySlabs.mlh_Head ;
		    free_node->mln_Succ != NULL ;
		    free_node = free_node_next)
		{
			free_node_next = (struct MinNode *)free_node->mln_Succ;

			/* free_node points to SlabNode.sn_EmptyLink, which
			 * directly follows the SlabNode.sn_MinNode.
			 */
			sn = (struct SlabNode *)&free_node[-1];

			/* Unlink from list of empty slabs. */
			Remove((struct Node *)free_node);

			/* Unlink from list of slabs of the same size. */
			Remove((struct Node *)sn);

			FreeVec(sn);
		}

		__memory_unlock();
	}
}

View File

@ -0,0 +1,104 @@
/*
* :ts=4
*
* Portable ISO 'C' (1994) runtime library for the Amiga computer
* Copyright (c) 2002-2015 by Olaf Barthel <obarthel (at) gmx.net>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* - Neither the name of Olaf Barthel nor the names of contributors
* may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _STDLIB_HEADERS_H
#include "stdlib_headers.h"
#endif /* _STDLIB_HEADERS_H */
/****************************************************************************/
#ifndef _STDLIB_MEMORY_H
#include "stdlib_memory.h"
#endif /* _STDLIB_MEMORY_H */
/****************************************************************************/
/* Report slab allocator usage statistics by invoking the caller's
 * callback function once for each slab currently in play.
 *
 * A first pass over all slab lists fills in the summary fields of the
 * __slab_usage_information structure (slab counts, sizes, single
 * allocations); a second pass then updates the per-slab fields and
 * invokes the callback for each slab. The callback returns 0 to be
 * called again for the next slab, or non-zero to stop the enumeration.
 *
 * Does nothing (callback is never invoked) unless the slab allocator
 * is active. The whole enumeration runs under the memory lock, so the
 * statistics are consistent for its duration.
 */
void
__get_slab_usage(__slab_usage_callback callback)
{
	if(__slab_data.sd_InUse)
	{
		struct __slab_usage_information sui;
		struct SlabNode * sn;
		BOOL stop;
		int i;

		memset(&sui,0,sizeof(sui));

		__memory_lock();

		/* Summary information which is identical for every
		 * callback invocation.
		 */
		sui.sui_slab_size = __slab_data.sd_StandardSlabSize;
		sui.sui_num_single_allocations = __slab_data.sd_NumSingleAllocations;
		sui.sui_total_single_allocation_size = __slab_data.sd_TotalSingleAllocationSize;

		/* First pass: count all slabs and classify each as empty
		 * (no chunks in use) or full (every chunk in use).
		 */
		for(i = 0 ; i < (int)NUM_ENTRIES(__slab_data.sd_Slabs) ; i++)
		{
			for(sn = (struct SlabNode *)__slab_data.sd_Slabs[i].mlh_Head ;
			    sn->sn_MinNode.mln_Succ != NULL ;
			    sn = (struct SlabNode *)sn->sn_MinNode.mln_Succ)
			{
				if (sn->sn_UseCount == 0)
					sui.sui_num_empty_slabs++;
				else if (sn->sn_Count == sn->sn_UseCount)
					sui.sui_num_full_slabs++;

				sui.sui_num_slabs++;

				/* Each slab allocation also covers its header. */
				sui.sui_total_slab_allocation_size += sizeof(*sn) + __slab_data.sd_StandardSlabSize;
			}
		}

		if(sui.sui_num_slabs > 0)
		{
			/* Second pass: report each individual slab until all
			 * have been seen or the callback asks to stop.
			 */
			for(i = 0, stop = FALSE ; NOT stop && i < (int)NUM_ENTRIES(__slab_data.sd_Slabs) ; i++)
			{
				for(sn = (struct SlabNode *)__slab_data.sd_Slabs[i].mlh_Head ;
				    sn->sn_MinNode.mln_Succ != NULL ;
				    sn = (struct SlabNode *)sn->sn_MinNode.mln_Succ)
				{
					sui.sui_chunk_size = sn->sn_ChunkSize;
					sui.sui_num_chunks = sn->sn_Count;
					sui.sui_num_chunks_used = sn->sn_UseCount;

					/* Index of the slab being reported; counting
					 * starts at 1 (0 = no slabs in use).
					 */
					sui.sui_slab_index++;

					if((*callback)(&sui) != 0)
					{
						stop = TRUE;
						break;
					}
				}
			}
		}

		__memory_unlock();
	}
}

View File

@ -92,7 +92,7 @@ __get_allocation_size(size_t size)
/****************************************************************************/
void *
__allocate_memory(size_t size,BOOL never_free,const char * UNUSED unused_file,int UNUSED unused_line)
__allocate_memory(size_t size,BOOL never_free,const char * UNUSED debug_file_name,int UNUSED debug_line_number)
{
struct MemoryNode * mn;
size_t allocation_size;
@ -215,8 +215,8 @@ __allocate_memory(size_t size,BOOL never_free,const char * UNUSED unused_file,in
mn->mn_AlreadyFree = FALSE;
mn->mn_Allocation = body;
mn->mn_AllocationSize = allocation_size;
mn->mn_File = (char *)file;
mn->mn_Line = line;
mn->mn_File = (char *)debug_file_name;
mn->mn_Line = debug_line_number;
mn->mn_FreeFile = NULL;
mn->mn_FreeLine = 0;
@ -228,7 +228,7 @@ __allocate_memory(size_t size,BOOL never_free,const char * UNUSED unused_file,in
{
kprintf("[%s] + %10ld 0x%08lx [",__program_name,size,body);
kprintf("allocated at %s:%ld]\n",file,line);
kprintf("allocated at %s:%ld]\n",debug_file_name,debug_line_number);
}
#endif /* __MEM_DEBUG_LOG */
@ -264,7 +264,7 @@ __allocate_memory(size_t size,BOOL never_free,const char * UNUSED unused_file,in
{
kprintf("[%s] + %10ld 0x%08lx [",__program_name,size,NULL);
kprintf("FAILED: allocated at %s:%ld]\n",file,line);
kprintf("FAILED: allocated at %s:%ld]\n",debug_file_name,debug_line_number);
}
}
#endif /* __MEM_DEBUG_LOG */

View File

@ -65,7 +65,6 @@
*/
#define __USE_SLAB_ALLOCATOR
/****************************************************************************/
/*
@ -201,7 +200,7 @@ struct MemoryTree
/* This keeps track of individual slabs. Each slab begins with this
* header and is followed by the memory it manages. The size of that
* memory "slab" is fixed and matches what is stored in
* SlabData.sd_MaxSlabSize.
* SlabData.sd_StandardSlabSize.
*
* Each slab manages allocations of a specific maximum size, e.g. 8, 16, 32,
* 64, etc. bytes. Multiple slabs can exist which manage allocations of the same
@ -246,13 +245,13 @@ struct SlabData
* which are 8 bytes in size, sd_Slabs[4] is for 16 byte
* chunks, etc. The minimum chunk size is 8, which is why
* lists 0..2 are not used. Currently, there is an upper limit
* of 2^31 bytes per chunk, but you should not be using slab
* of 2^17 bytes per chunk, but you should not be using slab
* chunks much larger than 4096 bytes.
*/
struct MinList sd_Slabs[31];
struct MinList sd_Slabs[17];
/* Memory allocations which are larger than the limit
* found in the sd_MaxSlabSize field are kept in this list.
* found in the sd_StandardSlabSize field are kept in this list.
* They are never associated with a slab.
*/
struct MinList sd_SingleAllocations;
@ -263,18 +262,20 @@ struct SlabData
*/
struct MinList sd_EmptySlabs;
/* This is the maximum size of a memory allocation which may
/* This is the standard size of a memory allocation which may
* be made from a slab that can accommodate it. This number
* is initialized from the __slab_max_size global variable,
* if > 0, and unless it already is a power of two, it will
* be rounded up to the next largest power of two.
*/
size_t sd_MaxSlabSize;
size_t sd_StandardSlabSize;
/* This field keeps track of how many entries there are in
* the sd_SingleAllocations list.
/* These fields keep track of how many entries there are in
* the sd_SingleAllocations list, and how much memory these
* allocations occupy.
*/
ULONG sd_NumSingleAllocations;
size_t sd_NumSingleAllocations;
size_t sd_TotalSingleAllocationSize;
/* If this is set to TRUE, then memory allocations will be
* be managed through slabs.
@ -285,7 +286,7 @@ struct SlabData
/****************************************************************************/
extern struct SlabData NOCOMMON __slab_data;
extern ULONG NOCOMMON __slab_max_size;
extern unsigned long NOCOMMON __slab_max_size;
/****************************************************************************/

View File

@ -54,17 +54,17 @@ __slab_allocate(size_t allocation_size)
D(("allocating %lu bytes of memory",allocation_size));
assert( __slab_data.sd_MaxSlabSize > 0 );
assert( __slab_data.sd_StandardSlabSize > 0 );
/* Number of bytes to allocate exceeds the slab size?
* If so, allocate this memory chunk separately and
* keep track of it.
*/
if(allocation_size > __slab_data.sd_MaxSlabSize)
if(allocation_size > __slab_data.sd_StandardSlabSize)
{
struct MinNode * single_allocation;
D(("allocation size is > %ld; this will be stored separately",__slab_data.sd_MaxSlabSize));
D(("allocation size is > %ld; this will be stored separately",__slab_data.sd_StandardSlabSize));
D(("allocating %ld (MinNode) + %ld = %ld bytes",sizeof(*single_allocation),allocation_size,sizeof(*single_allocation) + allocation_size));
#if defined(__amigaos4__)
@ -84,6 +84,7 @@ __slab_allocate(size_t allocation_size)
AddTail((struct List *)&__slab_data.sd_SingleAllocations,(struct Node *)single_allocation);
__slab_data.sd_NumSingleAllocations++;
__slab_data.sd_TotalSingleAllocationSize += sizeof(*single_allocation) + allocation_size;
allocation = &single_allocation[1];
@ -102,7 +103,7 @@ __slab_allocate(size_t allocation_size)
ULONG chunk_size;
int slab_index;
D(("allocation size is <= %ld; this will be allocated from a slab",__slab_data.sd_MaxSlabSize));
D(("allocation size is <= %ld; this will be allocated from a slab",__slab_data.sd_StandardSlabSize));
/* Chunks must be at least as small as a MinNode, because
* that's what we use for keeping track of the chunks which
@ -118,7 +119,7 @@ __slab_allocate(size_t allocation_size)
* size that still works.
*/
for(slab_index = 2, chunk_size = (1UL << slab_index) ;
slab_index < 31 ;
slab_index < (int)NUM_ENTRIES(__slab_data.sd_Slabs) ;
slab_index++, chunk_size += chunk_size)
{
if(entry_size <= chunk_size)
@ -229,15 +230,15 @@ __slab_allocate(size_t allocation_size)
*/
if(new_sn == NULL)
{
D(("no slab is available for reuse; allocating a new slab (%lu bytes)",sizeof(*sn) + __slab_data.sd_MaxSlabSize));
D(("no slab is available for reuse; allocating a new slab (%lu bytes)",sizeof(*sn) + __slab_data.sd_StandardSlabSize));
#if defined(__amigaos4__)
{
new_sn = (struct SlabNode *)AllocVec(sizeof(*sn) + __slab_data.sd_MaxSlabSize,MEMF_PRIVATE);
new_sn = (struct SlabNode *)AllocVec(sizeof(*sn) + __slab_data.sd_StandardSlabSize,MEMF_PRIVATE);
}
#else
{
new_sn = (struct SlabNode *)AllocVec(sizeof(*sn) + __slab_data.sd_MaxSlabSize,MEMF_ANY);
new_sn = (struct SlabNode *)AllocVec(sizeof(*sn) + __slab_data.sd_StandardSlabSize,MEMF_ANY);
}
#endif /* __amigaos4__ */
@ -256,7 +257,7 @@ __slab_allocate(size_t allocation_size)
D(("setting up slab 0x%08lx", new_sn));
assert( chunk_size <= __slab_data.sd_MaxSlabSize );
assert( chunk_size <= __slab_data.sd_StandardSlabSize );
memset(new_sn,0,sizeof(*new_sn));
@ -269,7 +270,7 @@ __slab_allocate(size_t allocation_size)
* SlabNode header.
*/
first_byte = (BYTE *)&new_sn[1];
last_byte = &first_byte[__slab_data.sd_MaxSlabSize - chunk_size];
last_byte = &first_byte[__slab_data.sd_StandardSlabSize - chunk_size];
for(free_chunk = (struct MinNode *)first_byte ;
free_chunk <= (struct MinNode *)last_byte;
@ -355,16 +356,16 @@ __slab_free(void * address,size_t allocation_size)
{
D(("freeing allocation at 0x%08lx, %lu bytes",address,allocation_size));
assert( __slab_data.sd_MaxSlabSize > 0 );
assert( __slab_data.sd_StandardSlabSize > 0 );
/* Number of bytes allocated exceeds the slab size?
* Then the chunk was allocated separately.
*/
if(allocation_size > __slab_data.sd_MaxSlabSize)
if(allocation_size > __slab_data.sd_StandardSlabSize)
{
struct MinNode * mn = address;
D(("allocation size is > %ld; this was stored separately",__slab_data.sd_MaxSlabSize));
D(("allocation size is > %ld; this was stored separately",__slab_data.sd_StandardSlabSize));
Remove((struct Node *)&mn[-1]);
@ -374,6 +375,10 @@ __slab_free(void * address,size_t allocation_size)
__slab_data.sd_NumSingleAllocations--;
assert( __slab_data.sd_TotalSingleAllocationSize <= sizeof(*mn) + allocation_size );
__slab_data.sd_TotalSingleAllocationSize -= sizeof(*mn) + allocation_size;
D(("number of single allocations = %ld", __slab_data.sd_NumSingleAllocations));
}
/* Otherwise the allocation should have come from a slab. */
@ -384,7 +389,7 @@ __slab_free(void * address,size_t allocation_size)
ULONG chunk_size;
int slab_index;
D(("allocation size is <= %ld; this was allocated from a slab",__slab_data.sd_MaxSlabSize));
D(("allocation size is <= %ld; this was allocated from a slab",__slab_data.sd_StandardSlabSize));
/* Chunks must be at least as small as a MinNode, because
* that's what we use for keeping track of the chunks which
@ -400,7 +405,7 @@ __slab_free(void * address,size_t allocation_size)
* size that still works.
*/
for(slab_index = 2, chunk_size = (1UL << slab_index) ;
slab_index < 31 ;
slab_index < (int)NUM_ENTRIES(__slab_data.sd_Slabs) ;
slab_index++, chunk_size += chunk_size)
{
if(entry_size <= chunk_size)
@ -417,13 +422,13 @@ __slab_free(void * address,size_t allocation_size)
/* Find the slab which contains the memory chunk. */
if(slab_list != NULL)
{
const size_t usable_range = __slab_data.sd_MaxSlabSize - chunk_size;
const size_t usable_range = __slab_data.sd_StandardSlabSize - chunk_size;
struct SlabNode * sn;
BYTE * first_byte;
BYTE * last_byte;
BOOL freed = FALSE;
assert( chunk_size <= __slab_data.sd_MaxSlabSize );
assert( chunk_size <= __slab_data.sd_StandardSlabSize );
for(sn = (struct SlabNode *)slab_list->mlh_Head ;
sn->sn_MinNode.mln_Succ != NULL ;
@ -490,15 +495,22 @@ __slab_free(void * address,size_t allocation_size)
void
__slab_init(size_t slab_size)
{
const size_t max_slab_size = (1UL << (NUM_ENTRIES(__slab_data.sd_Slabs)));
size_t size;
SETDEBUGLEVEL(2);
D(("slab_size = %ld",slab_size));
/* Do not allow for a slab size that is larger than
* what we support.
*/
if(slab_size > max_slab_size)
slab_size = max_slab_size;
/* If the maximum allocation size to be made from the slab
* is not already a power of 2, round it up. We do not
* support allocations larger than 2^31, and the maximum
* support allocations larger than 2^17, and the maximum
* allocation size should be much smaller.
*
* Note that the maximum allocation size also defines the
@ -517,17 +529,19 @@ __slab_init(size_t slab_size)
D(("activating slab allocator"));
memset(&__slab_data,0,sizeof(__slab_data));
assert( size <= slab_size );
/* Start with an empty list of slabs for each chunk size. */
for(i = 0 ; i < 31 ; i++)
for(i = 0 ; i < (int)NUM_ENTRIES(__slab_data.sd_Slabs) ; i++)
NewList((struct List *)&__slab_data.sd_Slabs[i]);
NewList((struct List *)&__slab_data.sd_SingleAllocations);
NewList((struct List *)&__slab_data.sd_EmptySlabs);
__slab_data.sd_MaxSlabSize = size;
__slab_data.sd_InUse = TRUE;
__slab_data.sd_StandardSlabSize = size;
__slab_data.sd_InUse = TRUE;
}
}
@ -536,48 +550,51 @@ __slab_init(size_t slab_size)
void
__slab_exit(void)
{
struct SlabNode * sn;
struct SlabNode * sn_next;
struct MinNode * mn;
struct MinNode * mn_next;
int i;
ENTER();
D(("freeing slabs"));
/* Free the memory allocated for each slab. */
for(i = 0 ; i < 31 ; i++)
if(__slab_data.sd_InUse)
{
if(__slab_data.sd_Slabs[i].mlh_Head->mln_Succ != NULL)
D(("freeing slab #%ld (%lu bytes per chunk)", i, (1UL << i)));
struct SlabNode * sn;
struct SlabNode * sn_next;
struct MinNode * mn;
struct MinNode * mn_next;
int i;
for(sn = (struct SlabNode *)__slab_data.sd_Slabs[i].mlh_Head ;
sn->sn_MinNode.mln_Succ != NULL ;
sn = sn_next)
D(("freeing slabs"));
/* Free the memory allocated for each slab. */
for(i = 0 ; i < (int)NUM_ENTRIES(__slab_data.sd_Slabs) ; i++)
{
sn_next = (struct SlabNode *)sn->sn_MinNode.mln_Succ;
if(__slab_data.sd_Slabs[i].mlh_Head->mln_Succ != NULL)
D(("freeing slab #%ld (%lu bytes per chunk)", i, (1UL << i)));
FreeVec(sn);
for(sn = (struct SlabNode *)__slab_data.sd_Slabs[i].mlh_Head ;
sn->sn_MinNode.mln_Succ != NULL ;
sn = sn_next)
{
sn_next = (struct SlabNode *)sn->sn_MinNode.mln_Succ;
FreeVec(sn);
}
}
if(__slab_data.sd_SingleAllocations.mlh_Head->mln_Succ != NULL)
D(("freeing single allocations"));
/* Free the memory allocated for each allocation which did not
* go into a slab.
*/
for(mn = __slab_data.sd_SingleAllocations.mlh_Head ;
mn->mln_Succ != NULL ;
mn = mn_next)
{
mn_next = mn->mln_Succ;
FreeVec(mn);
}
__slab_data.sd_InUse = FALSE;
}
if(__slab_data.sd_SingleAllocations.mlh_Head->mln_Succ != NULL)
D(("freeing single allocations"));
/* Free the memory allocated for each allocation which did not
* go into a slab.
*/
for(mn = __slab_data.sd_SingleAllocations.mlh_Head ;
mn->mln_Succ != NULL ;
mn = mn_next)
{
mn_next = mn->mln_Succ;
FreeVec(mn);
}
__slab_data.sd_InUse = FALSE;
LEAVE();
}

View File

@ -35,4 +35,4 @@
/****************************************************************************/
ULONG NOCOMMON __slab_max_size;
unsigned long __slab_max_size;

View File

@ -1,6 +1,6 @@
#define VERSION 1
#define REVISION 207
#define DATE "18.11.2016"
#define VERS "unix.lib 1.207"
#define VSTRING "unix.lib 1.207 (18.11.2016)\r\n"
#define VERSTAG "\0$VER: unix.lib 1.207 (18.11.2016)"
#define REVISION 209
#define DATE "21.11.2016"
#define VERS "unix.lib 1.209"
#define VSTRING "unix.lib 1.209 (21.11.2016)\r\n"
#define VERSTAG "\0$VER: unix.lib 1.209 (21.11.2016)"

View File

@ -1 +1 @@
207
209