1
0
mirror of https://github.com/adtools/clib2.git synced 2025-12-08 14:59:05 +00:00

3 Commits

Author SHA1 Message Date
obarthel
5617c0eacf Slab allocator update
Unused slabs which get recycled are no longer reinitialized from scratch if their chunk size matches what the allocator needed. If the chunk size matches, the list of available chunks is left unchanged, and just the various counters are reset.

Added __get_slab_stats() function.

Added support for global __slab_purge_threshold tuning variable.

Added a short test program for the slab allocator.

The malloc-test program was linked against the wrong object file in GNUmakefile.68k. Fixed.
2016-11-27 15:53:40 +01:00
Olaf Barthel
ac710b333e Accidentally omitted from version 1.211 2016-11-24 09:45:35 +01:00
Olaf Barthel
d2acae7cd7 Slab allocator update
Added more consistency checking to the slab allocator, which is built if DEBUG is defined in "stdlib_slab.c".

Memory allocations are no longer guaranteed to be aligned to 64 bit word boundaries. In fact, this has not even worked reliably in the past 10 years.

Memory allocation request sizes are now rounded to multiples of 32 bit words (the size of an address pointer) instead to the size of a 64 bit word.

Reduced the memory footprint of the memory allocation management data structures by reusing the most significant bit of the memory allocation size. This allows many more allocations to fit into the 32 byte chunk slabs, but limits the maximum memory allocation size to a little less than 2 GBytes.

Added integer overflow checks to the memory management code.

Reduced the memory management overhead further. This cuts an additional 8 bytes per allocation, unless neither the slab allocator nor memory pools are available. With this reduction the slab allocator is able to use 16 byte chunks, which cover memory allocation requests of 1..8 bytes.

Fixed a bug caused by returning an allocation back to a slab which passed the wrong pointer.
2016-11-23 16:37:46 +01:00
29 changed files with 880 additions and 232 deletions

View File

@@ -334,6 +334,7 @@ C_LIB = \
stdlib_get_errno.o \
stdlib_get_slab_usage.o \
stdlib_get_slab_allocations.o \
stdlib_get_slab_stats.o \
stdlib_isresident.o \
stdlib_labs.o \
stdlib_llabs.o \
@@ -373,6 +374,7 @@ C_LIB = \
stdlib_showerror.o \
stdlib_slab.o \
stdlib_slab_max_size.o \
stdlib_slab_purge_threshold.o \
stdlib_srand.o \
stdlib_stacksize.o \
stdlib_stack_usage.o \
@@ -1124,25 +1126,29 @@ $(LIBC_OBJS)/stdlib_getdefstacksize.o : stdlib_getdefstacksize.c stdlib_gcc_help
$(LIBC_OBJS)/stdlib_shell_escape.o : stdlib_shell_escape.c stdlib_gcc_help.h
$(LIBC_OBJS)/stdlib_alloca.o : stdlib_alloca.c stdlib_memory.h
$(LIBC_OBJS)/stdlib_alloca.o : stdlib_alloca.c stdlib_memory.h include/stdlib.h
$(LIBC_OBJS)/stdlib_calloc.o : stdlib_calloc.c stdlib_memory.h
$(LIBC_OBJS)/stdlib_calloc.o : stdlib_calloc.c stdlib_memory.h include/stdlib.h
$(LIBC_OBJS)/stdlib_free.o : stdlib_free.c stdlib_memory.h
$(LIBC_OBJS)/stdlib_free.o : stdlib_free.c stdlib_memory.h include/stdlib.h
$(LIBC_OBJS)/stdlib_malloc.o : stdlib_malloc.c stdlib_memory.h
$(LIBC_OBJS)/stdlib_malloc.o : stdlib_malloc.c stdlib_memory.h include/stdlib.h
$(LIBC_OBJS)/stdlib_slab.o : stdlib_slab.c stdlib_memory.h
$(LIBC_OBJS)/stdlib_slab.o : stdlib_slab.c stdlib_memory.h include/stdlib.h
$(LIBC_OBJS)/stdlib_free_unused_slabs.o : stdlib_free_unused_slabs.c stdlib_memory.h
$(LIBC_OBJS)/stdlib_slab_purge_threshold.o : stdlib_slab_purge_threshold.c stdlib_memory.h include/stdlib.h
$(LIBC_OBJS)/stdlib_get_slab_usage.o : stdlib_get_slab_usage.c stdlib_memory.h
$(LIBC_OBJS)/stdlib_get_slab_stats.o : stdlib_get_slab_stats.c stdlib_memory.h include/stdlib.h
$(LIBC_OBJS)/stdlib_get_slab_allocations.o : stdlib_get_slab_allocations.c stdlib_memory.h
$(LIBC_OBJS)/stdlib_free_unused_slabs.o : stdlib_free_unused_slabs.c stdlib_memory.h include/stdlib.h
$(LIBC_OBJS)/stdlib_realloc.o : stdlib_realloc.c stdlib_memory.h
$(LIBC_OBJS)/stdlib_get_slab_usage.o : stdlib_get_slab_usage.c stdlib_memory.h include/stdlib.h
$(LIBC_OBJS)/stdlib_red_black.o : stdlib_red_black.c stdlib_memory.h
$(LIBC_OBJS)/stdlib_get_slab_allocations.o : stdlib_get_slab_allocations.c stdlib_memory.h include/stdlib.h
$(LIBC_OBJS)/stdlib_realloc.o : stdlib_realloc.c stdlib_memory.h include/stdlib.h
$(LIBC_OBJS)/stdlib_red_black.o : stdlib_red_black.c stdlib_memory.h include/stdlib.h
##############################################################################

View File

@@ -1,6 +1,6 @@
#define VERSION 1
#define REVISION 210
#define DATE "22.11.2016"
#define VERS "amiga.lib 1.210"
#define VSTRING "amiga.lib 1.210 (22.11.2016)\r\n"
#define VERSTAG "\0$VER: amiga.lib 1.210 (22.11.2016)"
#define REVISION 212
#define DATE "27.11.2016"
#define VERS "amiga.lib 1.212"
#define VSTRING "amiga.lib 1.212 (27.11.2016)\r\n"
#define VERSTAG "\0$VER: amiga.lib 1.212 (27.11.2016)"

View File

@@ -1 +1 @@
210
212

View File

@@ -1,6 +1,6 @@
#define VERSION 1
#define REVISION 210
#define DATE "22.11.2016"
#define VERS "c.lib 1.210"
#define VSTRING "c.lib 1.210 (22.11.2016)\r\n"
#define VERSTAG "\0$VER: c.lib 1.210 (22.11.2016)"
#define REVISION 212
#define DATE "27.11.2016"
#define VERS "c.lib 1.212"
#define VSTRING "c.lib 1.212 (27.11.2016)\r\n"
#define VERSTAG "\0$VER: c.lib 1.212 (27.11.2016)"

View File

@@ -1 +1 @@
210
212

View File

@@ -1,3 +1,46 @@
c.lib 1.212 (27.11.2016)
- Unused slabs which get recycled are no longer reinitialized from
scratch if their chunk size matches what the allocator needed.
If the chunk size matches, the list of available chunks is
left unchanged, and just the various counters are reset.
- Added __get_slab_stats() function.
- Added support for global __slab_purge_threshold tuning variable.
c.lib 1.211 (23.11.2016)
- Added more consistency checking to the slab allocator, which is
built if DEBUG is defined in "stdlib_slab.c".
- Memory allocations are no longer guaranteed to be aligned to
64 bit word boundaries. In fact, this has not even worked
reliably in the past 10 years.
- Memory allocation request sizes are now rounded to multiples of
32 bit words (the size of an address pointer) instead to the
size of a 64 bit word.
- Reduced the memory footprint of the memory allocation management
data structures by reusing the most significant bit of the
memory allocation size. This allows many more allocations to fit
into the 32 byte chunk slabs, but limits the maximum memory
allocation size to a little less than 2 GBytes.
- Added integer overflow checks to the memory management code.
- Reduced the memory management overhead further. This cuts an
additional 8 bytes per allocation, unless neither the slab
allocator nor memory pools are available. With this reduction
the slab allocator is able to use 16 byte chunks, which cover
memory allocation requests of 1..8 bytes.
- Fixed a bug caused by returning an allocation back to a slab
which passed the wrong pointer.
c.lib 1.210 (22.11.2016)
- Added __get_slab_allocations() function which will report information

View File

@@ -1,6 +1,6 @@
#define VERSION 1
#define REVISION 210
#define DATE "22.11.2016"
#define VERS "debug.lib 1.210"
#define VSTRING "debug.lib 1.210 (22.11.2016)\r\n"
#define VERSTAG "\0$VER: debug.lib 1.210 (22.11.2016)"
#define REVISION 212
#define DATE "27.11.2016"
#define VERS "debug.lib 1.212"
#define VSTRING "debug.lib 1.212 (27.11.2016)\r\n"
#define VERSTAG "\0$VER: debug.lib 1.212 (27.11.2016)"

View File

@@ -1 +1 @@
210
212

View File

@@ -173,6 +173,19 @@ extern int rand_r(unsigned int * seed);
extern unsigned long __slab_max_size;
/*
* The slab allocator will periodically free all currently unused memory.
* You can control how much memory should be released, instead of
* releasing everything.
*
* This would make the slab allocator release only up to 512 KBytes of
* unused memory at a time:
*
* unsigned long __slab_purge_threshold = 512 * 1024;
*/
extern unsigned long __slab_purge_threshold;
/****************************************************************************/
/*
@@ -250,6 +263,11 @@ struct __slab_usage_information
/* How many memory chunks in this slab are being used? */
size_t sui_num_chunks_used;
/* How many times was this slab reused without reinitializing
* it all over again from scratch?
*/
size_t sui_num_reused;
};
/****************************************************************************/
@@ -324,6 +342,31 @@ void __get_slab_allocations(__slab_allocation_callback callback);
/****************************************************************************/
/*
* You can obtain information about the memory managed by the slab allocator,
* as well as additional information about the slab allocator's performance
* in JSON format. This format can be used for more detailed analysis.
*
* You supply a function which will be called for each line of the JSON
* data produced. You can store this data in a file, or in the clipboard,
* for later use. Your function must return 0 if it wants to be called
* again, or return -1 if it wants to stop (e.g. if an error occurred
* when writing the JSON data to disk). The same "user_data" pointer which
* you pass to __get_slab_stats() will be passed to your callback function.
*
* Please note that this function works within the context of the memory
* allocation system and is not safe to call from interrupt code. It may
* break a Forbid() or Disable() condition.
*/
typedef int (* __slab_status_callback)(void * user_data, const char * line, size_t line_length);
/****************************************************************************/
extern void __get_slab_stats(void * user_data, __slab_status_callback callback);
/****************************************************************************/
/*
* You can request to use the alloca() variant that actually does allocate
* memory from the system rather than the current stack frame, which will

View File

@@ -220,6 +220,7 @@ C_LIB := \
stdlib_get_errno.o \
stdlib_get_slab_usage.o \
stdlib_get_slab_allocations.o \
stdlib_get_slab_stats.o \
stdlib_isresident.o \
stdlib_labs.o \
stdlib_llabs.o \
@@ -260,6 +261,7 @@ C_LIB := \
stdlib_showerror.o \
stdlib_slab.o \
stdlib_slab_max_size.o \
stdlib_slab_purge_threshold.o \
stdlib_srand.o \
stdlib_stacksize.o \
stdlib_stack_usage.o \

View File

@@ -1,6 +1,6 @@
#define VERSION 1
#define REVISION 210
#define DATE "22.11.2016"
#define VERS "m.lib 1.210"
#define VSTRING "m.lib 1.210 (22.11.2016)\r\n"
#define VERSTAG "\0$VER: m.lib 1.210 (22.11.2016)"
#define REVISION 212
#define DATE "27.11.2016"
#define VERS "m.lib 1.212"
#define VSTRING "m.lib 1.212 (27.11.2016)\r\n"
#define VERSTAG "\0$VER: m.lib 1.212 (27.11.2016)"

View File

@@ -1 +1 @@
210
212

View File

@@ -1,6 +1,6 @@
#define VERSION 1
#define REVISION 210
#define DATE "22.11.2016"
#define VERS "m881.lib 1.210"
#define VSTRING "m881.lib 1.210 (22.11.2016)\r\n"
#define VERSTAG "\0$VER: m881.lib 1.210 (22.11.2016)"
#define REVISION 212
#define DATE "27.11.2016"
#define VERS "m881.lib 1.212"
#define VSTRING "m881.lib 1.212 (27.11.2016)\r\n"
#define VERSTAG "\0$VER: m881.lib 1.212 (27.11.2016)"

View File

@@ -1 +1 @@
210
212

View File

@@ -1,6 +1,6 @@
#define VERSION 1
#define REVISION 210
#define DATE "22.11.2016"
#define VERS "net.lib 1.210"
#define VSTRING "net.lib 1.210 (22.11.2016)\r\n"
#define VERSTAG "\0$VER: net.lib 1.210 (22.11.2016)"
#define REVISION 212
#define DATE "27.11.2016"
#define VERS "net.lib 1.212"
#define VSTRING "net.lib 1.212 (27.11.2016)\r\n"
#define VERSTAG "\0$VER: net.lib 1.212 (27.11.2016)"

View File

@@ -1 +1 @@
210
212

View File

@@ -521,6 +521,7 @@ STDLIB_OBJ = \
stdlib_get_errno.o \
stdlib_get_slab_usage.o \
stdlib_get_slab_allocations.o \
stdlib_get_slab_stats.o \
stdlib_isresident.o \
stdlib_labs.o \
stdlib_ldiv.o \
@@ -552,6 +553,7 @@ STDLIB_OBJ = \
stdlib_showerror.o \
stdlib_slab.o \
stdlib_slab_max_size.o \
stdlib_slab_purge_threshold.o \
stdlib_srand.o \
stdlib_arg.o \
stdlib_stack_usage.o \
@@ -804,6 +806,10 @@ stdlib_slab.o : stdlib_slab.c stdlib_memory.h
stdlib_free_unused_slabs.o : stdlib_free_unused_slabs.c stdlib_memory.h
stdlib_slab_max_size.o : stdlib_slab_max_size.c stdlib_memory.h
stdlib_slab_purge_threshold.o : stdlib_slab_purge_threshold.c stdlib_memory.h
stdlib_get_slab_usage.o : stdlib_get_slab_usage.c stdlib_memory.h
stdlib_realloc.o : stdlib_realloc.c stdlib_memory.h

View File

@@ -31,6 +31,8 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
/*#define DEBUG*/
#ifndef _STDLIB_HEADERS_H
#include "stdlib_headers.h"
#endif /* _STDLIB_HEADERS_H */
@@ -165,7 +167,7 @@ dump_memory(unsigned char * m,int size,int ignore)
STATIC VOID
check_memory_node(struct MemoryNode * mn,const char * file,int line)
{
size_t size = mn->mn_Size;
ULONG size = GET_MN_SIZE(mn);
unsigned char * head = (unsigned char *)(mn + 1);
unsigned char * body = head + MALLOC_HEAD_SIZE;
unsigned char * tail = body + size;
@@ -227,10 +229,12 @@ check_memory_node(struct MemoryNode * mn,const char * file,int line)
if(mn->mn_AlreadyFree)
{
for(i = 0 ; i < size ; i++)
ULONG j;
for(j = 0 ; j < size ; j++)
{
if(body[i] != MALLOC_FREE_FILL)
max_body_damage = i+1;
if(body[j] != MALLOC_FREE_FILL)
max_body_damage = j+1;
}
if(max_body_damage > 0)
@@ -345,17 +349,17 @@ remove_and_free_memory_node(struct MemoryNode * mn)
__memory_lock();
Remove((struct Node *)mn);
#if defined(__USE_MEM_TREES) && defined(__MEM_DEBUG)
#if defined(__MEM_DEBUG)
{
__red_black_tree_remove(&__memory_tree,mn);
}
#endif /* __USE_MEM_TREES && __MEM_DEBUG */
Remove((struct Node *)mn);
#ifdef __MEM_DEBUG
{
allocation_size = sizeof(*mn) + MALLOC_HEAD_SIZE + mn->mn_Size + MALLOC_TAIL_SIZE;
#if defined(__USE_MEM_TREES)
{
__red_black_tree_remove(&__memory_tree,mn);
}
#endif /* __USE_MEM_TREES */
allocation_size = sizeof(*mn) + MALLOC_HEAD_SIZE + GET_MN_SIZE(mn) + MALLOC_TAIL_SIZE;
assert( allocation_size == mn->mn_AllocationSize );
@@ -363,7 +367,7 @@ remove_and_free_memory_node(struct MemoryNode * mn)
}
#else
{
allocation_size = sizeof(*mn) + mn->mn_Size;
allocation_size = sizeof(*mn) + GET_MN_SIZE(mn);
}
#endif /* __MEM_DEBUG */
@@ -371,18 +375,56 @@ remove_and_free_memory_node(struct MemoryNode * mn)
{
/* Are we using the slab allocator? */
if (__slab_data.sd_InUse)
{
__slab_free(mn,allocation_size);
}
else if (__memory_pool != NULL)
{
FreePooled(__memory_pool,mn,allocation_size);
}
else
FreeMem(mn,allocation_size);
{
#if defined(__MEM_DEBUG)
{
FreeMem(mn,allocation_size);
}
#else
{
struct MinNode * mln = (struct MinNode *)mn;
mln--;
Remove((struct Node *)mln);
FreeMem(mln,sizeof(*mln) + allocation_size);
}
#endif /* __MEM_DEBUG */
}
}
#else
{
if (__memory_pool != NULL)
{
FreePooled(__memory_pool,mn,allocation_size);
}
else
FreeMem(mn,allocation_size);
{
#if defined(__MEM_DEBUG)
{
FreeMem(mn,allocation_size);
}
#else
{
struct MinNode * mln = (struct MinNode *)mn;
mln--;
Remove((struct Node *)mln);
FreeMem(mln,sizeof(*mln) + allocation_size);
}
#endif /* __MEM_DEBUG */
}
}
#endif /* __USE_SLAB_ALLOCATOR */
@@ -401,7 +443,7 @@ __free_memory_node(struct MemoryNode * mn,const char * UNUSED file,int UNUSED li
#ifdef __MEM_DEBUG
{
size_t size = mn->mn_Size;
ULONG size = GET_MN_SIZE(mn);
check_memory_node(mn,file,line);
@@ -409,7 +451,7 @@ __free_memory_node(struct MemoryNode * mn,const char * UNUSED file,int UNUSED li
{
#ifdef __MEM_DEBUG_LOG
{
kprintf("[%s] - %10ld 0x%08lx [",__program_name,mn->mn_Size,mn->mn_Allocation);
kprintf("[%s] - %10ld 0x%08lx [",__program_name,size,mn->mn_Allocation);
if(mn->mn_File != NULL)
kprintf("allocated at %s:%ld, ",mn->mn_File,mn->mn_Line);
@@ -436,14 +478,14 @@ __free_memory_node(struct MemoryNode * mn,const char * UNUSED file,int UNUSED li
{
#ifdef __MEM_DEBUG_LOG
{
kprintf("[%s] - %10ld 0x%08lx [",__program_name,mn->mn_Size,mn->mn_Allocation);
kprintf("[%s] - %10ld 0x%08lx [",__program_name,size,mn->mn_Allocation);
kprintf("FAILED]\n");
}
#endif /* __MEM_DEBUG_LOG */
kprintf("[%s] %s:%ld:Allocation at address 0x%08lx, size %ld",
__program_name,file,line,mn->mn_Allocation,mn->mn_Size);
__program_name,file,line,mn->mn_Allocation,size);
if(mn->mn_File != NULL)
kprintf(", allocated at %s:%ld",mn->mn_File,mn->mn_Line);
@@ -467,6 +509,9 @@ __free_memory(void * ptr,BOOL force,const char * file,int line)
assert(ptr != NULL);
SHOWPOINTER(ptr);
SHOWVALUE(force);
#ifdef __MEM_DEBUG
{
/*if((rand() % 16) == 0)
@@ -480,7 +525,7 @@ __free_memory(void * ptr,BOOL force,const char * file,int line)
{
if(mn != NULL)
{
if(force || (NOT mn->mn_NeverFree))
if(force || FLAG_IS_CLEAR(mn->mn_Size, MN_SIZE_NEVERFREE))
__free_memory_node(mn,file,line);
}
else
@@ -502,7 +547,9 @@ __free_memory(void * ptr,BOOL force,const char * file,int line)
{
assert( mn != NULL );
if(mn != NULL && (force || (NOT mn->mn_NeverFree)))
SHOWVALUE(mn->mn_Size);
if(mn != NULL && (force || FLAG_IS_CLEAR(mn->mn_Size, MN_SIZE_NEVERFREE)))
__free_memory_node(mn,file,line);
}
#endif /* __MEM_DEBUG */

View File

@@ -0,0 +1,200 @@
/*
* :ts=4
*
* Portable ISO 'C' (1994) runtime library for the Amiga computer
* Copyright (c) 2002-2015 by Olaf Barthel <obarthel (at) gmx.net>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* - Neither the name of Olaf Barthel nor the names of contributors
* may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _STDLIB_HEADERS_H
#include "stdlib_headers.h"
#endif /* _STDLIB_HEADERS_H */
/****************************************************************************/
#ifndef _STDLIB_MEMORY_H
#include "stdlib_memory.h"
#endif /* _STDLIB_MEMORY_H */
/****************************************************************************/
struct context
{
int status;
void * user_data;
__slab_status_callback callback;
char * buffer;
size_t buffer_size;
};
/****************************************************************************/
static void print(struct context * ct, const char * format, ...)
{
if(ct->status == 0)
{
va_list args;
int len;
va_start(args,format);
len = vsnprintf(ct->buffer, ct->buffer_size, format, args);
va_end(args);
/* This shouldn't happen: the buffer ought to be large enough
* to hold every single line.
*/
if(len >= (int)ct->buffer_size)
len = strlen(ct->buffer);
ct->status = (*ct->callback)(ct->user_data, ct->buffer, len);
}
}
/****************************************************************************/
void
__get_slab_stats(void * user_data, __slab_status_callback callback)
{
if(__slab_data.sd_InUse)
{
static int times_checked = 1;
const struct SlabNode * sn;
size_t num_empty_slabs = 0;
size_t num_full_slabs = 0;
size_t num_slabs = 0;
size_t slab_allocation_size = 0;
size_t total_slab_allocation_size = 0;
struct context ct;
char line[1024];
char time_buffer[40];
time_t now;
struct tm when;
int i;
memset(&ct, 0, sizeof(ct));
ct.user_data = user_data;
ct.callback = callback;
ct.buffer = line;
ct.buffer_size = sizeof(line);
__memory_lock();
now = time(NULL);
localtime_r(&now, &when);
strftime(time_buffer, sizeof(time_buffer), "%Y-%m-%dT%H:%M:%S", &when);
print(&ct,"{\n");
print(&ct,"\t\"when\": \"%s\",\n", time_buffer);
print(&ct,"\t\"times_checked\": %d,\n", times_checked++);
print(&ct,"\t\"slab_size\": %zu,\n", __slab_data.sd_StandardSlabSize);
print(&ct,"\t\"num_single_allocations\": %zu,\n", __slab_data.sd_NumSingleAllocations);
print(&ct,"\t\"total_single_allocation_size\": %zu,\n", __slab_data.sd_TotalSingleAllocationSize);
if(__slab_data.sd_SingleAllocations.mlh_Head->mln_Succ != NULL)
{
const struct SlabSingleAllocation * ssa;
print(&ct,"\t\"single_allocations\": [\n");
for(ssa = (struct SlabSingleAllocation *)__slab_data.sd_SingleAllocations.mlh_Head ;
ssa->ssa_MinNode.mln_Succ != NULL && ct.status == 0 ;
ssa = (struct SlabSingleAllocation *)ssa->ssa_MinNode.mln_Succ)
{
print(&ct,"\t\t{ \"size\": %lu, \"total_size\": %lu }%s\n",
ssa->ssa_Size - sizeof(*ssa), ssa->ssa_Size,
ssa->ssa_MinNode.mln_Succ->mln_Succ != NULL ? "," : "");
}
print(&ct,"\t],\n");
}
else
{
print(&ct,"\t\"single_allocations\": [],\n");
}
for(i = 0 ; i < (int)NUM_ENTRIES(__slab_data.sd_Slabs) ; i++)
{
for(sn = (struct SlabNode *)__slab_data.sd_Slabs[i].mlh_Head ;
sn->sn_MinNode.mln_Succ != NULL ;
sn = (struct SlabNode *)sn->sn_MinNode.mln_Succ)
{
if (sn->sn_UseCount == 0)
num_empty_slabs++;
else if (sn->sn_UseCount == sn->sn_Count)
num_full_slabs++;
num_slabs++;
slab_allocation_size += sn->sn_ChunkSize * sn->sn_UseCount;
total_slab_allocation_size += sizeof(*sn) + __slab_data.sd_StandardSlabSize;
}
}
print(&ct,"\t\"num_slabs\": %zu,\n", num_slabs);
print(&ct,"\t\"num_empty_slabs\": %zu,\n", num_empty_slabs);
print(&ct,"\t\"num_full_slabs\": %zu,\n", num_full_slabs);
print(&ct,"\t\"slab_allocation_size\": %zu,\n", slab_allocation_size);
print(&ct,"\t\"total_slab_allocation_size\": %zu,\n", total_slab_allocation_size);
if(num_slabs > 0)
{
const char * eol = "";
print(&ct,"\t\"slabs\": [\n");
for(i = 0 ; i < (int)NUM_ENTRIES(__slab_data.sd_Slabs) && ct.status == 0 ; i++)
{
for(sn = (struct SlabNode *)__slab_data.sd_Slabs[i].mlh_Head ;
sn->sn_MinNode.mln_Succ != NULL && ct.status == 0 ;
sn = (struct SlabNode *)sn->sn_MinNode.mln_Succ)
{
print(&ct,"%s\t\t{ \"size\": %lu, \"chunks\": %lu, \"chunks_in_use\": %lu, \"times_reused\": %lu }",
eol,
sn->sn_ChunkSize,
sn->sn_Count,
sn->sn_UseCount,
sn->sn_NumReused);
eol = ",\n";
}
}
print(&ct,"\n\t]\n");
}
else
{
print(&ct,"\t\"slabs\": []\n");
}
print(&ct,"}\n");
__memory_unlock();
}
}

View File

@@ -87,6 +87,7 @@ __get_slab_usage(__slab_usage_callback callback)
sui.sui_chunk_size = sn->sn_ChunkSize;
sui.sui_num_chunks = sn->sn_Count;
sui.sui_num_chunks_used = sn->sn_UseCount;
sui.sui_num_reused = sn->sn_NumReused;
sui.sui_slab_index++;

View File

@@ -31,6 +31,8 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
/*#define DEBUG*/
#ifndef _STDLIB_HEADERS_H
#include "stdlib_headers.h"
#endif /* _STDLIB_HEADERS_H */
@@ -72,25 +74,6 @@ struct MinList NOCOMMON __memory_list;
/****************************************************************************/
size_t
__get_allocation_size(size_t size)
{
#ifndef __MEM_DEBUG
{
size_t total_allocation_size;
total_allocation_size = sizeof(struct MemoryNode) + size;
/* Round up the allocation size to the physical allocation granularity. */
size += ((total_allocation_size + MEM_BLOCKMASK) & ~((ULONG)MEM_BLOCKMASK)) - total_allocation_size;
}
#endif /* __MEM_DEBUG */
return(size);
}
/****************************************************************************/
void *
__allocate_memory(size_t size,BOOL never_free,const char * UNUSED debug_file_name,int UNUSED debug_line_number)
{
@@ -135,13 +118,32 @@ __allocate_memory(size_t size,BOOL never_free,const char * UNUSED debug_file_nam
}
#else
{
/* Round up the allocation size to the physical allocation granularity. */
size = __get_allocation_size(size);
/* Round up allocation to a multiple of 32 bits. */
if((size & 3) != 0)
size += 4 - (size & 3);
allocation_size = sizeof(*mn) + size;
}
#endif /* __MEM_DEBUG */
/* Integer overflow has occurred? */
if(size == 0 || allocation_size < size)
{
__set_errno(ENOMEM);
goto out;
}
/* We reuse the MemoryNode.mn_Size field to mark
* allocations are not suitable for use with
* free() and realloc(). This limits allocation
* sizes to a little less than 2 GBytes.
*/
if(allocation_size & MN_SIZE_NEVERFREE)
{
__set_errno(ENOMEM);
goto out;
}
#if defined(__USE_SLAB_ALLOCATOR)
{
/* Are we using the slab allocator? */
@@ -155,15 +157,27 @@ __allocate_memory(size_t size,BOOL never_free,const char * UNUSED debug_file_nam
}
else
{
#if defined(__amigaos4__)
{
mn = AllocMem(allocation_size,MEMF_PRIVATE);
}
#else
#ifdef __MEM_DEBUG
{
mn = AllocMem(allocation_size,MEMF_ANY);
}
#endif /* __amigaos4__ */
#else
{
struct MinNode * mln;
mln = AllocMem(sizeof(*mln) + allocation_size,MEMF_ANY);
if(mln != NULL)
{
AddTail((struct List *)&__memory_list,(struct Node *)mln);
mn = (struct MemoryNode *)&mln[1];
}
else
{
mn = NULL;
}
}
#endif /* __MEM_DEBUG */
}
}
#else
@@ -174,15 +188,27 @@ __allocate_memory(size_t size,BOOL never_free,const char * UNUSED debug_file_nam
}
else
{
#if defined(__amigaos4__)
{
mn = AllocMem(allocation_size,MEMF_PRIVATE);
}
#else
#ifdef __MEM_DEBUG
{
mn = AllocMem(allocation_size,MEMF_ANY);
}
#endif /* __amigaos4__ */
#else
{
struct MinNode * mln;
mln = AllocMem(sizeof(*mln) + allocation_size,MEMF_ANY);
if(mln != NULL)
{
AddTail((struct List *)&__memory_list,(struct Node *)mln);
mn = (struct MemoryNode *)&mln[1];
}
else
{
mn = NULL;
}
}
#endif /* __MEM_DEBUG */
}
}
#endif /* __USE_SLAB_ALLOCATOR */
@@ -193,10 +219,10 @@ __allocate_memory(size_t size,BOOL never_free,const char * UNUSED debug_file_nam
goto out;
}
mn->mn_Size = size;
mn->mn_NeverFree = never_free;
mn->mn_Size = size;
AddTail((struct List *)&__memory_list,(struct Node *)mn);
if(never_free)
SET_FLAG(mn->mn_Size, MN_SIZE_NEVERFREE);
__current_memory_allocated += allocation_size;
if(__maximum_memory_allocated < __current_memory_allocated)
@@ -212,6 +238,8 @@ __allocate_memory(size_t size,BOOL never_free,const char * UNUSED debug_file_nam
char * body = head + MALLOC_HEAD_SIZE;
char * tail = body + size;
AddTail((struct List *)&__memory_list,(struct Node *)mn);
mn->mn_AlreadyFree = FALSE;
mn->mn_Allocation = body;
mn->mn_AllocationSize = allocation_size;
@@ -475,7 +503,7 @@ STDLIB_CONSTRUCTOR(stdlib_memory_init)
#if defined(__USE_SLAB_ALLOCATOR)
{
/* ZZZ this is just for the purpose of testing */
#if 0
#if 1
{
TEXT slab_size_var[20];

View File

@@ -124,7 +124,6 @@
#define __find_memory_node __find_memory_node_debug
#define __free_memory_node __free_memory_node_debug
#define __get_allocation_size __get_allocation_size_debug
#define __allocate_memory __allocate_memory_debug
#define __memory_pool __memory_pool_debug
@@ -151,16 +150,24 @@ extern char * __getcwd(char * buffer,size_t buffer_size,const char *file,int lin
/****************************************************************************/
/* If this flag is set in mn_Size, then this memory allocation
* cannot be released with free() or used with realloc(). This
* flag is set by alloca().
*/
#define MN_SIZE_NEVERFREE (0x80000000UL)
/* This obtains the allocation size from a memory node, ignoring
* the "never free" flag altogether.
*/
#define GET_MN_SIZE(mn) ((mn)->mn_Size & ~MN_SIZE_NEVERFREE)
struct MemoryNode
{
struct MinNode mn_MinNode;
size_t mn_Size;
UBYTE mn_NeverFree;
#ifdef __MEM_DEBUG
struct MinNode mn_MinNode;
UBYTE mn_AlreadyFree;
UBYTE mn_Pad0[2];
UBYTE mn_Pad0[3];
void * mn_Allocation;
size_t mn_AllocationSize;
@@ -179,9 +186,9 @@ struct MemoryNode
UBYTE mn_Pad1[3];
#endif /* __USE_MEM_TREES */
#else
UBYTE mn_Pad0[3];
#endif /* __MEM_DEBUG */
ULONG mn_Size;
};
#ifdef __USE_MEM_TREES
@@ -228,6 +235,11 @@ struct SlabNode
/* How many chunks of this slab are currently in use? */
ULONG sn_UseCount;
/* How many times was this slab reused instead of allocating
* it from system memory?
*/
ULONG sn_NumReused;
/* This contains all the chunks of memory which are available
* for allocation.
*/
@@ -279,7 +291,7 @@ struct SlabData
*/
size_t sd_StandardSlabSize;
/* These fields kees track of how many entries there are in
/* These fields keep track of how many entries there are in
* the sd_SingleAllocations list, and how much memory these
* allocations occupy.
*/
@@ -296,6 +308,7 @@ struct SlabData
extern struct SlabData NOCOMMON __slab_data;
extern unsigned long NOCOMMON __slab_max_size;
extern unsigned long NOCOMMON __slab_purge_threshold;
/****************************************************************************/

View File

@@ -31,6 +31,8 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
/*#define DEBUG*/
#ifndef _STDLIB_HEADERS_H
#include "stdlib_headers.h"
#endif /* _STDLIB_HEADERS_H */
@@ -76,6 +78,7 @@ __realloc(void *ptr,size_t size,const char * file,int line)
#endif /* UNIX_PATH_SEMANTICS */
else
{
size_t old_size;
struct MemoryNode * mn;
BOOL reallocate;
@@ -108,29 +111,23 @@ __realloc(void *ptr,size_t size,const char * file,int line)
}
#endif /* __MEM_DEBUG */
if(mn == NULL || mn->mn_NeverFree)
if(mn == NULL || FLAG_IS_SET(mn->mn_Size, MN_SIZE_NEVERFREE))
{
SHOWMSG("cannot free this chunk");
goto out;
}
old_size = GET_MN_SIZE(mn);
/* Don't do anything unless the size of the allocation
has really changed. */
#if defined(__MEM_DEBUG)
{
reallocate = (mn->mn_Size != size);
reallocate = (old_size != size);
}
#else
{
size_t rounded_allocation_size;
/* Round the total allocation size to the operating system
granularity. */
rounded_allocation_size = __get_allocation_size(size);
assert( rounded_allocation_size >= size );
if(rounded_allocation_size > mn->mn_Size)
if(size > old_size)
{
/* Allocation size should grow. */
reallocate = TRUE;
@@ -143,7 +140,7 @@ __realloc(void *ptr,size_t size,const char * file,int line)
allocation. We also take into account that the
actual size of the allocation is affected by a
certain operating system imposed granularity. */
reallocate = (rounded_allocation_size < mn->mn_Size && rounded_allocation_size <= mn->mn_Size / 2);
reallocate = (size < old_size && size <= old_size / 2);
}
}
#endif /* __MEM_DEBUG */
@@ -152,7 +149,7 @@ __realloc(void *ptr,size_t size,const char * file,int line)
{
void * new_ptr;
D(("realloc() size has changed; old=%ld, new=%ld",mn->mn_Size,size));
D(("realloc() size has changed; old=%ld, new=%ld",old_size,size));
/* We allocate the new memory chunk before we
attempt to replace the old. */
@@ -164,8 +161,8 @@ __realloc(void *ptr,size_t size,const char * file,int line)
}
/* Copy the contents of the old allocation to the new buffer. */
if(size > mn->mn_Size)
size = mn->mn_Size;
if(size > old_size)
size = old_size;
memmove(new_ptr,ptr,size);
@@ -177,7 +174,7 @@ __realloc(void *ptr,size_t size,const char * file,int line)
}
else
{
D(("size didn't actually change that much (%ld -> %ld); returning memory block as is.",mn->mn_Size,size));
D(("size didn't actually change that much (%ld -> %ld); returning memory block as is.",old_size,size));
/* No change in size. */
result = ptr;

View File

@@ -59,42 +59,57 @@ __slab_allocate(size_t allocation_size)
{
struct SlabChunk * chunk;
void * allocation = NULL;
size_t allocation_size_with_chunk_header;
D(("allocating %lu bytes of memory",allocation_size));
assert( __slab_data.sd_StandardSlabSize > 0 );
/* Check for integer overflow. */
allocation_size_with_chunk_header = sizeof(*chunk) + allocation_size;
if(allocation_size_with_chunk_header < allocation_size)
return(NULL);
/* Number of bytes to allocate exceeds the slab size?
* If so, allocate this memory chunk separately and
* keep track of it.
*/
if(allocation_size + sizeof(*chunk) > __slab_data.sd_StandardSlabSize)
if(allocation_size_with_chunk_header > __slab_data.sd_StandardSlabSize)
{
struct SlabSingleAllocation * single_allocation;
ULONG total_single_allocation_size = sizeof(*single_allocation) + allocation_size;
struct SlabSingleAllocation * ssa;
ULONG total_single_allocation_size = sizeof(*ssa) + allocation_size;
D(("allocation size is > %ld; this will be stored separately",__slab_data.sd_StandardSlabSize));
D(("allocating %ld (MinNode+Size) + %ld = %ld bytes",sizeof(*single_allocation),allocation_size,total_single_allocation_size));
D(("allocating %ld (MinNode+Size) + %ld = %ld bytes",sizeof(*ssa),allocation_size,total_single_allocation_size));
#if defined(__amigaos4__)
/* No integer overflow? */
if(allocation_size < total_single_allocation_size)
{
single_allocation = AllocMem(total_single_allocation_size,MEMF_PRIVATE);
#if defined(__amigaos4__)
{
ssa = AllocMem(total_single_allocation_size,MEMF_PRIVATE);
}
#else
{
ssa = AllocMem(total_single_allocation_size,MEMF_ANY);
}
#endif /* __amigaos4__ */
}
#else
/* Integer overflow has occured. */
else
{
single_allocation = AllocMem(total_single_allocation_size,MEMF_ANY);
ssa = NULL;
}
#endif /* __amigaos4__ */
if(single_allocation != NULL)
if(ssa != NULL)
{
single_allocation->ssa_Size = total_single_allocation_size;
ssa->ssa_Size = total_single_allocation_size;
allocation = &single_allocation[1];
allocation = &ssa[1];
D(("single allocation = 0x%08lx",allocation));
AddTail((struct List *)&__slab_data.sd_SingleAllocations,(struct Node *)single_allocation);
AddTail((struct List *)&__slab_data.sd_SingleAllocations,(struct Node *)ssa);
__slab_data.sd_NumSingleAllocations++;
__slab_data.sd_TotalSingleAllocationSize += total_single_allocation_size;
@@ -110,24 +125,26 @@ __slab_allocate(size_t allocation_size)
else
{
struct MinList * slab_list = NULL;
BOOL slab_reused = FALSE;
ULONG entry_size;
ULONG chunk_size;
int slab_index;
D(("allocation size is <= %ld; this will be allocated from a slab",__slab_data.sd_StandardSlabSize));
/* Add room for a pointer back to the slab which
* the chunk belongs to.
*/
entry_size = sizeof(*chunk) + allocation_size;
/* Chunks must be at least as small as a MinNode, because
* that's what we use for keeping track of the chunks which
* are available for allocation within each slab.
*/
entry_size = allocation_size;
if(entry_size < sizeof(struct MinNode))
entry_size = sizeof(struct MinNode);
/* Add room for a pointer back to the slab which
* the chunk belongs to.
*/
entry_size += sizeof(*chunk);
D(("final entry size prior to picking slab size = %ld bytes",entry_size));
/* Find a slab which keeps track of chunks that are no
* larger than the amount of memory which needs to be
@@ -184,9 +201,8 @@ __slab_allocate(size_t allocation_size)
{
D(("slab is no longer empty"));
/* Mark it as no longer empty. */
/* Pull it out of the list of slabs available for reuse. */
Remove((struct Node *)&sn->sn_EmptyLink);
sn->sn_EmptyDecay = 0;
}
sn->sn_UseCount++;
@@ -237,10 +253,22 @@ __slab_allocate(size_t allocation_size)
/* Unlink from list of empty slabs. */
Remove((struct Node *)free_node);
/* Unlink from list of slabs which keep chunks
* of the same size.
/* If the chunk size of the reused slab matches
* exactly what we need then we won't have to
* completely reinitialize it again.
*/
Remove((struct Node *)sn);
if(sn->sn_ChunkSize == chunk_size)
{
slab_reused = TRUE;
}
else
{
/* Unlink from list of slabs which keep chunks
* of the same size. It will be added there
* again, at a different position.
*/
Remove((struct Node *)sn);
}
D(("reusing a slab"));
@@ -254,79 +282,100 @@ __slab_allocate(size_t allocation_size)
*/
if(new_sn == NULL)
{
D(("no slab is available for reuse; allocating a new slab (%lu bytes)",sizeof(*sn) + __slab_data.sd_StandardSlabSize));
D(("no slab is available for reuse; allocating a new slab (%lu bytes)",sizeof(*new_sn) + __slab_data.sd_StandardSlabSize));
#if defined(__amigaos4__)
{
new_sn = (struct SlabNode *)AllocVec(sizeof(*sn) + __slab_data.sd_StandardSlabSize,MEMF_PRIVATE);
new_sn = (struct SlabNode *)AllocVec(sizeof(*new_sn) + __slab_data.sd_StandardSlabSize,MEMF_PRIVATE);
}
#else
{
new_sn = (struct SlabNode *)AllocVec(sizeof(*sn) + __slab_data.sd_StandardSlabSize,MEMF_ANY);
new_sn = (struct SlabNode *)AllocVec(sizeof(*new_sn) + __slab_data.sd_StandardSlabSize,MEMF_ANY);
}
#endif /* __amigaos4__ */
if(new_sn == NULL)
D(("slab allocation failed"));
/* If this allocation went well, try to free all currently unused
* slabs which are ready for purging. This is done so that we don't
* keep allocating new memory all the time without cutting back on
* unused slabs.
*/
purge = TRUE;
}
if(new_sn != NULL)
{
struct MinNode * free_chunk;
ULONG num_free_chunks = 0;
BYTE * first_byte;
BYTE * last_byte;
D(("setting up slab 0x%08lx", new_sn));
assert( chunk_size <= __slab_data.sd_StandardSlabSize );
memset(new_sn,0,sizeof(*new_sn));
NewList((struct List *)&new_sn->sn_FreeList);
/* Split up the slab memory into individual chunks
* of the same size and keep track of them
* in the free list. The memory managed by
* this slab immediately follows the
* SlabNode header.
*/
first_byte = (BYTE *)&new_sn[1];
last_byte = &first_byte[__slab_data.sd_StandardSlabSize - chunk_size];
for(free_chunk = (struct MinNode *)first_byte ;
free_chunk <= (struct MinNode *)last_byte;
free_chunk = (struct MinNode *)(((BYTE *)free_chunk) + chunk_size))
/* Do we have to completely initialize this slab from scratch? */
if(NOT slab_reused)
{
AddTail((struct List *)&new_sn->sn_FreeList, (struct Node *)free_chunk);
num_free_chunks++;
}
struct SlabChunk * free_chunk;
ULONG num_free_chunks = 0;
BYTE * first_byte;
BYTE * last_byte;
D(("slab contains %lu chunks, %lu bytes each",num_free_chunks,chunk_size));
memset(new_sn,0,sizeof(*new_sn));
NewList((struct List *)&new_sn->sn_FreeList);
/* This slab has room for new allocations, so make sure that
* it goes to the front of the slab list. It will be used
* by the next allocation request of this size.
*/
AddHead((struct List *)slab_list,(struct Node *)new_sn);
/* Split up the slab memory into individual chunks
* of the same size and keep track of them
* in the free list. The memory managed by
* this slab immediately follows the
* SlabNode header.
*/
first_byte = (BYTE *)&new_sn[1];
last_byte = &first_byte[__slab_data.sd_StandardSlabSize - chunk_size];
for(free_chunk = (struct SlabChunk *)first_byte ;
free_chunk <= (struct SlabChunk *)last_byte;
free_chunk = (struct SlabChunk *)(((BYTE *)free_chunk) + chunk_size))
{
AddTail((struct List *)&new_sn->sn_FreeList, (struct Node *)free_chunk);
num_free_chunks++;
}
new_sn->sn_Count = num_free_chunks;
new_sn->sn_ChunkSize = chunk_size;
D(("new slab contains %lu chunks, %lu bytes each",num_free_chunks,chunk_size));
}
/* This slab was reused and need not be reinitialized from scratch. */
else
{
new_sn->sn_NumReused++;
assert( new_sn->sn_FreeList.mlh_Head != NULL );
assert( new_sn->sn_ChunkSize == chunk_size );
assert( new_sn->sn_Count == 0 );
}
/* Grab the first free chunk (there has to be one). */
chunk = (struct SlabChunk *)RemHead((struct List *)&new_sn->sn_FreeList);
assert( chunk != NULL );
/* Keep track of this chunk's parent slab. */
chunk->sc_Parent = new_sn;
assert( chunk != NULL );
assert( chunk->sc_Parent == new_sn );
allocation = &chunk[1];
D(("allocation succeeded at 0x%08lx in slab 0x%08lx (slab use count = %lu)",allocation,new_sn,new_sn->sn_UseCount+1));
/* This slab is now in use. */
new_sn->sn_UseCount = 1;
/* Set up the new slab and put it where it belongs. */
new_sn->sn_EmptyDecay = 0;
new_sn->sn_UseCount = 1;
new_sn->sn_Count = num_free_chunks;
new_sn->sn_ChunkSize = chunk_size;
SHOWVALUE(new_sn->sn_ChunkSize);
AddHead((struct List *)slab_list,(struct Node *)new_sn);
D(("allocation succeeded at 0x%08lx in slab 0x%08lx (slab use count = %lu)",allocation,new_sn,new_sn->sn_UseCount));
}
/* Mark unused slabs for purging, and purge those which
@@ -334,6 +383,8 @@ __slab_allocate(size_t allocation_size)
*/
if(purge)
{
size_t total_purged = 0;
D(("purging empty slabs"));
for(free_node = (struct MinNode *)__slab_data.sd_EmptySlabs.mlh_Head ;
@@ -359,6 +410,15 @@ __slab_allocate(size_t allocation_size)
Remove((struct Node *)sn);
FreeVec(sn);
total_purged += sizeof(*sn) + __slab_data.sd_StandardSlabSize;
/* Stop releasing memory if we reach the threshold. If no
* threshold has been set, we will free as much memory
* as possible.
*/
if(__slab_purge_threshold > 0 && total_purged >= __slab_purge_threshold)
break;
}
/* Give it another chance. */
else
@@ -369,7 +429,7 @@ __slab_allocate(size_t allocation_size)
if(sn->sn_EmptyDecay == 0)
{
/* Move it to the front of the list, so that
* will be collected as soon as possible.
* it will be collected as soon as possible.
*/
if(free_node != (struct MinNode *)__slab_data.sd_EmptySlabs.mlh_Head)
{
@@ -405,32 +465,53 @@ __slab_free(void * address,size_t allocation_size)
/* Number of bytes allocated exceeds the slab size?
* Then the chunk was allocated separately.
*/
if(allocation_size + sizeof(*chunk) > __slab_data.sd_StandardSlabSize)
if(sizeof(*chunk) + allocation_size > __slab_data.sd_StandardSlabSize)
{
struct SlabSingleAllocation * single_allocation = address;
struct SlabSingleAllocation * ssa = address;
ULONG size;
D(("allocation size is > %ld; this was stored separately",__slab_data.sd_StandardSlabSize));
assert( __slab_data.sd_NumSingleAllocations > 0 );
/* Management information (MinNode linkage, size in bytes) precedes
* the address returned by malloc(), etc.
*/
single_allocation--;
ssa--;
size = single_allocation->ssa_Size;
/* Verify that the allocation is really on the list we
* will remove it from.
*/
#if DEBUG
{
struct MinNode * mln;
BOOL found_allocation_in_list = FALSE;
assert( sizeof(*single_allocation) + allocation_size == size );
for(mln = __slab_data.sd_SingleAllocations.mlh_Head ;
mln->mln_Succ != NULL ;
mln = mln->mln_Succ)
{
if(mln == (struct MinNode *)ssa)
{
found_allocation_in_list = TRUE;
break;
}
}
Remove((struct Node *)single_allocation);
assert( found_allocation_in_list );
}
#endif /* DEBUG */
FreeMem(single_allocation, size);
assert( __slab_data.sd_NumSingleAllocations > 0 );
__slab_data.sd_NumSingleAllocations--;
size = ssa->ssa_Size;
assert( size > 0 );
assert( sizeof(*ssa) + allocation_size == size );
assert( size <= __slab_data.sd_TotalSingleAllocationSize );
Remove((struct Node *)ssa);
FreeMem(ssa, size);
__slab_data.sd_NumSingleAllocations--;
__slab_data.sd_TotalSingleAllocationSize -= size;
D(("number of single allocations = %ld", __slab_data.sd_NumSingleAllocations));
@@ -445,19 +526,18 @@ __slab_free(void * address,size_t allocation_size)
D(("allocation size is <= %ld; this was allocated from a slab",__slab_data.sd_StandardSlabSize));
/* Add room for a pointer back to the slab which
* the chunk belongs to.
*/
entry_size = sizeof(*chunk) + allocation_size;
/* Chunks must be at least as small as a MinNode, because
* that's what we use for keeping track of the chunks which
* are available for allocation within each slab.
*/
entry_size = allocation_size;
if(entry_size < sizeof(struct MinNode))
entry_size = sizeof(struct MinNode);
/* Add room for a pointer back to the slab which
* the chunk belongs to.
*/
entry_size += sizeof(*chunk);
/* Find a slab which keeps track of chunks that are no
* larger than the amount of memory which needs to be
* released. We end up picking the smallest chunk
@@ -494,13 +574,79 @@ __slab_free(void * address,size_t allocation_size)
sn = chunk->sc_Parent;
#if DEBUG
{
struct SlabNode * other_sn;
BOOL slab_found = FALSE;
BOOL chunk_found = FALSE;
for(other_sn = (struct SlabNode *)slab_list->mlh_Head ;
other_sn->sn_MinNode.mln_Succ != NULL ;
other_sn = (struct SlabNode *)other_sn->sn_MinNode.mln_Succ)
{
if(other_sn == sn)
{
slab_found = TRUE;
break;
}
}
assert( slab_found );
if(slab_found)
{
struct MinNode * free_chunk;
BYTE * first_byte;
BYTE * last_byte;
first_byte = (BYTE *)&sn[1];
last_byte = &first_byte[__slab_data.sd_StandardSlabSize - chunk_size];
for(free_chunk = (struct MinNode *)first_byte ;
free_chunk <= (struct MinNode *)last_byte;
free_chunk = (struct MinNode *)(((BYTE *)free_chunk) + chunk_size))
{
if(free_chunk == (struct MinNode *)chunk)
{
chunk_found = TRUE;
break;
}
}
}
assert( chunk_found );
}
#endif /* DEBUG */
SHOWVALUE(sn->sn_ChunkSize);
assert( sn->sn_ChunkSize != 0 );
assert( sn->sn_ChunkSize == chunk_size );
D(("allocation is part of slab 0x%08lx (slab use count = %ld)",sn,sn->sn_UseCount));
AddTail((struct List *)&sn->sn_FreeList, (struct Node *)address);
#if DEBUG
{
struct MinNode * mln;
BOOL chunk_already_free = FALSE;
for(mln = sn->sn_FreeList.mlh_Head ;
mln->mln_Succ != NULL ;
mln = mln->mln_Succ)
{
if(mln == (struct MinNode *)chunk)
{
chunk_already_free = TRUE;
break;
}
}
assert( NOT chunk_already_free );
}
#endif /* DEBUG */
AddHead((struct List *)&sn->sn_FreeList, (struct Node *)chunk);
assert( sn->sn_UseCount > 0 );
@@ -593,6 +739,21 @@ __slab_init(size_t slab_size)
/****************************************************************************/
#if DEBUG
static int print_json(void * ignore,const char * buffer,size_t len)
{
extern void kputs(const char * str);
kputs(buffer);
return(0);
}
#endif /* DEBUG */
/****************************************************************************/
void
__slab_exit(void)
{
@@ -600,12 +761,24 @@ __slab_exit(void)
if(__slab_data.sd_InUse)
{
struct SlabSingleAllocation * single_allocation;
struct SlabSingleAllocation * ssa;
struct SlabNode * sn;
struct SlabNode * sn_next;
struct MinNode * mn;
struct MinNode * mn_next;
int i;
size_t slab_count = 0, total_slab_size = 0;
size_t single_allocation_count = 0, total_single_allocation_size = 0;
int i, j;
#if DEBUG
{
kprintf("---BEGIN JSON DATA ---\n");
__get_slab_stats(NULL, print_json);
kprintf("---END JSON DATA ---\n\n");
}
#endif /* DEBUG */
D(("freeing slabs"));
@@ -613,35 +786,56 @@ __slab_exit(void)
for(i = 0 ; i < (int)NUM_ENTRIES(__slab_data.sd_Slabs) ; i++)
{
if(__slab_data.sd_Slabs[i].mlh_Head->mln_Succ != NULL)
D(("freeing slab #%ld (%lu bytes per chunk)", i, (1UL << i)));
D(("freeing slab slot #%ld (%lu bytes per chunk)", i, (1UL << i)));
for(sn = (struct SlabNode *)__slab_data.sd_Slabs[i].mlh_Head ;
for(sn = (struct SlabNode *)__slab_data.sd_Slabs[i].mlh_Head, j = 0 ;
sn->sn_MinNode.mln_Succ != NULL ;
sn = sn_next)
{
sn_next = (struct SlabNode *)sn->sn_MinNode.mln_Succ;
D((" slab #%ld.%ld at 0x%08lx",i, ++j, sn));
D((" fragmentation = %ld%%",100 * (__slab_data.sd_StandardSlabSize - sn->sn_Count * sn->sn_ChunkSize) / __slab_data.sd_StandardSlabSize));
D((" total space used = %ld (%ld%%)",sn->sn_UseCount * sn->sn_ChunkSize, 100 * sn->sn_UseCount / sn->sn_Count));
D((" number of chunks total = %ld",sn->sn_Count));
D((" number of chunks used = %ld%s",sn->sn_UseCount,sn->sn_UseCount == 0 ? " (empty)" : (sn->sn_UseCount == sn->sn_Count) ? " (full)" : ""));
D((" how often reused = %ld",sn->sn_NumReused));
total_slab_size += sizeof(*sn) + __slab_data.sd_StandardSlabSize;
slab_count++;
FreeVec(sn);
}
}
if(slab_count > 0)
D(("number of slabs = %ld, total slab size = %ld bytes",slab_count, total_slab_size));
if(__slab_data.sd_SingleAllocations.mlh_Head->mln_Succ != NULL)
D(("freeing single allocations"));
/* Free the memory allocated for each allocation which did not
* go into a slab.
*/
for(mn = __slab_data.sd_SingleAllocations.mlh_Head ;
for(mn = __slab_data.sd_SingleAllocations.mlh_Head, j = 0 ;
mn->mln_Succ != NULL ;
mn = mn_next)
{
mn_next = mn->mln_Succ;
single_allocation = (struct SlabSingleAllocation *)mn;
ssa = (struct SlabSingleAllocation *)mn;
FreeMem(single_allocation, single_allocation->ssa_Size);
D((" allocation #%ld at 0x%08lx, %lu bytes", ++j, ssa, ssa->ssa_Size));
total_single_allocation_size += ssa->ssa_Size;
single_allocation_count++;
FreeMem(ssa, ssa->ssa_Size);
}
if(single_allocation_count > 0)
D(("number of single allocations = %ld, total single allocation size = %ld", single_allocation_count, total_single_allocation_size));
__slab_data.sd_InUse = FALSE;
}

View File

@@ -0,0 +1,38 @@
/*
* :ts=4
*
* Portable ISO 'C' (1994) runtime library for the Amiga computer
* Copyright (c) 2002-2015 by Olaf Barthel <obarthel (at) gmx.net>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* - Neither the name of Olaf Barthel nor the names of contributors
* may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _STDLIB_HEADERS_H
#include "stdlib_headers.h"
#endif /* _STDLIB_HEADERS_H */
/****************************************************************************/
unsigned long __slab_purge_threshold;

View File

@@ -1,6 +1,6 @@
#define VERSION 1
#define REVISION 210
#define DATE "22.11.2016"
#define VERS "unix.lib 1.210"
#define VSTRING "unix.lib 1.210 (22.11.2016)\r\n"
#define VERSTAG "\0$VER: unix.lib 1.210 (22.11.2016)"
#define REVISION 212
#define DATE "27.11.2016"
#define VERS "unix.lib 1.212"
#define VSTRING "unix.lib 1.212 (27.11.2016)\r\n"
#define VERSTAG "\0$VER: unix.lib 1.212 (27.11.2016)"

View File

@@ -1 +1 @@
210
212

View File

@@ -13,7 +13,7 @@ DELETE = delete all quiet
.c.o:
@echo "Compiling $<"
@$(CC) -c $(CFLAGS) $<
$(CC) -c $(CFLAGS) $<
##############################################################################
@@ -31,7 +31,7 @@ WARNINGS = \
INCLUDE = -I../library/include
LIB = -L../library/lib
OPTIONS = -DNDEBUG -fno-builtin -fwritable-strings -DNO_INLINE_STDARG -DIEEE_FLOATING_POINT_SUPPORT -DVERBOSE
OPTIONS = -DNDEBUG -fno-builtin -fwritable-strings -DNO_INLINE_STDARG -DIEEE_FLOATING_POINT_SUPPORT -DVERBOSE=1
#OPTIONS = -D__MEM_DEBUG -fno-builtin
#OPTIONS = -DDEBUG -D__MEM_DEBUG -DNO_INLINE_STDARG -fno-builtin
OPTIMIZE = -O
@@ -50,14 +50,14 @@ all: test fgets_test iotest sscanf_test printf_test sprintf_test \
stack_size_test translate_test strtok_test uname simple \
fstat_stdout_test simple_sprintf date_test sscanf_64 factorial \
execvp_test setlocale rand fstat_test base_dir_nametest \
malloc-test
malloc-test slab-test
clean:
$(DELETE) #?.o #?.map test fgets_test iotest sscanf_test printf_test \
sprintf_test stack_size_test translate_test strtok_test uname \
simple fstat_stdout_test fstat_test simple_sprintf date_test sscanf_64 \
factorial execvp_test setlocale rand base_dir_nametest \
malloc-test
malloc-test slab-test
##############################################################################
@@ -145,9 +145,13 @@ rand : rand.o
@echo "Linking $@"
$(CC) $(CFLAGS) -o $@ rand.o $(LIBS) -Wl,--cref,-M,-Map=$@.map
malloc-test: malloc-test.o
malloc-test : malloc-test.o
@echo "Linking $@"
$(CC) $(CFLAGS) -o $@ rand.o $(LIBS) -Wl,--cref,-M,-Map=$@.map
$(CC) $(CFLAGS) -o $@ malloc-test.o $(LIBS) -Wl,--cref,-M,-Map=$@.map
slab-test : slab-test.o
@echo "Linking $@"
$(CC) $(CFLAGS) -o $@ slab-test.o $(LIBS) -Wl,--cref,-M,-Map=$@.map
##############################################################################

26
test_programs/slab-test.c Normal file
View File

@@ -0,0 +1,26 @@
#include <stdlib.h>
#include <stdio.h>
unsigned long __slab_max_size = 4096;
static int print_json(void * ignore,const char * buffer,size_t len)
{
fputs(buffer, stdout);
return(0);
}
int
main(int argc,char ** argv)
{
int i;
srand(1);
for(i = 0 ; i < 1000 ; i++)
malloc(1 + (rand() % 8192));
__get_slab_stats(NULL, print_json);
return(0);
}