1
0
mirror of https://github.com/adtools/clib2.git synced 2025-12-08 14:59:05 +00:00

8 Commits

Author SHA1 Message Date
Olaf Barthel
f8cf752e6a Merge branch 'master' of https://github.com/adtools/clib2 2016-11-22 11:07:46 +01:00
Olaf Barthel
0c5b88d2d3 Slab allocator changes
If the first slab in the list of slabs which share the same chunk size has no more room, it means that all other slabs following it have no room either. This speeds up the test to find a slab with free space, which can now abort and directly proceed to allocate memory for a new slab.

If an empty slab's decay count hits zero, it is moved to the front of the empty slab list to be reclaimed more quickly.

Allocations made from the slab now carry a pointer back to the slab which they are a part of. This speeds up deallocation but has the downside of making the smallest usable slab chunk size 64 bytes, which is double what used to be the minimum before.
2016-11-22 11:07:38 +01:00
Olaf Barthel
1ea8953bd3 Added __get_slab_allocations() function
__get_slab_allocations() which will report information about each memory allocation made by the slab allocator which does not come from a slab.
2016-11-22 11:06:29 +01:00
Olaf Barthel
ff908f8a02 Added a malloc test program 2016-11-22 10:54:58 +01:00
Jens Maus
525e193113 added missing .codeclimate.yml and .travis.yml 2016-11-22 10:52:01 +01:00
Jens Maus
2df2393b81 minor tweak 2016-11-22 10:50:03 +01:00
Jens Maus
ecd40943e2 added Travis-CI and CodeClimate code check support. 2016-11-22 10:46:01 +01:00
Olaf Barthel
7e201fea06 Maximum slab size limited, debug mode errors fixed
The maximum slab size is now 2^17 bytes (= 131072). If you request a slab size larger than this, you will get slab sizes of 131072 bytes instead.

Enabling the memory management debugging code no longer produces compiler errors.
2016-11-21 12:27:40 +01:00
29 changed files with 959 additions and 157 deletions

14
.codeclimate.yml Normal file
View File

@@ -0,0 +1,14 @@
---
engines:
duplication:
enabled: false
fixme:
enabled: true
markdownlint:
enabled: true
ratings:
paths:
- "**.c"
- "**.h"
- "**.l"
- "**.md"

36
.travis.yml Normal file
View File

@@ -0,0 +1,36 @@
sudo: required
dist: trusty
language: c
# download and install our required cross compilers
install:
# Make sure we can install i386 packages as some adtools binaries
# requires i386 libraries being installed to work in the 64bit env
# of Travis
- sudo dpkg --add-architecture i386
- sudo apt-get -qq update || true
- sudo apt-get -qq install libc6:i386
# Install all adtools related stuff we need
- curl -L https://dl.bintray.com/jens-maus/adtools/adtools-utils.tar.bz2 | sudo tar xj -C /
- if [[ ${BUILD} =~ os3|release ]]; then curl -L https://dl.bintray.com/jens-maus/adtools/adtools-m68k-amigaos.tar.bz2 | sudo tar xj -C / ; fi
- if [[ ${BUILD} =~ os4|release ]]; then curl -L https://dl.bintray.com/jens-maus/adtools/adtools-ppc-amigaos.tar.bz2 | sudo tar xj -C / ; fi
- if [[ ${BUILD} =~ mos|release ]]; then curl -L https://dl.bintray.com/jens-maus/adtools/adtools-ppc-morphos.tar.bz2 | sudo tar xj -C / ; fi
- if [[ ${BUILD} =~ aros-ppc|release ]]; then curl -L https://dl.bintray.com/jens-maus/adtools/adtools-ppc-aros.tar.bz2 | sudo tar xj -C / ; fi
- if [[ ${BUILD} =~ aros-i386|release ]]; then curl -L https://dl.bintray.com/jens-maus/adtools/adtools-i386-aros.tar.bz2 | sudo tar xj -C / ; fi
- if [[ ${BUILD} =~ aros-x86_64|release ]]; then curl -L https://dl.bintray.com/jens-maus/adtools/adtools-x86_64-aros.tar.bz2 | sudo tar xj -C / ; fi
- if [[ ${BUILD} =~ mingw32|release ]]; then sudo apt-get -qq install binutils-mingw-w64-i686 gcc-mingw-w64-i686 ; fi
# set the PATH variable to the directories the cross compilers are installed.
before_script:
- export PATH=/usr/local/amiga/bin:/opt/m68k-amigaos/bin:/opt/ppc-amigaos/bin:/opt/ppc-morphos/bin:${PATH}
# specify a list of variables to test (here we test the build for our supported
# list of operating systems).
env:
- BUILD="-f GNUmakefile.68k OS=os3"
- BUILD="-f GNUmakefile.os4 OS=os4"
# the build command to execute for each test
script:
- make -C library -j1 ${BUILD}

View File

@@ -1,4 +1,9 @@
# An ISO 'C' (1994) compliant runtime library for the Amiga
# clib2 An ISO 'C' (1994) compliant runtime library for AmigaOS
[![Build Status](https://travis-ci.org/adtools/clib2.svg?branch=master)](https://travis-ci.org/adtools/clib2)
[![Code Climate](https://codeclimate.com/github/adtools/clib2/badges/gpa.svg)](https://codeclimate.com/github/adtools/clib2)
[![License](https://img.shields.io/badge/License-BSD%203--Clause-blue.svg)](https://opensource.org/licenses/BSD-3-Clause)
[![Github Issues](http://githubbadges.herokuapp.com/adtools/clib2/issues.svg)](https://github.com/adtools/clib2/issues)
## What is this?

View File

@@ -333,6 +333,7 @@ C_LIB = \
stdlib_getsp.o \
stdlib_get_errno.o \
stdlib_get_slab_usage.o \
stdlib_get_slab_allocations.o \
stdlib_isresident.o \
stdlib_labs.o \
stdlib_llabs.o \
@@ -1137,6 +1138,8 @@ $(LIBC_OBJS)/stdlib_free_unused_slabs.o : stdlib_free_unused_slabs.c stdlib_memo
$(LIBC_OBJS)/stdlib_get_slab_usage.o : stdlib_get_slab_usage.c stdlib_memory.h
$(LIBC_OBJS)/stdlib_get_slab_allocations.o : stdlib_get_slab_allocations.c stdlib_memory.h
$(LIBC_OBJS)/stdlib_realloc.o : stdlib_realloc.c stdlib_memory.h
$(LIBC_OBJS)/stdlib_red_black.o : stdlib_red_black.c stdlib_memory.h

View File

@@ -1,6 +1,6 @@
#define VERSION 1
#define REVISION 208
#define DATE "19.11.2016"
#define VERS "amiga.lib 1.208"
#define VSTRING "amiga.lib 1.208 (19.11.2016)\r\n"
#define VERSTAG "\0$VER: amiga.lib 1.208 (19.11.2016)"
#define REVISION 210
#define DATE "22.11.2016"
#define VERS "amiga.lib 1.210"
#define VSTRING "amiga.lib 1.210 (22.11.2016)\r\n"
#define VERSTAG "\0$VER: amiga.lib 1.210 (22.11.2016)"

View File

@@ -1 +1 @@
208
210

View File

@@ -1,6 +1,6 @@
#define VERSION 1
#define REVISION 208
#define DATE "19.11.2016"
#define VERS "c.lib 1.208"
#define VSTRING "c.lib 1.208 (19.11.2016)\r\n"
#define VERSTAG "\0$VER: c.lib 1.208 (19.11.2016)"
#define REVISION 210
#define DATE "22.11.2016"
#define VERS "c.lib 1.210"
#define VSTRING "c.lib 1.210 (22.11.2016)\r\n"
#define VERSTAG "\0$VER: c.lib 1.210 (22.11.2016)"

View File

@@ -1 +1 @@
208
210

View File

@@ -1,3 +1,34 @@
c.lib 1.210 (22.11.2016)
- Added __get_slab_allocations() function which will report information
about each memory allocation made by the slab allocator which does
not come from a slab.
- If the first slab in the list of slabs which share the same chunk
size has no more room, it means that all other slabs following
it have no room either. This speeds up the test to find a slab with
free space, which can now abort and directly proceed to allocate
memory for a new slab.
- If an empty slab's decay count hits zero, it is moved to the front
of the empty slab list to be reclaimed more quickly.
- Allocations made from the slab now carry a pointer back to the
slab which they are a part of. This speeds up deallocation but
has the downside of making the smallest usable slab chunk size
64 bytes, which is double what used to be the minimum before.
c.lib 1.209 (21.11.2016)
- The maximum slab size is now 2^17 bytes (= 131072). If you request
a slab size larger than this, you will get slab sizes of 131072
bytes instead.
- Enabling the memory management debugging code no longer produces
compiler errors.
c.lib 1.208 (19.11.2016)
- Updated <stdlib.h> with new functions and data structures for

View File

@@ -1,6 +1,6 @@
#define VERSION 1
#define REVISION 208
#define DATE "19.11.2016"
#define VERS "debug.lib 1.208"
#define VSTRING "debug.lib 1.208 (19.11.2016)\r\n"
#define VERSTAG "\0$VER: debug.lib 1.208 (19.11.2016)"
#define REVISION 210
#define DATE "22.11.2016"
#define VERS "debug.lib 1.210"
#define VSTRING "debug.lib 1.210 (22.11.2016)\r\n"
#define VERSTAG "\0$VER: debug.lib 1.210 (22.11.2016)"

View File

@@ -1 +1 @@
208
210

View File

@@ -181,7 +181,8 @@ extern unsigned long __slab_max_size;
* following function to do so.
*
* Please note that this function works within the context of the memory
* allocation system and may not be safe to call from interrupt code.
* allocation system and is not safe to call from interrupt code. It may
* break a Forbid() or Disable() condition.
*/
extern void __free_unused_slabs(void);
@@ -198,7 +199,8 @@ extern void __free_unused_slabs(void);
* not operational.
*
* Please note that this function works within the context of the memory
* allocation system and may not be safe to call from interrupt code.
* allocation system and is not safe to call from interrupt code. It may
* break a Forbid() or Disable() condition.
*/
/****************************************************************************/
@@ -215,7 +217,8 @@ struct __slab_usage_information
size_t sui_num_single_allocations;
/* Total number of bytes allocated for memory not managed
* by slabs.
* by slabs. This includes the management overhead for
* each allocation.
*/
size_t sui_total_single_allocation_size;
@@ -259,6 +262,68 @@ void __get_slab_usage(__slab_usage_callback callback);
/****************************************************************************/
/*
* You can obtain runtime statistics about the memory allocations
* which the slab allocator did not fit into slabs. This works
* just like __get_slab_usage() in that the callback function
* you provide will be called for each single allocation that
* is not part of a slab.
*
* Your callback function must return 0 if it wants to be called again,
for the next allocation, or return -1 to stop. Note that your callback
* function may not be called if the slab allocator did not
* allocate memory outside of slabs.
*
* Please note that this function works within the context of the memory
* allocation system and is not safe to call from interrupt code. It may
* break a Forbid() or Disable() condition.
*/
/* This is what your callback function will see when it is invoked. */
struct __slab_allocation_information
{
/* Number of allocations which are not managed by slabs, but
* are handled separately.
*/
size_t sai_num_single_allocations;
/* Total number of bytes allocated for memory not managed
* by slabs. This includes the management overhead for
* each allocation.
*/
size_t sai_total_single_allocation_size;
/*
* The following data is updated for each slab which
* your callback function sees.
*/
/* Index number of the allocation being reported (0 = no allocations
* outside of slabs are in use).
*/
int sai_allocation_index;
/* Size of this allocation, as requested by the program which
* called malloc(), realloc() or alloca().
*/
size_t sai_allocation_size;
/* Total size of this allocation, including management data
* structure overhead.
*/
size_t sai_total_allocation_size;
};
/****************************************************************************/
typedef int (*__slab_allocation_callback)(const struct __slab_allocation_information * sui);
/****************************************************************************/
void __get_slab_allocations(__slab_allocation_callback callback);
/****************************************************************************/
/*
* You can request to use the alloca() variant that actually does allocate
* memory from the system rather than the current stack frame, which will

View File

@@ -219,6 +219,7 @@ C_LIB := \
stdlib_getsp.o \
stdlib_get_errno.o \
stdlib_get_slab_usage.o \
stdlib_get_slab_allocations.o \
stdlib_isresident.o \
stdlib_labs.o \
stdlib_llabs.o \

View File

@@ -1,6 +1,6 @@
#define VERSION 1
#define REVISION 208
#define DATE "19.11.2016"
#define VERS "m.lib 1.208"
#define VSTRING "m.lib 1.208 (19.11.2016)\r\n"
#define VERSTAG "\0$VER: m.lib 1.208 (19.11.2016)"
#define REVISION 210
#define DATE "22.11.2016"
#define VERS "m.lib 1.210"
#define VSTRING "m.lib 1.210 (22.11.2016)\r\n"
#define VERSTAG "\0$VER: m.lib 1.210 (22.11.2016)"

View File

@@ -1 +1 @@
208
210

View File

@@ -1,6 +1,6 @@
#define VERSION 1
#define REVISION 208
#define DATE "19.11.2016"
#define VERS "m881.lib 1.208"
#define VSTRING "m881.lib 1.208 (19.11.2016)\r\n"
#define VERSTAG "\0$VER: m881.lib 1.208 (19.11.2016)"
#define REVISION 210
#define DATE "22.11.2016"
#define VERS "m881.lib 1.210"
#define VSTRING "m881.lib 1.210 (22.11.2016)\r\n"
#define VERSTAG "\0$VER: m881.lib 1.210 (22.11.2016)"

View File

@@ -1 +1 @@
208
210

View File

@@ -1,6 +1,6 @@
#define VERSION 1
#define REVISION 208
#define DATE "19.11.2016"
#define VERS "net.lib 1.208"
#define VSTRING "net.lib 1.208 (19.11.2016)\r\n"
#define VERSTAG "\0$VER: net.lib 1.208 (19.11.2016)"
#define REVISION 210
#define DATE "22.11.2016"
#define VERS "net.lib 1.210"
#define VSTRING "net.lib 1.210 (22.11.2016)\r\n"
#define VERSTAG "\0$VER: net.lib 1.210 (22.11.2016)"

View File

@@ -1 +1 @@
208
210

View File

@@ -520,6 +520,7 @@ STDLIB_OBJ = \
stdlib_getsp.o \
stdlib_get_errno.o \
stdlib_get_slab_usage.o \
stdlib_get_slab_allocations.o \
stdlib_isresident.o \
stdlib_labs.o \
stdlib_ldiv.o \

View File

@@ -0,0 +1,82 @@
/*
* :ts=4
*
* Portable ISO 'C' (1994) runtime library for the Amiga computer
* Copyright (c) 2002-2015 by Olaf Barthel <obarthel (at) gmx.net>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* - Neither the name of Olaf Barthel nor the names of contributors
* may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _STDLIB_HEADERS_H
#include "stdlib_headers.h"
#endif /* _STDLIB_HEADERS_H */
/****************************************************************************/
#ifndef _STDLIB_MEMORY_H
#include "stdlib_memory.h"
#endif /* _STDLIB_MEMORY_H */
/****************************************************************************/
/* Report runtime statistics about each memory allocation which the
 * slab allocator handled outside of a slab (a "single allocation").
 * The supplied callback is invoked once per such allocation; it must
 * return 0 to be called again for the next allocation, or non-zero to
 * stop the iteration early.
 *
 * NOTE(review): this takes the global memory lock for the duration of
 * the walk, so per the header documentation it is presumably not safe
 * to call from interrupt code — confirm against __memory_lock().
 */
void
__get_slab_allocations(__slab_allocation_callback callback)
{
	/* Nothing to report unless the slab allocator is operational. */
	if(__slab_data.sd_InUse)
	{
		struct __slab_allocation_information sai;

		/* Start from a clean slate: index/size fields begin at 0,
		 * so sai_allocation_index == 0 signals "no allocations
		 * outside of slabs" when the list below is empty.
		 */
		memset(&sai,0,sizeof(sai));

		/* Hold the memory lock while walking the allocation list
		 * so it cannot change underneath us.
		 */
		__memory_lock();

		/* Summary figures, identical for every callback invocation. */
		sai.sai_num_single_allocations = __slab_data.sd_NumSingleAllocations;
		sai.sai_total_single_allocation_size = __slab_data.sd_TotalSingleAllocationSize;

		/* Is the list of single allocations non-empty? (An Exec
		 * MinList head node's mln_Succ is NULL only for an empty
		 * list terminator.)
		 */
		if(__slab_data.sd_SingleAllocations.mlh_Head->mln_Succ != NULL)
		{
			const struct SlabSingleAllocation * ssa;

			/* Walk every single allocation, reporting each one. */
			for(ssa = (struct SlabSingleAllocation *)__slab_data.sd_SingleAllocations.mlh_Head ;
			    ssa->ssa_MinNode.mln_Succ != NULL ;
			    ssa = (struct SlabSingleAllocation *)ssa->ssa_MinNode.mln_Succ)
			{
				/* 1-based index of the allocation being reported. */
				sai.sai_allocation_index++;

				/* ssa_Size covers the management header plus the
				 * payload, so subtract the header to recover the
				 * size the program originally requested.
				 */
				sai.sai_allocation_size = ssa->ssa_Size - sizeof(*ssa);
				sai.sai_total_allocation_size = ssa->ssa_Size;

				/* A non-zero return from the callback stops the walk. */
				if((*callback)(&sai) != 0)
					break;
			}
		}
		else
		{
			/* No single allocations exist: still invoke the callback
			 * once (with sai_allocation_index == 0) so the caller
			 * receives the summary totals.
			 */
			(*callback)(&sai);
		}

		__memory_unlock();
	}
}

View File

@@ -47,7 +47,7 @@ __get_slab_usage(__slab_usage_callback callback)
if(__slab_data.sd_InUse)
{
struct __slab_usage_information sui;
struct SlabNode * sn;
const struct SlabNode * sn;
BOOL stop;
int i;
@@ -55,7 +55,7 @@ __get_slab_usage(__slab_usage_callback callback)
__memory_lock();
sui.sui_slab_size = __slab_data.sd_MaxSlabSize;
sui.sui_slab_size = __slab_data.sd_StandardSlabSize;
sui.sui_num_single_allocations = __slab_data.sd_NumSingleAllocations;
sui.sui_total_single_allocation_size = __slab_data.sd_TotalSingleAllocationSize;
@@ -72,7 +72,7 @@ __get_slab_usage(__slab_usage_callback callback)
sui.sui_num_slabs++;
sui.sui_total_slab_allocation_size += sizeof(*sn) + __slab_data.sd_MaxSlabSize;
sui.sui_total_slab_allocation_size += sizeof(*sn) + __slab_data.sd_StandardSlabSize;
}
}
@@ -98,6 +98,10 @@ __get_slab_usage(__slab_usage_callback callback)
}
}
}
else
{
(*callback)(&sui);
}
__memory_unlock();
}

View File

@@ -92,7 +92,7 @@ __get_allocation_size(size_t size)
/****************************************************************************/
void *
__allocate_memory(size_t size,BOOL never_free,const char * UNUSED unused_file,int UNUSED unused_line)
__allocate_memory(size_t size,BOOL never_free,const char * UNUSED debug_file_name,int UNUSED debug_line_number)
{
struct MemoryNode * mn;
size_t allocation_size;
@@ -215,8 +215,8 @@ __allocate_memory(size_t size,BOOL never_free,const char * UNUSED unused_file,in
mn->mn_AlreadyFree = FALSE;
mn->mn_Allocation = body;
mn->mn_AllocationSize = allocation_size;
mn->mn_File = (char *)file;
mn->mn_Line = line;
mn->mn_File = (char *)debug_file_name;
mn->mn_Line = debug_line_number;
mn->mn_FreeFile = NULL;
mn->mn_FreeLine = 0;
@@ -228,7 +228,7 @@ __allocate_memory(size_t size,BOOL never_free,const char * UNUSED unused_file,in
{
kprintf("[%s] + %10ld 0x%08lx [",__program_name,size,body);
kprintf("allocated at %s:%ld]\n",file,line);
kprintf("allocated at %s:%ld]\n",debug_file_name,debug_line_number);
}
#endif /* __MEM_DEBUG_LOG */
@@ -264,7 +264,7 @@ __allocate_memory(size_t size,BOOL never_free,const char * UNUSED unused_file,in
{
kprintf("[%s] + %10ld 0x%08lx [",__program_name,size,NULL);
kprintf("FAILED: allocated at %s:%ld]\n",file,line);
kprintf("FAILED: allocated at %s:%ld]\n",debug_file_name,debug_line_number);
}
}
#endif /* __MEM_DEBUG_LOG */

View File

@@ -200,7 +200,7 @@ struct MemoryTree
/* This keeps track of individual slabs. Each slab begins with this
* header and is followed by the memory it manages. The size of that
* memory "slab" is fixed and matches what is stored in
* SlabData.sd_MaxSlabSize.
* SlabData.sd_StandardSlabSize.
*
* Each slab manages allocations of a specific maximum size, e.g. 8, 16, 32,
* 64, etc. bytes. Multiple slabs can exist which manage allocations of the same
@@ -234,6 +234,15 @@ struct SlabNode
struct MinList sn_FreeList;
};
/* Memory allocations which are not part of a slab are
* tracked using this data structure.
*/
struct SlabSingleAllocation
{
struct MinNode ssa_MinNode;
ULONG ssa_Size;
};
/* This is the global bookkeeping information for managing
* memory allocations from the slab data structure.
*/
@@ -245,13 +254,13 @@ struct SlabData
* which are 8 bytes in size, sd_Slabs[4] is for 16 byte
* chunks, etc. The minimum chunk size is 8, which is why
* lists 0..2 are not used. Currently, there is an upper limit
* of 2^31 bytes per chunk, but you should not be using slab
* of 2^17 bytes per chunk, but you should not be using slab
* chunks much larger than 4096 bytes.
*/
struct MinList sd_Slabs[31];
struct MinList sd_Slabs[17];
/* Memory allocations which are larger than the limit
* found in the sd_MaxSlabSize field are kept in this list.
* found in the sd_StandardSlabSize field are kept in this list.
* They are never associated with a slab.
*/
struct MinList sd_SingleAllocations;
@@ -262,13 +271,13 @@ struct SlabData
*/
struct MinList sd_EmptySlabs;
/* This is the maximum size of a memory allocation which may
/* This is the standard size of a memory allocation which may
* be made from a slab that can accommodate it. This number
* is initialized from the __slab_max_size global variable,
* if > 0, and unless it already is a power of two, it will
* be rounded up to the next largest power of two.
*/
size_t sd_MaxSlabSize;
size_t sd_StandardSlabSize;
/* These fields keep track of how many entries there are in
* the sd_SingleAllocations list, and how much memory these

View File

@@ -47,46 +47,57 @@ struct SlabData NOCOMMON __slab_data;
/****************************************************************************/
struct SlabChunk
{
struct SlabNode * sc_Parent;
};
/****************************************************************************/
void *
__slab_allocate(size_t allocation_size)
{
struct SlabChunk * chunk;
void * allocation = NULL;
D(("allocating %lu bytes of memory",allocation_size));
assert( __slab_data.sd_MaxSlabSize > 0 );
assert( __slab_data.sd_StandardSlabSize > 0 );
/* Number of bytes to allocate exceeds the slab size?
* If so, allocate this memory chunk separately and
* keep track of it.
*/
if(allocation_size > __slab_data.sd_MaxSlabSize)
if(allocation_size + sizeof(*chunk) > __slab_data.sd_StandardSlabSize)
{
struct MinNode * single_allocation;
struct SlabSingleAllocation * single_allocation;
ULONG total_single_allocation_size = sizeof(*single_allocation) + allocation_size;
D(("allocation size is > %ld; this will be stored separately",__slab_data.sd_MaxSlabSize));
D(("allocating %ld (MinNode) + %ld = %ld bytes",sizeof(*single_allocation),allocation_size,sizeof(*single_allocation) + allocation_size));
D(("allocation size is > %ld; this will be stored separately",__slab_data.sd_StandardSlabSize));
D(("allocating %ld (MinNode+Size) + %ld = %ld bytes",sizeof(*single_allocation),allocation_size,total_single_allocation_size));
#if defined(__amigaos4__)
{
single_allocation = AllocVec(sizeof(*single_allocation) + allocation_size,MEMF_PRIVATE);
single_allocation = AllocMem(total_single_allocation_size,MEMF_PRIVATE);
}
#else
{
single_allocation = AllocVec(sizeof(*single_allocation) + allocation_size,MEMF_ANY);
single_allocation = AllocMem(total_single_allocation_size,MEMF_ANY);
}
#endif /* __amigaos4__ */
if(single_allocation != NULL)
{
single_allocation->ssa_Size = total_single_allocation_size;
allocation = &single_allocation[1];
D(("single allocation = 0x%08lx",allocation));
AddTail((struct List *)&__slab_data.sd_SingleAllocations,(struct Node *)single_allocation);
__slab_data.sd_NumSingleAllocations++;
__slab_data.sd_TotalSingleAllocationSize += sizeof(*single_allocation) + allocation_size;
allocation = &single_allocation[1];
__slab_data.sd_TotalSingleAllocationSize += total_single_allocation_size;
D(("single allocation succeeded at 0x%08lx (number of single allocations = %lu)", allocation, __slab_data.sd_NumSingleAllocations));
}
@@ -103,7 +114,7 @@ __slab_allocate(size_t allocation_size)
ULONG chunk_size;
int slab_index;
D(("allocation size is <= %ld; this will be allocated from a slab",__slab_data.sd_MaxSlabSize));
D(("allocation size is <= %ld; this will be allocated from a slab",__slab_data.sd_StandardSlabSize));
/* Chunks must be at least as small as a MinNode, because
* that's what we use for keeping track of the chunks which
@@ -113,6 +124,11 @@ __slab_allocate(size_t allocation_size)
if(entry_size < sizeof(struct MinNode))
entry_size = sizeof(struct MinNode);
/* Add room for a pointer back to the slab which
* the chunk belongs to.
*/
entry_size += sizeof(*chunk);
/* Find a slab which keeps track of chunks that are no
* larger than the amount of memory which needs to be
* allocated. We end up picking the smallest chunk
@@ -139,18 +155,28 @@ __slab_allocate(size_t allocation_size)
SHOWVALUE(chunk_size);
/* Find the first slab which has a free chunk and use it. */
for(sn = (struct SlabNode *)slab_list->mlh_Head ;
sn->sn_MinNode.mln_Succ != NULL ;
sn = (struct SlabNode *)sn->sn_MinNode.mln_Succ)
/* The slab list is organized in such a way that the first
* entry always has a free chunk ready for allocation. If
* there is no such free chunk, it means that no other
* slab nodes in this list have any free chunks.
*/
sn = (struct SlabNode *)slab_list->mlh_Head;
/* Make sure that the slab list is not empty. */
if(sn->sn_MinNode.mln_Succ != NULL)
{
D(("slab = 0x%08lx, chunk size = %ld", sn, sn->sn_ChunkSize));
assert( sn->sn_ChunkSize == chunk_size );
allocation = (struct MemoryNode *)RemHead((struct List *)&sn->sn_FreeList);
if(allocation != NULL)
chunk = (struct SlabChunk *)RemHead((struct List *)&sn->sn_FreeList);
if(chunk != NULL)
{
/* Keep track of this chunk's parent slab. */
chunk->sc_Parent = sn;
allocation = &chunk[1];
D(("allocation succeeded at 0x%08lx in slab 0x%08lx (slab use count = %lu)",allocation,sn,sn->sn_UseCount));
/* Was this slab empty before we began using it again? */
@@ -178,8 +204,6 @@ __slab_allocate(size_t allocation_size)
Remove((struct Node *)sn);
AddTail((struct List *)slab_list, (struct Node *)sn);
}
break;
}
}
@@ -230,15 +254,15 @@ __slab_allocate(size_t allocation_size)
*/
if(new_sn == NULL)
{
D(("no slab is available for reuse; allocating a new slab (%lu bytes)",sizeof(*sn) + __slab_data.sd_MaxSlabSize));
D(("no slab is available for reuse; allocating a new slab (%lu bytes)",sizeof(*sn) + __slab_data.sd_StandardSlabSize));
#if defined(__amigaos4__)
{
new_sn = (struct SlabNode *)AllocVec(sizeof(*sn) + __slab_data.sd_MaxSlabSize,MEMF_PRIVATE);
new_sn = (struct SlabNode *)AllocVec(sizeof(*sn) + __slab_data.sd_StandardSlabSize,MEMF_PRIVATE);
}
#else
{
new_sn = (struct SlabNode *)AllocVec(sizeof(*sn) + __slab_data.sd_MaxSlabSize,MEMF_ANY);
new_sn = (struct SlabNode *)AllocVec(sizeof(*sn) + __slab_data.sd_StandardSlabSize,MEMF_ANY);
}
#endif /* __amigaos4__ */
@@ -257,7 +281,7 @@ __slab_allocate(size_t allocation_size)
D(("setting up slab 0x%08lx", new_sn));
assert( chunk_size <= __slab_data.sd_MaxSlabSize );
assert( chunk_size <= __slab_data.sd_StandardSlabSize );
memset(new_sn,0,sizeof(*new_sn));
@@ -270,7 +294,7 @@ __slab_allocate(size_t allocation_size)
* SlabNode header.
*/
first_byte = (BYTE *)&new_sn[1];
last_byte = &first_byte[__slab_data.sd_MaxSlabSize - chunk_size];
last_byte = &first_byte[__slab_data.sd_StandardSlabSize - chunk_size];
for(free_chunk = (struct MinNode *)first_byte ;
free_chunk <= (struct MinNode *)last_byte;
@@ -283,12 +307,17 @@ __slab_allocate(size_t allocation_size)
D(("slab contains %lu chunks, %lu bytes each",num_free_chunks,chunk_size));
/* Grab the first free chunk (there has to be one). */
allocation = (struct MemoryNode *)RemHead((struct List *)&new_sn->sn_FreeList);
chunk = (struct SlabChunk *)RemHead((struct List *)&new_sn->sn_FreeList);
assert( chunk != NULL );
/* Keep track of this chunk's parent slab. */
chunk->sc_Parent = new_sn;
allocation = &chunk[1];
D(("allocation succeeded at 0x%08lx in slab 0x%08lx (slab use count = %lu)",allocation,new_sn,new_sn->sn_UseCount+1));
assert( allocation != NULL );
/* Set up the new slab and put it where it belongs. */
new_sn->sn_EmptyDecay = 0;
new_sn->sn_UseCount = 1;
@@ -335,6 +364,19 @@ __slab_allocate(size_t allocation_size)
else
{
sn->sn_EmptyDecay--;
/* Is this slab ready for reuse now? */
if(sn->sn_EmptyDecay == 0)
{
/* Move it to the front of the list, so that
* it will be collected as soon as possible.
*/
if(free_node != (struct MinNode *)__slab_data.sd_EmptySlabs.mlh_Head)
{
Remove((struct Node *)free_node);
AddHead((struct List *)&__slab_data.sd_EmptySlabs,(struct Node *)free_node);
}
}
}
}
}
@@ -354,30 +396,42 @@ __slab_allocate(size_t allocation_size)
void
__slab_free(void * address,size_t allocation_size)
{
struct SlabChunk * chunk;
D(("freeing allocation at 0x%08lx, %lu bytes",address,allocation_size));
assert( __slab_data.sd_MaxSlabSize > 0 );
assert( __slab_data.sd_StandardSlabSize > 0 );
/* Number of bytes allocated exceeds the slab size?
* Then the chunk was allocated separately.
*/
if(allocation_size > __slab_data.sd_MaxSlabSize)
if(allocation_size + sizeof(*chunk) > __slab_data.sd_StandardSlabSize)
{
struct MinNode * mn = address;
struct SlabSingleAllocation * single_allocation = address;
ULONG size;
D(("allocation size is > %ld; this was stored separately",__slab_data.sd_MaxSlabSize));
D(("allocation size is > %ld; this was stored separately",__slab_data.sd_StandardSlabSize));
Remove((struct Node *)&mn[-1]);
/* Management information (MinNode linkage, size in bytes) precedes
* the address returned by malloc(), etc.
*/
single_allocation--;
FreeVec(&mn[-1]);
size = single_allocation->ssa_Size;
assert( sizeof(*single_allocation) + allocation_size == size );
Remove((struct Node *)single_allocation);
FreeMem(single_allocation, size);
assert( __slab_data.sd_NumSingleAllocations > 0 );
__slab_data.sd_NumSingleAllocations--;
assert( __slab_data.sd_TotalSingleAllocationSize <= sizeof(*mn) + allocation_size );
assert( size <= __slab_data.sd_TotalSingleAllocationSize );
__slab_data.sd_TotalSingleAllocationSize -= sizeof(*mn) + allocation_size;
__slab_data.sd_TotalSingleAllocationSize -= size;
D(("number of single allocations = %ld", __slab_data.sd_NumSingleAllocations));
}
@@ -389,7 +443,7 @@ __slab_free(void * address,size_t allocation_size)
ULONG chunk_size;
int slab_index;
D(("allocation size is <= %ld; this was allocated from a slab",__slab_data.sd_MaxSlabSize));
D(("allocation size is <= %ld; this was allocated from a slab",__slab_data.sd_StandardSlabSize));
/* Chunks must be at least as small as a MinNode, because
* that's what we use for keeping track of the chunks which
@@ -399,6 +453,11 @@ __slab_free(void * address,size_t allocation_size)
if(entry_size < sizeof(struct MinNode))
entry_size = sizeof(struct MinNode);
/* Add room for a pointer back to the slab which
* the chunk belongs to.
*/
entry_size += sizeof(*chunk);
/* Find a slab which keeps track of chunks that are no
* larger than the amount of memory which needs to be
* released. We end up picking the smallest chunk
@@ -419,69 +478,56 @@ __slab_free(void * address,size_t allocation_size)
}
}
/* Find the slab which contains the memory chunk. */
/* Pick the slab which contains the memory chunk. */
if(slab_list != NULL)
{
const size_t usable_range = __slab_data.sd_MaxSlabSize - chunk_size;
struct SlabNode * sn;
BYTE * first_byte;
BYTE * last_byte;
BOOL freed = FALSE;
assert( chunk_size <= __slab_data.sd_MaxSlabSize );
assert( chunk_size <= __slab_data.sd_StandardSlabSize );
for(sn = (struct SlabNode *)slab_list->mlh_Head ;
sn->sn_MinNode.mln_Succ != NULL ;
sn = (struct SlabNode *)sn->sn_MinNode.mln_Succ)
/* The pointer back to the slab which this chunk belongs
* to precedes the address which __slab_allocate()
* returned.
*/
chunk = address;
chunk--;
sn = chunk->sc_Parent;
SHOWVALUE(sn->sn_ChunkSize);
assert( sn->sn_ChunkSize == chunk_size );
D(("allocation is part of slab 0x%08lx (slab use count = %ld)",sn,sn->sn_UseCount));
AddTail((struct List *)&sn->sn_FreeList, (struct Node *)address);
assert( sn->sn_UseCount > 0 );
sn->sn_UseCount--;
/* If this slab is empty, mark it as unused and
* allow it to be purged.
*/
if(sn->sn_UseCount == 0)
{
SHOWVALUE(sn->sn_ChunkSize);
D(("slab is now empty"));
assert( sn->sn_ChunkSize == chunk_size );
first_byte = (BYTE *)&sn[1];
last_byte = &first_byte[usable_range];
/* Is this where the chunk belongs? */
if(first_byte <= (BYTE *)address && (BYTE *)address <= last_byte)
{
D(("allocation is part of slab 0x%08lx (slab use count = %ld)",sn,sn->sn_UseCount));
AddTail((struct List *)&sn->sn_FreeList, (struct Node *)address);
assert( sn->sn_UseCount > 0 );
sn->sn_UseCount--;
/* If this slab is empty, mark it as unused and
* allow it to be purged.
*/
if(sn->sn_UseCount == 0)
{
D(("slab is now empty"));
AddTail((struct List *)&__slab_data.sd_EmptySlabs,(struct Node *)&sn->sn_EmptyLink);
sn->sn_EmptyDecay = 1;
}
/* This slab now has room. Move it to front of the list
* so that searching for a free chunk will pick it
* first.
*/
if(sn != (struct SlabNode *)slab_list->mlh_Head)
{
D(("moving slab to the head of the list"));
Remove((struct Node *)sn);
AddHead((struct List *)slab_list, (struct Node *)sn);
}
freed = TRUE;
break;
}
AddTail((struct List *)&__slab_data.sd_EmptySlabs,(struct Node *)&sn->sn_EmptyLink);
sn->sn_EmptyDecay = 1;
}
if(!freed)
D(("allocation at 0x%08lx could not be freed",address));
/* This slab now has room. Move it to front of the list
* so that searching for a free chunk will pick it
* first.
*/
if(sn != (struct SlabNode *)slab_list->mlh_Head)
{
D(("moving slab to the head of the list"));
Remove((struct Node *)sn);
AddHead((struct List *)slab_list, (struct Node *)sn);
}
}
else
{
@@ -495,15 +541,22 @@ __slab_free(void * address,size_t allocation_size)
void
__slab_init(size_t slab_size)
{
const size_t max_slab_size = (1UL << (NUM_ENTRIES(__slab_data.sd_Slabs)));
size_t size;
SETDEBUGLEVEL(2);
D(("slab_size = %ld",slab_size));
/* Do not allow for a slab size that is larger than
* what we support.
*/
if(slab_size > max_slab_size)
slab_size = max_slab_size;
/* If the maximum allocation size to be made from the slab
* is not already a power of 2, round it up. We do not
* support allocations larger than 2^31, and the maximum
* support allocations larger than 2^17, and the maximum
* allocation size should be much smaller.
*
* Note that the maximum allocation size also defines the
@@ -533,8 +586,8 @@ __slab_init(size_t slab_size)
NewList((struct List *)&__slab_data.sd_SingleAllocations);
NewList((struct List *)&__slab_data.sd_EmptySlabs);
__slab_data.sd_MaxSlabSize = size;
__slab_data.sd_InUse = TRUE;
__slab_data.sd_StandardSlabSize = size;
__slab_data.sd_InUse = TRUE;
}
}
@@ -547,6 +600,7 @@ __slab_exit(void)
if(__slab_data.sd_InUse)
{
struct SlabSingleAllocation * single_allocation;
struct SlabNode * sn;
struct SlabNode * sn_next;
struct MinNode * mn;
@@ -583,7 +637,9 @@ __slab_exit(void)
{
mn_next = mn->mln_Succ;
FreeVec(mn);
single_allocation = (struct SlabSingleAllocation *)mn;
FreeMem(single_allocation, single_allocation->ssa_Size);
}
__slab_data.sd_InUse = FALSE;

View File

@@ -1,6 +1,6 @@
#define VERSION 1
#define REVISION 208
#define DATE "19.11.2016"
#define VERS "unix.lib 1.208"
#define VSTRING "unix.lib 1.208 (19.11.2016)\r\n"
#define VERSTAG "\0$VER: unix.lib 1.208 (19.11.2016)"
#define REVISION 210
#define DATE "22.11.2016"
#define VERS "unix.lib 1.210"
#define VSTRING "unix.lib 1.210 (22.11.2016)\r\n"
#define VERSTAG "\0$VER: unix.lib 1.210 (22.11.2016)"

View File

@@ -1 +1 @@
208
210

View File

@@ -31,7 +31,7 @@ WARNINGS = \
INCLUDE = -I../library/include
LIB = -L../library/lib
OPTIONS = -DNDEBUG -fno-builtin -fwritable-strings -DNO_INLINE_STDARG -DIEEE_FLOATING_POINT_SUPPORT
OPTIONS = -DNDEBUG -fno-builtin -fwritable-strings -DNO_INLINE_STDARG -DIEEE_FLOATING_POINT_SUPPORT -DVERBOSE
#OPTIONS = -D__MEM_DEBUG -fno-builtin
#OPTIONS = -DDEBUG -D__MEM_DEBUG -DNO_INLINE_STDARG -fno-builtin
OPTIMIZE = -O
@@ -49,13 +49,15 @@ LIBS = -lm -lc -lgcc
all: test fgets_test iotest sscanf_test printf_test sprintf_test \
stack_size_test translate_test strtok_test uname simple \
fstat_stdout_test simple_sprintf date_test sscanf_64 factorial \
execvp_test setlocale rand fstat_test base_dir_nametest
execvp_test setlocale rand fstat_test base_dir_nametest \
malloc-test
clean:
$(DELETE) #?.o #?.map test fgets_test iotest sscanf_test printf_test \
sprintf_test stack_size_test translate_test strtok_test uname \
simple fstat_stdout_test fstat_test simple_sprintf date_test sscanf_64 \
factorial execvp_test setlocale rand base_dir_nametest
factorial execvp_test setlocale rand base_dir_nametest \
malloc-test
##############################################################################
@@ -143,6 +145,10 @@ rand : rand.o
@echo "Linking $@"
$(CC) $(CFLAGS) -o $@ rand.o $(LIBS) -Wl,--cref,-M,-Map=$@.map
# Link the malloc stress-test program. The recipe must link malloc-test.o;
# the previous version erroneously linked rand.o (copy-paste from the rule
# for the 'rand' target), producing a binary that never ran the malloc test.
malloc-test: malloc-test.o
	@echo "Linking $@"
	$(CC) $(CFLAGS) -o $@ malloc-test.o $(LIBS) -Wl,--cref,-M,-Map=$@.map
##############################################################################
mkid:

489
test_programs/malloc-test.c Normal file
View File

@@ -0,0 +1,489 @@
/* malloc-test.c
* by Wolfram Gloger 1995, 1996
*
* This program is provided `as is', there is no warranty.
*/
#if !defined(__STDC__)
#define __STDC__ 1
#endif
#include <stdlib.h>
#include <stdio.h>
#if !defined(_WIN32)
#include <unistd.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/resource.h>
#endif
#ifndef MEMORY
#define MEMORY 4000000l
#endif
#ifndef BINS_MAX
#define BINS_MAX 32768
#endif
#define SBINS_MAX 1024
#define SIZE 4024
#define I_MAX 5000
#ifndef I_AVERAGE
#define I_AVERAGE 200
#endif
#define ACTIONS_MAX 50
#ifndef SBRK_AVG
#define SBRK_AVG 0
#endif
#ifndef MMAP_THRESH
#define MMAP_THRESH 0
#endif
#ifndef TEST
#define TEST 4 /* minimal testing */
#endif
#ifndef TEST_INC
#define TEST_INC 2047
#endif
#if defined(__i386__) || defined(__sparc__) || defined(mips) || defined(_WIN32)
#define PAGE_SIZE 4096
#elif defined(__alpha__)
#define PAGE_SIZE 8192
#elif defined(__SVR4)
#define PAGE_SIZE 8192
#else
#define PAGE_SIZE 4096 /* default */
#endif
#define RANDOM(s) (lran2(0) % (s))
/* All probabilities are parts in 1024. */
#ifndef PROB_MEMALIGN
#define PROB_MEMALIGN 0
#endif
#ifndef PROB_REALLOC
#define PROB_REALLOC 48
#endif
#ifndef PROB_CALLOC
#define PROB_CALLOC 0
#endif
/* One outstanding allocation tracked by the test driver. */
struct bin {
	unsigned char *ptr;	/* start of the allocation; stamped with a test pattern by mem_init() */
	unsigned long size;	/* allocation size in bytes; 0 means this bin is empty */
} m[BINS_MAX], sm[SBINS_MAX];	/* m[]: malloc/realloc/calloc bins, sm[]: explicit sbrk() bins */
/* size: maximum random request size; bins/sbins: number of entries in use. */
unsigned long size = SIZE, bins=0, sbins=0;
/* Running and peak total of bytes currently allocated across all bins. */
unsigned long total_size=0, total_size_max=0;
/* Lowest allocation address observed (initialized from sbrk(0) when that
 * code is enabled — see the #if 0 block in main()). */
unsigned char *base_ptr;
/* High-order bits of base_ptr, kept for the verbose report only. */
unsigned long base_save;
/* Portable pseudo-random number generator (linear congruential core with
 * a 97-entry shuffle table, after Numerical Recipes p. 211).
 *
 * Passing a non-zero seed (or calling for the first time) re-initializes
 * the generator; passing 0 returns the next value in the sequence.
 * Results are in the range [0, LRAN2_MAX).
 */
#define LRAN2_MAX 714025l /* modulus */
#define IA 1366l          /* multiplier */
#define IC 150889l        /* increment */

long
lran2(long seed)
{
	static int need_init = 1;
	static long x, y, shuffle[97];
	int slot;

	if (seed || need_init) {
		/* (Re)build the shuffle table from the seed. */
		need_init = 0;
		x = (IC - seed) % LRAN2_MAX;
		if (x < 0)
			x = -x;
		for (slot = 0; slot < 97; slot++) {
			x = (IA * x + IC) % LRAN2_MAX;
			shuffle[slot] = x;
		}
		x = (IA * x + IC) % LRAN2_MAX;
		y = x;
	}

	/* Pick a table slot from the previous output, emit its value and
	 * refill it with the next raw congruential step. */
	slot = y % 97;
	y = shuffle[slot];
	x = (IA * x + IC) % LRAN2_MAX;
	shuffle[slot] = x;

	return y;
}
#undef IA
#undef IC
/* Stamp a recognizable test pattern into the buffer at 'ptr' of length
 * 'size' so that later corruption can be detected by mem_check():
 * the first word (when it fits) holds an address/size signature, every
 * TEST_INC'th byte and the final byte hold an address-derived byte value.
 */
void
mem_init(unsigned char *ptr, unsigned long size)
{
	unsigned long offset, mix;

	if(size == 0)
		return;

	if(size > sizeof(unsigned long)) {
		/* Stamp the whole initial word with an address/size signature. */
		*(unsigned long *)ptr = (unsigned long)ptr ^ size;
		offset = TEST_INC;
	} else {
		offset = 0;
	}

	/* Sample bytes throughout the buffer, TEST_INC apart. */
	while(offset < size) {
		mix = (unsigned long)ptr ^ offset;
		ptr[offset] = ((mix ^ (mix>>8)) & 0xFF);
		offset += TEST_INC;
	}

	/* The final byte is always stamped, even for tiny buffers. */
	mix = (unsigned long)ptr ^ (size-1);
	ptr[size-1] = ((mix ^ (mix>>8)) & 0xFF);
}
/*
 * Verify the test pattern that mem_init() stamped into the buffer at
 * 'ptr' of length 'size'.
 *
 * Returns 0 when the pattern is intact, or a non-zero code identifying
 * which probe failed: 1 = initial signature word, 2 = interior sample
 * byte, 3 = final byte.
 *
 * Fix: the diagnostic printf()s used %x for 'unsigned long' arguments,
 * which is undefined behavior on LP64 targets; they now use %lx (and an
 * explicit unsigned cast for the promoted byte value).
 */
int
mem_check(unsigned char *ptr, unsigned long size)
{
	unsigned long i, j;

	if(size == 0) return 0;
	if(size > sizeof(unsigned long)) {
		/* Check the address/size signature word at the start. */
		if(*(unsigned long *)ptr != ((unsigned long)ptr ^ size)) {
			printf ("failed size check: expected %lx, found %lx!\n",
					((unsigned long) ptr ^ size), *(unsigned long *) ptr);
			return 1;
		}
		i = TEST_INC;
	} else
		i = 0;
	/* Probe every TEST_INC'th byte of the buffer. */
	for(; i<size; i+=TEST_INC) {
		j = (unsigned long)ptr ^ i;
		if(ptr[i] != ((j ^ (j>>8)) & 0xFF)) return 2;
	}
	/* The last byte is always stamped, even for tiny buffers. */
	j = (unsigned long)ptr ^ (size-1);
	if(ptr[size-1] != ((j ^ (j>>8)) & 0xFF)) {
		printf ("failed last byte check: expected %lx, found %x!\n",
				((unsigned long) ((j ^ (j>>8)) & 0xFF)), (unsigned) ptr[size-1]);
		return 3;
	}
	return 0;
}
/* Produce a random allocation size in [1, max], biased so that sizes
 * near powers of two and near multiples of the page size occur often
 * (those are the interesting cases for an allocator).
 */
long
random_size(long max)
{
	long result;
	long max_pages = max / PAGE_SIZE;

	if(max_pages > 0) {
		long pick = RANDOM(1024);
		long jitter = (pick & 7) * 4;

		if(pick < 512) {
			/* Small value near a power of two. */
			result = (1L << (pick >> 6)) + jitter;
		} else if(pick < 512 + 20) {
			/* Value near a multiple of the page size. */
			result = (RANDOM(max_pages) + 1) * PAGE_SIZE + jitter - 16;
		} else {
			result = RANDOM(max) + 1;
		}
	} else {
		result = RANDOM(max) + 1;
	}

	return result;
}
/* Replace the allocation held in bin 'm' with a fresh one of random size,
 * using malloc, realloc, calloc or memalign with the probabilities set by
 * the PROB_* constants (parts in 1024). Verifies the old contents first,
 * keeps the total_size accounting up to date, and stamps the new block
 * with the test pattern.
 *
 * NOTE: the sequence of RANDOM() calls is part of the reproducible test
 * behavior — do not reorder them.
 */
void
#if __STDC__
bin_alloc(struct bin *m)
#else
bin_alloc(m) struct bin *m;
#endif
{
	long r, key;
	unsigned long sz;

#if TEST > 0
	/* The outgoing allocation must still carry an intact pattern. */
	if(mem_check(m->ptr, m->size)) {
		printf("bin_alloc: memory corrupt at %p, size=%lu!\n", m->ptr, m->size);
		exit(1);
	}
#endif
	total_size -= m->size;
	r = RANDOM(1024);
	if(r < PROB_MEMALIGN) {
		/* memalign path (disabled when PROB_MEMALIGN is 0). */
#if !defined(_WIN32)
		if(m->size > 0) free(m->ptr);
		m->size = random_size(size);
#if PROB_MEMALIGN
		m->ptr = (unsigned char *)memalign(4 << RANDOM(8), m->size);
#endif
#endif
	} else if(r < (PROB_MEMALIGN + PROB_REALLOC)) {
		/* realloc path: grow/shrink the existing block in place. */
		if(m->size == 0) {
#ifndef __sparc__
			m->ptr = NULL;
#else
			/* SunOS4 does not realloc() a NULL pointer */
			m->ptr = (unsigned char *)malloc(1);
#endif
		}
#if TEST > 2
		/* Fill with a keyed pattern so data preservation across
		 * realloc() can be verified below. */
		key = RANDOM(256);
		sz = m->size;
		for(r=0; r<sz; r++) m->ptr[r] = (r ^ key) & 0xFF;
#endif
		m->size = random_size(size);
		/*printf("realloc %d\n", (int)m->size);*/
		m->ptr = (unsigned char *)realloc(m->ptr, m->size);
#if TEST > 2
		/* realloc() must preserve min(old, new) bytes. */
		if(m->size < sz) sz = m->size;
		for(r=0; r<sz; r++)
			if(m->ptr[r] != ((r ^ key) & 0xFF)) {
				printf("realloc bug !\n");
				exit(1);
			}
#endif
	} else if(r < (PROB_MEMALIGN + PROB_REALLOC + PROB_CALLOC)) {
		/* calloc path: result must be zero-filled. */
		if(m->size > 0) free(m->ptr);
		m->size = random_size(size);
		m->ptr = (unsigned char *)calloc(m->size, 1);
#if TEST > 2
		for(r=0; r<m->size; r++)
			if(m->ptr[r] != '\0') {
				printf("calloc bug !\n");
				exit(1);
			}
#endif
	} else { /* normal malloc call */
		if(m->size > 0) free(m->ptr);
		m->size = random_size(size);
		m->ptr = (unsigned char *)malloc(m->size);
	}
	if(!m->ptr) {
		printf("out of memory!\n");
		exit(1);
	}
	total_size += m->size;
	if(total_size > total_size_max) total_size_max = total_size;
#if TEST > 0
	mem_init(m->ptr, m->size);
#endif
	/* Track the lowest address ever handed out. */
	if(m->ptr < base_ptr) {
#ifdef VERBOSE
		printf("hmmm, allocating below brk...\n");
#endif
		base_ptr = m->ptr;
	}
}
/* Release the allocation held in bin 'm' (no-op for an empty bin),
 * verifying its test pattern first and updating the total_size tally.
 */
void
bin_free(struct bin *m)
{
	/* An empty bin holds nothing to free. */
	if(m->size == 0)
		return;

#if TEST > 0
	/* The pattern written by mem_init() must still be intact. */
	if(mem_check(m->ptr, m->size) != 0) {
		printf("bin_free: memory corrupt!\n");
		exit(1);
	}
#endif

	total_size -= m->size;
	free(m->ptr);
	m->size = 0;
}
/*
 * Walk every bin in the malloc (m[]) and sbrk (sm[]) tables and verify
 * that their test patterns are intact; aborts the program on the first
 * corrupted bin, reporting which probe failed.
 *
 * Fixes: the diagnostic printf()s used %x for a pointer and %d for
 * 'unsigned int'/'unsigned long' values — undefined behavior on LP64
 * targets — now %p/%u/%lu. The assignment-in-condition is parenthesized
 * and compared explicitly, and the sm[] loop now reports the error code
 * like the m[] loop does.
 */
void
bin_test()
{
	unsigned int b;
	int v;

	for(b=0; b<bins; b++) {
		if((v = mem_check(m[b].ptr, m[b].size)) != 0) {
			printf("bin_test: memory corrupt! m[%u].ptr = %p, m[%u].size = %lu\n",
					b, (void *)m[b].ptr, b, m[b].size);
			printf ("error = %d\n", v);
			exit(1);
		}
	}
	for(b=0; b<sbins; b++) {
		if((v = mem_check(sm[b].ptr, sm[b].size)) != 0) {
			printf("bin_test: memory corrupt! sm[%u].ptr = %p, sm[%u].size = %lu\n",
					b, (void *)sm[b].ptr, b, sm[b].size);
			printf ("error = %d\n", v);
			exit(1);
		}
	}
}
/* Print the user, system and combined CPU time consumed so far, via
 * getrusage(). Compiled to a no-op on platforms without getrusage()
 * (Windows, Amiga).
 */
void
print_times()
{
#if !defined(_WIN32) && !defined(AMIGA)
	struct rusage usage;
	long sec, usec;

	getrusage(RUSAGE_SELF, &usage);

	printf(" u=%ld.%06ldsec",
		   (long)usage.ru_utime.tv_sec, (long)usage.ru_utime.tv_usec);
	printf(" s=%ld.%06ldsec",
		   (long)usage.ru_stime.tv_sec, (long)usage.ru_stime.tv_usec);

	/* Sum user + system time, carrying microsecond overflow by hand. */
	usec = (long)usage.ru_utime.tv_usec + (long)usage.ru_stime.tv_usec;
	sec  = (long)usage.ru_utime.tv_sec  + (long)usage.ru_stime.tv_sec;
	if(usec >= 1000000) {
		usec -= 1000000;
		sec++;
	}

	printf(" t=%ld.%06ldsec", sec, usec);
#endif
}
/* Test driver: repeatedly frees and (re)allocates random bins while
 * verifying memory patterns, then prints heap-usage statistics.
 *
 * Usage: malloc-test [iterations [max-request-size]]
 *
 * NOTE: the sequence of RANDOM() calls defines the reproducible workload;
 * do not reorder them.
 */
int
#if __STDC__
main(int argc, char *argv[])
#else
main(argc, argv) int argc; char *argv[];
#endif
{
	int i, j, next_i, count, max=I_MAX, actions;
	unsigned int b;
	long sbrk_max, sum;
	double sbrk_used_sum, total_size_sum;
	void* dummy = 0;

	/* Optional overrides: argv[1] = iteration count, argv[2] = max size. */
	if(argc > 1) max = atoi(argv[1]);
	if(argc > 2) size = atoi(argv[2]);
	/* Seed the PRNG from the parameters so runs are reproducible. */
	lran2((long)max ^ size);
	/* Scale the number of bins to the configured memory budget. */
	bins = (MEMORY/size)*4;
	if(bins > BINS_MAX) bins = BINS_MAX;
#if 0 // FIX ME? Disable sbrk...
	/* Page-align the initial program break and try to fill the region
	 * below it, so later "allocating below brk" events are meaningful. */
	base_ptr = (unsigned char *)sbrk(0);
	sum = (long)base_ptr % PAGE_SIZE;
	if(sum > 0) {
		if((char *)sbrk((long)PAGE_SIZE - sum) == (char *)-1) exit(1);
		base_ptr += (long)PAGE_SIZE - sum;
		/*printf("base_ptr = %lx\n", (long)base_ptr);*/
	}
	/* attempt to fill up the region below the initial brk */
	for(i=0; i<10000; i++) {
		dummy = malloc(1);
		if(dummy >= (void*)base_ptr) break;
	}
	free(dummy);
	base_save = ((unsigned long)base_ptr >> 24) << 24;
#endif
#if MMAP_THRESH > 0
	/* Tune the allocator's mmap threshold where mallopt() exists. */
	if(!mallopt(-3, MMAP_THRESH)) printf("mallopt failed!\n");
	if(!mallopt(-4, 200)) printf("mallopt failed!\n");
#endif
#ifdef VERBOSE
	/* NOTE(review): bins and size are unsigned long printed with %d —
	 * format/argument mismatch on LP64; left unchanged here. */
	printf("# mmap_thresh=%d\n", MMAP_THRESH);
	printf("# bins=%d max=%d size=%d\n", bins, max, size);
	printf("# base=%lx\n", base_save);
#endif
	/* Populate roughly half the bins to start from a mixed heap state. */
	for(b=0; b<bins; b++) {
		if(RANDOM(2) == 0) bin_alloc(&m[b]);
		else m[b].size = 0;
	}
	sbrk_max = 0;
	sbrk_used_sum = total_size_sum = 0.0;
	/* Main loop: each pass frees a random batch of bins, then reallocates
	 * a random batch; 'i' counts individual actions up to 'max'. */
	for(i=next_i=count=0; i<=max;) {
#if TEST > 1
		bin_test();
#endif
#ifdef MSTATS
		malloc_stats();
#endif
		/* Free a random number of randomly chosen bins. */
		actions = RANDOM(ACTIONS_MAX);
		for(j=0; j<actions; j++) {
			b = RANDOM(bins);
			bin_free(&m[b]);
#if TEST > 3
			bin_test();
#endif
		}
		i += actions;
#ifdef AFTER_FREE
		AFTER_FREE;
#endif
#if SBRK_AVG > 0
		if(sbins<SBINS_MAX && RANDOM(SBRK_AVG)==0) {
			/* throw in an explicit sbrk call */
			sm[sbins].size = RANDOM(10000)+1;
			sm[sbins].ptr = sbrk(sm[sbins].size);
			/* Merge with the previous sbrk region when contiguous. */
			if(sbins>0 && sm[sbins].ptr==(sm[sbins-1].ptr+sm[sbins-1].size)) {
				sm[sbins-1].size += sm[sbins].size;
				sbins--;
			}
#ifdef VERBOSE
			printf("sbrk #%d %p %ld\n", sbins, sm[sbins].ptr, sm[sbins].size);
#endif
#if TEST > 0
			mem_init(sm[sbins].ptr, sm[sbins].size);
#endif
			sbins++;
		}
#endif
		/* Reallocate a random number of randomly chosen bins. */
		actions = RANDOM(ACTIONS_MAX);
		for(j=0; j<actions; j++) {
			b = RANDOM(bins);
			bin_alloc(&m[b]);
#if TEST > 3
			bin_test();
#endif
		}
		i += actions;
		if(i >= next_i) { /* gather statistics */
			count++;
#if !defined(_WIN32) && !defined(AMIGA)
			sum = (long)sbrk(0);
#else
			sum = 0;
#endif
			if(sum > sbrk_max) sbrk_max = sum;
			sbrk_used_sum += sum;
			total_size_sum += (double)total_size;
#ifdef VERBOSE
			printf("%8d %7lu\n", i, total_size);
#endif
			next_i += I_AVERAGE;
		}
	}
	/* Correct sbrk values. */
	sbrk_max -= (long)base_ptr;
	sbrk_used_sum -= (double)count*(long)base_ptr;
#ifdef VERBOSE
	printf("# initial brk: %lx\n", (long)base_ptr);
	printf("# max. sbrk()'ed memory: %ld bytes\n", sbrk_max);
	printf("# avg. sbrk()'ed memory: %ld bytes\n",
		   (long)(sbrk_used_sum/count));
	printf("# current size allocated: %ld bytes\n", total_size);
	printf("# maximum size allocated: %ld bytes\n", total_size_max);
	printf("# average size allocated: %.1f bytes\n", total_size_sum/count);
	printf("# current heap waste: %.2f%%\n",
		   (1.0 - (double)total_size_max/sbrk_max)*100.0);
	printf("# average heap waste: %.2f%%\n",
		   (1.0 - (double)total_size_sum/sbrk_used_sum)*100.0);
	printf("# total sbrk calls performed: %d\n", sbins);
#else
	printf("size=%7ld waste=%7.3f%%", size,
		   /* (1.0 - (double)total_size_max/sbrk_max)*100.0, */
		   (1.0 - (double)total_size_sum/sbrk_used_sum)*100.0);
	print_times();
	printf("\n");
#endif
	return 0;
}
/*
* Local variables:
* tab-width:4
* compile-command: "gcc -Wall malloc-test.c -o malloc-test"
* End:
*/