Merge branch 'release/0.0'
[allocwithin] / allocwithin.c
diff --git a/allocwithin.c b/allocwithin.c
new file mode 100644 (file)
index 0000000..0533854
--- /dev/null
@@ -0,0 +1,917 @@
+/** allocwithin
+ *  This is a very, very basic allocator.
+ *  It burns a lot of space and makes no effort to be clever or efficient.
+ *  But it does allow a segment of (presumed shared) memory to be used as a
+ *  pool for dynamic allocation, and allows that segment to be moved around.
+ *  The heavy-handed signal masking is probably not the most elegant way of
+ *  avoiding interprocess corruption.
+**/
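+
+/* A minimal lifecycle sketch (error handling elided; the backing memory
+ * could come from malloc, mmap, or a shared segment):
+ *
+ *     allocw_region_t *region = malloc(4096);
+ *     if (allocw_region_init(region, 4096) == 0) {
+ *         allocw_id_t id = allocw_malloc(region, 64);
+ *         char *p = allocw_ptr(region, id);
+ *         allocw_free(region, id);
+ *         allocw_region_fini(region);
+ *     }
+ *
+ * Ids, not raw pointers, are the stable handles; see allocw_ptr below.
+ */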
+
+#include "allocwithin_int.h"
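+
+/* The allocw_region_t / struct allocw_block_ layouts and the BLOCK_OFF() /
+ * BLOCK_PTR() macros used throughout come from allocwithin_int.h.  For
+ * reference (this is an assumption about that header, not a copy of it),
+ * they presumably convert between region-relative offsets and pointers
+ * along the lines of:
+ *
+ *     #define BLOCK_OFF(region, block) ((allocw_id_t)((char *)(block) - (char *)(region)))
+ *     #define BLOCK_PTR(region, off)   ((struct allocw_block_ *)((char *)(region) + (off)))
+ *
+ * which makes offset 0 double as the list terminator: BLOCK_PTR(region, 0)
+ * points back at the region header itself, which is exactly what the
+ * freelist walks below compare against.
+ */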
+
+#define NOTIFY_ERROR(...) do { fprintf(stderr, "ERROR "); fprintf(stderr, __VA_ARGS__); fprintf(stderr, "\n"); } while (0)
+#ifdef PRINT_DEBUG
+#define NOTIFY_DEBUG(...) do { fprintf(stderr, "DEBUG [%d] %s> ", __LINE__, __func__); fprintf(stderr, __VA_ARGS__); fprintf(stderr, "\n"); } while (0)
+#else /* PRINT_DEBUG */
+#define NOTIFY_DEBUG(...) do { } while (0)
+#endif /* PRINT_DEBUG */
+
+static const char * const src_id_ = "$Id$";
+
+static pthread_once_t init_once_ = PTHREAD_ONCE_INIT;
+static int init_result_ = -1;
+static pthread_mutexattr_t allocw_mutexattr_;
+
+/**
+ *  private function prototypes
+**/
+static void allocw_init_once_(void);
+static int region_init_(allocw_region_t *region, size_t size);
+static int region_fini_(allocw_region_t *region);
+static int region_lock_(allocw_region_t *region, sigset_t *oset);
+static int region_unlock_(allocw_region_t *region, sigset_t *oset);
+static void region_freelist_insert_(allocw_region_t *region, struct allocw_block_ *b);
+static struct allocw_block_ *block_split_(allocw_region_t *region, struct allocw_block_ *b, size_t payload_size);
+static void block_unsplit_(allocw_region_t *region, struct allocw_block_ *b);
+static struct allocw_block_ *alloc_block_(allocw_region_t *region, struct allocw_block_ *block, size_t size);
+
+
+/** allocw_init_once_
+ *  called once to initialize static attrs
+**/
+static
+void allocw_init_once_(void)
+{
+    int r;
+
+    NOTIFY_DEBUG("initializing");
+
+    memset(&allocw_mutexattr_, 0, sizeof allocw_mutexattr_);
+
+    if ( (r = pthread_mutexattr_init(&allocw_mutexattr_)) )
+    {
+        NOTIFY_ERROR("%s:%s", "pthread_mutexattr_init", strerror(r));
+        init_result_ = r;
+        return;
+    }
+
+#if 0
+    /* darwin doesn't like this */
+    if ( (r = pthread_mutexattr_setpshared(&allocw_mutexattr_, PTHREAD_PROCESS_SHARED)) )
+    {
+        NOTIFY_ERROR("%s:%s", "pthread_mutexattr_setpshared", strerror(r));
+        init_result_ = r;
+        return;
+    }
+#endif
+
+    init_result_ = 0;
+}
+
+
+#ifdef PRINT_DEBUG
+/** dump_region
+ *  debug function, display a region's basic details
+**/
+static
+inline
+void dump_region(const char *label, allocw_region_t *region)
+{
+    NOTIFY_DEBUG("%s:%p", label, region);
+    NOTIFY_DEBUG("\t%s->%s:0x%zx", label, "size", region->size);
+    NOTIFY_DEBUG("\t%s->%s:0x%zx", label, "free_start", region->free_start);
+    NOTIFY_DEBUG("\t%s->%s:0x%zx", label, "free_end", region->free_end);
+    NOTIFY_DEBUG("\t%s->%s:0x%x", label, "num_alloced", region->num_alloced);
+}
+/** dump_block
+ *  debug function, display a block's basic details
+**/
+static
+inline
+void dump_block(const char *label, struct allocw_block_ *block)
+{
+    NOTIFY_DEBUG("%s:%p", label, block);
+    NOTIFY_DEBUG("\t%s->%s:0x%zx", label, "size", block->size);
+    NOTIFY_DEBUG("\t%s->%s:0x%zx", label, "prev_off", block->prev_off);
+    NOTIFY_DEBUG("\t%s->%s:0x%zx", label, "next_off", block->next_off);
+}
+#endif /* PRINT_DEBUG */
+
+
+/** region_init_
+ *  Initializes a region's header fields.
+**/
+static
+int region_init_(allocw_region_t *region, size_t size)
+{
+    struct allocw_block_ *b;
+    int r;
+
+    region->size = size;
+    memset(&region->freelist_mutex, 0, sizeof region->freelist_mutex);
+    if ( (r = pthread_mutex_init(&region->freelist_mutex, &allocw_mutexattr_)) )
+    {
+        NOTIFY_ERROR("%s:%s", "pthread_mutex_init", strerror(r));
+        memset(region, 0, sizeof *region);
+        return r;
+    }
+
+#ifdef PRINT_DEBUG
+    dump_region("region", region);
+#endif /* PRINT_DEBUG */
+
+    /* initialize the freelist */
+    b = (struct allocw_block_ *)region->data_;
+    b->next_off = 0;
+    b->prev_off = 0;
+    b->size = region->size - sizeof *region;
+    region->free_start = region->free_end = BLOCK_OFF(region, b);
+    region->num_alloced = 0;
+
+#ifdef PRINT_DEBUG
+    dump_block("first_free_block", b);
+#endif /* PRINT_DEBUG */
+
+    return 0;
+}
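+
+/* After region_init_, the segment holds the region header followed by a
+ * single free block spanning everything else:
+ *
+ *     offset 0                   sizeof(allocw_region_t)               size
+ *     | allocw_region_t header   | one free block (rest of the segment)   |
+ *
+ * with free_start == free_end == the offset of that block, and
+ * num_alloced == 0.
+ */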
+
+/** allocw_region_init
+ *  Prepares a segment of memory for use as an allocation region.
+ *
+ *  @param[in] region Pointer to the block of memory to initialize.
+ *  @param[in] size Total size in bytes of the memory region; usable size will be this minus the region header and per-allocation overhead.
+ *  @return Success or failure.
+**/
+int allocw_region_init(allocw_region_t *region, size_t size)
+{
+    struct allocw_block_ *b;
+    int r;
+
+    if (region == NULL)
+    {
+        NOTIFY_ERROR("null argument");
+        return EINVAL;
+    }
+    if (size < sizeof *region + sizeof *b )
+    {
+        NOTIFY_ERROR("headers require 0x%zx size, 0x%zx requested", sizeof *region + sizeof *b, size);
+        return ENOMEM;
+    }
+
+    if ( (r = pthread_once(&init_once_, allocw_init_once_)) )
+    {
+        NOTIFY_ERROR("%s:%s", "pthread_once", strerror(r));
+        return r;
+    }
+    if (init_result_)
+    {
+        NOTIFY_ERROR("%s:%d", "allocw_init_once_", init_result_);
+        memset(region, 0, sizeof *region);
+        return init_result_;
+    }
+
+    NOTIFY_DEBUG("sizeof allocw_region_t:0x%zx", sizeof *region);
+    NOTIFY_DEBUG("sizeof allocw_block_:0x%zx", sizeof *b);
+
+    if ( (r = region_init_(region, size)) )
+    {
+        NOTIFY_DEBUG("%s:%s", "allocw_region_init_", "failed");
+        return r;
+    }
+
+    return 0;
+}
+
+
+/** region_fini_
+ *  Destroys a region's mutex and clears its header.
+**/
+static
+inline
+int region_fini_(allocw_region_t *region)
+{
+    int r;
+
+    if ( (r = pthread_mutex_destroy(&region->freelist_mutex)) )
+    {
+        NOTIFY_ERROR("%s:%s", "pthread_mutex_destroy", strerror(r));
+        return r;
+    }
+
+    memset(region, 0, sizeof *region);
+
+    return 0;
+}
+
+/** allocw_region_fini
+ *  release a region
+ *
+ *  @param[in] region
+ *  @return Success or failure.
+**/
+int allocw_region_fini(allocw_region_t *region)
+{
+    if (region == NULL)
+    {
+        NOTIFY_ERROR("null argument");
+        return EINVAL;
+    }
+
+    NOTIFY_DEBUG("destroying region %p", region);
+
+    return region_fini_(region);
+}
+
+
+/** region_lock_
+ *  Acquires the lock on a region and disables signal processing.
+ *
+ *  @param[in] region which region to lock
+ *  @param[out] oset the signal mask which was replaced, or NULL to skip signal masking
+ *  @return Success or failure.
+**/
+static
+inline
+int region_lock_(allocw_region_t *region, sigset_t *oset)
+{
+    sigset_t set;
+    int r;
+
+    if (oset)
+        sigfillset(&set);
+
+    if ( (r = pthread_mutex_lock(&region->freelist_mutex)) )
+    {
+        NOTIFY_ERROR("%s:%s", "pthread_mutex_lock", strerror(r));
+        errno = r;
+        return -1;
+    }
+
+    if ( oset && (r = pthread_sigmask(SIG_SETMASK, &set, oset)) )
+    {
+        NOTIFY_ERROR("%s:%s", "pthread_sigmask", strerror(r));
+        errno = r;
+        return -1;
+    }
+
+    NOTIFY_DEBUG("region %p %sLOCKED", region, "");
+
+    return 0;
+}
+
+
+/** region_unlock_
+ *  Releases the lock on a region and restores signal processing.
+ *
+ *  @param[in] region which region to unlock
+ *  @param[in] oset the signal mask to restore, or NULL if signal masking was skipped
+ *  @return Success or failure.
+**/
+static
+inline
+int region_unlock_(allocw_region_t *region, sigset_t *oset)
+{
+    int retval = 0;
+    int r;
+
+    if ( oset && (r = pthread_sigmask(SIG_SETMASK, oset, NULL)) )
+    {
+        NOTIFY_ERROR("%s:%s", "pthread_sigmask", strerror(r));
+        errno = r;
+        retval = -1;
+    }
+
+    if ( (r = pthread_mutex_unlock(&region->freelist_mutex)) )
+    {
+        NOTIFY_ERROR("%s:%s", "pthread_mutex_unlock", strerror(r));
+        errno = r;
+        retval = -1;
+    }
+
+    NOTIFY_DEBUG("region %p %sLOCKED", region, "UN");
+
+    return retval;
+}
+
+
+/** block_split_
+ *  Possibly partitions a free block; returns a pointer to the pared-down block.
+ *  The block must be at least as large as the requested split size.
+ *
+ *  @param[in] region region
+ *  @param[in] b the free block to split
+ *  @param[in] payload_size size of the desired block's user data
+ *  @return pointer to the newly-pared block of at least #payload_size payload bytes
+**/
+static
+inline
+struct allocw_block_ *block_split_(allocw_region_t *region, struct allocw_block_ *b, size_t payload_size)
+{
+    const size_t new_block_size = payload_size + sizeof *b;
+    struct allocw_block_ *newb;
+
+    /* to split a block, we need room to spare for at least the header of next block */
+    if (b->size < new_block_size + sizeof *b)
+    {
+        NOTIFY_DEBUG("not enough room to split 0x%zx bytes off from 0x%zx byte block", new_block_size, b->size);
+        /* we'll just overcommit by the extra bytes */
+        return b;
+    }
+
+    NOTIFY_DEBUG("splitting %p (size:0x%zx) to fit requested 0x%zx (total 0x%zx)", b, b->size, payload_size, new_block_size + sizeof *b);
+
+    newb = (struct allocw_block_ *)((char *)b + new_block_size);
+    newb->size = b->size - (new_block_size);
+    b->size = new_block_size;
+    newb->prev_off = BLOCK_OFF(region, b);
+    newb->next_off = b->next_off;
+    if (newb->next_off)
+        BLOCK_PTR(region, newb->next_off)->prev_off = BLOCK_OFF(region, newb);
+    else
+        region->free_end = BLOCK_OFF(region, newb);
+
+    b->size = new_block_size;
+    b->next_off = BLOCK_OFF(region, newb);
+
+#ifdef PRINT_DEBUG
+    dump_block("newb", newb);
+    dump_block("b", b);
+#endif /* PRINT_DEBUG */
+
+    return b;
+}
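+
+/* Worked example (assuming, purely for illustration, that
+ * sizeof(struct allocw_block_) is 0x20): splitting a 0x100-byte free block
+ * for a 0x40-byte payload gives new_block_size = 0x40 + 0x20 = 0x60, so b
+ * shrinks to 0x60 bytes and a new 0xa0-byte free block is linked in right
+ * after it.  Had b been only 0x70 bytes, there would be no room for the
+ * remainder's header (0x70 < 0x60 + 0x20), and b would be returned whole,
+ * overcommitted by 0x10 bytes.
+ */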
+
+
+/** block_unsplit_
+ *  attempt to merge a free block with its next
+**/
+static
+inline
+void block_unsplit_(allocw_region_t *region, struct allocw_block_ *b)
+{
+    if (b->next_off)
+    {
+        struct allocw_block_ *n = BLOCK_PTR(region, b->next_off);
+
+        if (BLOCK_OFF(region, b) + b->size == b->next_off)
+        {
+            NOTIFY_DEBUG("merging id %zu (%p) size:%zu with id %zu (%p) size:%zu",
+                         BLOCK_OFF(region, b), b, b->size,
+                         BLOCK_OFF(region, n), n, n->size);
+            b->size += n->size;
+            b->next_off = n->next_off;
+            if (n->next_off)
+                BLOCK_PTR(region, n->next_off)->prev_off = BLOCK_OFF(region, b);
+            else
+                region->free_end = BLOCK_OFF(region, b);
+            NOTIFY_DEBUG("new block id %zu (%p) size:%zu",
+                         BLOCK_OFF(region, b), b, b->size);
+        }
+    }
+}
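+
+/* Adjacency is detected purely by arithmetic: a free block at offset 0x40
+ * with size 0x60 ends exactly at offset 0xa0, so if its next_off is also
+ * 0xa0 the two blocks are contiguous and collapse into one block of the
+ * combined size.  (Offsets here are illustrative.)
+ */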
+
+
+/** region_freelist_insert_
+ *  Returns a block to the address-ordered freelist and merges adjacent blocks.
+ *
+ *  @param[in] region region
+ *  @param[in] b block
+**/
+static
+inline
+void region_freelist_insert_(allocw_region_t *region, struct allocw_block_ *b)
+{
+    struct allocw_block_ *x;
+
+    NOTIFY_DEBUG("insert of block %zu (%p) size:%zu", BLOCK_OFF(region, b), b, b->size);
+
+    /* empty freelist? easy. */
+    if (region->free_start == 0)
+    {
+        NOTIFY_DEBUG("inserting into empty freelist");
+        b->prev_off = 0;
+        b->next_off = 0;
+        region->free_start = BLOCK_OFF(region, b);
+        region->free_end = BLOCK_OFF(region, b);
+        return;
+    }
+
+    /* find the block to insert in front of */
+    x = BLOCK_PTR(region, region->free_start);
+    while ( BLOCK_OFF(region, x)
+    &&      BLOCK_OFF(region, x) < BLOCK_OFF(region, b) )
+    {
+        x = BLOCK_PTR(region, x->next_off);
+    }
+
+    if (BLOCK_OFF(region, x))
+    {
+        NOTIFY_DEBUG("inserting before block %zu (%p) size:%zu prev:%zu next:%zu", BLOCK_OFF(region, x), x, x->size, x->prev_off, x->next_off);
+        b->next_off = BLOCK_OFF(region, x);
+        b->prev_off = x->prev_off;
+        if (b->prev_off) /* offset 0 means "no previous"; BLOCK_PTR() never yields NULL */
+            BLOCK_PTR(region, b->prev_off)->next_off = BLOCK_OFF(region, b);
+        else
+            region->free_start = BLOCK_OFF(region, b);
+        if (b->next_off)
+            BLOCK_PTR(region, b->next_off)->prev_off = BLOCK_OFF(region, b);
+        else
+            region->free_end = BLOCK_OFF(region, b);
+    }
+    else /* otherwise, b's offset is bigger than everything else, so tack it onto the end */
+    {
+        NOTIFY_DEBUG("inserting at end of freelist, after %zu (%p)", region->free_end, BLOCK_PTR(region, region->free_end));
+        b->next_off = 0;
+        b->prev_off = region->free_end;
+        BLOCK_PTR(region, region->free_end)->next_off = BLOCK_OFF(region, b);
+        region->free_end = BLOCK_OFF(region, b);
+    }
+
+    block_unsplit_(region, b);
+    if (b->prev_off)
+    {
+        x = BLOCK_PTR(region, b->prev_off);
+        block_unsplit_(region, x);
+    }
+}
+
+
+/** alloc_block_
+ *  Rounds #size up to an allocatable chunk, carves off enough storage for it from free-block #block, and removes it from the freelist.
+ *  #block must be big enough.
+**/
+static
+inline
+struct allocw_block_ *alloc_block_(allocw_region_t *region, struct allocw_block_ *block, size_t size)
+{
+    const size_t granularity = sizeof (struct allocw_block_);
+    const size_t size_adj = (size + granularity - 1) & ~(granularity - 1);
+
+    block = block_split_(region, block, size_adj);
+
+    if (block->prev_off)
+        BLOCK_PTR(region, block->prev_off)->next_off = block->next_off;
+    else
+        region->free_start = block->next_off;
+    if (block->next_off)
+        BLOCK_PTR(region, block->next_off)->prev_off = block->prev_off;
+    else
+        region->free_end = block->prev_off;
+    block->next_off = block->prev_off = 0;
+
+    return block;
+}
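+
+/* The rounding mask assumes the granularity, sizeof(struct allocw_block_),
+ * is a power of two.  Example with granularity 0x20: a request for 0x14
+ * bytes rounds to (0x14 + 0x1f) & ~0x1f = 0x20 payload bytes before the
+ * split is attempted.
+ */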
+
+/** allocw_realloc
+ *  Resizes a previously allocated id.
+ *  Implementation idiosyncrasy: if size is zero, frees id and returns 0.
+ *
+ *  @param[in] region region
+ *  @param[in] id id of the existing allocation, or 0 to allocate anew
+ *  @param[in] size new size in bytes
+ *  @return new id, or 0 on failure
+**/
+allocw_id_t allocw_realloc(allocw_region_t *region, allocw_id_t id, size_t size)
+{
+    allocw_id_t retval = 0;
+    sigset_t set;
+    struct allocw_block_ *b,
+                         *b_id,
+                         *b_want,
+                         *best_fit = NULL;
+
+    if (region == NULL)
+    {
+        NOTIFY_ERROR("null argument");
+        return 0;
+    }
+
+    /* route simple requests away */
+
+    if (id == 0)
+        return allocw_malloc(region, size);
+
+    if (size == 0)
+    {
+        allocw_free(region, id);
+        return 0;
+    }
+
+    if (size <= BLOCK_PTR(region, id)->size)
+    {
+        NOTIFY_DEBUG("ignoring realloc to smaller block");
+        return id;
+    }
+
+    if (region_lock_(region, &set))
+        return 0;
+
+    /*
+        Locate the smallest free block we can fit into, EXCEPT favor a free
+        block of any adequate size immediately following the original block,
+        to avoid copying.
+    */
+    b_id = BLOCK_PTR(region, id);
+    b_want = BLOCK_PTR(region, id + b_id->size);
+    for ( b = BLOCK_PTR(region, region->free_start);
+          (void *)b != (void *)region;
+          b = BLOCK_PTR(region, b->next_off) )
+    {
+        if (b->size == size || b->size >= size + sizeof *b)
+        {
+            if (best_fit == NULL
+            ||  b->size < best_fit->size)
+            {
+                best_fit = b;
+                if (best_fit == b_want)
+                    break;
+            }
+        }
+    }
+
+    if (best_fit == NULL)
+    {
+        NOTIFY_DEBUG("no free block large enough");
+        region_unlock_(region, &set);
+        errno = ENOMEM;
+        return 0;
+    }
+
+    if (best_fit == b_want)
+    {
+        size_t size_diff = size - b_id->size;
+
+        NOTIFY_DEBUG("growing %p (size:0x%zx) by size_diff:0x%zx into freeblock %p (size:0x%zx)",
+                     b_id, b_id->size,
+                     size_diff,
+                     best_fit, best_fit->size);
+
+        best_fit = alloc_block_(region, best_fit, size_diff);
+        /* alloc_block_ has already detached best_fit from the freelist, and
+         * b_id is an allocated block with zeroed link offsets, so absorbing
+         * the carved-off space is just a size update */
+        b_id->size += best_fit->size;
+
+        retval = id;
+    }
+    else
+    {
+        size_t copy_size = b_id->size - sizeof *b_id; /* old payload; the early return above guarantees size exceeds it */
+
+        NOTIFY_DEBUG("reallocating %p (size:0x%zx) to %p (size:0x%zx)",
+                     b_id, b_id->size,
+                     best_fit, best_fit->size);
+
+        best_fit = alloc_block_(region, best_fit, size);
+        retval = BLOCK_OFF(region, best_fit);
+        memcpy(best_fit->data_, b_id->data_, copy_size);
+        region_freelist_insert_(region, b_id);
+    }
+
+    region_unlock_(region, &set);
+
+    return retval;
+}
+
+/** allocw_free
+ *  Free a previously allocated memory id in a region.
+ *
+ *  @param[in] region region
+ *  @param[in] id id of memory in region to free
+**/
+void allocw_free(allocw_region_t *region, allocw_id_t id)
+{
+    sigset_t set;
+    struct allocw_block_ *b;
+
+    if (region == NULL)
+    {
+        NOTIFY_ERROR("null argument");
+        errno = EINVAL;
+        return;
+    }
+
+    if (id == 0)
+    {
+        NOTIFY_ERROR("attempted free of null id");
+        return;
+    }
+
+    b = BLOCK_PTR(region, id);
+
+    NOTIFY_DEBUG("want to free block %p size:0x%zx", b, b->size);
+
+    if (region_lock_(region, &set))
+        return;
+
+    region_freelist_insert_(region, b);
+
+    region->num_alloced--;
+
+    region_unlock_(region, &set);
+}
+
+
+/** allocw_malloc
+ *  Allocates a block of memory from the region.
+ *  
+ *  @param[in] region region
+ *  @param[in] size size in bytes to allocate
+ *  @return id of newly allocated memory
+**/
+allocw_id_t allocw_malloc(allocw_region_t *region, size_t size)
+{
+    const size_t granularity = sizeof (struct allocw_block_);
+    const size_t size_adj = (size + granularity - 1) & ~(granularity - 1);
+    sigset_t set;
+    struct allocw_block_ *b;
+    allocw_id_t ret_id = 0;
+
+    if (region == NULL)
+    {
+        NOTIFY_ERROR("null argument");
+        errno = EINVAL;
+        return 0;
+    }
+
+    if (region_lock_(region, &set))
+        return 0;
+
+    for (b = BLOCK_PTR(region, region->free_start);
+         (void *)b != (void *)region;
+         b = BLOCK_PTR(region, b->next_off) )
+    {
+        NOTIFY_DEBUG("checking free block:%p size:0x%zu", b, b->size);
+        if (b->size >= size_adj)
+            break;
+    }
+
+    if ((void *)b == (void *)region)
+    {
+        NOTIFY_DEBUG("no free block with enough room");
+        region_unlock_(region, &set);
+        errno = ENOMEM;
+        return 0;
+    }
+
+    b = alloc_block_(region, b, size);
+    ret_id = BLOCK_OFF(region, b);
+
+    region->num_alloced++;
+
+    region_unlock_(region, &set);
+
+    return ret_id;
+}
+
+
+/** allocw_ptr
+ *  Converts a (region,id) to a normal pointer.
+ *
+ *  @param[in] region region
+ *  @param[in] id id
+ *  @return pointer
+**/
+inline
+void *allocw_ptr(allocw_region_t *region, allocw_id_t id)
+{
+    if (region == NULL || id == 0)
+        return NULL;
+
+    return &(BLOCK_PTR(region, id)->data_);
+}
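+
+/* Because ids are plain offsets, the same id resolves correctly even after
+ * the whole region has been copied elsewhere (see allocw_region_migrate
+ * below); pointers returned here are only valid while the region stays put,
+ * so re-resolve after a move rather than caching:
+ *
+ *     char *p = allocw_ptr(new_region, id);
+ */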
+
+
+/** allocw_region_migrate
+ *  Initializes a new region with an existing region's data.
+ *  Allocation ids from the source remain valid in the destination.
+ *
+ *  @param[in] dst_region destination region to initialize
+ *  @param[in] dst_size total size in bytes of the destination region
+ *  @param[in] src_region source region to copy from
+ *  @return Success or failure.
+**/
+int allocw_region_migrate(allocw_region_t *dst_region, size_t dst_size, allocw_region_t *src_region)
+{
+    sigset_t set;
+    int retval = 0;
+    int r;
+
+    if (dst_region == NULL || src_region == NULL)
+    {
+        NOTIFY_ERROR("null argument");
+        return EINVAL;
+    }
+
+    NOTIFY_DEBUG("migrating region (%p size:%zu) to (%p size:%zu)",
+                 src_region, src_region->size,
+                 dst_region, dst_size);
+
+    if (dst_size < (src_region->size + sizeof (struct allocw_block_))
+    &&  dst_size != src_region->size )
+    {
+        if (src_region->free_end + BLOCK_PTR(src_region, src_region->free_end)->size == src_region->size)
+        {
+            NOTIFY_DEBUG("src_region is shrinkable by 0x%zx bytes",
+                         BLOCK_PTR(src_region, src_region->free_end)->size);
+            NOTIFY_DEBUG("but shrinking isn't implemented yet");
+        }
+        NOTIFY_ERROR("destination region too small");
+        return ENOMEM;
+    }
+
+    if ( (r = region_init_(dst_region, dst_size)) )
+    {
+        NOTIFY_DEBUG("%s:%s", "region_init_", "failed");
+        return r;
+    }
+
+    if (region_lock_(dst_region, &set))
+        return -1;
+
+    if (region_lock_(src_region, NULL))
+    {
+        region_unlock_(dst_region, &set);
+        return -1;
+    }
+
+    /* copy only the data area; the destination header was just initialized */
+    memmove(&(dst_region->data_), &(src_region->data_), src_region->size - sizeof *src_region);
+    dst_region->free_start = src_region->free_start;
+    dst_region->free_end = src_region->free_end;
+    dst_region->num_alloced = src_region->num_alloced;
+
+    if (dst_size > src_region->size)
+    {
+        struct allocw_block_ *new_free_block = BLOCK_PTR(dst_region, src_region->size);
+        new_free_block->size = dst_region->size - src_region->size;
+        region_freelist_insert_(dst_region, new_free_block);
+    }
+
+    if (region_unlock_(src_region, NULL))
+        retval = -1;
+
+    if (region_unlock_(dst_region, &set))
+        retval = -1;
+
+    return retval;
+}
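+
+/* Migration sketch, mirroring the test driver below: ids remain valid in
+ * the destination, so callers only need to swap the region pointer.
+ *
+ *     allocw_region_t *bigger = malloc(new_sz);
+ *     if (allocw_region_migrate(bigger, new_sz, region) == 0) {
+ *         allocw_region_fini(region);
+ *         region = bigger;
+ *     }
+ *
+ * Old ids obtained from the source region still resolve in the new one.
+ */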
+
+
+#ifdef TEST
+/** freelist_stats
+ *  debugging function - info on freelist
+**/
+static
+void freelist_stats(allocw_region_t *region)
+{
+    struct allocw_block_ *b,
+                         *b_largest = NULL,
+                         *b_smallest = NULL;
+
+    for ( b = BLOCK_PTR(region, region->free_start);
+          (void *)b != (void *)region;
+          b = BLOCK_PTR(region, b->next_off) )
+    {
+        if (b_smallest == NULL
+        ||  b->size < b_smallest->size)
+            b_smallest = b;
+
+        if (b_largest == NULL
+        ||  b->size > b_largest->size)
+            b_largest = b;
+    }
+
+    if (b_smallest)
+        fprintf(stderr, "\t%s block: %p (size:0x%zx)\n", "smallest", b_smallest, b_smallest->size);
+    if (b_largest)
+        fprintf(stderr, "\t%s block: %p (size:0x%zx)\n", "largest", b_largest, b_largest->size);
+
+    b = BLOCK_PTR(region, region->free_end);
+    if ((void *)b != (void *)region)
+        fprintf(stderr, "\t%s block: %p (size:0x%zx)\n",
+                (BLOCK_OFF(region, b) + b->size == region->size) ? "wilderness" : "last",
+                b, b->size);
+}
+
+/** freelist_walk
+ *  debugging function - iterates over a region's freelist
+**/
+static
+void freelist_walk(allocw_region_t *region)
+{
+    sigset_t set;
+    struct allocw_block_ *block;
+
+    if (region_lock_(region, &set))
+        return;
+
+#ifdef PRINT_DEBUG
+    dump_region("region", region);
+#endif /* PRINT_DEBUG */
+
+    for ( block = BLOCK_PTR(region, region->free_start);
+          (void *)block != (void *)region;
+          block = BLOCK_PTR(region, block->next_off)
+        )
+    {
+#ifdef PRINT_DEBUG
+    dump_block("block", block);
+#endif /* PRINT_DEBUG */
+    }
+
+    region_unlock_(region, &set);
+}
+
+
+/** main
+ *  for testing
+**/
+int main(int argc UNUSED, char **argv UNUSED)
+{
+    int r;
+    allocw_region_t *region, *region_two;
+    size_t region_sz;
+    allocw_id_t ptr[10];
+    size_t i;
+
+    region_sz = 1024;
+    region = malloc(region_sz);
+    region_two = malloc(region_sz * 2);
+    if (region == NULL
+    ||  region_two == NULL)
+    {
+        NOTIFY_ERROR("%s:%s", "malloc", strerror(errno));
+        exit(EXIT_FAILURE);
+    }
+
+    if ( (r = allocw_region_init(region, region_sz)) )
+    {
+        NOTIFY_ERROR("%s:%d", "allocw_region_init", r);
+        exit(EXIT_FAILURE);
+    }
+
+    freelist_walk(region);
+
+    for (i = 0; i < (sizeof ptr / sizeof *ptr); i++)
+    {
+        ptr[i] = allocw_malloc(region, 20);
+        if (ptr[i] == 0)
+        {
+            NOTIFY_ERROR("%s:%s(%d)", "allocw_malloc", strerror(errno), errno);
+            exit(EXIT_FAILURE);
+        }
+        fprintf(stderr, "allocated id:%zu (%p)\n", ptr[i], allocw_ptr(region, ptr[i]));
+    }
+
+    fprintf(stderr, "dumping pre-migration freelist...\n");
+    freelist_walk(region);
+    freelist_stats(region);
+
+    r = allocw_region_migrate(region_two, region_sz * 2, region);
+    if (r)
+    {
+        NOTIFY_ERROR("%s:%s(%d)", "allocw_region_migrate", strerror(r), r);
+        exit(EXIT_FAILURE);
+    }
+
+    r = allocw_region_fini(region);
+    if (r)
+    {
+        NOTIFY_ERROR("%s:%s(%d)", "allocw_region_fini", strerror(r), r);
+        exit(EXIT_FAILURE);
+    }
+
+    fprintf(stderr, "dumping post-migration freelist...\n");
+    freelist_walk(region_two);
+    freelist_stats(region_two);
+
+    for (i = 0; i < (sizeof ptr / sizeof *ptr); i++)
+    {
+        allocw_free(region_two, ptr[i]);
+        fprintf(stderr, "freed id:%zu (%p)\n", ptr[i], allocw_ptr(region_two, ptr[i]));
+    }
+
+    freelist_walk(region_two);
+    freelist_stats(region_two);
+
+    region = region_two;
+
+    ptr[0] = allocw_realloc(region, 0, 100);
+    fprintf(stderr, "\t%zu\n", ptr[0]);
+    ptr[1] = allocw_realloc(region, ptr[0], 200);
+    fprintf(stderr, "\t%zu\n", ptr[1]);
+    ptr[2] = allocw_realloc(region, ptr[1], 300);
+    fprintf(stderr, "\t%zu\n", ptr[2]);
+    ptr[3] = allocw_realloc(region, ptr[2], 0);
+    fprintf(stderr, "\t%zu\n", ptr[3]);
+
+    freelist_walk(region);
+    freelist_stats(region);
+
+    exit(EXIT_SUCCESS);
+}
+#endif /* TEST */