 * This is a very, very basic allocator.
 * It burns a lot of space and makes no effort to be clever or efficient,
 * but it does allow one to use a segment of (presumed shared) memory as a
 * pool for dynamic allocation, and it allows that pool to be moved around.
 * The heavy-handed signal masking is probably not the most elegant way of
 * avoiding interprocess corruption.
#include "allocwithin_int.h"
#define NOTIFY_ERROR(...) do { fprintf(stderr, "ERROR "); fprintf(stderr, __VA_ARGS__); fprintf(stderr, "\n"); } while (0)
#ifdef PRINT_DEBUG
#define NOTIFY_DEBUG(...) do { fprintf(stderr, "DEBUG [%d] %s> ", __LINE__, __func__); fprintf(stderr, __VA_ARGS__); fprintf(stderr, "\n"); } while (0)
#else /* PRINT_DEBUG */
#define NOTIFY_DEBUG(...) do { } while (0)
#endif /* PRINT_DEBUG */
static const char * const src_id_ = "$Id$";

static pthread_once_t init_once_ = PTHREAD_ONCE_INIT;
static int init_result_ = -1;
static pthread_mutexattr_t allocw_mutexattr_;
/*
 * private function prototypes
 */
static void allocw_init_once_(void);
static int region_init_(allocw_region_t *region, size_t size);
static int region_fini_(allocw_region_t *region);
static int region_lock_(allocw_region_t *region, sigset_t *oset);
static int region_unlock_(allocw_region_t *region, sigset_t *oset);
static void region_freelist_insert_(allocw_region_t *region, struct allocw_block_ *b);
static struct allocw_block_ *block_split_(allocw_region_t *region, struct allocw_block_ *b, size_t payload_size);
static void block_unsplit_(allocw_region_t *region, struct allocw_block_ *b);
static struct allocw_block_ *alloc_block_(allocw_region_t *region, struct allocw_block_ *block, size_t size);
/*
 * called once to initialize static attrs
 */
void allocw_init_once_(void)
    NOTIFY_DEBUG("initializing");

    memset(&allocw_mutexattr_, 0, sizeof allocw_mutexattr_);
    if ( (r = pthread_mutexattr_init(&allocw_mutexattr_)) )
        NOTIFY_ERROR("%s:%s", "pthread_mutexattr_init", strerror(r));

    /* darwin doesn't like this */
    if ( (r = pthread_mutexattr_setpshared(&allocw_mutexattr_, PTHREAD_PROCESS_SHARED)) )
        NOTIFY_ERROR("%s:%s", "pthread_mutexattr_setpshared", strerror(r));
#ifdef PRINT_DEBUG
/*
 * debug function, display a region's basic details
 */
void dump_region(char *label, allocw_region_t *region)
    NOTIFY_DEBUG("%s:%p", label, region);
    NOTIFY_DEBUG("\t%s->%s:0x%zx", label, "size", region->size);
    NOTIFY_DEBUG("\t%s->%s:0x%zx", label, "free_start", region->free_start);
    NOTIFY_DEBUG("\t%s->%s:0x%zx", label, "free_end", region->free_end);
    NOTIFY_DEBUG("\t%s->%s:0x%x", label, "num_alloced", region->num_alloced);
/*
 * debug function, display a block's basic details
 */
void dump_block(char *label, struct allocw_block_ *block)
    NOTIFY_DEBUG("%s:%p", label, block);
    NOTIFY_DEBUG("\t%s->%s:0x%zx", label, "size", block->size);
    NOTIFY_DEBUG("\t%s->%s:0x%zx", label, "prev_off", block->prev_off);
    NOTIFY_DEBUG("\t%s->%s:0x%zx", label, "next_off", block->next_off);
#endif /* PRINT_DEBUG */
/*
 * Initializes a region's header fields.
 */
int region_init_(allocw_region_t *region, size_t size)
    struct allocw_block_ *b;

    memset(&region->freelist_mutex, 0, sizeof region->freelist_mutex);
    if ( (r = pthread_mutex_init(&region->freelist_mutex, &allocw_mutexattr_)) )
        NOTIFY_ERROR("%s:%s", "pthread_mutex_init", strerror(r));
        memset(region, 0, sizeof *region);

#ifdef PRINT_DEBUG
    dump_region("region", region);
#endif /* PRINT_DEBUG */

    /* initialize the freelist */
    b = (struct allocw_block_ *)region->data_;
    b->size = region->size - sizeof *region;
    region->free_start = region->free_end = BLOCK_OFF(region, b);
    region->num_alloced = 0;

#ifdef PRINT_DEBUG
    dump_block("first_free_block", b);
#endif /* PRINT_DEBUG */
/** allocw_region_init
 * Prepares a segment of memory for use as an allocation region.
 * @note Total usable size will be #size minus the region header and per-allocation overhead.
 * @param[in] region Pointer to the block of memory to initialize.
 * @param[in] size Total size in bytes of the memory region to initialize; usable size will be this minus the region header size and per-allocation overhead.
 * @return Success or failure.
 */
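/*
 * Example usage (illustrative sketch only; the 4096-byte size and the
 * abort-on-failure handling are placeholders, not part of this module;
 * a nonzero return indicates failure):
 *
 *     allocw_region_t *region = malloc(4096);
 *     if (region == NULL || allocw_region_init(region, 4096))
 *         abort();
 *     ... allocw_malloc / allocw_free within the region ...
 *     allocw_region_fini(region);
 *     free(region);
 */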
int allocw_region_init(allocw_region_t *region, size_t size)
    struct allocw_block_ *b;

    if (region == NULL)
        NOTIFY_ERROR("null argument");

    if (size < sizeof *region + sizeof *b)
        NOTIFY_ERROR("headers require 0x%zx size, 0x%zx requested", sizeof *region + sizeof *b, size);

    if ( (r = pthread_once(&init_once_, allocw_init_once_)) )
        NOTIFY_ERROR("%s:%s", "pthread_once", strerror(r));

    if (init_result_)
        NOTIFY_ERROR("%s:%d", "allocw_init_once_", init_result_);
        memset(region, 0, sizeof *region);

    NOTIFY_DEBUG("sizeof allocw_region_t:0x%zx", sizeof *region);
    NOTIFY_DEBUG("sizeof allocw_block_:0x%zx", sizeof *b);

    if ( (r = region_init_(region, size)) )
        NOTIFY_DEBUG("%s:%s", "region_init_", "failed");
int region_fini_(allocw_region_t *region)
    if ( (r = pthread_mutex_destroy(&region->freelist_mutex)) )
        NOTIFY_ERROR("%s:%s", "pthread_mutex_destroy", strerror(r));

    memset(region, 0, sizeof *region);
/** allocw_region_fini
 * Releases a region's resources.
 * @return Success or failure.
 */
int allocw_region_fini(allocw_region_t *region)
    if (region == NULL)
        NOTIFY_ERROR("null argument");

    NOTIFY_DEBUG("destroying region %p", region);

    return region_fini_(region);
/** region_lock_
 * Acquires the lock on a region and disables signal processing.
 * @param[in] region which region to lock
 * @param[out] oset the signal mask which was cleared
 * @return Success or failure.
 */
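/*
 * Masking all signals while the lock is held is the "heavy-handed" mechanism
 * mentioned in the file header: it presumably keeps a signal handler from
 * interrupting the holder mid-update and leaving the shared freelist in an
 * inconsistent state.
 */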
int region_lock_(allocw_region_t *region, sigset_t *oset)
    if ( (r = pthread_mutex_lock(&region->freelist_mutex)) )
        NOTIFY_ERROR("%s:%s", "pthread_mutex_lock", strerror(r));

    if ( oset && (r = pthread_sigmask(SIG_SETMASK, &set, oset)) )
        NOTIFY_ERROR("%s:%s", "pthread_sigmask", strerror(r));

    NOTIFY_DEBUG("region %p %sLOCKED", region, "");
/** region_unlock_
 * Releases the lock on a region and restores signal processing.
 * @param[in] region which region to unlock
 * @param[in] oset the signal mask to restore
 * @return Success or failure.
 */
int region_unlock_(allocw_region_t *region, sigset_t *oset)
    if ( oset && (r = pthread_sigmask(SIG_SETMASK, oset, NULL)) )
        NOTIFY_ERROR("%s:%s", "pthread_sigmask", strerror(r));

    if ( (r = pthread_mutex_unlock(&region->freelist_mutex)) )
        NOTIFY_ERROR("%s:%s", "pthread_mutex_unlock", strerror(r));

    NOTIFY_DEBUG("region %p %sLOCKED", region, "UN");
/** block_split_
 * Possibly partitions a free block; returns a pointer to a block of the requested size.
 * The block must be at least as large as the requested split size.
 * @param[in] region region
 * @param[in] b the block to split
 * @param[in] payload_size size of the desired block's user data
 * @return pointer to the newly-pared block of at least #payload_size payload bytes
 */
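/*
 * Worked example (the 32-byte header is an assumed figure; the real value is
 * sizeof(struct allocw_block_)): splitting a 256-byte free block for a
 * 64-byte payload gives new_block_size = 64 + 32 = 96 and leaves a
 * 256 - 96 = 160-byte remainder block.  The split only happens because
 * 256 >= 96 + 32; a 112-byte free block would instead be handed out whole,
 * overcommitting by the slack bytes.
 */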
struct allocw_block_ *block_split_(allocw_region_t *region, struct allocw_block_ *b, size_t payload_size)
    const size_t new_block_size = payload_size + sizeof *b;
    struct allocw_block_ *newb;

    /* to split a block, we need room to spare for at least the header of the next block */
    if (b->size < new_block_size + sizeof *b)
        NOTIFY_DEBUG("not enough room to split 0x%zx bytes off from 0x%zx byte block", new_block_size, b->size);
        /* we'll just overcommit by the extra bytes */

    NOTIFY_DEBUG("splitting %p (size:0x%zx) to fit requested 0x%zx (total 0x%zx)", b, b->size, payload_size, new_block_size + sizeof *b);

    newb = (struct allocw_block_ *)((char *)b + new_block_size);
    newb->size = b->size - new_block_size;
    b->size = new_block_size;
    newb->prev_off = BLOCK_OFF(region, b);
    newb->next_off = b->next_off;

    BLOCK_PTR(region, newb->next_off)->prev_off = BLOCK_OFF(region, newb);

    region->free_end = BLOCK_OFF(region, newb);

    b->size = new_block_size;
    b->next_off = BLOCK_OFF(region, newb);

#ifdef PRINT_DEBUG
    dump_block("newb", newb);
#endif /* PRINT_DEBUG */
/*
 * attempt to merge a free block with its next
 */
void block_unsplit_(allocw_region_t *region, struct allocw_block_ *b)
    struct allocw_block_ *n = BLOCK_PTR(region, b->next_off);

    if (BLOCK_OFF(region, b) + b->size == b->next_off)
        NOTIFY_DEBUG("merging id %zu (%p) size:%zu with id %zu (%p) size:%zu",
                     BLOCK_OFF(region, b), b, b->size,
                     BLOCK_OFF(region, n), n, n->size);

        b->next_off = n->next_off;

        BLOCK_PTR(region, n->next_off)->prev_off = BLOCK_OFF(region, b);

        region->free_end = BLOCK_OFF(region, b);

        NOTIFY_DEBUG("new block id %zu (%p) size:%zu",
                     BLOCK_OFF(region, b), b, b->size);
/** region_freelist_insert_
 * Returns a block to the freelist and merges adjacent blocks.
 * @param[in] region region
 * @param[in] b the block to insert
 */
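/*
 * The freelist is kept sorted by block offset: the loop below walks until it
 * finds the first free block at a higher offset than the one being inserted.
 * Keeping address order is what lets block_unsplit_() coalesce the inserted
 * block with a physically adjacent neighbor afterwards.
 */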
void region_freelist_insert_(allocw_region_t *region, struct allocw_block_ *b)
    struct allocw_block_ *x;

    NOTIFY_DEBUG("insert of block %zu (%p) size:%zu", BLOCK_OFF(region, b), b, b->size);

    /* empty freelist? easy. */
    if (region->free_start == 0)
        NOTIFY_DEBUG("inserting into empty freelist");
        region->free_start = BLOCK_OFF(region, b);
        region->free_end = BLOCK_OFF(region, b);

    /* find the block to insert in front of */
    x = BLOCK_PTR(region, region->free_start);
    while ( BLOCK_OFF(region, x)
            && BLOCK_OFF(region, x) < BLOCK_OFF(region, b) )
        x = BLOCK_PTR(region, x->next_off);

    if (BLOCK_OFF(region, x))
        NOTIFY_DEBUG("inserting before block %zu (%p) size:%zu prev:%zu next:%zu", BLOCK_OFF(region, x), x, x->size, x->prev_off, x->next_off);
        b->next_off = BLOCK_OFF(region, x);
        b->prev_off = x->prev_off;
        if (BLOCK_PTR(region, x->prev_off))
            BLOCK_PTR(region, b->prev_off)->next_off = BLOCK_OFF(region, b);
        region->free_start = BLOCK_OFF(region, b);
        if (BLOCK_PTR(region, b->next_off))
            BLOCK_PTR(region, b->next_off)->prev_off = BLOCK_OFF(region, b);
        region->free_end = BLOCK_OFF(region, b);
    else /* otherwise, b's offset is bigger than everything else, so tack it onto the end */
        NOTIFY_DEBUG("inserting at end of freelist, after %zu (%p)", region->free_end, BLOCK_PTR(region, region->free_end));
        b->prev_off = region->free_end;
        BLOCK_PTR(region, region->free_end)->next_off = BLOCK_OFF(region, b);
        region->free_end = BLOCK_OFF(region, b);

    block_unsplit_(region, b);

    x = BLOCK_PTR(region, b->prev_off);
    block_unsplit_(region, x);
/*
 * Rounds #size up to an allocatable chunk, carves off enough storage for it
 * from free-block #block, and removes it from the freelist.
 * #block must be big enough.
 */
struct allocw_block_ *alloc_block_(allocw_region_t *region, struct allocw_block_ *block, size_t size)
    const size_t granularity = sizeof (struct allocw_block_);
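    /*
     * The next line rounds the request up to a multiple of the header size
     * (the mask trick assumes granularity is a power of two).  For example,
     * if sizeof(struct allocw_block_) were 32 -- an assumed figure -- a
     * 20-byte request becomes (20 + 31) & ~31 = 32, and a 33-byte request
     * becomes 64.
     */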
    const size_t size_adj = (size + granularity - 1) & ~(granularity - 1);
    block = block_split_(region, block, size_adj);

    BLOCK_PTR(region, block->prev_off)->next_off = block->next_off;

    region->free_start = block->next_off;

    BLOCK_PTR(region, block->next_off)->prev_off = block->prev_off;

    region->free_end = block->prev_off;

    block->next_off = block->prev_off = 0;
/** allocw_realloc
 * Resize a previously allocated id.
 * Implementation idiosyncrasy: if size is zero, the id is freed and 0 is returned.
 */
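/*
 * Behavior sketch (mirrors the dispatch at the top of the function; the
 * sizes are illustrative):
 *
 *     id = allocw_realloc(region, 0, 100);    same as allocw_malloc(region, 100)
 *     id = allocw_realloc(region, id, 200);   may return a new id; contents are copied
 *     id = allocw_realloc(region, id, 0);     frees the old id and returns 0
 */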
allocw_id_t
allocw_realloc(allocw_region_t *region, allocw_id_t id, size_t size)
    allocw_id_t retval = 0;

    struct allocw_block_ *b, *b_id, *b_want, *best_fit = NULL;

    if (region == NULL)
        NOTIFY_ERROR("null argument");

    /* route simple requests away */
    if (id == 0)
        return allocw_malloc(region, size);

    if (size == 0)
        allocw_free(region, id);

    if (size <= BLOCK_PTR(region, id)->size)
        NOTIFY_DEBUG("ignoring realloc to smaller block");

    if (region_lock_(region, &set))

    /*
     * Locate the smallest free block we can fit into, EXCEPT favor a
     * freeblock of any adequate size immediately following the original
     * block, so the allocation can be grown in place without copying.
     */
    b_id = BLOCK_PTR(region, id);
    b_want = BLOCK_PTR(region, id + b_id->size);
    for ( b = BLOCK_PTR(region, region->free_start);
          (void *)b != (void *)region;
          b = BLOCK_PTR(region, b->next_off) )
        if (b->size == size || b->size >= size + sizeof *b)
            if (best_fit == NULL
                || b->size < best_fit->size)
                best_fit = b;
            if (best_fit == b_want)
                break;

    if (best_fit == NULL)

    if (best_fit == b_want)
        size_t size_diff = size - b_id->size;

        NOTIFY_DEBUG("growing %p (size:0x%zx) by size_diff:0x%zx into freeblock %p (size:0x%zx)",
                     b_id, b_id->size, size_diff,
                     best_fit, best_fit->size);

        best_fit = alloc_block_(region, best_fit, size_diff);
        b_id->size += best_fit->size;
        b_id->next_off = best_fit->next_off;

        BLOCK_PTR(region, best_fit->next_off)->prev_off = BLOCK_OFF(region, b_id);

        region->free_end = id;

        size_t min_size = (b_id->size < size) ? b_id->size : size;

        NOTIFY_DEBUG("reallocating %p (size:0x%zx) to %p (size:0x%zx)",
                     b_id, b_id->size,
                     best_fit, best_fit->size);

        best_fit = alloc_block_(region, best_fit, size);
        retval = BLOCK_OFF(region, best_fit);
        memcpy(best_fit->data_, b_id->data_, min_size);
        region_freelist_insert_(region, b_id);

    if (region_unlock_(region, &set))
/** allocw_free
 * Free a previously allocated memory id in a region.
 * @param[in] region region
 * @param[in] id id of memory in region to free
 */
void allocw_free(allocw_region_t *region, allocw_id_t id)
    struct allocw_block_ *b;

    if (region == NULL)
        NOTIFY_ERROR("null argument");

    if (id == 0)
        NOTIFY_ERROR("attempted free of null id");

    b = BLOCK_PTR(region, id);

    NOTIFY_DEBUG("want to free block %p size:0x%zx", b, b->size);

    if (region_lock_(region, &set))

    region_freelist_insert_(region, b);

    region->num_alloced--;

    if (region_unlock_(region, &set))
/** allocw_malloc
 * Allocates a block of memory from the region.
 * @param[in] region region
 * @param[in] size size in bytes to allocate
 * @return id of newly allocated memory
 */
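/*
 * Example usage (sketch; handle_failure() is a hypothetical error handler).
 * An id is an offset within the region rather than a pointer, so it remains
 * meaningful if the region is later migrated; convert it with allocw_ptr()
 * whenever a real pointer is needed.
 *
 *     allocw_id_t id = allocw_malloc(region, 64);
 *     if (id == 0)
 *         handle_failure();
 *     char *p = allocw_ptr(region, id);
 *     memset(p, 0, 64);
 *     allocw_free(region, id);
 */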
allocw_id_t
allocw_malloc(allocw_region_t *region, size_t size)
    const size_t granularity = sizeof (struct allocw_block_);
    const size_t size_adj = (size + granularity - 1) & ~(granularity - 1);

    struct allocw_block_ *b;
    allocw_id_t ret_id = 0;

    if (region == NULL)
        NOTIFY_ERROR("null argument");

    if (region_lock_(region, &set))

    for (b = BLOCK_PTR(region, region->free_start);
         (void *)b != (void *)region;
         b = BLOCK_PTR(region, b->next_off) )
        NOTIFY_DEBUG("checking free block:%p size:0x%zx", b, b->size);
        if (b->size >= size_adj)
            break;

    if ((void *)b == (void *)region)
        NOTIFY_DEBUG("no free block with enough room");
        if (region_unlock_(region, &set))

    b = alloc_block_(region, b, size);
    ret_id = BLOCK_OFF(region, b);

    region->num_alloced++;

    if (region_unlock_(region, &set))
/** allocw_ptr
 * Converts a (region,id) to a normal pointer.
 * @param[in] region region
 */
void *allocw_ptr(allocw_region_t *region, allocw_id_t id)
    if (region == NULL || id == 0)
        return NULL;

    return &(BLOCK_PTR(region, id)->data_);
/** allocw_region_migrate
 * Initialize a new region with an existing region's data.
 * @param[in] dst_region destination region to initialize
 * @param[in] dst_size total size in bytes of the destination region
 * @param[in] src_region source region to copy from
 * @return success or failure
 */
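/*
 * Example usage (sketch, following the sequence in main() below; new_size is
 * a hypothetical size no smaller than the source region, and handle_failure()
 * is a hypothetical error handler).  Existing allocw_id_t values remain valid
 * against the destination region after migration.
 *
 *     allocw_region_t *bigger = malloc(new_size);
 *     if (bigger == NULL || allocw_region_migrate(bigger, new_size, region))
 *         handle_failure();
 *     allocw_region_fini(region);
 *     free(region);
 */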
int allocw_region_migrate(allocw_region_t *dst_region, size_t dst_size, allocw_region_t *src_region)
    if (dst_region == NULL || src_region == NULL)
        NOTIFY_ERROR("null argument");

    NOTIFY_DEBUG("migrating region (%p size:%zu) to (%p size:%zu)",
                 src_region, src_region->size,
                 dst_region, dst_size);

    if (dst_size < (src_region->size + sizeof (struct allocw_block_))
        && dst_size != src_region->size)
        if (src_region->free_end + BLOCK_PTR(src_region, src_region->free_end)->size == src_region->size)
            NOTIFY_DEBUG("src_region is shrinkable by 0x%zx bytes",
                         BLOCK_PTR(src_region, src_region->free_end)->size);
            NOTIFY_DEBUG("but shrinking isn't implemented yet");
        NOTIFY_ERROR("destination region too small");

    if ( (r = region_init_(dst_region, dst_size)) )
        NOTIFY_DEBUG("%s:%s", "region_init_", "failed");

    if (region_lock_(dst_region, &set))

    if (region_lock_(src_region, NULL))
        region_unlock_(dst_region, &set);

    memmove(&(dst_region->data_), &(src_region->data_), src_region->size);
    dst_region->free_start = src_region->free_start;
    dst_region->free_end = src_region->free_end;
    dst_region->num_alloced = src_region->num_alloced;

    if (dst_size > src_region->size)
        struct allocw_block_ *new_free_block = BLOCK_PTR(dst_region, src_region->size);
        new_free_block->size = dst_region->size - src_region->size;
        region_freelist_insert_(dst_region, new_free_block);

    if (region_unlock_(src_region, NULL))

    if (region_unlock_(dst_region, &set))
/*
 * debugging function - info on freelist
 */
void freelist_stats(allocw_region_t *region)
    struct allocw_block_ *b, *b_smallest = NULL, *b_largest = NULL;

    for ( b = BLOCK_PTR(region, region->free_start);
          (void *)b != (void *)region;
          b = BLOCK_PTR(region, b->next_off) )
        if (b_smallest == NULL
            || b->size < b_smallest->size)
            b_smallest = b;
        if (b_largest == NULL
            || b->size > b_largest->size)
            b_largest = b;

    fprintf(stderr, "\t%s block: %p (size:0x%zx)\n", "smallest", b_smallest, b_smallest->size);
    fprintf(stderr, "\t%s block: %p (size:0x%zx)\n", "largest", b_largest, b_largest->size);

    b = BLOCK_PTR(region, region->free_end);
    if ((void *)b != (void *)region)
        fprintf(stderr, "\t%s block: %p (size:0x%zx)\n",
                (BLOCK_OFF(region, b) + b->size == region->size) ? "wilderness" : "last",
                b, b->size);
/*
 * debugging function - iterates over a region's freelist
 */
void freelist_walk(allocw_region_t *region)
    struct allocw_block_ *block;

    if (region_lock_(region, &set))

#ifdef PRINT_DEBUG
    dump_region("region", region);
#endif /* PRINT_DEBUG */

    for ( block = BLOCK_PTR(region, region->free_start);
          (void *)block != (void *)region;
          block = BLOCK_PTR(region, block->next_off) )
#ifdef PRINT_DEBUG
        dump_block("block", block);
#endif /* PRINT_DEBUG */

    if (region_unlock_(region, &set))
int main(int argc UNUSED, char **argv UNUSED)
    allocw_region_t *region, *region_two;
    region = malloc(region_sz);
    region_two = malloc(region_sz * 2);
    if (region == NULL
        || region_two == NULL)
        NOTIFY_ERROR("%s:%s", "malloc", strerror(errno));

    if ( (r = allocw_region_init(region, region_sz)) )
        NOTIFY_ERROR("%s:%d", "allocw_region_init", r);

    freelist_walk(region);

    for (i = 0; i < (sizeof ptr / sizeof *ptr); i++)
        ptr[i] = allocw_malloc(region, 20);
        if (ptr[i] == 0)
            NOTIFY_ERROR("%s:%s(%d)", "allocw_malloc", strerror(errno), errno);
        fprintf(stderr, "allocated id:%zu (%p)\n", ptr[i], allocw_ptr(region, ptr[i]));

    fprintf(stderr, "dumping pre-migration freelist...\n");
    freelist_walk(region);
    freelist_stats(region);

    r = allocw_region_migrate(region_two, region_sz * 2, region);
    if (r)
        NOTIFY_ERROR("%s:%s(%d)", "allocw_region_migrate", strerror(r), r);

    r = allocw_region_fini(region);
    if (r)
        NOTIFY_ERROR("%s:%s(%d)", "allocw_region_fini", strerror(r), r);

    fprintf(stderr, "dumping post-migration freelist...\n");
    freelist_walk(region_two);
    freelist_stats(region_two);

    for (i = 0; i < (sizeof ptr / sizeof *ptr); i++)
        allocw_free(region_two, ptr[i]);
        fprintf(stderr, "freed id:%zu (%p)\n", ptr[i], allocw_ptr(region_two, ptr[i]));

    freelist_walk(region_two);
    freelist_stats(region_two);

    ptr[0] = allocw_realloc(region, 0, 100);
    fprintf(stderr, "\t%zu\n", ptr[0]);
    ptr[1] = allocw_realloc(region, ptr[0], 200);
    fprintf(stderr, "\t%zu\n", ptr[1]);
    ptr[2] = allocw_realloc(region, ptr[1], 300);
    fprintf(stderr, "\t%zu\n", ptr[2]);
    ptr[3] = allocw_realloc(region, ptr[2], 0);
    fprintf(stderr, "\t%zu\n", ptr[3]);

    freelist_walk(region);
    freelist_stats(region);