/** allocwithin
 * This is a very, very basic allocator.
 * It burns a lot of space, and makes no effort at being clever or efficient.
 * But it does allow a segment of (presumably shared) memory to be used as a
 * pool for dynamic allocation, and allows that pool to be moved around.
 * The heavy-handed signal masking is probably not the most elegant way of
 * avoiding interprocess corruption.
 **/

#include "allocwithin_int.h"

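/* A minimal usage sketch (illustrative, not compiled in; the mmap(2) setup is
 * an assumption here, any suitably sized and suitably shared buffer works):
 *
 *     size_t region_sz = 4096;
 *     allocw_region_t *region = mmap(NULL, region_sz, PROT_READ | PROT_WRITE,
 *                                    MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 *
 *     if (region != MAP_FAILED && allocw_region_init(region, region_sz) == 0)
 *     {
 *         allocw_id_t id = allocw_malloc(region, 64);
 *         char *p = allocw_ptr(region, id);
 *         ... use p ...
 *         allocw_free(region, id);
 *         allocw_region_fini(region);
 *     }
 */
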
#define NOTIFY_ERROR(...) do { fprintf(stderr, "ERROR "); fprintf(stderr, __VA_ARGS__); fprintf(stderr, "\n"); } while (0)
#ifdef PRINT_DEBUG
#define NOTIFY_DEBUG(...) do { fprintf(stderr, "DEBUG [%d] %s> ", __LINE__, __func__); fprintf(stderr, __VA_ARGS__); fprintf(stderr, "\n"); } while (0)
#else /* PRINT_DEBUG */
#define NOTIFY_DEBUG(...) do { } while (0)
#endif /* PRINT_DEBUG */

static const char * const src_id_ = "$Id$";

static pthread_once_t init_once_ = PTHREAD_ONCE_INIT;
static int init_result_ = -1;
static pthread_mutexattr_t allocw_mutexattr_;

/**
 * private function prototypes
 **/
static void allocw_init_once_(void);
static int region_init_(allocw_region_t *region, size_t size);
static int region_fini_(allocw_region_t *region);
static int region_lock_(allocw_region_t *region, sigset_t *oset);
static int region_unlock_(allocw_region_t *region, sigset_t *oset);
static void region_freelist_insert_(allocw_region_t *region, struct allocw_block_ *b);
static struct allocw_block_ *block_split_(allocw_region_t *region, struct allocw_block_ *b, size_t payload_size);
static void block_unsplit_(allocw_region_t *region, struct allocw_block_ *b);
static struct allocw_block_ *alloc_block_(allocw_region_t *region, struct allocw_block_ *block, size_t size);

/** allocw_init_once_
 * called once to initialize static attrs
 **/
static
void allocw_init_once_(void)
{
    int r;

    NOTIFY_DEBUG("initializing");

    memset(&allocw_mutexattr_, 0, sizeof allocw_mutexattr_);

    if ( (r = pthread_mutexattr_init(&allocw_mutexattr_)) )
    {
        NOTIFY_ERROR("%s:%s", "pthread_mutexattr_init", strerror(r));
        init_result_ = r;
        return;
    }

#if 0
    /* darwin doesn't like this */
    if ( (r = pthread_mutexattr_setpshared(&allocw_mutexattr_, PTHREAD_PROCESS_SHARED)) )
    {
        NOTIFY_ERROR("%s:%s", "pthread_mutexattr_setpshared", strerror(r));
        init_result_ = r;
        return;
    }
#endif

    init_result_ = 0;
}

#ifdef PRINT_DEBUG
/** dump_region
 * debug function, display a region's basic details
 **/
static
inline
void dump_region(char *label, allocw_region_t *region)
{
    NOTIFY_DEBUG("%s:%p", label, region);
    NOTIFY_DEBUG("\t%s->%s:0x%zx", label, "size", region->size);
    NOTIFY_DEBUG("\t%s->%s:0x%zx", label, "free_start", region->free_start);
    NOTIFY_DEBUG("\t%s->%s:0x%zx", label, "free_end", region->free_end);
    NOTIFY_DEBUG("\t%s->%s:0x%x", label, "num_alloced", region->num_alloced);
}
/** dump_block
 * debug function, display a block's basic details
 **/
static
inline
void dump_block(char *label, struct allocw_block_ *block)
{
    NOTIFY_DEBUG("%s:%p", label, block);
    NOTIFY_DEBUG("\t%s->%s:0x%zx", label, "size", block->size);
    NOTIFY_DEBUG("\t%s->%s:0x%zx", label, "prev_off", block->prev_off);
    NOTIFY_DEBUG("\t%s->%s:0x%zx", label, "next_off", block->next_off);
}
#endif /* PRINT_DEBUG */

/** region_init_
 * Initializes a region's header fields.
 **/
static
int region_init_(allocw_region_t *region, size_t size)
{
    struct allocw_block_ *b;
    int r;

    region->size = size;
    memset(&region->freelist_mutex, 0, sizeof region->freelist_mutex);
    if ( (r = pthread_mutex_init(&region->freelist_mutex, &allocw_mutexattr_)) )
    {
        NOTIFY_ERROR("%s:%s", "pthread_mutex_init", strerror(r));
        memset(region, 0, sizeof *region);
        return r;
    }

#ifdef PRINT_DEBUG
    dump_region("region", region);
#endif /* PRINT_DEBUG */

    /* initialize the freelist */
    b = (struct allocw_block_ *)region->data_;
    b->next_off = 0;
    b->prev_off = 0;
    b->size = region->size - sizeof *region;
    region->free_start = region->free_end = BLOCK_OFF(region, b);
    region->num_alloced = 0;

#ifdef PRINT_DEBUG
    dump_block("first_free_block", b);
#endif /* PRINT_DEBUG */

    return 0;
}

/** allocw_region_init
 * Prepares a segment of memory for use as an allocation region.
 * @note Usable size will be #size minus the region header and per-allocation overhead.
 *
 * @param[in] region Pointer to the block of memory to initialize.
 * @param[in] size Total size in bytes of the memory region to initialize.
 * @return Success or failure.
 **/
int allocw_region_init(allocw_region_t *region, size_t size)
{
    struct allocw_block_ *b;
    int r;

    if (region == NULL)
    {
        NOTIFY_ERROR("null argument");
        return EINVAL;
    }
    if (size < sizeof *region + sizeof *b)
    {
        NOTIFY_ERROR("headers require 0x%zx size, 0x%zx requested", sizeof *region + sizeof *b, size);
        return ENOMEM;
    }

    if ( (r = pthread_once(&init_once_, allocw_init_once_)) )
    {
        NOTIFY_ERROR("%s:%s", "pthread_once", strerror(r));
        return r;
    }
    if (init_result_)
    {
        NOTIFY_ERROR("%s:%d", "allocw_init_once_", init_result_);
        memset(region, 0, sizeof *region);
        return init_result_;
    }

    NOTIFY_DEBUG("sizeof allocw_region_t:0x%zx", sizeof *region);
    NOTIFY_DEBUG("sizeof allocw_block_:0x%zx", sizeof *b);

    if ( (r = region_init_(region, size)) )
    {
        NOTIFY_DEBUG("%s:%s", "region_init_", "failed");
        return r;
    }

    return 0;
}

/** region_fini_
 * Tears down a region's mutex and clears its header.
 **/
static
inline
int region_fini_(allocw_region_t *region)
{
    int r;

    if ( (r = pthread_mutex_destroy(&region->freelist_mutex)) )
    {
        NOTIFY_ERROR("%s:%s", "pthread_mutex_destroy", strerror(r));
        return r;
    }

    memset(region, 0, sizeof *region);

    return 0;
}

/** allocw_region_fini
 * release a region
 *
 * @param[in] region region to release
 * @return Success or failure.
 **/
int allocw_region_fini(allocw_region_t *region)
{
    if (region == NULL)
    {
        NOTIFY_ERROR("null argument");
        return EINVAL;
    }

    NOTIFY_DEBUG("destroying region %p", region);

    return region_fini_(region);
}

/** region_lock_
 * Acquires the lock on a region and blocks all signals.
 *
 * @param[in] region which region to lock
 * @param[out] oset the signal mask that was replaced, if non-NULL
 * @return Success or failure.
 **/
static
inline
int region_lock_(allocw_region_t *region, sigset_t *oset)
{
    sigset_t set;
    int r;

    if (oset)
        sigfillset(&set);

    if ( (r = pthread_mutex_lock(&region->freelist_mutex)) )
    {
        NOTIFY_ERROR("%s:%s", "pthread_mutex_lock", strerror(r));
        errno = r;
        return -1;
    }

    if ( oset && (r = pthread_sigmask(SIG_SETMASK, &set, oset)) )
    {
        NOTIFY_ERROR("%s:%s", "pthread_sigmask", strerror(r));
        /* don't leave the region locked on failure */
        pthread_mutex_unlock(&region->freelist_mutex);
        errno = r;
        return -1;
    }

    NOTIFY_DEBUG("region %p %sLOCKED", region, "");

    return 0;
}

/** region_unlock_
 * Releases the lock on a region and restores signal processing.
 *
 * @param[in] region which region to unlock
 * @param[in] oset the signal mask to restore, if non-NULL
 * @return Success or failure.
 **/
static
inline
int region_unlock_(allocw_region_t *region, sigset_t *oset)
{
    int retval = 0;
    int r;

    if ( oset && (r = pthread_sigmask(SIG_SETMASK, oset, NULL)) )
    {
        NOTIFY_ERROR("%s:%s", "pthread_sigmask", strerror(r));
        errno = r;
        retval = -1;
    }

    if ( (r = pthread_mutex_unlock(&region->freelist_mutex)) )
    {
        NOTIFY_ERROR("%s:%s", "pthread_mutex_unlock", strerror(r));
        errno = r;
        retval = -1;
    }

    NOTIFY_DEBUG("region %p %sLOCKED", region, "UN");

    return retval;
}

/** block_split_
 * Possibly partitions a free block, returning a pointer to a block of at
 * least #payload_size payload bytes.
 * #b must be equal to or greater than the split size.
 *
 * @param[in] region region
 * @param[in] b the free block to split
 * @param[in] payload_size size of the desired block's user data
 * @return pointer to the newly-pared block of at least #payload_size payload bytes
 **/
static
inline
struct allocw_block_ *block_split_(allocw_region_t *region, struct allocw_block_ *b, size_t payload_size)
{
    const size_t new_block_size = payload_size + sizeof *b;
    struct allocw_block_ *newb;

    /* to split a block, we need room to spare for at least the header of the next block */
    if (b->size < new_block_size + sizeof *b)
    {
        NOTIFY_DEBUG("not enough room to split 0x%zx bytes off from 0x%zx byte block", new_block_size, b->size);
        /* we'll just overcommit by the extra bytes */
        return b;
    }

    NOTIFY_DEBUG("splitting %p (size:0x%zx) to fit requested 0x%zx (total 0x%zx)", b, b->size, payload_size, new_block_size);

    /* carve the remainder off into a new free block following b */
    newb = (struct allocw_block_ *)((char *)b + new_block_size);
    newb->size = b->size - new_block_size;
    newb->prev_off = BLOCK_OFF(region, b);
    newb->next_off = b->next_off;
    if (newb->next_off)
        BLOCK_PTR(region, newb->next_off)->prev_off = BLOCK_OFF(region, newb);
    else
        region->free_end = BLOCK_OFF(region, newb);

    b->size = new_block_size;
    b->next_off = BLOCK_OFF(region, newb);

#ifdef PRINT_DEBUG
    dump_block("newb", newb);
    dump_block("b", b);
#endif /* PRINT_DEBUG */

    return b;
}

/** block_unsplit_
 * attempt to merge a free block with its next
 **/
static
inline
void block_unsplit_(allocw_region_t *region, struct allocw_block_ *b)
{
    if (b->next_off)
    {
        struct allocw_block_ *n = BLOCK_PTR(region, b->next_off);

        if (BLOCK_OFF(region, b) + b->size == b->next_off)
        {
            NOTIFY_DEBUG("merging id %zu (%p) size:%zu with id %zu (%p) size:%zu",
                         BLOCK_OFF(region, b), b, b->size,
                         BLOCK_OFF(region, n), n, n->size);
            b->size += n->size;
            b->next_off = n->next_off;
            if (n->next_off)
                BLOCK_PTR(region, n->next_off)->prev_off = BLOCK_OFF(region, b);
            else
                region->free_end = BLOCK_OFF(region, b);
            NOTIFY_DEBUG("new block id %zu (%p) size:%zu",
                         BLOCK_OFF(region, b), b, b->size);
        }
    }
}

/** region_freelist_insert_
 * Returns a block to the freelist, merging adjacent free blocks.
 *
 * @param[in] region region
 * @param[in] b block
 **/
static
inline
void region_freelist_insert_(allocw_region_t *region, struct allocw_block_ *b)
{
    struct allocw_block_ *x;

    NOTIFY_DEBUG("insert of block %zu (%p) size:%zu", BLOCK_OFF(region, b), b, b->size);

    /* empty freelist? easy. */
    if (region->free_start == 0)
    {
        NOTIFY_DEBUG("inserting into empty freelist");
        b->prev_off = 0;
        b->next_off = 0;
        region->free_start = BLOCK_OFF(region, b);
        region->free_end = BLOCK_OFF(region, b);
        return;
    }

    /* find the block to insert in front of, keeping the freelist sorted by offset */
    x = BLOCK_PTR(region, region->free_start);
    while ( BLOCK_OFF(region, x)
    &&      BLOCK_OFF(region, x) < BLOCK_OFF(region, b) )
    {
        x = BLOCK_PTR(region, x->next_off);
    }

    if (BLOCK_OFF(region, x))
    {
        NOTIFY_DEBUG("inserting before block %zu (%p) size:%zu prev:%zu next:%zu", BLOCK_OFF(region, x), x, x->size, x->prev_off, x->next_off);
        b->next_off = BLOCK_OFF(region, x);
        b->prev_off = x->prev_off;
        if (b->prev_off)
            BLOCK_PTR(region, b->prev_off)->next_off = BLOCK_OFF(region, b);
        else
            region->free_start = BLOCK_OFF(region, b);
        if (b->next_off)
            BLOCK_PTR(region, b->next_off)->prev_off = BLOCK_OFF(region, b);
        else
            region->free_end = BLOCK_OFF(region, b);
    }
    else /* otherwise, b's offset is bigger than everything else, so tack it onto the end */
    {
        NOTIFY_DEBUG("inserting at end of freelist, after %zu (%p)", region->free_end, BLOCK_PTR(region, region->free_end));
        b->next_off = 0;
        b->prev_off = region->free_end;
        BLOCK_PTR(region, region->free_end)->next_off = BLOCK_OFF(region, b);
        region->free_end = BLOCK_OFF(region, b);
    }

    /* coalesce with the following block, then with the preceding one */
    block_unsplit_(region, b);
    if (b->prev_off)
    {
        x = BLOCK_PTR(region, b->prev_off);
        block_unsplit_(region, x);
    }
}

/** alloc_block_
 * Rounds #size up to an allocatable chunk, carves off enough storage for it
 * from free block #block, and removes that storage from the freelist.
 * #block must be big enough.
 **/
static
inline
struct allocw_block_ *alloc_block_(allocw_region_t *region, struct allocw_block_ *block, size_t size)
{
    /* round the request up to a multiple of the block-header size; note
       that this mask arithmetic assumes the granularity is a power of two */
    const size_t granularity = sizeof (struct allocw_block_);
    const size_t size_adj = (size + granularity - 1) & ~(granularity - 1);

    block = block_split_(region, block, size_adj);

    /* unlink the block from the freelist */
    if (block->prev_off)
        BLOCK_PTR(region, block->prev_off)->next_off = block->next_off;
    else
        region->free_start = block->next_off;
    if (block->next_off)
        BLOCK_PTR(region, block->next_off)->prev_off = block->prev_off;
    else
        region->free_end = block->prev_off;
    block->next_off = block->prev_off = 0;

    return block;
}

/** allocw_realloc
 * Resize a previously allocated id.
 * Implementation idiosyncrasy: if #size is zero, frees #id and returns the null id.
 *
 * @param[in] region region
 * @param[in] id id of the existing allocation, or 0 to allocate fresh
 * @param[in] size new payload size in bytes
 * @return new id, or 0 on failure
 **/
allocw_id_t allocw_realloc(allocw_region_t *region, allocw_id_t id, size_t size)
{
    allocw_id_t retval = 0;
    sigset_t set;
    struct allocw_block_ *b,
                         *b_id,
                         *b_want,
                         *best_fit = NULL;

    if (region == NULL)
    {
        NOTIFY_ERROR("null argument");
        return 0;
    }

    /* route simple requests away */

    if (id == 0)
        return allocw_malloc(region, size);

    if (size == 0)
    {
        allocw_free(region, id);
        return 0;
    }

    /* a block's size includes its header; the payload is what remains */
    if (size <= BLOCK_PTR(region, id)->size - sizeof *b_id)
    {
        NOTIFY_DEBUG("ignoring realloc to smaller block");
        return id;
    }

    if (region_lock_(region, &set))
        return 0;

    /*
      Locate the smallest free block we can fit into, EXCEPT favor a free block
      of any adequate size immediately following the original block, to
      avoid copying.
    */
    b_id = BLOCK_PTR(region, id);
    b_want = BLOCK_PTR(region, id + b_id->size);
    for ( b = BLOCK_PTR(region, region->free_start);
          (void *)b != (void *)region;
          b = BLOCK_PTR(region, b->next_off) )
    {
        if (b->size == size || b->size >= size + sizeof *b)
        {
            if (best_fit == NULL
            ||  b->size < best_fit->size)
            {
                best_fit = b;
                if (best_fit == b_want)
                    break;
            }
        }
    }

    if (best_fit == NULL)
    {
        region_unlock_(region, &set);
        errno = ENOMEM;
        return 0;
    }

    if (best_fit == b_want)
    {
        /* grow in place into the adjacent free block */
        size_t size_diff = size + sizeof *b_id - b_id->size;

        NOTIFY_DEBUG("growing %p (size:0x%zx) by size_diff:0x%zx into freeblock %p (size:0x%zx)",
                     b_id, b_id->size,
                     size_diff,
                     best_fit, best_fit->size);

        best_fit = alloc_block_(region, best_fit, size_diff);
        /* alloc_block_ has already unlinked best_fit from the freelist,
           so absorbing it is just a matter of growing this block */
        b_id->size += best_fit->size;

        retval = id;
    }
    else
    {
        size_t payload = b_id->size - sizeof *b_id;
        size_t min_size = (payload < size) ? payload : size;

        NOTIFY_DEBUG("reallocating %p (size:0x%zx) to %p (size:0x%zx)",
                     b_id, b_id->size,
                     best_fit, best_fit->size);

        best_fit = alloc_block_(region, best_fit, size);
        retval = BLOCK_OFF(region, best_fit);
        memcpy(best_fit->data_, b_id->data_, min_size);
        region_freelist_insert_(region, b_id);
    }

    region_unlock_(region, &set);

    return retval;
}

/** allocw_free
 * Free a previously allocated memory id in a region.
 *
 * @param[in] region region
 * @param[in] id id of memory in region to free
 **/
void allocw_free(allocw_region_t *region, allocw_id_t id)
{
    sigset_t set;
    struct allocw_block_ *b;

    if (region == NULL)
    {
        NOTIFY_ERROR("null argument");
        errno = EINVAL;
        return;
    }

    if (id == 0)
    {
        NOTIFY_ERROR("attempted free of null id");
        return;
    }

    b = BLOCK_PTR(region, id);

    NOTIFY_DEBUG("want to free block %p size:0x%zx", b, b->size);

    if (region_lock_(region, &set))
        return;

    region_freelist_insert_(region, b);

    region->num_alloced--;

    if (region_unlock_(region, &set))
        return;
}

/** allocw_malloc
 * Allocates a block of memory from the region.
 *
 * @param[in] region region
 * @param[in] size size in bytes to allocate
 * @return id of newly allocated memory, or 0 on failure
 **/
allocw_id_t allocw_malloc(allocw_region_t *region, size_t size)
{
    const size_t granularity = sizeof (struct allocw_block_);
    const size_t size_adj = (size + granularity - 1) & ~(granularity - 1);
    sigset_t set;
    struct allocw_block_ *b;
    allocw_id_t ret_id = 0;

    if (region == NULL)
    {
        NOTIFY_ERROR("null argument");
        errno = EINVAL;
        return 0;
    }

    if (region_lock_(region, &set))
        return 0;

    /* first-fit scan of the freelist */
    for ( b = BLOCK_PTR(region, region->free_start);
          (void *)b != (void *)region;
          b = BLOCK_PTR(region, b->next_off) )
    {
        NOTIFY_DEBUG("checking free block:%p size:0x%zx", b, b->size);
        if (b->size >= size_adj)
            break;
    }

    if ((void *)b == (void *)region)
    {
        NOTIFY_DEBUG("no free block with enough room");
        if (region_unlock_(region, &set))
            return 0;
        errno = ENOMEM;
        return 0;
    }

    b = alloc_block_(region, b, size);
    ret_id = BLOCK_OFF(region, b);

    region->num_alloced++;

    if (region_unlock_(region, &set))
        return ret_id;

    return ret_id;
}

/** allocw_ptr
 * Converts a (region,id) to a normal pointer.
 *
 * @param[in] region region
 * @param[in] id id
 * @return pointer
 **/
inline
void *allocw_ptr(allocw_region_t *region, allocw_id_t id)
{
    if (region == NULL || id == 0)
        return NULL;

    return &(BLOCK_PTR(region, id)->data_);
}

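/* Note that a pointer from allocw_ptr() is only valid while the region stays
 * at its current address; ids are offsets, so they survive a move. After,
 * e.g., allocw_region_migrate() below, re-resolve any held pointers (sketch):
 *
 *     char *p = allocw_ptr(new_region, id);
 */
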
/** allocw_region_migrate
 * initialize a new region with an existing region's data
 *
 * @param[in] dst_region destination region
 * @param[in] dst_size total size in bytes of the destination region
 * @param[in] src_region source region
 * @return success or failure
 **/
int allocw_region_migrate(allocw_region_t *dst_region, size_t dst_size, allocw_region_t *src_region)
{
    sigset_t set;
    int retval = 0;
    int r;

    if (dst_region == NULL || src_region == NULL)
    {
        NOTIFY_ERROR("null argument");
        return EINVAL;
    }

    NOTIFY_DEBUG("migrating region (%p size:%zu) to (%p size:%zu)",
                 src_region, src_region->size,
                 dst_region, dst_size);

    /* the destination must be the same size, or large enough that the
       surplus can hold at least a free-block header */
    if ( dst_size < (src_region->size + sizeof (struct allocw_block_))
    &&   dst_size != src_region->size )
    {
        if (src_region->free_end + BLOCK_PTR(src_region, src_region->free_end)->size == src_region->size)
        {
            NOTIFY_DEBUG("src_region is shrinkable by 0x%zx bytes",
                         BLOCK_PTR(src_region, src_region->free_end)->size);
            NOTIFY_DEBUG("but shrinking isn't implemented yet");
        }
        NOTIFY_ERROR("destination region too small");
        return ENOMEM;
    }

    if ( (r = region_init_(dst_region, dst_size)) )
    {
        NOTIFY_DEBUG("%s:%s", "region_init_", "failed");
        return r;
    }

    if (region_lock_(dst_region, &set))
        return -1;

    if (region_lock_(src_region, NULL))
    {
        region_unlock_(dst_region, &set);
        return -1;
    }

    /* copy the source's data area (its total size minus the region header) */
    memmove(&(dst_region->data_), &(src_region->data_), src_region->size - sizeof *src_region);
    dst_region->free_start = src_region->free_start;
    dst_region->free_end = src_region->free_end;
    dst_region->num_alloced = src_region->num_alloced;

    if (dst_size > src_region->size)
    {
        /* donate the surplus space to the freelist as a new free block */
        struct allocw_block_ *new_free_block = BLOCK_PTR(dst_region, src_region->size);
        new_free_block->size = dst_region->size - src_region->size;
        region_freelist_insert_(dst_region, new_free_block);
    }

    if (region_unlock_(src_region, NULL))
        retval = -1;

    if (region_unlock_(dst_region, &set))
        retval = -1;

    return retval;
}

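/* Sketch of growing a region by migrating it into a larger buffer
 * (illustrative only; new_size and the malloc()'d backing store are
 * assumptions, and existing ids remain valid against the new region):
 *
 *     allocw_region_t *bigger = malloc(new_size);
 *
 *     if (bigger != NULL
 *     &&  allocw_region_migrate(bigger, new_size, region) == 0)
 *     {
 *         allocw_region_fini(region);
 *         free(region);
 *         region = bigger;
 *     }
 */
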
#ifdef TEST
/** freelist_stats
 * debugging function - info on freelist
 **/
static
void freelist_stats(allocw_region_t *region)
{
    struct allocw_block_ *b,
                         *b_largest = NULL,
                         *b_smallest = NULL;

    for ( b = BLOCK_PTR(region, region->free_start);
          (void *)b != (void *)region;
          b = BLOCK_PTR(region, b->next_off) )
    {
        if (b_smallest == NULL
        ||  b->size < b_smallest->size)
            b_smallest = b;

        if (b_largest == NULL
        ||  b->size > b_largest->size)
            b_largest = b;
    }

    if (b_smallest)
        fprintf(stderr, "\t%s block: %p (size:0x%zx)\n", "smallest", b_smallest, b_smallest->size);
    if (b_largest)
        fprintf(stderr, "\t%s block: %p (size:0x%zx)\n", "largest", b_largest, b_largest->size);

    b = BLOCK_PTR(region, region->free_end);
    if ((void *)b != (void *)region)
        fprintf(stderr, "\t%s block: %p (size:0x%zx)\n",
                (BLOCK_OFF(region, b) + b->size == region->size) ? "wilderness" : "last",
                b, b->size);
}

/** freelist_walk
 * debugging function - iterates over a region's freelist
 **/
static
void freelist_walk(allocw_region_t *region)
{
    sigset_t set;
    struct allocw_block_ *block;

    if (region_lock_(region, &set))
        return;

#ifdef PRINT_DEBUG
    dump_region("region", region);
#endif /* PRINT_DEBUG */

    for ( block = BLOCK_PTR(region, region->free_start);
          (void *)block != (void *)region;
          block = BLOCK_PTR(region, block->next_off) )
    {
#ifdef PRINT_DEBUG
        dump_block("block", block);
#endif /* PRINT_DEBUG */
    }

    if (region_unlock_(region, &set))
        return;
}

/** main
 * for testing
 **/
int main(int argc UNUSED, char **argv UNUSED)
{
    int r;
    allocw_region_t *region, *region_two;
    size_t region_sz;
    allocw_id_t ptr[10];
    size_t i;

    region_sz = 1024;
    region = malloc(region_sz);
    region_two = malloc(region_sz * 2);
    if (region == NULL
    ||  region_two == NULL)
    {
        NOTIFY_ERROR("%s:%s", "malloc", strerror(errno));
        exit(EXIT_FAILURE);
    }

    if ( (r = allocw_region_init(region, region_sz)) )
    {
        NOTIFY_ERROR("%s:%d", "allocw_region_init", r);
        exit(EXIT_FAILURE);
    }

    freelist_walk(region);

    for (i = 0; i < (sizeof ptr / sizeof *ptr); i++)
    {
        ptr[i] = allocw_malloc(region, 20);
        if (ptr[i] == 0)
        {
            NOTIFY_ERROR("%s:%s(%d)", "allocw_malloc", strerror(errno), errno);
            exit(EXIT_FAILURE);
        }
        fprintf(stderr, "allocated id:%zu (%p)\n", ptr[i], allocw_ptr(region, ptr[i]));
    }

    fprintf(stderr, "dumping pre-migration freelist...\n");
    freelist_walk(region);
    freelist_stats(region);

    r = allocw_region_migrate(region_two, region_sz * 2, region);
    if (r)
    {
        NOTIFY_ERROR("%s:%s(%d)", "allocw_region_migrate", strerror(r), r);
        exit(EXIT_FAILURE);
    }

    r = allocw_region_fini(region);
    if (r)
    {
        NOTIFY_ERROR("%s:%s(%d)", "allocw_region_fini", strerror(r), r);
        exit(EXIT_FAILURE);
    }

    fprintf(stderr, "dumping post-migration freelist...\n");
    freelist_walk(region_two);
    freelist_stats(region_two);

    for (i = 0; i < (sizeof ptr / sizeof *ptr); i++)
    {
        allocw_free(region_two, ptr[i]);
        fprintf(stderr, "freed id:%zu (%p)\n", ptr[i], allocw_ptr(region_two, ptr[i]));
    }

    freelist_walk(region_two);
    freelist_stats(region_two);

    region = region_two;

    ptr[0] = allocw_realloc(region, 0, 100);
    fprintf(stderr, "\t%zu\n", ptr[0]);
    ptr[1] = allocw_realloc(region, ptr[0], 200);
    fprintf(stderr, "\t%zu\n", ptr[1]);
    ptr[2] = allocw_realloc(region, ptr[1], 300);
    fprintf(stderr, "\t%zu\n", ptr[2]);
    ptr[3] = allocw_realloc(region, ptr[2], 0);
    fprintf(stderr, "\t%zu\n", ptr[3]);

    freelist_walk(region);
    freelist_stats(region);

    exit(EXIT_SUCCESS);
}
#endif /* TEST */