1 /* Synched up with: Not synched up with FSF 19.28, even though I
4 /* Get the configuration files if we're being compiled for Emacs. */
7 # include "ui/getpagesize.h"
13 #if defined (__STDC__) && !defined (STDC_HEADERS)
14 /* The ANSI standard says that defining __STDC__ to a non-zero value means
15 that the compiler conforms to that standard. The standard requires
16 certain header files and library functions to be present. Therefore,
17 if your compiler defines __STDC__ to non-0 but does not have ANSI headers
18 and the ANSI library routines, then your compiler is buggy. Conversely,
19 an ANSI-conforming environment (which has both the ANSI headers and
20 library routines, i.e., stdlib.h and `memmove') does not necessarily
21 define the STDC_HEADERS flag. Lucid Emacs requires an ANSI compiler.
22 Therefore, there is no need to consult the abominable STDC_HEADERS flag.
28 /* DO NOT EDIT THIS FILE -- it is automagically generated. -*- C -*- */
29 /* Bwaa-haa-haa! Not a chance that this is actually true! */
31 #define _MALLOC_INTERNAL
33 /* The malloc headers and source files from the C library follow here. */
35 /* Declarations for `malloc' and friends.
36 Copyright 1990, 1991, 1992, 1993 Free Software Foundation, Inc.
37 Written May 1989 by Mike Haertel.
39 This library is free software; you can redistribute it and/or
40 modify it under the terms of the GNU Library General Public License as
41 published by the Free Software Foundation; either version 2 of the
42 License, or (at your option) any later version.
44 This library is distributed in the hope that it will be useful,
45 but WITHOUT ANY WARRANTY; without even the implied warranty of
46 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
47 Library General Public License for more details.
49 You should have received a copy of the GNU General Public License
50 along with this library; see the file COPYING. If not, write to
51 the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
52 Boston, MA 02111-1307, USA.
54 The author may be reached (Email) at the address mike@ai.mit.edu,
55 or (US mail) as Mike Haertel c/o Free Software Foundation, Inc. */
61 #ifdef _MALLOC_INTERNAL
74 #endif /* _MALLOC_INTERNAL. */
81 #define __P(args) args
83 #define __ptr_t void *
86 #define __malloc_size_t size_t
92 /* XEmacs: I thought this should be int under SunOS, but that
93 apparently fails. Curses on all this shit. */
94 #define __free_ret_t void
96 /* XEmacs: I tried commenting these out and including stdlib.h,
97 but that fails badly. Urk! This sucks. */
98 /* Allocate SIZE bytes of memory. */
99 extern __ptr_t malloc __P((size_t __size));
100 /* Re-allocate the previously allocated block
101 in __ptr_t, making the new block SIZE bytes long. */
102 extern __ptr_t realloc __P((__ptr_t __ptr, size_t __size));
103 /* Allocate NMEMB elements of SIZE bytes each, all initialized to 0. */
104 extern __ptr_t calloc __P((size_t __nmemb, size_t __size));
105 /* Free a block allocated by `malloc', `realloc' or `calloc'. */
106 extern __free_ret_t free __P((__ptr_t __ptr));
108 /* Allocate SIZE bytes allocated to ALIGNMENT bytes. */
109 extern __ptr_t memalign __P((size_t __alignment, size_t __size));
111 /* Allocate SIZE bytes on a page boundary. */
112 extern __ptr_t valloc __P((size_t __size));
114 #ifdef _MALLOC_INTERNAL
116 /* The allocator divides the heap into blocks of fixed size; large
117 requests receive one or more whole blocks, and small requests
118 receive a fragment of a block. Fragment sizes are powers of two,
119 and all fragments of a block are the same size. When all the
120 fragments in a block have been freed, the block itself is freed. */
121 #define INT_BIT (CHAR_BIT * sizeof(int))
122 #define BLOCKLOG (INT_BIT > 16 ? 12 : 9)
123 #define BLOCKSIZE (1 << BLOCKLOG)
124 #define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
126 /* Determine the amount of memory spanned by the initial heap table
127 (not an absolute limit). */
128 #define HEAP (INT_BIT > 16 ? 4194304 : 65536)
130 /* Number of contiguous free blocks allowed to build up at the end of
131 memory before they will be returned to the system. */
132 #define FINAL_FREE_BLOCKS 8
134 /* Data structure giving per-block information. */
136 /* Heap information for a busy block. */
138 /* Zero for a large block, or positive giving the
139 logarithm to the base two of the fragment size. */
143 __malloc_size_t nfree; /* Free frags in a fragmented block. */
144 __malloc_size_t first; /* First free fragment of the block. */
146 /* Size (in blocks) of a large cluster. */
147 __malloc_size_t size;
150 /* Heap information for a free block
151 (that may be the first of a free cluster). */
153 __malloc_size_t size; /* Size (in blocks) of a free cluster. */
154 __malloc_size_t next; /* Index of next free cluster. */
155 __malloc_size_t prev; /* Index of previous free cluster. */
159 /* Pointer to first block of the heap. */
160 extern char *_heapbase;
162 /* Table indexed by block number giving per-block information. */
163 extern malloc_info *_heapinfo;
165 /* Address to block number and vice versa. */
166 #define BLOCK(A) (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
167 #define ADDRESS(B) ((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
169 /* Current search index for the heap table. */
170 extern __malloc_size_t _heapindex;
172 /* Limit of valid info table indices. */
173 extern __malloc_size_t _heaplimit;
175 /* Doubly linked lists of free fragments. */
181 /* Free list headers for each fragment size. */
182 extern struct list _fraghead[];
184 /* List of blocks allocated with `memalign' (or `valloc'). */
186 struct alignlist *next;
187 __ptr_t aligned; /* The address that memaligned returned. */
188 __ptr_t exact; /* The address that malloc returned. */
190 extern struct alignlist *_aligned_blocks;
192 /* Instrumentation. */
193 extern __malloc_size_t _chunks_used;
194 extern __malloc_size_t _bytes_used;
195 extern __malloc_size_t _chunks_free;
196 extern __malloc_size_t _bytes_free;
198 /* Internal version of `free' used in `morecore' (malloc.c). */
199 extern void _free_internal __P((__ptr_t __ptr));
201 #endif /* _MALLOC_INTERNAL. */
203 /* Underlying allocation function; successive calls should
204 return contiguous pieces of memory. */
205 extern __ptr_t(*__morecore) __P((ptrdiff_t __size));
207 /* Default value of `__morecore'. */
208 extern __ptr_t __default_morecore __P((ptrdiff_t __size));
210 /* If not NULL, this function is called after each time
211 `__morecore' is called to increase the data size. */
212 extern void (*__after_morecore_hook) __P((void));
214 /* Nonzero if `malloc' has been called and done its initialization. */
215 /* extern int __malloc_initialized; */
217 /* Hooks for debugging versions. */
218 extern void (*__free_hook) __P((__ptr_t __ptr));
219 extern __ptr_t(*__malloc_hook) __P((size_t __size));
220 extern __ptr_t(*__realloc_hook) __P((__ptr_t __ptr, size_t __size));
222 /* Return values for `mprobe': these are the kinds of inconsistencies that
223 `mcheck' enables detection of. */
225 MCHECK_DISABLED = -1, /* Consistency checking is not turned on. */
226 MCHECK_OK, /* Block is fine. */
227 MCHECK_FREE, /* Block freed twice. */
228 MCHECK_HEAD, /* Memory before the block was clobbered. */
229 MCHECK_TAIL /* Memory after the block was clobbered. */
232 /* Activate a standard collection of debugging hooks. This must be called
233 before `malloc' is ever called. ABORTFUNC is called with an error code
234 (see enum above) when an inconsistency is detected. If ABORTFUNC is
235 null, the standard function prints on stderr and then calls `abort'. */
236 extern int mcheck __P((void (*__abortfunc) __P((enum mcheck_status))));
238 /* Check for aberrations in a particular malloc'd block. You must have
239 called `mcheck' already. These are the same checks that `mcheck' does
240 when you free or reallocate a block. */
241 extern enum mcheck_status mprobe __P((__ptr_t __ptr));
243 /* Activate a standard collection of tracing hooks. */
244 extern void mtrace __P((void));
245 extern void muntrace __P((void));
247 /* Statistics available to the user. */
249 __malloc_size_t bytes_total; /* Total size of the heap. */
250 __malloc_size_t chunks_used; /* Chunks allocated by the user. */
251 __malloc_size_t bytes_used; /* Byte total of user-allocated chunks. */
252 __malloc_size_t chunks_free; /* Chunks in the free list. */
253 __malloc_size_t bytes_free; /* Byte total of chunks in the free list. */
256 /* Pick up the current statistics. */
257 extern struct mstats mstats __P((void));
259 /* Call WARNFUN with a warning message when memory usage is high. */
260 extern void memory_warnings __P((__ptr_t __start,
262 __P((const char *))));
264 #if 0 /* unused in this file, and conflicting prototypes anyway */
265 /* Relocating allocator. */
267 /* Allocate SIZE bytes, and store the address in *HANDLEPTR. */
268 extern __ptr_t r_alloc __P((__ptr_t * __handleptr, size_t __size));
270 /* Free the storage allocated in HANDLEPTR. */
271 extern void r_alloc_free __P((__ptr_t * __handleptr));
273 /* Adjust the block at HANDLEPTR to be SIZE bytes long. */
274 extern __ptr_t r_re_alloc __P((__ptr_t * __handleptr, size_t __size));
280 #endif /* malloc.h */
281 /* Allocate memory on a page boundary.
282 Copyright (C) 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
284 This library is free software; you can redistribute it and/or
285 modify it under the terms of the GNU Library General Public License as
286 published by the Free Software Foundation; either version 2 of the
287 License, or (at your option) any later version.
289 This library is distributed in the hope that it will be useful,
290 but WITHOUT ANY WARRANTY; without even the implied warranty of
291 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
292 Library General Public License for more details.
294 You should have received a copy of the GNU General Public License
295 along with this library; see the file COPYING. If not, write to
296 the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
297 Boston, MA 02111-1307, USA.
299 The author may be reached (Email) at the address mike@ai.mit.edu,
300 or (US mail) as Mike Haertel c/o Free Software Foundation, Inc. */
301 #if defined (__GNU_LIBRARY__) || defined (_LIBC)
303 #include <sys/cdefs.h>
304 #if ! (defined (__GLIBC__) && (__GLIBC__ >= 2))
305 extern size_t __getpagesize __P((void));
308 #include "ui/getpagesize.h"
309 #define __getpagesize() getpagesize()
311 #ifndef _MALLOC_INTERNAL
312 #define _MALLOC_INTERNAL
/* Cached system page size used as the alignment for valloc().
   NOTE(review): this excerpt has interior lines elided; the usual
   "query __getpagesize() only once" guard is not visible here --
   confirm against the full source. */
315 static __malloc_size_t pagesize;
/* Allocate SIZE bytes on a page boundary by delegating to memalign()
   with the page size as the alignment. */
317 __ptr_t valloc(__malloc_size_t size)
320 pagesize = __getpagesize();
322 return memalign(pagesize, size);
325 /* Memory allocator `malloc'.
326 Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation
327 Written May 1989 by Mike Haertel.
329 This library is free software; you can redistribute it and/or
330 modify it under the terms of the GNU Library General Public License as
331 published by the Free Software Foundation; either version 2 of the
332 License, or (at your option) any later version.
334 This library is distributed in the hope that it will be useful,
335 but WITHOUT ANY WARRANTY; without even the implied warranty of
336 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
337 Library General Public License for more details.
339 You should have received a copy of the GNU General Public License
340 along with this library; see the file COPYING. If not, write to
341 the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
342 Boston, MA 02111-1307, USA.
344 The author may be reached (Email) at the address mike@ai.mit.edu,
345 or (US mail) as Mike Haertel c/o Free Software Foundation, Inc. */
347 #ifndef _MALLOC_INTERNAL
348 #define _MALLOC_INTERNAL
352 /* How to really get more memory. */
353 #if defined (HEAP_IN_DATA) && !defined(PDUMP)
354 /* once dumped, free() & realloc() on static heap space will fail */
355 #define PURE_DATA(x) \
356 ((static_heap_dumped && (char*)x >= static_heap_base \
357 && (char*)x <= (static_heap_base + static_heap_size) ) ? 1 : 0)
358 extern int initialized;
359 extern int purify_flag;
360 extern char *static_heap_base;
361 extern char *static_heap_ptr;
362 extern char *static_heap_dumped;
363 extern unsigned long static_heap_size;
364 extern __ptr_t more_static_core __P((ptrdiff_t __size));
365 __ptr_t(*__morecore) __P((ptrdiff_t __size)) = more_static_core;
367 __ptr_t(*__morecore) __P((ptrdiff_t __size)) = __default_morecore;
368 #define PURE_DATA(x) 0
371 /* Debugging hook for `malloc'. */
372 __ptr_t(*__malloc_hook) __P((__malloc_size_t __size));
374 /* Pointer to the base of the first block. */
377 /* Block information table. Allocated with align/__free (not malloc/free). */
378 malloc_info *_heapinfo;
380 /* Number of info entries. */
381 static __malloc_size_t heapsize;
383 /* Search index in the info table. */
384 __malloc_size_t _heapindex;
386 /* Limit of valid info table indices. */
387 __malloc_size_t _heaplimit;
389 /* Free lists for each fragment size. */
390 struct list _fraghead[BLOCKLOG];
392 /* Instrumentation. */
393 __malloc_size_t _chunks_used;
394 __malloc_size_t _bytes_used;
395 __malloc_size_t _chunks_free;
396 __malloc_size_t _bytes_free;
398 /* Are you experienced? */
/* Nonzero once initialize() has run; tested on every entry to malloc(). */
399 int __malloc_initialized;
/* If non-NULL, called after each __morecore call that changes the
   data-segment size (see align() below). */
401 void (*__after_morecore_hook) __P((void));
403 /* Aligned allocation. */
404 static __ptr_t align __P((__malloc_size_t));
/* Obtain SIZE bytes from __morecore and round the result up to a
   BLOCKSIZE boundary, requesting the extra adjustment bytes from
   __morecore as well.  Invokes __after_morecore_hook (if installed)
   after the data segment has grown.
   NOTE(review): several lines are elided in this excerpt (declaration
   of `result`, the modulo that finishes computing `adj`, the
   `if (adj != 0)` test, and the return) -- consult the full source
   before changing this logic. */
405 static __ptr_t align(__malloc_size_t size)
408 unsigned long int adj;
410 result = (*__morecore) (size);
/* adj ends up holding result's misalignment relative to BLOCKSIZE. */
411 adj = (unsigned long int)((unsigned long int)((char *)result -
415 adj = BLOCKSIZE - adj;
/* Grow by the padding amount, then step past it so the returned
   pointer is block-aligned. */
416 (void)(*__morecore) (adj);
417 result = (char *)result + adj;
420 if (__after_morecore_hook)
421 (*__after_morecore_hook) ();
426 /* Set everything up and remember that we have. */
427 static int initialize __P((void));
/* One-time heap setup: size the initial info table (HEAP / BLOCKSIZE
   entries), allocate it block-aligned via align(), zero it and the
   fragment free-list heads, and flag the allocator as initialized.
   NOTE(review): lines are elided in this excerpt (opening brace,
   the failure `return 0;` after the NULL check, closing braces);
   verify against the full source. */
428 static int initialize()
430 #if defined (HEAP_IN_DATA) && !defined(PDUMP)
/* Once dumped, the static heap must no longer be extended: fall back
   to the default __morecore. */
431 if (static_heap_dumped && __morecore == more_static_core) {
432 __morecore = __default_morecore;
435 heapsize = HEAP / BLOCKSIZE;
436 _heapinfo = (malloc_info *) align(heapsize * sizeof(malloc_info));
437 if (_heapinfo == NULL)
439 memset(_heapinfo, 0, heapsize * sizeof(malloc_info));
440 memset(_fraghead, 0, BLOCKLOG * sizeof(struct list));
/* Entry 0 doubles as the head of the circular free-cluster list. */
441 _heapinfo[0].free.size = 0;
442 _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
445 _heapbase = (char *)_heapinfo;
447 /* Account for the _heapinfo block itself in the statistics. */
448 _bytes_used = heapsize * sizeof(malloc_info);
454 __malloc_initialized = 1;
458 /* Get neatly aligned memory, initializing or
459 growing the heap info table as necessary. */
460 static __ptr_t morecore __P((__malloc_size_t));
/* Grow the heap by SIZE block-aligned bytes.  If the enlarged heap
   spans more blocks than the current info table covers, allocate a
   bigger table, copy the old entries, zero the new tail, and release
   the old table through _free_internal().
   NOTE(review): this excerpt elides lines (the NULL check on `result`,
   `newsize = heapsize;`, the doubling statement inside the while loop,
   `oldinfo = _heapinfo;`, `_heapinfo = newinfo;`, closing braces and
   the final return) -- do not edit without the full source. */
461 static __ptr_t morecore(__malloc_size_t size)
464 malloc_info *newinfo, *oldinfo;
465 __malloc_size_t newsize;
467 result = align(size);
471 /* Check if we need to grow the info table. */
472 if ((__malloc_size_t) BLOCK((char *)result + size) > heapsize) {
474 while ((__malloc_size_t) BLOCK((char *)result + size) > newsize)
476 newinfo = (malloc_info *) align(newsize * sizeof(malloc_info));
/* Could not grow the table: hand the freshly acquired core back. */
477 if (newinfo == NULL) {
478 (*__morecore) (-(int)size);
481 memcpy(newinfo, _heapinfo, heapsize * sizeof(malloc_info));
482 memset(&newinfo[heapsize], 0,
483 (newsize - heapsize) * sizeof(malloc_info));
/* Mark the block holding the old table as a busy whole-block chunk
   (type 0) so _free_internal can reclaim it. */
485 newinfo[BLOCK(oldinfo)].busy.type = 0;
486 newinfo[BLOCK(oldinfo)].busy.info.size
487 = BLOCKIFY(heapsize * sizeof(malloc_info));
489 /* Account for the _heapinfo block itself in the statistics. */
490 _bytes_used += newsize * sizeof(malloc_info);
492 _free_internal(oldinfo);
496 _heaplimit = BLOCK((char *)result + size);
500 /* Allocate memory from the heap. */
/* Small requests (<= BLOCKSIZE/2) are served from power-of-two
   fragments carved out of a single block; larger requests take whole
   blocks from the circular free-cluster list, growing the heap via
   morecore() when nothing fits.
   NOTE(review): this excerpt has interior lines elided (declarations
   of `result`, `next`, `i`; the call to initialize(); assorted braces
   and returns); do not modify the logic without the full source. */
501 __ptr_t malloc(__malloc_size_t size)
504 __malloc_size_t block, blocks, lastblocks, start;
508 /* ANSI C allows `malloc (0)' to either return NULL, or to return a
509 valid address you can realloc and free (though not dereference).
511 It turns out that some extant code (sunrpc, at least Ultrix's version)
512 expects `malloc (0)' to return non-NULL and breaks otherwise.
515 #ifdef HAVE_X_WINDOWS
516 /* there is at least one Xt bug where calloc(n,x) is blindly called
517 where n can be 0, and yet if 0 is returned, Xt barfs */
519 size = sizeof(struct list);
/* Let an installed debugging hook take over the whole allocation. */
525 if (__malloc_hook != NULL)
526 return (*__malloc_hook) (size);
528 if (!__malloc_initialized)
532 #ifdef SUNOS_LOCALTIME_BUG
533 /* Workaround for localtime() allocating 8 bytes and writing 9 bug... */
538 if (size < sizeof(struct list))
539 size = sizeof(struct list);
541 /* Determine the allocation policy based on the request size. */
542 if (size <= BLOCKSIZE / 2) {
543 /* Small allocation to receive a fragment of a block.
544 Determine the logarithm to base two of the fragment size. */
545 __malloc_size_t log = 1;
547 while ((size /= 2) != 0)
550 /* Look in the fragment lists for a
551 free fragment of the desired size. */
552 next = _fraghead[log].next;
554 /* There are free fragments of this size.
555 Pop a fragment out of the fragment list and return it.
556 Update the block's nfree and first counters. */
557 result = (__ptr_t) next;
558 next->prev->next = next->next;
559 if (next->next != NULL)
560 next->next->prev = next->prev;
561 block = BLOCK(result);
562 if (--_heapinfo[block].busy.info.frag.nfree != 0)
563 _heapinfo[block].busy.info.frag.first =
565 ((unsigned long int)((char *)next->next -
569 /* Update the statistics. */
571 _bytes_used += 1 << log;
573 _bytes_free -= 1 << log;
575 /* No free fragments of the desired size, so get a new block
576 and break it into fragments, returning the first. */
/* Recursion is bounded: BLOCKSIZE > BLOCKSIZE/2, so this inner call
   takes the whole-block path and cannot recurse again. */
577 result = malloc(BLOCKSIZE);
581 /* Link all fragments but the first into the free list. */
582 for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> log);
585 (struct list *)((char *)result +
587 next->next = _fraghead[log].next;
588 next->prev = &_fraghead[log];
589 next->prev->next = next;
590 if (next->next != NULL)
591 next->next->prev = next;
594 /* Initialize the nfree and first counters for this block. */
595 block = BLOCK(result);
596 _heapinfo[block].busy.type = log;
597 _heapinfo[block].busy.info.frag.nfree = i - 1;
598 _heapinfo[block].busy.info.frag.first = i - 1;
600 _chunks_free += (BLOCKSIZE >> log) - 1;
601 _bytes_free += BLOCKSIZE - (1 << log);
602 _bytes_used -= BLOCKSIZE - (1 << log);
605 /* Large allocation to receive one or more blocks.
606 Search the free list in a circle starting at the last place visited.
607 If we loop completely around without finding a large enough
608 space we will have to get more memory from the system. */
609 blocks = BLOCKIFY(size);
610 start = block = _heapindex;
611 while (_heapinfo[block].free.size < blocks) {
612 block = _heapinfo[block].free.next;
613 if (block == start) {
614 /* Need to get more from the system. Check to see if
615 the new core will be contiguous with the final free
616 block; if so we don't need to get as much. */
617 block = _heapinfo[0].free.prev;
618 lastblocks = _heapinfo[block].free.size;
620 && block + lastblocks == _heaplimit
621 && (*__morecore) (0) ==
622 ADDRESS(block + lastblocks)
625 ((blocks - lastblocks) * BLOCKSIZE)) !=
627 /* Which block we are extending (the `final free
628 block' referred to above) might have changed, if
629 it got combined with a freed info table. */
630 block = _heapinfo[0].free.prev;
631 _heapinfo[block].free.size +=
632 (blocks - lastblocks);
634 (blocks - lastblocks) * BLOCKSIZE;
637 result = morecore(blocks * BLOCKSIZE);
640 block = BLOCK(result);
641 _heapinfo[block].busy.type = 0;
642 _heapinfo[block].busy.info.size = blocks;
644 _bytes_used += blocks * BLOCKSIZE;
649 /* At this point we have found a suitable free list entry.
650 Figure out how to remove what we need from the list. */
651 result = ADDRESS(block);
652 if (_heapinfo[block].free.size > blocks) {
653 /* The block we found has a bit left over,
654 so relink the tail end back into the free list. */
655 _heapinfo[block + blocks].free.size
656 = _heapinfo[block].free.size - blocks;
657 _heapinfo[block + blocks].free.next
658 = _heapinfo[block].free.next;
659 _heapinfo[block + blocks].free.prev
660 = _heapinfo[block].free.prev;
661 _heapinfo[_heapinfo[block].free.prev].free.next
662 = _heapinfo[_heapinfo[block].free.next].free.prev
663 = _heapindex = block + blocks;
665 /* The block exactly matches our requirements,
666 so just remove it from the list. */
667 _heapinfo[_heapinfo[block].free.next].free.prev
668 = _heapinfo[block].free.prev;
669 _heapinfo[_heapinfo[block].free.prev].free.next
670 = _heapindex = _heapinfo[block].free.next;
/* Mark the cluster busy; type 0 means a whole-block allocation. */
674 _heapinfo[block].busy.type = 0;
675 _heapinfo[block].busy.info.size = blocks;
677 _bytes_used += blocks * BLOCKSIZE;
678 _bytes_free -= blocks * BLOCKSIZE;
686 /* On some ANSI C systems, some libc functions call _malloc, _free
687 and _realloc. Make them use the GNU functions. */
/* Thin compatibility wrappers that simply forward to the GNU
   malloc/free/realloc defined in this file.
   NOTE(review): the bodies of _malloc and _free are elided in this
   excerpt; only _realloc's forwarding statement is visible. */
689 __ptr_t _malloc(__malloc_size_t size);
690 __ptr_t _malloc(__malloc_size_t size)
695 void _free(__ptr_t ptr);
696 void _free(__ptr_t ptr)
701 __ptr_t _realloc(__ptr_t ptr, __malloc_size_t size);
702 __ptr_t _realloc(__ptr_t ptr, __malloc_size_t size)
704 return realloc(ptr, size);
708 /* Free a block of memory allocated by `malloc'.
709 Copyright 1990, 1991, 1992, 1994 Free Software Foundation
710 Written May 1989 by Mike Haertel.
712 This library is free software; you can redistribute it and/or
713 modify it under the terms of the GNU Library General Public License as
714 published by the Free Software Foundation; either version 2 of the
715 License, or (at your option) any later version.
717 This library is distributed in the hope that it will be useful,
718 but WITHOUT ANY WARRANTY; without even the implied warranty of
719 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
720 Library General Public License for more details.
722 You should have received a copy of the GNU General Public License
723 along with this library; see the file COPYING. If not, write to
724 the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
725 Boston, MA 02111-1307, USA.
727 The author may be reached (Email) at the address mike@ai.mit.edu,
728 or (US mail) as Mike Haertel c/o Free Software Foundation, Inc. */
730 #ifndef _MALLOC_INTERNAL
731 #define _MALLOC_INTERNAL
735 /* Debugging hook for free. */
736 void (*__free_hook) __P((__ptr_t __ptr));
738 /* List of blocks allocated by memalign. */
/* Consulted by free() to translate an aligned pointer back to the
   exact pointer malloc returned. */
739 struct alignlist *_aligned_blocks = NULL;
741 /* Return memory to the heap.
742 Like `free' but don't call a __free_hook if there is one. */
/* Core of free(): for whole-block chunks (type 0) the block is merged
   into the free-cluster list, coalescing with neighbors and returning
   trailing clusters to the system; for fragments (type > 0) the piece
   is threaded back onto _fraghead[type], and when every fragment of a
   block is free, the whole block is freed.
   NOTE(review): many interior lines are elided in this excerpt
   (computation of `block`, the `switch (type)`, loop bodies, braces);
   treat this listing as a partial view of the full function. */
743 void _free_internal(__ptr_t ptr)
746 __malloc_size_t block, blocks;
748 struct list *prev, *next;
752 type = _heapinfo[block].busy.type;
755 /* Get as many statistics as early as we can. */
757 _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
758 _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;
760 /* Find the free cluster previous to this one in the free list.
761 Start searching at the last block referenced; this may benefit
762 programs with locality of allocation. */
766 i = _heapinfo[i].free.prev;
769 i = _heapinfo[i].free.next;
770 while (i > 0 && i < block);
771 i = _heapinfo[i].free.prev;
774 /* Determine how to link this block into the free list. */
775 if (block == i + _heapinfo[i].free.size) {
776 /* Coalesce this block with its predecessor. */
777 _heapinfo[i].free.size +=
778 _heapinfo[block].busy.info.size;
781 /* Really link this block back into the free list. */
782 _heapinfo[block].free.size =
783 _heapinfo[block].busy.info.size;
784 _heapinfo[block].free.next = _heapinfo[i].free.next;
785 _heapinfo[block].free.prev = i;
786 _heapinfo[i].free.next = block;
787 _heapinfo[_heapinfo[block].free.next].free.prev = block;
791 /* Now that the block is linked in, see if we can coalesce it
792 with its successor (by deleting its successor from the list
793 and adding in its size). */
794 if (block + _heapinfo[block].free.size ==
795 _heapinfo[block].free.next) {
796 _heapinfo[block].free.size +=
797 _heapinfo[_heapinfo[block].free.next].free.size;
798 _heapinfo[block].free.next =
799 _heapinfo[_heapinfo[block].free.next].free.next;
800 _heapinfo[_heapinfo[block].free.next].free.prev = block;
804 /* Now see if we can return stuff to the system. */
805 blocks = _heapinfo[block].free.size;
/* Only shrink the break when a big-enough free run sits exactly at
   the top of the heap and __morecore(0) confirms contiguity. */
806 if (blocks >= FINAL_FREE_BLOCKS && block + blocks == _heaplimit
807 && (*__morecore) (0) == ADDRESS(block + blocks)) {
808 __malloc_size_t bytes = blocks * BLOCKSIZE;
809 _heaplimit -= blocks;
810 (*__morecore) (-(int)bytes);
811 _heapinfo[_heapinfo[block].free.prev].free.next
812 = _heapinfo[block].free.next;
813 _heapinfo[_heapinfo[block].free.next].free.prev
814 = _heapinfo[block].free.prev;
815 block = _heapinfo[block].free.prev;
817 _bytes_free -= bytes;
820 /* Set the next search to begin at this block. */
825 /* Do some of the statistics. */
827 _bytes_used -= 1 << type;
829 _bytes_free += 1 << type;
831 /* Get the address of the first free fragment in this block. */
832 prev = (struct list *)((char *)ADDRESS(block) +
833 (_heapinfo[block].busy.info.frag.
836 if (_heapinfo[block].busy.info.frag.nfree ==
837 (BLOCKSIZE >> type) - 1) {
838 /* If all fragments of this block are free, remove them
839 from the fragment list and free the whole block. */
841 for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type);
844 prev->prev->next = next;
846 next->prev = prev->prev;
/* Re-describe the block as one busy whole-block chunk so the
   recursive free() below takes the type-0 path. */
847 _heapinfo[block].busy.type = 0;
848 _heapinfo[block].busy.info.size = 1;
850 /* Keep the statistics accurate. */
852 _bytes_used += BLOCKSIZE;
853 _chunks_free -= BLOCKSIZE >> type;
854 _bytes_free -= BLOCKSIZE;
856 free(ADDRESS(block));
857 } else if (_heapinfo[block].busy.info.frag.nfree != 0) {
858 /* If some fragments of this block are free, link this
859 fragment into the fragment list after the first free
860 fragment of this block. */
861 next = (struct list *)ptr;
862 next->next = prev->next;
865 if (next->next != NULL)
866 next->next->prev = next;
867 ++_heapinfo[block].busy.info.frag.nfree;
869 /* No fragments of this block are free, so link this
870 fragment into the fragment list and announce that
871 it is the first free fragment of this block. */
872 prev = (struct list *)ptr;
873 _heapinfo[block].busy.info.frag.nfree = 1;
874 _heapinfo[block].busy.info.frag.first =
876 ((unsigned long int)((char *)ptr - (char *)NULL)
877 % BLOCKSIZE >> type);
878 prev->next = _fraghead[type].next;
879 prev->prev = &_fraghead[type];
880 prev->prev->next = prev;
881 if (prev->next != NULL)
882 prev->next->prev = prev;
888 /* Return memory to the heap. */
/* Public free(): ignores pointers into the dumped static heap
   (PURE_DATA), resolves memalign'd pointers back to the exact malloc
   address via _aligned_blocks, and defers to __free_hook if installed.
   NOTE(review): lines are elided here (opening brace, declaration of
   `l`, the replacement of `ptr' with `l->exact', and the ultimate call
   into _free_internal); consult the full source. */
889 __free_ret_t free(__ptr_t ptr)
896 if (PURE_DATA(ptr)) {
900 for (l = _aligned_blocks; l != NULL; l = l->next)
901 if (l->aligned == ptr) {
902 l->aligned = NULL; /* Mark the slot in the list as free. */
907 if (__free_hook != NULL)
908 (*__free_hook) (ptr);
913 /* Copyright (C) 1991, 1993, 1994 Free Software Foundation, Inc.
914 This file is part of the GNU C Library.
916 The GNU C Library is free software; you can redistribute it and/or
917 modify it under the terms of the GNU Library General Public License as
918 published by the Free Software Foundation; either version 2 of the
919 License, or (at your option) any later version.
921 The GNU C Library is distributed in the hope that it will be useful,
922 but WITHOUT ANY WARRANTY; without even the implied warranty of
923 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
924 Library General Public License for more details.
926 You should have received a copy of the GNU General Public License
927 along with this library; see the file COPYING. If not, write to
928 the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
929 Boston, MA 02111-1307, USA. */
931 #ifndef _MALLOC_INTERNAL
932 #define _MALLOC_INTERNAL
938 #include <ansidecl.h>
939 #include <gnu-stabs.h>
/* `cfree' is a historical (SVID) alias for `free'.  Under the GNU C
   library it is created with function_alias(); otherwise a plain
   wrapper function is defined.
   NOTE(review): the wrapper's body is elided in this excerpt. */
943 function_alias(cfree, free, void, (ptr), DEFUN(cfree, (ptr), PTR ptr))
946 void cfree(__ptr_t ptr);
947 void cfree(__ptr_t ptr)
953 /* Change the size of a block allocated by `malloc'.
954 Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
955 Written May 1989 by Mike Haertel.
957 This library is free software; you can redistribute it and/or
958 modify it under the terms of the GNU Library General Public License as
959 published by the Free Software Foundation; either version 2 of the
960 License, or (at your option) any later version.
962 This library is distributed in the hope that it will be useful,
963 but WITHOUT ANY WARRANTY; without even the implied warranty of
964 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
965 Library General Public License for more details.
967 You should have received a copy of the GNU General Public License
968 along with this library; see the file COPYING. If not, write to
969 the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
970 Boston, MA 02111-1307, USA.
972 The author may be reached (Email) at the address mike@ai.mit.edu,
973 or (US mail) as Mike Haertel c/o Free Software Foundation, Inc. */
975 #ifndef _MALLOC_INTERNAL
976 #define _MALLOC_INTERNAL
981 #define min(A, B) ((A) < (B) ? (A) : (B))
984 /* Debugging hook for realloc. */
985 __ptr_t(*__realloc_hook) __P((__ptr_t __ptr, __malloc_size_t __size));
987 /* Resize the given region to the new size, returning a pointer
988 to the (possibly moved) region. This is optimized for speed;
989 some benchmarks seem to indicate that greater compactness is
990 achieved by unconditionally allocating and copying to a
991 new region. This module has incestuous knowledge of the
992 internals of both free and malloc. */
/* NOTE(review): interior lines are elided in this excerpt (declaration
   of `result' and `type', the computation of `block', the switch on
   `type', the free() call under the old _heaplimit, braces/returns);
   treat this listing as a partial view. */
993 __ptr_t realloc(__ptr_t ptr, __malloc_size_t size)
997 __malloc_size_t block, blocks, oldlimit;
/* Pointers into the dumped static heap cannot be resized in place:
   copy out into fresh storage instead. */
999 if (PURE_DATA(ptr)) {
1000 result = malloc(size);
1001 memcpy(result, ptr, size);
1005 else if (size == 0) {
1008 } else if (ptr == NULL)
1009 return malloc(size);
1011 if (__realloc_hook != NULL)
1012 return (*__realloc_hook) (ptr, size);
1016 type = _heapinfo[block].busy.type;
1019 /* Maybe reallocate a large block to a small fragment. */
1020 if (size <= BLOCKSIZE / 2) {
1021 result = malloc(size);
1022 if (result != NULL) {
1023 memcpy(result, ptr, size);
1024 _free_internal(ptr);
1029 /* The new size is a large allocation as well;
1030 see if we can hold it in place. */
1031 blocks = BLOCKIFY(size);
1032 if (blocks < _heapinfo[block].busy.info.size) {
1033 /* The new size is smaller; return
1034 excess memory to the free list. */
1035 _heapinfo[block + blocks].busy.type = 0;
1036 _heapinfo[block + blocks].busy.info.size
1037 = _heapinfo[block].busy.info.size - blocks;
1038 _heapinfo[block].busy.info.size = blocks;
1039 /* We have just created a new chunk by splitting a chunk in two.
1040 Now we will free this chunk; increment the statistics counter
1041 so it doesn't become wrong when _free_internal decrements it. */
1043 _free_internal(ADDRESS(block + blocks));
1045 } else if (blocks == _heapinfo[block].busy.info.size)
1046 /* No size change necessary. */
1049 /* Won't fit, so allocate a new region that will.
1050 Free the old region first in case there is sufficient
1051 adjacent free space to grow without moving. */
1052 blocks = _heapinfo[block].busy.info.size;
1053 /* Prevent free from actually returning memory to the system. */
1054 oldlimit = _heaplimit;
1057 _heaplimit = oldlimit;
1058 result = malloc(size);
1059 if (result == NULL) {
1060 /* Now we're really in trouble. We have to unfree
1061 the thing we just freed. Unfortunately it might
1062 have been coalesced with its neighbors. */
1063 if (_heapindex == block)
1064 (void)malloc(blocks * BLOCKSIZE);
1068 _heapindex) * BLOCKSIZE);
1069 (void)malloc(blocks * BLOCKSIZE);
1075 memmove(result, ptr, blocks * BLOCKSIZE);
1080 /* Old size is a fragment; type is logarithm
1081 to base two of the fragment size. */
1082 if (size > (__malloc_size_t) (1 << (type - 1)) &&
1083 size <= (__malloc_size_t) (1 << type))
1084 /* The new size is the same kind of fragment. */
1087 /* The new size is different; allocate a new space,
1088 and copy the lesser of the new size and the old. */
1089 result = malloc(size);
1093 min(size, (__malloc_size_t) 1 << type));
1102 /* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.
1104 This library is free software; you can redistribute it and/or
1105 modify it under the terms of the GNU Library General Public License as
1106 published by the Free Software Foundation; either version 2 of the
1107 License, or (at your option) any later version.
1109 This library is distributed in the hope that it will be useful,
1110 but WITHOUT ANY WARRANTY; without even the implied warranty of
1111 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1112 Library General Public License for more details.
1114 You should have received a copy of the GNU General Public License
1115 along with this library; see the file COPYING. If not, write to
1116 the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
1117 Boston, MA 02111-1307, USA.
1119 The author may be reached (Email) at the address mike@ai.mit.edu,
1120 or (US mail) as Mike Haertel c/o Free Software Foundation, Inc. */
1122 #ifndef _MALLOC_INTERNAL
1123 #define _MALLOC_INTERNAL
1127 /* Allocate an array of NMEMB elements each SIZE bytes long.
1128 The entire array is initialized to zeros. */
1129 __ptr_t calloc(__malloc_size_t nmemb, __malloc_size_t size)
1131 __ptr_t result = malloc(nmemb * size);
1134 (void)memset(result, 0, nmemb * size);
1139 /* Copyright (C) 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
1140 This file is part of the GNU C Library.
1142 The GNU C Library is free software; you can redistribute it and/or modify
1143 it under the terms of the GNU General Public License as published by
1144 the Free Software Foundation; either version 2, or (at your option)
1147 The GNU C Library is distributed in the hope that it will be useful,
1148 but WITHOUT ANY WARRANTY; without even the implied warranty of
1149 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1150 GNU General Public License for more details.
1152 You should have received a copy of the GNU General Public License
1153 along with the GNU C Library; see the file COPYING. If not, write to
the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
1155 Boston, MA 02111-1307, USA. */
1157 #ifndef _MALLOC_INTERNAL
1158 #define _MALLOC_INTERNAL
1162 /* #ifndef __GNU_LIBRARY__ */
1166 #ifdef GMALLOC_NEEDS_SBRK_DECL
1167 /* some versions of OSF1 need this */
1168 extern __ptr_t __sbrk __P((ssize_t increment));
1170 #ifdef __GNU_LIBRARY__
1171 /* It is best not to declare this and cast its result on foreign operating
1172 systems with potentially hostile include files. */
1173 #if !(defined(linux) && defined(sparc))
1174 extern __ptr_t __sbrk __P((int increment));
1183 /* Allocate INCREMENT more bytes of data space,
1184 and return the start of data space, or NULL on errors.
1185 If INCREMENT is negative, shrink data space. */
1186 __ptr_t __default_morecore(ptrdiff_t increment)
1188 __ptr_t result = (__ptr_t) __sbrk(increment);
1189 if (result == (__ptr_t) - 1)
1194 /* Copyright (C) 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
1196 This library is free software; you can redistribute it and/or
1197 modify it under the terms of the GNU Library General Public License as
1198 published by the Free Software Foundation; either version 2 of the
1199 License, or (at your option) any later version.
1201 This library is distributed in the hope that it will be useful,
1202 but WITHOUT ANY WARRANTY; without even the implied warranty of
1203 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1204 Library General Public License for more details.
1206 You should have received a copy of the GNU General Public License
1207 along with this library; see the file COPYING. If not, write to
1208 the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
1209 Boston, MA 02111-1307, USA. */
1211 #ifndef _MALLOC_INTERNAL
1212 #define _MALLOC_INTERNAL
1216 __ptr_t memalign(__malloc_size_t alignment, __malloc_size_t size)
1219 unsigned long int adj;
1221 size = ((size + alignment - 1) / alignment) * alignment;
1223 result = malloc(size);
1226 adj = (unsigned long int)((unsigned long int)((char *)result -
1230 struct alignlist *l;
1231 for (l = _aligned_blocks; l != NULL; l = l->next)
1232 if (l->aligned == NULL)
1233 /* This slot is free. Use it. */
1236 l = (struct alignlist *)
1237 malloc(sizeof(struct alignlist));
1242 l->next = _aligned_blocks;
1243 _aligned_blocks = l;
1246 result = l->aligned = (char *)result + alignment - adj;