1 /* Storage allocation and gc for SXEmacs Lisp interpreter.
2 Copyright (C) 1985-1998 Free Software Foundation, Inc.
3 Copyright (C) 1995 Sun Microsystems, Inc.
4 Copyright (C) 1995, 1996 Ben Wing.
6 This file is part of SXEmacs
8 SXEmacs is free software: you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation, either version 3 of the License, or
11 (at your option) any later version.
13 SXEmacs is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22 /* Synched up with: FSF 19.28, Mule 2.0. Substantially different from
27 FSF: Original version; a long time ago.
28 Mly: Significantly rewritten to use new 3-bit tags and
29 nicely abstracted object definitions, for 19.8.
30 JWZ: Improved code to keep track of purespace usage and
31 issue nice purespace and GC stats.
32 Ben Wing: Cleaned up frob-block lrecord code, added error-checking
33 and various changes for Mule, for 19.12.
34 Added bit vectors for 19.13.
35 Added lcrecord lists for 19.14.
36 slb: Lots of work on the purification and dump time code.
37 Synched Doug Lea malloc support from Emacs 20.2.
38 og: Killed the purespace. Portable dumper (moved to dumper.c)
44 #include "backtrace.h"
48 #include "ui/device.h"
50 #define INCLUDE_EVENTS_H_PRIVATE_SPHERE
51 #include "events/events.h"
54 #include "ui/glyphs.h"
56 #include "ui/redisplay.h"
57 #include "specifier.h"
60 #include "ui/window.h"
61 #include "ui/console-stream.h"
63 #ifdef DOUG_LEA_MALLOC
71 #define SXE_DEBUG_GC_GMP(args...) SXE_DEBUG_GC("[gmp]: " args)
74 #if defined HAVE_BDWGC && defined EF_USE_BDWGC
77 # if defined HAVE_GC_GC_H
79 # elif defined HAVE_GC_H
82 /* declare the 3 funs we need */
83 extern void *GC_malloc(size_t);
84 extern void *GC_malloc_atomic(size_t);
85 extern void *GC_malloc_uncollectable(size_t);
86 extern void *GC_malloc_stubborn(size_t);
87 extern void *GC_realloc(void*, size_t);
88 extern char *GC_strdup(const char*);
89 extern void GC_free(void*);
91 # error "I'm very concerned about your BDWGC support"
95 /* category subsystem */
101 EXFUN(Fgarbage_collect, 0);
104 /* this is _way_ too slow to be part of the standard debug options */
105 #if defined(DEBUG_SXEMACS) && defined(MULE)
106 #define VERIFY_STRING_CHARS_INTEGRITY
110 /* Define this to use malloc/free with no freelist for all datatypes,
111 the hope being that some debugging tools may help detect
112 freed memory references */
113 #ifdef USE_DEBUG_MALLOC /* Taking the above comment at face value -slb */
115 #define ALLOC_NO_POOLS
119 static Fixnum debug_allocation;
120 static Fixnum debug_allocation_backtrace_length;
123 #if defined EF_USE_ASYNEQ && !(defined HAVE_BDWGC && defined EF_USE_BDWGC)
124 #include "semaphore.h"
125 sxe_mutex_t cons_mutex;
126 #endif /* EF_USE_ASYNEQ && !BDWGC */
128 #include "events/event-queue.h"
129 #include "events/workers.h"
130 dllist_t workers = NULL;
133 /* Number of bytes of consing done since the last gc */
134 #if defined HAVE_BDWGC && defined EF_USE_BDWGC
135 #define INCREMENT_CONS_COUNTER_1(size)
139 EMACS_INT consing_since_gc;
140 #define INCREMENT_CONS_COUNTER_1(size) (consing_since_gc += (size))
145 debug_allocation_backtrace(void)
147 if (debug_allocation_backtrace_length > 0) {
148 debug_short_backtrace (debug_allocation_backtrace_length);
153 #define INCREMENT_CONS_COUNTER(foosize, type) \
155 if (debug_allocation) { \
156 stderr_out("allocating %s (size %ld)\n", \
157 type, (long)foosize); \
158 debug_allocation_backtrace (); \
160 INCREMENT_CONS_COUNTER_1(foosize); \
162 #define NOSEEUM_INCREMENT_CONS_COUNTER(foosize, type) \
164 if (debug_allocation > 1) { \
165 stderr_out("allocating noseeum %s (size %ld)\n", \
166 type, (long)foosize); \
167 debug_allocation_backtrace (); \
169 INCREMENT_CONS_COUNTER_1(foosize); \
172 #define INCREMENT_CONS_COUNTER(size, type) INCREMENT_CONS_COUNTER_1 (size)
173 #define NOSEEUM_INCREMENT_CONS_COUNTER(size, type) \
174 INCREMENT_CONS_COUNTER_1 (size)
178 DECREMENT_CONS_COUNTER(size_t size)
179 __attribute__((always_inline));
182 DECREMENT_CONS_COUNTER(size_t size)
184 consing_since_gc -= (size);
185 if (consing_since_gc < 0) {
186 consing_since_gc = 0;
190 /* Number of bytes of consing since gc before another gc should be done. */
191 EMACS_INT gc_cons_threshold;
193 /* Nonzero during gc */
196 /* Number of times GC has happened at this level or below.
197 * Level 0 is most volatile, contrary to usual convention.
198 * (Of course, there's only one level at present) */
199 EMACS_INT gc_generation_number[1];
201 /* This is just for use by the printer, to allow things to print uniquely */
202 static int lrecord_uid_counter;
204 /* Nonzero when calling certain hooks or doing other things where
206 int gc_currently_forbidden;
209 Lisp_Object Vpre_gc_hook, Qpre_gc_hook;
210 Lisp_Object Vpost_gc_hook, Qpost_gc_hook;
212 /* "Garbage collecting" */
213 Lisp_Object Vgc_message;
214 Lisp_Object Vgc_pointer_glyph;
215 static char gc_default_message[] = "Garbage collecting";
216 Lisp_Object Qgarbage_collecting;
218 /* Non-zero means we're in the process of doing the dump */
221 #ifdef ERROR_CHECK_TYPECHECK
223 Error_behavior ERROR_ME, ERROR_ME_NOT, ERROR_ME_WARN;
227 int c_readonly(Lisp_Object obj)
229 return POINTER_TYPE_P(XTYPE(obj)) && C_READONLY(obj);
232 int lisp_readonly(Lisp_Object obj)
234 return POINTER_TYPE_P(XTYPE(obj)) && LISP_READONLY(obj);
237 /* Maximum amount of C stack to save when a GC happens. */
239 #ifndef MAX_SAVE_STACK
240 #define MAX_SAVE_STACK 0 /* 16000 */
243 /* Non-zero means ignore malloc warnings. Set during initialization. */
244 int ignore_malloc_warnings;
246 static void *breathing_space;
248 void release_breathing_space(void)
250 if (breathing_space) {
251 void *tmp = breathing_space;
257 /* malloc calls this if it finds we are near exhausting storage */
258 void malloc_warning(const char *str)
260 if (ignore_malloc_warnings)
266 "Killing some buffers may delay running out of memory.\n"
267 "However, certainly by the time you receive the 95%% warning,\n"
268 "you should clean up, kill this Emacs, and start a new one.", str);
271 /* Called if malloc returns zero */
272 DOESNT_RETURN memory_full(void)
274 /* Force a GC next time eval is called.
275 It's better to loop garbage-collecting (we might reclaim enough
276 to win) than to loop beeping and barfing "Memory exhausted"
278 #if defined HAVE_BDWGC && defined EF_USE_BDWGC
279 /* that's all we can do */
282 consing_since_gc = gc_cons_threshold + 1;
283 release_breathing_space();
286 /* Flush some histories which might conceivably contain garbalogical
288 if (!NILP(Fboundp(Qvalues))) {
291 Vcommand_history = Qnil;
293 error("Memory exhausted");
296 #if !defined HAVE_BDWGC || !defined EF_USE_BDWGC
297 /* like malloc and realloc but check for no memory left, and block input. */
300 void *xmalloc(size_t size)
302 #if defined HAVE_BDWGC && defined EF_USE_BDWGC
303 /* yes i know this is contradicting because of the outer conditional
304 * but this here and the definition in lisp.h are meant to be
306 void *val = zmalloc(size);
307 #else /* !HAVE_BDWGC */
308 void *val = ymalloc(size);
309 #endif /* HAVE_BDWGC */
311 if (!val && (size != 0))
316 #undef xmalloc_atomic
317 void *xmalloc_atomic(size_t size)
319 #if defined HAVE_BDWGC && defined EF_USE_BDWGC
320 void *val = zmalloc_atomic(size);
321 #else /* !HAVE_BDWGC */
322 void *val = ymalloc_atomic(size);
323 #endif /* HAVE_BDWGC */
325 if (!val && (size != 0))
331 static void *xcalloc(size_t nelem, size_t elsize)
333 #if defined HAVE_BDWGC && defined EF_USE_BDWGC
334 void *val = zcalloc(nelem, elsize);
336 void *val = ycalloc(nelem, elsize);
339 if (!val && (nelem != 0))
344 void *xmalloc_and_zero(size_t size)
346 #if defined HAVE_BDWGC && defined EF_USE_BDWGC
347 return zmalloc_and_zero(size);
349 return xcalloc(size, 1);
354 void *xrealloc(void *block, size_t size)
356 #if defined HAVE_BDWGC && defined EF_USE_BDWGC
357 void *val = zrealloc(block, size);
358 #else /* !HAVE_BDWGC */
359 /* We must call malloc explicitly when BLOCK is 0, since some
360 reallocs don't do this. */
361 void *val = block ? yrealloc(block, size) : ymalloc(size);
362 #endif /* HAVE_BDWGC */
364 if (!val && (size != 0))
370 #ifdef ERROR_CHECK_GC
373 typedef unsigned int four_byte_t;
374 #elif SIZEOF_LONG == 4
375 typedef unsigned long four_byte_t;
376 #elif SIZEOF_SHORT == 4
377 typedef unsigned short four_byte_t;
379 What kind of strange-ass system are we running on?
381 #if !defined HAVE_BDWGC || !defined EF_USE_BDWGC
382 static void deadbeef_memory(void *ptr, size_t size)
384 four_byte_t *ptr4 = (four_byte_t *) ptr;
385 size_t beefs = size >> 2;
387 /* In practice, size will always be a multiple of four. */
389 (*ptr4++) = 0xDEADBEEF;
393 #else /* !ERROR_CHECK_GC */
395 #define deadbeef_memory(ptr, size)
397 #endif /* !ERROR_CHECK_GC */
400 #if !defined HAVE_BDWGC || !defined EF_USE_BDWGC
401 char *xstrdup(const char *str)
403 #ifdef ERROR_CHECK_MALLOC
404 #if SIZEOF_VOID_P == 4
405 /* Unbelievably, calling free() on 0xDEADBEEF doesn't cause an
406 error until much later on for many system mallocs, such as
407 the one that comes with Solaris 2.3. FMH!! */
408 assert(str != (void *)0xDEADBEEF);
409 #elif SIZEOF_VOID_P == 8
410 assert(str != (void*)0xCAFEBABEDEADBEEF);
412 #endif /* ERROR_CHECK_MALLOC */
414 int len = strlen(str)+1; /* for stupid terminating 0 */
416 void *val = xmalloc(len);
419 return (char*)memcpy(val, str, len);
425 #if !defined HAVE_STRDUP
426 /* fallback definition for platforms whose libc lacks strdup() */
427 char *strdup(const char *s)
431 #endif /* !HAVE_STRDUP */
435 allocate_lisp_storage(size_t size)
437 return xmalloc(size);
440 #if defined HAVE_BDWGC && defined EF_USE_BDWGC && 0
442 lcrec_register_finaliser(struct lcrecord_header *b)
444 GC_finalization_proc *foo = NULL;
446 auto void lcrec_finaliser();
448 auto void lcrec_finaliser(void *obj, void *UNUSED(data))
450 const struct lrecord_implementation *lrimp =
451 XRECORD_LHEADER_IMPLEMENTATION(obj);
452 if (LIKELY(lrimp->finalizer != NULL)) {
453 SXE_DEBUG_GC("running the finaliser on %p :type %d\n",
455 lrimp->finalizer(obj, 0);
458 memset(obj, 0, sizeof(struct lcrecord_header));
462 SXE_DEBUG_GC("lcrec-fina %p\n", b);
463 GC_REGISTER_FINALIZER(b, lcrec_finaliser, NULL, foo, bar);
468 lcrec_register_finaliser(struct lcrecord_header *UNUSED(b))
472 #endif /* HAVE_BDWGC */
474 #if !defined HAVE_BDWGC || !defined EF_USE_BDWGC
475 /* lcrecords are chained together through their "next" field.
476 After doing the mark phase, GC will walk this linked list
477 and free any lcrecord which hasn't been marked. */
478 static struct lcrecord_header *all_lcrecords;
482 #if defined USE_MLY_UIDS
483 #define lcheader_set_uid(_x) (_x)->uid = lrecord_uid_counter++
484 #elif defined USE_JWZ_UIDS
485 #define lcheader_set_uid(_x) (_x)->uid = (long int)&(_x)
488 void *alloc_lcrecord(size_t size,
489 const struct lrecord_implementation *implementation)
491 struct lcrecord_header *lcheader;
494 ((implementation->static_size == 0 ?
495 implementation->size_in_bytes_method != NULL :
496 implementation->static_size == size)
497 && (!implementation->basic_p)
499 (!(implementation->hash == NULL
500 && implementation->equal != NULL)));
503 lcheader = (struct lcrecord_header *)allocate_lisp_storage(size);
504 lcrec_register_finaliser(lcheader);
505 set_lheader_implementation(&lcheader->lheader, implementation);
507 lcheader_set_uid(lcheader);
509 #if !defined HAVE_BDWGC || !defined EF_USE_BDWGC
510 lcheader->next = all_lcrecords;
511 all_lcrecords = lcheader;
512 INCREMENT_CONS_COUNTER(size, implementation->name);
518 static void disksave_object_finalization_1(void)
520 #if !defined HAVE_BDWGC || !defined EF_USE_BDWGC
521 struct lcrecord_header *header;
523 for (header = all_lcrecords; header; header = header->next) {
524 if (LHEADER_IMPLEMENTATION(&header->lheader)->finalizer &&
526 LHEADER_IMPLEMENTATION(&header->lheader)->
527 finalizer(header, 1);
532 /************************************************************************/
533 /* Debugger support */
534 /************************************************************************/
535 /* Give gdb/dbx enough information to decode Lisp Objects. We make
536 sure certain symbols are always defined, so gdb doesn't complain
537 about expressions in src/.gdbinit. See src/.gdbinit or src/.dbxrc
538 to see how this is used. */
540 EMACS_UINT dbg_valmask = ((1UL << VALBITS) - 1) << GCBITS;
541 EMACS_UINT dbg_typemask = (1UL << GCTYPEBITS) - 1;
543 unsigned char dbg_valbits = VALBITS;
544 unsigned char dbg_gctypebits = GCTYPEBITS;
546 /* On some systems, the above definitions will be optimized away by
547 the compiler or linker unless they are referenced in some function. */
548 long dbg_inhibit_dbg_symbol_deletion(void);
549 long dbg_inhibit_dbg_symbol_deletion(void)
551 return (dbg_valmask + dbg_typemask + dbg_valbits + dbg_gctypebits);
554 /* Macros turned into functions for ease of debugging.
555 Debuggers don't know about macros! */
556 int dbg_eq(Lisp_Object obj1, Lisp_Object obj2);
557 int dbg_eq(Lisp_Object obj1, Lisp_Object obj2)
559 return EQ(obj1, obj2);
562 /************************************************************************/
563 /* Fixed-size type macros */
564 /************************************************************************/
566 /* For fixed-size types that are commonly used, we malloc() large blocks
567 of memory at a time and subdivide them into chunks of the correct
568 size for an object of that type. This is more efficient than
569 malloc()ing each object separately because we save on malloc() time
570 and overhead due to the fewer number of malloc()ed blocks, and
571 also because we don't need any extra pointers within each object
572 to keep them threaded together for GC purposes. For less common
573 (and frequently large-size) types, we use lcrecords, which are
574 malloc()ed individually and chained together through a pointer
575 in the lcrecord header. lcrecords do not need to be fixed-size
576 (i.e. two objects of the same type need not have the same size;
577 however, the size of a particular object cannot vary dynamically).
578 It is also much easier to create a new lcrecord type because no
579 additional code needs to be added to alloc.c. Finally, lcrecords
580 may be more efficient when there are only a small number of them.
582 The types that are stored in these large blocks (or "frob blocks")
583 are cons, float, compiled-function, symbol, marker, extent, event,
586 Note that strings are special in that they are actually stored in
587 two parts: a structure containing information about the string, and
588 the actual data associated with the string. The former structure
589 (a struct Lisp_String) is a fixed-size structure and is managed the
590 same way as all the other such types. This structure contains a
591 pointer to the actual string data, which is stored in structures of
592 type struct string_chars_block. Each string_chars_block consists
593 of a pointer to a struct Lisp_String, followed by the data for that
594 string, followed by another pointer to a Lisp_String, followed by
595 the data for that string, etc. At GC time, the data in these
596 blocks is compacted by searching sequentially through all the
597 blocks and compressing out any holes created by unmarked strings.
598 Strings that are more than a certain size (bigger than the size of
599 a string_chars_block, although something like half as big might
600 make more sense) are malloc()ed separately and not stored in
601 string_chars_blocks. Furthermore, no one string stretches across
602 two string_chars_blocks.
604 Vectors are each malloc()ed separately, similar to lcrecords.
606 In the following discussion, we use conses, but it applies equally
607 well to the other fixed-size types.
609 We store cons cells inside of cons_blocks, allocating a new
610 cons_block with malloc() whenever necessary. Cons cells reclaimed
611 by GC are put on a free list to be reallocated before allocating
612 any new cons cells from the latest cons_block. Each cons_block is
613 just under 2^n - MALLOC_OVERHEAD bytes long, since malloc (at least
614 the versions in malloc.c and gmalloc.c) really allocates in units
615 of powers of two and uses 4 bytes for its own overhead.
617 What GC actually does is to search through all the cons_blocks,
618 from the most recently allocated to the oldest, and put all
619 cons cells that are not marked (whether or not they're already
620 free) on a cons_free_list. The cons_free_list is a stack, and
621 so the cons cells in the oldest-allocated cons_block end up
622 at the head of the stack and are the first to be reallocated.
623 If any cons_block is entirely free, it is freed with free()
624 and its cons cells removed from the cons_free_list. Because
625 the cons_free_list ends up basically in memory order, we have
626 a high locality of reference (assuming a reasonable turnover
627 of allocating and freeing) and have a reasonable probability
628 of entirely freeing up cons_blocks that have been more recently
629 allocated. This stage is called the "sweep stage" of GC, and
630 is executed after the "mark stage", which involves starting
631 from all places that are known to point to in-use Lisp objects
632 (e.g. the obarray, where all symbols are stored; the
633 current catches and condition-cases; the backtrace list of
634 currently executing functions; the gcpro list; etc.) and
635 recursively marking all objects that are accessible.
637 At the beginning of the sweep stage, the conses in the cons blocks
638 are in one of three states: in use and marked, in use but not
639 marked, and not in use (already freed). Any conses that are marked
640 have been marked in the mark stage just executed, because as part
641 of the sweep stage we unmark any marked objects. The way we tell
642 whether or not a cons cell is in use is through the LRECORD_FREE_P
643 macro. This uses a special lrecord type `lrecord_type_free',
644 which is never associated with any valid object.
646 Conses on the free_cons_list are threaded through a pointer stored
647 in the conses themselves. Because the cons is still in a
648 cons_block and needs to remain marked as not in use for the next
649 time that GC happens, we need room to store both the "free"
650 indicator and the chaining pointer. So this pointer is stored
651 after the lrecord header (actually where C places a pointer after
652 the lrecord header; they are not necessarily contiguous). This
653 implies that all fixed-size types must be big enough to contain at
654 least one pointer. This is true for all current fixed-size types,
655 with the possible exception of Lisp_Floats, for which we define the
656 meat of the struct using a union of a pointer and a double to
657 ensure adequate space for the free list chain pointer.
659 Some types of objects need additional "finalization" done
660 when an object is converted from in use to not in use;
661 this is the purpose of the ADDITIONAL_FREE_type macro.
662 For example, markers need to be removed from the chain
663 of markers that is kept in each buffer. This is because
664 markers in a buffer automatically disappear if the marker
665 is no longer referenced anywhere (the same does not
666 apply to extents, however).
668 WARNING: Things are in an extremely bizarre state when
669 the ADDITIONAL_FREE_type macros are called, so beware!
671 When ERROR_CHECK_GC is defined, we do things differently so as to
672 maximize our chances of catching places where there is insufficient
673 GCPROing. The thing we want to avoid is having an object that
674 we're using but didn't GCPRO get freed by GC and then reallocated
675 while we're in the process of using it -- this will result in
676 something seemingly unrelated getting trashed, and is extremely
677 difficult to track down. If the object gets freed but not
678 reallocated, we can usually catch this because we set most of the
679 bytes of a freed object to 0xDEADBEEF. (The lisp object type is set
680 to the invalid type `lrecord_type_free', however, and a pointer
681 used to chain freed objects together is stored after the lrecord
682 header; we play some tricks with this pointer to make it more
683 bogus, so crashes are more likely to occur right away.)
685 We want freed objects to stay free as long as possible,
686 so instead of doing what we do above, we maintain the
687 free objects in a first-in first-out queue. We also
688 don't recompute the free list each GC, unlike above;
689 this ensures that the queue ordering is preserved.
690 [This means that we are likely to have worse locality
691 of reference, and that we can never free a frob block
692 once it's allocated. (Even if we know that all cells
693 in it are free, there's no easy way to remove all those
694 cells from the free list because the objects on the
695 free list are unlikely to be in memory order.)]
696 Furthermore, we never take objects off the free list
697 unless there's a large number (usually 1000, but
698 varies depending on type) of them already on the list.
699 This way, we ensure that an object that gets freed will
700 remain free for the next 1000 (or whatever) times that
701 an object of that type is allocated. */
703 #ifndef MALLOC_OVERHEAD
705 #define MALLOC_OVERHEAD 0
706 #elif defined (rcheck)
707 #define MALLOC_OVERHEAD 20
709 #define MALLOC_OVERHEAD 8
711 #endif /* MALLOC_OVERHEAD */
713 #if !defined(HAVE_MMAP) || defined(DOUG_LEA_MALLOC)
714 /* If we released our reserve (due to running out of memory),
715 and we have a fair amount free once again,
716 try to set aside another reserve in case we run out once more.
718 This is called when a relocatable block is freed in ralloc.c. */
719 void refill_memory_reserve(void);
720 void refill_memory_reserve(void)
722 if (breathing_space == 0)
723 breathing_space = (char *)malloc(4096 - MALLOC_OVERHEAD);
725 #endif /* !HAVE_MMAP || DOUG_LEA_MALLOC */
727 #ifdef ALLOC_NO_POOLS
728 # define TYPE_ALLOC_SIZE(type, structtype) 1
730 # define TYPE_ALLOC_SIZE(type, structtype) \
731 ((2048 - MALLOC_OVERHEAD - sizeof (struct type##_block *)) \
732 / sizeof (structtype))
733 #endif /* ALLOC_NO_POOLS */
735 #if defined HAVE_BDWGC && defined EF_USE_BDWGC
736 #define DECLARE_FIXED_TYPE_ALLOC(type, structtype) \
738 init_##type##_alloc(void) \
743 #define DECLARE_FIXED_TYPE_ALLOC(type, structtype) \
745 struct type##_block \
747 struct type##_block *prev; \
748 structtype block[TYPE_ALLOC_SIZE (type, structtype)]; \
751 static struct type##_block *current_##type##_block; \
752 static int current_##type##_block_index; \
754 static Lisp_Free *type##_free_list; \
755 static Lisp_Free *type##_free_list_tail; \
758 init_##type##_alloc (void) \
760 current_##type##_block = 0; \
761 current_##type##_block_index = \
762 countof (current_##type##_block->block); \
763 type##_free_list = 0; \
764 type##_free_list_tail = 0; \
767 static int gc_count_num_##type##_in_use; \
768 static int gc_count_num_##type##_freelist
769 #endif /* HAVE_BDWGC */
771 /* no need for a case distinction, shouldn't be called in bdwgc mode */
772 #define ALLOCATE_FIXED_TYPE_FROM_BLOCK(type, result) \
774 if (current_##type##_block_index \
775 == countof (current_##type##_block->block)) { \
776 struct type##_block *AFTFB_new = \
777 (struct type##_block *) \
778 allocate_lisp_storage( \
779 sizeof (struct type##_block)); \
780 AFTFB_new->prev = current_##type##_block; \
781 current_##type##_block = AFTFB_new; \
782 current_##type##_block_index = 0; \
784 (result) = &(current_##type##_block \
785 ->block[current_##type##_block_index++]); \
788 /* Allocate an instance of a type that is stored in blocks.
789 TYPE is the "name" of the type, STRUCTTYPE is the corresponding
792 #ifdef ERROR_CHECK_GC
794 /* Note: if you get crashes in this function, suspect incorrect calls
795 to free_cons() and friends. This happened once because the cons
796 cell was not GC-protected and was getting collected before
797 free_cons() was called. */
799 /* no need for a case distinction, shouldn't be called in bdwgc mode */
800 #define ALLOCATE_FIXED_TYPE_1(type, structtype, result) \
803 if (gc_count_num_##type##_freelist > \
804 MINIMUM_ALLOWED_FIXED_TYPE_CELLS_##type) { \
805 result = (structtype *) type##_free_list; \
806 /* Before actually using the chain pointer, \
807 we complement all its bits; \
808 see FREE_FIXED_TYPE(). */ \
809 type##_free_list = (Lisp_Free *) \
811 (type##_free_list->chain)); \
812 gc_count_num_##type##_freelist--; \
814 ALLOCATE_FIXED_TYPE_FROM_BLOCK (type, result); \
816 MARK_LRECORD_AS_NOT_FREE (result); \
817 unlock_allocator(); \
820 #else /* !ERROR_CHECK_GC */
822 /* no need for a case distinction, shouldn't be called in bdwgc mode */
823 #define ALLOCATE_FIXED_TYPE_1(type, structtype, result) \
825 if (type##_free_list) { \
826 result = (structtype *) type##_free_list; \
827 type##_free_list = type##_free_list->chain; \
829 ALLOCATE_FIXED_TYPE_FROM_BLOCK (type, result); \
831 MARK_LRECORD_AS_NOT_FREE (result); \
833 #endif /* !ERROR_CHECK_GC */
835 #if defined HAVE_BDWGC && defined EF_USE_BDWGC
837 #define ALLOCATE_FIXED_TYPE(type, structtype, result) \
839 result = xnew(structtype); \
840 INCREMENT_CONS_COUNTER(sizeof(structtype), #type); \
842 #define ALLOCATE_ATOMIC_FIXED_TYPE(type, structtype, result) \
844 result = xnew_atomic(structtype); \
845 INCREMENT_CONS_COUNTER(sizeof(structtype), #type); \
850 #define ALLOCATE_FIXED_TYPE(type, structtype, result) \
852 ALLOCATE_FIXED_TYPE_1 (type, structtype, result); \
853 INCREMENT_CONS_COUNTER (sizeof (structtype), #type); \
855 #define ALLOCATE_ATOMIC_FIXED_TYPE ALLOCATE_FIXED_TYPE
859 #if defined HAVE_BDWGC && defined EF_USE_BDWGC
860 #define NOSEEUM_ALLOCATE_FIXED_TYPE(type, structtype, result) \
861 (result) = xnew(structtype)
863 #define NOSEEUM_ALLOCATE_FIXED_TYPE(type, structtype, result) \
865 ALLOCATE_FIXED_TYPE_1 (type, structtype, result); \
866 NOSEEUM_INCREMENT_CONS_COUNTER(sizeof (structtype), #type); \
870 /* Lisp_Free is the type to represent a free list member inside a frob
871 block of any lisp object type. */
872 typedef struct Lisp_Free {
873 struct lrecord_header lheader;
874 struct Lisp_Free *chain;
877 #define LRECORD_FREE_P(ptr) \
878 ((ptr)->lheader.type == lrecord_type_free)
880 #define MARK_LRECORD_AS_FREE(ptr) \
881 ((void) ((ptr)->lheader.type = lrecord_type_free))
883 #ifdef ERROR_CHECK_GC
884 #define MARK_LRECORD_AS_NOT_FREE(ptr) \
885 ((void) ((ptr)->lheader.type = lrecord_type_undefined))
887 #define MARK_LRECORD_AS_NOT_FREE(ptr) DO_NOTHING
890 #ifdef ERROR_CHECK_GC
892 #define PUT_FIXED_TYPE_ON_FREE_LIST(type, structtype, ptr) \
894 if (type##_free_list_tail) { \
895 /* When we store the chain pointer, we \
896 complement all its bits; this should \
897 significantly increase its bogosity in case \
898 someone tries to use the value, and \
899 should make us crash faster if someone \
900 overwrites the pointer because when it gets \
901 un-complemented in ALLOCATED_FIXED_TYPE(), \
902 the resulting pointer will be extremely \
904 type##_free_list_tail->chain = \
905 (Lisp_Free *) ~ (EMACS_UINT) (ptr); \
907 type##_free_list = (Lisp_Free *) (ptr); \
909 type##_free_list_tail = (Lisp_Free *) (ptr); \
912 #else /* !ERROR_CHECK_GC */
914 #define PUT_FIXED_TYPE_ON_FREE_LIST(type, structtype, ptr) \
916 ((Lisp_Free *) (ptr))->chain = type##_free_list; \
917 type##_free_list = (Lisp_Free *) (ptr); \
920 #endif /* !ERROR_CHECK_GC */
922 /* TYPE and STRUCTTYPE are the same as in ALLOCATE_FIXED_TYPE(). */
924 #define FREE_FIXED_TYPE(type, structtype, ptr) \
926 structtype *FFT_ptr = (ptr); \
927 ADDITIONAL_FREE_##type (FFT_ptr); \
928 deadbeef_memory (FFT_ptr, sizeof (structtype)); \
929 PUT_FIXED_TYPE_ON_FREE_LIST(type, structtype, FFT_ptr); \
930 MARK_LRECORD_AS_FREE (FFT_ptr); \
933 /* Like FREE_FIXED_TYPE() but used when we are explicitly
934 freeing a structure through free_cons(), free_marker(), etc.
935 rather than through the normal process of sweeping.
936 We attempt to undo the changes made to the allocation counters
937 as a result of this structure being allocated. This is not
938 completely necessary but helps keep things saner: e.g. this way,
939 repeatedly allocating and freeing a cons will not result in
940 the consing-since-gc counter advancing, which would cause a GC
941 and somewhat defeat the purpose of explicitly freeing. */
943 #if defined HAVE_BDWGC && defined EF_USE_BDWGC
944 #define FREE_FIXED_TYPE_WHEN_NOT_IN_GC(type, structtype, ptr)
945 #else /* !HAVE_BDWGC */
946 #define FREE_FIXED_TYPE_WHEN_NOT_IN_GC(type, structtype, ptr) \
948 FREE_FIXED_TYPE (type, structtype, ptr); \
949 DECREMENT_CONS_COUNTER (sizeof (structtype)); \
950 gc_count_num_##type##_freelist++; \
952 #endif /* HAVE_BDWGC */
954 /************************************************************************/
955 /* Cons allocation */
956 /************************************************************************/
958 DECLARE_FIXED_TYPE_ALLOC(cons, Lisp_Cons);
959 /* conses are used and freed so often that we set this really high */
960 /* #define MINIMUM_ALLOWED_FIXED_TYPE_CELLS_cons 20000 */
961 #define MINIMUM_ALLOWED_FIXED_TYPE_CELLS_cons 2000
963 #if defined HAVE_BDWGC && defined EF_USE_BDWGC && 0
965 cons_register_finaliser(Lisp_Cons *s)
967 GC_finalization_proc *foo = NULL;
969 auto void cons_finaliser();
971 auto void cons_finaliser(void *obj, void *UNUSED(data))
974 memset(obj, 0, sizeof(Lisp_Cons));
978 SXE_DEBUG_GC("cons-fina %p\n", s);