1 /* Execution of byte code produced by bytecomp.el.
2 Implementation of compiled-function objects.
3 Copyright (C) 1992, 1993 Free Software Foundation, Inc.
5 This file is part of SXEmacs
7 SXEmacs is free software: you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation, either version 3 of the License, or
10 (at your option) any later version.
12 SXEmacs is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21 /* Synched up with: Mule 2.0, FSF 19.30. */
23 /* This file has been Mule-ized. */
29 hacked on by jwz@jwz.org 1991-06
30 o added a compile-time switch to turn on simple sanity checking;
31 o put back the obsolete byte-codes for error-detection;
32 o added a new instruction, unbind_all, which I will use for
33 tail-recursion elimination;
34 o made temp_output_buffer_show be called with the right number
36 o made the new bytecodes be called with args in the right order;
37 o added metering support.
40 o added relative jump instructions;
41 o all conditionals now only do QUIT if they jump.
43 Ben Wing: some changes for Mule, 1995-06.
45 Martin Buchholz: performance hacking, 1998-09.
46 See Internals Manual, Evaluation.
51 #include "backtrace.h"
57 EXFUN(Ffetch_bytecode, 1);
59 Lisp_Object Qbyte_code, Qcompiled_functionp, Qinvalid_byte_code;
61 enum Opcode { /* Byte codes */
87 Bsymbol_function = 0113,
110 Beq = 0141, /* was Bmark,
111 but no longer generated as of v18 */
117 Bfollowing_char = 0147,
118 Bpreceding_char = 0150,
119 Bcurrent_column = 0151,
121 Bequal = 0153, /* was Bscan_buffer,
122 but no longer generated as of v18 */
127 Bcurrent_buffer = 0160,
129 Bsave_current_buffer = 0162, /* was Bread_char,
130 but no longer generated as of v19 */
131 Bmemq = 0163, /* was Bset_mark,
132 but no longer generated as of v18 */
133 Binteractive_p = 0164, /* Needed since interactive-p takes
135 Bforward_char = 0165,
136 Bforward_word = 0166,
137 Bskip_chars_forward = 0167,
138 Bskip_chars_backward = 0170,
139 Bforward_line = 0171,
141 Bbuffer_substring = 0173,
142 Bdelete_region = 0174,
143 Bnarrow_to_region = 0175,
150 Bgotoifnonnil = 0204,
151 Bgotoifnilelsepop = 0205,
152 Bgotoifnonnilelsepop = 0206,
157 Bsave_excursion = 0212,
158 Bsave_window_excursion = 0213,
159 Bsave_restriction = 0214,
162 Bunwind_protect = 0216,
163 Bcondition_case = 0217,
164 Btemp_output_buffer_setup = 0220,
165 Btemp_output_buffer_show = 0221,
170 Bmatch_beginning = 0224,
175 Bstring_equal = 0230,
176 Bstring_lessp = 0231,
195 BRgotoifnonnil = 0254,
196 BRgotoifnilelsepop = 0255,
197 BRgotoifnonnilelsepop = 0256,
202 Bmember = 0266, /* new in v20 */
203 Bassq = 0267, /* new in v20 */
205 Bcl_macro = 0270, /* only if modules/cl is there */
207 BLAST_BEFORE_THREE_O_O = Bcl_macro,
211 typedef enum Opcode Opcode;
/* One byte of the compiled instruction stream. */
212 typedef unsigned char Opbyte;
/* Sanity-check a fetched opcode; invoked under ERROR_CHECK_BYTE_CODE. */
214 static void check_opcode(Opcode opcode);
/* Signal an invalid-byte-code error; takes a varargs message
   (presumably printf-style -- confirm against the definition). */
215 static void invalid_byte_code_error(char *error_message, ...);
218 execute_rare_opcode(Lisp_Object *stk, const Opbyte *prg, Opcode opcode)
219 __attribute__((noinline));
221 static Lisp_Object execute_optimized_program(const Opbyte * program,
223 Lisp_Object * constants_data);
225 extern Lisp_Object Qand_rest, Qand_optional;
227 /* Define BYTE_CODE_METER to enable generation of a byte-op usage histogram.
228 This isn't defined in FSF Emacs and isn't defined in XEmacs v19. */
229 /* #define BYTE_CODE_METER */
231 #ifdef BYTE_CODE_METER
233 Lisp_Object Vbyte_code_meter, Qbyte_code_meter;
234 int byte_metering_on;
236 static void meter_code(Opcode prev_opcode, Opcode this_opcode)
238 if (byte_metering_on) {
240 XVECTOR_DATA(XVECTOR_DATA(Vbyte_code_meter)[this_opcode]);
241 p[0] = INT_PLUS1(p[0]);
243 p[prev_opcode] = INT_PLUS1(p[prev_opcode]);
247 #endif /* BYTE_CODE_METER */
249 static Lisp_Object bytecode_nreverse(Lisp_Object list)
251 REGISTER Lisp_Object prev = Qnil;
252 REGISTER Lisp_Object tail = list;
254 while (!NILP(tail)) {
255 REGISTER Lisp_Object next;
265 /* Apply compiled-function object FUN to the NARGS evaluated arguments
266 in ARGS, and return the result of evaluation. */
268 funcall_compiled_function(Lisp_Object fun, int nargs, Lisp_Object args[])
270 /* This function can GC */
271 int speccount = specpdl_depth();
273 Lisp_Compiled_Function *f = XCOMPILED_FUNCTION(fun);
276 if (!OPAQUEP(f->instructions))
277 /* Lazily munge the instructions into a more efficient form */
278 optimize_compiled_function(fun);
280 /* optimize_compiled_function() guaranteed that f->specpdl_depth is
281 the required space on the specbinding stack for binding the args
282 and local variables of fun. So just reserve it once. */
283 SPECPDL_RESERVE(f->specpdl_depth);
286 /* Fmake_byte_code() guaranteed that f->arglist is a valid list
287 containing only non-constant symbols. */
288 LIST_LOOP_3(symbol, f->arglist, tail) {
289 if (EQ(symbol, Qand_rest)) {
292 SPECBIND_FAST_UNSAFE(symbol,
296 } else if (EQ(symbol, Qand_optional))
298 else if (i == nargs && !optional)
299 goto wrong_number_of_arguments;
301 SPECBIND_FAST_UNSAFE(symbol,
303 nargs ? args[i++] : Qnil);
308 goto wrong_number_of_arguments;
314 execute_optimized_program((Opbyte *)
315 XOPAQUE_DATA(f->instructions),
317 XVECTOR_DATA(f->constants));
319 /* The attempt to optimize this by only unbinding variables failed
320 because using buffer-local variables as function parameters
321 leads to specpdl_ptr->func != 0 */
322 /* UNBIND_TO_GCPRO_VARIABLES_ONLY (speccount, value); */
323 UNBIND_TO_GCPRO(speccount, value);
327 wrong_number_of_arguments:
328 /* The actual printed compiled_function object is incomprehensible.
329 Check the backtrace to see if we can get a more meaningful symbol. */
330 if (EQ(fun, indirect_function(*backtrace_list->function, 0)))
331 fun = *backtrace_list->function;
332 return Fsignal(Qwrong_number_of_arguments, list2(fun, make_int(nargs)));
335 /* Read next uint8 from the instruction stream (advances program_ptr). */
336 #define READ_UINT_1 ((unsigned int) (unsigned char) *program_ptr++)
338 /* Read next uint16 from the instruction stream.  Two-byte operands are
   stored low byte first: the bytes are accessed at negative offsets,
   i.e. relative to a program_ptr already advanced past the operand
   (low byte at program_ptr[-2], high byte at program_ptr[-1]). */
339 #define READ_UINT_2 \
341 (((unsigned int) (unsigned char) program_ptr[-1]) * 256 + \
342 ((unsigned int) (unsigned char) program_ptr[-2])))
344 /* Read next int8 from the instruction stream (sign-extended;
   advances program_ptr). */
345 #define READ_INT_1 ((int) (signed char) *program_ptr++)
347 /* Read next int16 from the instruction stream.  Same little-endian
   layout as READ_UINT_2, but the high byte is sign-extended. */
350 (((int) ( signed char) program_ptr[-1]) * 256 + \
351 ((int) (unsigned char) program_ptr[-2])))
353 /* Read next int8 from instruction stream; don't advance program_ptr. */
354 #define PEEK_INT_1 ((int) (signed char) program_ptr[0])
356 /* Read next int16 from instruction stream; don't advance program_ptr.
   Low byte at program_ptr[0], sign-extended high byte at program_ptr[1]. */
358 ((((int) ( signed char) program_ptr[1]) * 256) | \
359 ((int) (unsigned char) program_ptr[0]))
361 /* Do relative jumps from the current location.
362 We only do a QUIT if we jump backwards, for efficiency.
363 No infloops without backward jumps! */
364 #define JUMP_RELATIVE(jump) do { \
365 int JR_jump = (jump); \
366 if (JR_jump < 0) QUIT; \
367 program_ptr += JR_jump; \
/* Take a jump: JUMP reads a 2-byte signed offset, JUMPR a 1-byte signed
   offset.  Both PEEK rather than READ the operand -- JUMP_RELATIVE moves
   program_ptr itself, so the offset must not be consumed first. */
370 #define JUMP JUMP_RELATIVE (PEEK_INT_2)
371 #define JUMPR JUMP_RELATIVE (PEEK_INT_1)
/* Branch not taken: just step over the unread 2-byte / 1-byte operand. */
373 #define JUMP_NEXT ((void) (program_ptr += 2))
374 #define JUMPR_NEXT ((void) (program_ptr += 1))
376 /* Push x onto the execution stack.  stack_ptr is pre-incremented, so
   it always points at the current top-of-stack slot. */
377 #define PUSH(x) (*++stack_ptr = (x))
379 /* Pop a value off the execution stack: yields the old top, then
   post-decrements stack_ptr. */
380 #define POP (*stack_ptr--)
382 /* Discard n values from the execution stack without examining them. */
383 #define DISCARD(n) (stack_ptr -= (n))
385 /* Get the value which is at the top of the execution stack,
   without removing it.  Usable as an lvalue: TOP = x replaces the top. */
387 #define TOP (*stack_ptr)
389 /* See comment before the big switch in execute_optimized_program(). */
390 #if defined HAVE_BDWGC && defined EF_USE_BDWGC
393 #define GCPRO_STACK (gcpro1.nvars = stack_ptr - stack_beg)
396 /* The actual interpreter for byte code.
397 This function has been seriously optimized for performance.
398 Don't change the constructs unless you are willing to do
399 real benchmarking and profiling work -- martin */
402 execute_optimized_program(const Opbyte *program,
403 int stack_depth, Lisp_Object *constants_data)
405 /* This function can GC */
406 REGISTER const Opbyte *program_ptr = program;
407 /* C99 here we come */
408 Lisp_Object stack_beg[stack_depth + 1];
409 REGISTER Lisp_Object *stack_ptr = stack_beg;
410 int speccount = specpdl_depth();
413 #ifdef BYTE_CODE_METER
414 Opcode this_opcode = 0;
418 #ifdef ERROR_CHECK_BYTE_CODE
419 Lisp_Object *stack_end = stack_beg + stack_depth;
422 /* We used to GCPRO the whole interpreter stack before entering this while
423 loop (21.5.14 and before), but that interferes with collection of weakly
424 referenced objects. Although strictly speaking there's no promise that
425 weak references will disappear by any given point in time, they should
426 be collected at the first opportunity. Waiting until exit from the
427 function caused test failures because "stale" objects "above" the top of
428 the stack were still GCPROed, and they were not getting collected until
429 after exit from the (byte-compiled) test!
431 Now the idea is to dynamically adjust the array of GCPROed objects to
432 include only the "active" region of the stack.
434 We use the "GCPRO1 the array base and set the nvars member" method. It
435 would be slightly inefficient but correct to use GCPRO1_ARRAY here. It
436 would just redundantly set nvars.
437 #### Maybe it would be clearer to use GCPRO1_ARRAY and do GCPRO_STACK
440 GCPRO_STACK is something of a misnomer, because it suggests that a
441 struct gcpro is initialized each time. This is false; only the nvars
442 member of a single struct gcpro is being adjusted. This works because
443 each time a new object is assigned to a stack location, the old object
444 loses its reference and is effectively UNGCPROed, and the new object is
445 automatically GCPROed as long as nvars is correct. Only when we
446 return from the interpreter do we need to finalize the struct gcpro
447 itself, and that's done at case Breturn.
449 GCPRO1 (stack_ptr[1]);
452 REGISTER Opcode opcode = READ_UINT_1;
454 /* Get nvars right before maybe signaling. */
456 #ifdef ERROR_CHECK_BYTE_CODE
457 if (stack_ptr > stack_end)
458 invalid_byte_code_error("byte code stack overflow");
459 if (stack_ptr < stack_beg)
460 invalid_byte_code_error("byte code stack underflow");
461 check_opcode(opcode);
464 #ifdef BYTE_CODE_METER
465 prev_opcode = this_opcode;
466 this_opcode = opcode;
467 meter_code(prev_opcode, this_opcode);
470 switch ((unsigned int)opcode) {
474 if (opcode >= Bconstant)
475 PUSH(constants_data[opcode - Bconstant]);
477 stack_ptr = execute_rare_opcode(
478 stack_ptr, program_ptr, opcode);
487 n = opcode - Bvarref;
493 n = READ_UINT_1; /* most common */
496 Lisp_Object symbol = constants_data[n];
497 Lisp_Object value = XSYMBOL(symbol)->value;
498 if (SYMBOL_VALUE_MAGIC_P(value))
499 value = Fsymbol_value(symbol);
510 n = opcode - Bvarset;
516 n = READ_UINT_1; /* most common */
519 Lisp_Object symbol = constants_data[n];
520 Lisp_Symbol *symbol_ptr = XSYMBOL(symbol);
521 Lisp_Object old_value = symbol_ptr->value;
522 Lisp_Object new_value = POP;
523 if (!SYMBOL_VALUE_MAGIC_P(old_value)
524 || UNBOUNDP(old_value))
525 symbol_ptr->value = new_value;
527 Fset(symbol, new_value);
537 n = opcode - Bvarbind;
543 n = READ_UINT_1; /* most common */
546 Lisp_Object symbol = constants_data[n];
547 Lisp_Symbol *symbol_ptr = XSYMBOL(symbol);
548 Lisp_Object old_value = symbol_ptr->value;
549 Lisp_Object new_value = POP;
550 if (!SYMBOL_VALUE_MAGIC_P(old_value)
551 || UNBOUNDP(old_value)) {
552 specpdl_ptr->symbol = symbol;
553 specpdl_ptr->old_value = old_value;
554 specpdl_ptr->func = 0;
556 specpdl_depth_counter++;
558 symbol_ptr->value = new_value;
560 #ifdef ERROR_CHECK_CATCH
561 check_specbind_stack_sanity ();
564 specbind_magic(symbol, new_value);
577 n = (opcode < Bcall + 6 ? opcode - Bcall :
578 opcode == Bcall + 6 ? READ_UINT_1 : READ_UINT_2);
580 #ifdef BYTE_CODE_METER
581 if (byte_metering_on && SYMBOLP(TOP)) {
583 Fget(TOP, Qbyte_code_meter, Qnil);
585 Fput(TOP, Qbyte_code_meter,
586 make_int(XINT(val) + 1));
589 TOP = Ffuncall(n + 1, &TOP);
600 UNBIND_TO(specpdl_depth() -
601 (opcode < Bunbind + 6 ? opcode - Bunbind :
603 Bunbind + 6 ? READ_UINT_1 : READ_UINT_2));
624 case Bgotoifnilelsepop:
633 case Bgotoifnonnilelsepop:
660 case BRgotoifnilelsepop:
669 case BRgotoifnonnilelsepop:
680 #ifdef ERROR_CHECK_BYTE_CODE
681 /* Binds and unbinds are supposed to be compiled balanced. */
682 if (specpdl_depth() != speccount)
683 invalid_byte_code_error
684 ("unbalanced specbinding stack");
694 Lisp_Object arg = TOP;
700 PUSH(constants_data[READ_UINT_2]);
704 TOP = CONSP(TOP) ? XCAR(TOP) : Fcar(TOP);
708 TOP = CONSP(TOP) ? XCDR(TOP) : Fcdr(TOP);
712 /* To unbind back to the beginning of this frame. Not
713 used yet, but will be needed for tail-recursion
715 unbind_to(speccount, Qnil);
719 Lisp_Object arg = POP;
720 TOP = Fcar(Fnthcdr(TOP, arg));
725 TOP = SYMBOLP(TOP) ? Qt : Qnil;
729 TOP = CONSP(TOP) ? Qt : Qnil;
733 TOP = STRINGP(TOP) ? Qt : Qnil;
737 TOP = LISTP(TOP) ? Qt : Qnil;
741 TOP = NUMBERP(TOP) ? Qt : Qnil;
745 #if defined HAVE_MPZ && (defined WITH_GMP || defined WITH_MP)
746 TOP = INTEGERP(TOP) ? Qt : Qnil;
748 TOP = INTP(TOP) ? Qt : Qnil;
753 Lisp_Object arg = POP;
754 TOP = EQ_WITH_EBOLA_NOTICE(TOP, arg) ? Qt : Qnil;
759 TOP = NILP(TOP) ? Qt : Qnil;
763 Lisp_Object arg = POP;
764 TOP = Fcons(TOP, arg);
769 TOP = Fcons(TOP, Qnil);
780 n = opcode - (Blist1 - 1);
783 Lisp_Object list = Qnil;
785 list = Fcons(TOP, list);
797 n = opcode - (Bconcat2 - 2);
805 TOP = Fconcat(n, &TOP);
813 Lisp_Object arg2 = POP;
814 Lisp_Object arg1 = POP;
815 TOP = Faset(TOP, arg1, arg2);
820 TOP = Fsymbol_value(TOP);
823 case Bsymbol_function:
824 TOP = Fsymbol_function(TOP);
828 Lisp_Object arg = POP;
829 TOP = Fget(TOP, arg, Qnil);
842 Lisp_Object arg = POP;
843 if (ent_binrel(ASE_BINARY_REL_EQUALP, TOP, arg))
851 Lisp_Object arg = POP;
852 if (ent_binrel(ASE_BINARY_REL_GREATERP, TOP, arg))
860 Lisp_Object arg = POP;
861 if (ent_binrel(ASE_BINARY_REL_LESSP, TOP, arg))
869 Lisp_Object arg = POP;
870 if (ent_binrel2(ASE_BINARY_REL_LESSP,
871 ASE_BINARY_REL_EQUALP, TOP, arg))
879 Lisp_Object arg = POP;
880 if (ent_binrel2(ASE_BINARY_REL_GREATERP,
881 ASE_BINARY_REL_EQUALP, TOP, arg))
889 TOP = ent_unop_neg(TOP);
894 TOP = bytecode_nconc2(&TOP);
898 Lisp_Object arg = POP;
899 TOP = ent_binop(ASE_BINARY_OP_SUM, TOP, arg);
903 Lisp_Object arg = POP;
904 TOP = ent_binop(ASE_BINARY_OP_DIFF, TOP, arg);
908 Lisp_Object arg = POP;
909 TOP = ent_binop(ASE_BINARY_OP_PROD, TOP, arg);
913 Lisp_Object arg = POP;
914 TOP = ent_binop(ASE_BINARY_OP_DIV, TOP, arg);
918 Lisp_Object arg = POP;
919 if (!ent_binrel(ASE_BINARY_REL_GREATERP, TOP, arg))
924 Lisp_Object arg = POP;
925 if (!ent_binrel(ASE_BINARY_REL_LESSP, TOP, arg))
931 PUSH(make_int(BUF_PT(current_buffer)));
935 TOP = Finsert(1, &TOP);
941 TOP = Finsert(n, &TOP);
945 Lisp_Object arg = POP;
946 TOP = Faref(TOP, arg);
951 Lisp_Object arg = POP;
952 TOP = Fmemq(TOP, arg);
957 Lisp_Object arg = POP;
958 TOP = Fset(TOP, arg);
963 Lisp_Object arg = POP;
964 TOP = Fequal(TOP, arg);
969 Lisp_Object arg = POP;
970 TOP = Fnthcdr(TOP, arg);
975 Lisp_Object arg = POP;
976 TOP = Felt(TOP, arg);
981 Lisp_Object arg = POP;
982 TOP = Fmember(TOP, arg);
987 TOP = Fgoto_char(TOP, Qnil);
990 case Bcurrent_buffer: {
992 XSETBUFFER(buffer, current_buffer);
998 TOP = Fset_buffer(TOP);
1002 PUSH(make_int(BUF_ZV(current_buffer)));
1006 PUSH(make_int(BUF_BEGV(current_buffer)));
1009 case Bskip_chars_forward: {
1010 Lisp_Object arg = POP;
1011 TOP = Fskip_chars_forward(TOP, arg, Qnil);
1016 Lisp_Object arg = POP;
1017 TOP = Fassq(TOP, arg);
1022 Lisp_Object arg = POP;
1023 TOP = Fsetcar(TOP, arg);
1028 Lisp_Object arg = POP;
1029 TOP = Fsetcdr(TOP, arg);
1034 TOP = bytecode_nreverse(TOP);
1038 TOP = CONSP(TOP) ? XCAR(TOP) : Qnil;
1042 TOP = CONSP(TOP) ? XCDR(TOP) : Qnil;
1046 Lisp_Object op = TOP;
1047 Lisp_Object orig_fun, fun;
1050 orig_fun = fun = XCAR(op);
1052 if (SYMBOLP (fun) && !EQ(fun, Qunbound) &&
1053 (fun = XSYMBOL(fun)->function, SYMBOLP(fun)))
1054 fun = indirect_function(fun, 1);
1055 if (SUBRP(fun) && XSUBR(fun)->max_args == UNEVALLED) {
1056 Lisp_Object(*subr)(Lisp_Object) =
1057 (Lisp_Object(*)(Lisp_Object))
1058 subr_function(XSUBR(fun));
1061 fprintf(stderr, "Uh-oh!\nSuicide?\n");
1063 fprintf(stderr, "YESSSSSS!\n");
1066 fprintf(stderr, "tomorrow maybe\n");
1074 /* It makes a worthwhile performance difference (5%) to shunt
1075 lesser-used opcodes off to a subroutine, to keep the switch in
1076 execute_optimized_program small. If you REALLY care about
1077 performance, you want to keep your heavily executed code away from
1078 rarely executed code, to minimize cache misses.
1080 Don't make this function static, since then the compiler might inline it.
1082 How about __attribute__((noinline)) then? -hrop */
1084 execute_rare_opcode(Lisp_Object *stack_ptr,
1085 const Opbyte *program_ptr, Opcode opcode)
1087 switch ((unsigned int)opcode) {
1089 case Bsave_excursion:
1090 record_unwind_protect(save_excursion_restore,
1091 save_excursion_save());