Merge pull request #6288 from dearblue/closing
[mruby.git] / src / vm.c
blob3a30f65bdd62e79454e6bcb2cda15bb550dc9bf2
1 /*
2 ** vm.c - virtual machine for mruby
3 **
4 ** See Copyright Notice in mruby.h
5 */
7 #include <mruby.h>
8 #include <mruby/array.h>
9 #include <mruby/class.h>
10 #include <mruby/hash.h>
11 #include <mruby/irep.h>
12 #include <mruby/numeric.h>
13 #include <mruby/proc.h>
14 #include <mruby/range.h>
15 #include <mruby/string.h>
16 #include <mruby/variable.h>
17 #include <mruby/error.h>
18 #include <mruby/opcode.h>
19 #include "value_array.h"
20 #include <mruby/throw.h>
21 #include <mruby/dump.h>
22 #include <mruby/internal.h>
23 #include <mruby/presym.h>
25 #ifdef MRB_NO_STDIO
26 #if defined(__cplusplus)
27 extern "C" {
28 #endif
29 void abort(void);
30 #if defined(__cplusplus)
31 } /* extern "C" */
32 #endif
33 #endif
35 #define STACK_INIT_SIZE 128
36 #define CALLINFO_INIT_SIZE 32
38 /* Define amount of linear stack growth. */
39 #ifndef MRB_STACK_GROWTH
40 #define MRB_STACK_GROWTH 128
41 #endif
43 /* Maximum recursive depth. Should be set lower on memory constrained systems. */
44 #ifdef __clang__
45 #if __has_feature(address_sanitizer) && !defined(__SANITIZE_ADDRESS__)
46 #define __SANITIZE_ADDRESS__
47 #endif
48 #endif
50 #ifndef MRB_CALL_LEVEL_MAX
51 #if defined(__SANITIZE_ADDRESS__)
52 #define MRB_CALL_LEVEL_MAX 128
53 #else
54 #define MRB_CALL_LEVEL_MAX 512
55 #endif
56 #endif
58 /* Maximum stack depth. Should be set lower on memory constrained systems.
59 The value below allows about 60000 recursive calls in the simplest case. */
60 #ifndef MRB_STACK_MAX
61 #define MRB_STACK_MAX (0x40000 - MRB_STACK_GROWTH)
62 #endif
64 #ifdef VM_DEBUG
65 # define DEBUG(x) (x)
66 #else
67 # define DEBUG(x)
68 #endif
71 #ifndef MRB_GC_FIXED_ARENA
static void
mrb_gc_arena_shrink(mrb_state *mrb, int idx)
{
  /* Restore the GC arena index to `idx`, then shrink the arena buffer when
   * it is mostly unused (less than a quarter occupied), but never below
   * MRB_GC_ARENA_SIZE. */
  mrb_gc *gc = &mrb->gc;
  int capa = gc->arena_capa;

  gc->arena_idx = idx;
  if (idx < capa / 4) {
    capa >>= 2; /* quarter the capacity */
    if (capa < MRB_GC_ARENA_SIZE) {
      capa = MRB_GC_ARENA_SIZE;
    }
    if (capa != gc->arena_capa) {
      gc->arena = (struct RBasic**)mrb_realloc(mrb, gc->arena, sizeof(struct RBasic*)*capa);
      gc->arena_capa = capa;
    }
  }
}
90 #else
91 #define mrb_gc_arena_shrink(mrb, idx) mrb_gc_arena_restore(mrb, idx)
92 #endif
94 #define CALL_MAXARGS 15
95 #define CALL_VARARGS (CALL_MAXARGS<<4 | CALL_MAXARGS)
97 static inline void
98 stack_clear(mrb_value *from, size_t count)
100 while (count-- > 0) {
101 SET_NIL_VALUE(*from);
102 from++;
106 static inline void
107 stack_copy(mrb_value *dst, const mrb_value *src, size_t size)
109 if (!src) return;
110 memcpy(dst, src, sizeof(mrb_value)*size);
static void
stack_init(mrb_state *mrb)
{
  /* Allocate the initial value stack and callinfo stack for the current
   * context and set up the bottom callinfo frame. */
  struct mrb_context *c = mrb->c;

  /* mrb_assert(mrb->stack == NULL); */
  c->stbase = (mrb_value*)mrb_malloc(mrb, STACK_INIT_SIZE * sizeof(mrb_value));
  c->stend = c->stbase + STACK_INIT_SIZE;

  /* mrb_assert(ci == NULL); */
  static const mrb_callinfo ci_zero = { 0 };
  c->cibase = (mrb_callinfo*)mrb_malloc(mrb, CALLINFO_INIT_SIZE * sizeof(mrb_callinfo));
  c->ciend = c->cibase + CALLINFO_INIT_SIZE;
  c->cibase[0] = ci_zero;
  c->ci = c->cibase;
  c->ci->u.target_class = mrb->object_class;
  c->ci->stack = c->stbase;
}
static inline void
envadjust(mrb_state *mrb, mrb_value *oldbase, mrb_value *newbase)
{
  /* After the value stack has been reallocated, rebase every callinfo frame
   * and every on-stack environment so their pointers refer into the new
   * block. */
  mrb_callinfo *ci = mrb->c->cibase;
  ptrdiff_t delta = newbase - oldbase;

  if (delta == 0) return;
  while (ci <= mrb->c->ci) {
    struct REnv *e = mrb_vm_ci_env(ci);

    if (e) {
      mrb_assert(e->cxt == mrb->c && MRB_ENV_ONSTACK_P(e));
      mrb_assert(e->stack == ci->stack);
      e->stack += delta;
    }
    ci->stack += delta;
    ci++;
  }
}
/** def rec; $deep =+ 1; if $deep > 1000; return 0; end; rec; end **/

static void
stack_extend_alloc(mrb_state *mrb, mrb_int room)
{
  /* Grow the value stack so at least `room` slots are available above the
   * current frame, then rebase all pointers that refer into it. */
  mrb_value *oldbase = mrb->c->stbase;
  mrb_value *newstack;
  size_t oldsize = mrb->c->stend - mrb->c->stbase;
  size_t size = oldsize;
  size_t off = mrb->c->ci->stack ? mrb->c->stend - mrb->c->ci->stack : 0;

  if (off > size) size = off;
#ifdef MRB_STACK_EXTEND_DOUBLING
  if ((size_t)room <= size)
    size *= 2;
  else
    size += room;
#else
  /* Use linear stack growth.
     It is slightly slower than doubling the stack space,
     but it saves memory on small devices. */
  if (room <= MRB_STACK_GROWTH)
    size += MRB_STACK_GROWTH;
  else
    size += room;
#endif

  newstack = (mrb_value*)mrb_realloc(mrb, mrb->c->stbase, sizeof(mrb_value) * size);
  stack_clear(&(newstack[oldsize]), size - oldsize);
  envadjust(mrb, oldbase, newstack);
  mrb->c->stbase = newstack;
  mrb->c->stend = mrb->c->stbase + size;

  /* Raise an exception if the new stack size will be too large,
     to prevent infinite recursion. However, do this only after resizing the stack, so mrb_raise has stack space to work with. */
  if (size > MRB_STACK_MAX) {
    mrb_exc_raise(mrb, mrb_obj_value(mrb->stack_err));
  }
}
193 static inline void
194 stack_extend(mrb_state *mrb, mrb_int room)
196 if (!mrb->c->ci->stack || mrb->c->ci->stack + room >= mrb->c->stend) {
197 stack_extend_alloc(mrb, room);
/* Public wrapper for C extensions: ensure `room` free stack slots. */
MRB_API void
mrb_stack_extend(mrb_state *mrb, mrb_int room)
{
  stack_extend(mrb, room);
}
static void
stack_extend_adjust(mrb_state *mrb, mrb_int room, const mrb_value **argp)
{
  /* Like stack_extend(), but if *argp points into the VM value stack,
   * rebase it after a possible reallocation so the caller's argument
   * pointer stays valid. */
  const struct mrb_context *c = mrb->c;
  ptrdiff_t voff = *argp - c->stbase;

  if (voff < 0 || voff >= c->stend - c->stbase) {
    /* argv lives outside the VM stack; no rebasing needed */
    stack_extend(mrb, room);
  }
  else {
    stack_extend(mrb, room);
    *argp = c->stbase + voff;
  }
}
static inline struct REnv*
uvenv(mrb_state *mrb, mrb_int up)
{
  /* Return the environment `up` lexical scopes above the current proc,
   * or NULL when the chain is shorter or the proc has no enclosed env. */
  const struct RProc *proc = mrb->c->ci->proc;
  struct REnv *e;

  while (up--) {
    proc = proc->upper;
    if (!proc) return NULL;
  }
  e = MRB_PROC_ENV(proc);
  if (e) return e; /* proc has enclosed env */
  return NULL;
}
237 static inline const struct RProc*
238 top_proc(mrb_state *mrb, const struct RProc *proc)
240 while (proc->upper) {
241 if (MRB_PROC_SCOPE_P(proc) || MRB_PROC_STRICT_P(proc))
242 return proc;
243 proc = proc->upper;
245 return proc;
/* Install `p` as the frame's proc and point `pc` at its bytecode (NULL for
 * C functions or irep-less procs).  Alias procs must be resolved before
 * calling this — the assert enforces it. */
#define CI_PROC_SET(ci, p) do {\
  ci->proc = p;\
  mrb_assert(!p || !MRB_PROC_ALIAS_P(p));\
  ci->pc = (p && !MRB_PROC_CFUNC_P(p) && p->body.irep) ? p->body.irep->iseq : NULL;\
} while (0)

/* Out-of-line variant of CI_PROC_SET for use outside this file. */
void
mrb_vm_ci_proc_set(mrb_callinfo *ci, const struct RProc *p)
{
  CI_PROC_SET(ci, p);
}
/* ci->u is a union: when it holds an REnv, the target class lives in env->c;
 * otherwise it is stored directly in ci->u.target_class. */
#define CI_TARGET_CLASS(ci) (((ci)->u.env && (ci)->u.env->tt == MRB_TT_ENV)? (ci)->u.env->c : (ci)->u.target_class)

/* Read the frame's target class regardless of which union member is active. */
struct RClass*
mrb_vm_ci_target_class(const mrb_callinfo *ci)
{
  return CI_TARGET_CLASS(ci);
}

/* Store the target class in the attached env if present, else directly in ci. */
void
mrb_vm_ci_target_class_set(mrb_callinfo *ci, struct RClass *tc)
{
  struct REnv *e = ci->u.env;
  if (e && e->tt == MRB_TT_ENV) {
    e->c = tc;
  }
  else {
    ci->u.target_class = tc;
  }
}
/* The env half of the ci->u union, or NULL when it currently holds a class. */
#define CI_ENV(ci) (((ci)->u.env && (ci)->u.env->tt == MRB_TT_ENV)? (ci)->u.env : NULL)

struct REnv*
mrb_vm_ci_env(const mrb_callinfo *ci)
{
  return CI_ENV(ci);
}

static inline void
ci_env_set(mrb_callinfo *ci, struct REnv *e)
{
  /* Attach (or detach, e == NULL) an env while preserving the target class:
   * the class moves into e->c when attaching, and back into
   * ci->u.target_class when detaching. */
  if (ci->u.env) {
    if (ci->u.env->tt == MRB_TT_ENV) {
      if (e) {
        e->c = ci->u.env->c;
        ci->u.env = e;
      }
      else {
        ci->u.target_class = ci->u.env->c;
      }
    }
    else if (e) {
      e->c = ci->u.target_class;
      ci->u.env = e;
    }
  }
  else {
    ci->u.env = e;
  }
}

void
mrb_vm_ci_env_set(mrb_callinfo *ci, struct REnv *e)
{
  ci_env_set(ci, e);
}
MRB_API void
mrb_vm_ci_env_clear(mrb_state *mrb, mrb_callinfo *ci)
{
  /* Detach the frame's env (restoring the target class into ci) and move
   * the env's values off the shared VM stack. */
  struct REnv *e = ci->u.env;
  if (e && e->tt == MRB_TT_ENV) {
    ci->u.target_class = e->c;
    mrb_env_unshare(mrb, e, FALSE);
  }
}
/* Values for mrb_callinfo::cci, recording how the frame was entered. */
#define CINFO_NONE    0 // called method from mruby VM (without C functions)
#define CINFO_SKIP    1 // ignited mruby VM from C
#define CINFO_DIRECT  2 // called method from C
#define CINFO_RESUMED 3 // resumed by `Fiber.yield` (probably the main call is `mrb_fiber_resume()`)

/* Extract the RProc pointer from a block value (NULL unless it is a Proc). */
#define BLK_PTR(b) ((mrb_proc_p(b)) ? mrb_proc_ptr(b) : NULL)
/* Push a new callinfo frame, growing the callinfo stack when full (raising
 * stack_err past MRB_CALL_LEVEL_MAX).  `push_stacks` offsets the new frame's
 * register window from the caller's; `argc` packs n (low nibble) and nk
 * (high nibble). */
static inline mrb_callinfo*
cipush(mrb_state *mrb, mrb_int push_stacks, uint8_t cci, struct RClass *target_class,
       const struct RProc *proc, struct RProc *blk, mrb_sym mid, uint16_t argc)
{
  struct mrb_context *c = mrb->c;
  mrb_callinfo *ci = c->ci;

  if (ci + 1 == c->ciend) {
    ptrdiff_t size = ci - c->cibase;

    if (size > MRB_CALL_LEVEL_MAX) {
      mrb_exc_raise(mrb, mrb_obj_value(mrb->stack_err));
    }
    c->cibase = (mrb_callinfo*)mrb_realloc(mrb, c->cibase, sizeof(mrb_callinfo)*size*2);
    c->ci = c->cibase + size;
    c->ciend = c->cibase + size * 2;
  }
  ci = ++c->ci;
  ci->flags = 0;
  /* A block whose env is exactly the caller's env is a "companion block":
   * mark it so its lifetime is tied to this frame. */
  if (blk && (blk->flags & (MRB_PROC_CFUNC_FL | MRB_PROC_ENVSET | MRB_PROC_ORPHAN)) == MRB_PROC_ENVSET &&
      blk->e.env == ci[-1].u.env) {
    mrb_assert(blk->color != MRB_GC_RED); // no exist red object with env set
    ci->flags = MRB_CI_COMPANION_BLOCK;
    blk->flags |= MRB_PROC_ORPHAN;
  }
  ci->mid = mid;
  CI_PROC_SET(ci, proc);
  ci->stack = ci[-1].stack + push_stacks;
  ci->n = argc & 0xf;
  ci->nk = (argc>>4) & 0xf;
  ci->cci = cci;
  ci->u.target_class = target_class;

  return ci;
}
/* Tear down a finished (non-root) fiber: free its callinfo stack, hand its
 * value stack to a surviving env if one exists, and switch back to the
 * previous (or root) context. */
static void
fiber_terminate(mrb_state *mrb, struct mrb_context *c, mrb_callinfo *ci)
{
  mrb_assert(c != mrb->root_c);

  struct REnv *env = CI_ENV(ci);
  mrb_assert(env == NULL || MRB_ENV_LEN(env) <= c->stend - ci->stack);

  c->status = MRB_FIBER_TERMINATED;
  mrb_free(mrb, c->cibase);
  c->cibase = c->ciend = c->ci = NULL;
  mrb_value *stack = c->stbase;
  c->stbase = c->stend = NULL;

  if (!env) {
    mrb_free(mrb, stack);
  }
  else {
    size_t len = (size_t)MRB_ENV_LEN(env);
    if (len == 0) {
      env->stack = NULL;
      MRB_ENV_CLOSE(env);
      mrb_free(mrb, stack);
    }
    else {
      mrb_assert(stack == env->stack);
      mrb_write_barrier(mrb, (struct RBasic*)env);

      // don't call MRB_ENV_CLOSE() before mrb_realloc().
      // the reason is that env->stack may be freed by mrb_realloc() if MRB_DEBUG + MRB_GC_STRESS are enabled.
      // realloc() on a freed heap will cause double-free.
      stack = (mrb_value*)mrb_realloc(mrb, stack, len * sizeof(mrb_value));
      if (mrb_object_dead_p(mrb, (struct RBasic*)env)) {
        mrb_free(mrb, stack);
      }
      else {
        env->stack = stack;
        MRB_ENV_CLOSE(env);
      }
    }
  }

  /* fiber termination should automatic yield or transfer to root */
  mrb->c = c->prev;
  if (!mrb->c) mrb->c = mrb->root_c;
  else c->prev = NULL;
  mrb->c->status = MRB_FIBER_RUNNING;
}
/* Move an on-stack env's values into their own heap block and close it.
 * Returns FALSE only on allocation failure with `noraise` set; otherwise
 * raises nomem_err or returns TRUE. */
mrb_bool
mrb_env_unshare(mrb_state *mrb, struct REnv *e, mrb_bool noraise)
{
  if (e == NULL) return TRUE;
  if (!MRB_ENV_ONSTACK_P(e)) return TRUE;

  size_t len = (size_t)MRB_ENV_LEN(e);
  if (len == 0) {
    e->stack = NULL;
    MRB_ENV_CLOSE(e);
    return TRUE;
  }

  /* mrb_malloc_simple() may trigger GC; detect that by watching the live
   * count, and bail out if `e` itself was collected. */
  size_t live = mrb->gc.live;
  mrb_value *p = (mrb_value*)mrb_malloc_simple(mrb, sizeof(mrb_value)*len);
  if (live != mrb->gc.live && mrb_object_dead_p(mrb, (struct RBasic*)e)) {
    // The e object is now subject to GC inside mrb_malloc_simple().
    // Moreover, if NULL is returned due to mrb_malloc_simple() failure, simply ignore it.
    mrb_free(mrb, p);
    return TRUE;
  }
  else if (p) {
    stack_copy(p, e->stack, len);
    e->stack = p;
    MRB_ENV_CLOSE(e);
    mrb_write_barrier(mrb, (struct RBasic*)e);
    return TRUE;
  }
  else {
    /* allocation failed: empty the env so it is at least consistent */
    e->stack = NULL;
    MRB_ENV_CLOSE(e);
    MRB_ENV_SET_LEN(e, 0);
    MRB_ENV_SET_BIDX(e, 0);
    if (!noraise) {
      mrb_exc_raise(mrb, mrb_obj_value(mrb->nomem_err));
    }
    return FALSE;
  }
}
/* Pop the current callinfo frame, unsharing its env first.  Returns the new
 * (caller's) frame. */
static inline mrb_callinfo*
cipop(mrb_state *mrb)
{
  struct mrb_context *c = mrb->c;
  mrb_callinfo *ci = c->ci;
  struct REnv *env = CI_ENV(ci);

  ci_env_set(ci, NULL); // make possible to free env by GC if not needed
  if (env && !mrb_env_unshare(mrb, env, TRUE)) {
    c->ci--; // exceptions are handled at the method caller; see #3087
    mrb_exc_raise(mrb, mrb_obj_value(mrb->nomem_err));
  }
  c->ci--;
  return c->ci;
}
/* Run `body` under a fresh jump buffer.  On exception, unwind the callinfo
 * frames pushed by `body`, clear mrb->exc, set *error, and return the
 * exception object instead of propagating it. */
MRB_API mrb_value
mrb_protect_error(mrb_state *mrb, mrb_protect_error_func *body, void *userdata, mrb_bool *error)
{
  struct mrb_jmpbuf *prev_jmp = mrb->jmp;
  struct mrb_jmpbuf c_jmp;
  mrb_value result;
  int ai = mrb_gc_arena_save(mrb);
  const struct mrb_context *c = mrb->c;
  ptrdiff_t ci_index = c->ci - c->cibase;

  if (error) { *error = FALSE; }

  MRB_TRY(&c_jmp) {
    mrb->jmp = &c_jmp;
    result = body(mrb, userdata);
    mrb->jmp = prev_jmp;
  }
  MRB_CATCH(&c_jmp) {
    mrb->jmp = prev_jmp;
    result = mrb_obj_value(mrb->exc);
    mrb->exc = NULL;
    if (error) { *error = TRUE; }
    if (mrb->c == c) {
      while (c->ci - c->cibase > ci_index) {
        cipop(mrb);
      }
    }
    else {
      // It was probably switched by mrb_fiber_resume().
      // Simply destroy all successive CINFO_DIRECTs once the fiber has been switched.
      c = mrb->c;
      while (c->ci > c->cibase && c->ci->cci == CINFO_DIRECT) {
        cipop(mrb);
      }
    }
  }
  MRB_END_EXC(&c_jmp);

  mrb_gc_arena_restore(mrb, ai);
  mrb_gc_protect(mrb, result);
  return result;
}
519 void mrb_exc_set(mrb_state *mrb, mrb_value exc);
520 static mrb_value mrb_run(mrb_state *mrb, const struct RProc* proc, mrb_value self);
522 #ifndef MRB_FUNCALL_ARGC_MAX
523 #define MRB_FUNCALL_ARGC_MAX 16
524 #endif
/* Call method `name` on `self` with up to MRB_FUNCALL_ARGC_MAX variadic
 * mrb_value arguments; raises ArgumentError beyond the limit. */
MRB_API mrb_value
mrb_funcall(mrb_state *mrb, mrb_value self, const char *name, mrb_int argc, ...)
{
  mrb_value argv[MRB_FUNCALL_ARGC_MAX];
  va_list ap;
  mrb_sym mid = mrb_intern_cstr(mrb, name);

  if (argc > MRB_FUNCALL_ARGC_MAX) {
    mrb_raise(mrb, E_ARGUMENT_ERROR, "Too long arguments. (limit=" MRB_STRINGIZE(MRB_FUNCALL_ARGC_MAX) ")");
  }

  va_start(ap, argc);
  for (mrb_int i = 0; i < argc; i++) {
    argv[i] = va_arg(ap, mrb_value);
  }
  va_end(ap);
  return mrb_funcall_argv(mrb, self, mid, argc, argv);
}
/* Same as mrb_funcall() but takes an already-interned symbol. */
MRB_API mrb_value
mrb_funcall_id(mrb_state *mrb, mrb_value self, mrb_sym mid, mrb_int argc, ...)
{
  mrb_value argv[MRB_FUNCALL_ARGC_MAX];
  va_list ap;

  if (argc > MRB_FUNCALL_ARGC_MAX) {
    mrb_raise(mrb, E_ARGUMENT_ERROR, "Too long arguments. (limit=" MRB_STRINGIZE(MRB_FUNCALL_ARGC_MAX) ")");
  }

  va_start(ap, argc);
  for (mrb_int i = 0; i < argc; i++) {
    argv[i] = va_arg(ap, mrb_value);
  }
  va_end(ap);
  return mrb_funcall_argv(mrb, self, mid, argc, argv);
}
563 static mrb_int
564 mrb_ci_kidx(const mrb_callinfo *ci)
566 if (ci->nk == 0) return -1;
567 return (ci->n == CALL_MAXARGS) ? 2 : ci->n + 1;
570 static inline mrb_int
571 mrb_bidx(uint8_t n, uint8_t k)
573 if (n == 15) n = 1;
574 if (k == 15) n += 1;
575 else n += k*2;
576 return n + 1; /* self + args + kargs */
/* Block-argument register index for a frame. */
static inline mrb_int
ci_bidx(mrb_callinfo *ci)
{
  return mrb_bidx(ci->n, ci->nk);
}

/* Public wrapper around ci_bidx(). */
mrb_int
mrb_ci_bidx(mrb_callinfo *ci)
{
  return ci_bidx(ci);
}
mrb_int
mrb_ci_nregs(mrb_callinfo *ci)
{
  /* Number of VM registers the frame needs: at least self+args+kargs+blk,
   * or the proc's declared nregs when larger.  A NULL ci gets the minimal 4. */
  const struct RProc *p;

  if (!ci) return 4;
  mrb_int nregs = ci_bidx(ci) + 1; /* self + args + kargs + blk */
  p = ci->proc;
  if (p && !MRB_PROC_CFUNC_P(p) && p->body.irep && p->body.irep->nregs > nregs) {
    return p->body.irep->nregs;
  }
  return nregs;
}
605 mrb_value mrb_obj_missing(mrb_state *mrb, mrb_value mod);
/* Rewrite the current frame into a `method_missing` call: pack the original
 * arguments into an array, prepend the missed method name, and return the
 * method_missing implementation to invoke.  Raises NoMethodError directly
 * when the receiver only has the default method_missing. */
static mrb_method_t
prepare_missing(mrb_state *mrb, mrb_callinfo *ci, mrb_value recv, mrb_sym mid, mrb_value blk, mrb_bool super)
{
  mrb_sym missing = MRB_SYM(method_missing);
  mrb_value *argv = &ci->stack[1];
  mrb_value args;
  mrb_method_t m;

  /* pack positional arguments */
  if (ci->n == 15) args = argv[0];
  else args = mrb_ary_new_from_values(mrb, ci->n, argv);

  if (mrb_func_basic_p(mrb, recv, missing, mrb_obj_missing)) {
  method_missing:
    if (super) mrb_no_method_error(mrb, mid, args, "no superclass method '%n'", mid);
    else mrb_method_missing(mrb, mid, recv, args);
    /* not reached */
  }
  if (mid != missing) {
    ci->u.target_class = mrb_class(mrb, recv);
  }
  m = mrb_vm_find_method(mrb, ci->u.target_class, &ci->u.target_class, missing);
  if (MRB_METHOD_UNDEF_P(m)) goto method_missing; /* just in case */
  stack_extend(mrb, 4);

  argv = &ci->stack[1]; /* maybe reallocated */
  argv[0] = args;
  if (ci->nk == 0) {
    argv[1] = blk;
  }
  else {
    mrb_assert(ci->nk == 15);
    argv[1] = argv[ci->n];
    argv[2] = blk;
  }
  ci->n = CALL_MAXARGS;
  /* ci->nk is already set to zero or CALL_MAXARGS */
  mrb_ary_unshift(mrb, args, mrb_symbol_value(mid));
  ci->mid = missing;
  return m;
}
/* Copy C-side call arguments into the VM stack for the frame `ci`, packing
 * them into an array when argc >= CALL_MAXARGS.  The block value always
 * follows the (possibly packed) arguments. */
static void
funcall_args_capture(mrb_state *mrb, int stoff, mrb_int argc, const mrb_value *argv, mrb_value block, mrb_callinfo *ci)
{
  if (argc < 0 || argc > INT32_MAX) {
    mrb_raisef(mrb, E_ARGUMENT_ERROR, "negative or too big argc for funcall (%i)", argc);
  }

  ci->nk = 0; /* funcall does not support keyword arguments */
  if (argc < CALL_MAXARGS) {
    mrb_int extends = stoff + argc + 2 /* self + block */;
    stack_extend_adjust(mrb, extends, &argv);

    mrb_value *args = mrb->c->ci->stack + stoff + 1 /* self */;
    stack_copy(args, argv, argc);
    args[argc] = block;
    ci->n = (uint8_t)argc;
  }
  else {
    /* pack all arguments into one array register */
    int extends = stoff + 3 /* self + splat + block */;
    stack_extend_adjust(mrb, extends, &argv);

    mrb_value *args = mrb->c->ci->stack + stoff + 1 /* self */;
    args[0] = mrb_ary_new_from_values(mrb, argc, argv);
    args[1] = block;
    ci->n = CALL_MAXARGS;
  }
}
/* Coerce a non-proc, non-nil block argument with #to_proc. */
static inline mrb_value
ensure_block(mrb_state *mrb, mrb_value blk)
{
  if (!mrb_nil_p(blk) && !mrb_proc_p(blk)) {
    blk = mrb_type_convert(mrb, blk, MRB_TT_PROC, MRB_SYM(to_proc));
    /* The stack might have been reallocated during mrb_type_convert(), see #3622 */
  }
  return blk;
}
/* Call method `mid` on `self` with an explicit argument array and block.
 * When no jump buffer is installed yet (called from bare C), install one
 * and re-enter; an exception then becomes the returned value. */
MRB_API mrb_value
mrb_funcall_with_block(mrb_state *mrb, mrb_value self, mrb_sym mid, mrb_int argc, const mrb_value *argv, mrb_value blk)
{
  mrb_value val;
  int ai = mrb_gc_arena_save(mrb);

  if (!mrb->jmp) {
    struct mrb_jmpbuf c_jmp;
    ptrdiff_t nth_ci = mrb->c->ci - mrb->c->cibase;

    MRB_TRY(&c_jmp) {
      mrb->jmp = &c_jmp;
      /* recursive call */
      val = mrb_funcall_with_block(mrb, self, mid, argc, argv, blk);
      mrb->jmp = NULL;
    }
    MRB_CATCH(&c_jmp) { /* error */
      while (nth_ci < (mrb->c->ci - mrb->c->cibase)) {
        cipop(mrb);
      }
      mrb->jmp = 0;
      val = mrb_obj_value(mrb->exc);
    }
    MRB_END_EXC(&c_jmp);
    mrb->jmp = NULL;
  }
  else {
    mrb_method_t m;
    mrb_callinfo *ci = mrb->c->ci;
    mrb_int n = mrb_ci_nregs(ci);

    if (!mrb->c->stbase) {
      stack_init(mrb);
    }
    if (ci - mrb->c->cibase > MRB_CALL_LEVEL_MAX) {
      mrb_exc_raise(mrb, mrb_obj_value(mrb->stack_err));
    }
    blk = ensure_block(mrb, blk);
    ci = cipush(mrb, n, CINFO_DIRECT, NULL, NULL, BLK_PTR(blk), 0, 0);
    funcall_args_capture(mrb, 0, argc, argv, blk, ci);
    ci->u.target_class = mrb_class(mrb, self);
    m = mrb_vm_find_method(mrb, ci->u.target_class, &ci->u.target_class, mid);
    if (MRB_METHOD_UNDEF_P(m)) {
      m = prepare_missing(mrb, ci, self, mid, mrb_nil_value(), FALSE);
    }
    else {
      ci->mid = mid;
    }
    ci->proc = MRB_METHOD_PROC_P(m) ? MRB_METHOD_PROC(m) : NULL;

    if (MRB_METHOD_CFUNC_P(m)) {
      ci->stack[0] = self;
      val = MRB_METHOD_CFUNC(m)(mrb, self);
      cipop(mrb);
    }
    else {
      /* handle alias */
      if (MRB_PROC_ALIAS_P(ci->proc)) {
        ci->mid = ci->proc->body.mid;
        ci->proc = ci->proc->upper;
      }
      ci->cci = CINFO_SKIP;
      val = mrb_run(mrb, ci->proc, self);
    }
  }
  mrb_gc_arena_restore(mrb, ai);
  mrb_gc_protect(mrb, val);
  return val;
}
/* mrb_funcall_with_block() without a block. */
MRB_API mrb_value
mrb_funcall_argv(mrb_state *mrb, mrb_value self, mrb_sym mid, mrb_int argc, const mrb_value *argv)
{
  return mrb_funcall_with_block(mrb, self, mid, argc, argv, mrb_nil_value());
}
/* Raise ArgumentError when a method declared no-arg received positional
 * arguments or a non-empty keyword hash. */
static void
check_method_noarg(mrb_state *mrb, const mrb_callinfo *ci)
{
  mrb_int argc = ci->n == CALL_MAXARGS ? RARRAY_LEN(ci->stack[1]) : ci->n;
  if (ci->nk > 0) {
    mrb_value kdict = ci->stack[mrb_ci_kidx(ci)];
    if (!(mrb_hash_p(kdict) && mrb_hash_empty_p(mrb, kdict))) {
      argc++;
    }
  }
  if (argc > 0) {
    mrb_argnum_error(mrb, argc, 0, 0);
  }
}
/* Re-target the CURRENT frame to run proc `p` on `self` (tail-call style):
 * C procs run immediately; irep procs get their register window prepared
 * and a dummy frame pushed, leaving the VM loop to execute them. */
static mrb_value
exec_irep(mrb_state *mrb, mrb_value self, const struct RProc *p)
{
  mrb_callinfo *ci = mrb->c->ci;
  mrb_int keep, nregs;

  ci->stack[0] = self;
  /* handle alias */
  if (MRB_PROC_ALIAS_P(p)) {
    ci->mid = p->body.mid;
    p = p->upper;
  }
  CI_PROC_SET(ci, p);
  if (MRB_PROC_CFUNC_P(p)) {
    if (MRB_PROC_NOARG_P(p) && (ci->n > 0 || ci->nk > 0)) {
      check_method_noarg(mrb, ci);
    }
    return MRB_PROC_CFUNC(p)(mrb, self);
  }
  nregs = p->body.irep->nregs;
  keep = ci_bidx(ci)+1;
  if (nregs < keep) {
    stack_extend(mrb, keep);
  }
  else {
    stack_extend(mrb, nregs);
    stack_clear(ci->stack+keep, nregs-keep); /* clear registers beyond the arguments */
  }

  cipush(mrb, 0, 0, NULL, NULL, NULL, 0, 0);

  return self;
}
/* Run proc `p` on `self` from C.  Inside the VM (CINFO_NONE) this defers to
 * exec_irep(); otherwise it runs the proc to completion here and re-raises
 * any pending exception. */
mrb_value
mrb_exec_irep(mrb_state *mrb, mrb_value self, struct RProc *p)
{
  mrb_callinfo *ci = mrb->c->ci;
  if (ci->cci == CINFO_NONE) {
    return exec_irep(mrb, self, p);
  }
  else {
    mrb_value ret;
    if (MRB_PROC_CFUNC_P(p)) {
      if (MRB_PROC_NOARG_P(p) && (ci->n > 0 || ci->nk > 0)) {
        check_method_noarg(mrb, ci);
      }
      cipush(mrb, 0, CINFO_DIRECT, CI_TARGET_CLASS(ci), p, NULL, ci->mid, ci->n|(ci->nk<<4));
      ret = MRB_PROC_CFUNC(p)(mrb, self);
      cipop(mrb);
    }
    else {
      mrb_int keep = ci_bidx(ci) + 1; /* receiver + block */
      ret = mrb_top_run(mrb, p, self, keep);
    }
    if (mrb->exc && mrb->jmp) {
      mrb_exc_raise(mrb, mrb_obj_value(mrb->exc));
    }
    return ret;
  }
}
840 /* 15.3.1.3.4 */
841 /* 15.3.1.3.44 */
843 * call-seq:
844 * obj.send(symbol [, args...]) -> obj
845 * obj.__send__(symbol [, args...]) -> obj
847 * Invokes the method identified by _symbol_, passing it any
848 * arguments specified. You can use <code>__send__</code> if the name
849 * +send+ clashes with an existing method in _obj_.
851 * class Klass
852 * def hello(*args)
853 * "Hello " + args.join(' ')
854 * end
855 * end
856 * k = Klass.new
857 * k.send :hello, "gentle", "readers" #=> "Hello gentle readers"
/* Implementation of Object#send/__send__: resolve the target method from the
 * first argument, then shift the remaining arguments down one register and
 * dispatch in place (C frames fall back to mrb_funcall_with_block). */
mrb_value
mrb_f_send(mrb_state *mrb, mrb_value self)
{
  mrb_sym name;
  mrb_value block, *regs;
  mrb_method_t m;
  struct RClass *c;
  mrb_callinfo *ci = mrb->c->ci;
  int n = ci->n;

  if (ci->cci > CINFO_NONE) {
  funcall:;
    const mrb_value *argv;
    mrb_int argc;
    mrb_get_args(mrb, "n*&", &name, &argv, &argc, &block);
    return mrb_funcall_with_block(mrb, self, name, argc, argv, block);
  }

  regs = mrb->c->ci->stack+1;

  if (n == 0) {
  argnum_error:
    mrb_argnum_error(mrb, 0, 1, -1);
  }
  else if (n == 15) {
    /* packed arguments: method name is the first array element */
    if (RARRAY_LEN(regs[0]) == 0) goto argnum_error;
    name = mrb_obj_to_sym(mrb, RARRAY_PTR(regs[0])[0]);
  }
  else {
    name = mrb_obj_to_sym(mrb, regs[0]);
  }

  c = mrb_class(mrb, self);
  m = mrb_vm_find_method(mrb, c, &c, name);
  if (MRB_METHOD_UNDEF_P(m)) { /* call method_missing */
    goto funcall;
  }

  ci->mid = name;
  ci->u.target_class = c;
  /* remove first symbol from arguments */
  if (n == 15) { /* variable length arguments */
    regs[0] = mrb_ary_subseq(mrb, regs[0], 1, RARRAY_LEN(regs[0]) - 1);
  }
  else { /* n > 0 */
    for (int i=0; i<n; i++) {
      regs[i] = regs[i+1];
    }
    regs[n] = regs[n+1]; /* copy kdict or block */
    if (ci->nk > 0) {
      regs[n+1] = regs[n+2]; /* copy block */
    }
    ci->n--;
  }

  const struct RProc *p;
  if (MRB_METHOD_PROC_P(m)) {
    p = MRB_METHOD_PROC(m);
    /* handle alias */
    if (MRB_PROC_ALIAS_P(p)) {
      ci->mid = p->body.mid;
      p = p->upper;
    }
    CI_PROC_SET(ci, p);
  }
  if (MRB_METHOD_CFUNC_P(m)) {
    if (MRB_METHOD_NOARG_P(m) && (ci->n > 0 || ci->nk > 0)) {
      check_method_noarg(mrb, ci);
    }
    return MRB_METHOD_CFUNC(m)(mrb, self);
  }
  return exec_irep(mrb, self, p);
}
933 static void
934 check_block(mrb_state *mrb, mrb_value blk)
936 if (mrb_nil_p(blk)) {
937 mrb_raise(mrb, E_ARGUMENT_ERROR, "no block given");
939 if (!mrb_proc_p(blk)) {
940 mrb_raise(mrb, E_TYPE_ERROR, "not a block");
/* Run block `blk` with `self` as receiver and `c` as the target class
 * (shared implementation of instance_eval/class_eval).  Re-targets the
 * current frame like exec_irep(), or falls back to mrb_yield_with_class()
 * when called directly from C. */
static mrb_value
eval_under(mrb_state *mrb, mrb_value self, mrb_value blk, struct RClass *c)
{
  struct RProc *p;
  mrb_callinfo *ci;
  int nregs;

  check_block(mrb, blk);
  ci = mrb->c->ci;
  if (ci->cci == CINFO_DIRECT) {
    return mrb_yield_with_class(mrb, blk, 1, &self, self, c);
  }
  ci->u.target_class = c;
  p = mrb_proc_ptr(blk);
  /* just in case irep is NULL; #6065 */
  if (p->body.irep == NULL) return mrb_nil_value();
  CI_PROC_SET(ci, p);
  ci->n = 1;
  ci->nk = 0;
  ci->mid = ci[-1].mid;
  if (MRB_PROC_CFUNC_P(p)) {
    stack_extend(mrb, 4);
    mrb->c->ci->stack[0] = self;
    mrb->c->ci->stack[1] = self;
    mrb->c->ci->stack[2] = mrb_nil_value();
    return MRB_PROC_CFUNC(p)(mrb, self);
  }
  nregs = p->body.irep->nregs;
  if (nregs < 4) nregs = 4;
  stack_extend(mrb, nregs);
  mrb->c->ci->stack[0] = self;
  mrb->c->ci->stack[1] = self; /* the block receives self as its argument */
  stack_clear(mrb->c->ci->stack+2, nregs-2);
  cipush(mrb, 0, 0, NULL, NULL, NULL, 0, 0);

  return self;
}
982 /* 15.2.2.4.35 */
984 * call-seq:
985 * mod.class_eval {| | block } -> obj
986 * mod.module_eval {| | block } -> obj
988 * Evaluates block in the context of _mod_. This can
989 * be used to add methods to a class. <code>module_eval</code> returns
990 * the result of evaluating its argument.
/* Module#module_eval / #class_eval: only the block form is supported. */
mrb_value
mrb_mod_module_eval(mrb_state *mrb, mrb_value mod)
{
  mrb_value a, b;

  if (mrb_get_args(mrb, "|S&", &a, &b) == 1) {
    mrb_raise(mrb, E_NOTIMP_ERROR, "module_eval/class_eval with string not implemented");
  }
  return eval_under(mrb, mod, b, mrb_class_ptr(mod));
}
1003 /* 15.3.1.3.18 */
1005 * call-seq:
1006 * obj.instance_eval {| | block } -> obj
1008 * Evaluates the given block,within the context of the receiver (_obj_).
1009 * In order to set the context, the variable +self+ is set to _obj_ while
1010 * the code is executing, giving the code access to _obj_'s
1011 * instance variables. In the version of <code>instance_eval</code>
1012 * that takes a +String+, the optional second and third
1013 * parameters supply a filename and starting line number that are used
1014 * when reporting compilation errors.
1016 * class KlassWithSecret
1017 * def initialize
1018 * @secret = 99
1019 * end
1020 * end
1021 * k = KlassWithSecret.new
1022 * k.instance_eval { @secret } #=> 99
/* Object#instance_eval: only the block form is supported; the block runs
 * with `self`'s singleton class as the target class. */
mrb_value
mrb_obj_instance_eval(mrb_state *mrb, mrb_value self)
{
  mrb_value a, b;

  if (mrb_get_args(mrb, "|S&", &a, &b) == 1) {
    mrb_raise(mrb, E_NOTIMP_ERROR, "instance_eval with string not implemented");
  }
  return eval_under(mrb, self, b, mrb_singleton_class_ptr(mrb, self));
}
/* Invoke block `b` with the given arguments, receiver `self` and target
 * class `c`.  The method name reported for the frame comes from the block's
 * captured env when it has one. */
MRB_API mrb_value
mrb_yield_with_class(mrb_state *mrb, mrb_value b, mrb_int argc, const mrb_value *argv, mrb_value self, struct RClass *c)
{
  struct RProc *p;
  mrb_sym mid;
  mrb_callinfo *ci;
  mrb_value val;
  mrb_int n;

  check_block(mrb, b);
  ci = mrb->c->ci;
  n = mrb_ci_nregs(ci);
  p = mrb_proc_ptr(b);
  if (MRB_PROC_ENV_P(p)) {
    mid = p->e.env->mid;
  }
  else {
    mid = ci->mid;
  }
  ci = cipush(mrb, n, CINFO_DIRECT, NULL, NULL, NULL, mid, 0);
  funcall_args_capture(mrb, 0, argc, argv, mrb_nil_value(), ci);
  ci->u.target_class = c;
  ci->proc = p;

  if (MRB_PROC_CFUNC_P(p)) {
    ci->stack[0] = self;
    val = MRB_PROC_CFUNC(p)(mrb, self);
    cipop(mrb);
  }
  else {
    ci->cci = CINFO_SKIP;
    val = mrb_run(mrb, p, self);
  }
  return val;
}
1071 MRB_API mrb_value
1072 mrb_yield_argv(mrb_state *mrb, mrb_value b, mrb_int argc, const mrb_value *argv)
1074 struct RProc *p = mrb_proc_ptr(b);
1075 struct RClass *tc;
1076 mrb_value self = mrb_proc_get_self(mrb, p, &tc);
1078 return mrb_yield_with_class(mrb, b, argc, argv, self, tc);
1081 MRB_API mrb_value
1082 mrb_yield(mrb_state *mrb, mrb_value b, mrb_value arg)
1084 struct RProc *p = mrb_proc_ptr(b);
1085 struct RClass *tc;
1086 mrb_value self = mrb_proc_get_self(mrb, p, &tc);
1088 return mrb_yield_with_class(mrb, b, 1, &arg, self, tc);
/* Continuation-style yield used by the VM: packs the arguments into one
 * array register of the CURRENT frame and re-targets it to run block `b`. */
mrb_value
mrb_yield_cont(mrb_state *mrb, mrb_value b, mrb_value self, mrb_int argc, const mrb_value *argv)
{
  struct RProc *p;
  mrb_callinfo *ci;

  check_block(mrb, b);
  p = mrb_proc_ptr(b);
  ci = mrb->c->ci;

  stack_extend_adjust(mrb, 4, &argv);
  mrb->c->ci->stack[1] = mrb_ary_new_from_values(mrb, argc, argv);
  mrb->c->ci->stack[2] = mrb_nil_value();
  mrb->c->ci->stack[3] = mrb_nil_value();
  ci->n = 15;  /* packed arguments */
  ci->nk = 0;
  return exec_irep(mrb, self, p);
}
/* X-macro list of break tags: f(name, value). */
#define RBREAK_TAG_FOREACH(f) \
  f(RBREAK_TAG_BREAK, 0) \
  f(RBREAK_TAG_JUMP, 1) \
  f(RBREAK_TAG_STOP, 2)

#define RBREAK_TAG_DEFINE(tag, i) tag = i,
enum {
  RBREAK_TAG_FOREACH(RBREAK_TAG_DEFINE)
};
#undef RBREAK_TAG_DEFINE

/* The tag is stored in RBREAK_TAG_BIT bits of RBreak::flags, starting at
 * bit RBREAK_TAG_BIT_OFF. */
#define RBREAK_TAG_BIT 3
#define RBREAK_TAG_BIT_OFF 8
#define RBREAK_TAG_MASK (~(~UINT32_C(0) << RBREAK_TAG_BIT))
1125 static inline uint32_t
1126 mrb_break_tag_get(struct RBreak *brk)
1128 return (brk->flags >> RBREAK_TAG_BIT_OFF) & RBREAK_TAG_MASK;
/* Store `tag` into the break-tag field of the flags word, leaving the other
 * flag bits untouched. */
static inline void
mrb_break_tag_set(struct RBreak *brk, uint32_t tag)
{
  brk->flags &= ~(RBREAK_TAG_MASK << RBREAK_TAG_BIT_OFF);
  brk->flags |= (tag & RBREAK_TAG_MASK) << RBREAK_TAG_BIT_OFF;
}
/* Allocate an RBreak carrying `val`, tagged with `tag`, that unwinds back to
 * the frame `return_ci` (stored as an index so stack reallocation is safe). */
static struct RBreak*
break_new(mrb_state *mrb, uint32_t tag, const mrb_callinfo *return_ci, mrb_value val)
{
  mrb_assert((size_t)(return_ci - mrb->c->cibase) <= (size_t)(mrb->c->ci - mrb->c->cibase));

  struct RBreak *brk = MRB_OBJ_ALLOC(mrb, MRB_TT_BREAK, NULL);
  brk->ci_break_index = return_ci - mrb->c->cibase;
  mrb_break_value_set(brk, val);
  mrb_break_tag_set(brk, tag);

  return brk;
}
/* Bitmask filters for catch_handler_find(), one bit per handler type. */
#define MRB_CATCH_FILTER_RESCUE (UINT32_C(1) << MRB_CATCH_RESCUE)
#define MRB_CATCH_FILTER_ENSURE (UINT32_C(1) << MRB_CATCH_ENSURE)
#define MRB_CATCH_FILTER_ALL    (MRB_CATCH_FILTER_RESCUE | MRB_CATCH_FILTER_ENSURE)
/* Find the innermost catch handler of a type in `filter` covering `pc`
 * within `irep`, or NULL.  The table is scanned backwards because inner
 * (more deeply nested) handlers come later. */
static const struct mrb_irep_catch_handler *
catch_handler_find(const mrb_irep *irep, const mrb_code *pc, uint32_t filter)
{
  ptrdiff_t xpc;
  size_t cnt;
  const struct mrb_irep_catch_handler *e;

/* The comparison operators use `>` and `<=` because pc already points to the next instruction */
#define catch_cover_p(pc, beg, end) ((pc) > (ptrdiff_t)(beg) && (pc) <= (ptrdiff_t)(end))

  mrb_assert(irep && irep->clen > 0);
  xpc = pc - irep->iseq;
  /* If it retry at the top level, pc will be 0, so check with -1 as the start position */
  mrb_assert(catch_cover_p(xpc, -1, irep->ilen));
  if (!catch_cover_p(xpc, -1, irep->ilen)) return NULL;

  /* Currently uses a simple linear search to avoid processing complexity. */
  cnt = irep->clen;
  e = mrb_irep_catch_handler_table(irep) + cnt - 1;
  for (; cnt > 0; cnt--, e--) {
    if (((UINT32_C(1) << e->type) & filter) &&
        catch_cover_p(xpc, mrb_irep_catch_handler_unpack(e->begin), mrb_irep_catch_handler_unpack(e->end))) {
      return e;
    }
  }

#undef catch_cover_p

  return NULL;
}
/* Kind of unexpected jump, used to pick the LocalJumpError message. */
typedef enum {
  LOCALJUMP_ERROR_RETURN = 0,
  LOCALJUMP_ERROR_BREAK = 1,
  LOCALJUMP_ERROR_YIELD = 2
} localjump_error_kind;

/* Set a LocalJumpError ("unexpected return/break/yield") as mrb->exc.
 * Does not raise directly; the VM loop handles the pending exception. */
static void
localjump_error(mrb_state *mrb, localjump_error_kind kind)
{
  char kind_str[3][7] = { "return", "break", "yield" };
  char kind_str_len[] = { 6, 5, 5 };
  static const char lead[] = "unexpected ";
  mrb_value msg;
  mrb_value exc;

  msg = mrb_str_new_capa(mrb, sizeof(lead) + 7);
  mrb_str_cat(mrb, msg, lead, sizeof(lead) - 1);
  mrb_str_cat(mrb, msg, kind_str[kind], kind_str_len[kind]);
  exc = mrb_exc_new_str(mrb, E_LOCALJUMP_ERROR, msg);
  mrb_exc_set(mrb, exc);
}
/* Record an exception and jump to the VM loop's L_RAISE label (these macros
 * are only usable inside the interpreter loop where that label exists). */
#define RAISE_EXC(mrb, exc) do { \
    mrb_value exc_value = (exc); \
    mrb_exc_set(mrb, exc_value); \
    goto L_RAISE; \
  } while (0)

#define RAISE_LIT(mrb, c, str) RAISE_EXC(mrb, mrb_exc_new_lit(mrb, c, str))
#define RAISE_FORMAT(mrb, c, fmt, ...) RAISE_EXC(mrb, mrb_exc_new_str(mrb, c, mrb_format(mrb, fmt, __VA_ARGS__)))
/* Set (not raise) an ArgumentError for a wrong argument count, deriving the
 * actual count from the current frame (unpacking packed args and counting a
 * non-empty keyword hash as one argument). */
static void
argnum_error(mrb_state *mrb, mrb_int num)
{
  mrb_value exc;
  mrb_value str;
  mrb_int argc = mrb->c->ci->n;

  if (argc == 15) {
    mrb_value args = mrb->c->ci->stack[1];
    if (mrb_array_p(args)) {
      argc = RARRAY_LEN(args);
    }
  }
  if (argc == 0 && mrb->c->ci->nk != 0 && !mrb_hash_empty_p(mrb, mrb->c->ci->stack[1])) {
    argc++;
  }
  str = mrb_format(mrb, "wrong number of arguments (given %i, expected %i)", argc, num);
  exc = mrb_exc_new_str(mrb, E_ARGUMENT_ERROR, str);
  mrb_exc_set(mrb, exc);
}
1238 static mrb_bool
1239 break_tag_p(struct RBreak *brk, uint32_t tag)
1241 return (brk != NULL && brk->tt == MRB_TT_BREAK) ? TRUE : FALSE;
1244 static void
1245 prepare_tagged_break(mrb_state *mrb, uint32_t tag, const mrb_callinfo *return_ci, mrb_value val)
1247 if (break_tag_p((struct RBreak*)mrb->exc, tag)) {
1248 mrb_break_tag_set((struct RBreak*)mrb->exc, tag);
1250 else {
1251 mrb->exc = (struct RObject*)break_new(mrb, tag, return_ci, val);
/* Store a tagged break in mrb->exc, then jump to the handler entry point
   inside mrb_vm_exec(). */
#define THROW_TAGGED_BREAK(mrb, tag, return_ci, val) \
  do { \
    prepare_tagged_break(mrb, tag, return_ci, val); \
    goto L_CATCH_TAGGED_BREAK; \
  } while (0)

/* If the callinfo `ci` belongs to an iseq proc that has a matching `ensure`
   catch handler covering `pc`, divert control to it via a tagged break.
   NOTE: deliberately assigns the dispatcher's `proc`, `irep` and `ch`
   variables as side effects — only usable inside mrb_vm_exec(). */
#define UNWIND_ENSURE(mrb, ci, pc, tag, return_ci, val) \
  do { \
    if ((proc = (ci)->proc) && !MRB_PROC_CFUNC_P(proc) && (irep = proc->body.irep) && irep->clen > 0 && \
        (ch = catch_handler_find(irep, pc, MRB_CATCH_FILTER_ENSURE))) { \
      THROW_TAGGED_BREAK(mrb, tag, return_ci, val); \
    } \
  } while (0)
/*
 * Usage pattern:
 *
 * CHECKPOINT_RESTORE(tag) {
 *   This part is executed when jumping by the same "tag" of RBreak (it is not executed the first time).
 *   Write the code required (initialization of variables, etc.) for the subsequent processing.
 * }
 * CHECKPOINT_MAIN(tag) {
 *   This part is always executed.
 * }
 * CHECKPOINT_END(tag);
 *
 * ...
 *
 * // Jump to CHECKPOINT_RESTORE with the same "tag".
 * goto CHECKPOINT_LABEL_MAKE(tag);
 */

#define CHECKPOINT_LABEL_MAKE(tag) L_CHECKPOINT_ ## tag

/* Opens an `if (FALSE)` arm holding the restore code: it is skipped on
   normal fall-through and only entered via the generated label. */
#define CHECKPOINT_RESTORE(tag) \
  do { \
    if (FALSE) { \
      CHECKPOINT_LABEL_MAKE(tag): \
      do {

#define CHECKPOINT_MAIN(tag) \
      } while (0); \
    } \
    do {

#define CHECKPOINT_END(tag) \
    } while (0); \
  } while (0)
/* Optional per-instruction debugger callback. */
#ifdef MRB_USE_DEBUG_HOOK
#define CODE_FETCH_HOOK(mrb, irep, pc, regs) if ((mrb)->code_fetch_hook) (mrb)->code_fetch_hook((mrb), (irep), (pc), (regs));
#else
#define CODE_FETCH_HOOK(mrb, irep, pc, regs)
#endif

/* Optional bytecode decryption/translation hook applied to each fetched op. */
#ifdef MRB_BYTECODE_DECODE_OPTION
#define BYTECODE_DECODER(x) ((mrb)->bytecode_decoder)?(mrb)->bytecode_decoder((mrb), (x)):(x)
#else
#define BYTECODE_DECODER(x) (x)
#endif

/* Fall back to switch dispatch on compilers without computed goto
   (labels-as-values is a GCC/Clang/ICC extension). */
#ifndef MRB_USE_VM_SWITCH_DISPATCH
#if !defined __GNUC__ && !defined __clang__ && !defined __INTEL_COMPILER
#define MRB_USE_VM_SWITCH_DISPATCH
#endif
#endif /* ifndef MRB_USE_VM_SWITCH_DISPATCH */

#ifdef MRB_USE_VM_SWITCH_DISPATCH

/* switch-based dispatch: one big for(;;){ switch(insn){ ... } } */
#define INIT_DISPATCH for (;;) { insn = BYTECODE_DECODER(*pc); CODE_FETCH_HOOK(mrb, irep, pc, regs); switch (insn) {
#define CASE(insn,ops) case insn: pc++; FETCH_ ## ops (); mrb->c->ci->pc = pc; L_ ## insn ## _BODY:
#define NEXT goto L_END_DISPATCH
#define JUMP NEXT
#define END_DISPATCH L_END_DISPATCH:;}}

#else

/* computed-goto dispatch: each opcode body is a label; NEXT jumps through
   the optable directly, avoiding the loop+switch overhead. */
#define INIT_DISPATCH JUMP; return mrb_nil_value();
#define CASE(insn,ops) L_ ## insn: pc++; FETCH_ ## ops (); mrb->c->ci->pc = pc; L_ ## insn ## _BODY:
#define NEXT insn=BYTECODE_DECODER(*pc); CODE_FETCH_HOOK(mrb, irep, pc, regs); goto *optable[insn]
#define JUMP NEXT

#define END_DISPATCH

#endif
/*
 * Execute `proc` (an iseq proc) with `self` as the receiver on the current
 * context, keeping the first `stack_keep` register values that are already
 * on the stack (used by e.g. the REPL to preserve local variables between
 * top-level evaluations). Returns the result of the evaluation.
 */
MRB_API mrb_value
mrb_vm_run(mrb_state *mrb, const struct RProc *proc, mrb_value self, mrb_int stack_keep)
{
  const mrb_irep *irep = proc->body.irep;
  mrb_value result;
  struct mrb_context *c = mrb->c;
#ifdef MRB_DEBUG
  ptrdiff_t cioff = c->ci - c->cibase;
#endif
  mrb_int nregs = irep->nregs;

  if (!c->stbase) {
    stack_init(mrb);
  }
  if (stack_keep > nregs)
    nregs = stack_keep;  /* keep more registers than the irep needs */
  else {
    /* Detach a shared environment whose values we are about to clobber;
       otherwise stack_clear below would corrupt captured locals. */
    struct REnv *e = CI_ENV(mrb->c->ci);
    if (e && (stack_keep == 0 || irep->nlocals < MRB_ENV_LEN(e))) {
      ci_env_set(mrb->c->ci, NULL);
      mrb_env_unshare(mrb, e, FALSE);
    }
  }
  stack_extend(mrb, nregs);
  /* clear only the registers we are not asked to preserve */
  stack_clear(c->ci->stack + stack_keep, nregs - stack_keep);
  c->ci->stack[0] = self;
  result = mrb_vm_exec(mrb, proc, irep->iseq);
  mrb_assert(mrb->c == c); /* do not switch fibers via mrb_vm_run(), unlike mrb_vm_exec() */
  mrb_assert(c->ci == c->cibase || (c->ci - c->cibase) == cioff - 1);
  return result;
}
1371 static struct RClass*
1372 check_target_class(mrb_state *mrb)
1374 struct RClass *target = CI_TARGET_CLASS(mrb->c->ci);
1375 if (!target) {
1376 mrb_raise(mrb, E_TYPE_ERROR, "no class/module to add method");
1378 return target;
1381 #define regs (mrb->c->ci->stack)
1383 static mrb_value
1384 hash_new_from_regs(mrb_state *mrb, mrb_int argc, mrb_int idx)
1386 mrb_value hash = mrb_hash_new_capa(mrb, argc);
1387 while (argc--) {
1388 mrb_hash_set(mrb, hash, regs[idx+0], regs[idx+1]);
1389 idx += 2;
1391 return hash;
1394 #define ary_new_from_regs(mrb, argc, idx) mrb_ary_new_from_values(mrb, (argc), &regs[idx]);
1396 MRB_API mrb_value
1397 mrb_vm_exec(mrb_state *mrb, const struct RProc *proc, const mrb_code *pc)
1399 /* mrb_assert(MRB_PROC_CFUNC_P(proc)) */
1400 const mrb_irep *irep = proc->body.irep;
1401 const mrb_pool_value *pool = irep->pool;
1402 const mrb_sym *syms = irep->syms;
1403 mrb_code insn;
1404 int ai = mrb_gc_arena_save(mrb);
1405 struct mrb_jmpbuf *prev_jmp = mrb->jmp;
1406 struct mrb_jmpbuf c_jmp;
1407 uint32_t a;
1408 uint16_t b;
1409 uint16_t c;
1410 mrb_sym mid;
1411 const struct mrb_irep_catch_handler *ch;
1413 #ifndef MRB_USE_VM_SWITCH_DISPATCH
1414 static const void * const optable[] = {
1415 #define OPCODE(x,_) &&L_OP_ ## x,
1416 #include <mruby/ops.h>
1417 #undef OPCODE
1419 #endif
1421 RETRY_TRY_BLOCK:
1423 MRB_TRY(&c_jmp) {
1425 if (mrb->exc) {
1426 mrb_gc_arena_restore(mrb, ai);
1427 if (mrb->exc->tt == MRB_TT_BREAK)
1428 goto L_BREAK;
1429 goto L_RAISE;
1431 mrb->jmp = &c_jmp;
1432 CI_PROC_SET(mrb->c->ci, proc);
1434 INIT_DISPATCH {
1435 CASE(OP_NOP, Z) {
1436 /* do nothing */
1437 NEXT;
1440 CASE(OP_MOVE, BB) {
1441 regs[a] = regs[b];
1442 NEXT;
1445 CASE(OP_LOADL, BB) {
1446 switch (pool[b].tt) { /* number */
1447 case IREP_TT_INT32:
1448 regs[a] = mrb_int_value(mrb, (mrb_int)pool[b].u.i32);
1449 break;
1450 case IREP_TT_INT64:
1451 #if defined(MRB_INT64)
1452 regs[a] = mrb_int_value(mrb, (mrb_int)pool[b].u.i64);
1453 break;
1454 #else
1455 #if defined(MRB_64BIT)
1456 if (INT32_MIN <= pool[b].u.i64 && pool[b].u.i64 <= INT32_MAX) {
1457 regs[a] = mrb_int_value(mrb, (mrb_int)pool[b].u.i64);
1458 break;
1460 #endif
1461 goto L_INT_OVERFLOW;
1462 #endif
1463 case IREP_TT_BIGINT:
1464 #ifdef MRB_USE_BIGINT
1466 const char *s = pool[b].u.str;
1467 regs[a] = mrb_bint_new_str(mrb, s+2, (uint8_t)s[0], s[1]);
1469 break;
1470 #else
1471 goto L_INT_OVERFLOW;
1472 #endif
1473 #ifndef MRB_NO_FLOAT
1474 case IREP_TT_FLOAT:
1475 regs[a] = mrb_float_value(mrb, pool[b].u.f);
1476 break;
1477 #endif
1478 default:
1479 /* should not happen (tt:string) */
1480 regs[a] = mrb_nil_value();
1481 break;
1483 NEXT;
1486 CASE(OP_LOADI, BB) {
1487 SET_FIXNUM_VALUE(regs[a], b);
1488 NEXT;
1491 CASE(OP_LOADINEG, BB) {
1492 SET_FIXNUM_VALUE(regs[a], -b);
1493 NEXT;
1496 CASE(OP_LOADI__1,B) goto L_LOADI;
1497 CASE(OP_LOADI_0,B) goto L_LOADI;
1498 CASE(OP_LOADI_1,B) goto L_LOADI;
1499 CASE(OP_LOADI_2,B) goto L_LOADI;
1500 CASE(OP_LOADI_3,B) goto L_LOADI;
1501 CASE(OP_LOADI_4,B) goto L_LOADI;
1502 CASE(OP_LOADI_5,B) goto L_LOADI;
1503 CASE(OP_LOADI_6,B) goto L_LOADI;
1504 CASE(OP_LOADI_7, B) {
1505 L_LOADI:
1506 SET_FIXNUM_VALUE(regs[a], (mrb_int)insn - (mrb_int)OP_LOADI_0);
1507 NEXT;
1510 CASE(OP_LOADI16, BS) {
1511 SET_FIXNUM_VALUE(regs[a], (mrb_int)(int16_t)b);
1512 NEXT;
1515 CASE(OP_LOADI32, BSS) {
1516 SET_INT_VALUE(mrb, regs[a], (int32_t)(((uint32_t)b<<16)+c));
1517 NEXT;
1520 CASE(OP_LOADSYM, BB) {
1521 SET_SYM_VALUE(regs[a], syms[b]);
1522 NEXT;
1525 CASE(OP_LOADNIL, B) {
1526 SET_NIL_VALUE(regs[a]);
1527 NEXT;
1530 CASE(OP_LOADSELF, B) {
1531 regs[a] = regs[0];
1532 NEXT;
1535 CASE(OP_LOADT, B) {
1536 SET_TRUE_VALUE(regs[a]);
1537 NEXT;
1540 CASE(OP_LOADF, B) {
1541 SET_FALSE_VALUE(regs[a]);
1542 NEXT;
1545 CASE(OP_GETGV, BB) {
1546 mrb_value val = mrb_gv_get(mrb, syms[b]);
1547 regs[a] = val;
1548 NEXT;
1551 CASE(OP_SETGV, BB) {
1552 mrb_gv_set(mrb, syms[b], regs[a]);
1553 NEXT;
1556 CASE(OP_GETSV, BB) {
1557 mrb_value val = mrb_vm_special_get(mrb, syms[b]);
1558 regs[a] = val;
1559 NEXT;
1562 CASE(OP_SETSV, BB) {
1563 mrb_vm_special_set(mrb, syms[b], regs[a]);
1564 NEXT;
1567 CASE(OP_GETIV, BB) {
1568 regs[a] = mrb_iv_get(mrb, regs[0], syms[b]);
1569 NEXT;
1572 CASE(OP_SETIV, BB) {
1573 mrb_iv_set(mrb, regs[0], syms[b], regs[a]);
1574 NEXT;
1577 CASE(OP_GETCV, BB) {
1578 mrb_value val;
1579 val = mrb_vm_cv_get(mrb, syms[b]);
1580 regs[a] = val;
1581 NEXT;
1584 CASE(OP_SETCV, BB) {
1585 mrb_vm_cv_set(mrb, syms[b], regs[a]);
1586 NEXT;
1589 CASE(OP_GETIDX, B) {
1590 mrb_value va = regs[a], vb = regs[a+1];
1591 switch (mrb_type(va)) {
1592 case MRB_TT_ARRAY:
1593 if (!mrb_integer_p(vb)) goto getidx_fallback;
1594 else {
1595 mrb_int idx = mrb_integer(vb);
1596 if (0 <= idx && idx < RARRAY_LEN(va)) {
1597 regs[a] = RARRAY_PTR(va)[idx];
1599 else {
1600 regs[a] = mrb_ary_entry(va, idx);
1603 break;
1604 case MRB_TT_HASH:
1605 va = mrb_hash_get(mrb, va, vb);
1606 regs[a] = va;
1607 break;
1608 case MRB_TT_STRING:
1609 switch (mrb_type(vb)) {
1610 case MRB_TT_INTEGER:
1611 case MRB_TT_STRING:
1612 case MRB_TT_RANGE:
1613 va = mrb_str_aref(mrb, va, vb, mrb_undef_value());
1614 regs[a] = va;
1615 break;
1616 default:
1617 goto getidx_fallback;
1619 break;
1620 default:
1621 getidx_fallback:
1622 mid = MRB_OPSYM(aref);
1623 goto L_SEND_SYM;
1625 NEXT;
1628 CASE(OP_SETIDX, B) {
1629 c = 2;
1630 mid = MRB_OPSYM(aset);
1631 SET_NIL_VALUE(regs[a+3]);
1632 goto L_SENDB_SYM;
1635 CASE(OP_GETCONST, BB) {
1636 mrb_value v = mrb_vm_const_get(mrb, syms[b]);
1637 regs[a] = v;
1638 NEXT;
1641 CASE(OP_SETCONST, BB) {
1642 mrb_vm_const_set(mrb, syms[b], regs[a]);
1643 NEXT;
1646 CASE(OP_GETMCNST, BB) {
1647 mrb_value v = mrb_const_get(mrb, regs[a], syms[b]);
1648 regs[a] = v;
1649 NEXT;
1652 CASE(OP_SETMCNST, BB) {
1653 mrb_const_set(mrb, regs[a+1], syms[b], regs[a]);
1654 NEXT;
1657 CASE(OP_GETUPVAR, BBB) {
1658 struct REnv *e = uvenv(mrb, c);
1660 if (e && b < MRB_ENV_LEN(e)) {
1661 regs[a] = e->stack[b];
1663 else {
1664 regs[a] = mrb_nil_value();
1666 NEXT;
1669 CASE(OP_SETUPVAR, BBB) {
1670 struct REnv *e = uvenv(mrb, c);
1672 if (e) {
1673 if (b < MRB_ENV_LEN(e)) {
1674 e->stack[b] = regs[a];
1675 mrb_write_barrier(mrb, (struct RBasic*)e);
1678 NEXT;
1681 CASE(OP_JMP, S) {
1682 pc += (int16_t)a;
1683 JUMP;
1685 CASE(OP_JMPIF, BS) {
1686 if (mrb_test(regs[a])) {
1687 pc += (int16_t)b;
1688 JUMP;
1690 NEXT;
1692 CASE(OP_JMPNOT, BS) {
1693 if (!mrb_test(regs[a])) {
1694 pc += (int16_t)b;
1695 JUMP;
1697 NEXT;
1699 CASE(OP_JMPNIL, BS) {
1700 if (mrb_nil_p(regs[a])) {
1701 pc += (int16_t)b;
1702 JUMP;
1704 NEXT;
1707 CASE(OP_JMPUW, S) {
1708 a = (uint32_t)((pc - irep->iseq) + (int16_t)a);
1709 CHECKPOINT_RESTORE(RBREAK_TAG_JUMP) {
1710 struct RBreak *brk = (struct RBreak*)mrb->exc;
1711 mrb_value target = mrb_break_value_get(brk);
1712 mrb_assert(mrb_integer_p(target));
1713 a = (uint32_t)mrb_integer(target);
1714 mrb_assert(a >= 0 && a < irep->ilen);
1716 CHECKPOINT_MAIN(RBREAK_TAG_JUMP) {
1717 if (irep->clen > 0 &&
1718 (ch = catch_handler_find(irep, pc, MRB_CATCH_FILTER_ENSURE))) {
1719 /* avoiding a jump from a catch handler into the same handler */
1720 if (a < mrb_irep_catch_handler_unpack(ch->begin) || a >= mrb_irep_catch_handler_unpack(ch->end)) {
1721 THROW_TAGGED_BREAK(mrb, RBREAK_TAG_JUMP, mrb->c->ci, mrb_fixnum_value(a));
1725 CHECKPOINT_END(RBREAK_TAG_JUMP);
1727 mrb->exc = NULL; /* clear break object */
1728 pc = irep->iseq + a;
1729 JUMP;
1732 CASE(OP_EXCEPT, B) {
1733 mrb_value exc;
1735 if (mrb->exc == NULL) {
1736 exc = mrb_nil_value();
1738 else {
1739 switch (mrb->exc->tt) {
1740 case MRB_TT_BREAK:
1741 case MRB_TT_EXCEPTION:
1742 exc = mrb_obj_value(mrb->exc);
1743 break;
1744 default:
1745 mrb_assert(!"bad mrb_type");
1746 exc = mrb_nil_value();
1747 break;
1749 mrb->exc = NULL;
1751 regs[a] = exc;
1752 NEXT;
1754 CASE(OP_RESCUE, BB) {
1755 mrb_value exc = regs[a]; /* exc on stack */
1756 mrb_value e = regs[b];
1757 struct RClass *ec;
1759 switch (mrb_type(e)) {
1760 case MRB_TT_CLASS:
1761 case MRB_TT_MODULE:
1762 break;
1763 default:
1764 RAISE_LIT(mrb, E_TYPE_ERROR, "class or module required for rescue clause");
1766 ec = mrb_class_ptr(e);
1767 regs[b] = mrb_bool_value(mrb_obj_is_kind_of(mrb, exc, ec));
1768 NEXT;
1771 CASE(OP_RAISEIF, B) {
1772 mrb_value exc;
1773 exc = regs[a];
1774 if (mrb_nil_p(exc)) {
1775 mrb->exc = NULL;
1777 else if (mrb_break_p(exc)) {
1778 struct RBreak *brk;
1779 mrb->exc = mrb_obj_ptr(exc);
1780 L_BREAK:
1781 brk = (struct RBreak*)mrb->exc;
1782 switch (mrb_break_tag_get(brk)) {
1783 #define DISPATCH_CHECKPOINTS(n, i) case n: goto CHECKPOINT_LABEL_MAKE(n);
1784 RBREAK_TAG_FOREACH(DISPATCH_CHECKPOINTS)
1785 #undef DISPATCH_CHECKPOINTS
1786 default:
1787 mrb_assert(!"wrong break tag");
1790 else {
1791 mrb_callinfo *ci;
1792 mrb_exc_set(mrb, exc);
1793 L_RAISE:
1794 ci = mrb->c->ci;
1795 while (!(proc = ci->proc) || MRB_PROC_CFUNC_P(ci->proc) || !(irep = proc->body.irep) || irep->clen < 1 ||
1796 (ch = catch_handler_find(irep, ci->pc, MRB_CATCH_FILTER_ALL)) == NULL) {
1797 if (ci != mrb->c->cibase) {
1798 ci = cipop(mrb);
1799 if (ci[1].cci == CINFO_SKIP) {
1800 mrb_assert(prev_jmp != NULL);
1801 mrb->jmp = prev_jmp;
1802 MRB_THROW(prev_jmp);
1805 else if (mrb->c == mrb->root_c) {
1806 mrb->c->ci->stack = mrb->c->stbase;
1807 mrb->jmp = prev_jmp;
1808 return mrb_obj_value(mrb->exc);
1810 else {
1811 struct mrb_context *c = mrb->c;
1813 fiber_terminate(mrb, c, ci);
1814 if (!c->vmexec) goto L_RAISE;
1815 mrb->jmp = prev_jmp;
1816 if (!prev_jmp) return mrb_obj_value(mrb->exc);
1817 MRB_THROW(prev_jmp);
1821 if (FALSE) {
1822 L_CATCH_TAGGED_BREAK: /* from THROW_TAGGED_BREAK() or UNWIND_ENSURE() */
1823 ci = mrb->c->ci;
1825 proc = ci->proc;
1826 irep = proc->body.irep;
1827 pool = irep->pool;
1828 syms = irep->syms;
1829 stack_extend(mrb, irep->nregs);
1830 pc = irep->iseq + mrb_irep_catch_handler_unpack(ch->target);
1832 NEXT;
1835 CASE(OP_SSEND, BBB) {
1836 regs[a] = regs[0];
1837 insn = OP_SEND;
1839 goto L_SENDB;
1841 CASE(OP_SSENDB, BBB) {
1842 regs[a] = regs[0];
1844 goto L_SENDB;
1846 CASE(OP_SEND, BBB)
1847 goto L_SENDB;
1849 L_SEND_SYM:
1850 c = 1;
1851 /* push nil after arguments */
1852 SET_NIL_VALUE(regs[a+2]);
1853 goto L_SENDB_SYM;
1855 CASE(OP_SENDB, BBB)
1856 L_SENDB:
1857 mid = syms[b];
1858 L_SENDB_SYM:
1860 mrb_callinfo *ci;
1861 mrb_method_t m;
1862 mrb_value recv, blk;
1863 int n = c&0xf;
1864 int nk = (c>>4)&0xf;
1865 mrb_int bidx = a + mrb_bidx(n,nk);
1866 mrb_int new_bidx = bidx;
1868 if (nk == CALL_MAXARGS) {
1869 mrb_ensure_hash_type(mrb, regs[a+(n==CALL_MAXARGS?1:n)+1]);
1871 else if (nk > 0) { /* pack keyword arguments */
1872 mrb_int kidx = a+(n==CALL_MAXARGS?1:n)+1;
1873 mrb_value kdict = hash_new_from_regs(mrb, nk, kidx);
1874 regs[kidx] = kdict;
1875 nk = CALL_MAXARGS;
1876 c = n | (nk<<4);
1877 new_bidx = a+mrb_bidx(n, nk);
1880 mrb_assert(bidx < irep->nregs);
1881 if (insn == OP_SEND) {
1882 /* clear block argument */
1883 SET_NIL_VALUE(regs[new_bidx]);
1884 SET_NIL_VALUE(blk);
1886 else {
1887 blk = ensure_block(mrb, regs[bidx]);
1888 regs[new_bidx] = blk;
1891 ci = cipush(mrb, a, CINFO_DIRECT, NULL, NULL, BLK_PTR(blk), 0, c);
1892 recv = regs[0];
1893 ci->u.target_class = (insn == OP_SUPER) ? CI_TARGET_CLASS(ci - 1)->super : mrb_class(mrb, recv);
1894 m = mrb_vm_find_method(mrb, ci->u.target_class, &ci->u.target_class, mid);
1895 if (MRB_METHOD_UNDEF_P(m)) {
1896 m = prepare_missing(mrb, ci, recv, mid, blk, (insn == OP_SUPER));
1898 else {
1899 ci->mid = mid;
1901 ci->cci = CINFO_NONE;
1903 if (MRB_METHOD_PROC_P(m)) {
1904 const struct RProc *p = MRB_METHOD_PROC(m);
1905 /* handle alias */
1906 if (MRB_PROC_ALIAS_P(p)) {
1907 ci->mid = p->body.mid;
1908 p = p->upper;
1910 CI_PROC_SET(ci, p);
1911 if (!MRB_PROC_CFUNC_P(p)) {
1912 /* setup environment for calling method */
1913 proc = p;
1914 irep = proc->body.irep;
1915 pool = irep->pool;
1916 syms = irep->syms;
1917 stack_extend(mrb, (irep->nregs < 4) ? 4 : irep->nregs);
1918 pc = irep->iseq;
1919 JUMP;
1921 else {
1922 if (MRB_PROC_NOARG_P(p) && (ci->n > 0 || ci->nk > 0)) {
1923 check_method_noarg(mrb, ci);
1925 recv = MRB_PROC_CFUNC(p)(mrb, recv);
1928 else {
1929 if (MRB_METHOD_NOARG_P(m) && (ci->n > 0 || ci->nk > 0)) {
1930 check_method_noarg(mrb, ci);
1932 recv = MRB_METHOD_FUNC(m)(mrb, recv);
1935 /* cfunc epilogue */
1936 mrb_gc_arena_shrink(mrb, ai);
1937 if (mrb->exc) goto L_RAISE;
1938 ci = mrb->c->ci;
1939 if (!ci->u.keep_context) { /* return from context modifying method (resume/yield) */
1940 if (ci->cci == CINFO_RESUMED) {
1941 mrb->jmp = prev_jmp;
1942 return recv;
1944 else {
1945 mrb_assert(!MRB_PROC_CFUNC_P(ci[-1].proc));
1946 proc = ci[-1].proc;
1947 irep = proc->body.irep;
1948 pool = irep->pool;
1949 syms = irep->syms;
1952 mrb_assert(ci > mrb->c->cibase);
1953 ci->stack[0] = recv;
1954 /* pop stackpos */
1955 ci = cipop(mrb);
1956 pc = ci->pc;
1957 JUMP;
1960 CASE(OP_CALL, Z) {
1961 mrb_callinfo *ci = mrb->c->ci;
1962 mrb_value recv = ci->stack[0];
1963 const struct RProc *p = mrb_proc_ptr(recv);
1965 /* handle alias */
1966 if (MRB_PROC_ALIAS_P(p)) {
1967 ci->mid = p->body.mid;
1968 p = p->upper;
1970 else if (MRB_PROC_ENV_P(p)) {
1971 ci->mid = MRB_PROC_ENV(p)->mid;
1973 /* replace callinfo */
1974 ci->u.target_class = MRB_PROC_TARGET_CLASS(p);
1975 CI_PROC_SET(ci, p);
1977 /* prepare stack */
1978 if (MRB_PROC_CFUNC_P(p)) {
1979 recv = MRB_PROC_CFUNC(p)(mrb, recv);
1980 mrb_gc_arena_shrink(mrb, ai);
1981 if (mrb->exc) goto L_RAISE;
1982 /* pop stackpos */
1983 ci = cipop(mrb);
1984 pc = ci->pc;
1985 ci[1].stack[0] = recv;
1986 irep = mrb->c->ci->proc->body.irep;
1988 else {
1989 /* setup environment for calling method */
1990 proc = p;
1991 irep = p->body.irep;
1992 if (!irep) {
1993 mrb->c->ci->stack[0] = mrb_nil_value();
1994 a = 0;
1995 goto L_OP_RETURN_BODY;
1997 mrb_int nargs = ci_bidx(ci)+1;
1998 if (nargs < irep->nregs) {
1999 stack_extend(mrb, irep->nregs);
2000 stack_clear(regs+nargs, irep->nregs-nargs);
2002 if (MRB_PROC_ENV_P(p)) {
2003 regs[0] = MRB_PROC_ENV(p)->stack[0];
2005 pc = irep->iseq;
2007 pool = irep->pool;
2008 syms = irep->syms;
2009 JUMP;
2012 CASE(OP_SUPER, BB) {
2013 mrb_callinfo *ci = mrb->c->ci;
2014 mrb_value recv;
2015 struct RClass* target_class = CI_TARGET_CLASS(ci);
2017 mid = ci->mid;
2018 if (mid == 0 || !target_class) {
2019 RAISE_LIT(mrb, E_NOMETHOD_ERROR, "super called outside of method");
2021 if ((target_class->flags & MRB_FL_CLASS_IS_PREPENDED) || target_class->tt == MRB_TT_MODULE) {
2022 goto super_typeerror;
2024 recv = regs[0];
2025 if (!mrb_obj_is_kind_of(mrb, recv, target_class)) {
2026 super_typeerror:
2027 RAISE_LIT(mrb, E_TYPE_ERROR, "self has wrong type to call super in this context");
2030 c = b; // arg info
2031 regs[a] = recv;
2032 goto L_SENDB_SYM;
2035 CASE(OP_ARGARY, BS) {
2036 mrb_int m1 = (b>>11)&0x3f;
2037 mrb_int r = (b>>10)&0x1;
2038 mrb_int m2 = (b>>5)&0x1f;
2039 mrb_int kd = (b>>4)&0x1;
2040 mrb_int lv = (b>>0)&0xf;
2041 mrb_value *stack;
2043 if (mrb->c->ci->mid == 0 || CI_TARGET_CLASS(mrb->c->ci) == NULL) {
2044 L_NOSUPER:
2045 RAISE_LIT(mrb, E_NOMETHOD_ERROR, "super called outside of method");
2047 if (lv == 0) stack = regs + 1;
2048 else {
2049 struct REnv *e = uvenv(mrb, lv-1);
2050 if (!e) goto L_NOSUPER;
2051 if (MRB_ENV_LEN(e) <= m1+r+m2+1)
2052 goto L_NOSUPER;
2053 stack = e->stack + 1;
2055 if (r == 0) {
2056 regs[a] = mrb_ary_new_from_values(mrb, m1+m2, stack);
2058 else {
2059 mrb_value *pp = NULL;
2060 struct RArray *rest;
2061 mrb_int len = 0;
2063 if (mrb_array_p(stack[m1])) {
2064 struct RArray *ary = mrb_ary_ptr(stack[m1]);
2066 pp = ARY_PTR(ary);
2067 len = ARY_LEN(ary);
2069 regs[a] = mrb_ary_new_capa(mrb, m1+len+m2);
2070 rest = mrb_ary_ptr(regs[a]);
2071 if (m1 > 0) {
2072 stack_copy(ARY_PTR(rest), stack, m1);
2074 if (len > 0) {
2075 stack_copy(ARY_PTR(rest)+m1, pp, len);
2077 if (m2 > 0) {
2078 stack_copy(ARY_PTR(rest)+m1+len, stack+m1+1, m2);
2080 ARY_SET_LEN(rest, m1+len+m2);
2082 if (kd) {
2083 regs[a+1] = stack[m1+r+m2];
2084 regs[a+2] = stack[m1+r+m2+1];
2086 else {
2087 regs[a+1] = stack[m1+r+m2];
2089 mrb_gc_arena_restore(mrb, ai);
2090 NEXT;
2093 CASE(OP_ENTER, W) {
2094 mrb_callinfo *ci = mrb->c->ci;
2095 mrb_int argc = ci->n;
2096 mrb_value *argv = regs+1;
2098 mrb_int m1 = MRB_ASPEC_REQ(a);
2100 /* no other args */
2101 if ((a & ~0x7c0001) == 0 && argc < 15 && MRB_PROC_STRICT_P(proc)) {
2102 if (argc+(ci->nk==15) != m1) { /* count kdict too */
2103 argnum_error(mrb, m1);
2104 goto L_RAISE;
2106 /* clear local (but non-argument) variables */
2107 mrb_int pos = m1+2; /* self+m1+blk */
2108 if (irep->nlocals-pos > 0) {
2109 stack_clear(&regs[pos], irep->nlocals-pos);
2111 NEXT;
2114 mrb_int o = MRB_ASPEC_OPT(a);
2115 mrb_int r = MRB_ASPEC_REST(a);
2116 mrb_int m2 = MRB_ASPEC_POST(a);
2117 mrb_int kd = (MRB_ASPEC_KEY(a) > 0 || MRB_ASPEC_KDICT(a))? 1 : 0;
2118 /* unused
2119 int b = MRB_ASPEC_BLOCK(a);
2121 mrb_int const len = m1 + o + r + m2;
2123 mrb_value * const argv0 = argv;
2124 mrb_value blk = regs[ci_bidx(ci)];
2125 mrb_value kdict = mrb_nil_value();
2127 /* keyword arguments */
2128 if (ci->nk == 15) {
2129 kdict = regs[mrb_ci_kidx(ci)];
2131 if (!kd) {
2132 if (!mrb_nil_p(kdict) && mrb_hash_size(mrb, kdict) > 0) {
2133 if (argc < 14) {
2134 ci->n++;
2135 argc++; /* include kdict in normal arguments */
2137 else if (argc == 14) {
2138 /* pack arguments and kdict */
2139 regs[1] = ary_new_from_regs(mrb, argc+1, 1);
2140 argc = ci->n = 15;
2142 else {/* argc == 15 */
2143 /* push kdict to packed arguments */
2144 mrb_ary_push(mrb, regs[1], kdict);
2147 kdict = mrb_nil_value();
2148 ci->nk = 0;
2150 else if (MRB_ASPEC_KEY(a) > 0 && !mrb_nil_p(kdict)) {
2151 kdict = mrb_hash_dup(mrb, kdict);
2153 else if (!mrb_nil_p(kdict)) {
2154 mrb_gc_protect(mrb, kdict);
2157 /* arguments is passed with Array */
2158 if (argc == 15) {
2159 struct RArray *ary = mrb_ary_ptr(regs[1]);
2160 argv = ARY_PTR(ary);
2161 argc = (int)ARY_LEN(ary);
2162 mrb_gc_protect(mrb, regs[1]);
2165 /* strict argument check */
2166 if (ci->proc && MRB_PROC_STRICT_P(ci->proc)) {
2167 if (argc < m1 + m2 || (r == 0 && argc > len)) {
2168 argnum_error(mrb, m1+m2);
2169 goto L_RAISE;
2172 /* extract first argument array to arguments */
2173 else if (len > 1 && argc == 1 && mrb_array_p(argv[0])) {
2174 mrb_gc_protect(mrb, argv[0]);
2175 argc = (int)RARRAY_LEN(argv[0]);
2176 argv = RARRAY_PTR(argv[0]);
2179 /* rest arguments */
2180 mrb_value rest;
2181 if (argc < len) {
2182 mrb_int mlen = m2;
2183 if (argc < m1+m2) {
2184 mlen = m1 < argc ? argc - m1 : 0;
2187 /* copy mandatory and optional arguments */
2188 if (argv0 != argv && argv) {
2189 value_move(&regs[1], argv, argc-mlen); /* m1 + o */
2191 if (argc < m1) {
2192 stack_clear(&regs[argc+1], m1-argc);
2194 /* copy post mandatory arguments */
2195 if (mlen) {
2196 value_move(&regs[len-m2+1], &argv[argc-mlen], mlen);
2198 if (mlen < m2) {
2199 stack_clear(&regs[len-m2+mlen+1], m2-mlen);
2201 /* initialize rest arguments with empty Array */
2202 if (r) {
2203 rest = mrb_ary_new_capa(mrb, 0);
2204 regs[m1+o+1] = rest;
2206 /* skip initializer of passed arguments */
2207 if (o > 0 && argc > m1+m2)
2208 pc += (argc - m1 - m2)*3;
2210 else {
2211 mrb_int rnum = 0;
2212 if (argv0 != argv) {
2213 mrb_gc_protect(mrb, blk);
2214 value_move(&regs[1], argv, m1+o);
2216 if (r) {
2217 rnum = argc-m1-o-m2;
2218 rest = mrb_ary_new_from_values(mrb, rnum, argv+m1+o);
2219 regs[m1+o+1] = rest;
2221 if (m2 > 0 && argc-m2 > m1) {
2222 value_move(&regs[m1+o+r+1], &argv[m1+o+rnum], m2);
2224 pc += o*3;
2227 /* need to be update blk first to protect blk from GC */
2228 mrb_int const kw_pos = len + kd; /* where kwhash should be */
2229 mrb_int const blk_pos = kw_pos + 1; /* where block should be */
2230 regs[blk_pos] = blk; /* move block */
2231 if (kd) {
2232 if (mrb_nil_p(kdict)) {
2233 kdict = mrb_hash_new_capa(mrb, 0);
2235 regs[kw_pos] = kdict; /* set kwhash */
2236 ci->nk = 15;
2239 /* format arguments for generated code */
2240 mrb->c->ci->n = (uint8_t)len;
2242 /* clear local (but non-argument) variables */
2243 if (irep->nlocals-blk_pos-1 > 0) {
2244 stack_clear(&regs[blk_pos+1], irep->nlocals-blk_pos-1);
2246 JUMP;
2249 CASE(OP_KARG, BB) {
2250 mrb_value k = mrb_symbol_value(syms[b]);
2251 mrb_int kidx = mrb_ci_kidx(mrb->c->ci);
2252 mrb_value kdict, v;
2254 if (kidx < 0 || !mrb_hash_p(kdict=regs[kidx]) || !mrb_hash_key_p(mrb, kdict, k)) {
2255 RAISE_FORMAT(mrb, E_ARGUMENT_ERROR, "missing keyword: %v", k);
2257 v = mrb_hash_get(mrb, kdict, k);
2258 regs[a] = v;
2259 mrb_hash_delete_key(mrb, kdict, k);
2260 NEXT;
2263 CASE(OP_KEY_P, BB) {
2264 mrb_value k = mrb_symbol_value(syms[b]);
2265 mrb_int kidx = mrb_ci_kidx(mrb->c->ci);
2266 mrb_value kdict;
2267 mrb_bool key_p = FALSE;
2269 if (kidx >= 0 && mrb_hash_p(kdict=regs[kidx])) {
2270 key_p = mrb_hash_key_p(mrb, kdict, k);
2272 regs[a] = mrb_bool_value(key_p);
2273 NEXT;
2276 CASE(OP_KEYEND, Z) {
2277 mrb_int kidx = mrb_ci_kidx(mrb->c->ci);
2278 mrb_value kdict;
2280 if (kidx >= 0 && mrb_hash_p(kdict=regs[kidx]) && !mrb_hash_empty_p(mrb, kdict)) {
2281 mrb_value keys = mrb_hash_keys(mrb, kdict);
2282 mrb_value key1 = RARRAY_PTR(keys)[0];
2283 RAISE_FORMAT(mrb, E_ARGUMENT_ERROR, "unknown keyword: %v", key1);
2285 NEXT;
2288 CASE(OP_BREAK, B) {
2289 if (mrb->exc) {
2290 goto L_RAISE;
2293 if (MRB_PROC_STRICT_P(proc)) goto NORMAL_RETURN;
2294 if (!MRB_PROC_ENV_P(proc)) {
2295 L_BREAK_ERROR:
2296 RAISE_LIT(mrb, E_LOCALJUMP_ERROR, "break from proc-closure");
2298 else {
2299 struct REnv *e = MRB_PROC_ENV(proc);
2301 if (e->cxt != mrb->c) {
2302 goto L_BREAK_ERROR;
2305 mrb_callinfo *ci = mrb->c->ci;
2306 proc = proc->upper;
2307 while (mrb->c->cibase < ci && ci[-1].proc != proc) {
2308 ci--;
2310 if (ci == mrb->c->cibase || !(ci->flags & MRB_CI_COMPANION_BLOCK)) {
2311 goto L_BREAK_ERROR;
2313 c = a; // release the "a" variable, which can handle 32-bit values
2314 a = ci - mrb->c->cibase;
2315 goto L_UNWINDING;
2317 CASE(OP_RETURN_BLK, B) {
2318 if (mrb->exc) {
2319 goto L_RAISE;
2322 mrb_callinfo *ci = mrb->c->ci;
2324 if (!MRB_PROC_ENV_P(proc) || MRB_PROC_STRICT_P(proc)) {
2325 goto NORMAL_RETURN;
2328 const struct RProc *dst;
2329 mrb_callinfo *cibase;
2330 cibase = mrb->c->cibase;
2331 dst = top_proc(mrb, proc);
2333 if (MRB_PROC_ENV_P(dst)) {
2334 struct REnv *e = MRB_PROC_ENV(dst);
2336 if (e->cxt != mrb->c) {
2337 localjump_error(mrb, LOCALJUMP_ERROR_RETURN);
2338 goto L_RAISE;
2341 /* check jump destination */
2342 while (cibase <= ci && ci->proc != dst) {
2343 ci--;
2345 if (ci <= cibase) { /* no jump destination */
2346 localjump_error(mrb, LOCALJUMP_ERROR_RETURN);
2347 goto L_RAISE;
2349 c = a; // release the "a" variable, which can handle 32-bit values
2350 a = ci - mrb->c->cibase;
2351 goto L_UNWINDING;
2353 CASE(OP_RETURN, B) {
2354 mrb_callinfo *ci;
2356 if (mrb->exc) {
2357 goto L_RAISE;
2359 else {
2360 mrb_int acc;
2361 mrb_value v;
2363 NORMAL_RETURN:
2364 ci = mrb->c->ci;
2365 v = regs[a];
2366 mrb_gc_protect(mrb, v);
2367 CHECKPOINT_RESTORE(RBREAK_TAG_BREAK) {
2368 if (TRUE) {
2369 struct RBreak *brk = (struct RBreak*)mrb->exc;
2370 ci = &mrb->c->cibase[brk->ci_break_index];
2371 v = mrb_break_value_get(brk);
2373 else {
2374 L_UNWINDING: // for a check on the role of `a` and `c`, see `goto L_UNWINDING`
2375 ci = mrb->c->cibase + a;
2376 v = regs[c];
2378 mrb_gc_protect(mrb, v);
2380 CHECKPOINT_MAIN(RBREAK_TAG_BREAK) {
2381 for (;;) {
2382 UNWIND_ENSURE(mrb, mrb->c->ci, mrb->c->ci->pc, RBREAK_TAG_BREAK, ci, v);
2384 if (mrb->c->ci == ci) {
2385 break;
2387 cipop(mrb);
2388 if (mrb->c->ci[1].cci != CINFO_NONE) {
2389 mrb_assert(prev_jmp != NULL);
2390 mrb->exc = (struct RObject*)break_new(mrb, RBREAK_TAG_BREAK, ci, v);
2391 mrb_gc_arena_restore(mrb, ai);
2392 mrb->c->vmexec = FALSE;
2393 mrb->jmp = prev_jmp;
2394 MRB_THROW(prev_jmp);
2398 CHECKPOINT_END(RBREAK_TAG_BREAK);
2399 mrb->exc = NULL; /* clear break object */
2401 if (ci == mrb->c->cibase) {
2402 struct mrb_context *c = mrb->c;
2403 if (c == mrb->root_c) {
2404 /* toplevel return */
2405 mrb_gc_arena_restore(mrb, ai);
2406 mrb->jmp = prev_jmp;
2407 return v;
2410 fiber_terminate(mrb, c, ci);
2411 if (c->vmexec ||
2412 (mrb->c == mrb->root_c && mrb->c->ci == mrb->c->cibase) /* case using Fiber#transfer in mrb_fiber_resume() */) {
2413 mrb_gc_arena_restore(mrb, ai);
2414 c->vmexec = FALSE;
2415 mrb->jmp = prev_jmp;
2416 return v;
2418 ci = mrb->c->ci;
2421 if (mrb->c->vmexec && !ci->u.keep_context) {
2422 mrb_gc_arena_restore(mrb, ai);
2423 mrb->c->vmexec = FALSE;
2424 mrb->jmp = prev_jmp;
2425 return v;
2427 acc = ci->cci;
2428 ci = cipop(mrb);
2429 if (acc == CINFO_SKIP || acc == CINFO_DIRECT) {
2430 mrb_gc_arena_restore(mrb, ai);
2431 mrb->jmp = prev_jmp;
2432 return v;
2434 pc = ci->pc;
2435 DEBUG(fprintf(stderr, "from :%s\n", mrb_sym_name(mrb, ci->mid)));
2436 proc = ci->proc;
2437 irep = proc->body.irep;
2438 pool = irep->pool;
2439 syms = irep->syms;
2441 ci[1].stack[0] = v;
2442 mrb_gc_arena_restore(mrb, ai);
2444 JUMP;
2447 CASE(OP_BLKPUSH, BS) {
2448 int m1 = (b>>11)&0x3f;
2449 int r = (b>>10)&0x1;
2450 int m2 = (b>>5)&0x1f;
2451 int kd = (b>>4)&0x1;
2452 int lv = (b>>0)&0xf;
2453 mrb_value *stack;
2455 if (lv == 0) stack = regs + 1;
2456 else {
2457 struct REnv *e = uvenv(mrb, lv-1);
2458 if (!e || (!MRB_ENV_ONSTACK_P(e) && e->mid == 0) ||
2459 MRB_ENV_LEN(e) <= m1+r+m2+1) {
2460 localjump_error(mrb, LOCALJUMP_ERROR_YIELD);
2461 goto L_RAISE;
2463 stack = e->stack + 1;
2465 if (mrb_nil_p(stack[m1+r+m2+kd])) {
2466 localjump_error(mrb, LOCALJUMP_ERROR_YIELD);
2467 goto L_RAISE;
2469 regs[a] = stack[m1+r+m2+kd];
2470 NEXT;
2473 #if !defined(MRB_USE_BIGINT) || defined(MRB_INT32)
2474 L_INT_OVERFLOW:
2475 RAISE_LIT(mrb, E_RANGE_ERROR, "integer overflow");
2476 #endif
2478 #define TYPES2(a,b) ((((uint16_t)(a))<<8)|(((uint16_t)(b))&0xff))
2479 #define OP_MATH(op_name) \
2480 /* need to check if op is overridden */ \
2481 switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) { \
2482 OP_MATH_CASE_INTEGER(op_name); \
2483 OP_MATH_CASE_FLOAT(op_name, integer, float); \
2484 OP_MATH_CASE_FLOAT(op_name, float, integer); \
2485 OP_MATH_CASE_FLOAT(op_name, float, float); \
2486 OP_MATH_CASE_STRING_##op_name(); \
2487 default: \
2488 mid = MRB_OPSYM(op_name); \
2489 goto L_SEND_SYM; \
2491 NEXT;
2492 #define OP_MATH_CASE_INTEGER(op_name) \
2493 case TYPES2(MRB_TT_INTEGER, MRB_TT_INTEGER): \
2495 mrb_int x = mrb_integer(regs[a]), y = mrb_integer(regs[a+1]), z; \
2496 if (mrb_int_##op_name##_overflow(x, y, &z)) { \
2497 OP_MATH_OVERFLOW_INT(op_name,x,y); \
2499 else \
2500 SET_INT_VALUE(mrb,regs[a], z); \
2502 break
2503 #ifdef MRB_NO_FLOAT
2504 #define OP_MATH_CASE_FLOAT(op_name, t1, t2) (void)0
2505 #else
2506 #define OP_MATH_CASE_FLOAT(op_name, t1, t2) \
2507 case TYPES2(OP_MATH_TT_##t1, OP_MATH_TT_##t2): \
2509 mrb_float z = mrb_##t1(regs[a]) OP_MATH_OP_##op_name mrb_##t2(regs[a+1]); \
2510 SET_FLOAT_VALUE(mrb, regs[a], z); \
2512 break
2513 #endif
2514 #ifdef MRB_USE_BIGINT
2515 #define OP_MATH_OVERFLOW_INT(op,x,y) regs[a] = mrb_bint_##op##_ii(mrb,x,y)
2516 #else
2517 #define OP_MATH_OVERFLOW_INT(op,x,y) goto L_INT_OVERFLOW
2518 #endif
2519 #define OP_MATH_CASE_STRING_add() \
2520 case TYPES2(MRB_TT_STRING, MRB_TT_STRING): \
2521 regs[a] = mrb_str_plus(mrb, regs[a], regs[a+1]); \
2522 mrb_gc_arena_restore(mrb, ai); \
2523 break
2524 #define OP_MATH_CASE_STRING_sub() (void)0
2525 #define OP_MATH_CASE_STRING_mul() (void)0
2526 #define OP_MATH_OP_add +
2527 #define OP_MATH_OP_sub -
2528 #define OP_MATH_OP_mul *
2529 #define OP_MATH_TT_integer MRB_TT_INTEGER
2530 #define OP_MATH_TT_float MRB_TT_FLOAT
2532 CASE(OP_ADD, B) {
2533 OP_MATH(add);
2536 CASE(OP_SUB, B) {
2537 OP_MATH(sub);
2540 CASE(OP_MUL, B) {
2541 OP_MATH(mul);
2544 CASE(OP_DIV, B) {
2545 #ifndef MRB_NO_FLOAT
2546 mrb_float x, y, f;
2547 #endif
2549 /* need to check if op is overridden */
2550 switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) {
2551 case TYPES2(MRB_TT_INTEGER,MRB_TT_INTEGER):
2553 mrb_int x = mrb_integer(regs[a]);
2554 mrb_int y = mrb_integer(regs[a+1]);
2555 regs[a] = mrb_div_int_value(mrb, x, y);
2557 NEXT;
2558 #ifndef MRB_NO_FLOAT
2559 case TYPES2(MRB_TT_INTEGER,MRB_TT_FLOAT):
2560 x = (mrb_float)mrb_integer(regs[a]);
2561 y = mrb_float(regs[a+1]);
2562 break;
2563 case TYPES2(MRB_TT_FLOAT,MRB_TT_INTEGER):
2564 x = mrb_float(regs[a]);
2565 y = (mrb_float)mrb_integer(regs[a+1]);
2566 break;
2567 case TYPES2(MRB_TT_FLOAT,MRB_TT_FLOAT):
2568 x = mrb_float(regs[a]);
2569 y = mrb_float(regs[a+1]);
2570 break;
2571 #endif
2572 default:
2573 mid = MRB_OPSYM(div);
2574 goto L_SEND_SYM;
2577 #ifndef MRB_NO_FLOAT
2578 f = mrb_div_float(x, y);
2579 SET_FLOAT_VALUE(mrb, regs[a], f);
2580 #endif
2581 NEXT;
2584 #define OP_MATHI(op_name) \
2585 /* need to check if op is overridden */ \
2586 switch (mrb_type(regs[a])) { \
2587 OP_MATHI_CASE_INTEGER(op_name); \
2588 OP_MATHI_CASE_FLOAT(op_name); \
2589 default: \
2590 SET_INT_VALUE(mrb,regs[a+1], b); \
2591 mid = MRB_OPSYM(op_name); \
2592 goto L_SEND_SYM; \
2594 NEXT;
2595 #define OP_MATHI_CASE_INTEGER(op_name) \
2596 case MRB_TT_INTEGER: \
2598 mrb_int x = mrb_integer(regs[a]), y = (mrb_int)b, z; \
2599 if (mrb_int_##op_name##_overflow(x, y, &z)) { \
2600 OP_MATH_OVERFLOW_INT(op_name,x,y); \
2602 else \
2603 SET_INT_VALUE(mrb,regs[a], z); \
2605 break
2606 #ifdef MRB_NO_FLOAT
2607 #define OP_MATHI_CASE_FLOAT(op_name) (void)0
2608 #else
2609 #define OP_MATHI_CASE_FLOAT(op_name) \
2610 case MRB_TT_FLOAT: \
2612 mrb_float z = mrb_float(regs[a]) OP_MATH_OP_##op_name b; \
2613 SET_FLOAT_VALUE(mrb, regs[a], z); \
2615 break
2616 #endif
2618 CASE(OP_ADDI, BB) {
2619 OP_MATHI(add);
2622 CASE(OP_SUBI, BB) {
2623 OP_MATHI(sub);
2626 #define OP_CMP_BODY(op,v1,v2) (v1(regs[a]) op v2(regs[a+1]))
2628 #ifdef MRB_NO_FLOAT
2629 #define OP_CMP(op,sym) do {\
2630 int result;\
2631 /* need to check if - is overridden */\
2632 switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) {\
2633 case TYPES2(MRB_TT_INTEGER,MRB_TT_INTEGER):\
2634 result = OP_CMP_BODY(op,mrb_fixnum,mrb_fixnum);\
2635 break;\
2636 default:\
2637 mid = MRB_OPSYM(sym);\
2638 goto L_SEND_SYM;\
2640 if (result) {\
2641 SET_TRUE_VALUE(regs[a]);\
2643 else {\
2644 SET_FALSE_VALUE(regs[a]);\
2646 } while(0)
2647 #else
2648 #define OP_CMP(op, sym) do {\
2649 int result;\
2650 /* need to check if - is overridden */\
2651 switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) {\
2652 case TYPES2(MRB_TT_INTEGER,MRB_TT_INTEGER):\
2653 result = OP_CMP_BODY(op,mrb_integer,mrb_integer);\
2654 break;\
2655 case TYPES2(MRB_TT_INTEGER,MRB_TT_FLOAT):\
2656 result = OP_CMP_BODY(op,mrb_integer,mrb_float);\
2657 break;\
2658 case TYPES2(MRB_TT_FLOAT,MRB_TT_INTEGER):\
2659 result = OP_CMP_BODY(op,mrb_float,mrb_integer);\
2660 break;\
2661 case TYPES2(MRB_TT_FLOAT,MRB_TT_FLOAT):\
2662 result = OP_CMP_BODY(op,mrb_float,mrb_float);\
2663 break;\
2664 default:\
2665 mid = MRB_OPSYM(sym);\
2666 goto L_SEND_SYM;\
2668 if (result) {\
2669 SET_TRUE_VALUE(regs[a]);\
2671 else {\
2672 SET_FALSE_VALUE(regs[a]);\
2674 } while(0)
2675 #endif
2677 CASE(OP_EQ, B) {
2678 if (mrb_obj_eq(mrb, regs[a], regs[a+1])) {
2679 SET_TRUE_VALUE(regs[a]);
2681 else if (mrb_symbol_p(regs[a])) {
2682 SET_FALSE_VALUE(regs[a]);
2684 else {
2685 OP_CMP(==,eq);
2687 NEXT;
2690 CASE(OP_LT, B) {
2691 OP_CMP(<,lt);
2692 NEXT;
2695 CASE(OP_LE, B) {
2696 OP_CMP(<=,le);
2697 NEXT;
2700 CASE(OP_GT, B) {
2701 OP_CMP(>,gt);
2702 NEXT;
2705 CASE(OP_GE, B) {
2706 OP_CMP(>=,ge);
2707 NEXT;
2710 CASE(OP_ARRAY, BB) {
2711 regs[a] = ary_new_from_regs(mrb, b, a);
2712 mrb_gc_arena_restore(mrb, ai);
2713 NEXT;
2715 CASE(OP_ARRAY2, BBB) {
2716 regs[a] = ary_new_from_regs(mrb, c, b);
2717 mrb_gc_arena_restore(mrb, ai);
2718 NEXT;
2721 CASE(OP_ARYCAT, B) {
2722 mrb_value splat = mrb_ary_splat(mrb, regs[a+1]);
2723 if (mrb_nil_p(regs[a])) {
2724 regs[a] = splat;
2726 else {
2727 mrb_assert(mrb_array_p(regs[a]));
2728 mrb_ary_concat(mrb, regs[a], splat);
2730 mrb_gc_arena_restore(mrb, ai);
2731 NEXT;
2734 CASE(OP_ARYPUSH, BB) {
2735 mrb_assert(mrb_array_p(regs[a]));
2736 for (mrb_int i=0; i<b; i++) {
2737 mrb_ary_push(mrb, regs[a], regs[a+i+1]);
2739 NEXT;
2742 CASE(OP_ARYSPLAT, B) {
2743 mrb_value ary = mrb_ary_splat(mrb, regs[a]);
2744 regs[a] = ary;
2745 mrb_gc_arena_restore(mrb, ai);
2746 NEXT;
2749 CASE(OP_AREF, BBB) {
2750 mrb_value v = regs[b];
2752 if (!mrb_array_p(v)) {
2753 if (c == 0) {
2754 regs[a] = v;
2756 else {
2757 SET_NIL_VALUE(regs[a]);
2760 else {
2761 v = mrb_ary_ref(mrb, v, c);
2762 regs[a] = v;
2764 NEXT;
2767 CASE(OP_ASET, BBB) {
2768 mrb_assert(mrb_array_p(regs[a]));
2769 mrb_ary_set(mrb, regs[b], c, regs[a]);
2770 NEXT;
2773 CASE(OP_APOST, BBB) {
2774 mrb_value v = regs[a];
2775 int pre = b;
2776 int post = c;
2777 struct RArray *ary;
2778 int len, idx;
2780 if (!mrb_array_p(v)) {
2781 v = ary_new_from_regs(mrb, 1, a);
2783 ary = mrb_ary_ptr(v);
2784 len = (int)ARY_LEN(ary);
2785 if (len > pre + post) {
2786 v = mrb_ary_new_from_values(mrb, len - pre - post, ARY_PTR(ary)+pre);
2787 regs[a++] = v;
2788 while (post--) {
2789 regs[a++] = ARY_PTR(ary)[len-post-1];
2792 else {
2793 v = mrb_ary_new_capa(mrb, 0);
2794 regs[a++] = v;
2795 for (idx=0; idx+pre<len; idx++) {
2796 regs[a+idx] = ARY_PTR(ary)[pre+idx];
2798 while (idx < post) {
2799 SET_NIL_VALUE(regs[a+idx]);
2800 idx++;
2803 mrb_gc_arena_restore(mrb, ai);
2804 NEXT;
2807 CASE(OP_INTERN, B) {
2808 mrb_assert(mrb_string_p(regs[a]));
2809 mrb_sym sym = mrb_intern_str(mrb, regs[a]);
2810 regs[a] = mrb_symbol_value(sym);
2811 NEXT;
2814 CASE(OP_SYMBOL, BB) {
2815 size_t len;
2816 mrb_sym sym;
2818 mrb_assert((pool[b].tt&IREP_TT_NFLAG)==0);
2819 len = pool[b].tt >> 2;
2820 if (pool[b].tt & IREP_TT_SFLAG) {
2821 sym = mrb_intern_static(mrb, pool[b].u.str, len);
2823 else {
2824 sym = mrb_intern(mrb, pool[b].u.str, len);
2826 regs[a] = mrb_symbol_value(sym);
2827 NEXT;
2830 CASE(OP_STRING, BB) {
2831 mrb_int len;
2833 mrb_assert((pool[b].tt&IREP_TT_NFLAG)==0);
2834 len = pool[b].tt >> 2;
2835 if (pool[b].tt & IREP_TT_SFLAG) {
2836 regs[a] = mrb_str_new_static(mrb, pool[b].u.str, len);
2838 else {
2839 regs[a] = mrb_str_new(mrb, pool[b].u.str, len);
2841 mrb_gc_arena_restore(mrb, ai);
2842 NEXT;
2845 CASE(OP_STRCAT, B) {
2846 mrb_assert(mrb_string_p(regs[a]));
2847 mrb_str_concat(mrb, regs[a], regs[a+1]);
2848 NEXT;
2851 CASE(OP_HASH, BB) {
2852 mrb_value hash = mrb_hash_new_capa(mrb, b);
2853 int lim = a+b*2;
2855 for (int i=a; i<lim; i+=2) {
2856 mrb_hash_set(mrb, hash, regs[i], regs[i+1]);
2858 regs[a] = hash;
2859 mrb_gc_arena_restore(mrb, ai);
2860 NEXT;
2863 CASE(OP_HASHADD, BB) {
2864 mrb_value hash;
2865 int lim = a+b*2+1;
2867 hash = regs[a];
2868 mrb_ensure_hash_type(mrb, hash);
2869 for (int i=a+1; i<lim; i+=2) {
2870 mrb_hash_set(mrb, hash, regs[i], regs[i+1]);
2872 mrb_gc_arena_restore(mrb, ai);
2873 NEXT;
2875 CASE(OP_HASHCAT, B) {
2876 mrb_value hash = regs[a];
2878 mrb_assert(mrb_hash_p(hash));
2879 mrb_hash_merge(mrb, hash, regs[a+1]);
2880 mrb_gc_arena_restore(mrb, ai);
2881 NEXT;
2884 CASE(OP_LAMBDA, BB)
2885 c = OP_L_LAMBDA;
2886 L_MAKE_LAMBDA:
2888 struct RProc *p;
2889 const mrb_irep *nirep = irep->reps[b];
2891 if (c & OP_L_CAPTURE) {
2892 p = mrb_closure_new(mrb, nirep);
2894 else {
2895 p = mrb_proc_new(mrb, nirep);
2896 p->flags |= MRB_PROC_SCOPE;
2898 if (c & OP_L_STRICT) p->flags |= MRB_PROC_STRICT;
2899 regs[a] = mrb_obj_value(p);
2900 mrb_gc_arena_restore(mrb, ai);
2901 NEXT;
2903 CASE(OP_BLOCK, BB) {
2904 c = OP_L_BLOCK;
2905 goto L_MAKE_LAMBDA;
2907 CASE(OP_METHOD, BB) {
2908 c = OP_L_METHOD;
2909 goto L_MAKE_LAMBDA;
2912 CASE(OP_RANGE_INC, B) {
2913 mrb_value v = mrb_range_new(mrb, regs[a], regs[a+1], FALSE);
2914 regs[a] = v;
2915 mrb_gc_arena_restore(mrb, ai);
2916 NEXT;
2919 CASE(OP_RANGE_EXC, B) {
2920 mrb_value v = mrb_range_new(mrb, regs[a], regs[a+1], TRUE);
2921 regs[a] = v;
2922 mrb_gc_arena_restore(mrb, ai);
2923 NEXT;
2926 CASE(OP_OCLASS, B) {
2927 regs[a] = mrb_obj_value(mrb->object_class);
2928 NEXT;
2931 CASE(OP_CLASS, BB) {
2932 struct RClass *c = 0, *baseclass;
2933 mrb_value base, super;
2934 mrb_sym id = syms[b];
2936 base = regs[a];
2937 super = regs[a+1];
2938 if (mrb_nil_p(base)) {
2939 baseclass = MRB_PROC_TARGET_CLASS(mrb->c->ci->proc);
2940 if (!baseclass) baseclass = mrb->object_class;
2941 base = mrb_obj_value(baseclass);
2943 c = mrb_vm_define_class(mrb, base, super, id);
2944 regs[a] = mrb_obj_value(c);
2945 mrb_gc_arena_restore(mrb, ai);
2946 NEXT;
2949 CASE(OP_MODULE, BB) {
2950 struct RClass *cls = 0, *baseclass;
2951 mrb_value base;
2952 mrb_sym id = syms[b];
2954 base = regs[a];
2955 if (mrb_nil_p(base)) {
2956 baseclass = MRB_PROC_TARGET_CLASS(mrb->c->ci->proc);
2957 if (!baseclass) baseclass = mrb->object_class;
2958 base = mrb_obj_value(baseclass);
2960 cls = mrb_vm_define_module(mrb, base, id);
2961 regs[a] = mrb_obj_value(cls);
2962 mrb_gc_arena_restore(mrb, ai);
2963 NEXT;
2966 CASE(OP_EXEC, BB)
2968 mrb_value recv = regs[a];
2969 struct RProc *p;
2970 const mrb_irep *nirep = irep->reps[b];
2972 /* prepare closure */
2973 p = mrb_proc_new(mrb, nirep);
2974 p->c = NULL;
2975 mrb_field_write_barrier(mrb, (struct RBasic*)p, (struct RBasic*)proc);
2976 MRB_PROC_SET_TARGET_CLASS(p, mrb_class_ptr(recv));
2977 p->flags |= MRB_PROC_SCOPE;
2979 /* prepare call stack */
2980 cipush(mrb, a, 0, mrb_class_ptr(recv), p, NULL, 0, 0);
2982 irep = p->body.irep;
2983 pool = irep->pool;
2984 syms = irep->syms;
2985 stack_extend(mrb, irep->nregs);
2986 stack_clear(regs+1, irep->nregs-1);
2987 pc = irep->iseq;
2988 JUMP;
2991 CASE(OP_DEF, BB) {
2992 struct RClass *target = mrb_class_ptr(regs[a]);
2993 struct RProc *p = mrb_proc_ptr(regs[a+1]);
2994 mrb_method_t m;
2995 mrb_sym mid = syms[b];
2997 MRB_METHOD_FROM_PROC(m, p);
2998 mrb_define_method_raw(mrb, target, mid, m);
2999 mrb_method_added(mrb, target, mid);
3000 mrb_gc_arena_restore(mrb, ai);
3001 regs[a] = mrb_symbol_value(mid);
3002 NEXT;
3005 CASE(OP_SCLASS, B) {
3006 regs[a] = mrb_singleton_class(mrb, regs[a]);
3007 mrb_gc_arena_restore(mrb, ai);
3008 NEXT;
3011 CASE(OP_TCLASS, B) {
3012 struct RClass *target = check_target_class(mrb);
3013 if (!target) goto L_RAISE;
3014 regs[a] = mrb_obj_value(target);
3015 NEXT;
3018 CASE(OP_ALIAS, BB) {
3019 struct RClass *target = check_target_class(mrb);
3021 if (!target) goto L_RAISE;
3022 mrb_alias_method(mrb, target, syms[a], syms[b]);
3023 mrb_method_added(mrb, target, syms[a]);
3024 NEXT;
3026 CASE(OP_UNDEF, B) {
3027 struct RClass *target = check_target_class(mrb);
3029 if (!target) goto L_RAISE;
3030 mrb_undef_method_id(mrb, target, syms[a]);
3031 NEXT;
3034 CASE(OP_DEBUG, Z) {
3035 FETCH_BBB();
3036 #ifdef MRB_USE_DEBUG_HOOK
3037 mrb->debug_op_hook(mrb, irep, pc, regs);
3038 #else
3039 #ifndef MRB_NO_STDIO
3040 printf("OP_DEBUG %d %d %d\n", a, b, c);
3041 #else
3042 abort();
3043 #endif
3044 #endif
3045 NEXT;
3048 CASE(OP_ERR, B) {
3049 size_t len = pool[a].tt >> 2;
3050 mrb_value exc;
3052 mrb_assert((pool[a].tt&IREP_TT_NFLAG)==0);
3053 exc = mrb_exc_new(mrb, E_LOCALJUMP_ERROR, pool[a].u.str, len);
3054 RAISE_EXC(mrb, exc);
3057 CASE(OP_EXT1, Z) {
3058 insn = READ_B();
3059 switch (insn) {
3060 #define OPCODE(insn,ops) case OP_ ## insn: FETCH_ ## ops ## _1(); mrb->c->ci->pc = pc; goto L_OP_ ## insn ## _BODY;
3061 #include <mruby/ops.h>
3062 #undef OPCODE
3064 pc--;
3065 NEXT;
3067 CASE(OP_EXT2, Z) {
3068 insn = READ_B();
3069 switch (insn) {
3070 #define OPCODE(insn,ops) case OP_ ## insn: FETCH_ ## ops ## _2(); mrb->c->ci->pc = pc; goto L_OP_ ## insn ## _BODY;
3071 #include <mruby/ops.h>
3072 #undef OPCODE
3074 pc--;
3075 NEXT;
3077 CASE(OP_EXT3, Z) {
3078 insn = READ_B();
3079 switch (insn) {
3080 #define OPCODE(insn,ops) case OP_ ## insn: FETCH_ ## ops ## _3(); mrb->c->ci->pc = pc; goto L_OP_ ## insn ## _BODY;
3081 #include <mruby/ops.h>
3082 #undef OPCODE
3084 pc--;
3085 NEXT;
3088 CASE(OP_STOP, Z) {
3089 /* stop VM */
3090 mrb_value v;
3091 v = mrb->exc ? mrb_obj_value(mrb->exc) : mrb_nil_value();
3092 CHECKPOINT_RESTORE(RBREAK_TAG_STOP) {
3093 struct RBreak *brk = (struct RBreak*)mrb->exc;
3094 v = mrb_break_value_get(brk);
3096 CHECKPOINT_MAIN(RBREAK_TAG_STOP) {
3097 UNWIND_ENSURE(mrb, mrb->c->ci, mrb->c->ci->pc, RBREAK_TAG_STOP, mrb->c->ci, v);
3099 CHECKPOINT_END(RBREAK_TAG_STOP);
3100 mrb->jmp = prev_jmp;
3101 if (!mrb_nil_p(v)) {
3102 mrb->exc = mrb_obj_ptr(v);
3103 return v;
3105 mrb->exc = NULL;
3106 return regs[irep->nlocals];
3109 END_DISPATCH;
3110 #undef regs
3112 MRB_CATCH(&c_jmp) {
3113 mrb_callinfo *ci = mrb->c->ci;
3114 while (ci > mrb->c->cibase && ci->cci == CINFO_DIRECT) {
3115 ci = cipop(mrb);
3117 pc = ci->pc;
3118 goto RETRY_TRY_BLOCK;
3120 MRB_END_EXC(&c_jmp);
3123 static mrb_value
3124 mrb_run(mrb_state *mrb, const struct RProc *proc, mrb_value self)
3126 return mrb_vm_run(mrb, proc, self, ci_bidx(mrb->c->ci) + 1);
3129 MRB_API mrb_value
3130 mrb_top_run(mrb_state *mrb, const struct RProc *proc, mrb_value self, mrb_int stack_keep)
3132 if (mrb->c->cibase && mrb->c->ci > mrb->c->cibase) {
3133 cipush(mrb, 0, CINFO_SKIP, mrb->object_class, NULL, NULL, 0, 0);
3135 return mrb_vm_run(mrb, proc, self, stack_keep);