2 ** vm.c - virtual machine for mruby
4 ** See Copyright Notice in mruby.h
8 #include <mruby/array.h>
9 #include <mruby/class.h>
10 #include <mruby/hash.h>
11 #include <mruby/irep.h>
12 #include <mruby/numeric.h>
13 #include <mruby/proc.h>
14 #include <mruby/range.h>
15 #include <mruby/string.h>
16 #include <mruby/variable.h>
17 #include <mruby/error.h>
18 #include <mruby/opcode.h>
19 #include "value_array.h"
20 #include <mruby/throw.h>
21 #include <mruby/dump.h>
22 #include <mruby/internal.h>
23 #include <mruby/presym.h>
26 #if defined(__cplusplus)
30 #if defined(__cplusplus)
35 #define STACK_INIT_SIZE 128
36 #define CALLINFO_INIT_SIZE 32
38 /* Define amount of linear stack growth. */
39 #ifndef MRB_STACK_GROWTH
40 #define MRB_STACK_GROWTH 128
43 /* Maximum recursive depth. Should be set lower on memory constrained systems. */
45 #if __has_feature(address_sanitizer) && !defined(__SANITIZE_ADDRESS__)
46 #define __SANITIZE_ADDRESS__
50 #ifndef MRB_CALL_LEVEL_MAX
51 #if defined(__SANITIZE_ADDRESS__)
52 #define MRB_CALL_LEVEL_MAX 128
54 #define MRB_CALL_LEVEL_MAX 512
58 /* Maximum stack depth. Should be set lower on memory constrained systems.
59 The value below allows about 60000 recursive calls in the simplest case. */
61 #define MRB_STACK_MAX (0x40000 - MRB_STACK_GROWTH)
71 #ifndef MRB_GC_FIXED_ARENA
73 mrb_gc_arena_shrink(mrb_state
*mrb
, int idx
)
75 mrb_gc
*gc
= &mrb
->gc
;
76 int capa
= gc
->arena_capa
;
81 if (capa
< MRB_GC_ARENA_SIZE
) {
82 capa
= MRB_GC_ARENA_SIZE
;
84 if (capa
!= gc
->arena_capa
) {
85 gc
->arena
= (struct RBasic
**)mrb_realloc(mrb
, gc
->arena
, sizeof(struct RBasic
*)*capa
);
86 gc
->arena_capa
= capa
;
91 #define mrb_gc_arena_shrink(mrb, idx) mrb_gc_arena_restore(mrb, idx)
94 #define CALL_MAXARGS 15
95 #define CALL_VARARGS (CALL_MAXARGS<<4 | CALL_MAXARGS)
98 stack_clear(mrb_value
*from
, size_t count
)
100 while (count
-- > 0) {
101 SET_NIL_VALUE(*from
);
107 stack_copy(mrb_value
*dst
, const mrb_value
*src
, size_t size
)
110 memcpy(dst
, src
, sizeof(mrb_value
)*size
);
114 stack_init(mrb_state
*mrb
)
116 struct mrb_context
*c
= mrb
->c
;
118 /* mrb_assert(mrb->stack == NULL); */
119 c
->stbase
= (mrb_value
*)mrb_malloc(mrb
, STACK_INIT_SIZE
* sizeof(mrb_value
));
120 c
->stend
= c
->stbase
+ STACK_INIT_SIZE
;
122 /* mrb_assert(ci == NULL); */
123 static const mrb_callinfo ci_zero
= { 0 };
124 c
->cibase
= (mrb_callinfo
*)mrb_malloc(mrb
, CALLINFO_INIT_SIZE
* sizeof(mrb_callinfo
));
125 c
->ciend
= c
->cibase
+ CALLINFO_INIT_SIZE
;
126 c
->cibase
[0] = ci_zero
;
128 c
->ci
->u
.target_class
= mrb
->object_class
;
129 c
->ci
->stack
= c
->stbase
;
133 envadjust(mrb_state
*mrb
, mrb_value
*oldbase
, mrb_value
*newbase
)
135 mrb_callinfo
*ci
= mrb
->c
->cibase
;
136 ptrdiff_t delta
= newbase
- oldbase
;
138 if (delta
== 0) return;
139 while (ci
<= mrb
->c
->ci
) {
140 struct REnv
*e
= mrb_vm_ci_env(ci
);
143 mrb_assert(e
->cxt
== mrb
->c
&& MRB_ENV_ONSTACK_P(e
));
144 mrb_assert(e
->stack
== ci
->stack
);
/** def rec; $deep += 1; if $deep > 1000; return 0; end; rec; end **/
156 stack_extend_alloc(mrb_state
*mrb
, mrb_int room
)
158 mrb_value
*oldbase
= mrb
->c
->stbase
;
160 size_t oldsize
= mrb
->c
->stend
- mrb
->c
->stbase
;
161 size_t size
= oldsize
;
162 size_t off
= mrb
->c
->ci
->stack
? mrb
->c
->stend
- mrb
->c
->ci
->stack
: 0;
164 if (off
> size
) size
= off
;
165 #ifdef MRB_STACK_EXTEND_DOUBLING
166 if ((size_t)room
<= size
)
171 /* Use linear stack growth.
172 It is slightly slower than doubling the stack space,
173 but it saves memory on small devices. */
174 if (room
<= MRB_STACK_GROWTH
)
175 size
+= MRB_STACK_GROWTH
;
180 newstack
= (mrb_value
*)mrb_realloc(mrb
, mrb
->c
->stbase
, sizeof(mrb_value
) * size
);
181 stack_clear(&(newstack
[oldsize
]), size
- oldsize
);
182 envadjust(mrb
, oldbase
, newstack
);
183 mrb
->c
->stbase
= newstack
;
184 mrb
->c
->stend
= mrb
->c
->stbase
+ size
;
186 /* Raise an exception if the new stack size will be too large,
187 to prevent infinite recursion. However, do this only after resizing the stack, so mrb_raise has stack space to work with. */
188 if (size
> MRB_STACK_MAX
) {
189 mrb_exc_raise(mrb
, mrb_obj_value(mrb
->stack_err
));
194 stack_extend(mrb_state
*mrb
, mrb_int room
)
196 if (!mrb
->c
->ci
->stack
|| mrb
->c
->ci
->stack
+ room
>= mrb
->c
->stend
) {
197 stack_extend_alloc(mrb
, room
);
202 mrb_stack_extend(mrb_state
*mrb
, mrb_int room
)
204 stack_extend(mrb
, room
);
208 stack_extend_adjust(mrb_state
*mrb
, mrb_int room
, const mrb_value
**argp
)
210 const struct mrb_context
*c
= mrb
->c
;
211 ptrdiff_t voff
= *argp
- c
->stbase
;
213 if (voff
< 0 || voff
>= c
->stend
- c
->stbase
) {
214 stack_extend(mrb
, room
);
217 stack_extend(mrb
, room
);
218 *argp
= c
->stbase
+ voff
;
222 static inline struct REnv
*
223 uvenv(mrb_state
*mrb
, mrb_int up
)
225 const struct RProc
*proc
= mrb
->c
->ci
->proc
;
230 if (!proc
) return NULL
;
232 e
= MRB_PROC_ENV(proc
);
233 if (e
) return e
; /* proc has enclosed env */
237 static inline const struct RProc
*
238 top_proc(mrb_state
*mrb
, const struct RProc
*proc
)
240 while (proc
->upper
) {
241 if (MRB_PROC_SCOPE_P(proc
) || MRB_PROC_STRICT_P(proc
))
248 #define CI_PROC_SET(ci, p) do {\
250 mrb_assert(!p || !MRB_PROC_ALIAS_P(p));\
251 ci->pc = (p && !MRB_PROC_CFUNC_P(p) && p->body.irep) ? p->body.irep->iseq : NULL;\
255 mrb_vm_ci_proc_set(mrb_callinfo
*ci
, const struct RProc
*p
)
260 #define CI_TARGET_CLASS(ci) (((ci)->u.env && (ci)->u.env->tt == MRB_TT_ENV)? (ci)->u.env->c : (ci)->u.target_class)
263 mrb_vm_ci_target_class(const mrb_callinfo
*ci
)
265 return CI_TARGET_CLASS(ci
);
269 mrb_vm_ci_target_class_set(mrb_callinfo
*ci
, struct RClass
*tc
)
271 struct REnv
*e
= ci
->u
.env
;
272 if (e
&& e
->tt
== MRB_TT_ENV
) {
276 ci
->u
.target_class
= tc
;
280 #define CI_ENV(ci) (((ci)->u.env && (ci)->u.env->tt == MRB_TT_ENV)? (ci)->u.env : NULL)
283 mrb_vm_ci_env(const mrb_callinfo
*ci
)
289 ci_env_set(mrb_callinfo
*ci
, struct REnv
*e
)
292 if (ci
->u
.env
->tt
== MRB_TT_ENV
) {
298 ci
->u
.target_class
= ci
->u
.env
->c
;
302 e
->c
= ci
->u
.target_class
;
312 mrb_vm_ci_env_set(mrb_callinfo
*ci
, struct REnv
*e
)
318 mrb_vm_ci_env_clear(mrb_state
*mrb
, mrb_callinfo
*ci
)
320 struct REnv
*e
= ci
->u
.env
;
321 if (e
&& e
->tt
== MRB_TT_ENV
) {
322 ci
->u
.target_class
= e
->c
;
323 mrb_env_unshare(mrb
, e
, FALSE
);
327 #define CINFO_NONE 0 // called method from mruby VM (without C functions)
328 #define CINFO_SKIP 1 // ignited mruby VM from C
329 #define CINFO_DIRECT 2 // called method from C
330 #define CINFO_RESUMED 3 // resumed by `Fiber.yield` (probably the main call is `mrb_fiber_resume()`)
332 #define BLK_PTR(b) ((mrb_proc_p(b)) ? mrb_proc_ptr(b) : NULL)
334 static inline mrb_callinfo
*
335 cipush(mrb_state
*mrb
, mrb_int push_stacks
, uint8_t cci
, struct RClass
*target_class
,
336 const struct RProc
*proc
, struct RProc
*blk
, mrb_sym mid
, uint16_t argc
)
338 struct mrb_context
*c
= mrb
->c
;
339 mrb_callinfo
*ci
= c
->ci
;
341 if (ci
+ 1 == c
->ciend
) {
342 ptrdiff_t size
= ci
- c
->cibase
;
344 if (size
> MRB_CALL_LEVEL_MAX
) {
345 mrb_exc_raise(mrb
, mrb_obj_value(mrb
->stack_err
));
347 c
->cibase
= (mrb_callinfo
*)mrb_realloc(mrb
, c
->cibase
, sizeof(mrb_callinfo
)*size
*2);
348 c
->ci
= c
->cibase
+ size
;
349 c
->ciend
= c
->cibase
+ size
* 2;
353 if (blk
&& (blk
->flags
& (MRB_PROC_CFUNC_FL
| MRB_PROC_ENVSET
| MRB_PROC_ORPHAN
)) == MRB_PROC_ENVSET
&&
354 blk
->e
.env
== ci
[-1].u
.env
) {
355 mrb_assert(blk
->color
!= MRB_GC_RED
); // no exist red object with env set
356 ci
->flags
= MRB_CI_COMPANION_BLOCK
;
357 blk
->flags
|= MRB_PROC_ORPHAN
;
360 CI_PROC_SET(ci
, proc
);
361 ci
->stack
= ci
[-1].stack
+ push_stacks
;
363 ci
->nk
= (argc
>>4) & 0xf;
365 ci
->u
.target_class
= target_class
;
371 fiber_terminate(mrb_state
*mrb
, struct mrb_context
*c
, mrb_callinfo
*ci
)
373 mrb_assert(c
!= mrb
->root_c
);
375 struct REnv
*env
= CI_ENV(ci
);
376 mrb_assert(env
== NULL
|| MRB_ENV_LEN(env
) <= c
->stend
- ci
->stack
);
378 c
->status
= MRB_FIBER_TERMINATED
;
379 mrb_free(mrb
, c
->cibase
);
380 c
->cibase
= c
->ciend
= c
->ci
= NULL
;
381 mrb_value
*stack
= c
->stbase
;
382 c
->stbase
= c
->stend
= NULL
;
385 mrb_free(mrb
, stack
);
388 size_t len
= (size_t)MRB_ENV_LEN(env
);
392 mrb_free(mrb
, stack
);
395 mrb_assert(stack
== env
->stack
);
396 mrb_write_barrier(mrb
, (struct RBasic
*)env
);
398 // don't call MRB_ENV_CLOSE() before mrb_realloc().
399 // the reason is that env->stack may be freed by mrb_realloc() if MRB_DEBUG + MRB_GC_STRESS are enabled.
400 // realloc() on a freed heap will cause double-free.
402 stack
= (mrb_value
*)mrb_realloc(mrb
, stack
, len
* sizeof(mrb_value
));
403 if (mrb_object_dead_p(mrb
, (struct RBasic
*)env
)) {
404 mrb_free(mrb
, stack
);
413 /* fiber termination should automatic yield or transfer to root */
415 if (!mrb
->c
) mrb
->c
= mrb
->root_c
;
417 mrb
->c
->status
= MRB_FIBER_RUNNING
;
421 mrb_env_unshare(mrb_state
*mrb
, struct REnv
*e
, mrb_bool noraise
)
423 if (e
== NULL
) return TRUE
;
424 if (!MRB_ENV_ONSTACK_P(e
)) return TRUE
;
426 size_t len
= (size_t)MRB_ENV_LEN(e
);
433 size_t live
= mrb
->gc
.live
;
434 mrb_value
*p
= (mrb_value
*)mrb_malloc_simple(mrb
, sizeof(mrb_value
)*len
);
435 if (live
!= mrb
->gc
.live
&& mrb_object_dead_p(mrb
, (struct RBasic
*)e
)) {
436 // The e object is now subject to GC inside mrb_malloc_simple().
437 // Moreover, if NULL is returned due to mrb_malloc_simple() failure, simply ignore it.
442 stack_copy(p
, e
->stack
, len
);
445 mrb_write_barrier(mrb
, (struct RBasic
*)e
);
451 MRB_ENV_SET_LEN(e
, 0);
452 MRB_ENV_SET_BIDX(e
, 0);
454 mrb_exc_raise(mrb
, mrb_obj_value(mrb
->nomem_err
));
460 static inline mrb_callinfo
*
461 cipop(mrb_state
*mrb
)
463 struct mrb_context
*c
= mrb
->c
;
464 mrb_callinfo
*ci
= c
->ci
;
465 struct REnv
*env
= CI_ENV(ci
);
467 ci_env_set(ci
, NULL
); // make possible to free env by GC if not needed
468 if (env
&& !mrb_env_unshare(mrb
, env
, TRUE
)) {
469 c
->ci
--; // exceptions are handled at the method caller; see #3087
470 mrb_exc_raise(mrb
, mrb_obj_value(mrb
->nomem_err
));
477 mrb_protect_error(mrb_state
*mrb
, mrb_protect_error_func
*body
, void *userdata
, mrb_bool
*error
)
479 struct mrb_jmpbuf
*prev_jmp
= mrb
->jmp
;
480 struct mrb_jmpbuf c_jmp
;
482 int ai
= mrb_gc_arena_save(mrb
);
483 const struct mrb_context
*c
= mrb
->c
;
484 ptrdiff_t ci_index
= c
->ci
- c
->cibase
;
486 if (error
) { *error
= FALSE
; }
490 result
= body(mrb
, userdata
);
495 result
= mrb_obj_value(mrb
->exc
);
497 if (error
) { *error
= TRUE
; }
499 while (c
->ci
- c
->cibase
> ci_index
) {
504 // It was probably switched by mrb_fiber_resume().
505 // Simply destroy all successive CINFO_DIRECTs once the fiber has been switched.
507 while (c
->ci
> c
->cibase
&& c
->ci
->cci
== CINFO_DIRECT
) {
514 mrb_gc_arena_restore(mrb
, ai
);
515 mrb_gc_protect(mrb
, result
);
519 void mrb_exc_set(mrb_state
*mrb
, mrb_value exc
);
520 static mrb_value
mrb_run(mrb_state
*mrb
, const struct RProc
* proc
, mrb_value self
);
522 #ifndef MRB_FUNCALL_ARGC_MAX
523 #define MRB_FUNCALL_ARGC_MAX 16
527 mrb_funcall(mrb_state
*mrb
, mrb_value self
, const char *name
, mrb_int argc
, ...)
529 mrb_value argv
[MRB_FUNCALL_ARGC_MAX
];
531 mrb_sym mid
= mrb_intern_cstr(mrb
, name
);
533 if (argc
> MRB_FUNCALL_ARGC_MAX
) {
534 mrb_raise(mrb
, E_ARGUMENT_ERROR
, "Too long arguments. (limit=" MRB_STRINGIZE(MRB_FUNCALL_ARGC_MAX
) ")");
538 for (mrb_int i
= 0; i
< argc
; i
++) {
539 argv
[i
] = va_arg(ap
, mrb_value
);
542 return mrb_funcall_argv(mrb
, self
, mid
, argc
, argv
);
546 mrb_funcall_id(mrb_state
*mrb
, mrb_value self
, mrb_sym mid
, mrb_int argc
, ...)
548 mrb_value argv
[MRB_FUNCALL_ARGC_MAX
];
551 if (argc
> MRB_FUNCALL_ARGC_MAX
) {
552 mrb_raise(mrb
, E_ARGUMENT_ERROR
, "Too long arguments. (limit=" MRB_STRINGIZE(MRB_FUNCALL_ARGC_MAX
) ")");
556 for (mrb_int i
= 0; i
< argc
; i
++) {
557 argv
[i
] = va_arg(ap
, mrb_value
);
560 return mrb_funcall_argv(mrb
, self
, mid
, argc
, argv
);
564 mrb_ci_kidx(const mrb_callinfo
*ci
)
566 if (ci
->nk
== 0) return -1;
567 return (ci
->n
== CALL_MAXARGS
) ? 2 : ci
->n
+ 1;
570 static inline mrb_int
571 mrb_bidx(uint8_t n
, uint8_t k
)
576 return n
+ 1; /* self + args + kargs */
579 static inline mrb_int
580 ci_bidx(mrb_callinfo
*ci
)
582 return mrb_bidx(ci
->n
, ci
->nk
);
586 mrb_ci_bidx(mrb_callinfo
*ci
)
592 mrb_ci_nregs(mrb_callinfo
*ci
)
594 const struct RProc
*p
;
597 mrb_int nregs
= ci_bidx(ci
) + 1; /* self + args + kargs + blk */
599 if (p
&& !MRB_PROC_CFUNC_P(p
) && p
->body
.irep
&& p
->body
.irep
->nregs
> nregs
) {
600 return p
->body
.irep
->nregs
;
605 mrb_value
mrb_obj_missing(mrb_state
*mrb
, mrb_value mod
);
608 prepare_missing(mrb_state
*mrb
, mrb_callinfo
*ci
, mrb_value recv
, mrb_sym mid
, mrb_value blk
, mrb_bool super
)
610 mrb_sym missing
= MRB_SYM(method_missing
);
611 mrb_value
*argv
= &ci
->stack
[1];
615 /* pack positional arguments */
616 if (ci
->n
== 15) args
= argv
[0];
617 else args
= mrb_ary_new_from_values(mrb
, ci
->n
, argv
);
619 if (mrb_func_basic_p(mrb
, recv
, missing
, mrb_obj_missing
)) {
621 if (super
) mrb_no_method_error(mrb
, mid
, args
, "no superclass method '%n'", mid
);
622 else mrb_method_missing(mrb
, mid
, recv
, args
);
625 if (mid
!= missing
) {
626 ci
->u
.target_class
= mrb_class(mrb
, recv
);
628 m
= mrb_vm_find_method(mrb
, ci
->u
.target_class
, &ci
->u
.target_class
, missing
);
629 if (MRB_METHOD_UNDEF_P(m
)) goto method_missing
; /* just in case */
630 stack_extend(mrb
, 4);
632 argv
= &ci
->stack
[1]; /* maybe reallocated */
638 mrb_assert(ci
->nk
== 15);
639 argv
[1] = argv
[ci
->n
];
642 ci
->n
= CALL_MAXARGS
;
643 /* ci->nk is already set to zero or CALL_MAXARGS */
644 mrb_ary_unshift(mrb
, args
, mrb_symbol_value(mid
));
650 funcall_args_capture(mrb_state
*mrb
, int stoff
, mrb_int argc
, const mrb_value
*argv
, mrb_value block
, mrb_callinfo
*ci
)
652 if (argc
< 0 || argc
> INT32_MAX
) {
653 mrb_raisef(mrb
, E_ARGUMENT_ERROR
, "negative or too big argc for funcall (%i)", argc
);
656 ci
->nk
= 0; /* funcall does not support keyword arguments */
657 if (argc
< CALL_MAXARGS
) {
658 mrb_int extends
= stoff
+ argc
+ 2 /* self + block */;
659 stack_extend_adjust(mrb
, extends
, &argv
);
661 mrb_value
*args
= mrb
->c
->ci
->stack
+ stoff
+ 1 /* self */;
662 stack_copy(args
, argv
, argc
);
664 ci
->n
= (uint8_t)argc
;
667 int extends
= stoff
+ 3 /* self + splat + block */;
668 stack_extend_adjust(mrb
, extends
, &argv
);
670 mrb_value
*args
= mrb
->c
->ci
->stack
+ stoff
+ 1 /* self */;
671 args
[0] = mrb_ary_new_from_values(mrb
, argc
, argv
);
673 ci
->n
= CALL_MAXARGS
;
677 static inline mrb_value
678 ensure_block(mrb_state
*mrb
, mrb_value blk
)
680 if (!mrb_nil_p(blk
) && !mrb_proc_p(blk
)) {
681 blk
= mrb_type_convert(mrb
, blk
, MRB_TT_PROC
, MRB_SYM(to_proc
));
682 /* The stack might have been reallocated during mrb_type_convert(), see #3622 */
688 mrb_funcall_with_block(mrb_state
*mrb
, mrb_value self
, mrb_sym mid
, mrb_int argc
, const mrb_value
*argv
, mrb_value blk
)
691 int ai
= mrb_gc_arena_save(mrb
);
694 struct mrb_jmpbuf c_jmp
;
695 ptrdiff_t nth_ci
= mrb
->c
->ci
- mrb
->c
->cibase
;
700 val
= mrb_funcall_with_block(mrb
, self
, mid
, argc
, argv
, blk
);
703 MRB_CATCH(&c_jmp
) { /* error */
704 while (nth_ci
< (mrb
->c
->ci
- mrb
->c
->cibase
)) {
708 val
= mrb_obj_value(mrb
->exc
);
715 mrb_callinfo
*ci
= mrb
->c
->ci
;
716 mrb_int n
= mrb_ci_nregs(ci
);
718 if (!mrb
->c
->stbase
) {
721 if (ci
- mrb
->c
->cibase
> MRB_CALL_LEVEL_MAX
) {
722 mrb_exc_raise(mrb
, mrb_obj_value(mrb
->stack_err
));
724 blk
= ensure_block(mrb
, blk
);
725 ci
= cipush(mrb
, n
, CINFO_DIRECT
, NULL
, NULL
, BLK_PTR(blk
), 0, 0);
726 funcall_args_capture(mrb
, 0, argc
, argv
, blk
, ci
);
727 ci
->u
.target_class
= mrb_class(mrb
, self
);
728 m
= mrb_vm_find_method(mrb
, ci
->u
.target_class
, &ci
->u
.target_class
, mid
);
729 if (MRB_METHOD_UNDEF_P(m
)) {
730 m
= prepare_missing(mrb
, ci
, self
, mid
, mrb_nil_value(), FALSE
);
735 ci
->proc
= MRB_METHOD_PROC_P(m
) ? MRB_METHOD_PROC(m
) : NULL
;
737 if (MRB_METHOD_CFUNC_P(m
)) {
739 val
= MRB_METHOD_CFUNC(m
)(mrb
, self
);
744 if (MRB_PROC_ALIAS_P(ci
->proc
)) {
745 ci
->mid
= ci
->proc
->body
.mid
;
746 ci
->proc
= ci
->proc
->upper
;
748 ci
->cci
= CINFO_SKIP
;
749 val
= mrb_run(mrb
, ci
->proc
, self
);
752 mrb_gc_arena_restore(mrb
, ai
);
753 mrb_gc_protect(mrb
, val
);
758 mrb_funcall_argv(mrb_state
*mrb
, mrb_value self
, mrb_sym mid
, mrb_int argc
, const mrb_value
*argv
)
760 return mrb_funcall_with_block(mrb
, self
, mid
, argc
, argv
, mrb_nil_value());
764 check_method_noarg(mrb_state
*mrb
, const mrb_callinfo
*ci
)
766 mrb_int argc
= ci
->n
== CALL_MAXARGS
? RARRAY_LEN(ci
->stack
[1]) : ci
->n
;
768 mrb_value kdict
= ci
->stack
[mrb_ci_kidx(ci
)];
769 if (!(mrb_hash_p(kdict
) && mrb_hash_empty_p(mrb
, kdict
))) {
774 mrb_argnum_error(mrb
, argc
, 0, 0);
779 exec_irep(mrb_state
*mrb
, mrb_value self
, const struct RProc
*p
)
781 mrb_callinfo
*ci
= mrb
->c
->ci
;
786 if (MRB_PROC_ALIAS_P(p
)) {
787 ci
->mid
= p
->body
.mid
;
791 if (MRB_PROC_CFUNC_P(p
)) {
792 if (MRB_PROC_NOARG_P(p
) && (ci
->n
> 0 || ci
->nk
> 0)) {
793 check_method_noarg(mrb
, ci
);
795 return MRB_PROC_CFUNC(p
)(mrb
, self
);
797 nregs
= p
->body
.irep
->nregs
;
798 keep
= ci_bidx(ci
)+1;
800 stack_extend(mrb
, keep
);
803 stack_extend(mrb
, nregs
);
804 stack_clear(ci
->stack
+keep
, nregs
-keep
);
807 cipush(mrb
, 0, 0, NULL
, NULL
, NULL
, 0, 0);
813 mrb_exec_irep(mrb_state
*mrb
, mrb_value self
, struct RProc
*p
)
815 mrb_callinfo
*ci
= mrb
->c
->ci
;
816 if (ci
->cci
== CINFO_NONE
) {
817 return exec_irep(mrb
, self
, p
);
821 if (MRB_PROC_CFUNC_P(p
)) {
822 if (MRB_PROC_NOARG_P(p
) && (ci
->n
> 0 || ci
->nk
> 0)) {
823 check_method_noarg(mrb
, ci
);
825 cipush(mrb
, 0, CINFO_DIRECT
, CI_TARGET_CLASS(ci
), p
, NULL
, ci
->mid
, ci
->n
|(ci
->nk
<<4));
826 ret
= MRB_PROC_CFUNC(p
)(mrb
, self
);
830 mrb_int keep
= ci_bidx(ci
) + 1; /* receiver + block */
831 ret
= mrb_top_run(mrb
, p
, self
, keep
);
833 if (mrb
->exc
&& mrb
->jmp
) {
834 mrb_exc_raise(mrb
, mrb_obj_value(mrb
->exc
));
844 * obj.send(symbol [, args...]) -> obj
845 * obj.__send__(symbol [, args...]) -> obj
847 * Invokes the method identified by _symbol_, passing it any
848 * arguments specified. You can use <code>__send__</code> if the name
849 * +send+ clashes with an existing method in _obj_.
853 * "Hello " + args.join(' ')
857 * k.send :hello, "gentle", "readers" #=> "Hello gentle readers"
860 mrb_f_send(mrb_state
*mrb
, mrb_value self
)
863 mrb_value block
, *regs
;
866 mrb_callinfo
*ci
= mrb
->c
->ci
;
869 if (ci
->cci
> CINFO_NONE
) {
871 const mrb_value
*argv
;
873 mrb_get_args(mrb
, "n*&", &name
, &argv
, &argc
, &block
);
874 return mrb_funcall_with_block(mrb
, self
, name
, argc
, argv
, block
);
877 regs
= mrb
->c
->ci
->stack
+1;
881 mrb_argnum_error(mrb
, 0, 1, -1);
884 if (RARRAY_LEN(regs
[0]) == 0) goto argnum_error
;
885 name
= mrb_obj_to_sym(mrb
, RARRAY_PTR(regs
[0])[0]);
888 name
= mrb_obj_to_sym(mrb
, regs
[0]);
891 c
= mrb_class(mrb
, self
);
892 m
= mrb_vm_find_method(mrb
, c
, &c
, name
);
893 if (MRB_METHOD_UNDEF_P(m
)) { /* call method_missing */
898 ci
->u
.target_class
= c
;
899 /* remove first symbol from arguments */
900 if (n
== 15) { /* variable length arguments */
901 regs
[0] = mrb_ary_subseq(mrb
, regs
[0], 1, RARRAY_LEN(regs
[0]) - 1);
904 for (int i
=0; i
<n
; i
++) {
907 regs
[n
] = regs
[n
+1]; /* copy kdict or block */
909 regs
[n
+1] = regs
[n
+2]; /* copy block */
914 const struct RProc
*p
;
915 if (MRB_METHOD_PROC_P(m
)) {
916 p
= MRB_METHOD_PROC(m
);
918 if (MRB_PROC_ALIAS_P(p
)) {
919 ci
->mid
= p
->body
.mid
;
924 if (MRB_METHOD_CFUNC_P(m
)) {
925 if (MRB_METHOD_NOARG_P(m
) && (ci
->n
> 0 || ci
->nk
> 0)) {
926 check_method_noarg(mrb
, ci
);
928 return MRB_METHOD_CFUNC(m
)(mrb
, self
);
930 return exec_irep(mrb
, self
, p
);
934 check_block(mrb_state
*mrb
, mrb_value blk
)
936 if (mrb_nil_p(blk
)) {
937 mrb_raise(mrb
, E_ARGUMENT_ERROR
, "no block given");
939 if (!mrb_proc_p(blk
)) {
940 mrb_raise(mrb
, E_TYPE_ERROR
, "not a block");
945 eval_under(mrb_state
*mrb
, mrb_value self
, mrb_value blk
, struct RClass
*c
)
951 check_block(mrb
, blk
);
953 if (ci
->cci
== CINFO_DIRECT
) {
954 return mrb_yield_with_class(mrb
, blk
, 1, &self
, self
, c
);
956 ci
->u
.target_class
= c
;
957 p
= mrb_proc_ptr(blk
);
958 /* just in case irep is NULL; #6065 */
959 if (p
->body
.irep
== NULL
) return mrb_nil_value();
963 ci
->mid
= ci
[-1].mid
;
964 if (MRB_PROC_CFUNC_P(p
)) {
965 stack_extend(mrb
, 4);
966 mrb
->c
->ci
->stack
[0] = self
;
967 mrb
->c
->ci
->stack
[1] = self
;
968 mrb
->c
->ci
->stack
[2] = mrb_nil_value();
969 return MRB_PROC_CFUNC(p
)(mrb
, self
);
971 nregs
= p
->body
.irep
->nregs
;
972 if (nregs
< 4) nregs
= 4;
973 stack_extend(mrb
, nregs
);
974 mrb
->c
->ci
->stack
[0] = self
;
975 mrb
->c
->ci
->stack
[1] = self
;
976 stack_clear(mrb
->c
->ci
->stack
+2, nregs
-2);
977 cipush(mrb
, 0, 0, NULL
, NULL
, NULL
, 0, 0);
985 * mod.class_eval {| | block } -> obj
986 * mod.module_eval {| | block } -> obj
988 * Evaluates block in the context of _mod_. This can
989 * be used to add methods to a class. <code>module_eval</code> returns
990 * the result of evaluating its argument.
993 mrb_mod_module_eval(mrb_state
*mrb
, mrb_value mod
)
997 if (mrb_get_args(mrb
, "|S&", &a
, &b
) == 1) {
998 mrb_raise(mrb
, E_NOTIMP_ERROR
, "module_eval/class_eval with string not implemented");
1000 return eval_under(mrb
, mod
, b
, mrb_class_ptr(mod
));
1006 * obj.instance_eval {| | block } -> obj
 *  Evaluates the given block within the context of the receiver (_obj_).
1009 * In order to set the context, the variable +self+ is set to _obj_ while
1010 * the code is executing, giving the code access to _obj_'s
1011 * instance variables. In the version of <code>instance_eval</code>
1012 * that takes a +String+, the optional second and third
1013 * parameters supply a filename and starting line number that are used
1014 * when reporting compilation errors.
1016 * class KlassWithSecret
1021 * k = KlassWithSecret.new
1022 * k.instance_eval { @secret } #=> 99
1025 mrb_obj_instance_eval(mrb_state
*mrb
, mrb_value self
)
1029 if (mrb_get_args(mrb
, "|S&", &a
, &b
) == 1) {
1030 mrb_raise(mrb
, E_NOTIMP_ERROR
, "instance_eval with string not implemented");
1032 return eval_under(mrb
, self
, b
, mrb_singleton_class_ptr(mrb
, self
));
1036 mrb_yield_with_class(mrb_state
*mrb
, mrb_value b
, mrb_int argc
, const mrb_value
*argv
, mrb_value self
, struct RClass
*c
)
1044 check_block(mrb
, b
);
1046 n
= mrb_ci_nregs(ci
);
1047 p
= mrb_proc_ptr(b
);
1048 if (MRB_PROC_ENV_P(p
)) {
1049 mid
= p
->e
.env
->mid
;
1054 ci
= cipush(mrb
, n
, CINFO_DIRECT
, NULL
, NULL
, NULL
, mid
, 0);
1055 funcall_args_capture(mrb
, 0, argc
, argv
, mrb_nil_value(), ci
);
1056 ci
->u
.target_class
= c
;
1059 if (MRB_PROC_CFUNC_P(p
)) {
1060 ci
->stack
[0] = self
;
1061 val
= MRB_PROC_CFUNC(p
)(mrb
, self
);
1065 ci
->cci
= CINFO_SKIP
;
1066 val
= mrb_run(mrb
, p
, self
);
1072 mrb_yield_argv(mrb_state
*mrb
, mrb_value b
, mrb_int argc
, const mrb_value
*argv
)
1074 struct RProc
*p
= mrb_proc_ptr(b
);
1076 mrb_value self
= mrb_proc_get_self(mrb
, p
, &tc
);
1078 return mrb_yield_with_class(mrb
, b
, argc
, argv
, self
, tc
);
1082 mrb_yield(mrb_state
*mrb
, mrb_value b
, mrb_value arg
)
1084 struct RProc
*p
= mrb_proc_ptr(b
);
1086 mrb_value self
= mrb_proc_get_self(mrb
, p
, &tc
);
1088 return mrb_yield_with_class(mrb
, b
, 1, &arg
, self
, tc
);
1092 mrb_yield_cont(mrb_state
*mrb
, mrb_value b
, mrb_value self
, mrb_int argc
, const mrb_value
*argv
)
1097 check_block(mrb
, b
);
1098 p
= mrb_proc_ptr(b
);
1101 stack_extend_adjust(mrb
, 4, &argv
);
1102 mrb
->c
->ci
->stack
[1] = mrb_ary_new_from_values(mrb
, argc
, argv
);
1103 mrb
->c
->ci
->stack
[2] = mrb_nil_value();
1104 mrb
->c
->ci
->stack
[3] = mrb_nil_value();
1107 return exec_irep(mrb
, self
, p
);
1110 #define RBREAK_TAG_FOREACH(f) \
1111 f(RBREAK_TAG_BREAK, 0) \
1112 f(RBREAK_TAG_JUMP, 1) \
1113 f(RBREAK_TAG_STOP, 2)
1115 #define RBREAK_TAG_DEFINE(tag, i) tag = i,
1117 RBREAK_TAG_FOREACH(RBREAK_TAG_DEFINE
)
1119 #undef RBREAK_TAG_DEFINE
1121 #define RBREAK_TAG_BIT 3
1122 #define RBREAK_TAG_BIT_OFF 8
1123 #define RBREAK_TAG_MASK (~(~UINT32_C(0) << RBREAK_TAG_BIT))
1125 static inline uint32_t
1126 mrb_break_tag_get(struct RBreak
*brk
)
1128 return (brk
->flags
>> RBREAK_TAG_BIT_OFF
) & RBREAK_TAG_MASK
;
1132 mrb_break_tag_set(struct RBreak
*brk
, uint32_t tag
)
1134 brk
->flags
&= ~(RBREAK_TAG_MASK
<< RBREAK_TAG_BIT_OFF
);
1135 brk
->flags
|= (tag
& RBREAK_TAG_MASK
) << RBREAK_TAG_BIT_OFF
;
1138 static struct RBreak
*
1139 break_new(mrb_state
*mrb
, uint32_t tag
, const mrb_callinfo
*return_ci
, mrb_value val
)
1141 mrb_assert((size_t)(return_ci
- mrb
->c
->cibase
) <= (size_t)(mrb
->c
->ci
- mrb
->c
->cibase
));
1143 struct RBreak
*brk
= MRB_OBJ_ALLOC(mrb
, MRB_TT_BREAK
, NULL
);
1144 brk
->ci_break_index
= return_ci
- mrb
->c
->cibase
;
1145 mrb_break_value_set(brk
, val
);
1146 mrb_break_tag_set(brk
, tag
);
1151 #define MRB_CATCH_FILTER_RESCUE (UINT32_C(1) << MRB_CATCH_RESCUE)
1152 #define MRB_CATCH_FILTER_ENSURE (UINT32_C(1) << MRB_CATCH_ENSURE)
1153 #define MRB_CATCH_FILTER_ALL (MRB_CATCH_FILTER_RESCUE | MRB_CATCH_FILTER_ENSURE)
1155 static const struct mrb_irep_catch_handler
*
1156 catch_handler_find(const mrb_irep
*irep
, const mrb_code
*pc
, uint32_t filter
)
1160 const struct mrb_irep_catch_handler
*e
;
1162 /* The comparison operators use `>` and `<=` because pc already points to the next instruction */
1163 #define catch_cover_p(pc, beg, end) ((pc) > (ptrdiff_t)(beg) && (pc) <= (ptrdiff_t)(end))
1165 mrb_assert(irep
&& irep
->clen
> 0);
1166 xpc
= pc
- irep
->iseq
;
1167 /* If it retry at the top level, pc will be 0, so check with -1 as the start position */
1168 mrb_assert(catch_cover_p(xpc
, -1, irep
->ilen
));
1169 if (!catch_cover_p(xpc
, -1, irep
->ilen
)) return NULL
;
1171 /* Currently uses a simple linear search to avoid processing complexity. */
1173 e
= mrb_irep_catch_handler_table(irep
) + cnt
- 1;
1174 for (; cnt
> 0; cnt
--, e
--) {
1175 if (((UINT32_C(1) << e
->type
) & filter
) &&
1176 catch_cover_p(xpc
, mrb_irep_catch_handler_unpack(e
->begin
), mrb_irep_catch_handler_unpack(e
->end
))) {
1181 #undef catch_cover_p
1187 LOCALJUMP_ERROR_RETURN
= 0,
1188 LOCALJUMP_ERROR_BREAK
= 1,
1189 LOCALJUMP_ERROR_YIELD
= 2
1190 } localjump_error_kind
;
1193 localjump_error(mrb_state
*mrb
, localjump_error_kind kind
)
1195 char kind_str
[3][7] = { "return", "break", "yield" };
1196 char kind_str_len
[] = { 6, 5, 5 };
1197 static const char lead
[] = "unexpected ";
1201 msg
= mrb_str_new_capa(mrb
, sizeof(lead
) + 7);
1202 mrb_str_cat(mrb
, msg
, lead
, sizeof(lead
) - 1);
1203 mrb_str_cat(mrb
, msg
, kind_str
[kind
], kind_str_len
[kind
]);
1204 exc
= mrb_exc_new_str(mrb
, E_LOCALJUMP_ERROR
, msg
);
1205 mrb_exc_set(mrb
, exc
);
1208 #define RAISE_EXC(mrb, exc) do { \
1209 mrb_value exc_value = (exc); \
1210 mrb_exc_set(mrb, exc_value); \
1214 #define RAISE_LIT(mrb, c, str) RAISE_EXC(mrb, mrb_exc_new_lit(mrb, c, str))
1215 #define RAISE_FORMAT(mrb, c, fmt, ...) RAISE_EXC(mrb, mrb_exc_new_str(mrb, c, mrb_format(mrb, fmt, __VA_ARGS__)))
1218 argnum_error(mrb_state
*mrb
, mrb_int num
)
1222 mrb_int argc
= mrb
->c
->ci
->n
;
1225 mrb_value args
= mrb
->c
->ci
->stack
[1];
1226 if (mrb_array_p(args
)) {
1227 argc
= RARRAY_LEN(args
);
1230 if (argc
== 0 && mrb
->c
->ci
->nk
!= 0 && !mrb_hash_empty_p(mrb
, mrb
->c
->ci
->stack
[1])) {
1233 str
= mrb_format(mrb
, "wrong number of arguments (given %i, expected %i)", argc
, num
);
1234 exc
= mrb_exc_new_str(mrb
, E_ARGUMENT_ERROR
, str
);
1235 mrb_exc_set(mrb
, exc
);
1239 break_tag_p(struct RBreak
*brk
, uint32_t tag
)
1241 return (brk
!= NULL
&& brk
->tt
== MRB_TT_BREAK
) ? TRUE
: FALSE
;
1245 prepare_tagged_break(mrb_state
*mrb
, uint32_t tag
, const mrb_callinfo
*return_ci
, mrb_value val
)
1247 if (break_tag_p((struct RBreak
*)mrb
->exc
, tag
)) {
1248 mrb_break_tag_set((struct RBreak
*)mrb
->exc
, tag
);
1251 mrb
->exc
= (struct RObject
*)break_new(mrb
, tag
, return_ci
, val
);
1255 #define THROW_TAGGED_BREAK(mrb, tag, return_ci, val) \
1257 prepare_tagged_break(mrb, tag, return_ci, val); \
1258 goto L_CATCH_TAGGED_BREAK; \
1261 #define UNWIND_ENSURE(mrb, ci, pc, tag, return_ci, val) \
1263 if ((proc = (ci)->proc) && !MRB_PROC_CFUNC_P(proc) && (irep = proc->body.irep) && irep->clen > 0 && \
1264 (ch = catch_handler_find(irep, pc, MRB_CATCH_FILTER_ENSURE))) { \
1265 THROW_TAGGED_BREAK(mrb, tag, return_ci, val); \
1270 * CHECKPOINT_RESTORE(tag) {
1271 * This part is executed when jumping by the same "tag" of RBreak (it is not executed the first time).
1272 * Write the code required (initialization of variables, etc.) for the subsequent processing.
1274 * CHECKPOINT_MAIN(tag) {
1275 * This part is always executed.
1277 * CHECKPOINT_END(tag);
1281 * // Jump to CHECKPOINT_RESTORE with the same "tag".
1282 * goto CHECKPOINT_LABEL_MAKE(tag);
1285 #define CHECKPOINT_LABEL_MAKE(tag) L_CHECKPOINT_ ## tag
1287 #define CHECKPOINT_RESTORE(tag) \
1290 CHECKPOINT_LABEL_MAKE(tag): \
1293 #define CHECKPOINT_MAIN(tag) \
1298 #define CHECKPOINT_END(tag) \
1302 #ifdef MRB_USE_DEBUG_HOOK
1303 #define CODE_FETCH_HOOK(mrb, irep, pc, regs) if ((mrb)->code_fetch_hook) (mrb)->code_fetch_hook((mrb), (irep), (pc), (regs));
1305 #define CODE_FETCH_HOOK(mrb, irep, pc, regs)
1308 #ifdef MRB_BYTECODE_DECODE_OPTION
1309 #define BYTECODE_DECODER(x) ((mrb)->bytecode_decoder)?(mrb)->bytecode_decoder((mrb), (x)):(x)
1311 #define BYTECODE_DECODER(x) (x)
1314 #ifndef MRB_USE_VM_SWITCH_DISPATCH
1315 #if !defined __GNUC__ && !defined __clang__ && !defined __INTEL_COMPILER
1316 #define MRB_USE_VM_SWITCH_DISPATCH
1318 #endif /* ifndef MRB_USE_VM_SWITCH_DISPATCH */
1320 #ifdef MRB_USE_VM_SWITCH_DISPATCH
1322 #define INIT_DISPATCH for (;;) { insn = BYTECODE_DECODER(*pc); CODE_FETCH_HOOK(mrb, irep, pc, regs); switch (insn) {
1323 #define CASE(insn,ops) case insn: pc++; FETCH_ ## ops (); mrb->c->ci->pc = pc; L_ ## insn ## _BODY:
1324 #define NEXT goto L_END_DISPATCH
1326 #define END_DISPATCH L_END_DISPATCH:;}}
1330 #define INIT_DISPATCH JUMP; return mrb_nil_value();
1331 #define CASE(insn,ops) L_ ## insn: pc++; FETCH_ ## ops (); mrb->c->ci->pc = pc; L_ ## insn ## _BODY:
1332 #define NEXT insn=BYTECODE_DECODER(*pc); CODE_FETCH_HOOK(mrb, irep, pc, regs); goto *optable[insn]
1335 #define END_DISPATCH
1340 mrb_vm_run(mrb_state
*mrb
, const struct RProc
*proc
, mrb_value self
, mrb_int stack_keep
)
1342 const mrb_irep
*irep
= proc
->body
.irep
;
1344 struct mrb_context
*c
= mrb
->c
;
1346 ptrdiff_t cioff
= c
->ci
- c
->cibase
;
1348 mrb_int nregs
= irep
->nregs
;
1353 if (stack_keep
> nregs
)
1356 struct REnv
*e
= CI_ENV(mrb
->c
->ci
);
1357 if (e
&& (stack_keep
== 0 || irep
->nlocals
< MRB_ENV_LEN(e
))) {
1358 ci_env_set(mrb
->c
->ci
, NULL
);
1359 mrb_env_unshare(mrb
, e
, FALSE
);
1362 stack_extend(mrb
, nregs
);
1363 stack_clear(c
->ci
->stack
+ stack_keep
, nregs
- stack_keep
);
1364 c
->ci
->stack
[0] = self
;
1365 result
= mrb_vm_exec(mrb
, proc
, irep
->iseq
);
1366 mrb_assert(mrb
->c
== c
); /* do not switch fibers via mrb_vm_run(), unlike mrb_vm_exec() */
1367 mrb_assert(c
->ci
== c
->cibase
|| (c
->ci
- c
->cibase
) == cioff
- 1);
1371 static struct RClass
*
1372 check_target_class(mrb_state
*mrb
)
1374 struct RClass
*target
= CI_TARGET_CLASS(mrb
->c
->ci
);
1376 mrb_raise(mrb
, E_TYPE_ERROR
, "no class/module to add method");
1381 #define regs (mrb->c->ci->stack)
1384 hash_new_from_regs(mrb_state
*mrb
, mrb_int argc
, mrb_int idx
)
1386 mrb_value hash
= mrb_hash_new_capa(mrb
, argc
);
1388 mrb_hash_set(mrb
, hash
, regs
[idx
+0], regs
[idx
+1]);
1394 #define ary_new_from_regs(mrb, argc, idx) mrb_ary_new_from_values(mrb, (argc), ®s[idx]);
1397 mrb_vm_exec(mrb_state
*mrb
, const struct RProc
*proc
, const mrb_code
*pc
)
1399 /* mrb_assert(MRB_PROC_CFUNC_P(proc)) */
1400 const mrb_irep
*irep
= proc
->body
.irep
;
1401 const mrb_pool_value
*pool
= irep
->pool
;
1402 const mrb_sym
*syms
= irep
->syms
;
1404 int ai
= mrb_gc_arena_save(mrb
);
1405 struct mrb_jmpbuf
*prev_jmp
= mrb
->jmp
;
1406 struct mrb_jmpbuf c_jmp
;
1411 const struct mrb_irep_catch_handler
*ch
;
1413 #ifndef MRB_USE_VM_SWITCH_DISPATCH
1414 static const void * const optable
[] = {
1415 #define OPCODE(x,_) &&L_OP_ ## x,
1416 #include <mruby/ops.h>
1426 mrb_gc_arena_restore(mrb
, ai
);
1427 if (mrb
->exc
->tt
== MRB_TT_BREAK
)
1432 CI_PROC_SET(mrb
->c
->ci
, proc
);
1445 CASE(OP_LOADL
, BB
) {
1446 switch (pool
[b
].tt
) { /* number */
1448 regs
[a
] = mrb_int_value(mrb
, (mrb_int
)pool
[b
].u
.i32
);
1451 #if defined(MRB_INT64)
1452 regs
[a
] = mrb_int_value(mrb
, (mrb_int
)pool
[b
].u
.i64
);
1455 #if defined(MRB_64BIT)
1456 if (INT32_MIN
<= pool
[b
].u
.i64
&& pool
[b
].u
.i64
<= INT32_MAX
) {
1457 regs
[a
] = mrb_int_value(mrb
, (mrb_int
)pool
[b
].u
.i64
);
1461 goto L_INT_OVERFLOW
;
1463 case IREP_TT_BIGINT
:
1464 #ifdef MRB_USE_BIGINT
1466 const char *s
= pool
[b
].u
.str
;
1467 regs
[a
] = mrb_bint_new_str(mrb
, s
+2, (uint8_t)s
[0], s
[1]);
1471 goto L_INT_OVERFLOW
;
1473 #ifndef MRB_NO_FLOAT
1475 regs
[a
] = mrb_float_value(mrb
, pool
[b
].u
.f
);
1479 /* should not happen (tt:string) */
1480 regs
[a
] = mrb_nil_value();
1486 CASE(OP_LOADI
, BB
) {
1487 SET_FIXNUM_VALUE(regs
[a
], b
);
1491 CASE(OP_LOADINEG
, BB
) {
1492 SET_FIXNUM_VALUE(regs
[a
], -b
);
1496 CASE(OP_LOADI__1
,B
) goto L_LOADI
;
1497 CASE(OP_LOADI_0
,B
) goto L_LOADI
;
1498 CASE(OP_LOADI_1
,B
) goto L_LOADI
;
1499 CASE(OP_LOADI_2
,B
) goto L_LOADI
;
1500 CASE(OP_LOADI_3
,B
) goto L_LOADI
;
1501 CASE(OP_LOADI_4
,B
) goto L_LOADI
;
1502 CASE(OP_LOADI_5
,B
) goto L_LOADI
;
1503 CASE(OP_LOADI_6
,B
) goto L_LOADI
;
1504 CASE(OP_LOADI_7
, B
) {
1506 SET_FIXNUM_VALUE(regs
[a
], (mrb_int
)insn
- (mrb_int
)OP_LOADI_0
);
1510 CASE(OP_LOADI16
, BS
) {
1511 SET_FIXNUM_VALUE(regs
[a
], (mrb_int
)(int16_t)b
);
1515 CASE(OP_LOADI32
, BSS
) {
1516 SET_INT_VALUE(mrb
, regs
[a
], (int32_t)(((uint32_t)b
<<16)+c
));
1520 CASE(OP_LOADSYM
, BB
) {
1521 SET_SYM_VALUE(regs
[a
], syms
[b
]);
1525 CASE(OP_LOADNIL
, B
) {
1526 SET_NIL_VALUE(regs
[a
]);
1530 CASE(OP_LOADSELF
, B
) {
1536 SET_TRUE_VALUE(regs
[a
]);
1541 SET_FALSE_VALUE(regs
[a
]);
1545 CASE(OP_GETGV
, BB
) {
1546 mrb_value val
= mrb_gv_get(mrb
, syms
[b
]);
1551 CASE(OP_SETGV
, BB
) {
1552 mrb_gv_set(mrb
, syms
[b
], regs
[a
]);
1556 CASE(OP_GETSV
, BB
) {
1557 mrb_value val
= mrb_vm_special_get(mrb
, syms
[b
]);
1562 CASE(OP_SETSV
, BB
) {
1563 mrb_vm_special_set(mrb
, syms
[b
], regs
[a
]);
1567 CASE(OP_GETIV
, BB
) {
1568 regs
[a
] = mrb_iv_get(mrb
, regs
[0], syms
[b
]);
1572 CASE(OP_SETIV
, BB
) {
1573 mrb_iv_set(mrb
, regs
[0], syms
[b
], regs
[a
]);
1577 CASE(OP_GETCV
, BB
) {
1579 val
= mrb_vm_cv_get(mrb
, syms
[b
]);
1584 CASE(OP_SETCV
, BB
) {
1585 mrb_vm_cv_set(mrb
, syms
[b
], regs
[a
]);
1589 CASE(OP_GETIDX
, B
) {
1590 mrb_value va
= regs
[a
], vb
= regs
[a
+1];
1591 switch (mrb_type(va
)) {
1593 if (!mrb_integer_p(vb
)) goto getidx_fallback
;
1595 mrb_int idx
= mrb_integer(vb
);
1596 if (0 <= idx
&& idx
< RARRAY_LEN(va
)) {
1597 regs
[a
] = RARRAY_PTR(va
)[idx
];
1600 regs
[a
] = mrb_ary_entry(va
, idx
);
1605 va
= mrb_hash_get(mrb
, va
, vb
);
1609 switch (mrb_type(vb
)) {
1610 case MRB_TT_INTEGER
:
1613 va
= mrb_str_aref(mrb
, va
, vb
, mrb_undef_value());
1617 goto getidx_fallback
;
1622 mid
= MRB_OPSYM(aref
);
1628 CASE(OP_SETIDX
, B
) {
1630 mid
= MRB_OPSYM(aset
);
1631 SET_NIL_VALUE(regs
[a
+3]);
1635 CASE(OP_GETCONST
, BB
) {
1636 mrb_value v
= mrb_vm_const_get(mrb
, syms
[b
]);
1641 CASE(OP_SETCONST
, BB
) {
1642 mrb_vm_const_set(mrb
, syms
[b
], regs
[a
]);
1646 CASE(OP_GETMCNST
, BB
) {
1647 mrb_value v
= mrb_const_get(mrb
, regs
[a
], syms
[b
]);
1652 CASE(OP_SETMCNST
, BB
) {
1653 mrb_const_set(mrb
, regs
[a
+1], syms
[b
], regs
[a
]);
1657 CASE(OP_GETUPVAR
, BBB
) {
1658 struct REnv
*e
= uvenv(mrb
, c
);
1660 if (e
&& b
< MRB_ENV_LEN(e
)) {
1661 regs
[a
] = e
->stack
[b
];
1664 regs
[a
] = mrb_nil_value();
1669 CASE(OP_SETUPVAR
, BBB
) {
1670 struct REnv
*e
= uvenv(mrb
, c
);
1673 if (b
< MRB_ENV_LEN(e
)) {
1674 e
->stack
[b
] = regs
[a
];
1675 mrb_write_barrier(mrb
, (struct RBasic
*)e
);
1685 CASE(OP_JMPIF
, BS
) {
1686 if (mrb_test(regs
[a
])) {
1692 CASE(OP_JMPNOT
, BS
) {
1693 if (!mrb_test(regs
[a
])) {
1699 CASE(OP_JMPNIL
, BS
) {
1700 if (mrb_nil_p(regs
[a
])) {
1708 a
= (uint32_t)((pc
- irep
->iseq
) + (int16_t)a
);
1709 CHECKPOINT_RESTORE(RBREAK_TAG_JUMP
) {
1710 struct RBreak
*brk
= (struct RBreak
*)mrb
->exc
;
1711 mrb_value target
= mrb_break_value_get(brk
);
1712 mrb_assert(mrb_integer_p(target
));
1713 a
= (uint32_t)mrb_integer(target
);
1714 mrb_assert(a
>= 0 && a
< irep
->ilen
);
1716 CHECKPOINT_MAIN(RBREAK_TAG_JUMP
) {
1717 if (irep
->clen
> 0 &&
1718 (ch
= catch_handler_find(irep
, pc
, MRB_CATCH_FILTER_ENSURE
))) {
1719 /* avoiding a jump from a catch handler into the same handler */
1720 if (a
< mrb_irep_catch_handler_unpack(ch
->begin
) || a
>= mrb_irep_catch_handler_unpack(ch
->end
)) {
1721 THROW_TAGGED_BREAK(mrb
, RBREAK_TAG_JUMP
, mrb
->c
->ci
, mrb_fixnum_value(a
));
1725 CHECKPOINT_END(RBREAK_TAG_JUMP
);
1727 mrb
->exc
= NULL
; /* clear break object */
1728 pc
= irep
->iseq
+ a
;
1732 CASE(OP_EXCEPT
, B
) {
1735 if (mrb
->exc
== NULL
) {
1736 exc
= mrb_nil_value();
1739 switch (mrb
->exc
->tt
) {
1741 case MRB_TT_EXCEPTION
:
1742 exc
= mrb_obj_value(mrb
->exc
);
1745 mrb_assert(!"bad mrb_type");
1746 exc
= mrb_nil_value();
1754 CASE(OP_RESCUE
, BB
) {
1755 mrb_value exc
= regs
[a
]; /* exc on stack */
1756 mrb_value e
= regs
[b
];
1759 switch (mrb_type(e
)) {
1764 RAISE_LIT(mrb
, E_TYPE_ERROR
, "class or module required for rescue clause");
1766 ec
= mrb_class_ptr(e
);
1767 regs
[b
] = mrb_bool_value(mrb_obj_is_kind_of(mrb
, exc
, ec
));
1771 CASE(OP_RAISEIF
, B
) {
1774 if (mrb_nil_p(exc
)) {
1777 else if (mrb_break_p(exc
)) {
1779 mrb
->exc
= mrb_obj_ptr(exc
);
1781 brk
= (struct RBreak
*)mrb
->exc
;
1782 switch (mrb_break_tag_get(brk
)) {
1783 #define DISPATCH_CHECKPOINTS(n, i) case n: goto CHECKPOINT_LABEL_MAKE(n);
1784 RBREAK_TAG_FOREACH(DISPATCH_CHECKPOINTS
)
1785 #undef DISPATCH_CHECKPOINTS
1787 mrb_assert(!"wrong break tag");
1792 mrb_exc_set(mrb
, exc
);
1795 while (!(proc
= ci
->proc
) || MRB_PROC_CFUNC_P(ci
->proc
) || !(irep
= proc
->body
.irep
) || irep
->clen
< 1 ||
1796 (ch
= catch_handler_find(irep
, ci
->pc
, MRB_CATCH_FILTER_ALL
)) == NULL
) {
1797 if (ci
!= mrb
->c
->cibase
) {
1799 if (ci
[1].cci
== CINFO_SKIP
) {
1800 mrb_assert(prev_jmp
!= NULL
);
1801 mrb
->jmp
= prev_jmp
;
1802 MRB_THROW(prev_jmp
);
1805 else if (mrb
->c
== mrb
->root_c
) {
1806 mrb
->c
->ci
->stack
= mrb
->c
->stbase
;
1807 mrb
->jmp
= prev_jmp
;
1808 return mrb_obj_value(mrb
->exc
);
1811 struct mrb_context
*c
= mrb
->c
;
1813 fiber_terminate(mrb
, c
, ci
);
1814 if (!c
->vmexec
) goto L_RAISE
;
1815 mrb
->jmp
= prev_jmp
;
1816 if (!prev_jmp
) return mrb_obj_value(mrb
->exc
);
1817 MRB_THROW(prev_jmp
);
1822 L_CATCH_TAGGED_BREAK
: /* from THROW_TAGGED_BREAK() or UNWIND_ENSURE() */
1826 irep
= proc
->body
.irep
;
1829 stack_extend(mrb
, irep
->nregs
);
1830 pc
= irep
->iseq
+ mrb_irep_catch_handler_unpack(ch
->target
);
1835 CASE(OP_SSEND
, BBB
) {
1841 CASE(OP_SSENDB
, BBB
) {
1851 /* push nil after arguments */
1852 SET_NIL_VALUE(regs
[a
+2]);
1862 mrb_value recv
, blk
;
1864 int nk
= (c
>>4)&0xf;
1865 mrb_int bidx
= a
+ mrb_bidx(n
,nk
);
1866 mrb_int new_bidx
= bidx
;
1868 if (nk
== CALL_MAXARGS
) {
1869 mrb_ensure_hash_type(mrb
, regs
[a
+(n
==CALL_MAXARGS
?1:n
)+1]);
1871 else if (nk
> 0) { /* pack keyword arguments */
1872 mrb_int kidx
= a
+(n
==CALL_MAXARGS
?1:n
)+1;
1873 mrb_value kdict
= hash_new_from_regs(mrb
, nk
, kidx
);
1877 new_bidx
= a
+mrb_bidx(n
, nk
);
1880 mrb_assert(bidx
< irep
->nregs
);
1881 if (insn
== OP_SEND
) {
1882 /* clear block argument */
1883 SET_NIL_VALUE(regs
[new_bidx
]);
1887 blk
= ensure_block(mrb
, regs
[bidx
]);
1888 regs
[new_bidx
] = blk
;
1891 ci
= cipush(mrb
, a
, CINFO_DIRECT
, NULL
, NULL
, BLK_PTR(blk
), 0, c
);
1893 ci
->u
.target_class
= (insn
== OP_SUPER
) ? CI_TARGET_CLASS(ci
- 1)->super
: mrb_class(mrb
, recv
);
1894 m
= mrb_vm_find_method(mrb
, ci
->u
.target_class
, &ci
->u
.target_class
, mid
);
1895 if (MRB_METHOD_UNDEF_P(m
)) {
1896 m
= prepare_missing(mrb
, ci
, recv
, mid
, blk
, (insn
== OP_SUPER
));
1901 ci
->cci
= CINFO_NONE
;
1903 if (MRB_METHOD_PROC_P(m
)) {
1904 const struct RProc
*p
= MRB_METHOD_PROC(m
);
1906 if (MRB_PROC_ALIAS_P(p
)) {
1907 ci
->mid
= p
->body
.mid
;
1911 if (!MRB_PROC_CFUNC_P(p
)) {
1912 /* setup environment for calling method */
1914 irep
= proc
->body
.irep
;
1917 stack_extend(mrb
, (irep
->nregs
< 4) ? 4 : irep
->nregs
);
1922 if (MRB_PROC_NOARG_P(p
) && (ci
->n
> 0 || ci
->nk
> 0)) {
1923 check_method_noarg(mrb
, ci
);
1925 recv
= MRB_PROC_CFUNC(p
)(mrb
, recv
);
1929 if (MRB_METHOD_NOARG_P(m
) && (ci
->n
> 0 || ci
->nk
> 0)) {
1930 check_method_noarg(mrb
, ci
);
1932 recv
= MRB_METHOD_FUNC(m
)(mrb
, recv
);
1935 /* cfunc epilogue */
1936 mrb_gc_arena_shrink(mrb
, ai
);
1937 if (mrb
->exc
) goto L_RAISE
;
1939 if (!ci
->u
.keep_context
) { /* return from context modifying method (resume/yield) */
1940 if (ci
->cci
== CINFO_RESUMED
) {
1941 mrb
->jmp
= prev_jmp
;
1945 mrb_assert(!MRB_PROC_CFUNC_P(ci
[-1].proc
));
1947 irep
= proc
->body
.irep
;
1952 mrb_assert(ci
> mrb
->c
->cibase
);
1953 ci
->stack
[0] = recv
;
1961 mrb_callinfo
*ci
= mrb
->c
->ci
;
1962 mrb_value recv
= ci
->stack
[0];
1963 const struct RProc
*p
= mrb_proc_ptr(recv
);
1966 if (MRB_PROC_ALIAS_P(p
)) {
1967 ci
->mid
= p
->body
.mid
;
1970 else if (MRB_PROC_ENV_P(p
)) {
1971 ci
->mid
= MRB_PROC_ENV(p
)->mid
;
1973 /* replace callinfo */
1974 ci
->u
.target_class
= MRB_PROC_TARGET_CLASS(p
);
1978 if (MRB_PROC_CFUNC_P(p
)) {
1979 recv
= MRB_PROC_CFUNC(p
)(mrb
, recv
);
1980 mrb_gc_arena_shrink(mrb
, ai
);
1981 if (mrb
->exc
) goto L_RAISE
;
1985 ci
[1].stack
[0] = recv
;
1986 irep
= mrb
->c
->ci
->proc
->body
.irep
;
1989 /* setup environment for calling method */
1991 irep
= p
->body
.irep
;
1993 mrb
->c
->ci
->stack
[0] = mrb_nil_value();
1995 goto L_OP_RETURN_BODY
;
1997 mrb_int nargs
= ci_bidx(ci
)+1;
1998 if (nargs
< irep
->nregs
) {
1999 stack_extend(mrb
, irep
->nregs
);
2000 stack_clear(regs
+nargs
, irep
->nregs
-nargs
);
2002 if (MRB_PROC_ENV_P(p
)) {
2003 regs
[0] = MRB_PROC_ENV(p
)->stack
[0];
2012 CASE(OP_SUPER
, BB
) {
2013 mrb_callinfo
*ci
= mrb
->c
->ci
;
2015 struct RClass
* target_class
= CI_TARGET_CLASS(ci
);
2018 if (mid
== 0 || !target_class
) {
2019 RAISE_LIT(mrb
, E_NOMETHOD_ERROR
, "super called outside of method");
2021 if ((target_class
->flags
& MRB_FL_CLASS_IS_PREPENDED
) || target_class
->tt
== MRB_TT_MODULE
) {
2022 goto super_typeerror
;
2025 if (!mrb_obj_is_kind_of(mrb
, recv
, target_class
)) {
2027 RAISE_LIT(mrb
, E_TYPE_ERROR
, "self has wrong type to call super in this context");
2035 CASE(OP_ARGARY
, BS
) {
2036 mrb_int m1
= (b
>>11)&0x3f;
2037 mrb_int r
= (b
>>10)&0x1;
2038 mrb_int m2
= (b
>>5)&0x1f;
2039 mrb_int kd
= (b
>>4)&0x1;
2040 mrb_int lv
= (b
>>0)&0xf;
2043 if (mrb
->c
->ci
->mid
== 0 || CI_TARGET_CLASS(mrb
->c
->ci
) == NULL
) {
2045 RAISE_LIT(mrb
, E_NOMETHOD_ERROR
, "super called outside of method");
2047 if (lv
== 0) stack
= regs
+ 1;
2049 struct REnv
*e
= uvenv(mrb
, lv
-1);
2050 if (!e
) goto L_NOSUPER
;
2051 if (MRB_ENV_LEN(e
) <= m1
+r
+m2
+1)
2053 stack
= e
->stack
+ 1;
2056 regs
[a
] = mrb_ary_new_from_values(mrb
, m1
+m2
, stack
);
2059 mrb_value
*pp
= NULL
;
2060 struct RArray
*rest
;
2063 if (mrb_array_p(stack
[m1
])) {
2064 struct RArray
*ary
= mrb_ary_ptr(stack
[m1
]);
2069 regs
[a
] = mrb_ary_new_capa(mrb
, m1
+len
+m2
);
2070 rest
= mrb_ary_ptr(regs
[a
]);
2072 stack_copy(ARY_PTR(rest
), stack
, m1
);
2075 stack_copy(ARY_PTR(rest
)+m1
, pp
, len
);
2078 stack_copy(ARY_PTR(rest
)+m1
+len
, stack
+m1
+1, m2
);
2080 ARY_SET_LEN(rest
, m1
+len
+m2
);
2083 regs
[a
+1] = stack
[m1
+r
+m2
];
2084 regs
[a
+2] = stack
[m1
+r
+m2
+1];
2087 regs
[a
+1] = stack
[m1
+r
+m2
];
2089 mrb_gc_arena_restore(mrb
, ai
);
2094 mrb_callinfo
*ci
= mrb
->c
->ci
;
2095 mrb_int argc
= ci
->n
;
2096 mrb_value
*argv
= regs
+1;
2098 mrb_int m1
= MRB_ASPEC_REQ(a
);
2101 if ((a
& ~0x7c0001) == 0 && argc
< 15 && MRB_PROC_STRICT_P(proc
)) {
2102 if (argc
+(ci
->nk
==15) != m1
) { /* count kdict too */
2103 argnum_error(mrb
, m1
);
2106 /* clear local (but non-argument) variables */
2107 mrb_int pos
= m1
+2; /* self+m1+blk */
2108 if (irep
->nlocals
-pos
> 0) {
2109 stack_clear(®s
[pos
], irep
->nlocals
-pos
);
2114 mrb_int o
= MRB_ASPEC_OPT(a
);
2115 mrb_int r
= MRB_ASPEC_REST(a
);
2116 mrb_int m2
= MRB_ASPEC_POST(a
);
2117 mrb_int kd
= (MRB_ASPEC_KEY(a
) > 0 || MRB_ASPEC_KDICT(a
))? 1 : 0;
2119 int b = MRB_ASPEC_BLOCK(a);
2121 mrb_int
const len
= m1
+ o
+ r
+ m2
;
2123 mrb_value
* const argv0
= argv
;
2124 mrb_value blk
= regs
[ci_bidx(ci
)];
2125 mrb_value kdict
= mrb_nil_value();
2127 /* keyword arguments */
2129 kdict
= regs
[mrb_ci_kidx(ci
)];
2132 if (!mrb_nil_p(kdict
) && mrb_hash_size(mrb
, kdict
) > 0) {
2135 argc
++; /* include kdict in normal arguments */
2137 else if (argc
== 14) {
2138 /* pack arguments and kdict */
2139 regs
[1] = ary_new_from_regs(mrb
, argc
+1, 1);
2142 else {/* argc == 15 */
2143 /* push kdict to packed arguments */
2144 mrb_ary_push(mrb
, regs
[1], kdict
);
2147 kdict
= mrb_nil_value();
2150 else if (MRB_ASPEC_KEY(a
) > 0 && !mrb_nil_p(kdict
)) {
2151 kdict
= mrb_hash_dup(mrb
, kdict
);
2153 else if (!mrb_nil_p(kdict
)) {
2154 mrb_gc_protect(mrb
, kdict
);
2157 /* arguments is passed with Array */
2159 struct RArray
*ary
= mrb_ary_ptr(regs
[1]);
2160 argv
= ARY_PTR(ary
);
2161 argc
= (int)ARY_LEN(ary
);
2162 mrb_gc_protect(mrb
, regs
[1]);
2165 /* strict argument check */
2166 if (ci
->proc
&& MRB_PROC_STRICT_P(ci
->proc
)) {
2167 if (argc
< m1
+ m2
|| (r
== 0 && argc
> len
)) {
2168 argnum_error(mrb
, m1
+m2
);
2172 /* extract first argument array to arguments */
2173 else if (len
> 1 && argc
== 1 && mrb_array_p(argv
[0])) {
2174 mrb_gc_protect(mrb
, argv
[0]);
2175 argc
= (int)RARRAY_LEN(argv
[0]);
2176 argv
= RARRAY_PTR(argv
[0]);
2179 /* rest arguments */
2184 mlen
= m1
< argc
? argc
- m1
: 0;
2187 /* copy mandatory and optional arguments */
2188 if (argv0
!= argv
&& argv
) {
2189 value_move(®s
[1], argv
, argc
-mlen
); /* m1 + o */
2192 stack_clear(®s
[argc
+1], m1
-argc
);
2194 /* copy post mandatory arguments */
2196 value_move(®s
[len
-m2
+1], &argv
[argc
-mlen
], mlen
);
2199 stack_clear(®s
[len
-m2
+mlen
+1], m2
-mlen
);
2201 /* initialize rest arguments with empty Array */
2203 rest
= mrb_ary_new_capa(mrb
, 0);
2204 regs
[m1
+o
+1] = rest
;
2206 /* skip initializer of passed arguments */
2207 if (o
> 0 && argc
> m1
+m2
)
2208 pc
+= (argc
- m1
- m2
)*3;
2212 if (argv0
!= argv
) {
2213 mrb_gc_protect(mrb
, blk
);
2214 value_move(®s
[1], argv
, m1
+o
);
2217 rnum
= argc
-m1
-o
-m2
;
2218 rest
= mrb_ary_new_from_values(mrb
, rnum
, argv
+m1
+o
);
2219 regs
[m1
+o
+1] = rest
;
2221 if (m2
> 0 && argc
-m2
> m1
) {
2222 value_move(®s
[m1
+o
+r
+1], &argv
[m1
+o
+rnum
], m2
);
2227 /* need to be update blk first to protect blk from GC */
2228 mrb_int
const kw_pos
= len
+ kd
; /* where kwhash should be */
2229 mrb_int
const blk_pos
= kw_pos
+ 1; /* where block should be */
2230 regs
[blk_pos
] = blk
; /* move block */
2232 if (mrb_nil_p(kdict
)) {
2233 kdict
= mrb_hash_new_capa(mrb
, 0);
2235 regs
[kw_pos
] = kdict
; /* set kwhash */
2239 /* format arguments for generated code */
2240 mrb
->c
->ci
->n
= (uint8_t)len
;
2242 /* clear local (but non-argument) variables */
2243 if (irep
->nlocals
-blk_pos
-1 > 0) {
2244 stack_clear(®s
[blk_pos
+1], irep
->nlocals
-blk_pos
-1);
2250 mrb_value k
= mrb_symbol_value(syms
[b
]);
2251 mrb_int kidx
= mrb_ci_kidx(mrb
->c
->ci
);
2254 if (kidx
< 0 || !mrb_hash_p(kdict
=regs
[kidx
]) || !mrb_hash_key_p(mrb
, kdict
, k
)) {
2255 RAISE_FORMAT(mrb
, E_ARGUMENT_ERROR
, "missing keyword: %v", k
);
2257 v
= mrb_hash_get(mrb
, kdict
, k
);
2259 mrb_hash_delete_key(mrb
, kdict
, k
);
2263 CASE(OP_KEY_P
, BB
) {
2264 mrb_value k
= mrb_symbol_value(syms
[b
]);
2265 mrb_int kidx
= mrb_ci_kidx(mrb
->c
->ci
);
2267 mrb_bool key_p
= FALSE
;
2269 if (kidx
>= 0 && mrb_hash_p(kdict
=regs
[kidx
])) {
2270 key_p
= mrb_hash_key_p(mrb
, kdict
, k
);
2272 regs
[a
] = mrb_bool_value(key_p
);
2276 CASE(OP_KEYEND
, Z
) {
2277 mrb_int kidx
= mrb_ci_kidx(mrb
->c
->ci
);
2280 if (kidx
>= 0 && mrb_hash_p(kdict
=regs
[kidx
]) && !mrb_hash_empty_p(mrb
, kdict
)) {
2281 mrb_value keys
= mrb_hash_keys(mrb
, kdict
);
2282 mrb_value key1
= RARRAY_PTR(keys
)[0];
2283 RAISE_FORMAT(mrb
, E_ARGUMENT_ERROR
, "unknown keyword: %v", key1
);
2293 if (MRB_PROC_STRICT_P(proc
)) goto NORMAL_RETURN
;
2294 if (!MRB_PROC_ENV_P(proc
)) {
2296 RAISE_LIT(mrb
, E_LOCALJUMP_ERROR
, "break from proc-closure");
2299 struct REnv
*e
= MRB_PROC_ENV(proc
);
2301 if (e
->cxt
!= mrb
->c
) {
2305 mrb_callinfo
*ci
= mrb
->c
->ci
;
2307 while (mrb
->c
->cibase
< ci
&& ci
[-1].proc
!= proc
) {
2310 if (ci
== mrb
->c
->cibase
|| !(ci
->flags
& MRB_CI_COMPANION_BLOCK
)) {
2313 c
= a
; // release the "a" variable, which can handle 32-bit values
2314 a
= ci
- mrb
->c
->cibase
;
2317 CASE(OP_RETURN_BLK
, B
) {
2322 mrb_callinfo
*ci
= mrb
->c
->ci
;
2324 if (!MRB_PROC_ENV_P(proc
) || MRB_PROC_STRICT_P(proc
)) {
2328 const struct RProc
*dst
;
2329 mrb_callinfo
*cibase
;
2330 cibase
= mrb
->c
->cibase
;
2331 dst
= top_proc(mrb
, proc
);
2333 if (MRB_PROC_ENV_P(dst
)) {
2334 struct REnv
*e
= MRB_PROC_ENV(dst
);
2336 if (e
->cxt
!= mrb
->c
) {
2337 localjump_error(mrb
, LOCALJUMP_ERROR_RETURN
);
2341 /* check jump destination */
2342 while (cibase
<= ci
&& ci
->proc
!= dst
) {
2345 if (ci
<= cibase
) { /* no jump destination */
2346 localjump_error(mrb
, LOCALJUMP_ERROR_RETURN
);
2349 c
= a
; // release the "a" variable, which can handle 32-bit values
2350 a
= ci
- mrb
->c
->cibase
;
2353 CASE(OP_RETURN
, B
) {
2366 mrb_gc_protect(mrb
, v
);
2367 CHECKPOINT_RESTORE(RBREAK_TAG_BREAK
) {
2369 struct RBreak
*brk
= (struct RBreak
*)mrb
->exc
;
2370 ci
= &mrb
->c
->cibase
[brk
->ci_break_index
];
2371 v
= mrb_break_value_get(brk
);
2374 L_UNWINDING
: // for a check on the role of `a` and `c`, see `goto L_UNWINDING`
2375 ci
= mrb
->c
->cibase
+ a
;
2378 mrb_gc_protect(mrb
, v
);
2380 CHECKPOINT_MAIN(RBREAK_TAG_BREAK
) {
2382 UNWIND_ENSURE(mrb
, mrb
->c
->ci
, mrb
->c
->ci
->pc
, RBREAK_TAG_BREAK
, ci
, v
);
2384 if (mrb
->c
->ci
== ci
) {
2388 if (mrb
->c
->ci
[1].cci
!= CINFO_NONE
) {
2389 mrb_assert(prev_jmp
!= NULL
);
2390 mrb
->exc
= (struct RObject
*)break_new(mrb
, RBREAK_TAG_BREAK
, ci
, v
);
2391 mrb_gc_arena_restore(mrb
, ai
);
2392 mrb
->c
->vmexec
= FALSE
;
2393 mrb
->jmp
= prev_jmp
;
2394 MRB_THROW(prev_jmp
);
2398 CHECKPOINT_END(RBREAK_TAG_BREAK
);
2399 mrb
->exc
= NULL
; /* clear break object */
2401 if (ci
== mrb
->c
->cibase
) {
2402 struct mrb_context
*c
= mrb
->c
;
2403 if (c
== mrb
->root_c
) {
2404 /* toplevel return */
2405 mrb_gc_arena_restore(mrb
, ai
);
2406 mrb
->jmp
= prev_jmp
;
2410 fiber_terminate(mrb
, c
, ci
);
2412 (mrb
->c
== mrb
->root_c
&& mrb
->c
->ci
== mrb
->c
->cibase
) /* case using Fiber#transfer in mrb_fiber_resume() */) {
2413 mrb_gc_arena_restore(mrb
, ai
);
2415 mrb
->jmp
= prev_jmp
;
2421 if (mrb
->c
->vmexec
&& !ci
->u
.keep_context
) {
2422 mrb_gc_arena_restore(mrb
, ai
);
2423 mrb
->c
->vmexec
= FALSE
;
2424 mrb
->jmp
= prev_jmp
;
2429 if (acc
== CINFO_SKIP
|| acc
== CINFO_DIRECT
) {
2430 mrb_gc_arena_restore(mrb
, ai
);
2431 mrb
->jmp
= prev_jmp
;
2435 DEBUG(fprintf(stderr
, "from :%s\n", mrb_sym_name(mrb
, ci
->mid
)));
2437 irep
= proc
->body
.irep
;
2442 mrb_gc_arena_restore(mrb
, ai
);
2447 CASE(OP_BLKPUSH
, BS
) {
2448 int m1
= (b
>>11)&0x3f;
2449 int r
= (b
>>10)&0x1;
2450 int m2
= (b
>>5)&0x1f;
2451 int kd
= (b
>>4)&0x1;
2452 int lv
= (b
>>0)&0xf;
2455 if (lv
== 0) stack
= regs
+ 1;
2457 struct REnv
*e
= uvenv(mrb
, lv
-1);
2458 if (!e
|| (!MRB_ENV_ONSTACK_P(e
) && e
->mid
== 0) ||
2459 MRB_ENV_LEN(e
) <= m1
+r
+m2
+1) {
2460 localjump_error(mrb
, LOCALJUMP_ERROR_YIELD
);
2463 stack
= e
->stack
+ 1;
2465 if (mrb_nil_p(stack
[m1
+r
+m2
+kd
])) {
2466 localjump_error(mrb
, LOCALJUMP_ERROR_YIELD
);
2469 regs
[a
] = stack
[m1
+r
+m2
+kd
];
2473 #if !defined(MRB_USE_BIGINT) || defined(MRB_INT32)
2475 RAISE_LIT(mrb
, E_RANGE_ERROR
, "integer overflow");
2478 #define TYPES2(a,b) ((((uint16_t)(a))<<8)|(((uint16_t)(b))&0xff))
2479 #define OP_MATH(op_name) \
2480 /* need to check if op is overridden */ \
2481 switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) { \
2482 OP_MATH_CASE_INTEGER(op_name); \
2483 OP_MATH_CASE_FLOAT(op_name, integer, float); \
2484 OP_MATH_CASE_FLOAT(op_name, float, integer); \
2485 OP_MATH_CASE_FLOAT(op_name, float, float); \
2486 OP_MATH_CASE_STRING_##op_name(); \
2488 mid = MRB_OPSYM(op_name); \
2492 #define OP_MATH_CASE_INTEGER(op_name) \
2493 case TYPES2(MRB_TT_INTEGER, MRB_TT_INTEGER): \
2495 mrb_int x = mrb_integer(regs[a]), y = mrb_integer(regs[a+1]), z; \
2496 if (mrb_int_##op_name##_overflow(x, y, &z)) { \
2497 OP_MATH_OVERFLOW_INT(op_name,x,y); \
2500 SET_INT_VALUE(mrb,regs[a], z); \
2504 #define OP_MATH_CASE_FLOAT(op_name, t1, t2) (void)0
2506 #define OP_MATH_CASE_FLOAT(op_name, t1, t2) \
2507 case TYPES2(OP_MATH_TT_##t1, OP_MATH_TT_##t2): \
2509 mrb_float z = mrb_##t1(regs[a]) OP_MATH_OP_##op_name mrb_##t2(regs[a+1]); \
2510 SET_FLOAT_VALUE(mrb, regs[a], z); \
2514 #ifdef MRB_USE_BIGINT
2515 #define OP_MATH_OVERFLOW_INT(op,x,y) regs[a] = mrb_bint_##op##_ii(mrb,x,y)
2517 #define OP_MATH_OVERFLOW_INT(op,x,y) goto L_INT_OVERFLOW
2519 #define OP_MATH_CASE_STRING_add() \
2520 case TYPES2(MRB_TT_STRING, MRB_TT_STRING): \
2521 regs[a] = mrb_str_plus(mrb, regs[a], regs[a+1]); \
2522 mrb_gc_arena_restore(mrb, ai); \
2524 #define OP_MATH_CASE_STRING_sub() (void)0
2525 #define OP_MATH_CASE_STRING_mul() (void)0
2526 #define OP_MATH_OP_add +
2527 #define OP_MATH_OP_sub -
2528 #define OP_MATH_OP_mul *
2529 #define OP_MATH_TT_integer MRB_TT_INTEGER
2530 #define OP_MATH_TT_float MRB_TT_FLOAT
2545 #ifndef MRB_NO_FLOAT
2549 /* need to check if op is overridden */
2550 switch (TYPES2(mrb_type(regs
[a
]),mrb_type(regs
[a
+1]))) {
2551 case TYPES2(MRB_TT_INTEGER
,MRB_TT_INTEGER
):
2553 mrb_int x
= mrb_integer(regs
[a
]);
2554 mrb_int y
= mrb_integer(regs
[a
+1]);
2555 regs
[a
] = mrb_div_int_value(mrb
, x
, y
);
2558 #ifndef MRB_NO_FLOAT
2559 case TYPES2(MRB_TT_INTEGER
,MRB_TT_FLOAT
):
2560 x
= (mrb_float
)mrb_integer(regs
[a
]);
2561 y
= mrb_float(regs
[a
+1]);
2563 case TYPES2(MRB_TT_FLOAT
,MRB_TT_INTEGER
):
2564 x
= mrb_float(regs
[a
]);
2565 y
= (mrb_float
)mrb_integer(regs
[a
+1]);
2567 case TYPES2(MRB_TT_FLOAT
,MRB_TT_FLOAT
):
2568 x
= mrb_float(regs
[a
]);
2569 y
= mrb_float(regs
[a
+1]);
2573 mid
= MRB_OPSYM(div
);
2577 #ifndef MRB_NO_FLOAT
2578 f
= mrb_div_float(x
, y
);
2579 SET_FLOAT_VALUE(mrb
, regs
[a
], f
);
2584 #define OP_MATHI(op_name) \
2585 /* need to check if op is overridden */ \
2586 switch (mrb_type(regs[a])) { \
2587 OP_MATHI_CASE_INTEGER(op_name); \
2588 OP_MATHI_CASE_FLOAT(op_name); \
2590 SET_INT_VALUE(mrb,regs[a+1], b); \
2591 mid = MRB_OPSYM(op_name); \
2595 #define OP_MATHI_CASE_INTEGER(op_name) \
2596 case MRB_TT_INTEGER: \
2598 mrb_int x = mrb_integer(regs[a]), y = (mrb_int)b, z; \
2599 if (mrb_int_##op_name##_overflow(x, y, &z)) { \
2600 OP_MATH_OVERFLOW_INT(op_name,x,y); \
2603 SET_INT_VALUE(mrb,regs[a], z); \
2607 #define OP_MATHI_CASE_FLOAT(op_name) (void)0
2609 #define OP_MATHI_CASE_FLOAT(op_name) \
2610 case MRB_TT_FLOAT: \
2612 mrb_float z = mrb_float(regs[a]) OP_MATH_OP_##op_name b; \
2613 SET_FLOAT_VALUE(mrb, regs[a], z); \
2626 #define OP_CMP_BODY(op,v1,v2) (v1(regs[a]) op v2(regs[a+1]))
2629 #define OP_CMP(op,sym) do {\
2631 /* need to check if - is overridden */\
2632 switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) {\
2633 case TYPES2(MRB_TT_INTEGER,MRB_TT_INTEGER):\
2634 result = OP_CMP_BODY(op,mrb_fixnum,mrb_fixnum);\
2637 mid = MRB_OPSYM(sym);\
2641 SET_TRUE_VALUE(regs[a]);\
2644 SET_FALSE_VALUE(regs[a]);\
2648 #define OP_CMP(op, sym) do {\
2650 /* need to check if - is overridden */\
2651 switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) {\
2652 case TYPES2(MRB_TT_INTEGER,MRB_TT_INTEGER):\
2653 result = OP_CMP_BODY(op,mrb_integer,mrb_integer);\
2655 case TYPES2(MRB_TT_INTEGER,MRB_TT_FLOAT):\
2656 result = OP_CMP_BODY(op,mrb_integer,mrb_float);\
2658 case TYPES2(MRB_TT_FLOAT,MRB_TT_INTEGER):\
2659 result = OP_CMP_BODY(op,mrb_float,mrb_integer);\
2661 case TYPES2(MRB_TT_FLOAT,MRB_TT_FLOAT):\
2662 result = OP_CMP_BODY(op,mrb_float,mrb_float);\
2665 mid = MRB_OPSYM(sym);\
2669 SET_TRUE_VALUE(regs[a]);\
2672 SET_FALSE_VALUE(regs[a]);\
2678 if (mrb_obj_eq(mrb
, regs
[a
], regs
[a
+1])) {
2679 SET_TRUE_VALUE(regs
[a
]);
2681 else if (mrb_symbol_p(regs
[a
])) {
2682 SET_FALSE_VALUE(regs
[a
]);
2710 CASE(OP_ARRAY
, BB
) {
2711 regs
[a
] = ary_new_from_regs(mrb
, b
, a
);
2712 mrb_gc_arena_restore(mrb
, ai
);
2715 CASE(OP_ARRAY2
, BBB
) {
2716 regs
[a
] = ary_new_from_regs(mrb
, c
, b
);
2717 mrb_gc_arena_restore(mrb
, ai
);
2721 CASE(OP_ARYCAT
, B
) {
2722 mrb_value splat
= mrb_ary_splat(mrb
, regs
[a
+1]);
2723 if (mrb_nil_p(regs
[a
])) {
2727 mrb_assert(mrb_array_p(regs
[a
]));
2728 mrb_ary_concat(mrb
, regs
[a
], splat
);
2730 mrb_gc_arena_restore(mrb
, ai
);
2734 CASE(OP_ARYPUSH
, BB
) {
2735 mrb_assert(mrb_array_p(regs
[a
]));
2736 for (mrb_int i
=0; i
<b
; i
++) {
2737 mrb_ary_push(mrb
, regs
[a
], regs
[a
+i
+1]);
2742 CASE(OP_ARYSPLAT
, B
) {
2743 mrb_value ary
= mrb_ary_splat(mrb
, regs
[a
]);
2745 mrb_gc_arena_restore(mrb
, ai
);
2749 CASE(OP_AREF
, BBB
) {
2750 mrb_value v
= regs
[b
];
2752 if (!mrb_array_p(v
)) {
2757 SET_NIL_VALUE(regs
[a
]);
2761 v
= mrb_ary_ref(mrb
, v
, c
);
2767 CASE(OP_ASET
, BBB
) {
2768 mrb_assert(mrb_array_p(regs
[a
]));
2769 mrb_ary_set(mrb
, regs
[b
], c
, regs
[a
]);
2773 CASE(OP_APOST
, BBB
) {
2774 mrb_value v
= regs
[a
];
2780 if (!mrb_array_p(v
)) {
2781 v
= ary_new_from_regs(mrb
, 1, a
);
2783 ary
= mrb_ary_ptr(v
);
2784 len
= (int)ARY_LEN(ary
);
2785 if (len
> pre
+ post
) {
2786 v
= mrb_ary_new_from_values(mrb
, len
- pre
- post
, ARY_PTR(ary
)+pre
);
2789 regs
[a
++] = ARY_PTR(ary
)[len
-post
-1];
2793 v
= mrb_ary_new_capa(mrb
, 0);
2795 for (idx
=0; idx
+pre
<len
; idx
++) {
2796 regs
[a
+idx
] = ARY_PTR(ary
)[pre
+idx
];
2798 while (idx
< post
) {
2799 SET_NIL_VALUE(regs
[a
+idx
]);
2803 mrb_gc_arena_restore(mrb
, ai
);
2807 CASE(OP_INTERN
, B
) {
2808 mrb_assert(mrb_string_p(regs
[a
]));
2809 mrb_sym sym
= mrb_intern_str(mrb
, regs
[a
]);
2810 regs
[a
] = mrb_symbol_value(sym
);
2814 CASE(OP_SYMBOL
, BB
) {
2818 mrb_assert((pool
[b
].tt
&IREP_TT_NFLAG
)==0);
2819 len
= pool
[b
].tt
>> 2;
2820 if (pool
[b
].tt
& IREP_TT_SFLAG
) {
2821 sym
= mrb_intern_static(mrb
, pool
[b
].u
.str
, len
);
2824 sym
= mrb_intern(mrb
, pool
[b
].u
.str
, len
);
2826 regs
[a
] = mrb_symbol_value(sym
);
2830 CASE(OP_STRING
, BB
) {
2833 mrb_assert((pool
[b
].tt
&IREP_TT_NFLAG
)==0);
2834 len
= pool
[b
].tt
>> 2;
2835 if (pool
[b
].tt
& IREP_TT_SFLAG
) {
2836 regs
[a
] = mrb_str_new_static(mrb
, pool
[b
].u
.str
, len
);
2839 regs
[a
] = mrb_str_new(mrb
, pool
[b
].u
.str
, len
);
2841 mrb_gc_arena_restore(mrb
, ai
);
2845 CASE(OP_STRCAT
, B
) {
2846 mrb_assert(mrb_string_p(regs
[a
]));
2847 mrb_str_concat(mrb
, regs
[a
], regs
[a
+1]);
2852 mrb_value hash
= mrb_hash_new_capa(mrb
, b
);
2855 for (int i
=a
; i
<lim
; i
+=2) {
2856 mrb_hash_set(mrb
, hash
, regs
[i
], regs
[i
+1]);
2859 mrb_gc_arena_restore(mrb
, ai
);
2863 CASE(OP_HASHADD
, BB
) {
2868 mrb_ensure_hash_type(mrb
, hash
);
2869 for (int i
=a
+1; i
<lim
; i
+=2) {
2870 mrb_hash_set(mrb
, hash
, regs
[i
], regs
[i
+1]);
2872 mrb_gc_arena_restore(mrb
, ai
);
2875 CASE(OP_HASHCAT
, B
) {
2876 mrb_value hash
= regs
[a
];
2878 mrb_assert(mrb_hash_p(hash
));
2879 mrb_hash_merge(mrb
, hash
, regs
[a
+1]);
2880 mrb_gc_arena_restore(mrb
, ai
);
2889 const mrb_irep
*nirep
= irep
->reps
[b
];
2891 if (c
& OP_L_CAPTURE
) {
2892 p
= mrb_closure_new(mrb
, nirep
);
2895 p
= mrb_proc_new(mrb
, nirep
);
2896 p
->flags
|= MRB_PROC_SCOPE
;
2898 if (c
& OP_L_STRICT
) p
->flags
|= MRB_PROC_STRICT
;
2899 regs
[a
] = mrb_obj_value(p
);
2900 mrb_gc_arena_restore(mrb
, ai
);
2903 CASE(OP_BLOCK
, BB
) {
2907 CASE(OP_METHOD
, BB
) {
2912 CASE(OP_RANGE_INC
, B
) {
2913 mrb_value v
= mrb_range_new(mrb
, regs
[a
], regs
[a
+1], FALSE
);
2915 mrb_gc_arena_restore(mrb
, ai
);
2919 CASE(OP_RANGE_EXC
, B
) {
2920 mrb_value v
= mrb_range_new(mrb
, regs
[a
], regs
[a
+1], TRUE
);
2922 mrb_gc_arena_restore(mrb
, ai
);
2926 CASE(OP_OCLASS
, B
) {
2927 regs
[a
] = mrb_obj_value(mrb
->object_class
);
2931 CASE(OP_CLASS
, BB
) {
2932 struct RClass
*c
= 0, *baseclass
;
2933 mrb_value base
, super
;
2934 mrb_sym id
= syms
[b
];
2938 if (mrb_nil_p(base
)) {
2939 baseclass
= MRB_PROC_TARGET_CLASS(mrb
->c
->ci
->proc
);
2940 if (!baseclass
) baseclass
= mrb
->object_class
;
2941 base
= mrb_obj_value(baseclass
);
2943 c
= mrb_vm_define_class(mrb
, base
, super
, id
);
2944 regs
[a
] = mrb_obj_value(c
);
2945 mrb_gc_arena_restore(mrb
, ai
);
2949 CASE(OP_MODULE
, BB
) {
2950 struct RClass
*cls
= 0, *baseclass
;
2952 mrb_sym id
= syms
[b
];
2955 if (mrb_nil_p(base
)) {
2956 baseclass
= MRB_PROC_TARGET_CLASS(mrb
->c
->ci
->proc
);
2957 if (!baseclass
) baseclass
= mrb
->object_class
;
2958 base
= mrb_obj_value(baseclass
);
2960 cls
= mrb_vm_define_module(mrb
, base
, id
);
2961 regs
[a
] = mrb_obj_value(cls
);
2962 mrb_gc_arena_restore(mrb
, ai
);
2968 mrb_value recv
= regs
[a
];
2970 const mrb_irep
*nirep
= irep
->reps
[b
];
2972 /* prepare closure */
2973 p
= mrb_proc_new(mrb
, nirep
);
2975 mrb_field_write_barrier(mrb
, (struct RBasic
*)p
, (struct RBasic
*)proc
);
2976 MRB_PROC_SET_TARGET_CLASS(p
, mrb_class_ptr(recv
));
2977 p
->flags
|= MRB_PROC_SCOPE
;
2979 /* prepare call stack */
2980 cipush(mrb
, a
, 0, mrb_class_ptr(recv
), p
, NULL
, 0, 0);
2982 irep
= p
->body
.irep
;
2985 stack_extend(mrb
, irep
->nregs
);
2986 stack_clear(regs
+1, irep
->nregs
-1);
2992 struct RClass
*target
= mrb_class_ptr(regs
[a
]);
2993 struct RProc
*p
= mrb_proc_ptr(regs
[a
+1]);
2995 mrb_sym mid
= syms
[b
];
2997 MRB_METHOD_FROM_PROC(m
, p
);
2998 mrb_define_method_raw(mrb
, target
, mid
, m
);
2999 mrb_method_added(mrb
, target
, mid
);
3000 mrb_gc_arena_restore(mrb
, ai
);
3001 regs
[a
] = mrb_symbol_value(mid
);
3005 CASE(OP_SCLASS
, B
) {
3006 regs
[a
] = mrb_singleton_class(mrb
, regs
[a
]);
3007 mrb_gc_arena_restore(mrb
, ai
);
3011 CASE(OP_TCLASS
, B
) {
3012 struct RClass
*target
= check_target_class(mrb
);
3013 if (!target
) goto L_RAISE
;
3014 regs
[a
] = mrb_obj_value(target
);
3018 CASE(OP_ALIAS
, BB
) {
3019 struct RClass
*target
= check_target_class(mrb
);
3021 if (!target
) goto L_RAISE
;
3022 mrb_alias_method(mrb
, target
, syms
[a
], syms
[b
]);
3023 mrb_method_added(mrb
, target
, syms
[a
]);
3027 struct RClass
*target
= check_target_class(mrb
);
3029 if (!target
) goto L_RAISE
;
3030 mrb_undef_method_id(mrb
, target
, syms
[a
]);
3036 #ifdef MRB_USE_DEBUG_HOOK
3037 mrb
->debug_op_hook(mrb
, irep
, pc
, regs
);
3039 #ifndef MRB_NO_STDIO
3040 printf("OP_DEBUG %d %d %d\n", a
, b
, c
);
3049 size_t len
= pool
[a
].tt
>> 2;
3052 mrb_assert((pool
[a
].tt
&IREP_TT_NFLAG
)==0);
3053 exc
= mrb_exc_new(mrb
, E_LOCALJUMP_ERROR
, pool
[a
].u
.str
, len
);
3054 RAISE_EXC(mrb
, exc
);
3060 #define OPCODE(insn,ops) case OP_ ## insn: FETCH_ ## ops ## _1(); mrb->c->ci->pc = pc; goto L_OP_ ## insn ## _BODY;
3061 #include <mruby/ops.h>
3070 #define OPCODE(insn,ops) case OP_ ## insn: FETCH_ ## ops ## _2(); mrb->c->ci->pc = pc; goto L_OP_ ## insn ## _BODY;
3071 #include <mruby/ops.h>
3080 #define OPCODE(insn,ops) case OP_ ## insn: FETCH_ ## ops ## _3(); mrb->c->ci->pc = pc; goto L_OP_ ## insn ## _BODY;
3081 #include <mruby/ops.h>
3091 v
= mrb
->exc
? mrb_obj_value(mrb
->exc
) : mrb_nil_value();
3092 CHECKPOINT_RESTORE(RBREAK_TAG_STOP
) {
3093 struct RBreak
*brk
= (struct RBreak
*)mrb
->exc
;
3094 v
= mrb_break_value_get(brk
);
3096 CHECKPOINT_MAIN(RBREAK_TAG_STOP
) {
3097 UNWIND_ENSURE(mrb
, mrb
->c
->ci
, mrb
->c
->ci
->pc
, RBREAK_TAG_STOP
, mrb
->c
->ci
, v
);
3099 CHECKPOINT_END(RBREAK_TAG_STOP
);
3100 mrb
->jmp
= prev_jmp
;
3101 if (!mrb_nil_p(v
)) {
3102 mrb
->exc
= mrb_obj_ptr(v
);
3106 return regs
[irep
->nlocals
];
3113 mrb_callinfo
*ci
= mrb
->c
->ci
;
3114 while (ci
> mrb
->c
->cibase
&& ci
->cci
== CINFO_DIRECT
) {
3118 goto RETRY_TRY_BLOCK
;
3120 MRB_END_EXC(&c_jmp
);
3124 mrb_run(mrb_state
*mrb
, const struct RProc
*proc
, mrb_value self
)
3126 return mrb_vm_run(mrb
, proc
, self
, ci_bidx(mrb
->c
->ci
) + 1);
3130 mrb_top_run(mrb_state
*mrb
, const struct RProc
*proc
, mrb_value self
, mrb_int stack_keep
)
3132 if (mrb
->c
->cibase
&& mrb
->c
->ci
> mrb
->c
->cibase
) {
3133 cipush(mrb
, 0, CINFO_SKIP
, mrb
->object_class
, NULL
, NULL
, 0, 0);
3135 return mrb_vm_run(mrb
, proc
, self
, stack_keep
);