Hash[*1380888.times.map{ 1 }]
puts(*1380888.times)
def a(*ary)
end
a(*1380888.times)
object_id(*1380888.times)
$ ruby18 -ve 'object_id(*1380888.times)'
ruby 1.8.7 (2013-06-27 patchlevel 374) [x86_64-openbsd]
Illegal instruction (core dumped)
$ ruby18 -e 'def a(*a) end; a(*1380888.times); p 1'
Illegal instruction (core dumped)
$ ruby19 -ve 'object_id(*1380888.times)'
ruby 1.9.3p551 (2014-11-13 revision 48407) [x86_64-openbsd]
-e:1: stack level too deep (SystemStackError)
$ ruby19 -e 'def a(*a) end; a(*1380888.times); p 1'
-e:1: stack level too deep (SystemStackError)
$ ruby22 -ve 'object_id(*1380888.times)'
ruby 2.2.10p489 (2018-03-28 revision 63023) [x86_64-openbsd]
-e:1: stack level too deep (SystemStackError)
$ ruby22 -e 'def a(*a) end; a(*1380888.times); p 1'
1
$ ruby32 -ve 'object_id(*1380888.times)'
ruby 3.2.1 (2023-02-08 revision 31819e82c8) [x86_64-openbsd]
-e:1: stack level too deep (SystemStackError)
$ ruby32 -e 'def a(*a) end; a(*1380888.times); p 1'
1
$ ruby33 -ve 'object_id(*1380888.times)'
ruby 3.3.0dev (2023-02-09T20:02:26Z master d620855101) [x86_64-openbsd]
-e:1:in `object_id': wrong number of arguments (given 1380888, expected 0) (ArgumentError)
object_id(*1380888.times)
^^^^^^^^^^^^^^
from -e:1:in `<main>'
$ ruby33 -e 'def a(*a) end; a(*1380888.times); p 1'
1
$ ruby33 -e 'p Hash[*1380888.times.map{ 1 }]'
{1=>1}
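To see these results side by side, here is a small probe script (an addition to these slides, not from the talk) that splats 1,380,888 arguments at a C-defined method (object_id) and at a Ruby-defined method and reports what each call raises; it works on 1.9 and later, where the failures are ordinary exceptions, while 1.8 simply dumps core.

N = 1_380_888

def a(*ary)
  ary.length
end

{ "cfunc (object_id)" => -> { object_id(*N.times) },
  "iseq (a)"          => -> { a(*N.times) } }.each do |label, call|
  result = begin
    call.call
  rescue SystemStackError, ArgumentError => e
    e.class
  end
  puts "#{label}: #{result.inspect}"
end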
$ gdb --args ruby -e itself
(gdb) break rb_obj_itself
(gdb) run
(gdb) backtrace
#0 rb_obj_itself at object.c:564
#1 ractor_safe_call_cfunc_0 at ./vm_insnhelper.c:3085
#2 vm_call_cfunc_with_frame at ./vm_insnhelper.c:3268
#3 vm_call_cfunc at ./vm_insnhelper.c:3289
#4 vm_call_method_each_type at ./vm_insnhelper.c:3920
#5 vm_call_method at ./vm_insnhelper.c:4044
#6 vm_call_general at ./vm_insnhelper.c:4088
#7 vm_sendish at ./vm_insnhelper.c:5080
#8 vm_exec_core at insns.def:820
#9 rb_vm_exec at vm.c:2374
#10 rb_iseq_eval_main at vm.c:2633
#11 rb_ec_exec_node at eval.c:289
#12 ruby_run_node at eval.c:330
#13 rb_main at ./main.c:38
#14 main at ./main.c:57
$ git grep \"SystemStackError\"
proc.c: rb_eSysStackError = rb_define_class("SystemStackError", rb_eException);
NORETURN(static void ec_stack_overflow(rb_execution_context_t *ec, int));
static void
ec_stack_overflow(rb_execution_context_t *ec, int setup)
{
VALUE mesg = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_sysstack];
ec->raised_flag = RAISED_STACKOVERFLOW;
if (setup) {
VALUE at = rb_ec_backtrace_object(ec);
mesg = ruby_vm_special_exception_copy(mesg);
rb_ivar_set(mesg, idBt, at);
rb_ivar_set(mesg, idBt_locations, at);
}
ec->errinfo = mesg;
EC_JUMP_TAG(ec, TAG_RAISE);
}
$ gdb --args ruby -e 'itself(*1380888.times)'
(gdb) break ec_stack_overflow
(gdb) run
(gdb) backtrace
#0 ec_stack_overflow at ./vm_insnhelper.c:61
#1 vm_stackoverflow at ./vm_insnhelper.c:81
#2 vm_caller_setup_arg_splat at ./vm_args.c:795
#3 CALLER_SETUP_ARG at ./vm_insnhelper.c:2545
#4 vm_call_cfunc at ./vm_insnhelper.c:3286
#5 vm_call_method_each_type at ./vm_insnhelper.c:3920
#6 vm_call_method at ./vm_insnhelper.c:4044
#7 vm_call_general at ./vm_insnhelper.c:4088
#8 vm_sendish at ./vm_insnhelper.c:5080
#9 vm_exec_core at insns.def:820
#10 rb_vm_exec at vm.c:2374
#11 rb_iseq_eval_main at vm.c:2633
#12 rb_ec_exec_node at eval.c:289
#13 ruby_run_node at eval.c:330
#14 rb_main at ./main.c:38
#15 main at ./main.c:57
static inline void
vm_caller_setup_arg_splat(rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
int argc = calling->argc;
VALUE *argv = cfp->sp - argc;
VALUE ary = argv[argc-1];
vm_check_canary(GET_EC(), cfp->sp);
cfp->sp--;
if (!NIL_P(ary)) {
const VALUE *ptr = RARRAY_CONST_PTR_TRANSIENT(ary);
long len = RARRAY_LEN(ary), i;
CHECK_VM_STACK_OVERFLOW(cfp, len);
for (i = 0; i < len; i++) {
*cfp->sp++ = ptr[i];
}
calling->argc += i - 1;
}
}
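In Ruby terms, what vm_caller_setup_arg_splat does to the VM stack looks like this (a Ruby-level sketch added for illustration, not VM code): the trailing splat array is popped and each of its elements is pushed in its place, so argc grows by len - 1.

stack = [1, 2, [3, 4, 5]]   # two ordinary args plus the splatted array on top
argc  = stack.length        # calling->argc == 3
ary   = stack.pop           # cfp->sp-- drops the array itself
stack.concat(ary)           # the for loop pushes every element back
argc += ary.length - 1      # calling->argc += i - 1
stack                       # => [1, 2, 3, 4, 5]
argc                        # => 5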
(gdb) backtrace
#0 rb_obj_itself at object.c:564
#1 ractor_safe_call_cfunc_0 at ./vm_insnhelper.c:3085
#2 vm_call_cfunc_with_frame at ./vm_insnhelper.c:3268
#3 vm_call_cfunc at ./vm_insnhelper.c:3289
#4 vm_call_method_each_type at ./vm_insnhelper.c:3920
#5 vm_call_method at ./vm_insnhelper.c:4044
#6 vm_call_general at ./vm_insnhelper.c:4088
#7 vm_sendish at ./vm_insnhelper.c:5080
#8 vm_exec_core at insns.def:820
#9 rb_vm_exec at vm.c:2374
#10 rb_iseq_eval_main at vm.c:2633
#11 rb_ec_exec_node at eval.c:289
#12 ruby_run_node at eval.c:330
#13 rb_main at ./main.c:38
#14 main at ./main.c:57
ractor_safe_call_cfunc_0(VALUE recv, int argc, const VALUE *argv,
VALUE (*func)(ANYARGS))
static VALUE
vm_call_cfunc_with_frame(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
struct rb_calling_info *calling)
{
// ...
val = (*cfunc->invoker)(recv, argc, reg_cfp->sp + 1, cfunc->func);
// ...
}
static inline void
vm_caller_setup_arg_splat(rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
int argc = calling->argc;
VALUE *argv = cfp->sp - argc;
VALUE ary = argv[argc-1];
vm_check_canary(GET_EC(), cfp->sp);
cfp->sp--;
if (!NIL_P(ary)) {
const VALUE *ptr = RARRAY_CONST_PTR_TRANSIENT(ary);
long len = RARRAY_LEN(ary), i;
CHECK_VM_STACK_OVERFLOW(cfp, len);
for (i = 0; i < len; i++) {
*cfp->sp++ = ptr[i];
}
calling->argc += i - 1;
}
}
const VALUE *ptr = RARRAY_CONST_PTR_TRANSIENT(ary);
long len = RARRAY_LEN(ary), i;
CHECK_VM_STACK_OVERFLOW(cfp, len);
for (i = 0; i < len; i++) {
*cfp->sp++ = ptr[i];
}
calling->argc += i - 1;
const VALUE *ptr = RARRAY_CONST_PTR_TRANSIENT(ary);
long len = RARRAY_LEN(ary), i;
if (some_condition) {
// do not use stack
}
else {
CHECK_VM_STACK_OVERFLOW(cfp, len);
for (i = 0; i < len; i++) {
*cfp->sp++ = ptr[i];
}
calling->argc += i - 1;
}
const VALUE *ptr = RARRAY_CONST_PTR_TRANSIENT(ary);
long len = RARRAY_LEN(ary), i;
if (argc + len > 1000) {
// do not use stack
}
else {
CHECK_VM_STACK_OVERFLOW(cfp, len);
for (i = 0; i < len; i++) {
*cfp->sp++ = ptr[i];
}
calling->argc += i - 1;
}
const VALUE *ptr = RARRAY_CONST_PTR_TRANSIENT(ary);
long len = RARRAY_LEN(ary), i;
if (argc + len > 1000) {
VALUE argv_array = rb_ary_new_capa(len + argc - 1);
rb_obj_hide(argv_array);
rb_ary_cat(argv_array, argv, argc-1);
rb_ary_cat(argv_array, ptr, len);
cfp->sp -= argc - 1;
cfp->sp[-1] = argv_array;
calling->argc = 1;
calling->heap_argv = 1;
}
else {
CHECK_VM_STACK_OVERFLOW(cfp, len);
for (i = 0; i < len; i++) {
*cfp->sp++ = ptr[i];
}
calling->argc += i - 1;
}
static VALUE
vm_call_cfunc_with_frame(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
struct rb_calling_info *calling)
{
// ...
val = (*cfunc->invoker)(recv, argc, reg_cfp->sp + 1, cfunc->func);
// ...
}
static VALUE
vm_call_cfunc_with_frame(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
struct rb_calling_info *calling)
{
// ...
if (calling->heap_argv) {
// Use arguments from Ruby array
}
else {
val = (*cfunc->invoker)(recv, argc, reg_cfp->sp + 1, cfunc->func);
}
// ...
}
static VALUE
vm_call_cfunc_with_frame(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
struct rb_calling_info *calling)
{
// ...
if (calling->heap_argv) {
VALUE argv_array = reg_cfp->sp[-1];
VALUE *argv = RARRAY_PTR(argv_array);
argc = (int)RARRAY_LEN(argv_array);
val = (*cfunc->invoker)(recv, argc, argv, cfunc->func);
}
else {
val = (*cfunc->invoker)(recv, argc, reg_cfp->sp + 1, cfunc->func);
}
// ...
}
static VALUE
vm_call_cfunc(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
struct rb_calling_info *calling)
{
const struct rb_callinfo *ci = calling->ci;
RB_DEBUG_COUNTER_INC(ccf_cfunc);
CALLER_SETUP_ARG(reg_cfp, calling, ci);
CALLER_REMOVE_EMPTY_KW_SPLAT(reg_cfp, calling, ci);
CC_SET_FASTPATH(calling->cc, vm_call_cfunc_with_frame,
!rb_splat_or_kwargs_p(ci) && !calling->kw_splat);
return vm_call_cfunc_with_frame(ec, reg_cfp, calling);
}
static VALUE
vm_call_cfunc_setup_argv_ary(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
struct rb_calling_info *calling)
{
int argc = calling->argc;
VALUE *argv = reg_cfp->sp - argc;
VALUE ary = argv[argc-1];
long len = RARRAY_LEN(ary);
if (UNLIKELY(len + argc > VM_ARGC_STACK_MAX)) {
vm_check_canary(ec, reg_cfp->sp);
const VALUE *ptr = RARRAY_CONST_PTR_TRANSIENT(ary);
VALUE argv_ary = rb_ary_new_capa(len + argc - 1);
rb_obj_hide(argv_ary);
rb_ary_cat(argv_ary, argv, argc-1);
rb_ary_cat(argv_ary, ptr, len);
reg_cfp->sp -= argc - 1;
reg_cfp->sp[-1] = argv_ary;
calling->argc = 1;
return argv_ary;
}
else {
return Qfalse;
}
}
static VALUE
vm_call_cfunc(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
struct rb_calling_info *calling)
{
const struct rb_callinfo *ci = calling->ci;
RB_DEBUG_COUNTER_INC(ccf_cfunc);
VALUE argv_ary;
if (UNLIKELY(IS_ARGS_SPLAT(ci)) && (argv_ary = vm_call_cfunc_setup_argv_ary(ec, reg_cfp, calling))) {
// special case of CALLER_SETUP_ARG
if (!IS_ARGS_KW_OR_KW_SPLAT(ci)) {
long hash_idx = RARRAY_LEN(argv_ary) - 1;
VALUE final_hash = RARRAY_AREF(argv_ary, hash_idx);
if (RB_TYPE_P(final_hash, T_HASH) &&
(((struct RHash *)final_hash)->basic.flags & RHASH_PASS_AS_KEYWORDS)) {
if (RHASH_EMPTY_P(final_hash)) {
rb_ary_pop(argv_ary);
}
else {
final_hash = rb_hash_dup(final_hash);
RARRAY_ASET(argv_ary, hash_idx, final_hash);
calling->kw_splat = 1;
}
}
}
if (UNLIKELY(IS_ARGS_KW_OR_KW_SPLAT(ci))) {
VM_ASSERT(!IS_ARGS_KEYWORD(ci)); // should be KW_SPLAT
long hash_idx = RARRAY_LEN(argv_ary) - 1;
VALUE keyword_hash = RARRAY_AREF(argv_ary, hash_idx);
if (!RB_TYPE_P(keyword_hash, T_HASH)) {
/* Convert a non-hash keyword splat to a new hash */
RARRAY_ASET(argv_ary, hash_idx, rb_hash_dup(rb_to_hash_type(keyword_hash)));
}
else if (!IS_ARGS_KW_SPLAT_MUT(ci)) {
/* Convert a hash keyword splat to a new hash unless
* a mutable keyword splat was passed.
*/
RARRAY_ASET(argv_ary, hash_idx, rb_hash_dup(keyword_hash));
}
}
// special case of CALLER_REMOVE_EMPTY_KW_SPLAT()
if (UNLIKELY(calling->kw_splat)) {
VALUE kw_hash = RARRAY_AREF(argv_ary, RARRAY_LEN(argv_ary)-1);
if (RHASH_EMPTY_P(kw_hash)) {
rb_ary_pop(argv_ary);
calling->kw_splat = false;
}
}
int argc = RARRAY_LENINT(argv_ary);
VALUE *argv = (void *)RARRAY_CONST_PTR_TRANSIENT(argv_ary);
VALUE *stack_bottom = reg_cfp->sp - 2;
VM_ASSERT(calling->argc == 1);
VM_ASSERT(RB_TYPE_P(argv_ary, T_ARRAY));
VM_ASSERT(RBASIC_CLASS(argv_ary) == 0); // hidden ary
return vm_call_cfunc_with_frame_(ec, reg_cfp, calling, argc, argv, stack_bottom);
}
else {
CALLER_SETUP_ARG(reg_cfp, calling, ci);
CALLER_REMOVE_EMPTY_KW_SPLAT(reg_cfp, calling, ci);
CC_SET_FASTPATH(calling->cc, vm_call_cfunc_with_frame,
!rb_splat_or_kwargs_p(ci) && !calling->kw_splat);
return vm_call_cfunc_with_frame(ec, reg_cfp, calling);
}
}
def self.a(*ary)
ary.length
end
a(*1380888.times)
# => 1380888
define_singleton_method(:a) do |*ary|
ary.length
end
a(*1380888.times)
# expected 1380888, but raises SystemStackError
static VALUE
vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp,
struct rb_calling_info *calling)
{
const struct rb_callcache *cc = calling->cc;
const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
switch (cme->def->type) {
case VM_METHOD_TYPE_ISEQ:
CC_SET_FASTPATH(cc, vm_call_iseq_setup, TRUE);
return vm_call_iseq_setup(ec, cfp, calling);
case VM_METHOD_TYPE_CFUNC:
CC_SET_FASTPATH(cc, vm_call_cfunc, TRUE);
return vm_call_cfunc(ec, cfp, calling);
case VM_METHOD_TYPE_BMETHOD:
CC_SET_FASTPATH(cc, vm_call_bmethod, TRUE);
return vm_call_bmethod(ec, cfp, calling);
// ...
static VALUE
vm_call_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp,
struct rb_calling_info *calling)
{
RB_DEBUG_COUNTER_INC(ccf_bmethod);
VALUE *argv;
int argc;
const struct rb_callinfo *ci = calling->ci;
CALLER_SETUP_ARG(cfp, calling, ci);
argc = calling->argc;
argv = ALLOCA_N(VALUE, argc);
MEMCPY(argv, cfp->sp - argc, VALUE, argc);
cfp->sp += - argc - 1;
return vm_call_bmethod_body(ec, calling, argv);
}
(gdb) backtrace
#0 ec_stack_overflow at ./vm_insnhelper.c:61
#1 vm_stackoverflow at ./vm_insnhelper.c:81
#2 vm_caller_setup_arg_splat at ./vm_args.c:795
#3 CALLER_SETUP_ARG at ./vm_insnhelper.c:2545
#4 vm_call_cfunc at ./vm_insnhelper.c:3286
#5 vm_call_method_each_type at ./vm_insnhelper.c:3920
#6 vm_call_method at ./vm_insnhelper.c:4044
#7 vm_call_general at ./vm_insnhelper.c:4088
#8 vm_sendish at ./vm_insnhelper.c:5080
#9 vm_exec_core at insns.def:820
#10 rb_vm_exec at vm.c:2374
#11 rb_iseq_eval_main at vm.c:2633
#12 rb_ec_exec_node at eval.c:289
#13 ruby_run_node at eval.c:330
#14 rb_main at ./main.c:38
#15 main at ./main.c:57
const VALUE *ptr = RARRAY_CONST_PTR_TRANSIENT(ary);
long len = RARRAY_LEN(ary), i;
if (argc + len > 1000) {
VALUE argv_array = rb_ary_new_capa(len + argc - 1);
rb_obj_hide(argv_array);
rb_ary_cat(argv_array, argv, argc-1);
rb_ary_cat(argv_array, ptr, len);
cfp->sp -= argc - 1;
cfp->sp[-1] = argv_array;
calling->argc = 1;
calling->heap_argv = 1;
}
else {
CHECK_VM_STACK_OVERFLOW(cfp, len);
for (i = 0; i < len; i++) {
*cfp->sp++ = ptr[i];
}
calling->argc += i - 1;
}
const VALUE *ptr = RARRAY_CONST_PTR_TRANSIENT(ary);
long len = RARRAY_LEN(ary), i;
if (argc + len > 1000) {
VALUE *argv = cfp->sp - argc;
VALUE argv_ary = rb_ary_hidden_new(len + argc + 1);
rb_ary_cat(argv_ary, argv, argc);
rb_ary_cat(argv_ary, ptr, len);
cfp->sp -= argc - 1;
cfp->sp[-1] = argv_ary;
calling->argc = 1;
calling->heap_argv = argv_ary;
}
else {
CHECK_VM_STACK_OVERFLOW(cfp, len);
for (i = 0; i < len; i++) {
*cfp->sp++ = ptr[i];
}
calling->argc += i - 1;
}
static VALUE
vm_call_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp,
struct rb_calling_info *calling)
{
RB_DEBUG_COUNTER_INC(ccf_bmethod);
VALUE *argv;
int argc;
const struct rb_callinfo *ci = calling->ci;
CALLER_SETUP_ARG(cfp, calling, ci);
argc = calling->argc;
argv = ALLOCA_N(VALUE, argc);
MEMCPY(argv, cfp->sp - argc, VALUE, argc);
cfp->sp += - argc - 1;
return vm_call_bmethod_body(ec, calling, argv);
}
static VALUE
vm_call_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp,
struct rb_calling_info *calling)
{
RB_DEBUG_COUNTER_INC(ccf_bmethod);
VALUE *argv;
int argc;
const struct rb_callinfo *ci = calling->ci;
CALLER_SETUP_ARG(cfp, calling, ci, ALLOW_HEAP_ARGV);
if (UNLIKELY(calling->heap_argv)) {
argv = RARRAY_PTR(calling->heap_argv);
cfp->sp -= 2;
}
else {
argc = calling->argc;
argv = ALLOCA_N(VALUE, argc);
MEMCPY(argv, cfp->sp - argc, VALUE, argc);
cfp->sp += - argc - 1;
}
return vm_call_bmethod_body(ec, calling, argv);
}
static inline VALUE
invoke_iseq_block_from_c(rb_execution_context_t *ec, const struct rb_captured_block *captured,
VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler,
const rb_cref_t *cref, int is_lambda, const rb_callable_method_entry_t *me)
{
// ...
CHECK_VM_STACK_OVERFLOW(cfp, argc);
vm_check_canary(ec, sp);
cfp->sp = sp + argc;
for (i=0; i<argc; i++) {
sp[i] = argv[i];
}
opt_pc = vm_yield_setup_args(ec, iseq, argc, sp, kw_splat, passed_block_handler,
(is_lambda ? arg_setup_method : arg_setup_block));
// ...
}
static inline VALUE
invoke_iseq_block_from_c(rb_execution_context_t *ec, const struct rb_captured_block *captured,
VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler,
const rb_cref_t *cref, int is_lambda, const rb_callable_method_entry_t *me)
{
// ...
int flags = (kw_splat ? VM_CALL_KW_SPLAT : 0);
VALUE *use_argv = (VALUE *)argv;
VALUE av[2];
if (UNLIKELY(argc > VM_ARGC_STACK_MAX)) {
use_argv = vm_argv_ruby_array(av, argv, &flags, &argc, kw_splat);
}
CHECK_VM_STACK_OVERFLOW(cfp, argc);
vm_check_canary(ec, sp);
cfp->sp = sp + argc;
for (i=0; i<argc; i++) {
sp[i] = use_argv[i];
}
opt_pc = vm_yield_setup_args(ec, iseq, argc, sp, flags, passed_block_handler,
(is_lambda ? arg_setup_method : arg_setup_block));
// ...
}
define_singleton_method(:a) do |*ary|
ary.length
end
a(*1380888.times)
# => 1380888
def self.a(*ary)
ary.length
end
a(*1380888.times)
# => 1380888
send(:a, *1380888.times)
# SystemStackError
def self.a(*ary)
ary.length
end
a(*1380888.times)
# => 1380888
:a.to_proc.(self, *1380888.times)
# SystemStackError
def self.a(*ary)
ary.length
end
a(*1380888.times)
# => 1380888
method(:a).(*1380888.times)
# SystemStackError
def self.method_missing(method, *ary)
ary.length
end
a(*1380888.times)
# SystemStackError
def self.a(*ary)
ary.length
end
a(*1380888.times)
# => 1380888
def self.a(ary)
ary.length
end
a(*1380888.times)
# expected ArgumentError, but raises SystemStackError
class A
attr_reader :a
end
A.new.a(*1380888.times)
# SystemStackError
A = Struct.new(:a)
A.new.a(*1380888.times)
# SystemStackError
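Putting the Ruby-level results together, here is a small self-check script (an addition to these slides; the class and method names below are my own) that exercises each of the call paths above with a huge splat and prints whichever exception class, if any, the running Ruby raises.

N = 1_380_888

def self.a(*ary)
  ary.length
end
define_singleton_method(:b) { |*ary| ary.length }

class R
  attr_reader :r
end

{
  "def"                     => -> { a(*N.times) },
  "define_singleton_method" => -> { b(*N.times) },
  "send"                    => -> { send(:a, *N.times) },
  "Symbol#to_proc"          => -> { :a.to_proc.(self, *N.times) },
  "Method#call"             => -> { method(:a).(*N.times) },
  "attr_reader"             => -> { R.new.r(*N.times) },
  "Struct member"           => -> { Struct.new(:s).new.s(*N.times) },
}.each do |label, call|
  result = begin
    call.call
  rescue SystemStackError, ArgumentError => e
    e.class
  end
  puts "#{label}: #{result.inspect}"
end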
static VALUE
vm_call_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp,
struct rb_calling_info *calling)
{
RB_DEBUG_COUNTER_INC(ccf_bmethod);
VALUE *argv;
int argc;
const struct rb_callinfo *ci = calling->ci;
CALLER_SETUP_ARG(cfp, calling, ci, ALLOW_HEAP_ARGV);
if (UNLIKELY(calling->heap_argv)) {
argv = RARRAY_PTR(calling->heap_argv);
cfp->sp -= 2;
}
else {
argc = calling->argc;
argv = ALLOCA_N(VALUE, argc);
MEMCPY(argv, cfp->sp - argc, VALUE, argc);
cfp->sp += - argc - 1;
}
return vm_call_bmethod_body(ec, calling, argv);
}
static VALUE
vm_call_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp,
struct rb_calling_info *calling)
{
RB_DEBUG_COUNTER_INC(ccf_bmethod);
const struct rb_callcache *cc = calling->cc;
const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
VALUE procv = cme->def->body.bmethod.proc;
rb_proc_t *proc;
GetProcPtr(procv, proc);
const struct rb_block *block = &proc->block;
while (vm_block_type(block) == block_type_proc) {
block = vm_proc_block(block->as.proc);
}
if (vm_block_type(block) == block_type_iseq) {
CC_SET_FASTPATH(cc, vm_call_iseq_bmethod, TRUE);
return vm_call_iseq_bmethod(ec, cfp, calling);
}
CC_SET_FASTPATH(cc, vm_call_noniseq_bmethod, TRUE);
return vm_call_noniseq_bmethod(ec, cfp, calling);
}
static VALUE
vm_call_method_missing_body(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
struct rb_calling_info *calling, const struct rb_callinfo *orig_ci,
enum method_missing_reason reason)
{
RB_DEBUG_COUNTER_INC(ccf_method_missing);
VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
unsigned int argc;
CALLER_SETUP_ARG(reg_cfp, calling, orig_ci);
argc = calling->argc + 1;
unsigned int flag = VM_CALL_FCALL | VM_CALL_OPT_SEND |
(calling->kw_splat ? VM_CALL_KW_SPLAT : 0);
calling->argc = argc;
// ...
static VALUE
vm_call_method_missing_body(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
struct rb_calling_info *calling, const struct rb_callinfo *orig_ci,
enum method_missing_reason reason)
{
RB_DEBUG_COUNTER_INC(ccf_method_missing);
VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
unsigned int argc;
argc = calling->argc + 1;
unsigned int flag = VM_CALL_FCALL | VM_CALL_OPT_SEND |
(calling->kw_splat ? VM_CALL_KW_SPLAT : 0);
calling->argc = argc;
// ...
static VALUE
vm_call_method_missing_body(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
struct rb_calling_info *calling, const struct rb_callinfo *orig_ci,
enum method_missing_reason reason)
{
RB_DEBUG_COUNTER_INC(ccf_method_missing);
VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
unsigned int argc;
argc = calling->argc + 1;
unsigned int flag = VM_CALL_FCALL | VM_CALL_OPT_SEND |
vm_ci_flag(orig_ci);
calling->argc = argc;
// ...