Ruby 3.2.3p157 (2024-01-18 revision 52bb2ac0a6971d0391efa2275f7a66bff319087c)
vm_core.h
#ifndef RUBY_VM_CORE_H
#define RUBY_VM_CORE_H
/**********************************************************************

  vm_core.h -

  $Author$
  created at: 04/01/01 19:41:38 JST

  Copyright (C) 2004-2007 Koichi Sasada

**********************************************************************/

/*
 * Enable check mode.
 *   1: enable local assertions.
 */
#ifndef VM_CHECK_MODE

// respect RUBY_DEBUG: if given n is 0, then use RUBY_DEBUG
#define N_OR_RUBY_DEBUG(n) (((n) > 0) ? (n) : RUBY_DEBUG)

#define VM_CHECK_MODE N_OR_RUBY_DEBUG(0)
#endif

#ifndef VMDEBUG
#define VMDEBUG 0
#endif

#if 0
#undef  VMDEBUG
#define VMDEBUG 3
#endif

#include "ruby/internal/config.h"

#include <stddef.h>
#include <signal.h>
#include <stdarg.h>

#include "ruby_assert.h"

#if VM_CHECK_MODE > 0
#define VM_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(VM_CHECK_MODE > 0, expr, #expr)
#define VM_UNREACHABLE(func) rb_bug(#func ": unreachable")
#define RUBY_ASSERT_CRITICAL_SECTION
#define RUBY_DEBUG_THREAD_SCHEDULE() rb_thread_schedule()
#else
#define VM_ASSERT(expr) ((void)0)
#define VM_UNREACHABLE(func) UNREACHABLE
#define RUBY_DEBUG_THREAD_SCHEDULE()
#endif

#define RUBY_ASSERT_MUTEX_OWNED(mutex) VM_ASSERT(rb_mutex_owned_p(mutex))

#if defined(RUBY_ASSERT_CRITICAL_SECTION)
// TODO: add documentation
extern int ruby_assert_critical_section_entered;
#define RUBY_ASSERT_CRITICAL_SECTION_ENTER() do{ruby_assert_critical_section_entered += 1;}while(false)
#define RUBY_ASSERT_CRITICAL_SECTION_LEAVE() do{VM_ASSERT(ruby_assert_critical_section_entered > 0);ruby_assert_critical_section_entered -= 1;}while(false)
#else
#define RUBY_ASSERT_CRITICAL_SECTION_ENTER()
#define RUBY_ASSERT_CRITICAL_SECTION_LEAVE()
#endif
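
/* Illustrative sketch (added for exposition, not part of the original header,
 * guarded out): how the critical-section markers pair up. rb_vm_check_ints(),
 * later in this header, asserts that the counter is zero, so any interrupt
 * check reached between ENTER/LEAVE trips VM_ASSERT when VM_CHECK_MODE > 0. */
#if 0
static void
critical_section_sketch(void)
{
    RUBY_ASSERT_CRITICAL_SECTION_ENTER();
    /* ... code that must not reach RUBY_VM_CHECK_INTS() ... */
    RUBY_ASSERT_CRITICAL_SECTION_LEAVE();
}
#endif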

#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
# include "wasm/setjmp.h"
#else
# include <setjmp.h>
#endif

#if defined(__linux__) || defined(__FreeBSD__)
# define RB_THREAD_T_HAS_NATIVE_ID
#endif

#include "ccan/list/list.h"
#include "id.h"
#include "internal.h"
#include "internal/array.h"
#include "internal/basic_operators.h"
#include "internal/serial.h"
#include "internal/vm.h"
#include "method.h"
#include "node.h"
#include "ruby/ruby.h"
#include "ruby/st.h"
#include "ruby_atomic.h"
#include "vm_opts.h"
#include "shape.h"

#include "ruby/thread_native.h"

/*
 * implementation selector of get_insn_info algorithm
 *   0: linear search
 *   1: binary search
 *   2: succinct bitvector
 */
#ifndef VM_INSN_INFO_TABLE_IMPL
# define VM_INSN_INFO_TABLE_IMPL 2
#endif
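
/* For example (illustrative): since the default above is #ifndef-guarded, a
 * build can select another algorithm by predefining the macro, e.g. passing
 * -DVM_INSN_INFO_TABLE_IMPL=1 in the compiler flags for binary search. */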

#if defined(NSIG_MAX)           /* POSIX issue 8 */
# undef  NSIG
# define NSIG NSIG_MAX
#elif defined(_SIG_MAXSIG)      /* FreeBSD */
# undef  NSIG
# define NSIG _SIG_MAXSIG
#elif defined(_SIGMAX)          /* QNX */
# define NSIG (_SIGMAX + 1)
#elif defined(NSIG)             /* 99% of everything else */
# /* take it */
#else                           /* Last resort */
# define NSIG (sizeof(sigset_t) * CHAR_BIT + 1)
#endif

#define RUBY_NSIG NSIG

#if defined(SIGCLD)
# define RUBY_SIGCHLD (SIGCLD)
#elif defined(SIGCHLD)
# define RUBY_SIGCHLD (SIGCHLD)
#else
# define RUBY_SIGCHLD (0)
#endif

/* platforms with broken or non-existent SIGCHLD work by polling */
#if defined(__APPLE__)
# define SIGCHLD_LOSSY (1)
#else
# define SIGCHLD_LOSSY (0)
#endif

/* define to 0 to test old code path */
#define WAITPID_USE_SIGCHLD (RUBY_SIGCHLD || SIGCHLD_LOSSY)

#if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__)
# define USE_SIGALTSTACK
void *rb_allocate_sigaltstack(void);
void *rb_register_sigaltstack(void *);
# define RB_ALTSTACK_INIT(var, altstack) var = rb_register_sigaltstack(altstack)
# define RB_ALTSTACK_FREE(var) free(var)
# define RB_ALTSTACK(var)  var
#else /* noop */
# define RB_ALTSTACK_INIT(var, altstack)
# define RB_ALTSTACK_FREE(var)
# define RB_ALTSTACK(var) (0)
#endif

#include THREAD_IMPL_H
#define RUBY_VM_THREAD_MODEL 2

/*****************/
/* configuration */
/*****************/

/* gcc ver. check */
#if defined(__GNUC__) && __GNUC__ >= 2

#if OPT_TOKEN_THREADED_CODE
#if OPT_DIRECT_THREADED_CODE
#undef OPT_DIRECT_THREADED_CODE
#endif
#endif

#else /* defined(__GNUC__) && __GNUC__ >= 2 */

/* disable threaded code options */
#if OPT_DIRECT_THREADED_CODE
#undef OPT_DIRECT_THREADED_CODE
#endif
#if OPT_TOKEN_THREADED_CODE
#undef OPT_TOKEN_THREADED_CODE
#endif
#endif

/* call threaded code */
#if OPT_CALL_THREADED_CODE
#if OPT_DIRECT_THREADED_CODE
#undef OPT_DIRECT_THREADED_CODE
#endif /* OPT_DIRECT_THREADED_CODE */
#if OPT_STACK_CACHING
#undef OPT_STACK_CACHING
#endif /* OPT_STACK_CACHING */
#endif /* OPT_CALL_THREADED_CODE */

void rb_vm_encoded_insn_data_table_init(void);
typedef unsigned long rb_num_t;
typedef   signed long rb_snum_t;

enum ruby_tag_type {
    RUBY_TAG_NONE   = 0x0,
    RUBY_TAG_RETURN = 0x1,
    RUBY_TAG_BREAK  = 0x2,
    RUBY_TAG_NEXT   = 0x3,
    RUBY_TAG_RETRY  = 0x4,
    RUBY_TAG_REDO   = 0x5,
    RUBY_TAG_RAISE  = 0x6,
    RUBY_TAG_THROW  = 0x7,
    RUBY_TAG_FATAL  = 0x8,
    RUBY_TAG_MASK   = 0xf
};

#define TAG_NONE   RUBY_TAG_NONE
#define TAG_RETURN RUBY_TAG_RETURN
#define TAG_BREAK  RUBY_TAG_BREAK
#define TAG_NEXT   RUBY_TAG_NEXT
#define TAG_RETRY  RUBY_TAG_RETRY
#define TAG_REDO   RUBY_TAG_REDO
#define TAG_RAISE  RUBY_TAG_RAISE
#define TAG_THROW  RUBY_TAG_THROW
#define TAG_FATAL  RUBY_TAG_FATAL
#define TAG_MASK   RUBY_TAG_MASK

enum ruby_vm_throw_flags {
    VM_THROW_NO_ESCAPE_FLAG = 0x8000,
    VM_THROW_STATE_MASK = 0xff
};

/* forward declarations */
struct rb_thread_struct;
struct rb_control_frame_struct;

/* iseq data type */
struct iseq_compile_data_ensure_node_stack;

union ic_serial_entry {
    rb_serial_t raw;
    VALUE data[2];
};

// imemo_constcache
struct iseq_inline_constant_cache_entry {
    VALUE flags;

    VALUE value;              // v0
    VALUE _unused1;           // v1
    VALUE _unused2;           // v2
    const rb_cref_t *ic_cref; // v3
};
STATIC_ASSERT(sizeof_iseq_inline_constant_cache_entry,
              (offsetof(struct iseq_inline_constant_cache_entry, ic_cref) +
               sizeof(const rb_cref_t *)) <= RVALUE_SIZE);

struct iseq_inline_iv_cache_entry {
    uintptr_t value; // attr_index in lower bits, dest_shape_id in upper bits
    ID iv_set_name;
};

union iseq_inline_storage_entry {
    struct {
        struct rb_thread_struct *running_thread;
        VALUE value;
    } once;
    struct iseq_inline_constant_cache ic_cache;
    struct iseq_inline_iv_cache_entry iv_cache;
};

struct rb_calling_info {
    const struct rb_callinfo *ci;
    const struct rb_callcache *cc;
    VALUE block_handler;
    VALUE recv;
    int argc;
    int kw_splat;
};

struct rb_execution_context_struct;

#if 1
#define CoreDataFromValue(obj, type) (type*)DATA_PTR(obj)
#else
#define CoreDataFromValue(obj, type) (type*)rb_data_object_get(obj)
#endif
#define GetCoreDataFromValue(obj, type, ptr) ((ptr) = CoreDataFromValue((obj), type))

typedef struct rb_iseq_location_struct {
    VALUE pathobj;    /* String (path) or Array [path, realpath]. Frozen. */
    VALUE base_label; /* String */
    VALUE label;      /* String */
    int first_lineno;
    int node_id;
    rb_code_location_t code_location;
} rb_iseq_location_t;

#define PATHOBJ_PATH     0
#define PATHOBJ_REALPATH 1

static inline VALUE
pathobj_path(VALUE pathobj)
{
    if (RB_TYPE_P(pathobj, T_STRING)) {
        return pathobj;
    }
    else {
        VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
        return RARRAY_AREF(pathobj, PATHOBJ_PATH);
    }
}

static inline VALUE
pathobj_realpath(VALUE pathobj)
{
    if (RB_TYPE_P(pathobj, T_STRING)) {
        return pathobj;
    }
    else {
        VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
        return RARRAY_AREF(pathobj, PATHOBJ_REALPATH);
    }
}
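
/* Illustrative sketch (added for exposition, not part of the original header,
 * guarded out): the two pathobj shapes and what the accessors above return.
 * rb_iseq_pathobj_new(), declared near the end of this header, builds the
 * real thing. */
#if 0
static void
pathobj_sketch(void)
{
    VALUE path     = rb_str_new_cstr("a.rb");
    VALUE realpath = rb_str_new_cstr("/home/user/a.rb");
    VALUE pathobj  = rb_obj_freeze(rb_ary_new_from_args(2, path, realpath));
    /* pathobj_path(path)        => "a.rb"            (String case) */
    /* pathobj_path(pathobj)     => "a.rb"            (Array case)  */
    /* pathobj_realpath(pathobj) => "/home/user/a.rb"               */
}
#endif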

/* Forward declarations */
struct rb_mjit_unit;

typedef uintptr_t iseq_bits_t;

#define ISEQ_IS_SIZE(body) (body->ic_size + body->ivc_size + body->ise_size + body->icvarc_size)

/* [ TS_IVC | TS_ICVARC | TS_ISE | TS_IC ] */
#define ISEQ_IS_IC_ENTRY(body, idx) (body->is_entries[(idx) + body->ise_size + body->icvarc_size + body->ivc_size].ic_cache);

/* instruction sequence type */
enum rb_iseq_type {
    ISEQ_TYPE_TOP,
    ISEQ_TYPE_METHOD,
    ISEQ_TYPE_BLOCK,
    ISEQ_TYPE_CLASS,
    ISEQ_TYPE_RESCUE,
    ISEQ_TYPE_ENSURE,
    ISEQ_TYPE_EVAL,
    ISEQ_TYPE_MAIN,
    ISEQ_TYPE_PLAIN
};

struct rb_iseq_constant_body {
    enum rb_iseq_type type;

    unsigned int iseq_size;
    VALUE *iseq_encoded; /* encoded iseq (insn addr and operands) */

    struct {
        struct {
            unsigned int has_lead   : 1;
            unsigned int has_opt    : 1;
            unsigned int has_rest   : 1;
            unsigned int has_post   : 1;
            unsigned int has_kw     : 1;
            unsigned int has_kwrest : 1;
            unsigned int has_block  : 1;

            unsigned int ambiguous_param0 : 1; /* {|a|} */
            unsigned int accepts_no_kwarg : 1;
            unsigned int ruby2_keywords: 1;
        } flags;

        unsigned int size;

        int lead_num;
        int opt_num;
        int rest_start;
        int post_start;
        int post_num;
        int block_start;

        const VALUE *opt_table; /* (opt_num + 1) entries. */
        /* opt_num and opt_table:
         *
         * def foo o1=e1, o2=e2, ..., oN=eN
         * #=>
         *   # prologue code
         *   A1: e1
         *   A2: e2
         *   ...
         *   AN: eN
         *   AL: body
         * opt_num = N
         * opt_table = [A1, A2, ..., AN, AL]
         */

        const struct rb_iseq_param_keyword {
            int num;
            int required_num;
            int bits_start;
            int rest_start;
            const ID *table;
            VALUE *default_values;
        } *keyword;
    } param;

    rb_iseq_location_t location;

    /* insn info, must be freed */
    struct {
        const struct iseq_insn_info_entry *body;
        unsigned int *positions;
        unsigned int size;
#if VM_INSN_INFO_TABLE_IMPL == 2
        struct succ_index_table *succ_index_table;
#endif
    } insns_info;

    const ID *local_table; /* must free */

    /* catch table */
    struct iseq_catch_table *catch_table;

    /* for child iseq */
    const struct rb_iseq_struct *parent_iseq;
    struct rb_iseq_struct *local_iseq; /* local_iseq->flip_cnt can be modified */

    union iseq_inline_storage_entry *is_entries; /* [ TS_IVC | TS_ICVARC | TS_ISE | TS_IC ] */
    struct rb_call_data *call_data; // struct rb_call_data calls[ci_size];

    struct {
        rb_snum_t flip_count;
        VALUE script_lines;
        VALUE coverage;
        VALUE pc2branchindex;
        VALUE *original_iseq;
    } variable;

    unsigned int local_table_size;
    unsigned int ic_size;     // Number of IC caches
    unsigned int ise_size;    // Number of ISE caches
    unsigned int ivc_size;    // Number of IVC caches
    unsigned int icvarc_size; // Number of ICVARC caches
    unsigned int ci_size;
    unsigned int stack_max; /* for stack overflow check */

    bool catch_except_p; // Set true if a frame of this ISeq may catch an exception.

    // If true, this ISeq is leaf *and* backtraces are not used, for example,
    // by rb_profile_frames. We verify only leafness on VM_CHECK_MODE though.
    // Note that GC allocations might use backtraces due to
    // ObjectSpace#trace_object_allocations.
    // For more details, see: https://bugs.ruby-lang.org/issues/16956
    bool builtin_inline_p;

    union {
        iseq_bits_t *list; /* Find references for GC */
        iseq_bits_t single;
    } mark_bits;

    struct rb_id_table *outer_variables;

    const rb_iseq_t *mandatory_only_iseq;

#if USE_MJIT || USE_YJIT
    // Function pointer for JIT code
    VALUE (*jit_func)(struct rb_execution_context_struct *, struct rb_control_frame_struct *);
    // Total number of calls made via jit_exec()
    long unsigned total_calls;
#endif

#if USE_MJIT
    // MJIT stores some data on each iseq.
    struct rb_mjit_unit *mjit_unit;
#endif

#if USE_YJIT
    // YJIT stores some data on each iseq.
    void *yjit_payload;
#endif
};

/* T_IMEMO/iseq */
/* typedef rb_iseq_t is in method.h */
struct rb_iseq_struct {
    VALUE flags;   /* 1 */
    VALUE wrapper; /* 2 */

    struct rb_iseq_constant_body *body; /* 3 */

    union { /* 4, 5 words */
        struct iseq_compile_data *compile_data; /* used at compile time */

        struct {
            VALUE obj;
            int index;
        } loader;

        struct {
            struct rb_hook_list_struct *local_hooks;
            rb_event_flag_t global_trace_events;
        } exec;
    } aux;
};

#define ISEQ_BODY(iseq) ((iseq)->body)

#ifndef EXTSTATIC
#define EXTSTATIC 0
#endif

#ifndef USE_LAZY_LOAD
#define USE_LAZY_LOAD 0
#endif

#if USE_LAZY_LOAD
const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq);
#endif

static inline const rb_iseq_t *
rb_iseq_check(const rb_iseq_t *iseq)
{
#if USE_LAZY_LOAD
    if (ISEQ_BODY(iseq) == NULL) {
        rb_iseq_complete((rb_iseq_t *)iseq);
    }
#endif
    return iseq;
}

static inline const rb_iseq_t *
def_iseq_ptr(rb_method_definition_t *def)
{
    // TODO: revisit; to check the bug, enable this assertion.
#if VM_CHECK_MODE > 0
    if (def->type != VM_METHOD_TYPE_ISEQ) rb_bug("def_iseq_ptr: not iseq (%d)", def->type);
#endif
    return rb_iseq_check(def->body.iseq.iseqptr);
}

enum ruby_special_exceptions {
    ruby_error_reenter,
    ruby_error_nomemory,
    ruby_error_sysstack,
    ruby_error_stackfatal,
    ruby_error_stream_closed,
    ruby_special_error_count
};

#define GetVMPtr(obj, ptr) \
    GetCoreDataFromValue((obj), rb_vm_t, (ptr))

struct rb_vm_struct;
typedef void rb_vm_at_exit_func(struct rb_vm_struct*);

typedef struct rb_at_exit_list {
    rb_vm_at_exit_func *func;
    struct rb_at_exit_list *next;
} rb_at_exit_list;

struct rb_objspace;
struct rb_objspace *rb_objspace_alloc(void);
void rb_objspace_free(struct rb_objspace *);
void rb_objspace_call_finalizer(struct rb_objspace *);

typedef struct rb_hook_list_struct {
    struct rb_event_hook_struct *hooks;
    rb_event_flag_t events;
    unsigned int running;
    bool need_clean;
    bool is_local;
} rb_hook_list_t;

// see builtin.h for definition
typedef const struct rb_builtin_function *RB_BUILTIN;
typedef struct rb_vm_struct {
    VALUE self;

    struct {
        struct ccan_list_head set;
        unsigned int cnt;
        unsigned int blocking_cnt;

        struct rb_ractor_struct *main_ractor;
        struct rb_thread_struct *main_thread; // == vm->ractor.main_ractor->threads.main

        struct {
            // monitor
            rb_nativethread_lock_t lock;
            struct rb_ractor_struct *lock_owner;
            unsigned int lock_rec;

            // barrier
            bool barrier_waiting;
            unsigned int barrier_cnt;
            rb_nativethread_cond_t barrier_cond;

            // join at exit
            rb_nativethread_cond_t terminate_cond;
            bool terminate_waiting;
        } sync;
    } ractor;

#ifdef USE_SIGALTSTACK
    void *main_altstack;
#endif

    rb_serial_t fork_gen;
    rb_nativethread_lock_t waitpid_lock;
    struct ccan_list_head waiting_pids; /* PID > 0: <=> struct waitpid_state */
    struct ccan_list_head waiting_grps; /* PID <= 0: <=> struct waitpid_state */
    struct ccan_list_head waiting_fds;  /* <=> struct waiting_fd */

    /* set in single-threaded processes only: */
    volatile int ubf_async_safe;

    unsigned int running: 1;
    unsigned int thread_abort_on_exception: 1;
    unsigned int thread_report_on_exception: 1;
    unsigned int thread_ignore_deadlock: 1;

    /* object management */
    VALUE mark_object_ary;
    const VALUE special_exceptions[ruby_special_error_count];

    /* object shapes */
    rb_shape_t *shape_list;
    rb_shape_t *root_shape;
    shape_id_t next_shape_id;

    /* load */
    VALUE top_self;
    VALUE load_path;
    VALUE load_path_snapshot;
    VALUE load_path_check_cache;
    VALUE expanded_load_path;
    VALUE loaded_features;
    VALUE loaded_features_snapshot;
    VALUE loaded_features_realpaths;
    VALUE loaded_features_realpath_map;
    struct st_table *loaded_features_index;
    struct st_table *loading_table;
#if EXTSTATIC
    // For running the init function of statically linked
    // extensions when they are loaded
    struct st_table *static_ext_inits;
#endif

    /* signal */
    struct {
        VALUE cmd[RUBY_NSIG];
    } trap_list;

    /* relation table of ensure - rollback for callcc */
    struct st_table *ensure_rollback_table;

    /* postponed_job (async-signal-safe, NOT thread-safe) */
    struct rb_postponed_job_struct *postponed_job_buffer;
    rb_atomic_t postponed_job_index;

    int src_encoding_index;

    /* workqueue (thread-safe, NOT async-signal-safe) */
    struct ccan_list_head workqueue; /* <=> rb_workqueue_job.jnode */
    rb_nativethread_lock_t workqueue_lock;

    VALUE orig_progname, progname;
    VALUE coverages, me2counter;
    int coverage_mode;

    st_table *defined_module_hash;

    struct rb_objspace *objspace;

    rb_at_exit_list *at_exit;

    st_table *frozen_strings;

    const struct rb_builtin_function *builtin_function_table;
    int builtin_inline_index;

    struct rb_id_table *negative_cme_table;
    st_table *overloaded_cme_table; // cme -> overloaded_cme

    // This id table contains a mapping from ID to ICs. It does this with ID
    // keys and nested st_tables as values. The nested tables have ICs as keys
    // and Qtrue as values. It is used when inline constant caches need to be
    // invalidated or ISEQs are being freed.
    struct rb_id_table *constant_cache;
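    // Illustrative shape (added for exposition, not actual code):
    //   constant_cache: { rb_intern("Foo") => { (IC)0x... => Qtrue, ... }, ... }
    // so invalidating ID :Foo means walking one nested table and clearing
    // each IC found in it.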

#ifndef VM_GLOBAL_CC_CACHE_TABLE_SIZE
#define VM_GLOBAL_CC_CACHE_TABLE_SIZE 1023
#endif
    const struct rb_callcache *global_cc_cache_table[VM_GLOBAL_CC_CACHE_TABLE_SIZE]; // vm_eval.c

#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
    uint32_t clock;
#endif

    /* params */
    struct { /* size in bytes */
        size_t thread_vm_stack_size;
        size_t thread_machine_stack_size;
        size_t fiber_vm_stack_size;
        size_t fiber_machine_stack_size;
    } default_params;

} rb_vm_t;

/* default values */

#define RUBY_VM_SIZE_ALIGN 4096

#define RUBY_VM_THREAD_VM_STACK_SIZE          ( 128 * 1024 * sizeof(VALUE)) /*  512 KB or 1024 KB */
#define RUBY_VM_THREAD_VM_STACK_SIZE_MIN      (   2 * 1024 * sizeof(VALUE)) /*    8 KB or   16 KB */
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE     ( 128 * 1024 * sizeof(VALUE)) /*  512 KB or 1024 KB */
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN (  16 * 1024 * sizeof(VALUE)) /*   64 KB or  128 KB */

#define RUBY_VM_FIBER_VM_STACK_SIZE           (  16 * 1024 * sizeof(VALUE)) /*   64 KB or  128 KB */
#define RUBY_VM_FIBER_VM_STACK_SIZE_MIN       (   2 * 1024 * sizeof(VALUE)) /*    8 KB or   16 KB */
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE      (  64 * 1024 * sizeof(VALUE)) /*  256 KB or  512 KB */
#if defined(__powerpc64__) || defined(__ppc64__) // macOS has __ppc64__
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN  (  32 * 1024 * sizeof(VALUE)) /*  128 KB or  256 KB */
#else
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN  (  16 * 1024 * sizeof(VALUE)) /*   64 KB or  128 KB */
#endif

#if __has_feature(memory_sanitizer) || __has_feature(address_sanitizer)
/* It seems sanitizers consume A LOT of machine stacks */
#undef  RUBY_VM_THREAD_MACHINE_STACK_SIZE
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE     (1024 * 1024 * sizeof(VALUE))
#undef  RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 512 * 1024 * sizeof(VALUE))
#undef  RUBY_VM_FIBER_MACHINE_STACK_SIZE
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE      ( 256 * 1024 * sizeof(VALUE))
#undef  RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN  ( 128 * 1024 * sizeof(VALUE))
#endif

#ifndef VM_DEBUG_BP_CHECK
#define VM_DEBUG_BP_CHECK 0
#endif

#ifndef VM_DEBUG_VERIFY_METHOD_CACHE
#define VM_DEBUG_VERIFY_METHOD_CACHE (VMDEBUG != 0)
#endif

struct rb_captured_block {
    VALUE self;
    const VALUE *ep;
    union {
        const rb_iseq_t *iseq;
        const struct vm_ifunc *ifunc;
        VALUE val;
    } code;
};

enum rb_block_handler_type {
    block_handler_type_iseq,
    block_handler_type_ifunc,
    block_handler_type_symbol,
    block_handler_type_proc
};

enum rb_block_type {
    block_type_iseq,
    block_type_ifunc,
    block_type_symbol,
    block_type_proc
};

struct rb_block {
    union {
        struct rb_captured_block captured;
        VALUE symbol;
        VALUE proc;
    } as;
    enum rb_block_type type;
};

typedef struct rb_control_frame_struct {
    const VALUE *pc;        /* cfp[0] */
    VALUE *sp;              /* cfp[1] */
    const rb_iseq_t *iseq;  /* cfp[2] */
    VALUE self;             /* cfp[3] / block[0] */
    const VALUE *ep;        /* cfp[4] / block[1] */
    const void *block_code; /* cfp[5] / block[2] */ /* iseq or ifunc or forwarded block handler */
    VALUE *__bp__;          /* cfp[6] */ /* outside vm_push_frame, use vm_base_ptr instead. */

#if VM_DEBUG_BP_CHECK
    VALUE *bp_check;        /* cfp[7] */
#endif
    // Return address for YJIT code
    void *jit_return;
} rb_control_frame_t;

extern const rb_data_type_t ruby_threadptr_data_type;

static inline struct rb_thread_struct *
rb_thread_ptr(VALUE thval)
{
    return (struct rb_thread_struct *)rb_check_typeddata(thval, &ruby_threadptr_data_type);
}

enum rb_thread_status {
    THREAD_RUNNABLE,
    THREAD_STOPPED,
    THREAD_STOPPED_FOREVER,
    THREAD_KILLED
};

#ifdef RUBY_JMP_BUF
typedef RUBY_JMP_BUF rb_jmpbuf_t;
#else
typedef void *rb_jmpbuf_t[5];
#endif

/*
  The members written by EC_PUSH_TAG() should be placed at the beginning and
  the end, so that the entire region is accessible.
*/
struct rb_vm_tag {
    VALUE tag;
    VALUE retval;
    rb_jmpbuf_t buf;
    struct rb_vm_tag *prev;
    enum ruby_tag_type state;
    unsigned int lock_rec;
};

STATIC_ASSERT(rb_vm_tag_buf_offset, offsetof(struct rb_vm_tag, buf) > 0);
STATIC_ASSERT(rb_vm_tag_buf_end,
              offsetof(struct rb_vm_tag, buf) + sizeof(rb_jmpbuf_t) <
              sizeof(struct rb_vm_tag));
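
/* Illustrative sketch (added for exposition, not part of the original header,
 * guarded out): the typical tag protocol built on rb_vm_tag. EC_PUSH_TAG(),
 * EC_EXEC_TAG(), EC_POP_TAG() and EC_JUMP_TAG() live in eval_intern.h. */
#if 0
    enum ruby_tag_type state;
    EC_PUSH_TAG(ec);
    if ((state = EC_EXEC_TAG()) == TAG_NONE) {
        /* protected code; a raise/throw longjmps back to EC_EXEC_TAG() */
    }
    EC_POP_TAG();
    if (state != TAG_NONE) EC_JUMP_TAG(ec, state);
#endif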

struct rb_unblock_callback {
    rb_unblock_function_t *func;
    void *arg;
};

struct rb_mutex_struct;

typedef struct rb_ensure_entry {
    VALUE marker;
    VALUE (*e_proc)(VALUE);
    VALUE data2;
} rb_ensure_entry_t;

typedef struct rb_ensure_list {
    struct rb_ensure_list *next;
    struct rb_ensure_entry entry;
} rb_ensure_list_t;

typedef struct rb_fiber_struct rb_fiber_t;

struct rb_waiting_list {
    struct rb_waiting_list *next;
    struct rb_thread_struct *thread;
    struct rb_fiber_struct *fiber;
};

struct rb_execution_context_struct {
    /* execution information */
    VALUE *vm_stack;      /* must free, must mark */
    size_t vm_stack_size; /* size in words (byte size / sizeof(VALUE)) */
    rb_control_frame_t *cfp;

    struct rb_vm_tag *tag;

    /* interrupt flags */
    rb_atomic_t interrupt_flag;
    rb_atomic_t interrupt_mask; /* size should match flag */
#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
    uint32_t checked_clock;
#endif

    rb_fiber_t *fiber_ptr;
    struct rb_thread_struct *thread_ptr;

    /* storage (ec (fiber) local) */
    struct rb_id_table *local_storage;
    VALUE local_storage_recursive_hash;
    VALUE local_storage_recursive_hash_for_trace;

    /* Inheritable fiber storage. */
    VALUE storage;

    /* eval env */
    const VALUE *root_lep;
    VALUE root_svar;

    /* ensure & callcc */
    rb_ensure_list_t *ensure_list;

    /* trace information */
    struct rb_trace_arg_struct *trace_arg;

    /* temporary places */
    VALUE errinfo;
    VALUE passed_block_handler; /* for rb_iterate */

    uint8_t raised_flag; /* only 3 bits needed */

    /* n.b. only 7 bits needed, really: */
    BITFIELD(enum method_missing_reason, method_missing_reason, 8);

    VALUE private_const_reference;

    /* for GC */
    struct {
        VALUE *stack_start;
        VALUE *stack_end;
        size_t stack_maxsize;
        RUBY_ALIGNAS(SIZEOF_VALUE) jmp_buf regs;
    } machine;
};

#ifndef rb_execution_context_t
typedef struct rb_execution_context_struct rb_execution_context_t;
#define rb_execution_context_t rb_execution_context_t
#endif

// for builtin.h
#define VM_CORE_H_EC_DEFINED 1

// Set the vm_stack pointer in the execution context.
void rb_ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);

// Initialize the vm_stack pointer in the execution context and push the initial stack frame.
// @param ec the execution context to update.
// @param stack a pointer to the stack to use.
// @param size the size of the stack, as in `VALUE stack[size]`.
void rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);

// Clear (set to `NULL`) the vm_stack pointer.
// @param ec the execution context to update.
void rb_ec_clear_vm_stack(rb_execution_context_t *ec);
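
/* Illustrative sketch (added for exposition, not part of the original header,
 * guarded out): allocating and attaching a VM stack with the helpers above,
 * fiber-style. Note that sizes are counted in VALUEs, matching vm_stack_size. */
#if 0
static void
vm_stack_sketch(rb_execution_context_t *ec)
{
    size_t size = RUBY_VM_FIBER_VM_STACK_SIZE / sizeof(VALUE);
    VALUE *stack = ALLOC_N(VALUE, size);
    rb_ec_initialize_vm_stack(ec, stack, size); /* also pushes the initial frame */
    /* ... execute on this stack ... */
    rb_ec_clear_vm_stack(ec);                   /* detach before freeing */
    ruby_xfree(stack);
}
#endif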

struct rb_ext_config {
    bool ractor_safe;
};

typedef struct rb_ractor_struct rb_ractor_t;

struct rb_native_thread;

typedef struct rb_thread_struct {
    struct ccan_list_node lt_node; // managed by a ractor
    VALUE self;
    rb_ractor_t *ractor;
    rb_vm_t *vm;
    struct rb_native_thread *nt;
    rb_execution_context_t *ec;

    struct rb_thread_sched_item sched;
    rb_atomic_t serial; // only for RUBY_DEBUG_LOG()

    VALUE last_status; /* $? */

    /* for cfunc */
    struct rb_calling_info *calling;

    /* for load(true) */
    VALUE top_self;
    VALUE top_wrapper;

    /* thread control */

    BITFIELD(enum rb_thread_status, status, 2);
    /* bit flags */
    unsigned int locking_native_thread : 1;
    unsigned int to_kill : 1;
    unsigned int abort_on_exception: 1;
    unsigned int report_on_exception: 1;
    unsigned int pending_interrupt_queue_checked: 1;
    int8_t priority; /* -3 .. 3 (RUBY_THREAD_PRIORITY_{MIN,MAX}) */
    uint32_t running_time_us; /* 12500..800000 */

    void *blocking_region_buffer;

    VALUE thgroup;
    VALUE value;

    /* temporary place of retval on OPT_CALL_THREADED_CODE */
#if OPT_CALL_THREADED_CODE
    VALUE retval;
#endif

    /* async errinfo queue */
    VALUE pending_interrupt_queue;
    VALUE pending_interrupt_mask_stack;

    /* interrupt management */
    rb_nativethread_lock_t interrupt_lock;
    struct rb_unblock_callback unblock;
    VALUE locking_mutex;
    struct rb_mutex_struct *keeping_mutexes;

    struct rb_waiting_list *join_list;

    union {
        struct {
            VALUE proc;
            VALUE args;
            int kw_splat;
        } proc;
        struct {
            VALUE (*func)(void *);
            void *arg;
        } func;
    } invoke_arg;

    enum thread_invoke_type {
        thread_invoke_type_none = 0,
        thread_invoke_type_proc,
        thread_invoke_type_ractor_proc,
        thread_invoke_type_func
    } invoke_type;

    /* statistics data for profiler */
    VALUE stat_insn_usage;

    /* fiber */
    rb_fiber_t *root_fiber;

    VALUE scheduler;
    unsigned int blocking;

    /* misc */
    VALUE name;

    struct rb_ext_config ext_config;
} rb_thread_t;

static inline unsigned int
rb_th_serial(const rb_thread_t *th)
{
    return (unsigned int)th->serial;
}

typedef enum {
    VM_DEFINECLASS_TYPE_CLASS           = 0x00,
    VM_DEFINECLASS_TYPE_SINGLETON_CLASS = 0x01,
    VM_DEFINECLASS_TYPE_MODULE          = 0x02,
    /* 0x03..0x06 is reserved */
    VM_DEFINECLASS_TYPE_MASK            = 0x07
} rb_vm_defineclass_type_t;

#define VM_DEFINECLASS_TYPE(x) ((rb_vm_defineclass_type_t)(x) & VM_DEFINECLASS_TYPE_MASK)
#define VM_DEFINECLASS_FLAG_SCOPED         0x08
#define VM_DEFINECLASS_FLAG_HAS_SUPERCLASS 0x10
#define VM_DEFINECLASS_SCOPED_P(x) ((x) & VM_DEFINECLASS_FLAG_SCOPED)
#define VM_DEFINECLASS_HAS_SUPERCLASS_P(x) \
    ((x) & VM_DEFINECLASS_FLAG_HAS_SUPERCLASS)

/* iseq.c */
RUBY_SYMBOL_EXPORT_BEGIN

/* node -> iseq */
rb_iseq_t *rb_iseq_new         (const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent, enum rb_iseq_type);
rb_iseq_t *rb_iseq_new_top     (const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent);
rb_iseq_t *rb_iseq_new_main    (const rb_ast_body_t *ast, VALUE path, VALUE realpath, const rb_iseq_t *parent, int opt);
rb_iseq_t *rb_iseq_new_eval    (const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth);
rb_iseq_t *rb_iseq_new_with_opt(const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth,
                                enum rb_iseq_type, const rb_compile_option_t*);

struct iseq_link_anchor;
struct rb_iseq_new_with_callback_callback_func {
    VALUE flags;
    VALUE reserved;
    void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *);
    const void *data;
};
static inline struct rb_iseq_new_with_callback_callback_func *
rb_iseq_new_with_callback_new_callback(
    void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *), const void *ptr)
{
    VALUE memo = rb_imemo_new(imemo_ifunc, (VALUE)func, (VALUE)ptr, Qundef, Qfalse);
    return (struct rb_iseq_new_with_callback_callback_func *)memo;
}
rb_iseq_t *rb_iseq_new_with_callback(const struct rb_iseq_new_with_callback_callback_func *ifunc,
    VALUE name, VALUE path, VALUE realpath, int first_lineno,
    const rb_iseq_t *parent, enum rb_iseq_type, const rb_compile_option_t*);

VALUE rb_iseq_disasm(const rb_iseq_t *iseq);
int rb_iseq_disasm_insn(VALUE str, const VALUE *iseqval, size_t pos, const rb_iseq_t *iseq, VALUE child);
attr_index_t rb_estimate_iv_count(VALUE klass, const rb_iseq_t *initialize_iseq);

VALUE rb_iseq_coverage(const rb_iseq_t *iseq);

RUBY_EXTERN VALUE rb_cISeq;
RUBY_EXTERN VALUE rb_cRubyVM;
RUBY_EXTERN VALUE rb_mRubyVMFrozenCore;
RUBY_EXTERN VALUE rb_block_param_proxy;
RUBY_SYMBOL_EXPORT_END

#define GetProcPtr(obj, ptr) \
    GetCoreDataFromValue((obj), rb_proc_t, (ptr))

typedef struct {
    const struct rb_block block;
    unsigned int is_from_method: 1; /* bool */
    unsigned int is_lambda: 1;      /* bool */
    unsigned int is_isolated: 1;    /* bool */
} rb_proc_t;

RUBY_SYMBOL_EXPORT_BEGIN
VALUE rb_proc_isolate(VALUE self);
VALUE rb_proc_isolate_bang(VALUE self);
VALUE rb_proc_ractor_make_shareable(VALUE self);
RUBY_SYMBOL_EXPORT_END

typedef struct {
    VALUE flags; /* imemo header */
    rb_iseq_t *iseq;
    const VALUE *ep;
    const VALUE *env;
    unsigned int env_size;
} rb_env_t;

extern const rb_data_type_t ruby_binding_data_type;

#define GetBindingPtr(obj, ptr) \
    GetCoreDataFromValue((obj), rb_binding_t, (ptr))

typedef struct {
    const struct rb_block block;
    const VALUE pathobj;
    int first_lineno;
} rb_binding_t;

/* used at compile time and by the send insn */

enum vm_check_match_type {
    VM_CHECKMATCH_TYPE_WHEN = 1,
    VM_CHECKMATCH_TYPE_CASE = 2,
    VM_CHECKMATCH_TYPE_RESCUE = 3
};

#define VM_CHECKMATCH_TYPE_MASK 0x03
#define VM_CHECKMATCH_ARRAY     0x04

enum vm_special_object_type {
    VM_SPECIAL_OBJECT_VMCORE = 1,
    VM_SPECIAL_OBJECT_CBASE,
    VM_SPECIAL_OBJECT_CONST_BASE
};

enum vm_svar_index {
    VM_SVAR_LASTLINE = 0, /* $_ */
    VM_SVAR_BACKREF = 1,  /* $~ */

    VM_SVAR_EXTRA_START = 2,
    VM_SVAR_FLIPFLOP_START = 2 /* flipflop */
};

/* inline cache */
typedef struct iseq_inline_constant_cache *IC;
typedef struct iseq_inline_iv_cache_entry *IVC;
typedef struct iseq_inline_cvar_cache_entry *ICVARC;
typedef union iseq_inline_storage_entry *ISE;
typedef const struct rb_callinfo *CALL_INFO;
typedef const struct rb_callcache *CALL_CACHE;
typedef struct rb_call_data *CALL_DATA;

typedef VALUE CDHASH;

#ifndef FUNC_FASTCALL
#define FUNC_FASTCALL(x) x
#endif

typedef rb_control_frame_t *
  (FUNC_FASTCALL(*rb_insn_func_t))(rb_execution_context_t *, rb_control_frame_t *);

#define VM_TAGGED_PTR_SET(p, tag)  ((VALUE)(p) | (tag))
#define VM_TAGGED_PTR_REF(v, mask) ((void *)((v) & ~mask))

#define GC_GUARDED_PTR(p)     VM_TAGGED_PTR_SET((p), 0x01)
#define GC_GUARDED_PTR_REF(p) VM_TAGGED_PTR_REF((p), 0x03)
#define GC_GUARDED_PTR_P(p)   (((VALUE)(p)) & 0x01)
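
/* Illustrative sketch (added for exposition, not part of the original header,
 * guarded out): the tagged-pointer round trip. Pointers are word-aligned, so
 * the low two bits are free for tags; the 0x03 mask strips whichever tag was
 * set. */
#if 0
static void
tagged_ptr_sketch(const VALUE *ep)
{
    VALUE guarded = GC_GUARDED_PTR(ep);              /* set tag bit 0x01  */
    VM_ASSERT(GC_GUARDED_PTR_P(guarded));
    const VALUE *back = GC_GUARDED_PTR_REF(guarded); /* clear mask 0x03   */
    VM_ASSERT(back == ep);
}
#endif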
1224
1225enum vm_frame_env_flags {
1226 /* Frame/Environment flag bits:
1227 * MMMM MMMM MMMM MMMM ____ FFFF FFFE EEEX (LSB)
1228 *
1229 * X : tag for GC marking (It seems as Fixnum)
1230 * EEE : 4 bits Env flags
1231 * FF..: 7 bits Frame flags
1232 * MM..: 15 bits frame magic (to check frame corruption)
1233 */
1234
1235 /* frame types */
1236 VM_FRAME_MAGIC_METHOD = 0x11110001,
1237 VM_FRAME_MAGIC_BLOCK = 0x22220001,
1238 VM_FRAME_MAGIC_CLASS = 0x33330001,
1239 VM_FRAME_MAGIC_TOP = 0x44440001,
1240 VM_FRAME_MAGIC_CFUNC = 0x55550001,
1241 VM_FRAME_MAGIC_IFUNC = 0x66660001,
1242 VM_FRAME_MAGIC_EVAL = 0x77770001,
1243 VM_FRAME_MAGIC_RESCUE = 0x78880001,
1244 VM_FRAME_MAGIC_DUMMY = 0x79990001,
1245
1246 VM_FRAME_MAGIC_MASK = 0x7fff0001,
1247
1248 /* frame flag */
1249 VM_FRAME_FLAG_FINISH = 0x0020,
1250 VM_FRAME_FLAG_BMETHOD = 0x0040,
1251 VM_FRAME_FLAG_CFRAME = 0x0080,
1252 VM_FRAME_FLAG_LAMBDA = 0x0100,
1253 VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM = 0x0200,
1254 VM_FRAME_FLAG_CFRAME_KW = 0x0400,
1255 VM_FRAME_FLAG_PASSED = 0x0800,
1256
1257 /* env flag */
1258 VM_ENV_FLAG_LOCAL = 0x0002,
1259 VM_ENV_FLAG_ESCAPED = 0x0004,
1260 VM_ENV_FLAG_WB_REQUIRED = 0x0008,
1261 VM_ENV_FLAG_ISOLATED = 0x0010,
1262};

#define VM_ENV_DATA_SIZE  ( 3)

#define VM_ENV_DATA_INDEX_ME_CREF (-2) /* ep[-2] */
#define VM_ENV_DATA_INDEX_SPECVAL (-1) /* ep[-1] */
#define VM_ENV_DATA_INDEX_FLAGS   ( 0) /* ep[ 0] */
#define VM_ENV_DATA_INDEX_ENV     ( 1) /* ep[ 1] */

#define VM_ENV_INDEX_LAST_LVAR    (-VM_ENV_DATA_SIZE)

static inline void VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value);

static inline void
VM_ENV_FLAGS_SET(const VALUE *ep, VALUE flag)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_ASSERT(FIXNUM_P(flags));
    VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags | flag);
}

static inline void
VM_ENV_FLAGS_UNSET(const VALUE *ep, VALUE flag)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_ASSERT(FIXNUM_P(flags));
    VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags & ~flag);
}

static inline unsigned long
VM_ENV_FLAGS(const VALUE *ep, long flag)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_ASSERT(FIXNUM_P(flags));
    return flags & flag;
}

static inline unsigned long
VM_FRAME_TYPE(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_MAGIC_MASK);
}

static inline int
VM_FRAME_LAMBDA_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_LAMBDA) != 0;
}

static inline int
VM_FRAME_CFRAME_KW_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME_KW) != 0;
}

static inline int
VM_FRAME_FINISHED_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
}

static inline int
VM_FRAME_BMETHOD_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BMETHOD) != 0;
}

static inline int
rb_obj_is_iseq(VALUE iseq)
{
    return imemo_type_p(iseq, imemo_iseq);
}

#if VM_CHECK_MODE > 0
#define RUBY_VM_NORMAL_ISEQ_P(iseq) rb_obj_is_iseq((VALUE)iseq)
#endif

static inline int
VM_FRAME_CFRAME_P(const rb_control_frame_t *cfp)
{
    int cframe_p = VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
    VM_ASSERT(RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) != cframe_p ||
              (VM_FRAME_TYPE(cfp) & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY);
    return cframe_p;
}

static inline int
VM_FRAME_RUBYFRAME_P(const rb_control_frame_t *cfp)
{
    return !VM_FRAME_CFRAME_P(cfp);
}

#define RUBYVM_CFUNC_FRAME_P(cfp) \
  (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)

#define VM_GUARDED_PREV_EP(ep) GC_GUARDED_PTR(ep)
#define VM_BLOCK_HANDLER_NONE 0

static inline int
VM_ENV_LOCAL_P(const VALUE *ep)
{
    return VM_ENV_FLAGS(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
}

static inline const VALUE *
VM_ENV_PREV_EP(const VALUE *ep)
{
    VM_ASSERT(VM_ENV_LOCAL_P(ep) == 0);
    return GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
}

static inline VALUE
VM_ENV_BLOCK_HANDLER(const VALUE *ep)
{
    VM_ASSERT(VM_ENV_LOCAL_P(ep));
    return ep[VM_ENV_DATA_INDEX_SPECVAL];
}

#if VM_CHECK_MODE > 0
int rb_vm_ep_in_heap_p(const VALUE *ep);
#endif

static inline int
VM_ENV_ESCAPED_P(const VALUE *ep)
{
    VM_ASSERT(rb_vm_ep_in_heap_p(ep) == !!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
    return VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) ? 1 : 0;
}

#if VM_CHECK_MODE > 0
static inline int
vm_assert_env(VALUE obj)
{
    VM_ASSERT(imemo_type_p(obj, imemo_env));
    return 1;
}
#endif

static inline VALUE
VM_ENV_ENVVAL(const VALUE *ep)
{
    VALUE envval = ep[VM_ENV_DATA_INDEX_ENV];
    VM_ASSERT(VM_ENV_ESCAPED_P(ep));
    VM_ASSERT(vm_assert_env(envval));
    return envval;
}

static inline const rb_env_t *
VM_ENV_ENVVAL_PTR(const VALUE *ep)
{
    return (const rb_env_t *)VM_ENV_ENVVAL(ep);
}

static inline const rb_env_t *
vm_env_new(VALUE *env_ep, VALUE *env_body, unsigned int env_size, const rb_iseq_t *iseq)
{
    rb_env_t *env = (rb_env_t *)rb_imemo_new(imemo_env, (VALUE)env_ep, (VALUE)env_body, 0, (VALUE)iseq);
    env->env_size = env_size;
    env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env;
    return env;
}

static inline void
VM_FORCE_WRITE(const VALUE *ptr, VALUE v)
{
    *((VALUE *)ptr) = v;
}

static inline void
VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value)
{
    VM_ASSERT(RB_SPECIAL_CONST_P(special_const_value));
    VM_FORCE_WRITE(ptr, special_const_value);
}

static inline void
VM_STACK_ENV_WRITE(const VALUE *ep, int index, VALUE v)
{
    VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_WB_REQUIRED) == 0);
    VM_FORCE_WRITE(&ep[index], v);
}

const VALUE *rb_vm_ep_local_ep(const VALUE *ep);
const VALUE *rb_vm_proc_local_ep(VALUE proc);
void rb_vm_block_ep_update(VALUE obj, const struct rb_block *dst, const VALUE *ep);
void rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *src);

VALUE rb_vm_frame_block_handler(const rb_control_frame_t *cfp);

#define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1)
#define RUBY_VM_NEXT_CONTROL_FRAME(cfp)     ((cfp)-1)

#define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \
  ((void *)(ecfp) > (void *)(cfp))
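
/* Note (added for exposition): control frames are stacked from the end of
 * vm_stack toward lower addresses, which is why the chronologically previous
 * (older) frame lives at the higher address (cfp)+1 and the next one at
 * (cfp)-1. */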

static inline const rb_control_frame_t *
RUBY_VM_END_CONTROL_FRAME(const rb_execution_context_t *ec)
{
    return (rb_control_frame_t *)(ec->vm_stack + ec->vm_stack_size);
}

static inline int
RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
{
    return !RUBY_VM_VALID_CONTROL_FRAME_P(cfp, RUBY_VM_END_CONTROL_FRAME(ec));
}

static inline int
VM_BH_ISEQ_BLOCK_P(VALUE block_handler)
{
    if ((block_handler & 0x03) == 0x01) {
#if VM_CHECK_MODE > 0
        struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
        VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq));
#endif
        return 1;
    }
    else {
        return 0;
    }
}

static inline VALUE
VM_BH_FROM_ISEQ_BLOCK(const struct rb_captured_block *captured)
{
    VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x01);
    VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
    return block_handler;
}

static inline const struct rb_captured_block *
VM_BH_TO_ISEQ_BLOCK(VALUE block_handler)
{
    struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
    VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
    return captured;
}

static inline int
VM_BH_IFUNC_P(VALUE block_handler)
{
    if ((block_handler & 0x03) == 0x03) {
#if VM_CHECK_MODE > 0
        struct rb_captured_block *captured = (void *)(block_handler & ~0x03);
        VM_ASSERT(imemo_type_p(captured->code.val, imemo_ifunc));
#endif
        return 1;
    }
    else {
        return 0;
    }
}

static inline VALUE
VM_BH_FROM_IFUNC_BLOCK(const struct rb_captured_block *captured)
{
    VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler));
    return block_handler;
}

static inline const struct rb_captured_block *
VM_BH_TO_IFUNC_BLOCK(VALUE block_handler)
{
    struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler));
    return captured;
}

static inline const struct rb_captured_block *
VM_BH_TO_CAPT_BLOCK(VALUE block_handler)
{
    struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler) || VM_BH_ISEQ_BLOCK_P(block_handler));
    return captured;
}

static inline enum rb_block_handler_type
vm_block_handler_type(VALUE block_handler)
{
    if (VM_BH_ISEQ_BLOCK_P(block_handler)) {
        return block_handler_type_iseq;
    }
    else if (VM_BH_IFUNC_P(block_handler)) {
        return block_handler_type_ifunc;
    }
    else if (SYMBOL_P(block_handler)) {
        return block_handler_type_symbol;
    }
    else {
        VM_ASSERT(rb_obj_is_proc(block_handler));
        return block_handler_type_proc;
    }
}

static inline void
vm_block_handler_verify(MAYBE_UNUSED(VALUE block_handler))
{
    VM_ASSERT(block_handler == VM_BLOCK_HANDLER_NONE ||
              (vm_block_handler_type(block_handler), 1));
}

static inline int
vm_cfp_forwarded_bh_p(const rb_control_frame_t *cfp, VALUE block_handler)
{
    return ((VALUE)cfp->block_code) == block_handler;
}

static inline enum rb_block_type
vm_block_type(const struct rb_block *block)
{
#if VM_CHECK_MODE > 0
    switch (block->type) {
      case block_type_iseq:
        VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_iseq));
        break;
      case block_type_ifunc:
        VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_ifunc));
        break;
      case block_type_symbol:
        VM_ASSERT(SYMBOL_P(block->as.symbol));
        break;
      case block_type_proc:
        VM_ASSERT(rb_obj_is_proc(block->as.proc));
        break;
    }
#endif
    return block->type;
}

static inline void
vm_block_type_set(const struct rb_block *block, enum rb_block_type type)
{
    struct rb_block *mb = (struct rb_block *)block;
    mb->type = type;
}

static inline const struct rb_block *
vm_proc_block(VALUE procval)
{
    VM_ASSERT(rb_obj_is_proc(procval));
    return &((rb_proc_t *)RTYPEDDATA_DATA(procval))->block;
}

static inline const rb_iseq_t *vm_block_iseq(const struct rb_block *block);
static inline const VALUE *vm_block_ep(const struct rb_block *block);

static inline const rb_iseq_t *
vm_proc_iseq(VALUE procval)
{
    return vm_block_iseq(vm_proc_block(procval));
}

static inline const VALUE *
vm_proc_ep(VALUE procval)
{
    return vm_block_ep(vm_proc_block(procval));
}

static inline const rb_iseq_t *
vm_block_iseq(const struct rb_block *block)
{
    switch (vm_block_type(block)) {
      case block_type_iseq: return rb_iseq_check(block->as.captured.code.iseq);
      case block_type_proc: return vm_proc_iseq(block->as.proc);
      case block_type_ifunc:
      case block_type_symbol: return NULL;
    }
    VM_UNREACHABLE(vm_block_iseq);
    return NULL;
}

static inline const VALUE *
vm_block_ep(const struct rb_block *block)
{
    switch (vm_block_type(block)) {
      case block_type_iseq:
      case block_type_ifunc:  return block->as.captured.ep;
      case block_type_proc:   return vm_proc_ep(block->as.proc);
      case block_type_symbol: return NULL;
    }
    VM_UNREACHABLE(vm_block_ep);
    return NULL;
}

static inline VALUE
vm_block_self(const struct rb_block *block)
{
    switch (vm_block_type(block)) {
      case block_type_iseq:
      case block_type_ifunc:
        return block->as.captured.self;
      case block_type_proc:
        return vm_block_self(vm_proc_block(block->as.proc));
      case block_type_symbol:
        return Qundef;
    }
    VM_UNREACHABLE(vm_block_self);
    return Qundef;
}

static inline VALUE
VM_BH_TO_SYMBOL(VALUE block_handler)
{
    VM_ASSERT(SYMBOL_P(block_handler));
    return block_handler;
}

static inline VALUE
VM_BH_FROM_SYMBOL(VALUE symbol)
{
    VM_ASSERT(SYMBOL_P(symbol));
    return symbol;
}

static inline VALUE
VM_BH_TO_PROC(VALUE block_handler)
{
    VM_ASSERT(rb_obj_is_proc(block_handler));
    return block_handler;
}

static inline VALUE
VM_BH_FROM_PROC(VALUE procval)
{
    VM_ASSERT(rb_obj_is_proc(procval));
    return procval;
}
/* VM related object allocate functions */
VALUE rb_thread_alloc(VALUE klass);
VALUE rb_binding_alloc(VALUE klass);
VALUE rb_proc_alloc(VALUE klass);
VALUE rb_proc_dup(VALUE self);

/* for debug */
extern void rb_vmdebug_stack_dump_raw(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
extern void rb_vmdebug_debug_print_pre(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE *_pc);
extern void rb_vmdebug_debug_print_post(const rb_execution_context_t *ec, const rb_control_frame_t *cfp
#if OPT_STACK_CACHING
                                        , VALUE reg_a, VALUE reg_b
#endif
);

#define SDR() rb_vmdebug_stack_dump_raw(GET_EC(), GET_EC()->cfp)
#define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_EC(), (cfp))
void rb_vm_bugreport(const void *);
typedef void (*ruby_sighandler_t)(int);
RBIMPL_ATTR_FORMAT(RBIMPL_PRINTF_FORMAT, 4, 5)
NORETURN(void rb_bug_for_fatal_signal(ruby_sighandler_t default_sighandler, int sig, const void *, const char *fmt, ...));

/* functions about thread/vm execution */
RUBY_SYMBOL_EXPORT_BEGIN
VALUE rb_iseq_eval(const rb_iseq_t *iseq);
VALUE rb_iseq_eval_main(const rb_iseq_t *iseq);
VALUE rb_iseq_path(const rb_iseq_t *iseq);
VALUE rb_iseq_realpath(const rb_iseq_t *iseq);
RUBY_SYMBOL_EXPORT_END

VALUE rb_iseq_pathobj_new(VALUE path, VALUE realpath);
void rb_iseq_pathobj_set(const rb_iseq_t *iseq, VALUE path, VALUE realpath);

int rb_ec_frame_method_id_and_class(const rb_execution_context_t *ec, ID *idp, ID *called_idp, VALUE *klassp);
void rb_ec_setup_exception(const rb_execution_context_t *ec, VALUE mesg, VALUE cause);

VALUE rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, int argc, const VALUE *argv, int kw_splat, VALUE block_handler);

VALUE rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda);
static inline VALUE
rb_vm_make_proc(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
{
    return rb_vm_make_proc_lambda(ec, captured, klass, 0);
}

static inline VALUE
rb_vm_make_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
{
    return rb_vm_make_proc_lambda(ec, captured, klass, 1);
}

VALUE rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *src_cfp);
VALUE rb_vm_env_local_variables(const rb_env_t *env);
const rb_env_t *rb_vm_env_prev_env(const rb_env_t *env);
const VALUE *rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars);
void rb_vm_inc_const_missing_count(void);
VALUE rb_vm_call_kw(rb_execution_context_t *ec, VALUE recv, VALUE id, int argc,
                    const VALUE *argv, const rb_callable_method_entry_t *me, int kw_splat);
void rb_vm_pop_frame_no_int(rb_execution_context_t *ec);
MJIT_STATIC void rb_vm_pop_frame(rb_execution_context_t *ec);

void rb_thread_start_timer_thread(void);
void rb_thread_stop_timer_thread(void);
void rb_thread_reset_timer_thread(void);
void rb_thread_wakeup_timer_thread(int);

static inline void
rb_vm_living_threads_init(rb_vm_t *vm)
{
    ccan_list_head_init(&vm->waiting_fds);
    ccan_list_head_init(&vm->waiting_pids);
    ccan_list_head_init(&vm->workqueue);
    ccan_list_head_init(&vm->waiting_grps);
    ccan_list_head_init(&vm->ractor.set);
}

typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);
rb_control_frame_t *rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
rb_control_frame_t *rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
int rb_vm_get_sourceline(const rb_control_frame_t *);
void rb_vm_stack_to_heap(rb_execution_context_t *ec);
void ruby_thread_init_stack(rb_thread_t *th);
rb_thread_t *ruby_thread_from_native(void);
int ruby_thread_set_native(rb_thread_t *th);
int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp);
void rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp);
MJIT_STATIC VALUE rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler);

void rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE exception_class, VALUE mesg);

#define rb_vm_register_special_exception(sp, e, m) \
    rb_vm_register_special_exception_str(sp, e, rb_usascii_str_new_static((m), (long)rb_strlen_lit(m)))

void rb_gc_mark_machine_stack(const rb_execution_context_t *ec);

void rb_vm_rewrite_cref(rb_cref_t *node, VALUE old_klass, VALUE new_klass, rb_cref_t **new_cref_ptr);

MJIT_STATIC const rb_callable_method_entry_t *rb_vm_frame_method_entry(const rb_control_frame_t *cfp);

#define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack]

#define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin) do {                       \
    STATIC_ASSERT(sizeof_sp,  sizeof(*(sp))  == sizeof(VALUE));              \
    STATIC_ASSERT(sizeof_cfp, sizeof(*(cfp)) == sizeof(rb_control_frame_t)); \
    const struct rb_control_frame_struct *bound = (void *)&(sp)[(margin)];   \
    if (UNLIKELY((cfp) <= &bound[1])) {                                      \
        vm_stackoverflow();                                                  \
    }                                                                        \
} while (0)

#define CHECK_VM_STACK_OVERFLOW(cfp, margin) \
    CHECK_VM_STACK_OVERFLOW0((cfp), (cfp)->sp, (margin))
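
/* Illustrative sketch (added for exposition, not part of the original header,
 * guarded out): a typical call site reserves room for the arguments plus the
 * receiver before pushing a frame; `argc` here is hypothetical. On overflow,
 * vm_stackoverflow() raises sysstack_error. */
#if 0
    CHECK_VM_STACK_OVERFLOW(ec->cfp, argc + 1);
#endif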

VALUE rb_catch_protect(VALUE t, rb_block_call_func *func, VALUE data, enum ruby_tag_type *stateptr);

rb_execution_context_t *rb_vm_main_ractor_ec(rb_vm_t *vm); // ractor.c

/* for thread */

#if RUBY_VM_THREAD_MODEL == 2
MJIT_SYMBOL_EXPORT_BEGIN

RUBY_EXTERN struct rb_ractor_struct *ruby_single_main_ractor; // ractor.c
RUBY_EXTERN rb_vm_t *ruby_current_vm_ptr;
RUBY_EXTERN rb_event_flag_t ruby_vm_event_flags;
RUBY_EXTERN rb_event_flag_t ruby_vm_event_enabled_global_flags;
RUBY_EXTERN unsigned int    ruby_vm_event_local_num;

MJIT_SYMBOL_EXPORT_END

#define GET_VM()     rb_current_vm()
#define GET_RACTOR() rb_current_ractor()
#define GET_THREAD() rb_current_thread()
#define GET_EC()     rb_current_execution_context(true)

static inline rb_thread_t *
rb_ec_thread_ptr(const rb_execution_context_t *ec)
{
    return ec->thread_ptr;
}

static inline rb_ractor_t *
rb_ec_ractor_ptr(const rb_execution_context_t *ec)
{
    const rb_thread_t *th = rb_ec_thread_ptr(ec);
    if (th) {
        VM_ASSERT(th->ractor != NULL);
        return th->ractor;
    }
    else {
        return NULL;
    }
}

static inline rb_vm_t *
rb_ec_vm_ptr(const rb_execution_context_t *ec)
{
    const rb_thread_t *th = rb_ec_thread_ptr(ec);
    if (th) {
        return th->vm;
    }
    else {
        return NULL;
    }
}

static inline rb_execution_context_t *
rb_current_execution_context(bool expect_ec)
{
#ifdef RB_THREAD_LOCAL_SPECIFIER
  #ifdef __APPLE__
    rb_execution_context_t *ec = rb_current_ec();
  #else
    rb_execution_context_t *ec = ruby_current_ec;
  #endif
#else
    rb_execution_context_t *ec = native_tls_get(ruby_current_ec_key);
#endif
    VM_ASSERT(!expect_ec || ec != NULL);
    return ec;
}

static inline rb_thread_t *
rb_current_thread(void)
{
    const rb_execution_context_t *ec = GET_EC();
    return rb_ec_thread_ptr(ec);
}

static inline rb_ractor_t *
rb_current_ractor(void)
{
    if (ruby_single_main_ractor) {
        return ruby_single_main_ractor;
    }
    else {
        const rb_execution_context_t *ec = GET_EC();
        return rb_ec_ractor_ptr(ec);
    }
}

static inline rb_vm_t *
rb_current_vm(void)
{
#if 0 // TODO: reconsider the assertions
    VM_ASSERT(ruby_current_vm_ptr == NULL ||
              ruby_current_execution_context_ptr == NULL ||
              rb_ec_thread_ptr(GET_EC()) == NULL ||
              rb_ec_thread_ptr(GET_EC())->status == THREAD_KILLED ||
              rb_ec_vm_ptr(GET_EC()) == ruby_current_vm_ptr);
#endif

    return ruby_current_vm_ptr;
}

void rb_ec_vm_lock_rec_release(const rb_execution_context_t *ec,
                               unsigned int recorded_lock_rec,
                               unsigned int current_lock_rec);

static inline unsigned int
rb_ec_vm_lock_rec(const rb_execution_context_t *ec)
{
    rb_vm_t *vm = rb_ec_vm_ptr(ec);

    if (vm->ractor.sync.lock_owner != rb_ec_ractor_ptr(ec)) {
        return 0;
    }
    else {
        return vm->ractor.sync.lock_rec;
    }
}

#else
#error "unsupported thread model"
#endif

enum {
    TIMER_INTERRUPT_MASK         = 0x01,
    PENDING_INTERRUPT_MASK       = 0x02,
    POSTPONED_JOB_INTERRUPT_MASK = 0x04,
    TRAP_INTERRUPT_MASK          = 0x08,
    TERMINATE_INTERRUPT_MASK     = 0x10,
    VM_BARRIER_INTERRUPT_MASK    = 0x20,
};

#define RUBY_VM_SET_TIMER_INTERRUPT(ec)         ATOMIC_OR((ec)->interrupt_flag, TIMER_INTERRUPT_MASK)
#define RUBY_VM_SET_INTERRUPT(ec)               ATOMIC_OR((ec)->interrupt_flag, PENDING_INTERRUPT_MASK)
#define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, POSTPONED_JOB_INTERRUPT_MASK)
#define RUBY_VM_SET_TRAP_INTERRUPT(ec)          ATOMIC_OR((ec)->interrupt_flag, TRAP_INTERRUPT_MASK)
#define RUBY_VM_SET_TERMINATE_INTERRUPT(ec)     ATOMIC_OR((ec)->interrupt_flag, TERMINATE_INTERRUPT_MASK)
#define RUBY_VM_SET_VM_BARRIER_INTERRUPT(ec)    ATOMIC_OR((ec)->interrupt_flag, VM_BARRIER_INTERRUPT_MASK)
#define RUBY_VM_INTERRUPTED(ec)                 ((ec)->interrupt_flag & ~(ec)->interrupt_mask & \
                                                 (PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK))
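
/* Illustrative sketch (added for exposition, not part of the original header,
 * guarded out): asynchronous events are delivered by setting a flag bit and
 * letting the target thread notice it at its next interrupt check;
 * `target_ec` is hypothetical. */
#if 0
    RUBY_VM_SET_TRAP_INTERRUPT(target_ec); /* e.g. from signal handling code */
    /* ... later, on the target thread: */
    RUBY_VM_CHECK_INTS(target_ec);         /* defined below; runs rb_threadptr_execute_interrupts() */
#endif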
1946
1947static inline bool
1948RUBY_VM_INTERRUPTED_ANY(rb_execution_context_t *ec)
1949{
1950#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
1951 uint32_t current_clock = rb_ec_vm_ptr(ec)->clock;
1952
1953 if (current_clock != ec->checked_clock) {
1954 ec->checked_clock = current_clock;
1955 RUBY_VM_SET_TIMER_INTERRUPT(ec);
1956 }
1957#endif
1958 return ec->interrupt_flag & ~(ec)->interrupt_mask;
1959}
1960
1961VALUE rb_exc_set_backtrace(VALUE exc, VALUE bt);
1962int rb_signal_buff_size(void);
1963int rb_signal_exec(rb_thread_t *th, int sig);
1964void rb_threadptr_check_signal(rb_thread_t *mth);
1965void rb_threadptr_signal_raise(rb_thread_t *th, int sig);
1966void rb_threadptr_signal_exit(rb_thread_t *th);
1967int rb_threadptr_execute_interrupts(rb_thread_t *, int);
1968void rb_threadptr_interrupt(rb_thread_t *th);
1969void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th);
1970void rb_threadptr_pending_interrupt_clear(rb_thread_t *th);
1971void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v);
1972VALUE rb_ec_get_errinfo(const rb_execution_context_t *ec);
1973void rb_ec_error_print(rb_execution_context_t * volatile ec, volatile VALUE errinfo);
1974void rb_execution_context_update(rb_execution_context_t *ec);
1975void rb_execution_context_mark(const rb_execution_context_t *ec);
1976void rb_fiber_close(rb_fiber_t *fib);
1977void Init_native_thread(rb_thread_t *th);
1978int rb_vm_check_ints_blocking(rb_execution_context_t *ec);
1979
1980// vm_sync.h
1981void rb_vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond);
1982void rb_vm_cond_timedwait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec);
1983
1984#define RUBY_VM_CHECK_INTS(ec) rb_vm_check_ints(ec)
1985static inline void
1986rb_vm_check_ints(rb_execution_context_t *ec)
1987{
1988#ifdef RUBY_ASSERT_CRITICAL_SECTION
1989 VM_ASSERT(ruby_assert_critical_section_entered == 0);
1990#endif
1991
1992 VM_ASSERT(ec == GET_EC());
1993
1994 if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(ec))) {
1995 rb_threadptr_execute_interrupts(rb_ec_thread_ptr(ec), 0);
1996 }
1997}
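/* Editorial sketch (not part of the original header): RUBY_VM_CHECK_INTS()
 * is the polling point for everything flagged above; long-running internal
 * loops call it periodically so signals, Thread#raise, Thread#kill and
 * postponed jobs are serviced promptly.  `step' is a hypothetical
 * placeholder; note the call may raise or switch threads. */
#if 0
static void
example_long_loop(rb_execution_context_t *ec)
{
    for (long i = 0; i < 1000000; i++) {
        step(i);
        RUBY_VM_CHECK_INTS(ec);
    }
}
#endif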
1998
1999/* tracer */
2000
2001 struct rb_trace_arg_struct {
2002 rb_event_flag_t event;
2003 rb_execution_context_t *ec;
2004 const rb_control_frame_t *cfp;
2005 VALUE self;
2006 ID id;
2007 ID called_id;
2008 VALUE klass;
2009 VALUE data;
2010
2011 int klass_solved;
2012
2013 /* calc from cfp */
2014 int lineno;
2015 VALUE path;
2016};
2017
2018void rb_hook_list_mark(rb_hook_list_t *hooks);
2019void rb_hook_list_free(rb_hook_list_t *hooks);
2020void rb_hook_list_connect_tracepoint(VALUE target, rb_hook_list_t *list, VALUE tpval, unsigned int target_line);
2021void rb_hook_list_remove_tracepoint(rb_hook_list_t *list, VALUE tpval);
2022
2023void rb_exec_event_hooks(struct rb_trace_arg_struct *trace_arg, rb_hook_list_t *hooks, int pop_p);
2024
2025#define EXEC_EVENT_HOOK_ORIG(ec_, hooks_, flag_, self_, id_, called_id_, klass_, data_, pop_p_) do { \
2026 const rb_event_flag_t flag_arg_ = (flag_); \
2027 rb_hook_list_t *hooks_arg_ = (hooks_); \
2028 if (UNLIKELY((hooks_arg_)->events & (flag_arg_))) { \
2029 /* defer evaluating the other arguments */ \
2030 rb_exec_event_hook_orig(ec_, hooks_arg_, flag_arg_, self_, id_, called_id_, klass_, data_, pop_p_); \
2031 } \
2032} while (0)
2033
2034static inline void
2035rb_exec_event_hook_orig(rb_execution_context_t *ec, rb_hook_list_t *hooks, rb_event_flag_t flag,
2036 VALUE self, ID id, ID called_id, VALUE klass, VALUE data, int pop_p)
2037{
2038 struct rb_trace_arg_struct trace_arg;
2039
2040 VM_ASSERT((hooks->events & flag) != 0);
2041
2042 trace_arg.event = flag;
2043 trace_arg.ec = ec;
2044 trace_arg.cfp = ec->cfp;
2045 trace_arg.self = self;
2046 trace_arg.id = id;
2047 trace_arg.called_id = called_id;
2048 trace_arg.klass = klass;
2049 trace_arg.data = data;
2050 trace_arg.path = Qundef;
2051 trace_arg.klass_solved = 0;
2052
2053 rb_exec_event_hooks(&trace_arg, hooks, pop_p);
2054}
2055
2056 struct rb_ractor_pub {
2057 VALUE self;
2058 uint32_t id;
2059 rb_hook_list_t hooks;
2060};
2061
2062static inline rb_hook_list_t *
2063rb_ec_ractor_hooks(const rb_execution_context_t *ec)
2064{
2065 struct rb_ractor_pub *cr_pub = (struct rb_ractor_pub *)rb_ec_ractor_ptr(ec);
2066 return &cr_pub->hooks;
2067}
2068
2069#define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_) \
2070 EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 0)
2071
2072#define EXEC_EVENT_HOOK_AND_POP_FRAME(ec_, flag_, self_, id_, called_id_, klass_, data_) \
2073 EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 1)
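/* Editorial sketch (not part of the original header): because
 * EXEC_EVENT_HOOK_ORIG() tests hooks->events before expanding into the
 * out-of-line call, the self/id/klass/data argument expressions are only
 * evaluated when some hook actually subscribed to the event.  `mid',
 * `klass' and `build_expensive_payload' are hypothetical placeholders. */
#if 0
static void
example_call_site(rb_execution_context_t *ec, ID mid, VALUE klass)
{
    /* build_expensive_payload() is never evaluated unless some hook
     * subscribed to RUBY_EVENT_CALL */
    EXEC_EVENT_HOOK(ec, RUBY_EVENT_CALL, ec->cfp->self, mid, mid, klass,
                    build_expensive_payload());
}
#endif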
2074
2075static inline void
2076rb_exec_event_hook_script_compiled(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE eval_script)
2077{
2078 EXEC_EVENT_HOOK(ec, RUBY_EVENT_SCRIPT_COMPILED, ec->cfp->self, 0, 0, 0,
2079 NIL_P(eval_script) ? (VALUE)iseq :
2080 rb_ary_new_from_args(2, eval_script, (VALUE)iseq));
2081}
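/* Editorial sketch (not part of the original header): the hook above
 * publishes either the iseq alone (when there is no eval'ed source) or a
 * two-element array [eval_script, iseq].  A consumer could distinguish
 * the two payload shapes like this: */
#if 0
static void
example_consume_payload(VALUE data)
{
    if (RB_TYPE_P(data, T_ARRAY)) {
        VALUE eval_script = RARRAY_AREF(data, 0); /* eval'ed source */
        VALUE iseq_val    = RARRAY_AREF(data, 1); /* compiled iseq  */
        (void)eval_script; (void)iseq_val;
    }
    else {
        VALUE iseq_val = data;                    /* iseq only */
        (void)iseq_val;
    }
}
#endif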
2082
2083void rb_vm_trap_exit(rb_vm_t *vm);
2084
2085RUBY_SYMBOL_EXPORT_BEGIN
2086
2087int rb_thread_check_trap_pending(void);
2088
2089/* #define RUBY_EVENT_RESERVED_FOR_INTERNAL_USE 0x030000 */ /* from vm_core.h */
2090#define RUBY_EVENT_COVERAGE_LINE 0x010000
2091#define RUBY_EVENT_COVERAGE_BRANCH 0x020000
2092
2093extern VALUE rb_get_coverages(void);
2094extern void rb_set_coverages(VALUE, int, VALUE);
2095extern void rb_clear_coverages(void);
2096extern void rb_reset_coverages(void);
2097extern void rb_resume_coverages(void);
2098extern void rb_suspend_coverages(void);
2099
2100void rb_postponed_job_flush(rb_vm_t *vm);
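/* Editorial sketch (not part of the original header): postponed jobs are
 * queued from contexts where running arbitrary Ruby code is unsafe (signal
 * handlers, GC callbacks) and drained later at a safe point by
 * rb_postponed_job_flush().  rb_postponed_job_register() is the public
 * registration API from ruby/debug.h in this Ruby version. */
#if 0
static void
example_job(void *data)
{
    /* runs later, at a safe point, on a Ruby thread */
}

static void
example_enqueue(void)
{
    /* safe to call even from a signal handler */
    rb_postponed_job_register(0, example_job, NULL);
}
#endif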
2101
2102// ractor.c
2103RUBY_EXTERN VALUE rb_eRactorUnsafeError;
2104RUBY_EXTERN VALUE rb_eRactorIsolationError;
2105
2106RUBY_SYMBOL_EXPORT_END
2107
2108#endif /* RUBY_VM_CORE_H */