Ruby 3.2.3p157 (2024-01-18 revision 52bb2ac0a6971d0391efa2275f7a66bff319087c)
gc.c
1/**********************************************************************
2
3 gc.c -
4
5 $Author$
6 created at: Tue Oct 5 09:44:46 JST 1993
7
8 Copyright (C) 1993-2007 Yukihiro Matsumoto
9 Copyright (C) 2000 Network Applied Communication Laboratory, Inc.
10 Copyright (C) 2000 Information-technology Promotion Agency, Japan
11
12**********************************************************************/
13
14#define rb_data_object_alloc rb_data_object_alloc
15#define rb_data_typed_object_alloc rb_data_typed_object_alloc
16
17#include "ruby/internal/config.h"
18#ifdef _WIN32
19# include "ruby/ruby.h"
20#endif
21
22#include <signal.h>
23
24#define sighandler_t ruby_sighandler_t
25
26#ifndef _WIN32
27#include <unistd.h>
28#include <sys/mman.h>
29#endif
30
31#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
32# include "wasm/setjmp.h"
33# include "wasm/machine.h"
34#else
35# include <setjmp.h>
36#endif
37#include <stdarg.h>
38#include <stdio.h>
39
40/* MALLOC_HEADERS_BEGIN */
41#ifndef HAVE_MALLOC_USABLE_SIZE
42# ifdef _WIN32
43# define HAVE_MALLOC_USABLE_SIZE
44# define malloc_usable_size(a) _msize(a)
45# elif defined HAVE_MALLOC_SIZE
46# define HAVE_MALLOC_USABLE_SIZE
47# define malloc_usable_size(a) malloc_size(a)
48# endif
49#endif
50
51#ifdef HAVE_MALLOC_USABLE_SIZE
52# ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
53/* Alternative malloc header is included in ruby/missing.h */
54# elif defined(HAVE_MALLOC_H)
55# include <malloc.h>
56# elif defined(HAVE_MALLOC_NP_H)
57# include <malloc_np.h>
58# elif defined(HAVE_MALLOC_MALLOC_H)
59# include <malloc/malloc.h>
60# endif
61#endif
62
63#if !defined(PAGE_SIZE) && defined(HAVE_SYS_USER_H)
64/* LIST_HEAD conflicts with sys/queue.h on macOS */
65# include <sys/user.h>
66#endif
67/* MALLOC_HEADERS_END */
68
69#ifdef HAVE_SYS_TIME_H
70# include <sys/time.h>
71#endif
72
73#ifdef HAVE_SYS_RESOURCE_H
74# include <sys/resource.h>
75#endif
76
77#if defined _WIN32 || defined __CYGWIN__
78# include <windows.h>
79#elif defined(HAVE_POSIX_MEMALIGN)
80#elif defined(HAVE_MEMALIGN)
81# include <malloc.h>
82#endif
83
84#include <sys/types.h>
85
86#ifdef __EMSCRIPTEN__
87#include <emscripten.h>
88#endif
89
90#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
91# include <mach/task.h>
92# include <mach/mach_init.h>
93# include <mach/mach_port.h>
94#endif
95#undef LIST_HEAD /* ccan/list conflicts with BSD-origin sys/queue.h. */
96
97#include "constant.h"
98#include "debug_counter.h"
99#include "eval_intern.h"
100#include "gc.h"
101#include "id_table.h"
102#include "internal.h"
103#include "internal/class.h"
104#include "internal/complex.h"
105#include "internal/cont.h"
106#include "internal/error.h"
107#include "internal/eval.h"
108#include "internal/gc.h"
109#include "internal/hash.h"
110#include "internal/imemo.h"
111#include "internal/io.h"
112#include "internal/numeric.h"
113#include "internal/object.h"
114#include "internal/proc.h"
115#include "internal/rational.h"
116#include "internal/sanitizers.h"
117#include "internal/struct.h"
118#include "internal/symbol.h"
119#include "internal/thread.h"
120#include "internal/variable.h"
121#include "internal/warnings.h"
122#include "mjit.h"
123#include "probes.h"
124#include "regint.h"
125#include "ruby/debug.h"
126#include "ruby/io.h"
127#include "ruby/re.h"
128#include "ruby/st.h"
129#include "ruby/thread.h"
130#include "ruby/util.h"
131#include "ruby_assert.h"
132#include "ruby_atomic.h"
133#include "symbol.h"
134#include "transient_heap.h"
135#include "vm_core.h"
136#include "vm_sync.h"
137#include "vm_callinfo.h"
138#include "ractor_core.h"
139
140#include "builtin.h"
141#include "shape.h"
142
143#define rb_setjmp(env) RUBY_SETJMP(env)
144#define rb_jmp_buf rb_jmpbuf_t
145#undef rb_data_object_wrap
146
147#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
148#define MAP_ANONYMOUS MAP_ANON
149#endif
150
151static inline struct rbimpl_size_mul_overflow_tag
152size_add_overflow(size_t x, size_t y)
153{
154 size_t z;
155 bool p;
156#if 0
157
158#elif __has_builtin(__builtin_add_overflow)
159 p = __builtin_add_overflow(x, y, &z);
160
161#elif defined(DSIZE_T)
162 RB_GNUC_EXTENSION DSIZE_T dx = x;
163 RB_GNUC_EXTENSION DSIZE_T dy = y;
164 RB_GNUC_EXTENSION DSIZE_T dz = dx + dy;
165 p = dz > SIZE_MAX;
166 z = (size_t)dz;
167
168#else
169 z = x + y;
170 p = z < y;
171
172#endif
173 return (struct rbimpl_size_mul_overflow_tag) { p, z, };
174}
175
176static inline struct rbimpl_size_mul_overflow_tag
177size_mul_add_overflow(size_t x, size_t y, size_t z) /* x * y + z */
178{
179 struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
180 struct rbimpl_size_mul_overflow_tag u = size_add_overflow(t.right, z);
181 return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left, u.right };
182}
183
184static inline struct rbimpl_size_mul_overflow_tag
185size_mul_add_mul_overflow(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
186{
187 struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
188 struct rbimpl_size_mul_overflow_tag u = rbimpl_size_mul_overflow(z, w);
189 struct rbimpl_size_mul_overflow_tag v = size_add_overflow(t.right, u.right);
190 return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left || v.left, v.right };
191}
192
193PRINTF_ARGS(NORETURN(static void gc_raise(VALUE, const char*, ...)), 2, 3);
194
195static inline size_t
196size_mul_or_raise(size_t x, size_t y, VALUE exc)
197{
198 struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
199 if (LIKELY(!t.left)) {
200 return t.right;
201 }
202 else if (rb_during_gc()) {
203 rb_memerror(); /* or...? */
204 }
205 else {
206 gc_raise(
207 exc,
208 "integer overflow: %"PRIuSIZE
209 " * %"PRIuSIZE
210 " > %"PRIuSIZE,
211 x, y, (size_t)SIZE_MAX);
212 }
213}
214
215size_t
216rb_size_mul_or_raise(size_t x, size_t y, VALUE exc)
217{
218 return size_mul_or_raise(x, y, exc);
219}
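/* Illustrative usage sketch (the function and parameter names here are
 * hypothetical, not taken from this file): callers use these helpers when
 * computing allocation sizes so that an overflow raises instead of silently
 * wrapping the size_t result.
 *
 *     static void *
 *     alloc_n(size_t n, size_t elem_size)
 *     {
 *         return ruby_xmalloc(rb_size_mul_or_raise(n, elem_size, rb_eArgError));
 *     }
 */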
220
221static inline size_t
222size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
223{
224 struct rbimpl_size_mul_overflow_tag t = size_mul_add_overflow(x, y, z);
225 if (LIKELY(!t.left)) {
226 return t.right;
227 }
228 else if (rb_during_gc()) {
229 rb_memerror(); /* or...? */
230 }
231 else {
232 gc_raise(
233 exc,
234 "integer overflow: %"PRIuSIZE
235 " * %"PRIuSIZE
236 " + %"PRIuSIZE
237 " > %"PRIuSIZE,
238 x, y, z, (size_t)SIZE_MAX);
239 }
240}
241
242size_t
243rb_size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
244{
245 return size_mul_add_or_raise(x, y, z, exc);
246}
247
248static inline size_t
249size_mul_add_mul_or_raise(size_t x, size_t y, size_t z, size_t w, VALUE exc)
250{
251 struct rbimpl_size_mul_overflow_tag t = size_mul_add_mul_overflow(x, y, z, w);
252 if (LIKELY(!t.left)) {
253 return t.right;
254 }
255 else if (rb_during_gc()) {
256 rb_memerror(); /* or...? */
257 }
258 else {
259 gc_raise(
260 exc,
261 "integer overflow: %"PRIdSIZE
262 " * %"PRIdSIZE
263 " + %"PRIdSIZE
264 " * %"PRIdSIZE
265 " > %"PRIdSIZE,
266 x, y, z, w, (size_t)SIZE_MAX);
267 }
268}
269
270#if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL
271/* trick the compiler into thinking an external signal handler uses this */
272volatile VALUE rb_gc_guarded_val;
273volatile VALUE *
274rb_gc_guarded_ptr_val(volatile VALUE *ptr, VALUE val)
275{
276 rb_gc_guarded_val = val;
277
278 return ptr;
279}
280#endif
281
282#ifndef GC_HEAP_INIT_SLOTS
283#define GC_HEAP_INIT_SLOTS 10000
284#endif
285#ifndef GC_HEAP_FREE_SLOTS
286#define GC_HEAP_FREE_SLOTS 4096
287#endif
288#ifndef GC_HEAP_GROWTH_FACTOR
289#define GC_HEAP_GROWTH_FACTOR 1.8
290#endif
291#ifndef GC_HEAP_GROWTH_MAX_SLOTS
292#define GC_HEAP_GROWTH_MAX_SLOTS 0 /* 0 disables the cap */
293#endif
294#ifndef GC_HEAP_OLDOBJECT_LIMIT_FACTOR
295#define GC_HEAP_OLDOBJECT_LIMIT_FACTOR 2.0
296#endif
297
298#ifndef GC_HEAP_FREE_SLOTS_MIN_RATIO
299#define GC_HEAP_FREE_SLOTS_MIN_RATIO 0.20
300#endif
301#ifndef GC_HEAP_FREE_SLOTS_GOAL_RATIO
302#define GC_HEAP_FREE_SLOTS_GOAL_RATIO 0.40
303#endif
304#ifndef GC_HEAP_FREE_SLOTS_MAX_RATIO
305#define GC_HEAP_FREE_SLOTS_MAX_RATIO 0.65
306#endif
307
308#ifndef GC_MALLOC_LIMIT_MIN
309#define GC_MALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
310#endif
311#ifndef GC_MALLOC_LIMIT_MAX
312#define GC_MALLOC_LIMIT_MAX (32 * 1024 * 1024 /* 32MB */)
313#endif
314#ifndef GC_MALLOC_LIMIT_GROWTH_FACTOR
315#define GC_MALLOC_LIMIT_GROWTH_FACTOR 1.4
316#endif
317
318#ifndef GC_OLDMALLOC_LIMIT_MIN
319#define GC_OLDMALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
320#endif
321#ifndef GC_OLDMALLOC_LIMIT_GROWTH_FACTOR
322#define GC_OLDMALLOC_LIMIT_GROWTH_FACTOR 1.2
323#endif
324#ifndef GC_OLDMALLOC_LIMIT_MAX
325#define GC_OLDMALLOC_LIMIT_MAX (128 * 1024 * 1024 /* 128MB */)
326#endif
327
328#ifndef PRINT_MEASURE_LINE
329#define PRINT_MEASURE_LINE 0
330#endif
331#ifndef PRINT_ENTER_EXIT_TICK
332#define PRINT_ENTER_EXIT_TICK 0
333#endif
334#ifndef PRINT_ROOT_TICKS
335#define PRINT_ROOT_TICKS 0
336#endif
337
338#define USE_TICK_T (PRINT_ENTER_EXIT_TICK || PRINT_MEASURE_LINE || PRINT_ROOT_TICKS)
339#define TICK_TYPE 1
340
341typedef struct {
342 size_t heap_init_slots;
343 size_t heap_free_slots;
344 double growth_factor;
345 size_t growth_max_slots;
346
347 double heap_free_slots_min_ratio;
348 double heap_free_slots_goal_ratio;
349 double heap_free_slots_max_ratio;
350 double oldobject_limit_factor;
351
352 size_t malloc_limit_min;
353 size_t malloc_limit_max;
354 double malloc_limit_growth_factor;
355
356 size_t oldmalloc_limit_min;
357 size_t oldmalloc_limit_max;
358 double oldmalloc_limit_growth_factor;
359
360 VALUE gc_stress;
361} ruby_gc_params_t;
362
363static ruby_gc_params_t gc_params = {
364 GC_HEAP_INIT_SLOTS,
365 GC_HEAP_FREE_SLOTS,
366 GC_HEAP_GROWTH_FACTOR,
367 GC_HEAP_GROWTH_MAX_SLOTS,
368
369 GC_HEAP_FREE_SLOTS_MIN_RATIO,
370 GC_HEAP_FREE_SLOTS_GOAL_RATIO,
371 GC_HEAP_FREE_SLOTS_MAX_RATIO,
372 GC_HEAP_OLDOBJECT_LIMIT_FACTOR,
373
374 GC_MALLOC_LIMIT_MIN,
375 GC_MALLOC_LIMIT_MAX,
376 GC_MALLOC_LIMIT_GROWTH_FACTOR,
377
378 GC_OLDMALLOC_LIMIT_MIN,
379 GC_OLDMALLOC_LIMIT_MAX,
380 GC_OLDMALLOC_LIMIT_GROWTH_FACTOR,
381
382 FALSE,
383};
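/* Illustrative note: each default above can normally be overridden at boot via
 * an environment variable of the same name prefixed with "RUBY_", which
 * ruby_gc_set_params() reads into gc_params, e.g. (values are hypothetical):
 *
 *     RUBY_GC_HEAP_INIT_SLOTS=600000 RUBY_GC_HEAP_GROWTH_FACTOR=1.25 ruby app.rb
 */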
384
385/* GC_DEBUG:
386 * enable to embed GC debugging information.
387 */
388#ifndef GC_DEBUG
389#define GC_DEBUG 0
390#endif
391
392/* RGENGC_DEBUG:
393 * 1: basic information
394 * 2: remember set operation
395 * 3: mark
396 * 4:
397 * 5: sweep
398 */
399#ifndef RGENGC_DEBUG
400#ifdef RUBY_DEVEL
401#define RGENGC_DEBUG -1
402#else
403#define RGENGC_DEBUG 0
404#endif
405#endif
406#if RGENGC_DEBUG < 0 && !defined(_MSC_VER)
407# define RGENGC_DEBUG_ENABLED(level) (-(RGENGC_DEBUG) >= (level) && ruby_rgengc_debug >= (level))
408#elif defined(HAVE_VA_ARGS_MACRO)
409# define RGENGC_DEBUG_ENABLED(level) ((RGENGC_DEBUG) >= (level))
410#else
411# define RGENGC_DEBUG_ENABLED(level) 0
412#endif
413int ruby_rgengc_debug;
414
415/* RGENGC_CHECK_MODE
416 * 0: disable all assertions
417 * 1: enable assertions (to debug RGenGC)
418 * 2: enable internal consistency check at each GC (for debugging)
419 * 3: enable internal consistency check at each GC step (for debugging)
420 * 4: enable liveness check
421 * 5: show all references
422 */
423#ifndef RGENGC_CHECK_MODE
424#define RGENGC_CHECK_MODE 0
425#endif
426
427// Note: using RUBY_ASSERT_WHEN() would expand a macro inside expr (info by nobu).
428#define GC_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(RGENGC_CHECK_MODE > 0, expr, #expr)
429
430/* RGENGC_OLD_NEWOBJ_CHECK
431 * 0: disable this check.
432 * >0: make an OLD object at new object creation.
433 *
434 * Makes one OLD object per RGENGC_OLD_NEWOBJ_CHECK WB-protected object creations.
435 */
436#ifndef RGENGC_OLD_NEWOBJ_CHECK
437#define RGENGC_OLD_NEWOBJ_CHECK 0
438#endif
439
440/* RGENGC_PROFILE
441 * 0: disable RGenGC profiling
442 * 1: enable profiling for basic information
443 * 2: enable profiling for each type
444 */
445#ifndef RGENGC_PROFILE
446#define RGENGC_PROFILE 0
447#endif
448
449/* RGENGC_ESTIMATE_OLDMALLOC
450 * Enable/disable estimation of the increase in malloc'ed size attributable to old objects.
451 * If the estimate exceeds the threshold, a full GC is invoked.
452 * 0: disable estimation.
453 * 1: enable estimation.
454 */
455#ifndef RGENGC_ESTIMATE_OLDMALLOC
456#define RGENGC_ESTIMATE_OLDMALLOC 1
457#endif
458
459/* RGENGC_FORCE_MAJOR_GC
460 * Force major/full GC if this macro is not 0.
461 */
462#ifndef RGENGC_FORCE_MAJOR_GC
463#define RGENGC_FORCE_MAJOR_GC 0
464#endif
465
466#ifndef GC_PROFILE_MORE_DETAIL
467#define GC_PROFILE_MORE_DETAIL 0
468#endif
469#ifndef GC_PROFILE_DETAIL_MEMORY
470#define GC_PROFILE_DETAIL_MEMORY 0
471#endif
472#ifndef GC_ENABLE_INCREMENTAL_MARK
473#define GC_ENABLE_INCREMENTAL_MARK USE_RINCGC
474#endif
475#ifndef GC_ENABLE_LAZY_SWEEP
476#define GC_ENABLE_LAZY_SWEEP 1
477#endif
478#ifndef CALC_EXACT_MALLOC_SIZE
479#define CALC_EXACT_MALLOC_SIZE USE_GC_MALLOC_OBJ_INFO_DETAILS
480#endif
481#if defined(HAVE_MALLOC_USABLE_SIZE) || CALC_EXACT_MALLOC_SIZE > 0
482#ifndef MALLOC_ALLOCATED_SIZE
483#define MALLOC_ALLOCATED_SIZE 0
484#endif
485#else
486#define MALLOC_ALLOCATED_SIZE 0
487#endif
488#ifndef MALLOC_ALLOCATED_SIZE_CHECK
489#define MALLOC_ALLOCATED_SIZE_CHECK 0
490#endif
491
492#ifndef GC_DEBUG_STRESS_TO_CLASS
493#define GC_DEBUG_STRESS_TO_CLASS 0
494#endif
495
496#ifndef RGENGC_OBJ_INFO
497#define RGENGC_OBJ_INFO (RGENGC_DEBUG | RGENGC_CHECK_MODE)
498#endif
499
500typedef enum {
501 GPR_FLAG_NONE = 0x000,
502 /* major reason */
503 GPR_FLAG_MAJOR_BY_NOFREE = 0x001,
504 GPR_FLAG_MAJOR_BY_OLDGEN = 0x002,
505 GPR_FLAG_MAJOR_BY_SHADY = 0x004,
506 GPR_FLAG_MAJOR_BY_FORCE = 0x008,
507#if RGENGC_ESTIMATE_OLDMALLOC
508 GPR_FLAG_MAJOR_BY_OLDMALLOC = 0x020,
509#endif
510 GPR_FLAG_MAJOR_MASK = 0x0ff,
511
512 /* gc reason */
513 GPR_FLAG_NEWOBJ = 0x100,
514 GPR_FLAG_MALLOC = 0x200,
515 GPR_FLAG_METHOD = 0x400,
516 GPR_FLAG_CAPI = 0x800,
517 GPR_FLAG_STRESS = 0x1000,
518
519 /* others */
520 GPR_FLAG_IMMEDIATE_SWEEP = 0x2000,
521 GPR_FLAG_HAVE_FINALIZE = 0x4000,
522 GPR_FLAG_IMMEDIATE_MARK = 0x8000,
523 GPR_FLAG_FULL_MARK = 0x10000,
524 GPR_FLAG_COMPACT = 0x20000,
525
526 GPR_DEFAULT_REASON =
527 (GPR_FLAG_FULL_MARK | GPR_FLAG_IMMEDIATE_MARK |
528 GPR_FLAG_IMMEDIATE_SWEEP | GPR_FLAG_CAPI),
529} gc_profile_record_flag;
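/* Example (illustrative): a GC "reason" is a bitmask built from these flags.
 * The locals below are hypothetical, but the arithmetic mirrors how a cycle's
 * cause is recorded and later decoded for GC.latest_gc_info / profiling:
 *
 *     unsigned int reason = GPR_FLAG_MAJOR_BY_OLDGEN | GPR_FLAG_MALLOC | GPR_FLAG_FULL_MARK;
 *     int was_major = (reason & GPR_FLAG_MAJOR_MASK) != 0;  // why it became a major GC
 *     int by_malloc = (reason & GPR_FLAG_MALLOC) != 0;      // triggered by malloc pressure
 */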
530
531typedef struct gc_profile_record {
532 unsigned int flags;
533
534 double gc_time;
535 double gc_invoke_time;
536
537 size_t heap_total_objects;
538 size_t heap_use_size;
539 size_t heap_total_size;
540 size_t moved_objects;
541
542#if GC_PROFILE_MORE_DETAIL
543 double gc_mark_time;
544 double gc_sweep_time;
545
546 size_t heap_use_pages;
547 size_t heap_live_objects;
548 size_t heap_free_objects;
549
550 size_t allocate_increase;
551 size_t allocate_limit;
552
553 double prepare_time;
554 size_t removing_objects;
555 size_t empty_objects;
556#if GC_PROFILE_DETAIL_MEMORY
557 long maxrss;
558 long minflt;
559 long majflt;
560#endif
561#endif
562#if MALLOC_ALLOCATED_SIZE
563 size_t allocated_size;
564#endif
565
566#if RGENGC_PROFILE > 0
567 size_t old_objects;
568 size_t remembered_normal_objects;
569 size_t remembered_shady_objects;
570#endif
571} gc_profile_record;
572
573struct RMoved {
574 VALUE flags;
575 VALUE dummy;
576 VALUE destination;
577 shape_id_t original_shape_id;
578};
579
580#define RMOVED(obj) ((struct RMoved *)(obj))
581
582typedef struct RVALUE {
583 union {
584 struct {
585 VALUE flags; /* always 0 for freed obj */
586 struct RVALUE *next;
587 } free;
588 struct RMoved moved;
589 struct RBasic basic;
590 struct RObject object;
591 struct RClass klass;
592 struct RFloat flonum;
593 struct RString string;
594 struct RArray array;
595 struct RRegexp regexp;
596 struct RHash hash;
597 struct RData data;
598 struct RTypedData typeddata;
599 struct RStruct rstruct;
600 struct RBignum bignum;
601 struct RFile file;
602 struct RMatch match;
603 struct RRational rational;
604 struct RComplex complex;
605 struct RSymbol symbol;
606 union {
607 rb_cref_t cref;
608 struct vm_svar svar;
609 struct vm_throw_data throw_data;
610 struct vm_ifunc ifunc;
611 struct MEMO memo;
612 struct rb_method_entry_struct ment;
613 const rb_iseq_t iseq;
614 rb_env_t env;
615 struct rb_imemo_tmpbuf_struct alloc;
616 rb_ast_t ast;
617 } imemo;
618 struct {
619 struct RBasic basic;
620 VALUE v1;
621 VALUE v2;
622 VALUE v3;
623 } values;
624 } as;
625
626 /* Start of RVALUE_OVERHEAD.
627 * Do not directly read these members from the RVALUE as they're located
628 * at the end of the slot (which may differ in size depending on the size
629 * pool). */
630#if RACTOR_CHECK_MODE
631 uint32_t _ractor_belonging_id;
632#endif
633#if GC_DEBUG
634 const char *file;
635 int line;
636#endif
637} RVALUE;
638
639#if RACTOR_CHECK_MODE
640# define RVALUE_OVERHEAD (sizeof(RVALUE) - offsetof(RVALUE, _ractor_belonging_id))
641#elif GC_DEBUG
642# define RVALUE_OVERHEAD (sizeof(RVALUE) - offsetof(RVALUE, file))
643#else
644# define RVALUE_OVERHEAD 0
645#endif
646
647STATIC_ASSERT(sizeof_rvalue, sizeof(RVALUE) == (SIZEOF_VALUE * 5) + RVALUE_OVERHEAD);
648STATIC_ASSERT(alignof_rvalue, RUBY_ALIGNOF(RVALUE) == SIZEOF_VALUE);
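/* Illustrative arithmetic, assuming a typical 64-bit build with GC_DEBUG and
 * RACTOR_CHECK_MODE disabled: SIZEOF_VALUE == 8 and RVALUE_OVERHEAD == 0, so the
 * assertion above pins the base slot size at sizeof(RVALUE) == 5 * 8 == 40 bytes. */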
649
650typedef uintptr_t bits_t;
651enum {
652 BITS_SIZE = sizeof(bits_t),
653 BITS_BITLENGTH = ( BITS_SIZE * CHAR_BIT )
654};
655#define popcount_bits rb_popcount_intptr
656
657struct heap_page_header {
658 struct heap_page *page;
659};
660
661struct heap_page_body {
662 struct heap_page_header header;
663 /* char gap[]; */
664 /* RVALUE values[]; */
665};
666
667struct gc_list {
668 VALUE *varptr;
669 struct gc_list *next;
670};
671
672#define STACK_CHUNK_SIZE 500
673
674typedef struct stack_chunk {
675 VALUE data[STACK_CHUNK_SIZE];
676 struct stack_chunk *next;
677} stack_chunk_t;
678
679typedef struct mark_stack {
680 stack_chunk_t *chunk;
681 stack_chunk_t *cache;
682 int index;
683 int limit;
684 size_t cache_size;
685 size_t unused_cache_size;
686} mark_stack_t;
687
688#define SIZE_POOL_EDEN_HEAP(size_pool) (&(size_pool)->eden_heap)
689#define SIZE_POOL_TOMB_HEAP(size_pool) (&(size_pool)->tomb_heap)
690
691typedef struct rb_heap_struct {
692 struct heap_page *free_pages;
693 struct ccan_list_head pages;
694 struct heap_page *sweeping_page; /* iterator for .pages */
695 struct heap_page *compact_cursor;
696 uintptr_t compact_cursor_index;
697#if GC_ENABLE_INCREMENTAL_MARK
698 struct heap_page *pooled_pages;
699#endif
700 size_t total_pages; /* total page count in a heap */
701 size_t total_slots; /* total slot count (about total_pages * HEAP_PAGE_OBJ_LIMIT) */
702} rb_heap_t;
703
704typedef struct rb_size_pool_struct {
705 short slot_size;
706
707 size_t allocatable_pages;
708
709 /* Basic statistics */
710 size_t total_allocated_pages;
711 size_t total_freed_pages;
712 size_t force_major_gc_count;
713
714#if USE_RVARGC
715 /* Sweeping statistics */
716 size_t freed_slots;
717 size_t empty_slots;
718#endif
719
720 rb_heap_t eden_heap;
721 rb_heap_t tomb_heap;
722} rb_size_pool_t;
723
724enum gc_mode {
725 gc_mode_none,
726 gc_mode_marking,
727 gc_mode_sweeping,
728 gc_mode_compacting,
729};
730
731typedef struct rb_objspace {
732 struct {
733 size_t limit;
734 size_t increase;
735#if MALLOC_ALLOCATED_SIZE
736 size_t allocated_size;
737 size_t allocations;
738#endif
739
740 } malloc_params;
741
742 struct {
743 unsigned int mode : 2;
744 unsigned int immediate_sweep : 1;
745 unsigned int dont_gc : 1;
746 unsigned int dont_incremental : 1;
747 unsigned int during_gc : 1;
748 unsigned int during_compacting : 1;
749 unsigned int gc_stressful: 1;
750 unsigned int has_hook: 1;
751 unsigned int during_minor_gc : 1;
752#if GC_ENABLE_INCREMENTAL_MARK
753 unsigned int during_incremental_marking : 1;
754#endif
755 unsigned int measure_gc : 1;
756 } flags;
757
758 rb_event_flag_t hook_events;
759 size_t total_allocated_objects;
760 VALUE next_object_id;
761
762 rb_size_pool_t size_pools[SIZE_POOL_COUNT];
763
764 struct {
765 rb_atomic_t finalizing;
766 } atomic_flags;
767
768 mark_stack_t mark_stack;
769 size_t marked_slots;
770
771 struct {
772 struct heap_page **sorted;
773 size_t allocated_pages;
774 size_t allocatable_pages;
775 size_t sorted_length;
776 uintptr_t range[2];
777 size_t freeable_pages;
778
779 /* final */
780 size_t final_slots;
781 VALUE deferred_final;
782 } heap_pages;
783
784 st_table *finalizer_table;
785
786 struct {
787 int run;
788 unsigned int latest_gc_info;
789 gc_profile_record *records;
790 gc_profile_record *current_record;
791 size_t next_index;
792 size_t size;
793
794#if GC_PROFILE_MORE_DETAIL
795 double prepare_time;
796#endif
797 double invoke_time;
798
799 size_t minor_gc_count;
800 size_t major_gc_count;
801 size_t compact_count;
802 size_t read_barrier_faults;
803#if RGENGC_PROFILE > 0
804 size_t total_generated_normal_object_count;
805 size_t total_generated_shady_object_count;
806 size_t total_shade_operation_count;
807 size_t total_promoted_count;
808 size_t total_remembered_normal_object_count;
809 size_t total_remembered_shady_object_count;
810
811#if RGENGC_PROFILE >= 2
812 size_t generated_normal_object_count_types[RUBY_T_MASK];
813 size_t generated_shady_object_count_types[RUBY_T_MASK];
814 size_t shade_operation_count_types[RUBY_T_MASK];
815 size_t promoted_types[RUBY_T_MASK];
816 size_t remembered_normal_object_count_types[RUBY_T_MASK];
817 size_t remembered_shady_object_count_types[RUBY_T_MASK];
818#endif
819#endif /* RGENGC_PROFILE */
820
821 /* temporary profiling space */
822 double gc_sweep_start_time;
823 size_t total_allocated_objects_at_gc_start;
824 size_t heap_used_at_gc_start;
825
826 /* basic statistics */
827 size_t count;
828 size_t total_freed_objects;
829 uint64_t total_time_ns;
830 struct timespec start_time;
831 } profile;
832 struct gc_list *global_list;
833
834 VALUE gc_stress_mode;
835
836 struct {
837 VALUE parent_object;
838 int need_major_gc;
839 size_t last_major_gc;
840 size_t uncollectible_wb_unprotected_objects;
841 size_t uncollectible_wb_unprotected_objects_limit;
842 size_t old_objects;
843 size_t old_objects_limit;
844
845#if RGENGC_ESTIMATE_OLDMALLOC
846 size_t oldmalloc_increase;
847 size_t oldmalloc_increase_limit;
848#endif
849
850#if RGENGC_CHECK_MODE >= 2
851 struct st_table *allrefs_table;
852 size_t error_count;
853#endif
854 } rgengc;
855
856 struct {
857 size_t considered_count_table[T_MASK];
858 size_t moved_count_table[T_MASK];
859 size_t moved_up_count_table[T_MASK];
860 size_t moved_down_count_table[T_MASK];
861 size_t total_moved;
862 } rcompactor;
863
864#if GC_ENABLE_INCREMENTAL_MARK
865 struct {
866 size_t pooled_slots;
867 size_t step_slots;
868 } rincgc;
869#endif
870
871 st_table *id_to_obj_tbl;
872 st_table *obj_to_id_tbl;
873
874#if GC_DEBUG_STRESS_TO_CLASS
875 VALUE stress_to_class;
876#endif
877} rb_objspace_t;
878
879
880#ifndef HEAP_PAGE_ALIGN_LOG
881/* default tiny heap size: 64KiB */
882#define HEAP_PAGE_ALIGN_LOG 16
883#endif
884
885#define BASE_SLOT_SIZE sizeof(RVALUE)
886
887#define CEILDIV(i, mod) roomof(i, mod)
888enum {
889 HEAP_PAGE_ALIGN = (1UL << HEAP_PAGE_ALIGN_LOG),
890 HEAP_PAGE_ALIGN_MASK = (~(~0UL << HEAP_PAGE_ALIGN_LOG)),
891 HEAP_PAGE_SIZE = HEAP_PAGE_ALIGN,
892 HEAP_PAGE_OBJ_LIMIT = (unsigned int)((HEAP_PAGE_SIZE - sizeof(struct heap_page_header)) / BASE_SLOT_SIZE),
893 HEAP_PAGE_BITMAP_LIMIT = CEILDIV(CEILDIV(HEAP_PAGE_SIZE, BASE_SLOT_SIZE), BITS_BITLENGTH),
894 HEAP_PAGE_BITMAP_SIZE = (BITS_SIZE * HEAP_PAGE_BITMAP_LIMIT),
895};
896#define HEAP_PAGE_ALIGN (1 << HEAP_PAGE_ALIGN_LOG)
897#define HEAP_PAGE_SIZE HEAP_PAGE_ALIGN
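/* Worked example of the derived constants, assuming a 64-bit build
 * (BASE_SLOT_SIZE == 40, BITS_BITLENGTH == 64) and the default
 * HEAP_PAGE_ALIGN_LOG of 16:
 *
 *     HEAP_PAGE_SIZE         == 65536
 *     HEAP_PAGE_OBJ_LIMIT    == (65536 - sizeof(struct heap_page_header)) / 40 == 1638
 *     HEAP_PAGE_BITMAP_LIMIT == CEILDIV(CEILDIV(65536, 40), 64) == CEILDIV(1639, 64) == 26
 *     HEAP_PAGE_BITMAP_SIZE  == 8 * 26 == 208 bytes per bitmap
 */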
898
899#if GC_ENABLE_INCREMENTAL_MARK && !defined(INCREMENTAL_MARK_STEP_ALLOCATIONS)
900# define INCREMENTAL_MARK_STEP_ALLOCATIONS 500
901#endif
902
903#undef INIT_HEAP_PAGE_ALLOC_USE_MMAP
904/* Must define either HEAP_PAGE_ALLOC_USE_MMAP or
905 * INIT_HEAP_PAGE_ALLOC_USE_MMAP. */
906
907#ifndef HAVE_MMAP
908/* We can't use mmap of course, if it is not available. */
909static const bool HEAP_PAGE_ALLOC_USE_MMAP = false;
910
911#elif defined(__wasm__)
912/* wasmtime does not have proper support for mmap.
913 * See https://github.com/bytecodealliance/wasmtime/blob/main/docs/WASI-rationale.md#why-no-mmap-and-friends
914 */
915static const bool HEAP_PAGE_ALLOC_USE_MMAP = false;
916
917#elif HAVE_CONST_PAGE_SIZE
918/* If we have the PAGE_SIZE and it is a constant, then we can directly use it. */
919static const bool HEAP_PAGE_ALLOC_USE_MMAP = (PAGE_SIZE <= HEAP_PAGE_SIZE);
920
921#elif defined(PAGE_MAX_SIZE) && (PAGE_MAX_SIZE <= HEAP_PAGE_SIZE)
922/* If we can use the maximum page size. */
923static const bool HEAP_PAGE_ALLOC_USE_MMAP = true;
924
925#elif defined(PAGE_SIZE)
926/* If the PAGE_SIZE macro can be used dynamically. */
927# define INIT_HEAP_PAGE_ALLOC_USE_MMAP (PAGE_SIZE <= HEAP_PAGE_SIZE)
928
929#elif defined(HAVE_SYSCONF) && defined(_SC_PAGE_SIZE)
930/* If we can use sysconf to determine the page size. */
931# define INIT_HEAP_PAGE_ALLOC_USE_MMAP (sysconf(_SC_PAGE_SIZE) <= HEAP_PAGE_SIZE)
932
933#else
934/* Otherwise we can't determine the system page size, so don't use mmap. */
935static const bool HEAP_PAGE_ALLOC_USE_MMAP = false;
936#endif
937
938#ifdef INIT_HEAP_PAGE_ALLOC_USE_MMAP
939/* We can determine the system page size at runtime. */
940# define HEAP_PAGE_ALLOC_USE_MMAP (heap_page_alloc_use_mmap != false)
941
942static bool heap_page_alloc_use_mmap;
943#endif
944
945struct heap_page {
946 short slot_size;
947 short total_slots;
948 short free_slots;
949 short final_slots;
950 struct {
951 unsigned int before_sweep : 1;
952 unsigned int has_remembered_objects : 1;
953 unsigned int has_uncollectible_shady_objects : 1;
954 unsigned int in_tomb : 1;
955 } flags;
956
957 rb_size_pool_t *size_pool;
958
959 struct heap_page *free_next;
960 uintptr_t start;
961 RVALUE *freelist;
962 struct ccan_list_node page_node;
963
964 bits_t wb_unprotected_bits[HEAP_PAGE_BITMAP_LIMIT];
965 /* the following three bitmaps are cleared at the beginning of full GC */
966 bits_t mark_bits[HEAP_PAGE_BITMAP_LIMIT];
967 bits_t uncollectible_bits[HEAP_PAGE_BITMAP_LIMIT];
968 bits_t marking_bits[HEAP_PAGE_BITMAP_LIMIT];
969
970 /* If set, the object is not movable */
971 bits_t pinned_bits[HEAP_PAGE_BITMAP_LIMIT];
972};
973
974/*
975 * When asan is enabled, this will prohibit writing to the freelist until it is unlocked
976 */
977static void
978asan_lock_freelist(struct heap_page *page)
979{
980 asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
981}
982
983/*
984 * When asan is enabled, this will enable the ability to write to the freelist
985 */
986static void
987asan_unlock_freelist(struct heap_page *page)
988{
989 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
990}
991
992#define GET_PAGE_BODY(x) ((struct heap_page_body *)((bits_t)(x) & ~(HEAP_PAGE_ALIGN_MASK)))
993#define GET_PAGE_HEADER(x) (&GET_PAGE_BODY(x)->header)
994#define GET_HEAP_PAGE(x) (GET_PAGE_HEADER(x)->page)
995
996#define NUM_IN_PAGE(p) (((bits_t)(p) & HEAP_PAGE_ALIGN_MASK) / BASE_SLOT_SIZE)
997#define BITMAP_INDEX(p) (NUM_IN_PAGE(p) / BITS_BITLENGTH )
998#define BITMAP_OFFSET(p) (NUM_IN_PAGE(p) & (BITS_BITLENGTH-1))
999#define BITMAP_BIT(p) ((bits_t)1 << BITMAP_OFFSET(p))
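/* Illustrative example, assuming 64 KiB page alignment, 40-byte base slots and
 * 64-bit words: for an object at address 0x7f12340a0c80 the offset inside its
 * page body is 0x0c80 == 3200, so NUM_IN_PAGE == 3200 / 40 == 80,
 * BITMAP_INDEX == 80 / 64 == 1 and BITMAP_OFFSET == 80 % 64 == 16; the object is
 * tracked by bit 16 of word 1 in each per-page bitmap. */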
1000
1001/* Bitmap Operations */
1002#define MARKED_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] & BITMAP_BIT(p))
1003#define MARK_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] | BITMAP_BIT(p))
1004#define CLEAR_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] & ~BITMAP_BIT(p))
1005
1006/* getting bitmap */
1007#define GET_HEAP_MARK_BITS(x) (&GET_HEAP_PAGE(x)->mark_bits[0])
1008#define GET_HEAP_PINNED_BITS(x) (&GET_HEAP_PAGE(x)->pinned_bits[0])
1009#define GET_HEAP_UNCOLLECTIBLE_BITS(x) (&GET_HEAP_PAGE(x)->uncollectible_bits[0])
1010#define GET_HEAP_WB_UNPROTECTED_BITS(x) (&GET_HEAP_PAGE(x)->wb_unprotected_bits[0])
1011#define GET_HEAP_MARKING_BITS(x) (&GET_HEAP_PAGE(x)->marking_bits[0])
1012
1013#define GC_SWEEP_PAGES_FREEABLE_PER_STEP 3
1014
1015/* Aliases */
1016#define rb_objspace (*rb_objspace_of(GET_VM()))
1017#define rb_objspace_of(vm) ((vm)->objspace)
1018
1019#define ruby_initial_gc_stress gc_params.gc_stress
1020
1021VALUE *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
1022
1023#define malloc_limit objspace->malloc_params.limit
1024#define malloc_increase objspace->malloc_params.increase
1025#define malloc_allocated_size objspace->malloc_params.allocated_size
1026#define heap_pages_sorted objspace->heap_pages.sorted
1027#define heap_allocated_pages objspace->heap_pages.allocated_pages
1028#define heap_pages_sorted_length objspace->heap_pages.sorted_length
1029#define heap_pages_lomem objspace->heap_pages.range[0]
1030#define heap_pages_himem objspace->heap_pages.range[1]
1031#define heap_pages_freeable_pages objspace->heap_pages.freeable_pages
1032#define heap_pages_final_slots objspace->heap_pages.final_slots
1033#define heap_pages_deferred_final objspace->heap_pages.deferred_final
1034#define size_pools objspace->size_pools
1035#define during_gc objspace->flags.during_gc
1036#define finalizing objspace->atomic_flags.finalizing
1037#define finalizer_table objspace->finalizer_table
1038#define global_list objspace->global_list
1039#define ruby_gc_stressful objspace->flags.gc_stressful
1040#define ruby_gc_stress_mode objspace->gc_stress_mode
1041#if GC_DEBUG_STRESS_TO_CLASS
1042#define stress_to_class objspace->stress_to_class
1043#else
1044#define stress_to_class 0
1045#endif
1046
1047#if 0
1048#define dont_gc_on() (fprintf(stderr, "dont_gc_on@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 1)
1049#define dont_gc_off() (fprintf(stderr, "dont_gc_off@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 0)
1050#define dont_gc_set(b) (fprintf(stderr, "dont_gc_set(%d)@%s:%d\n", (int)(b), __FILE__, __LINE__), objspace->flags.dont_gc = (b))
1051#define dont_gc_val() (objspace->flags.dont_gc)
1052#else
1053#define dont_gc_on() (objspace->flags.dont_gc = 1)
1054#define dont_gc_off() (objspace->flags.dont_gc = 0)
1055#define dont_gc_set(b) (((int)b), objspace->flags.dont_gc = (b))
1056#define dont_gc_val() (objspace->flags.dont_gc)
1057#endif
1058
1059static inline enum gc_mode
1060gc_mode_verify(enum gc_mode mode)
1061{
1062#if RGENGC_CHECK_MODE > 0
1063 switch (mode) {
1064 case gc_mode_none:
1065 case gc_mode_marking:
1066 case gc_mode_sweeping:
1067 case gc_mode_compacting:
1068 break;
1069 default:
1070 rb_bug("gc_mode_verify: unreachable (%d)", (int)mode);
1071 }
1072#endif
1073 return mode;
1074}
1075
1076static inline bool
1077has_sweeping_pages(rb_objspace_t *objspace)
1078{
1079 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1080 if (SIZE_POOL_EDEN_HEAP(&size_pools[i])->sweeping_page) {
1081 return TRUE;
1082 }
1083 }
1084 return FALSE;
1085}
1086
1087static inline size_t
1088heap_eden_total_pages(rb_objspace_t *objspace)
1089{
1090 size_t count = 0;
1091 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1092 count += SIZE_POOL_EDEN_HEAP(&size_pools[i])->total_pages;
1093 }
1094 return count;
1095}
1096
1097static inline size_t
1098heap_eden_total_slots(rb_objspace_t *objspace)
1099{
1100 size_t count = 0;
1101 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1102 count += SIZE_POOL_EDEN_HEAP(&size_pools[i])->total_slots;
1103 }
1104 return count;
1105}
1106
1107static inline size_t
1108heap_tomb_total_pages(rb_objspace_t *objspace)
1109{
1110 size_t count = 0;
1111 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1112 count += SIZE_POOL_TOMB_HEAP(&size_pools[i])->total_pages;
1113 }
1114 return count;
1115}
1116
1117static inline size_t
1118heap_allocatable_pages(rb_objspace_t *objspace)
1119{
1120 size_t count = 0;
1121 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1122 count += size_pools[i].allocatable_pages;
1123 }
1124 return count;
1125}
1126
1127static inline size_t
1128heap_allocatable_slots(rb_objspace_t *objspace)
1129{
1130 size_t count = 0;
1131 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1132 rb_size_pool_t *size_pool = &size_pools[i];
1133 int slot_size_multiple = size_pool->slot_size / BASE_SLOT_SIZE;
1134 count += size_pool->allocatable_pages * HEAP_PAGE_OBJ_LIMIT / slot_size_multiple;
1135 }
1136 return count;
1137}
1138
1139static inline size_t
1140total_allocated_pages(rb_objspace_t *objspace)
1141{
1142 size_t count = 0;
1143 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1144 rb_size_pool_t *size_pool = &size_pools[i];
1145 count += size_pool->total_allocated_pages;
1146 }
1147 return count;
1148}
1149
1150static inline size_t
1151total_freed_pages(rb_objspace_t *objspace)
1152{
1153 size_t count = 0;
1154 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1155 rb_size_pool_t *size_pool = &size_pools[i];
1156 count += size_pool->total_freed_pages;
1157 }
1158 return count;
1159}
1160
1161#define gc_mode(objspace) gc_mode_verify((enum gc_mode)(objspace)->flags.mode)
1162#define gc_mode_set(objspace, mode) ((objspace)->flags.mode = (unsigned int)gc_mode_verify(mode))
1163
1164#define is_marking(objspace) (gc_mode(objspace) == gc_mode_marking)
1165#define is_sweeping(objspace) (gc_mode(objspace) == gc_mode_sweeping)
1166#define is_full_marking(objspace) ((objspace)->flags.during_minor_gc == FALSE)
1167#if GC_ENABLE_INCREMENTAL_MARK
1168#define is_incremental_marking(objspace) ((objspace)->flags.during_incremental_marking != FALSE)
1169#else
1170#define is_incremental_marking(objspace) FALSE
1171#endif
1172#if GC_ENABLE_INCREMENTAL_MARK
1173#define will_be_incremental_marking(objspace) ((objspace)->rgengc.need_major_gc != GPR_FLAG_NONE)
1174#else
1175#define will_be_incremental_marking(objspace) FALSE
1176#endif
1177#if GC_ENABLE_INCREMENTAL_MARK
1178#define GC_INCREMENTAL_SWEEP_SLOT_COUNT 2048
1179#endif
1180#define is_lazy_sweeping(objspace) (GC_ENABLE_LAZY_SWEEP && has_sweeping_pages(objspace))
1181
1182#if SIZEOF_LONG == SIZEOF_VOIDP
1183# define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG)
1184# define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG) /* unset FIXNUM_FLAG */
1185#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
1186# define nonspecial_obj_id(obj) LL2NUM((SIGNED_VALUE)(obj) / 2)
1187# define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \
1188 ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1))
1189#else
1190# error not supported
1191#endif
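/* Illustrative example of the encoding above on a platform where
 * SIZEOF_LONG == SIZEOF_VOIDP (the address is hypothetical): for a heap object
 * at 0x556000a0, nonspecial_obj_id() yields 0x556000a1 -- the pointer with
 * FIXNUM_FLAG set -- and obj_id_to_ref() clears that bit to recover the
 * original reference. */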
1192
1193#define RANY(o) ((RVALUE*)(o))
1194
1195struct RZombie {
1196 struct RBasic basic;
1197 VALUE next;
1198 void (*dfree)(void *);
1199 void *data;
1200};
1201
1202#define RZOMBIE(o) ((struct RZombie *)(o))
1203
1204#define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]
1205
1206#if RUBY_MARK_FREE_DEBUG
1207int ruby_gc_debug_indent = 0;
1208#endif
1209VALUE rb_mGC;
1210int ruby_disable_gc = 0;
1211int ruby_enable_autocompact = 0;
1212
1213void rb_iseq_mark(const rb_iseq_t *iseq);
1214void rb_iseq_update_references(rb_iseq_t *iseq);
1215void rb_iseq_free(const rb_iseq_t *iseq);
1216size_t rb_iseq_memsize(const rb_iseq_t *iseq);
1217void rb_vm_update_references(void *ptr);
1218
1219void rb_gcdebug_print_obj_condition(VALUE obj);
1220
1221static VALUE define_final0(VALUE obj, VALUE block);
1222
1223NORETURN(static void *gc_vraise(void *ptr));
1224NORETURN(static void gc_raise(VALUE exc, const char *fmt, ...));
1225NORETURN(static void negative_size_allocation_error(const char *));
1226
1227static void init_mark_stack(mark_stack_t *stack);
1228
1229static int ready_to_gc(rb_objspace_t *objspace);
1230
1231static int garbage_collect(rb_objspace_t *, unsigned int reason);
1232
1233static int gc_start(rb_objspace_t *objspace, unsigned int reason);
1234static void gc_rest(rb_objspace_t *objspace);
1235
1236enum gc_enter_event {
1237 gc_enter_event_start,
1238 gc_enter_event_mark_continue,
1239 gc_enter_event_sweep_continue,
1240 gc_enter_event_rest,
1241 gc_enter_event_finalizer,
1242 gc_enter_event_rb_memerror,
1243};
1244
1245static inline void gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev);
1246static inline void gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev);
1247
1248static void gc_marks(rb_objspace_t *objspace, int full_mark);
1249static void gc_marks_start(rb_objspace_t *objspace, int full);
1250static void gc_marks_finish(rb_objspace_t *objspace);
1251static void gc_marks_rest(rb_objspace_t *objspace);
1252static void gc_marks_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap);
1253
1254static void gc_sweep(rb_objspace_t *objspace);
1255static void gc_sweep_start(rb_objspace_t *objspace);
1256#if USE_RVARGC
1257static void gc_sweep_finish_size_pool(rb_objspace_t *objspace, rb_size_pool_t *size_pool);
1258#endif
1259static void gc_sweep_finish(rb_objspace_t *objspace);
1260static int gc_sweep_step(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap);
1261static void gc_sweep_rest(rb_objspace_t *objspace);
1262static void gc_sweep_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap);
1263
1264static inline void gc_mark(rb_objspace_t *objspace, VALUE ptr);
1265static inline void gc_pin(rb_objspace_t *objspace, VALUE ptr);
1266static inline void gc_mark_and_pin(rb_objspace_t *objspace, VALUE ptr);
1267static void gc_mark_ptr(rb_objspace_t *objspace, VALUE ptr);
1268NO_SANITIZE("memory", static void gc_mark_maybe(rb_objspace_t *objspace, VALUE ptr));
1269static void gc_mark_children(rb_objspace_t *objspace, VALUE ptr);
1270
1271static int gc_mark_stacked_objects_incremental(rb_objspace_t *, size_t count);
1272static int gc_mark_stacked_objects_all(rb_objspace_t *);
1273static void gc_grey(rb_objspace_t *objspace, VALUE ptr);
1274
1275static inline int gc_mark_set(rb_objspace_t *objspace, VALUE obj);
1276NO_SANITIZE("memory", static inline int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr));
1277
1278static void push_mark_stack(mark_stack_t *, VALUE);
1279static int pop_mark_stack(mark_stack_t *, VALUE *);
1280static size_t mark_stack_size(mark_stack_t *stack);
1281static void shrink_stack_chunk_cache(mark_stack_t *stack);
1282
1283static size_t obj_memsize_of(VALUE obj, int use_all_types);
1284static void gc_verify_internal_consistency(rb_objspace_t *objspace);
1285static int gc_verify_heap_page(rb_objspace_t *objspace, struct heap_page *page, VALUE obj);
1286static int gc_verify_heap_pages(rb_objspace_t *objspace);
1287
1288static void gc_stress_set(rb_objspace_t *objspace, VALUE flag);
1289static VALUE gc_disable_no_rest(rb_objspace_t *);
1290
1291static double getrusage_time(void);
1292static inline void gc_prof_setup_new_record(rb_objspace_t *objspace, unsigned int reason);
1293static inline void gc_prof_timer_start(rb_objspace_t *);
1294static inline void gc_prof_timer_stop(rb_objspace_t *);
1295static inline void gc_prof_mark_timer_start(rb_objspace_t *);
1296static inline void gc_prof_mark_timer_stop(rb_objspace_t *);
1297static inline void gc_prof_sweep_timer_start(rb_objspace_t *);
1298static inline void gc_prof_sweep_timer_stop(rb_objspace_t *);
1299static inline void gc_prof_set_malloc_info(rb_objspace_t *);
1300static inline void gc_prof_set_heap_info(rb_objspace_t *);
1301
1302#define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing) do { \
1303 if (gc_object_moved_p((_objspace), (VALUE)(_thing))) { \
1304 *(_type *)&(_thing) = (_type)RMOVED(_thing)->destination; \
1305 } \
1306} while (0)
1307
1308#define UPDATE_IF_MOVED(_objspace, _thing) TYPED_UPDATE_IF_MOVED(_objspace, VALUE, _thing)
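/* Example (illustrative): during compaction the reference-updating passes walk
 * each object's slots and rewrite any field that now points at a T_MOVED
 * forwarding cell, e.g.
 *
 *     UPDATE_IF_MOVED(objspace, RBASIC(obj)->klass);
 *
 * which replaces the stale pointer with RMOVED(...)->destination. */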
1309
1310#define gc_prof_record(objspace) (objspace)->profile.current_record
1311#define gc_prof_enabled(objspace) ((objspace)->profile.run && (objspace)->profile.current_record)
1312
1313#ifdef HAVE_VA_ARGS_MACRO
1314# define gc_report(level, objspace, ...) \
1315 if (!RGENGC_DEBUG_ENABLED(level)) {} else gc_report_body(level, objspace, __VA_ARGS__)
1316#else
1317# define gc_report if (!RGENGC_DEBUG_ENABLED(0)) {} else gc_report_body
1318#endif
1319PRINTF_ARGS(static void gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...), 3, 4);
1320static const char *obj_info(VALUE obj);
1321static const char *obj_type_name(VALUE obj);
1322
1323/*
1324 * 1 - TSC (H/W Time Stamp Counter)
1325 * 2 - getrusage
1326 */
1327#ifndef TICK_TYPE
1328#define TICK_TYPE 1
1329#endif
1330
1331#if USE_TICK_T
1332
1333#if TICK_TYPE == 1
1334/* the following code is only for internal tuning. */
1335
1336/* Source code to use RDTSC is quoted and modified from
1337 * https://www.mcs.anl.gov/~kazutomo/rdtsc.html
1338 * written by Kazutomo Yoshii <kazutomo@mcs.anl.gov>
1339 */
1340
1341#if defined(__GNUC__) && defined(__i386__)
1342typedef unsigned long long tick_t;
1343#define PRItick "llu"
1344static inline tick_t
1345tick(void)
1346{
1347 unsigned long long int x;
1348 __asm__ __volatile__ ("rdtsc" : "=A" (x));
1349 return x;
1350}
1351
1352#elif defined(__GNUC__) && defined(__x86_64__)
1353typedef unsigned long long tick_t;
1354#define PRItick "llu"
1355
1356static __inline__ tick_t
1357tick(void)
1358{
1359 unsigned long hi, lo;
1360 __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
1361 return ((unsigned long long)lo)|( ((unsigned long long)hi)<<32);
1362}
1363
1364#elif defined(__powerpc64__) && GCC_VERSION_SINCE(4,8,0)
1365typedef unsigned long long tick_t;
1366#define PRItick "llu"
1367
1368static __inline__ tick_t
1369tick(void)
1370{
1371 unsigned long long val = __builtin_ppc_get_timebase();
1372 return val;
1373}
1374
1375/* Implementation for macOS PPC by @nobu
1376 * See: https://github.com/ruby/ruby/pull/5975#discussion_r890045558
1377 */
1378#elif defined(__POWERPC__) && defined(__APPLE__)
1379typedef unsigned long long tick_t;
1380#define PRItick "llu"
1381
1382static __inline__ tick_t
1383tick(void)
1384{
1385 unsigned long int upper, lower, tmp;
1386 # define mftbu(r) __asm__ volatile("mftbu %0" : "=r"(r))
1387 # define mftb(r) __asm__ volatile("mftb %0" : "=r"(r))
1388 do {
1389 mftbu(upper);
1390 mftb(lower);
1391 mftbu(tmp);
1392 } while (tmp != upper);
1393 return ((tick_t)upper << 32) | lower;
1394}
1395
1396#elif defined(__aarch64__) && defined(__GNUC__)
1397typedef unsigned long tick_t;
1398#define PRItick "lu"
1399
1400static __inline__ tick_t
1401tick(void)
1402{
1403 unsigned long val;
1404 __asm__ __volatile__ ("mrs %0, cntvct_el0" : "=r" (val));
1405 return val;
1406}
1407
1408
1409#elif defined(_WIN32) && defined(_MSC_VER)
1410#include <intrin.h>
1411typedef unsigned __int64 tick_t;
1412#define PRItick "llu"
1413
1414static inline tick_t
1415tick(void)
1416{
1417 return __rdtsc();
1418}
1419
1420#else /* use clock */
1421typedef clock_t tick_t;
1422#define PRItick "llu"
1423
1424static inline tick_t
1425tick(void)
1426{
1427 return clock();
1428}
1429#endif /* TSC */
1430
1431#elif TICK_TYPE == 2
1432typedef double tick_t;
1433#define PRItick "4.9f"
1434
1435static inline tick_t
1436tick(void)
1437{
1438 return getrusage_time();
1439}
1440#else /* TICK_TYPE */
1441#error "choose tick type"
1442#endif /* TICK_TYPE */
1443
1444#define MEASURE_LINE(expr) do { \
1445 volatile tick_t start_time = tick(); \
1446 volatile tick_t end_time; \
1447 expr; \
1448 end_time = tick(); \
1449 fprintf(stderr, "0\t%"PRItick"\t%s\n", end_time - start_time, #expr); \
1450} while (0)
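/* Usage sketch (illustrative): wrapping a single statement, e.g.
 *
 *     MEASURE_LINE(gc_sweep_step(objspace, size_pool, heap));
 *
 * prints the elapsed tick count for that call to stderr when tick measurement
 * is compiled in (USE_TICK_T). */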
1451
1452#else /* USE_TICK_T */
1453#define MEASURE_LINE(expr) expr
1454#endif /* USE_TICK_T */
1455
1456static inline void *
1457asan_unpoison_object_temporary(VALUE obj)
1458{
1459 void *ptr = asan_poisoned_object_p(obj);
1460 asan_unpoison_object(obj, false);
1461 return ptr;
1462}
1463
1464static inline void *
1465asan_poison_object_restore(VALUE obj, void *ptr)
1466{
1467 if (ptr) {
1468 asan_poison_object(obj);
1469 }
1470 return NULL;
1471}
1472
1473#define asan_unpoisoning_object(obj) \
1474 for (void *poisoned = asan_unpoison_object_temporary(obj), \
1475 *unpoisoning = &poisoned; /* flag to loop just once */ \
1476 unpoisoning; \
1477 unpoisoning = asan_poison_object_restore(obj, poisoned))
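/* Usage sketch (illustrative): the single-iteration for-loop above lets callers
 * write
 *
 *     asan_unpoisoning_object(obj) {
 *         ... read the slot behind obj safely ...
 *     }
 *
 * the slot is unpoisoned on entry and re-poisoned on exit, but only if it was
 * poisoned to begin with. */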
1478
1479#define FL_CHECK2(name, x, pred) \
1480 ((RGENGC_CHECK_MODE && SPECIAL_CONST_P(x)) ? \
1481 (rb_bug(name": SPECIAL_CONST (%p)", (void *)(x)), 0) : (pred))
1482#define FL_TEST2(x,f) FL_CHECK2("FL_TEST2", x, FL_TEST_RAW((x),(f)) != 0)
1483#define FL_SET2(x,f) FL_CHECK2("FL_SET2", x, RBASIC(x)->flags |= (f))
1484#define FL_UNSET2(x,f) FL_CHECK2("FL_UNSET2", x, RBASIC(x)->flags &= ~(f))
1485
1486#define RVALUE_MARK_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), (obj))
1487#define RVALUE_PIN_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), (obj))
1488#define RVALUE_PAGE_MARKED(page, obj) MARKED_IN_BITMAP((page)->mark_bits, (obj))
1489
1490#define RVALUE_WB_UNPROTECTED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), (obj))
1491#define RVALUE_UNCOLLECTIBLE_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), (obj))
1492#define RVALUE_MARKING_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), (obj))
1493
1494#define RVALUE_PAGE_WB_UNPROTECTED(page, obj) MARKED_IN_BITMAP((page)->wb_unprotected_bits, (obj))
1495#define RVALUE_PAGE_UNCOLLECTIBLE(page, obj) MARKED_IN_BITMAP((page)->uncollectible_bits, (obj))
1496#define RVALUE_PAGE_MARKING(page, obj) MARKED_IN_BITMAP((page)->marking_bits, (obj))
1497
1498#define RVALUE_OLD_AGE 3
1499#define RVALUE_AGE_SHIFT 5 /* FL_PROMOTED0 bit */
1500
1501static int rgengc_remembered(rb_objspace_t *objspace, VALUE obj);
1502static int rgengc_remembered_sweep(rb_objspace_t *objspace, VALUE obj);
1503static int rgengc_remember(rb_objspace_t *objspace, VALUE obj);
1504static void rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap);
1505static void rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap);
1506
1507static inline int
1508RVALUE_FLAGS_AGE(VALUE flags)
1509{
1510 return (int)((flags & (FL_PROMOTED0 | FL_PROMOTED1)) >> RVALUE_AGE_SHIFT);
1511}
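/* Illustrative note: the generational age is a 2-bit counter kept in the
 * FL_PROMOTED0/FL_PROMOTED1 flag bits (bits 5 and 6 of RBASIC(obj)->flags), so
 * ages run from 0 to RVALUE_OLD_AGE (3). For example, flags with both promoted
 * bits set contain 0x60, and (0x60 >> RVALUE_AGE_SHIFT) == 3, i.e. the object
 * is OLD. */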
1512
1513static int
1514check_rvalue_consistency_force(const VALUE obj, int terminate)
1515{
1516 int err = 0;
1517 rb_objspace_t *objspace = &rb_objspace;
1518
1519 RB_VM_LOCK_ENTER_NO_BARRIER();
1520 {
1521 if (SPECIAL_CONST_P(obj)) {
1522 fprintf(stderr, "check_rvalue_consistency: %p is a special const.\n", (void *)obj);
1523 err++;
1524 }
1525 else if (!is_pointer_to_heap(objspace, (void *)obj)) {
1526 /* check if it is in tomb_pages */
1527 struct heap_page *page = NULL;
1528 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1529 rb_size_pool_t *size_pool = &size_pools[i];
1530 ccan_list_for_each(&size_pool->tomb_heap.pages, page, page_node) {
1531 if (page->start <= (uintptr_t)obj &&
1532 (uintptr_t)obj < (page->start + (page->total_slots * size_pool->slot_size))) {
1533 fprintf(stderr, "check_rvalue_consistency: %p is in a tomb_heap (%p).\n",
1534 (void *)obj, (void *)page);
1535 err++;
1536 goto skip;
1537 }
1538 }
1539 }
1540 bp();
1541 fprintf(stderr, "check_rvalue_consistency: %p is not a Ruby object.\n", (void *)obj);
1542 err++;
1543 skip:
1544 ;
1545 }
1546 else {
1547 const int wb_unprotected_bit = RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
1548 const int uncollectible_bit = RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
1549 const int mark_bit = RVALUE_MARK_BITMAP(obj) != 0;
1550 const int marking_bit = RVALUE_MARKING_BITMAP(obj) != 0, remembered_bit = marking_bit;
1551 const int age = RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
1552
1553 if (GET_HEAP_PAGE(obj)->flags.in_tomb) {
1554 fprintf(stderr, "check_rvalue_consistency: %s is in tomb page.\n", obj_info(obj));
1555 err++;
1556 }
1557 if (BUILTIN_TYPE(obj) == T_NONE) {
1558 fprintf(stderr, "check_rvalue_consistency: %s is T_NONE.\n", obj_info(obj));
1559 err++;
1560 }
1561 if (BUILTIN_TYPE(obj) == T_ZOMBIE) {
1562 fprintf(stderr, "check_rvalue_consistency: %s is T_ZOMBIE.\n", obj_info(obj));
1563 err++;
1564 }
1565
1566 obj_memsize_of((VALUE)obj, FALSE);
1567
1568 /* check generation
1569 *
1570 * OLD == age == 3 && old-bitmap && mark-bit (except incremental marking)
1571 */
1572 if (age > 0 && wb_unprotected_bit) {
1573 fprintf(stderr, "check_rvalue_consistency: %s is not WB protected, but age is %d > 0.\n", obj_info(obj), age);
1574 err++;
1575 }
1576
1577 if (!is_marking(objspace) && uncollectible_bit && !mark_bit) {
1578 fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but is not marked while !gc.\n", obj_info(obj));
1579 err++;
1580 }
1581
1582 if (!is_full_marking(objspace)) {
1583 if (uncollectible_bit && age != RVALUE_OLD_AGE && !wb_unprotected_bit) {
1584 fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but not old (age: %d) and not WB unprotected.\n",
1585 obj_info(obj), age);
1586 err++;
1587 }
1588 if (remembered_bit && age != RVALUE_OLD_AGE) {
1589 fprintf(stderr, "check_rvalue_consistency: %s is remembered, but not old (age: %d).\n",
1590 obj_info(obj), age);
1591 err++;
1592 }
1593 }
1594
1595 /*
1596 * check coloring
1597 *
1598 * marking:false marking:true
1599 * marked:false white *invalid*
1600 * marked:true black grey
1601 */
1602 if (is_incremental_marking(objspace) && marking_bit) {
1603 if (!is_marking(objspace) && !mark_bit) {
1604 fprintf(stderr, "check_rvalue_consistency: %s is marking, but not marked.\n", obj_info(obj));
1605 err++;
1606 }
1607 }
1608 }
1609 }
1610 RB_VM_LOCK_LEAVE_NO_BARRIER();
1611
1612 if (err > 0 && terminate) {
1613 rb_bug("check_rvalue_consistency_force: there are %d errors.", err);
1614 }
1615 return err;
1616}
1617
1618#if RGENGC_CHECK_MODE == 0
1619static inline VALUE
1620check_rvalue_consistency(const VALUE obj)
1621{
1622 return obj;
1623}
1624#else
1625static VALUE
1626check_rvalue_consistency(const VALUE obj)
1627{
1628 check_rvalue_consistency_force(obj, TRUE);
1629 return obj;
1630}
1631#endif
1632
1633static inline int
1634gc_object_moved_p(rb_objspace_t * objspace, VALUE obj)
1635{
1636 if (RB_SPECIAL_CONST_P(obj)) {
1637 return FALSE;
1638 }
1639 else {
1640 void *poisoned = asan_unpoison_object_temporary(obj);
1641
1642 int ret = BUILTIN_TYPE(obj) == T_MOVED;
1643 /* Re-poison slot if it's not the one we want */
1644 if (poisoned) {
1645 GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
1646 asan_poison_object(obj);
1647 }
1648 return ret;
1649 }
1650}
1651
1652static inline int
1653RVALUE_MARKED(VALUE obj)
1654{
1655 check_rvalue_consistency(obj);
1656 return RVALUE_MARK_BITMAP(obj) != 0;
1657}
1658
1659static inline int
1660RVALUE_PINNED(VALUE obj)
1661{
1662 check_rvalue_consistency(obj);
1663 return RVALUE_PIN_BITMAP(obj) != 0;
1664}
1665
1666static inline int
1667RVALUE_WB_UNPROTECTED(VALUE obj)
1668{
1669 check_rvalue_consistency(obj);
1670 return RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
1671}
1672
1673static inline int
1674RVALUE_MARKING(VALUE obj)
1675{
1676 check_rvalue_consistency(obj);
1677 return RVALUE_MARKING_BITMAP(obj) != 0;
1678}
1679
1680static inline int
1681RVALUE_REMEMBERED(VALUE obj)
1682{
1683 check_rvalue_consistency(obj);
1684 return RVALUE_MARKING_BITMAP(obj) != 0;
1685}
1686
1687static inline int
1688RVALUE_UNCOLLECTIBLE(VALUE obj)
1689{
1690 check_rvalue_consistency(obj);
1691 return RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
1692}
1693
1694static inline int
1695RVALUE_OLD_P_RAW(VALUE obj)
1696{
1697 const VALUE promoted = FL_PROMOTED0 | FL_PROMOTED1;
1698 return (RBASIC(obj)->flags & promoted) == promoted;
1699}
1700
1701static inline int
1702RVALUE_OLD_P(VALUE obj)
1703{
1704 check_rvalue_consistency(obj);
1705 return RVALUE_OLD_P_RAW(obj);
1706}
1707
1708#if RGENGC_CHECK_MODE || GC_DEBUG
1709static inline int
1710RVALUE_AGE(VALUE obj)
1711{
1712 check_rvalue_consistency(obj);
1713 return RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
1714}
1715#endif
1716
1717static inline void
1718RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
1719{
1720 MARK_IN_BITMAP(&page->uncollectible_bits[0], obj);
1721 objspace->rgengc.old_objects++;
1722 rb_transient_heap_promote(obj);
1723
1724#if RGENGC_PROFILE >= 2
1725 objspace->profile.total_promoted_count++;
1726 objspace->profile.promoted_types[BUILTIN_TYPE(obj)]++;
1727#endif
1728}
1729
1730static inline void
1731RVALUE_OLD_UNCOLLECTIBLE_SET(rb_objspace_t *objspace, VALUE obj)
1732{
1733 RB_DEBUG_COUNTER_INC(obj_promote);
1734 RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, GET_HEAP_PAGE(obj), obj);
1735}
1736
1737static inline VALUE
1738RVALUE_FLAGS_AGE_SET(VALUE flags, int age)
1739{
1740 flags &= ~(FL_PROMOTED0 | FL_PROMOTED1);
1741 flags |= (age << RVALUE_AGE_SHIFT);
1742 return flags;
1743}
1744
1745/* set age to age+1 */
1746static inline void
1747RVALUE_AGE_INC(rb_objspace_t *objspace, VALUE obj)
1748{
1749 VALUE flags = RBASIC(obj)->flags;
1750 int age = RVALUE_FLAGS_AGE(flags);
1751
1752 if (RGENGC_CHECK_MODE && age == RVALUE_OLD_AGE) {
1753 rb_bug("RVALUE_AGE_INC: can not increment age of OLD object %s.", obj_info(obj));
1754 }
1755
1756 age++;
1757 RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(flags, age);
1758
1759 if (age == RVALUE_OLD_AGE) {
1760 RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
1761 }
1762 check_rvalue_consistency(obj);
1763}
1764
1765/* set age to RVALUE_OLD_AGE */
1766static inline void
1767RVALUE_AGE_SET_OLD(rb_objspace_t *objspace, VALUE obj)
1768{
1769 check_rvalue_consistency(obj);
1770 GC_ASSERT(!RVALUE_OLD_P(obj));
1771
1772 RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, RVALUE_OLD_AGE);
1773 RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
1774
1775 check_rvalue_consistency(obj);
1776}
1777
1778/* set age to RVALUE_OLD_AGE - 1 */
1779static inline void
1780RVALUE_AGE_SET_CANDIDATE(rb_objspace_t *objspace, VALUE obj)
1781{
1782 check_rvalue_consistency(obj);
1783 GC_ASSERT(!RVALUE_OLD_P(obj));
1784
1785 RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, RVALUE_OLD_AGE - 1);
1786
1787 check_rvalue_consistency(obj);
1788}
1789
1790static inline void
1791RVALUE_DEMOTE_RAW(rb_objspace_t *objspace, VALUE obj)
1792{
1793 RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, 0);
1794 CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), obj);
1795}
1796
1797static inline void
1798RVALUE_DEMOTE(rb_objspace_t *objspace, VALUE obj)
1799{
1800 check_rvalue_consistency(obj);
1801 GC_ASSERT(RVALUE_OLD_P(obj));
1802
1803 if (!is_incremental_marking(objspace) && RVALUE_REMEMBERED(obj)) {
1804 CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
1805 }
1806
1807 RVALUE_DEMOTE_RAW(objspace, obj);
1808
1809 if (RVALUE_MARKED(obj)) {
1810 objspace->rgengc.old_objects--;
1811 }
1812
1813 check_rvalue_consistency(obj);
1814}
1815
1816static inline void
1817RVALUE_AGE_RESET_RAW(VALUE obj)
1818{
1819 RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, 0);
1820}
1821
1822static inline void
1823RVALUE_AGE_RESET(VALUE obj)
1824{
1825 check_rvalue_consistency(obj);
1826 GC_ASSERT(!RVALUE_OLD_P(obj));
1827
1828 RVALUE_AGE_RESET_RAW(obj);
1829 check_rvalue_consistency(obj);
1830}
1831
1832static inline int
1833RVALUE_BLACK_P(VALUE obj)
1834{
1835 return RVALUE_MARKED(obj) && !RVALUE_MARKING(obj);
1836}
1837
1838#if 0
1839static inline int
1840RVALUE_GREY_P(VALUE obj)
1841{
1842 return RVALUE_MARKED(obj) && RVALUE_MARKING(obj);
1843}
1844#endif
1845
1846static inline int
1847RVALUE_WHITE_P(VALUE obj)
1848{
1849 return RVALUE_MARKED(obj) == FALSE;
1850}
1851
1852/*
1853 --------------------------- ObjectSpace -----------------------------
1854*/
1855
1856static inline void *
1857calloc1(size_t n)
1858{
1859 return calloc(1, n);
1860}
1861
1862rb_objspace_t *
1863rb_objspace_alloc(void)
1864{
1865 rb_objspace_t *objspace = calloc1(sizeof(rb_objspace_t));
1866 objspace->flags.measure_gc = 1;
1867 malloc_limit = gc_params.malloc_limit_min;
1868
1869 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1870 rb_size_pool_t *size_pool = &size_pools[i];
1871
1872 size_pool->slot_size = (1 << i) * BASE_SLOT_SIZE;
1873
1874 ccan_list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages);
1875 ccan_list_head_init(&SIZE_POOL_TOMB_HEAP(size_pool)->pages);
1876 }
1877
1878 dont_gc_on();
1879
1880 return objspace;
1881}
1882
1883static void free_stack_chunks(mark_stack_t *);
1884static void mark_stack_free_cache(mark_stack_t *);
1885static void heap_page_free(rb_objspace_t *objspace, struct heap_page *page);
1886
1887void
1888rb_objspace_free(rb_objspace_t *objspace)
1889{
1890 if (is_lazy_sweeping(objspace))
1891 rb_bug("lazy sweeping underway when freeing object space");
1892
1893 if (objspace->profile.records) {
1894 free(objspace->profile.records);
1895 objspace->profile.records = 0;
1896 }
1897
1898 if (global_list) {
1899 struct gc_list *list, *next;
1900 for (list = global_list; list; list = next) {
1901 next = list->next;
1902 xfree(list);
1903 }
1904 }
1905 if (heap_pages_sorted) {
1906 size_t i;
1907 for (i = 0; i < heap_allocated_pages; ++i) {
1908 heap_page_free(objspace, heap_pages_sorted[i]);
1909 }
1910 free(heap_pages_sorted);
1911 heap_allocated_pages = 0;
1912 heap_pages_sorted_length = 0;
1913 heap_pages_lomem = 0;
1914 heap_pages_himem = 0;
1915
1916 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1917 rb_size_pool_t *size_pool = &size_pools[i];
1918 SIZE_POOL_EDEN_HEAP(size_pool)->total_pages = 0;
1919 SIZE_POOL_EDEN_HEAP(size_pool)->total_slots = 0;
1920 }
1921 }
1922 st_free_table(objspace->id_to_obj_tbl);
1923 st_free_table(objspace->obj_to_id_tbl);
1924
1925 free_stack_chunks(&objspace->mark_stack);
1926 mark_stack_free_cache(&objspace->mark_stack);
1927
1928 free(objspace);
1929}
1930
1931static void
1932heap_pages_expand_sorted_to(rb_objspace_t *objspace, size_t next_length)
1933{
1934 struct heap_page **sorted;
1935 size_t size = size_mul_or_raise(next_length, sizeof(struct heap_page *), rb_eRuntimeError);
1936
1937 gc_report(3, objspace, "heap_pages_expand_sorted: next_length: %"PRIdSIZE", size: %"PRIdSIZE"\n",
1938 next_length, size);
1939
1940 if (heap_pages_sorted_length > 0) {
1941 sorted = (struct heap_page **)realloc(heap_pages_sorted, size);
1942 if (sorted) heap_pages_sorted = sorted;
1943 }
1944 else {
1945 sorted = heap_pages_sorted = (struct heap_page **)malloc(size);
1946 }
1947
1948 if (sorted == 0) {
1949 rb_memerror();
1950 }
1951
1952 heap_pages_sorted_length = next_length;
1953}
1954
1955static void
1956heap_pages_expand_sorted(rb_objspace_t *objspace)
1957{
1958 /* Usually heap_allocatable_pages + heap_eden->total_pages == heap_pages_sorted_length,
1959 * because heap_allocatable_pages already accounts for heap_tomb->total_pages (tomb pages are recycled).
1960 * However, when existing pages have no empty slots left, new pages may be created, so the
1961 * additional allocatable page counts (heap_tomb->total_pages) are added here as well.
1962 */
1963 size_t next_length = heap_allocatable_pages(objspace);
1964 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1965 rb_size_pool_t *size_pool = &size_pools[i];
1966 next_length += SIZE_POOL_EDEN_HEAP(size_pool)->total_pages;
1967 next_length += SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
1968 }
1969
1970 if (next_length > heap_pages_sorted_length) {
1971 heap_pages_expand_sorted_to(objspace, next_length);
1972 }
1973
1974 GC_ASSERT(heap_allocatable_pages(objspace) + heap_eden_total_pages(objspace) <= heap_pages_sorted_length);
1975 GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
1976}
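/* Worked example for the length computation above (numbers assumed): if the
 * size pools together report 10 allocatable pages, 100 eden pages and 5 tomb
 * pages, then next_length = 10 + 100 + 5 = 115, and heap_pages_sorted is only
 * reallocated when its current length is below that. */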
1977
1978static void
1979size_pool_allocatable_pages_set(rb_objspace_t *objspace, rb_size_pool_t *size_pool, size_t s)
1980{
1981 size_pool->allocatable_pages = s;
1982 heap_pages_expand_sorted(objspace);
1983}
1984
1985static inline void
1986heap_page_add_freeobj(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
1987{
1988 ASSERT_vm_locking();
1989
1990 RVALUE *p = (RVALUE *)obj;
1991
1992 asan_unpoison_object(obj, false);
1993
1994 asan_unlock_freelist(page);
1995
1996 p->as.free.flags = 0;
1997 p->as.free.next = page->freelist;
1998 page->freelist = p;
1999 asan_lock_freelist(page);
2000
2001 if (RGENGC_CHECK_MODE &&
2002 /* obj should belong to page */
2003 !(page->start <= (uintptr_t)obj &&
2004 (uintptr_t)obj < ((uintptr_t)page->start + (page->total_slots * page->slot_size)) &&
2005 obj % BASE_SLOT_SIZE == 0)) {
2006 rb_bug("heap_page_add_freeobj: %p is not rvalue.", (void *)p);
2007 }
2008
2009 asan_poison_object(obj);
2010 gc_report(3, objspace, "heap_page_add_freeobj: add %p to freelist\n", (void *)obj);
2011}
2012
2013static inline void
2014heap_add_freepage(rb_heap_t *heap, struct heap_page *page)
2015{
2016 asan_unlock_freelist(page);
2017 GC_ASSERT(page->free_slots != 0);
2018 GC_ASSERT(page->freelist != NULL);
2019
2020 page->free_next = heap->free_pages;
2021 heap->free_pages = page;
2022
2023 RUBY_DEBUG_LOG("page:%p freelist:%p", (void *)page, (void *)page->freelist);
2024
2025 asan_lock_freelist(page);
2026}
2027
2028#if GC_ENABLE_INCREMENTAL_MARK
2029static inline void
2030heap_add_poolpage(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
2031{
2032 asan_unlock_freelist(page);
2033 GC_ASSERT(page->free_slots != 0);
2034 GC_ASSERT(page->freelist != NULL);
2035
2036 page->free_next = heap->pooled_pages;
2037 heap->pooled_pages = page;
2038 objspace->rincgc.pooled_slots += page->free_slots;
2039
2040 asan_lock_freelist(page);
2041}
2042#endif
2043
2044static void
2045heap_unlink_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
2046{
2047 ccan_list_del(&page->page_node);
2048 heap->total_pages--;
2049 heap->total_slots -= page->total_slots;
2050}
2051
2052static void rb_aligned_free(void *ptr, size_t size);
2053
2054static void
2055heap_page_body_free(struct heap_page_body *page_body)
2056{
2057 GC_ASSERT((uintptr_t)page_body % HEAP_PAGE_ALIGN == 0);
2058
2059 if (HEAP_PAGE_ALLOC_USE_MMAP) {
2060#ifdef HAVE_MMAP
2061 GC_ASSERT(HEAP_PAGE_SIZE % sysconf(_SC_PAGE_SIZE) == 0);
2062 if (munmap(page_body, HEAP_PAGE_SIZE)) {
2063 rb_bug("heap_page_body_free: munmap failed");
2064 }
2065#endif
2066 }
2067 else {
2068 rb_aligned_free(page_body, HEAP_PAGE_SIZE);
2069 }
2070}
2071
2072static void
2073heap_page_free(rb_objspace_t *objspace, struct heap_page *page)
2074{
2075 heap_allocated_pages--;
2076 page->size_pool->total_freed_pages++;
2077 heap_page_body_free(GET_PAGE_BODY(page->start));
2078 free(page);
2079}
2080
2081static void
2082heap_pages_free_unused_pages(rb_objspace_t *objspace)
2083{
2084 size_t i, j;
2085
2086 bool has_pages_in_tomb_heap = FALSE;
2087 for (i = 0; i < SIZE_POOL_COUNT; i++) {
2088 if (!ccan_list_empty(&SIZE_POOL_TOMB_HEAP(&size_pools[i])->pages)) {
2089 has_pages_in_tomb_heap = TRUE;
2090 break;
2091 }
2092 }
2093
2094 if (has_pages_in_tomb_heap) {
2095 for (i = j = 1; j < heap_allocated_pages; i++) {
2096 struct heap_page *page = heap_pages_sorted[i];
2097
2098 if (page->flags.in_tomb && page->free_slots == page->total_slots) {
2099 heap_unlink_page(objspace, SIZE_POOL_TOMB_HEAP(page->size_pool), page);
2100 heap_page_free(objspace, page);
2101 }
2102 else {
2103 if (i != j) {
2104 heap_pages_sorted[j] = page;
2105 }
2106 j++;
2107 }
2108 }
2109
2110 struct heap_page *hipage = heap_pages_sorted[heap_allocated_pages - 1];
2111 uintptr_t himem = (uintptr_t)hipage->start + (hipage->total_slots * hipage->slot_size);
2112 GC_ASSERT(himem <= heap_pages_himem);
2113 heap_pages_himem = himem;
2114
2115 GC_ASSERT(j == heap_allocated_pages);
2116 }
2117}
2118
2119static struct heap_page_body *
2120heap_page_body_allocate(void)
2121{
2122 struct heap_page_body *page_body;
2123
2124 if (HEAP_PAGE_ALLOC_USE_MMAP) {
2125#ifdef HAVE_MMAP
2126 GC_ASSERT(HEAP_PAGE_ALIGN % sysconf(_SC_PAGE_SIZE) == 0);
2127
2128 char *ptr = mmap(NULL, HEAP_PAGE_ALIGN + HEAP_PAGE_SIZE,
2129 PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
2130 if (ptr == MAP_FAILED) {
2131 return NULL;
2132 }
2133
2134 char *aligned = ptr + HEAP_PAGE_ALIGN;
2135 aligned -= ((VALUE)aligned & (HEAP_PAGE_ALIGN - 1));
2136 GC_ASSERT(aligned > ptr);
2137 GC_ASSERT(aligned <= ptr + HEAP_PAGE_ALIGN);
2138
2139 size_t start_out_of_range_size = aligned - ptr;
2140 GC_ASSERT(start_out_of_range_size % sysconf(_SC_PAGE_SIZE) == 0);
2141 if (start_out_of_range_size > 0) {
2142 if (munmap(ptr, start_out_of_range_size)) {
2143 rb_bug("heap_page_body_allocate: munmap failed for start");
2144 }
2145 }
2146
2147 size_t end_out_of_range_size = HEAP_PAGE_ALIGN - start_out_of_range_size;
2148 GC_ASSERT(end_out_of_range_size % sysconf(_SC_PAGE_SIZE) == 0);
2149 if (end_out_of_range_size > 0) {
2150 if (munmap(aligned + HEAP_PAGE_SIZE, end_out_of_range_size)) {
2151 rb_bug("heap_page_body_allocate: munmap failed for end");
2152 }
2153 }
2154
2155 page_body = (struct heap_page_body *)aligned;
2156#endif
2157 }
2158 else {
2159 page_body = rb_aligned_malloc(HEAP_PAGE_ALIGN, HEAP_PAGE_SIZE);
2160 }
2161
2162 GC_ASSERT((uintptr_t)page_body % HEAP_PAGE_ALIGN == 0);
2163
2164 return page_body;
2165}
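/* Illustrative example of the mmap trimming above (addresses and sizes
 * assumed): with HEAP_PAGE_ALIGN == HEAP_PAGE_SIZE == 64 KiB and mmap
 * returning ptr == 0x7f0000012000, `aligned` becomes 0x7f0000020000. The
 * 0xE000 bytes before `aligned` and the 0x2000 bytes past
 * `aligned + HEAP_PAGE_SIZE` are munmap()ed, leaving exactly one naturally
 * aligned page body mapped. */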
2166
2167static struct heap_page *
2168heap_page_allocate(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
2169{
2170 uintptr_t start, end, p;
2171 struct heap_page *page;
2172 uintptr_t hi, lo, mid;
2173 size_t stride = size_pool->slot_size;
2174 unsigned int limit = (unsigned int)((HEAP_PAGE_SIZE - sizeof(struct heap_page_header)))/(int)stride;
2175
2176 /* assign heap_page body (contains heap_page_header and RVALUEs) */
2177 struct heap_page_body *page_body = heap_page_body_allocate();
2178 if (page_body == 0) {
2179 rb_memerror();
2180 }
2181
2182 /* assign heap_page entry */
2183 page = calloc1(sizeof(struct heap_page));
2184 if (page == 0) {
2185 heap_page_body_free(page_body);
2186 rb_memerror();
2187 }
2188
2189 /* adjust obj_limit (object number available in this page) */
2190 start = (uintptr_t)((VALUE)page_body + sizeof(struct heap_page_header));
2191
2192 if (start % BASE_SLOT_SIZE != 0) {
2193 int delta = BASE_SLOT_SIZE - (start % BASE_SLOT_SIZE);
2194 start = start + delta;
2195 GC_ASSERT(NUM_IN_PAGE(start) == 0 || NUM_IN_PAGE(start) == 1);
2196
2197 /* Find a num in page that is evenly divisible by `stride`.
2198 * This is to ensure that objects are aligned with bit planes.
2199 * In other words, ensure there are an even number of objects
2200 * per bit plane. */
2201 if (NUM_IN_PAGE(start) == 1) {
2202 start += stride - BASE_SLOT_SIZE;
2203 }
2204
2205 GC_ASSERT(NUM_IN_PAGE(start) * BASE_SLOT_SIZE % stride == 0);
2206
2207 limit = (HEAP_PAGE_SIZE - (int)(start - (uintptr_t)page_body))/(int)stride;
2208 }
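    /* Illustrative example of the adjustment above (sizes assumed): with
     * BASE_SLOT_SIZE == 40, an 8-byte struct heap_page_header and stride == 80
     * (the second size pool), `start` is first moved forward by 32 bytes so
     * that it is BASE_SLOT_SIZE-aligned (NUM_IN_PAGE(start) == 1), then by
     * another 40 bytes so that NUM_IN_PAGE(start) * BASE_SLOT_SIZE is a
     * multiple of the 80-byte stride. */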
2209 end = start + (limit * (int)stride);
2210
2211 /* setup heap_pages_sorted */
2212 lo = 0;
2213 hi = (uintptr_t)heap_allocated_pages;
2214 while (lo < hi) {
2215 struct heap_page *mid_page;
2216
2217 mid = (lo + hi) / 2;
2218 mid_page = heap_pages_sorted[mid];
2219 if ((uintptr_t)mid_page->start < start) {
2220 lo = mid + 1;
2221 }
2222 else if ((uintptr_t)mid_page->start > start) {
2223 hi = mid;
2224 }
2225 else {
2226 rb_bug("same heap page is allocated: %p at %"PRIuVALUE, (void *)page_body, (VALUE)mid);
2227 }
2228 }
2229
2230 if (hi < (uintptr_t)heap_allocated_pages) {
2231 MEMMOVE(&heap_pages_sorted[hi+1], &heap_pages_sorted[hi], struct heap_page_header*, heap_allocated_pages - hi);
2232 }
2233
2234 heap_pages_sorted[hi] = page;
2235
2236 heap_allocated_pages++;
2237
2238 GC_ASSERT(heap_eden_total_pages(objspace) + heap_allocatable_pages(objspace) <= heap_pages_sorted_length);
2239 GC_ASSERT(heap_eden_total_pages(objspace) + heap_tomb_total_pages(objspace) == heap_allocated_pages - 1);
2240 GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
2241
2242 size_pool->total_allocated_pages++;
2243
2244 if (heap_allocated_pages > heap_pages_sorted_length) {
2245 rb_bug("heap_page_allocate: allocated(%"PRIdSIZE") > sorted(%"PRIdSIZE")",
2246 heap_allocated_pages, heap_pages_sorted_length);
2247 }
2248
2249 if (heap_pages_lomem == 0 || heap_pages_lomem > start) heap_pages_lomem = start;
2250 if (heap_pages_himem < end) heap_pages_himem = end;
2251
2252 page->start = start;
2253 page->total_slots = limit;
2254 page->slot_size = size_pool->slot_size;
2255 page->size_pool = size_pool;
2256 page_body->header.page = page;
2257
2258 for (p = start; p != end; p += stride) {
2259 gc_report(3, objspace, "assign_heap_page: %p is added to freelist\n", (void *)p);
2260 heap_page_add_freeobj(objspace, page, (VALUE)p);
2261 }
2262 page->free_slots = limit;
2263
2264 asan_lock_freelist(page);
2265 return page;
2266}
2267
2268static struct heap_page *
2269heap_page_resurrect(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
2270{
2271 struct heap_page *page = 0, *next;
2272
2273 ccan_list_for_each_safe(&SIZE_POOL_TOMB_HEAP(size_pool)->pages, page, next, page_node) {
2274 asan_unlock_freelist(page);
2275 if (page->freelist != NULL) {
2276 heap_unlink_page(objspace, &size_pool->tomb_heap, page);
2277 asan_lock_freelist(page);
2278 return page;
2279 }
2280 }
2281
2282 return NULL;
2283}
2284
2285static struct heap_page *
2286heap_page_create(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
2287{
2288 struct heap_page *page;
2289 const char *method = "recycle";
2290
2291 size_pool->allocatable_pages--;
2292
2293 page = heap_page_resurrect(objspace, size_pool);
2294
2295 if (page == NULL) {
2296 page = heap_page_allocate(objspace, size_pool);
2297 method = "allocate";
2298 }
2299 if (0) fprintf(stderr, "heap_page_create: %s - %p, "
2300 "heap_pages_sorted_length: %"PRIdSIZE", "
2301 "heap_allocated_pages: %"PRIdSIZE", "
2302 "tomb->total_pages: %"PRIdSIZE"\n",
2303 method, (void *)page, heap_pages_sorted_length, heap_allocated_pages, SIZE_POOL_TOMB_HEAP(size_pool)->total_pages);
2304 return page;
2305}
2306
2307static void
2308heap_add_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap, struct heap_page *page)
2309{
2310 /* Adding to eden heap during incremental sweeping is forbidden */
2311 GC_ASSERT(!(heap == SIZE_POOL_EDEN_HEAP(size_pool) && heap->sweeping_page));
2312 page->flags.in_tomb = (heap == SIZE_POOL_TOMB_HEAP(size_pool));
2313 ccan_list_add_tail(&heap->pages, &page->page_node);
2314 heap->total_pages++;
2315 heap->total_slots += page->total_slots;
2316}
2317
2318static void
2319heap_assign_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
2320{
2321 struct heap_page *page = heap_page_create(objspace, size_pool);
2322 heap_add_page(objspace, size_pool, heap, page);
2323 heap_add_freepage(heap, page);
2324}
2325
2326static void
2327heap_add_pages(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap, size_t add)
2328{
2329 size_t i;
2330
2331 size_pool_allocatable_pages_set(objspace, size_pool, add);
2332
2333 for (i = 0; i < add; i++) {
2334 heap_assign_page(objspace, size_pool, heap);
2335 }
2336
2337 GC_ASSERT(size_pool->allocatable_pages == 0);
2338}
2339
2340static size_t
2341heap_extend_pages(rb_objspace_t *objspace, rb_size_pool_t *size_pool, size_t free_slots, size_t total_slots, size_t used)
2342{
2343 double goal_ratio = gc_params.heap_free_slots_goal_ratio;
2344 size_t next_used;
2345
2346 if (goal_ratio == 0.0) {
2347 next_used = (size_t)(used * gc_params.growth_factor);
2348 }
2349 else if (total_slots == 0) {
2350 int multiple = size_pool->slot_size / BASE_SLOT_SIZE;
2351 next_used = (gc_params.heap_init_slots * multiple) / HEAP_PAGE_OBJ_LIMIT;
2352 }
2353 else {
2354 /* Find `f' so that, after growing the heap to f * total_slots slots, the live
2355 * slots (total_slots - free_slots) leave a free-slot ratio of goal_ratio, i.e.
2356 * f = (total_slots - free_slots) / ((1 - goal_ratio) * total_slots) */
2357 double f = (double)(total_slots - free_slots) / ((1 - goal_ratio) * total_slots);
2358
2359 if (f > gc_params.growth_factor) f = gc_params.growth_factor;
2360 if (f < 1.0) f = 1.1;
2361
2362 next_used = (size_t)(f * used);
2363
2364 if (0) {
2365 fprintf(stderr,
2366 "free_slots(%8"PRIuSIZE")/total_slots(%8"PRIuSIZE")=%1.2f,"
2367 " G(%1.2f), f(%1.2f),"
2368 " used(%8"PRIuSIZE") => next_used(%8"PRIuSIZE")\n",
2369 free_slots, total_slots, free_slots/(double)total_slots,
2370 goal_ratio, f, used, next_used);
2371 }
2372 }
2373
2374 if (gc_params.growth_max_slots > 0) {
2375 size_t max_used = (size_t)(used + gc_params.growth_max_slots/HEAP_PAGE_OBJ_LIMIT);
2376 if (next_used > max_used) next_used = max_used;
2377 }
2378
2379 size_t extend_page_count = next_used - used;
2380 /* Extend by at least 1 page. */
2381 if (extend_page_count == 0) extend_page_count = 1;
2382
2383 return extend_page_count;
2384}
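/* Worked example for the goal-ratio branch above (numbers assumed): with
 * total_slots == 1000, free_slots == 200 and goal_ratio == 0.40,
 *   f = (1000 - 200) / ((1 - 0.40) * 1000) = 800 / 600 ~= 1.33,
 * so growing to ~1333 total slots leaves the ~800 live slots occupying ~60%
 * of the heap and the requested 40% free. */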
2385
2386static int
2387heap_increment(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
2388{
2389 if (size_pool->allocatable_pages > 0) {
2390 gc_report(1, objspace, "heap_increment: heap_pages_sorted_length: %"PRIdSIZE", "
2391 "heap_pages_inc: %"PRIdSIZE", heap->total_pages: %"PRIdSIZE"\n",
2392 heap_pages_sorted_length, size_pool->allocatable_pages, heap->total_pages);
2393
2394 GC_ASSERT(heap_allocatable_pages(objspace) + heap_eden_total_pages(objspace) <= heap_pages_sorted_length);
2395 GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
2396
2397 heap_assign_page(objspace, size_pool, heap);
2398 return TRUE;
2399 }
2400 return FALSE;
2401}
2402
2403static void
2404gc_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
2405{
2406 /* Continue marking if in incremental marking. */
2407 if (heap->free_pages == NULL && is_incremental_marking(objspace)) {
2408 gc_marks_continue(objspace, size_pool, heap);
2409 }
2410
2411 /* Continue sweeping if in lazy sweeping or the previous incremental
2412 * marking finished and did not yield a free page. */
2413 if (heap->free_pages == NULL && is_lazy_sweeping(objspace)) {
2414 gc_sweep_continue(objspace, size_pool, heap);
2415 }
2416}
2417
2418static void
2419heap_prepare(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
2420{
2421 GC_ASSERT(heap->free_pages == NULL);
2422
2423 /* Continue incremental marking or lazy sweeping, if in any of those steps. */
2424 gc_continue(objspace, size_pool, heap);
2425
2426 /* If we still don't have a free page and are not allowed to create a new page,
2427 * we should start a new GC cycle. */
2428 if (heap->free_pages == NULL &&
2429 (will_be_incremental_marking(objspace) ||
2430 (heap_increment(objspace, size_pool, heap) == FALSE))) {
2431 if (gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) {
2432 rb_memerror();
2433 }
2434 else {
2435 /* Do steps of incremental marking or lazy sweeping if the GC run permits. */
2436 gc_continue(objspace, size_pool, heap);
2437
2438 /* If we're not incremental marking (e.g. a minor GC) or finished
2439 * sweeping and still don't have a free page, then
2440 * gc_sweep_finish_size_pool should allow us to create a new page. */
2441 if (heap->free_pages == NULL && !heap_increment(objspace, size_pool, heap)) {
2442 if (objspace->rgengc.need_major_gc == GPR_FLAG_NONE) {
2443 rb_bug("cannot create a new page after GC");
2444 }
2445 else { // Major GC is required, which will allow us to create a new page
2446 if (gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) {
2447 rb_memerror();
2448 }
2449 else {
2450 /* Do steps of incremental marking or lazy sweeping. */
2451 gc_continue(objspace, size_pool, heap);
2452
2453 if (heap->free_pages == NULL &&
2454 !heap_increment(objspace, size_pool, heap)) {
2455 rb_bug("cannot create a new page after major GC");
2456 }
2457 }
2458 }
2459 }
2460 }
2461 }
2462
2463 GC_ASSERT(heap->free_pages != NULL);
2464}
2465
2466void
2467rb_objspace_set_event_hook(const rb_event_flag_t event)
2468{
2469 rb_objspace_t *objspace = &rb_objspace;
2470 objspace->hook_events = event & RUBY_INTERNAL_EVENT_OBJSPACE_MASK;
2471 objspace->flags.has_hook = (objspace->hook_events != 0);
2472}
2473
2474static void
2475gc_event_hook_body(rb_execution_context_t *ec, rb_objspace_t *objspace, const rb_event_flag_t event, VALUE data)
2476{
2477 if (UNLIKELY(!ec->cfp)) return;
2478 const VALUE *pc = ec->cfp->pc;
2479 if (pc && VM_FRAME_RUBYFRAME_P(ec->cfp)) {
2480 /* increment PC because source line is calculated with PC-1 */
2481 ec->cfp->pc++;
2482 }
2483 EXEC_EVENT_HOOK(ec, event, ec->cfp->self, 0, 0, 0, data);
2484 ec->cfp->pc = pc;
2485}
2486
2487#define gc_event_hook_available_p(objspace) ((objspace)->flags.has_hook)
2488#define gc_event_hook_needed_p(objspace, event) ((objspace)->hook_events & (event))
2489
2490#define gc_event_hook_prep(objspace, event, data, prep) do { \
2491 if (UNLIKELY(gc_event_hook_needed_p(objspace, event))) { \
2492 prep; \
2493 gc_event_hook_body(GET_EC(), (objspace), (event), (data)); \
2494 } \
2495} while (0)
2496
2497#define gc_event_hook(objspace, event, data) gc_event_hook_prep(objspace, event, data, (void)0)
2498
2499static inline VALUE
2500newobj_init(VALUE klass, VALUE flags, int wb_protected, rb_objspace_t *objspace, VALUE obj)
2501{
2502#if !__has_feature(memory_sanitizer)
2503 GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
2504 GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
2505#endif
2506 RVALUE *p = RANY(obj);
2507 p->as.basic.flags = flags;
2508 *((VALUE *)&p->as.basic.klass) = klass;
2509
2510#if RACTOR_CHECK_MODE
2511 rb_ractor_setup_belonging(obj);
2512#endif
2513
2514#if RGENGC_CHECK_MODE
2515 p->as.values.v1 = p->as.values.v2 = p->as.values.v3 = 0;
2516
2517 RB_VM_LOCK_ENTER_NO_BARRIER();
2518 {
2519 check_rvalue_consistency(obj);
2520
2521 GC_ASSERT(RVALUE_MARKED(obj) == FALSE);
2522 GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
2523 GC_ASSERT(RVALUE_OLD_P(obj) == FALSE);
2524 GC_ASSERT(RVALUE_WB_UNPROTECTED(obj) == FALSE);
2525
2526 if (flags & FL_PROMOTED1) {
2527 if (RVALUE_AGE(obj) != 2) rb_bug("newobj: %s of age (%d) != 2.", obj_info(obj), RVALUE_AGE(obj));
2528 }
2529 else {
2530 if (RVALUE_AGE(obj) > 0) rb_bug("newobj: %s of age (%d) > 0.", obj_info(obj), RVALUE_AGE(obj));
2531 }
2532 if (rgengc_remembered(objspace, (VALUE)obj)) rb_bug("newobj: %s is remembered.", obj_info(obj));
2533 }
2534 RB_VM_LOCK_LEAVE_NO_BARRIER();
2535#endif
2536
2537 if (UNLIKELY(wb_protected == FALSE)) {
2538 ASSERT_vm_locking();
2539 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
2540 }
2541
2542 // TODO: make it atomic, or ractor local
2543 objspace->total_allocated_objects++;
2544
2545#if RGENGC_PROFILE
2546 if (wb_protected) {
2547 objspace->profile.total_generated_normal_object_count++;
2548#if RGENGC_PROFILE >= 2
2549 objspace->profile.generated_normal_object_count_types[BUILTIN_TYPE(obj)]++;
2550#endif
2551 }
2552 else {
2553 objspace->profile.total_generated_shady_object_count++;
2554#if RGENGC_PROFILE >= 2
2555 objspace->profile.generated_shady_object_count_types[BUILTIN_TYPE(obj)]++;
2556#endif
2557 }
2558#endif
2559
2560#if GC_DEBUG
2561 RANY(obj)->file = rb_source_location_cstr(&RANY(obj)->line);
2562 GC_ASSERT(!SPECIAL_CONST_P(obj)); /* check alignment */
2563#endif
2564
2565 gc_report(5, objspace, "newobj: %s\n", obj_info(obj));
2566
2567#if RGENGC_OLD_NEWOBJ_CHECK > 0
2568 {
2569 static int newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK;
2570
2571 if (!is_incremental_marking(objspace) &&
2572 flags & FL_WB_PROTECTED && /* do not promote WB unprotected objects */
2573 ! RB_TYPE_P(obj, T_ARRAY)) { /* array.c assumes that allocated objects are new */
2574 if (--newobj_cnt == 0) {
2575 newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK;
2576
2577 gc_mark_set(objspace, obj);
2578 RVALUE_AGE_SET_OLD(objspace, obj);
2579
2580 rb_gc_writebarrier_remember(obj);
2581 }
2582 }
2583 }
2584#endif
2585 // RUBY_DEBUG_LOG("obj:%p (%s)", (void *)obj, obj_type_name(obj));
2586 return obj;
2587}
2588
2589size_t
2590rb_gc_obj_slot_size(VALUE obj)
2591{
2592 return GET_HEAP_PAGE(obj)->slot_size - RVALUE_OVERHEAD;
2593}
2594
2595static inline size_t
2596size_pool_slot_size(unsigned char pool_id)
2597{
2598 GC_ASSERT(pool_id < SIZE_POOL_COUNT);
2599
2600 size_t slot_size = (1 << pool_id) * BASE_SLOT_SIZE;
2601
2602#if RGENGC_CHECK_MODE
2603 rb_objspace_t *objspace = &rb_objspace;
2604 GC_ASSERT(size_pools[pool_id].slot_size == (short)slot_size);
2605#endif
2606
2607 slot_size -= RVALUE_OVERHEAD;
2608
2609 return slot_size;
2610}
2611
2612size_t
2613rb_size_pool_slot_size(unsigned char pool_id)
2614{
2615 return size_pool_slot_size(pool_id);
2616}
2617
2618bool
2619rb_gc_size_allocatable_p(size_t size)
2620{
2621 return size <= size_pool_slot_size(SIZE_POOL_COUNT - 1);
2622}
2623
2624static inline VALUE
2625ractor_cache_allocate_slot(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache,
2626 size_t size_pool_idx)
2627{
2628 rb_ractor_newobj_size_pool_cache_t *size_pool_cache = &cache->size_pool_caches[size_pool_idx];
2629 RVALUE *p = size_pool_cache->freelist;
2630
2631#if GC_ENABLE_INCREMENTAL_MARK
2632 if (is_incremental_marking(objspace)) {
2633 // Not allowed to allocate without running an incremental marking step
2634 if (cache->incremental_mark_step_allocated_slots >= INCREMENTAL_MARK_STEP_ALLOCATIONS) {
2635 return Qfalse;
2636 }
2637
2638 if (p) {
2639 cache->incremental_mark_step_allocated_slots++;
2640 }
2641 }
2642#endif
2643
2644 if (p) {
2645 VALUE obj = (VALUE)p;
2646 MAYBE_UNUSED(const size_t) stride = size_pool_slot_size(size_pool_idx);
2647 size_pool_cache->freelist = p->as.free.next;
2648#if USE_RVARGC
2649 asan_unpoison_memory_region(p, stride, true);
2650#else
2651 asan_unpoison_object(obj, true);
2652#endif
2653#if RGENGC_CHECK_MODE
2654 GC_ASSERT(rb_gc_obj_slot_size(obj) == stride);
2655 // zero clear
2656 MEMZERO((char *)obj, char, stride);
2657#endif
2658 return obj;
2659 }
2660 else {
2661 return Qfalse;
2662 }
2663}
2664
2665static struct heap_page *
2666heap_next_free_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
2667{
2668 ASSERT_vm_locking();
2669
2670 struct heap_page *page;
2671
2672 if (heap->free_pages == NULL) {
2673 heap_prepare(objspace, size_pool, heap);
2674 }
2675
2676 page = heap->free_pages;
2677 heap->free_pages = page->free_next;
2678
2679 GC_ASSERT(page->free_slots != 0);
2680 RUBY_DEBUG_LOG("page:%p freelist:%p cnt:%d", (void *)page, (void *)page->freelist, page->free_slots);
2681
2682 asan_unlock_freelist(page);
2683
2684 return page;
2685}
2686
2687static inline void
2688ractor_cache_set_page(rb_ractor_newobj_cache_t *cache, size_t size_pool_idx,
2689 struct heap_page *page)
2690{
2691 gc_report(3, &rb_objspace, "ractor_set_cache: Using page %p\n", (void *)GET_PAGE_BODY(page->start));
2692
2693 rb_ractor_newobj_size_pool_cache_t *size_pool_cache = &cache->size_pool_caches[size_pool_idx];
2694
2695 GC_ASSERT(size_pool_cache->freelist == NULL);
2696 GC_ASSERT(page->free_slots != 0);
2697 GC_ASSERT(page->freelist != NULL);
2698
2699 size_pool_cache->using_page = page;
2700 size_pool_cache->freelist = page->freelist;
2701 page->free_slots = 0;
2702 page->freelist = NULL;
2703
2704 asan_unpoison_object((VALUE)size_pool_cache->freelist, false);
2705 GC_ASSERT(RB_TYPE_P((VALUE)size_pool_cache->freelist, T_NONE));
2706 asan_poison_object((VALUE)size_pool_cache->freelist);
2707}
2708
2709static inline VALUE
2710newobj_fill(VALUE obj, VALUE v1, VALUE v2, VALUE v3)
2711{
2712 RVALUE *p = (RVALUE *)obj;
2713 p->as.values.v1 = v1;
2714 p->as.values.v2 = v2;
2715 p->as.values.v3 = v3;
2716 return obj;
2717}
2718
2719static inline size_t
2720size_pool_idx_for_size(size_t size)
2721{
2722#if USE_RVARGC
2723 size += RVALUE_OVERHEAD;
2724
2725 size_t slot_count = CEILDIV(size, BASE_SLOT_SIZE);
2726
2727 /* size_pool_idx is ceil(log2(slot_count)) */
2728 size_t size_pool_idx = 64 - nlz_int64(slot_count - 1);
2729
2730 if (size_pool_idx >= SIZE_POOL_COUNT) {
2731 rb_bug("size_pool_idx_for_size: allocation size too large");
2732 }
2733
2734#if RGENGC_CHECK_MODE
2735 rb_objspace_t *objspace = &rb_objspace;
2736 GC_ASSERT(size <= (size_t)size_pools[size_pool_idx].slot_size);
2737 if (size_pool_idx > 0) GC_ASSERT(size > (size_t)size_pools[size_pool_idx - 1].slot_size);
2738#endif
2739
2740 return size_pool_idx;
2741#else
2742 GC_ASSERT(size <= sizeof(RVALUE));
2743 return 0;
2744#endif
2745}
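/* Worked example for the index computation above (BASE_SLOT_SIZE == 40 and
 * RVALUE_OVERHEAD == 0 assumed): a 100-byte request gives
 *   slot_count    = CEILDIV(100, 40)      == 3
 *   size_pool_idx = 64 - nlz_int64(3 - 1) == 2   (== ceil(log2(3)))
 * so the object is placed in the (1 << 2) * 40 == 160-byte size pool. */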
2746
2747static VALUE
2748newobj_alloc(rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx, bool vm_locked)
2749{
2750 rb_size_pool_t *size_pool = &size_pools[size_pool_idx];
2751 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
2752 rb_ractor_newobj_cache_t *cache = &cr->newobj_cache;
2753
2754 VALUE obj = ractor_cache_allocate_slot(objspace, cache, size_pool_idx);
2755
2756 if (UNLIKELY(obj == Qfalse)) {
2757 unsigned int lev;
2758 bool unlock_vm = false;
2759
2760 if (!vm_locked) {
2761 RB_VM_LOCK_ENTER_CR_LEV(cr, &lev);
2762 vm_locked = true;
2763 unlock_vm = true;
2764 }
2765
2766 {
2767 ASSERT_vm_locking();
2768
2769#if GC_ENABLE_INCREMENTAL_MARK
2770 if (is_incremental_marking(objspace)) {
2771 gc_marks_continue(objspace, size_pool, heap);
2772 cache->incremental_mark_step_allocated_slots = 0;
2773
2774 // Retry allocation after resetting incremental_mark_step_allocated_slots
2775 obj = ractor_cache_allocate_slot(objspace, cache, size_pool_idx);
2776 }
2777#endif
2778
2779 if (obj == Qfalse) {
2780 // Get next free page (possibly running GC)
2781 struct heap_page *page = heap_next_free_page(objspace, size_pool, heap);
2782 ractor_cache_set_page(cache, size_pool_idx, page);
2783
2784 // Retry allocation after moving to new page
2785 obj = ractor_cache_allocate_slot(objspace, cache, size_pool_idx);
2786
2787 GC_ASSERT(obj != Qfalse);
2788 }
2789 }
2790
2791 if (unlock_vm) {
2792 RB_VM_LOCK_LEAVE_CR_LEV(cr, &lev);
2793 }
2794 }
2795
2796 return obj;
2797}
2798
2799static void
2800newobj_zero_slot(VALUE obj)
2801{
2802 memset((char *)obj + sizeof(struct RBasic), 0, rb_gc_obj_slot_size(obj) - sizeof(struct RBasic));
2803}
2804
2805ALWAYS_INLINE(static VALUE newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *cr, int wb_protected, size_t size_pool_idx));
2806
2807static inline VALUE
2808newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *cr, int wb_protected, size_t size_pool_idx)
2809{
2810 VALUE obj;
2811 unsigned int lev;
2812
2813 RB_VM_LOCK_ENTER_CR_LEV(cr, &lev);
2814 {
2815 if (UNLIKELY(during_gc || ruby_gc_stressful)) {
2816 if (during_gc) {
2817 dont_gc_on();
2818 during_gc = 0;
2819 rb_bug("object allocation during garbage collection phase");
2820 }
2821
2822 if (ruby_gc_stressful) {
2823 if (!garbage_collect(objspace, GPR_FLAG_NEWOBJ)) {
2824 rb_memerror();
2825 }
2826 }
2827 }
2828
2829 obj = newobj_alloc(objspace, cr, size_pool_idx, true);
2830#if SHAPE_IN_BASIC_FLAGS
2831 flags |= (VALUE)(size_pool_idx) << SHAPE_FLAG_SHIFT;
2832#endif
2833 newobj_init(klass, flags, wb_protected, objspace, obj);
2834
2835 gc_event_hook_prep(objspace, RUBY_INTERNAL_EVENT_NEWOBJ, obj, newobj_zero_slot(obj));
2836 }
2837 RB_VM_LOCK_LEAVE_CR_LEV(cr, &lev);
2838
2839 return obj;
2840}
2841
2842NOINLINE(static VALUE newobj_slowpath_wb_protected(VALUE klass, VALUE flags,
2843 rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx));
2844NOINLINE(static VALUE newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags,
2845 rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx));
2846
2847static VALUE
2848newobj_slowpath_wb_protected(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx)
2849{
2850 return newobj_slowpath(klass, flags, objspace, cr, TRUE, size_pool_idx);
2851}
2852
2853static VALUE
2854newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx)
2855{
2856 return newobj_slowpath(klass, flags, objspace, cr, FALSE, size_pool_idx);
2857}
2858
2859static inline VALUE
2860newobj_of0(VALUE klass, VALUE flags, int wb_protected, rb_ractor_t *cr, size_t alloc_size)
2861{
2862 VALUE obj;
2863 rb_objspace_t *objspace = &rb_objspace;
2864
2865 RB_DEBUG_COUNTER_INC(obj_newobj);
2866 (void)RB_DEBUG_COUNTER_INC_IF(obj_newobj_wb_unprotected, !wb_protected);
2867
2868#if GC_DEBUG_STRESS_TO_CLASS
2869 if (UNLIKELY(stress_to_class)) {
2870 long i, cnt = RARRAY_LEN(stress_to_class);
2871 for (i = 0; i < cnt; ++i) {
2872 if (klass == RARRAY_AREF(stress_to_class, i)) rb_memerror();
2873 }
2874 }
2875#endif
2876
2877 size_t size_pool_idx = size_pool_idx_for_size(alloc_size);
2878
2879 if (!UNLIKELY(during_gc ||
2880 ruby_gc_stressful ||
2881 gc_event_hook_available_p(objspace)) &&
2882 wb_protected) {
2883 obj = newobj_alloc(objspace, cr, size_pool_idx, false);
2884#if SHAPE_IN_BASIC_FLAGS
2885 flags |= (VALUE)size_pool_idx << SHAPE_FLAG_SHIFT;
2886#endif
2887 newobj_init(klass, flags, wb_protected, objspace, obj);
2888 }
2889 else {
2890 RB_DEBUG_COUNTER_INC(obj_newobj_slowpath);
2891
2892 obj = wb_protected ?
2893 newobj_slowpath_wb_protected(klass, flags, objspace, cr, size_pool_idx) :
2894 newobj_slowpath_wb_unprotected(klass, flags, objspace, cr, size_pool_idx);
2895 }
2896
2897 return obj;
2898}
2899
2900static inline VALUE
2901newobj_of(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protected, size_t alloc_size)
2902{
2903 VALUE obj = newobj_of0(klass, flags, wb_protected, GET_RACTOR(), alloc_size);
2904 return newobj_fill(obj, v1, v2, v3);
2905}
2906
2907static inline VALUE
2908newobj_of_cr(rb_ractor_t *cr, VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protected, size_t alloc_size)
2909{
2910 VALUE obj = newobj_of0(klass, flags, wb_protected, cr, alloc_size);
2911 return newobj_fill(obj, v1, v2, v3);
2912}
2913
2914VALUE
2915rb_wb_unprotected_newobj_of(VALUE klass, VALUE flags, size_t size)
2916{
2917 GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
2918 return newobj_of(klass, flags, 0, 0, 0, FALSE, size);
2919}
2920
2921VALUE
2922rb_wb_protected_newobj_of(VALUE klass, VALUE flags, size_t size)
2923{
2924 GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
2925 return newobj_of(klass, flags, 0, 0, 0, TRUE, size);
2926}
2927
2928VALUE
2929rb_ec_wb_protected_newobj_of(rb_execution_context_t *ec, VALUE klass, VALUE flags, size_t size)
2930{
2931 GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
2932 return newobj_of_cr(rb_ec_ractor_ptr(ec), klass, flags, 0, 0, 0, TRUE, size);
2933}
2934
2935/* for compatibility */
2936
2937VALUE
2938rb_newobj(void)
2939{
2940 return newobj_of(0, T_NONE, 0, 0, 0, FALSE, RVALUE_SIZE);
2941}
2942
2943static size_t
2944rb_obj_embedded_size(uint32_t numiv)
2945{
2946 return offsetof(struct RObject, as.ary) + (sizeof(VALUE) * numiv);
2947}
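/* Illustrative arithmetic (offsets assumed for a typical 64-bit build): with
 * offsetof(struct RObject, as.ary) == 16, an object embedding numiv == 3
 * instance variables needs 16 + 3 * 8 == 40 bytes and fits the smallest slot,
 * while numiv == 10 needs 96 bytes and must come from a larger size pool. */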
2948
2949static VALUE
2950rb_class_instance_allocate_internal(VALUE klass, VALUE flags, bool wb_protected)
2951{
2952 GC_ASSERT((flags & RUBY_T_MASK) == T_OBJECT);
2953 GC_ASSERT(flags & ROBJECT_EMBED);
2954
2955 size_t size;
2956#if USE_RVARGC
2957 uint32_t index_tbl_num_entries = RCLASS_EXT(klass)->max_iv_count;
2958
2959 size = rb_obj_embedded_size(index_tbl_num_entries);
2960 if (!rb_gc_size_allocatable_p(size)) {
2961 size = sizeof(struct RObject);
2962 }
2963#else
2964 size = sizeof(struct RObject);
2965#endif
2966
2967 VALUE obj = newobj_of(klass, flags, 0, 0, 0, wb_protected, size);
2968 RUBY_ASSERT(rb_shape_get_shape(obj)->type == SHAPE_ROOT ||
2969 rb_shape_get_shape(obj)->type == SHAPE_INITIAL_CAPACITY);
2970
2971 // Set the shape to the specific T_OBJECT shape which is always
2972 // SIZE_POOL_COUNT away from the root shape.
2973 ROBJECT_SET_SHAPE_ID(obj, ROBJECT_SHAPE_ID(obj) + SIZE_POOL_COUNT);
2974
2975#if RUBY_DEBUG
2976 RUBY_ASSERT(!rb_shape_obj_too_complex(obj));
2977 VALUE *ptr = ROBJECT_IVPTR(obj);
2978 for (size_t i = 0; i < ROBJECT_IV_CAPACITY(obj); i++) {
2979 ptr[i] = Qundef;
2980 }
2981#endif
2982
2983 return obj;
2984}
2985
2986VALUE
2987rb_newobj_of(VALUE klass, VALUE flags)
2988{
2989 if ((flags & RUBY_T_MASK) == T_OBJECT) {
2990 return rb_class_instance_allocate_internal(klass, (flags | ROBJECT_EMBED) & ~FL_WB_PROTECTED, flags & FL_WB_PROTECTED);
2991 }
2992 else {
2993 return newobj_of(klass, flags & ~FL_WB_PROTECTED, 0, 0, 0, flags & FL_WB_PROTECTED, RVALUE_SIZE);
2994 }
2995}
2996
2997#define UNEXPECTED_NODE(func) \
2998 rb_bug(#func"(): GC does not handle T_NODE 0x%x(%p) 0x%"PRIxVALUE, \
2999 BUILTIN_TYPE(obj), (void*)(obj), RBASIC(obj)->flags)
3000
3001const char *
3002rb_imemo_name(enum imemo_type type)
3003{
3004 // put no default case to get a warning if an imemo type is missing
3005 switch (type) {
3006#define IMEMO_NAME(x) case imemo_##x: return #x;
3007 IMEMO_NAME(env);
3008 IMEMO_NAME(cref);
3009 IMEMO_NAME(svar);
3010 IMEMO_NAME(throw_data);
3011 IMEMO_NAME(ifunc);
3012 IMEMO_NAME(memo);
3013 IMEMO_NAME(ment);
3014 IMEMO_NAME(iseq);
3015 IMEMO_NAME(tmpbuf);
3016 IMEMO_NAME(ast);
3017 IMEMO_NAME(parser_strterm);
3018 IMEMO_NAME(callinfo);
3019 IMEMO_NAME(callcache);
3020 IMEMO_NAME(constcache);
3021#undef IMEMO_NAME
3022 }
3023 return "unknown";
3024}
3025
3026#undef rb_imemo_new
3027
3028VALUE
3029rb_imemo_new(enum imemo_type type, VALUE v1, VALUE v2, VALUE v3, VALUE v0)
3030{
3031 size_t size = RVALUE_SIZE;
3032 VALUE flags = T_IMEMO | (type << FL_USHIFT);
3033 return newobj_of(v0, flags, v1, v2, v3, TRUE, size);
3034}
3035
3036static VALUE
3037rb_imemo_tmpbuf_new(VALUE v1, VALUE v2, VALUE v3, VALUE v0)
3038{
3039 size_t size = sizeof(struct rb_imemo_tmpbuf_struct);
3040 VALUE flags = T_IMEMO | (imemo_tmpbuf << FL_USHIFT);
3041 return newobj_of(v0, flags, v1, v2, v3, FALSE, size);
3042}
3043
3044static VALUE
3045rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(void *buf, size_t cnt)
3046{
3047 return rb_imemo_tmpbuf_new((VALUE)buf, 0, (VALUE)cnt, 0);
3048}
3049
3050rb_imemo_tmpbuf_t *
3051rb_imemo_tmpbuf_parser_heap(void *buf, rb_imemo_tmpbuf_t *old_heap, size_t cnt)
3052{
3053 return (rb_imemo_tmpbuf_t *)rb_imemo_tmpbuf_new((VALUE)buf, (VALUE)old_heap, (VALUE)cnt, 0);
3054}
3055
3056static size_t
3057imemo_memsize(VALUE obj)
3058{
3059 size_t size = 0;
3060 switch (imemo_type(obj)) {
3061 case imemo_ment:
3062 size += sizeof(RANY(obj)->as.imemo.ment.def);
3063 break;
3064 case imemo_iseq:
3065 size += rb_iseq_memsize((rb_iseq_t *)obj);
3066 break;
3067 case imemo_env:
3068 size += RANY(obj)->as.imemo.env.env_size * sizeof(VALUE);
3069 break;
3070 case imemo_tmpbuf:
3071 size += RANY(obj)->as.imemo.alloc.cnt * sizeof(VALUE);
3072 break;
3073 case imemo_ast:
3074 size += rb_ast_memsize(&RANY(obj)->as.imemo.ast);
3075 break;
3076 case imemo_cref:
3077 case imemo_svar:
3078 case imemo_throw_data:
3079 case imemo_ifunc:
3080 case imemo_memo:
3081 case imemo_parser_strterm:
3082 break;
3083 default:
3084 /* unreachable */
3085 break;
3086 }
3087 return size;
3088}
3089
3090#if IMEMO_DEBUG
3091VALUE
3092rb_imemo_new_debug(enum imemo_type type, VALUE v1, VALUE v2, VALUE v3, VALUE v0, const char *file, int line)
3093{
3094 VALUE memo = rb_imemo_new(type, v1, v2, v3, v0);
3095 fprintf(stderr, "memo %p (type: %d) @ %s:%d\n", (void *)memo, imemo_type(memo), file, line);
3096 return memo;
3097}
3098#endif
3099
3100MJIT_FUNC_EXPORTED VALUE
3101rb_class_allocate_instance(VALUE klass)
3102{
3103 return rb_class_instance_allocate_internal(klass, T_OBJECT | ROBJECT_EMBED, RGENGC_WB_PROTECTED_OBJECT);
3104}
3105
3106static inline void
3107rb_data_object_check(VALUE klass)
3108{
3109 if (klass != rb_cObject && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
3110 rb_undef_alloc_func(klass);
3111 rb_warn("undefining the allocator of T_DATA class %"PRIsVALUE, klass);
3112 }
3113}
3114
3115VALUE
3116rb_data_object_wrap(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
3117{
3117{
3118 RUBY_ASSERT_ALWAYS(dfree != (RUBY_DATA_FUNC)1);
3119 if (klass) rb_data_object_check(klass);
3120 return newobj_of(klass, T_DATA, (VALUE)dmark, (VALUE)dfree, (VALUE)datap, FALSE, sizeof(struct RTypedData));
3121}
3122
3123VALUE
3124rb_data_object_zalloc(VALUE klass, size_t size, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
3125{
3126 VALUE obj = rb_data_object_wrap(klass, 0, dmark, dfree);
3127 DATA_PTR(obj) = xcalloc(1, size);
3128 return obj;
3129}
3130
3131VALUE
3132rb_data_typed_object_wrap(VALUE klass, void *datap, const rb_data_type_t *type)
3133{
3134 RBIMPL_NONNULL_ARG(type);
3135 if (klass) rb_data_object_check(klass);
3136 return newobj_of(klass, T_DATA, (VALUE)type, (VALUE)1, (VALUE)datap, type->flags & RUBY_FL_WB_PROTECTED, sizeof(struct RTypedData));
3137}
3138
3139VALUE
3140rb_data_typed_object_zalloc(VALUE klass, size_t size, const rb_data_type_t *type)
3141{
3142 VALUE obj = rb_data_typed_object_wrap(klass, 0, type);
3143 DATA_PTR(obj) = xcalloc(1, size);
3144 return obj;
3145}
3146
3147size_t
3148rb_objspace_data_type_memsize(VALUE obj)
3149{
3150 if (RTYPEDDATA_P(obj)) {
3151 const rb_data_type_t *type = RTYPEDDATA_TYPE(obj);
3152 const void *ptr = RTYPEDDATA_DATA(obj);
3153 if (ptr && type->function.dsize) {
3154 return type->function.dsize(ptr);
3155 }
3156 }
3157 return 0;
3158}
3159
3160const char *
3161rb_objspace_data_type_name(VALUE obj)
3162{
3163 if (RTYPEDDATA_P(obj)) {
3164 return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
3165 }
3166 else {
3167 return 0;
3168 }
3169}
3170
3171static int
3172ptr_in_page_body_p(const void *ptr, const void *memb)
3173{
3174 struct heap_page *page = *(struct heap_page **)memb;
3175 uintptr_t p_body = (uintptr_t)GET_PAGE_BODY(page->start);
3176
3177 if ((uintptr_t)ptr >= p_body) {
3178 return (uintptr_t)ptr < (p_body + HEAP_PAGE_SIZE) ? 0 : 1;
3179 }
3180 else {
3181 return -1;
3182 }
3183}
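/* Descriptive note: this is the bsearch() comparator used below over
 * heap_pages_sorted. Given the candidate pointer and one array element
 * (a struct heap_page **), it returns 0 when the pointer falls inside that
 * page's HEAP_PAGE_SIZE body, -1 when it lies below the body (search the
 * lower half), and 1 when it lies at or past the body's end (search the
 * upper half). */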
3184
3185PUREFUNC(static inline struct heap_page * heap_page_for_ptr(rb_objspace_t *objspace, uintptr_t ptr);)
3186static inline struct heap_page *
3187heap_page_for_ptr(rb_objspace_t *objspace, uintptr_t ptr)
3188{
3189 struct heap_page **res;
3190
3191 if (ptr < (uintptr_t)heap_pages_lomem ||
3192 ptr > (uintptr_t)heap_pages_himem) {
3193 return NULL;
3194 }
3195
3196 res = bsearch((void *)ptr, heap_pages_sorted,
3197 (size_t)heap_allocated_pages, sizeof(struct heap_page *),
3198 ptr_in_page_body_p);
3199
3200 if (res) {
3201 return *res;
3202 }
3203 else {
3204 return NULL;
3205 }
3206}
3207
3208PUREFUNC(static inline int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr);)
3209static inline int
3210is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
3211{
3212 register uintptr_t p = (uintptr_t)ptr;
3213 register struct heap_page *page;
3214
3215 RB_DEBUG_COUNTER_INC(gc_isptr_trial);
3216
3217 if (p < heap_pages_lomem || p > heap_pages_himem) return FALSE;
3218 RB_DEBUG_COUNTER_INC(gc_isptr_range);
3219
3220 if (p % BASE_SLOT_SIZE != 0) return FALSE;
3221 RB_DEBUG_COUNTER_INC(gc_isptr_align);
3222
3223 page = heap_page_for_ptr(objspace, (uintptr_t)ptr);
3224 if (page) {
3225 RB_DEBUG_COUNTER_INC(gc_isptr_maybe);
3226 if (page->flags.in_tomb) {
3227 return FALSE;
3228 }
3229 else {
3230 if (p < page->start) return FALSE;
3231 if (p >= page->start + (page->total_slots * page->slot_size)) return FALSE;
3232 if ((NUM_IN_PAGE(p) * BASE_SLOT_SIZE) % page->slot_size != 0) return FALSE;
3233
3234 return TRUE;
3235 }
3236 }
3237 return FALSE;
3238}
3239
3240static enum rb_id_table_iterator_result
3241free_const_entry_i(VALUE value, void *data)
3242{
3243 rb_const_entry_t *ce = (rb_const_entry_t *)value;
3244 xfree(ce);
3245 return ID_TABLE_CONTINUE;
3246}
3247
3248void
3249rb_free_const_table(struct rb_id_table *tbl)
3250{
3251 rb_id_table_foreach_values(tbl, free_const_entry_i, 0);
3252 rb_id_table_free(tbl);
3253}
3254
3255// alive: if false, the target pointers may already have been freed.
3256// The objspace parameter is needed to check for that.
3257static void
3258vm_ccs_free(struct rb_class_cc_entries *ccs, int alive, rb_objspace_t *objspace, VALUE klass)
3259{
3260 if (ccs->entries) {
3261 for (int i=0; i<ccs->len; i++) {
3262 const struct rb_callcache *cc = ccs->entries[i].cc;
3263 if (!alive) {
3264 void *ptr = asan_unpoison_object_temporary((VALUE)cc);
3265 // ccs can be freed.
3266 if (is_pointer_to_heap(objspace, (void *)cc) &&
3267 IMEMO_TYPE_P(cc, imemo_callcache) &&
3268 cc->klass == klass) {
3269 // OK. maybe target cc.
3270 }
3271 else {
3272 if (ptr) {
3273 asan_poison_object((VALUE)cc);
3274 }
3275 continue;
3276 }
3277 if (ptr) {
3278 asan_poison_object((VALUE)cc);
3279 }
3280 }
3281 vm_cc_invalidate(cc);
3282 }
3283 ruby_xfree(ccs->entries);
3284 }
3285 ruby_xfree(ccs);
3286}
3287
3288void
3289rb_vm_ccs_free(struct rb_class_cc_entries *ccs)
3290{
3291 RB_DEBUG_COUNTER_INC(ccs_free);
3292 vm_ccs_free(ccs, TRUE, NULL, Qundef);
3293}
3294
3295struct cc_tbl_i_data {
3296 rb_objspace_t *objspace;
3297 VALUE klass;
3298 bool alive;
3299};
3300
3301static enum rb_id_table_iterator_result
3302cc_table_mark_i(ID id, VALUE ccs_ptr, void *data_ptr)
3303{
3304 struct cc_tbl_i_data *data = data_ptr;
3305 struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
3306 VM_ASSERT(vm_ccs_p(ccs));
3307 VM_ASSERT(id == ccs->cme->called_id);
3308
3309 if (METHOD_ENTRY_INVALIDATED(ccs->cme)) {
3310 rb_vm_ccs_free(ccs);
3311 return ID_TABLE_DELETE;
3312 }
3313 else {
3314 gc_mark(data->objspace, (VALUE)ccs->cme);
3315
3316 for (int i=0; i<ccs->len; i++) {
3317 VM_ASSERT(data->klass == ccs->entries[i].cc->klass);
3318 VM_ASSERT(vm_cc_check_cme(ccs->entries[i].cc, ccs->cme));
3319
3320 gc_mark(data->objspace, (VALUE)ccs->entries[i].ci);
3321 gc_mark(data->objspace, (VALUE)ccs->entries[i].cc);
3322 }
3323 return ID_TABLE_CONTINUE;
3324 }
3325}
3326
3327static void
3328cc_table_mark(rb_objspace_t *objspace, VALUE klass)
3329{
3330 struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
3331 if (cc_tbl) {
3332 struct cc_tbl_i_data data = {
3333 .objspace = objspace,
3334 .klass = klass,
3335 };
3336 rb_id_table_foreach(cc_tbl, cc_table_mark_i, &data);
3337 }
3338}
3339
3340static enum rb_id_table_iterator_result
3341cc_table_free_i(VALUE ccs_ptr, void *data_ptr)
3342{
3343 struct cc_tbl_i_data *data = data_ptr;
3344 struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
3345 VM_ASSERT(vm_ccs_p(ccs));
3346 vm_ccs_free(ccs, data->alive, data->objspace, data->klass);
3347 return ID_TABLE_CONTINUE;
3348}
3349
3350static void
3351cc_table_free(rb_objspace_t *objspace, VALUE klass, bool alive)
3352{
3353 struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
3354
3355 if (cc_tbl) {
3356 struct cc_tbl_i_data data = {
3357 .objspace = objspace,
3358 .klass = klass,
3359 .alive = alive,
3360 };
3361 rb_id_table_foreach_values(cc_tbl, cc_table_free_i, &data);
3362 rb_id_table_free(cc_tbl);
3363 }
3364}
3365
3366static enum rb_id_table_iterator_result
3367cvar_table_free_i(VALUE value, void * ctx)
3368{
3369 xfree((void *) value);
3370 return ID_TABLE_CONTINUE;
3371}
3372
3373void
3374rb_cc_table_free(VALUE klass)
3375{
3376 cc_table_free(&rb_objspace, klass, TRUE);
3377}
3378
3379static inline void
3380make_zombie(rb_objspace_t *objspace, VALUE obj, void (*dfree)(void *), void *data)
3381{
3382 struct RZombie *zombie = RZOMBIE(obj);
3383 zombie->basic.flags = T_ZOMBIE | (zombie->basic.flags & FL_SEEN_OBJ_ID);
3384 zombie->dfree = dfree;
3385 zombie->data = data;
3386 VALUE prev, next = heap_pages_deferred_final;
3387 do {
3388 zombie->next = prev = next;
3389 next = RUBY_ATOMIC_VALUE_CAS(heap_pages_deferred_final, prev, obj);
3390 } while (next != prev);
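    /* Descriptive note: the do/while loop above is a lock-free push onto the
     * global heap_pages_deferred_final list: zombie->next is speculatively set
     * to the current head, and the compare-and-swap succeeds only if the head
     * is still `prev`; otherwise the loop retries with the freshly observed
     * head returned by the CAS. */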
3391
3392 struct heap_page *page = GET_HEAP_PAGE(obj);
3393 page->final_slots++;
3394 heap_pages_final_slots++;
3395}
3396
3397static inline void
3398make_io_zombie(rb_objspace_t *objspace, VALUE obj)
3399{
3400 rb_io_t *fptr = RANY(obj)->as.file.fptr;
3401 make_zombie(objspace, obj, rb_io_fptr_finalize_internal, fptr);
3402}
3403
3404static void
3405obj_free_object_id(rb_objspace_t *objspace, VALUE obj)
3406{
3407 ASSERT_vm_locking();
3408 st_data_t o = (st_data_t)obj, id;
3409
3410 GC_ASSERT(FL_TEST(obj, FL_SEEN_OBJ_ID));
3411 FL_UNSET(obj, FL_SEEN_OBJ_ID);
3412
3413 if (st_delete(objspace->obj_to_id_tbl, &o, &id)) {
3414 GC_ASSERT(id);
3415 st_delete(objspace->id_to_obj_tbl, &id, NULL);
3416 }
3417 else {
3418 rb_bug("Object ID seen, but not in mapping table: %s\n", obj_info(obj));
3419 }
3420}
3421
3422static int
3423obj_free(rb_objspace_t *objspace, VALUE obj)
3424{
3425 RB_DEBUG_COUNTER_INC(obj_free);
3426 // RUBY_DEBUG_LOG("obj:%p (%s)", (void *)obj, obj_type_name(obj));
3427
3428 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_FREEOBJ, obj);
3429
3430 switch (BUILTIN_TYPE(obj)) {
3431 case T_NIL:
3432 case T_FIXNUM:
3433 case T_TRUE:
3434 case T_FALSE:
3435 rb_bug("obj_free() called for broken object");
3436 break;
3437 default:
3438 break;
3439 }
3440
3441 if (FL_TEST(obj, FL_EXIVAR)) {
3442 rb_free_generic_ivar((VALUE)obj);
3443 FL_UNSET(obj, FL_EXIVAR);
3444 }
3445
3446 if (FL_TEST(obj, FL_SEEN_OBJ_ID) && !FL_TEST(obj, FL_FINALIZE)) {
3447 obj_free_object_id(objspace, obj);
3448 }
3449
3450 if (RVALUE_WB_UNPROTECTED(obj)) CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
3451
3452#if RGENGC_CHECK_MODE
3453#define CHECK(x) if (x(obj) != FALSE) rb_bug("obj_free: " #x "(%s) != FALSE", obj_info(obj))
3454 CHECK(RVALUE_WB_UNPROTECTED);
3455 CHECK(RVALUE_MARKED);
3456 CHECK(RVALUE_MARKING);
3457 CHECK(RVALUE_UNCOLLECTIBLE);
3458#undef CHECK
3459#endif
3460
3461 switch (BUILTIN_TYPE(obj)) {
3462 case T_OBJECT:
3463 if (rb_shape_obj_too_complex(obj)) {
3464 RB_DEBUG_COUNTER_INC(obj_obj_too_complex);
3465 st_free_table(ROBJECT_IV_HASH(obj));
3466 }
3467 else if (RANY(obj)->as.basic.flags & ROBJECT_EMBED) {
3468 RB_DEBUG_COUNTER_INC(obj_obj_embed);
3469 }
3470 else if (ROBJ_TRANSIENT_P(obj)) {
3471 RB_DEBUG_COUNTER_INC(obj_obj_transient);
3472 }
3473 else {
3474 xfree(RANY(obj)->as.object.as.heap.ivptr);
3475 RB_DEBUG_COUNTER_INC(obj_obj_ptr);
3476 }
3477 break;
3478 case T_MODULE:
3479 case T_CLASS:
3480 rb_id_table_free(RCLASS_M_TBL(obj));
3481 cc_table_free(objspace, obj, FALSE);
3482 if (RCLASS_IVPTR(obj)) {
3483 xfree(RCLASS_IVPTR(obj));
3484 }
3485 if (RCLASS_CONST_TBL(obj)) {
3486 rb_free_const_table(RCLASS_CONST_TBL(obj));
3487 }
3488 if (RCLASS_CVC_TBL(obj)) {
3489 rb_id_table_foreach_values(RCLASS_CVC_TBL(obj), cvar_table_free_i, NULL);
3490 rb_id_table_free(RCLASS_CVC_TBL(obj));
3491 }
3492 rb_class_remove_subclass_head(obj);
3493 rb_class_remove_from_module_subclasses(obj);
3494 rb_class_remove_from_super_subclasses(obj);
3495 if (FL_TEST_RAW(obj, RCLASS_SUPERCLASSES_INCLUDE_SELF)) {
3496 xfree(RCLASS_SUPERCLASSES(obj));
3497 }
3498
3499#if SIZE_POOL_COUNT == 1
3500 if (RCLASS_EXT(obj))
3501 xfree(RCLASS_EXT(obj));
3502#endif
3503
3504 (void)RB_DEBUG_COUNTER_INC_IF(obj_module_ptr, BUILTIN_TYPE(obj) == T_MODULE);
3505 (void)RB_DEBUG_COUNTER_INC_IF(obj_class_ptr, BUILTIN_TYPE(obj) == T_CLASS);
3506 break;
3507 case T_STRING:
3508 rb_str_free(obj);
3509 break;
3510 case T_ARRAY:
3511 rb_ary_free(obj);
3512 break;
3513 case T_HASH:
3514#if USE_DEBUG_COUNTER
3515 switch (RHASH_SIZE(obj)) {
3516 case 0:
3517 RB_DEBUG_COUNTER_INC(obj_hash_empty);
3518 break;
3519 case 1:
3520 RB_DEBUG_COUNTER_INC(obj_hash_1);
3521 break;
3522 case 2:
3523 RB_DEBUG_COUNTER_INC(obj_hash_2);
3524 break;
3525 case 3:
3526 RB_DEBUG_COUNTER_INC(obj_hash_3);
3527 break;
3528 case 4:
3529 RB_DEBUG_COUNTER_INC(obj_hash_4);
3530 break;
3531 case 5:
3532 case 6:
3533 case 7:
3534 case 8:
3535 RB_DEBUG_COUNTER_INC(obj_hash_5_8);
3536 break;
3537 default:
3538 GC_ASSERT(RHASH_SIZE(obj) > 8);
3539 RB_DEBUG_COUNTER_INC(obj_hash_g8);
3540 }
3541
3542 if (RHASH_AR_TABLE_P(obj)) {
3543 if (RHASH_AR_TABLE(obj) == NULL) {
3544 RB_DEBUG_COUNTER_INC(obj_hash_null);
3545 }
3546 else {
3547 RB_DEBUG_COUNTER_INC(obj_hash_ar);
3548 }
3549 }
3550 else {
3551 RB_DEBUG_COUNTER_INC(obj_hash_st);
3552 }
3553#endif
3554 if (/* RHASH_AR_TABLE_P(obj) */ !FL_TEST_RAW(obj, RHASH_ST_TABLE_FLAG)) {
3555 struct ar_table_struct *tab = RHASH(obj)->as.ar;
3556
3557 if (tab) {
3558 if (RHASH_TRANSIENT_P(obj)) {
3559 RB_DEBUG_COUNTER_INC(obj_hash_transient);
3560 }
3561 else {
3562 ruby_xfree(tab);
3563 }
3564 }
3565 }
3566 else {
3567 GC_ASSERT(RHASH_ST_TABLE_P(obj));
3568 st_free_table(RHASH(obj)->as.st);
3569 }
3570 break;
3571 case T_REGEXP:
3572 if (RANY(obj)->as.regexp.ptr) {
3573 onig_free(RANY(obj)->as.regexp.ptr);
3574 RB_DEBUG_COUNTER_INC(obj_regexp_ptr);
3575 }
3576 break;
3577 case T_DATA:
3578 if (DATA_PTR(obj)) {
3579 int free_immediately = FALSE;
3580 void (*dfree)(void *);
3581 void *data = DATA_PTR(obj);
3582
3583 if (RTYPEDDATA_P(obj)) {
3584 free_immediately = (RANY(obj)->as.typeddata.type->flags & RUBY_TYPED_FREE_IMMEDIATELY) != 0;
3585 dfree = RANY(obj)->as.typeddata.type->function.dfree;
3586 if (0 && free_immediately == 0) {
3587 /* to expose non-free-immediate T_DATA */
3588 fprintf(stderr, "not immediate -> %s\n", RANY(obj)->as.typeddata.type->wrap_struct_name);
3589 }
3590 }
3591 else {
3592 dfree = RANY(obj)->as.data.dfree;
3593 }
3594
3595 if (dfree) {
3596 if (dfree == RUBY_DEFAULT_FREE) {
3597 xfree(data);
3598 RB_DEBUG_COUNTER_INC(obj_data_xfree);
3599 }
3600 else if (free_immediately) {
3601 (*dfree)(data);
3602 RB_DEBUG_COUNTER_INC(obj_data_imm_free);
3603 }
3604 else {
3605 make_zombie(objspace, obj, dfree, data);
3606 RB_DEBUG_COUNTER_INC(obj_data_zombie);
3607 return FALSE;
3608 }
3609 }
3610 else {
3611 RB_DEBUG_COUNTER_INC(obj_data_empty);
3612 }
3613 }
3614 break;
3615 case T_MATCH:
3616 if (RANY(obj)->as.match.rmatch) {
3617 struct rmatch *rm = RANY(obj)->as.match.rmatch;
3618#if USE_DEBUG_COUNTER
3619 if (rm->regs.num_regs >= 8) {
3620 RB_DEBUG_COUNTER_INC(obj_match_ge8);
3621 }
3622 else if (rm->regs.num_regs >= 4) {
3623 RB_DEBUG_COUNTER_INC(obj_match_ge4);
3624 }
3625 else if (rm->regs.num_regs >= 1) {
3626 RB_DEBUG_COUNTER_INC(obj_match_under4);
3627 }
3628#endif
3629 onig_region_free(&rm->regs, 0);
3630 if (rm->char_offset)
3631 xfree(rm->char_offset);
3632 xfree(rm);
3633
3634 RB_DEBUG_COUNTER_INC(obj_match_ptr);
3635 }
3636 break;
3637 case T_FILE:
3638 if (RANY(obj)->as.file.fptr) {
3639 make_io_zombie(objspace, obj);
3640 RB_DEBUG_COUNTER_INC(obj_file_ptr);
3641 return FALSE;
3642 }
3643 break;
3644 case T_RATIONAL:
3645 RB_DEBUG_COUNTER_INC(obj_rational);
3646 break;
3647 case T_COMPLEX:
3648 RB_DEBUG_COUNTER_INC(obj_complex);
3649 break;
3650 case T_MOVED:
3651 break;
3652 case T_ICLASS:
3653 /* Basically, a T_ICLASS shares its method table with the module */
3654 if (RICLASS_OWNS_M_TBL_P(obj)) {
3655 /* Method table is not shared for origin iclasses of classes */
3656 rb_id_table_free(RCLASS_M_TBL(obj));
3657 }
3658 if (RCLASS_CALLABLE_M_TBL(obj) != NULL) {
3659 rb_id_table_free(RCLASS_CALLABLE_M_TBL(obj));
3660 }
3661 rb_class_remove_subclass_head(obj);
3662 cc_table_free(objspace, obj, FALSE);
3663 rb_class_remove_from_module_subclasses(obj);
3664 rb_class_remove_from_super_subclasses(obj);
3665#if !RCLASS_EXT_EMBEDDED
3666 xfree(RCLASS_EXT(obj));
3667#endif
3668
3669 RB_DEBUG_COUNTER_INC(obj_iclass_ptr);
3670 break;
3671
3672 case T_FLOAT:
3673 RB_DEBUG_COUNTER_INC(obj_float);
3674 break;
3675
3676 case T_BIGNUM:
3677 if (!BIGNUM_EMBED_P(obj) && BIGNUM_DIGITS(obj)) {
3678 xfree(BIGNUM_DIGITS(obj));
3679 RB_DEBUG_COUNTER_INC(obj_bignum_ptr);
3680 }
3681 else {
3682 RB_DEBUG_COUNTER_INC(obj_bignum_embed);
3683 }
3684 break;
3685
3686 case T_NODE:
3687 UNEXPECTED_NODE(obj_free);
3688 break;
3689
3690 case T_STRUCT:
3691 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) ||
3692 RANY(obj)->as.rstruct.as.heap.ptr == NULL) {
3693 RB_DEBUG_COUNTER_INC(obj_struct_embed);
3694 }
3695 else if (RSTRUCT_TRANSIENT_P(obj)) {
3696 RB_DEBUG_COUNTER_INC(obj_struct_transient);
3697 }
3698 else {
3699 xfree((void *)RANY(obj)->as.rstruct.as.heap.ptr);
3700 RB_DEBUG_COUNTER_INC(obj_struct_ptr);
3701 }
3702 break;
3703
3704 case T_SYMBOL:
3705 {
3706 rb_gc_free_dsymbol(obj);
3707 RB_DEBUG_COUNTER_INC(obj_symbol);
3708 }
3709 break;
3710
3711 case T_IMEMO:
3712 switch (imemo_type(obj)) {
3713 case imemo_ment:
3714 rb_free_method_entry(&RANY(obj)->as.imemo.ment);
3715 RB_DEBUG_COUNTER_INC(obj_imemo_ment);
3716 break;
3717 case imemo_iseq:
3718 rb_iseq_free(&RANY(obj)->as.imemo.iseq);
3719 RB_DEBUG_COUNTER_INC(obj_imemo_iseq);
3720 break;
3721 case imemo_env:
3722 GC_ASSERT(VM_ENV_ESCAPED_P(RANY(obj)->as.imemo.env.ep));
3723 xfree((VALUE *)RANY(obj)->as.imemo.env.env);
3724 RB_DEBUG_COUNTER_INC(obj_imemo_env);
3725 break;
3726 case imemo_tmpbuf:
3727 xfree(RANY(obj)->as.imemo.alloc.ptr);
3728 RB_DEBUG_COUNTER_INC(obj_imemo_tmpbuf);
3729 break;
3730 case imemo_ast:
3731 rb_ast_free(&RANY(obj)->as.imemo.ast);
3732 RB_DEBUG_COUNTER_INC(obj_imemo_ast);
3733 break;
3734 case imemo_cref:
3735 RB_DEBUG_COUNTER_INC(obj_imemo_cref);
3736 break;
3737 case imemo_svar:
3738 RB_DEBUG_COUNTER_INC(obj_imemo_svar);
3739 break;
3740 case imemo_throw_data:
3741 RB_DEBUG_COUNTER_INC(obj_imemo_throw_data);
3742 break;
3743 case imemo_ifunc:
3744 RB_DEBUG_COUNTER_INC(obj_imemo_ifunc);
3745 break;
3746 case imemo_memo:
3747 RB_DEBUG_COUNTER_INC(obj_imemo_memo);
3748 break;
3749 case imemo_parser_strterm:
3750 RB_DEBUG_COUNTER_INC(obj_imemo_parser_strterm);
3751 break;
3752 case imemo_callinfo:
3753 RB_DEBUG_COUNTER_INC(obj_imemo_callinfo);
3754 break;
3755 case imemo_callcache:
3756 RB_DEBUG_COUNTER_INC(obj_imemo_callcache);
3757 break;
3758 case imemo_constcache:
3759 RB_DEBUG_COUNTER_INC(obj_imemo_constcache);
3760 break;
3761 }
3762 return TRUE;
3763
3764 default:
3765 rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE,
3766 BUILTIN_TYPE(obj), (void*)obj, RBASIC(obj)->flags);
3767 }
3768
3769 if (FL_TEST(obj, FL_FINALIZE)) {
3770 make_zombie(objspace, obj, 0, 0);
3771 return FALSE;
3772 }
3773 else {
3774 return TRUE;
3775 }
3776}
3777
3778
3779#define OBJ_ID_INCREMENT (sizeof(RVALUE) / 2)
3780#define OBJ_ID_INITIAL (OBJ_ID_INCREMENT * 2)
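/* Illustrative values (assuming sizeof(RVALUE) == 40 on a 64-bit build):
 * OBJ_ID_INCREMENT == 20 and OBJ_ID_INITIAL == 40, so monotonic object ids are
 * handed out as 40, 60, 80, and so on: always even, and therefore disjoint
 * from the odd ids (2n + 1) reported for Fixnums. */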
3781
3782static int
3783object_id_cmp(st_data_t x, st_data_t y)
3784{
3785 if (RB_BIGNUM_TYPE_P(x)) {
3786 return !rb_big_eql(x, y);
3787 }
3788 else {
3789 return x != y;
3790 }
3791}
3792
3793static st_index_t
3794object_id_hash(st_data_t n)
3795{
3796 if (RB_BIGNUM_TYPE_P(n)) {
3797 return FIX2LONG(rb_big_hash(n));
3798 }
3799 else {
3800 return st_numhash(n);
3801 }
3802}
3803static const struct st_hash_type object_id_hash_type = {
3804 object_id_cmp,
3805 object_id_hash,
3806};
3807
3808void
3809Init_heap(void)
3810{
3811 rb_objspace_t *objspace = &rb_objspace;
3812
3813#if defined(INIT_HEAP_PAGE_ALLOC_USE_MMAP)
3814 /* Need to determine if we can use mmap at runtime. */
3815 heap_page_alloc_use_mmap = INIT_HEAP_PAGE_ALLOC_USE_MMAP;
3816#endif
3817
3818 objspace->next_object_id = INT2FIX(OBJ_ID_INITIAL);
3819 objspace->id_to_obj_tbl = st_init_table(&object_id_hash_type);
3820 objspace->obj_to_id_tbl = st_init_numtable();
3821
3822#if RGENGC_ESTIMATE_OLDMALLOC
3823 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
3824#endif
3825
3826 heap_add_pages(objspace, &size_pools[0], SIZE_POOL_EDEN_HEAP(&size_pools[0]), gc_params.heap_init_slots / HEAP_PAGE_OBJ_LIMIT);
3827
3828 /* Give other size pools allocatable pages. */
3829 for (int i = 1; i < SIZE_POOL_COUNT; i++) {
3830 rb_size_pool_t *size_pool = &size_pools[i];
3831 int multiple = size_pool->slot_size / BASE_SLOT_SIZE;
3832 size_pool->allocatable_pages = gc_params.heap_init_slots * multiple / HEAP_PAGE_OBJ_LIMIT;
3833 }
3834 heap_pages_expand_sorted(objspace);
3835
3836 init_mark_stack(&objspace->mark_stack);
3837
3838 objspace->profile.invoke_time = getrusage_time();
3839 finalizer_table = st_init_numtable();
3840}
3841
3842void
3843Init_gc_stress(void)
3844{
3845 rb_objspace_t *objspace = &rb_objspace;
3846
3847 gc_stress_set(objspace, ruby_initial_gc_stress);
3848}
3849
3850typedef int each_obj_callback(void *, void *, size_t, void *);
3851
3852static void objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data, bool protected);
3853static void objspace_reachable_objects_from_root(rb_objspace_t *, void (func)(const char *, VALUE, void *), void *);
3854
3855struct each_obj_data {
3856 rb_objspace_t *objspace;
3857 bool reenable_incremental;
3858
3859 each_obj_callback *callback;
3860 void *data;
3861
3862 struct heap_page **pages[SIZE_POOL_COUNT];
3863 size_t pages_counts[SIZE_POOL_COUNT];
3864};
3865
3866static VALUE
3867objspace_each_objects_ensure(VALUE arg)
3868{
3869 struct each_obj_data *data = (struct each_obj_data *)arg;
3870 rb_objspace_t *objspace = data->objspace;
3871
3872 /* Reenable incremental GC */
3873 if (data->reenable_incremental) {
3874 objspace->flags.dont_incremental = FALSE;
3875 }
3876
3877 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
3878 struct heap_page **pages = data->pages[i];
3879 /* pages could be NULL if an error was raised during setup (e.g.
3880 * malloc failed due to out of memory). */
3881 if (pages) {
3882 free(pages);
3883 }
3884 }
3885
3886 return Qnil;
3887}
3888
3889static VALUE
3890objspace_each_objects_try(VALUE arg)
3891{
3892 struct each_obj_data *data = (struct each_obj_data *)arg;
3893 rb_objspace_t *objspace = data->objspace;
3894
3895 /* Copy pages from all size_pools to their respective buffers. */
3896 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
3897 rb_size_pool_t *size_pool = &size_pools[i];
3898 size_t size = size_mul_or_raise(SIZE_POOL_EDEN_HEAP(size_pool)->total_pages, sizeof(struct heap_page *), rb_eRuntimeError);
3899
3900 struct heap_page **pages = malloc(size);
3901 if (!pages) rb_memerror();
3902
3903 /* Set up pages buffer by iterating over all pages in the current eden
3904 * heap. This will be a snapshot of the state of the heap before we
3905 * call the callback over each page that exists in this buffer. Thus it
3906 * is safe for the callback to allocate objects without possibly entering
3907 * an infinite loop. */
3908 struct heap_page *page = 0;
3909 size_t pages_count = 0;
3910 ccan_list_for_each(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node) {
3911 pages[pages_count] = page;
3912 pages_count++;
3913 }
3914 data->pages[i] = pages;
3915 data->pages_counts[i] = pages_count;
3916 GC_ASSERT(pages_count == SIZE_POOL_EDEN_HEAP(size_pool)->total_pages);
3917 }
3918
3919 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
3920 rb_size_pool_t *size_pool = &size_pools[i];
3921 size_t pages_count = data->pages_counts[i];
3922 struct heap_page **pages = data->pages[i];
3923
3924 struct heap_page *page = ccan_list_top(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, struct heap_page, page_node);
3925 for (size_t i = 0; i < pages_count; i++) {
3926 /* If we have reached the end of the linked list then there are no
3927 * more pages, so break. */
3928 if (page == NULL) break;
3929
3930 /* If this page does not match the one in the buffer, then move to
3931 * the next page in the buffer. */
3932 if (pages[i] != page) continue;
3933
3934 uintptr_t pstart = (uintptr_t)page->start;
3935 uintptr_t pend = pstart + (page->total_slots * size_pool->slot_size);
3936
3937 if (!__asan_region_is_poisoned((void *)pstart, pend - pstart) &&
3938 (*data->callback)((void *)pstart, (void *)pend, size_pool->slot_size, data->data)) {
3939 break;
3940 }
3941
3942 page = ccan_list_next(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node);
3943 }
3944 }
3945
3946 return Qnil;
3947}
3948
3949/*
3950 * rb_objspace_each_objects() is a special C API to walk through the
3951 * Ruby object space. This C API is difficult to use safely; frankly,
3952 * you should not use it unless you have read the source code of this
3953 * function and understand exactly what it does.
3954 *
3955 * 'callback' will be called several times (once per heap page in the
3956 * current implementation) with:
3957 *   vstart: a pointer to the first living object of the heap_page.
3958 *   vend: a pointer just past the valid heap_page area.
3959 *   stride: the distance to the next VALUE.
3960 *
3961 * If callback() returns non-zero, the iteration will be stopped.
3962 *
3963 * This is a sample callback that iterates over live objects:
3964 *
3965 *   int
3966 *   sample_callback(void *vstart, void *vend, int stride, void *data) {
3967 *       for (VALUE v = (VALUE)vstart; v != (VALUE)vend; v += stride) {
3968 *           if (RBASIC(v)->flags) { // liveness check
3969 *               // do something with live object 'v'
3970 *           }
3971 *       }
3972 *       return 0; // continue the iteration
3973 *   }
3974 *
3975 * Note: 'vstart' is not the top of the heap_page. It points to the first
3976 * living object in order to keep hold of at least one object and avoid GC
3977 * issues. This means that you cannot walk through every Ruby object page,
3978 * including pages of freed objects.
3979 *
3980 * Note: 'stride' is the slot size of the page being iterated, which is
3981 *       sizeof(RVALUE) only for the smallest size pool. Because it can
3982 *       differ from call to call, you must use 'stride' rather than a
3983 *       constant value in the iteration.
3984 */
3985void
3986rb_objspace_each_objects(each_obj_callback *callback, void *data)
3987{
3988 objspace_each_objects(&rb_objspace, callback, data, TRUE);
3989}
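/* A minimal usage sketch (illustrative only; count_live_i is a hypothetical
 * callback matching the each_obj_callback typedef above):
 *
 *   static int
 *   count_live_i(void *vstart, void *vend, size_t stride, void *data)
 *   {
 *       size_t *count = (size_t *)data;
 *       for (VALUE v = (VALUE)vstart; v != (VALUE)vend; v += stride) {
 *           if (RBASIC(v)->flags) (*count)++;  // free slots have no flags
 *       }
 *       return 0;  // keep iterating
 *   }
 *
 *   size_t n = 0;
 *   rb_objspace_each_objects(count_live_i, &n);
 */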
3990
3991static void
3992objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data, bool protected)
3993{
3994 /* Disable incremental GC */
3995 bool reenable_incremental = FALSE;
3996 if (protected) {
3997 reenable_incremental = !objspace->flags.dont_incremental;
3998
3999 gc_rest(objspace);
4000 objspace->flags.dont_incremental = TRUE;
4001 }
4002
4003 struct each_obj_data each_obj_data = {
4004 .objspace = objspace,
4005 .reenable_incremental = reenable_incremental,
4006
4007 .callback = callback,
4008 .data = data,
4009
4010 .pages = {NULL},
4011 .pages_counts = {0},
4012 };
4013 rb_ensure(objspace_each_objects_try, (VALUE)&each_obj_data,
4014 objspace_each_objects_ensure, (VALUE)&each_obj_data);
4015}
4016
4017void
4018rb_objspace_each_objects_without_setup(each_obj_callback *callback, void *data)
4019{
4020 objspace_each_objects(&rb_objspace, callback, data, FALSE);
4021}
4022
4023struct os_each_struct {
4024 size_t num;
4025 VALUE of;
4026};
4027
4028static int
4029internal_object_p(VALUE obj)
4030{
4031 RVALUE *p = (RVALUE *)obj;
4032 void *ptr = asan_unpoison_object_temporary(obj);
4033 bool used_p = p->as.basic.flags;
4034
4035 if (used_p) {
4036 switch (BUILTIN_TYPE(obj)) {
4037 case T_NODE:
4038 UNEXPECTED_NODE(internal_object_p);
4039 break;
4040 case T_NONE:
4041 case T_MOVED:
4042 case T_IMEMO:
4043 case T_ICLASS:
4044 case T_ZOMBIE:
4045 break;
4046 case T_CLASS:
4047 if (!p->as.basic.klass) break;
4048 if (FL_TEST(obj, FL_SINGLETON)) {
4049 return rb_singleton_class_internal_p(obj);
4050 }
4051 return 0;
4052 default:
4053 if (!p->as.basic.klass) break;
4054 return 0;
4055 }
4056 }
4057 if (ptr || ! used_p) {
4058 asan_poison_object(obj);
4059 }
4060 return 1;
4061}
4062
4063int
4064rb_objspace_internal_object_p(VALUE obj)
4065{
4066 return internal_object_p(obj);
4067}
4068
4069static int
4070os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
4071{
4072 struct os_each_struct *oes = (struct os_each_struct *)data;
4073
4074 VALUE v = (VALUE)vstart;
4075 for (; v != (VALUE)vend; v += stride) {
4076 if (!internal_object_p(v)) {
4077 if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {
4078 if (!rb_multi_ractor_p() || rb_ractor_shareable_p(v)) {
4079 rb_yield(v);
4080 oes->num++;
4081 }
4082 }
4083 }
4084 }
4085
4086 return 0;
4087}
4088
4089static VALUE
4090os_obj_of(VALUE of)
4091{
4092 struct os_each_struct oes;
4093
4094 oes.num = 0;
4095 oes.of = of;
4096 rb_objspace_each_objects(os_obj_of_i, &oes);
4097 return SIZET2NUM(oes.num);
4098}
4099
4100/*
4101 * call-seq:
4102 * ObjectSpace.each_object([module]) {|obj| ... } -> integer
4103 * ObjectSpace.each_object([module]) -> an_enumerator
4104 *
4105 * Calls the block once for each living, non-immediate object in this
4106 * Ruby process. If <i>module</i> is specified, calls the block only
4107 * for those objects that are instances of <i>module</i> (or of one of
4108 * its subclasses). Returns the number of objects found. Immediate
4109 * objects (<code>Fixnum</code>s, <code>Symbol</code>s,
4110 * <code>true</code>, <code>false</code>, and <code>nil</code>) are
4111 * never returned. In the example below, #each_object returns both
4112 * the numbers we defined and several constants defined in the Math
4113 * module.
4114 *
4115 * If no block is given, an enumerator is returned instead.
4116 *
4117 * a = 102.7
4118 * b = 95 # Won't be returned
4119 * c = 12345678987654321
4120 * count = ObjectSpace.each_object(Numeric) {|x| p x }
4121 * puts "Total count: #{count}"
4122 *
4123 * <em>produces:</em>
4124 *
4125 * 12345678987654321
4126 * 102.7
4127 * 2.71828182845905
4128 * 3.14159265358979
4129 * 2.22044604925031e-16
4130 * 1.7976931348623157e+308
4131 * 2.2250738585072e-308
4132 * Total count: 7
4133 *
4134 */
4135
4136static VALUE
4137os_each_obj(int argc, VALUE *argv, VALUE os)
4138{
4139 VALUE of;
4140
4141 of = (!rb_check_arity(argc, 0, 1) ? 0 : argv[0]);
4142 RETURN_ENUMERATOR(os, 1, &of);
4143 return os_obj_of(of);
4144}
4145
4146/*
4147 * call-seq:
4148 * ObjectSpace.undefine_finalizer(obj)
4149 *
4150 * Removes all finalizers for <i>obj</i>.
4151 *
4152 */
4153
4154static VALUE
4155undefine_final(VALUE os, VALUE obj)
4156{
4157 return rb_undefine_finalizer(obj);
4158}
4159
4160VALUE
4161rb_undefine_finalizer(VALUE obj)
4162{
4163 rb_objspace_t *objspace = &rb_objspace;
4164 st_data_t data = obj;
4165 rb_check_frozen(obj);
4166 st_delete(finalizer_table, &data, 0);
4167 FL_UNSET(obj, FL_FINALIZE);
4168 return obj;
4169}
4170
4171static void
4172should_be_callable(VALUE block)
4173{
4174 if (!rb_obj_respond_to(block, idCall, TRUE)) {
4175 rb_raise(rb_eArgError, "wrong type argument %"PRIsVALUE" (should be callable)",
4176 rb_obj_class(block));
4177 }
4178}
4179
4180static void
4181should_be_finalizable(VALUE obj)
4182{
4183 if (!FL_ABLE(obj)) {
4184 rb_raise(rb_eArgError, "cannot define finalizer for %s",
4185 rb_obj_classname(obj));
4186 }
4187 rb_check_frozen(obj);
4188}
4189
4190/*
4191 * call-seq:
4192 * ObjectSpace.define_finalizer(obj, aProc=proc())
4193 *
4194 * Adds <i>aProc</i> as a finalizer, to be called after <i>obj</i>
4195 * is destroyed. The object ID of <i>obj</i> will be passed
4196 * as an argument to <i>aProc</i>. If <i>aProc</i> is a lambda or
4197 * method, make sure it can be called with a single argument.
4198 *
4199 * The return value is an array <code>[0, aProc]</code>.
4200 *
4201 * The two recommended patterns are to either create the finalizer proc
4202 * in a non-instance method where it can safely capture the needed state,
4203 * or to use a custom callable object that stores the needed state
4204 * explicitly as instance variables.
4205 *
4206 * class Foo
4207 * def initialize(data_needed_for_finalization)
4208 * ObjectSpace.define_finalizer(self, self.class.create_finalizer(data_needed_for_finalization))
4209 * end
4210 *
4211 * def self.create_finalizer(data_needed_for_finalization)
4212 * proc {
4213 * puts "finalizing #{data_needed_for_finalization}"
4214 * }
4215 * end
4216 * end
4217 *
4218 * class Bar
4219 * class Remover
4220 * def initialize(data_needed_for_finalization)
4221 * @data_needed_for_finalization = data_needed_for_finalization
4222 * end
4223 *
4224 * def call(id)
4225 * puts "finalizing #{@data_needed_for_finalization}"
4226 * end
4227 * end
4228 *
4229 * def initialize(data_needed_for_finalization)
4230 * ObjectSpace.define_finalizer(self, Remover.new(data_needed_for_finalization))
4231 * end
4232 * end
4233 *
4234 * Note that if your finalizer references the object to be
4235 * finalized it will never be run on GC, although it will still be
4236 * run at exit. You will get a warning if you capture the object
4237 * to be finalized as the receiver of the finalizer.
4238 *
4239 * class CapturesSelf
4240 * def initialize(name)
4241 * ObjectSpace.define_finalizer(self, proc {
4242 * # this finalizer will only be run on exit
4243 * puts "finalizing #{name}"
4244 * })
4245 * end
4246 * end
4247 *
4248 * Also note that finalization can be unpredictable and is never guaranteed
4249 * to be run except on exit.
4250 */
4251
4252static VALUE
4253define_final(int argc, VALUE *argv, VALUE os)
4254{
4255 VALUE obj, block;
4256
4257 rb_scan_args(argc, argv, "11", &obj, &block);
4258 should_be_finalizable(obj);
4259 if (argc == 1) {
4260 block = rb_block_proc();
4261 }
4262 else {
4263 should_be_callable(block);
4264 }
4265
4266 if (rb_callable_receiver(block) == obj) {
4267 rb_warn("finalizer references object to be finalized");
4268 }
4269
4270 return define_final0(obj, block);
4271}
4272
4273static VALUE
4274define_final0(VALUE obj, VALUE block)
4275{
4276 rb_objspace_t *objspace = &rb_objspace;
4277 VALUE table;
4278 st_data_t data;
4279
4280 RBASIC(obj)->flags |= FL_FINALIZE;
4281
4282 if (st_lookup(finalizer_table, obj, &data)) {
4283 table = (VALUE)data;
4284
4285 /* avoid duplicate block, table is usually small */
4286 {
4287 long len = RARRAY_LEN(table);
4288 long i;
4289
4290 for (i = 0; i < len; i++) {
4291 VALUE recv = RARRAY_AREF(table, i);
4292 if (rb_equal(recv, block)) {
4293 block = recv;
4294 goto end;
4295 }
4296 }
4297 }
4298
4299 rb_ary_push(table, block);
4300 }
4301 else {
4302 table = rb_ary_new3(1, block);
4303 RBASIC_CLEAR_CLASS(table);
4304 st_add_direct(finalizer_table, obj, table);
4305 }
4306 end:
4307 block = rb_ary_new3(2, INT2FIX(0), block);
4308 OBJ_FREEZE(block);
4309 return block;
4310}
4311
4312VALUE
4313rb_define_finalizer(VALUE obj, VALUE block)
4314{
4315 should_be_finalizable(obj);
4316 should_be_callable(block);
4317 return define_final0(obj, block);
4318}
4319
4320void
4321rb_gc_copy_finalizer(VALUE dest, VALUE obj)
4322{
4323 rb_objspace_t *objspace = &rb_objspace;
4324 VALUE table;
4325 st_data_t data;
4326
4327 if (!FL_TEST(obj, FL_FINALIZE)) return;
4328 if (st_lookup(finalizer_table, obj, &data)) {
4329 table = (VALUE)data;
4330 st_insert(finalizer_table, dest, table);
4331 }
4332 FL_SET(dest, FL_FINALIZE);
4333}
4334
4335static VALUE
4336run_single_final(VALUE cmd, VALUE objid)
4337{
4338 return rb_check_funcall(cmd, idCall, 1, &objid);
4339}
4340
4341static void
4342warn_exception_in_finalizer(rb_execution_context_t *ec, VALUE final)
4343{
4344 if (!UNDEF_P(final) && !NIL_P(ruby_verbose)) {
4345 VALUE errinfo = ec->errinfo;
4346 rb_warn("Exception in finalizer %+"PRIsVALUE, final);
4347 rb_ec_error_print(ec, errinfo);
4348 }
4349}
4350
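/* Invoke every finalizer registered for obj in table.  The control frame,
 * stack pointer and errinfo are restored between calls, and a finalizer
 * that raises is skipped (with a warning when $VERBOSE is not nil) so that
 * the remaining finalizers still run. */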
4351static void
4352run_finalizer(rb_objspace_t *objspace, VALUE obj, VALUE table)
4353{
4354 long i;
4355 enum ruby_tag_type state;
4356 volatile struct {
4357 VALUE errinfo;
4358 VALUE objid;
4359 VALUE final;
4360 rb_control_frame_t *cfp;
4361 VALUE *sp;
4362 long finished;
4363 } saved;
4364
4365 rb_execution_context_t * volatile ec = GET_EC();
4366#define RESTORE_FINALIZER() (\
4367 ec->cfp = saved.cfp, \
4368 ec->cfp->sp = saved.sp, \
4369 ec->errinfo = saved.errinfo)
4370
4371 saved.errinfo = ec->errinfo;
4372 saved.objid = rb_obj_id(obj);
4373 saved.cfp = ec->cfp;
4374 saved.sp = ec->cfp->sp;
4375 saved.finished = 0;
4376 saved.final = Qundef;
4377
4378 EC_PUSH_TAG(ec);
4379 state = EC_EXEC_TAG();
4380 if (state != TAG_NONE) {
4381 ++saved.finished; /* skip failed finalizer */
4382 warn_exception_in_finalizer(ec, ATOMIC_VALUE_EXCHANGE(saved.final, Qundef));
4383 }
4384 for (i = saved.finished;
4385 RESTORE_FINALIZER(), i<RARRAY_LEN(table);
4386 saved.finished = ++i) {
4387 run_single_final(saved.final = RARRAY_AREF(table, i), saved.objid);
4388 }
4389 EC_POP_TAG();
4390#undef RESTORE_FINALIZER
4391}
4392
4393static void
4394run_final(rb_objspace_t *objspace, VALUE zombie)
4395{
4396 st_data_t key, table;
4397
4398 if (RZOMBIE(zombie)->dfree) {
4399 RZOMBIE(zombie)->dfree(RZOMBIE(zombie)->data);
4400 }
4401
4402 key = (st_data_t)zombie;
4403 if (st_delete(finalizer_table, &key, &table)) {
4404 run_finalizer(objspace, zombie, (VALUE)table);
4405 }
4406}
4407
4408static void
4409finalize_list(rb_objspace_t *objspace, VALUE zombie)
4410{
4411 while (zombie) {
4412 VALUE next_zombie;
4413 struct heap_page *page;
4414 asan_unpoison_object(zombie, false);
4415 next_zombie = RZOMBIE(zombie)->next;
4416 page = GET_HEAP_PAGE(zombie);
4417
4418 run_final(objspace, zombie);
4419
4420 RB_VM_LOCK_ENTER();
4421 {
4422 GC_ASSERT(BUILTIN_TYPE(zombie) == T_ZOMBIE);
4423 if (FL_TEST(zombie, FL_SEEN_OBJ_ID)) {
4424 obj_free_object_id(objspace, zombie);
4425 }
4426
4427 GC_ASSERT(heap_pages_final_slots > 0);
4428 GC_ASSERT(page->final_slots > 0);
4429
4430 heap_pages_final_slots--;
4431 page->final_slots--;
4432 page->free_slots++;
4433 heap_page_add_freeobj(objspace, page, zombie);
4434 objspace->profile.total_freed_objects++;
4435 }
4436 RB_VM_LOCK_LEAVE();
4437
4438 zombie = next_zombie;
4439 }
4440}
4441
4442static void
4443finalize_deferred_heap_pages(rb_objspace_t *objspace)
4444{
4445 VALUE zombie;
4446 while ((zombie = ATOMIC_VALUE_EXCHANGE(heap_pages_deferred_final, 0)) != 0) {
4447 finalize_list(objspace, zombie);
4448 }
4449}
4450
4451static void
4452finalize_deferred(rb_objspace_t *objspace)
4453{
4454 rb_execution_context_t *ec = GET_EC();
4455 ec->interrupt_mask |= PENDING_INTERRUPT_MASK;
4456 finalize_deferred_heap_pages(objspace);
4457 ec->interrupt_mask &= ~PENDING_INTERRUPT_MASK;
4458}
4459
4460static void
4461gc_finalize_deferred(void *dmy)
4462{
4463 rb_objspace_t *objspace = dmy;
4464 if (ATOMIC_EXCHANGE(finalizing, 1)) return;
4465
4466 finalize_deferred(objspace);
4467 ATOMIC_SET(finalizing, 0);
4468}
4469
4470static void
4471gc_finalize_deferred_register(rb_objspace_t *objspace)
4472{
4473 if (rb_postponed_job_register_one(0, gc_finalize_deferred, objspace) == 0) {
4474 rb_bug("gc_finalize_deferred_register: can't register finalizer.");
4475 }
4476}
4477
4478struct force_finalize_list {
4479 VALUE obj;
4480 VALUE table;
4481 struct force_finalize_list *next;
4482};
4483
4484static int
4485force_chain_object(st_data_t key, st_data_t val, st_data_t arg)
4486{
4487 struct force_finalize_list **prev = (struct force_finalize_list **)arg;
4488 struct force_finalize_list *curr = ALLOC(struct force_finalize_list);
4489 curr->obj = key;
4490 curr->table = val;
4491 curr->next = *prev;
4492 *prev = curr;
4493 return ST_CONTINUE;
4494}
4495
4496bool rb_obj_is_main_ractor(VALUE gv);
4497
4498void
4499rb_objspace_call_finalizer(rb_objspace_t *objspace)
4500{
4501 size_t i;
4502
4503#if RGENGC_CHECK_MODE >= 2
4504 gc_verify_internal_consistency(objspace);
4505#endif
4506 gc_rest(objspace);
4507
4508 if (ATOMIC_EXCHANGE(finalizing, 1)) return;
4509
4510 /* run finalizers */
4511 finalize_deferred(objspace);
4512 GC_ASSERT(heap_pages_deferred_final == 0);
4513
4514 gc_rest(objspace);
4515 /* prohibit incremental GC */
4516 objspace->flags.dont_incremental = 1;
4517
4518 /* force to run finalizer */
4519 while (finalizer_table->num_entries) {
4520 struct force_finalize_list *list = 0;
4521 st_foreach(finalizer_table, force_chain_object, (st_data_t)&list);
4522 while (list) {
4523 struct force_finalize_list *curr = list;
4524 st_data_t obj = (st_data_t)curr->obj;
4525 run_finalizer(objspace, curr->obj, curr->table);
4526 st_delete(finalizer_table, &obj, 0);
4527 list = curr->next;
4528 xfree(curr);
4529 }
4530 }
4531
4532 /* prohibit GC because forced T_DATA finalizers can break object graph consistency */
4533 dont_gc_on();
4534
4535 /* running data/file finalizers is part of garbage collection */
4536 unsigned int lock_lev;
4537 gc_enter(objspace, gc_enter_event_finalizer, &lock_lev);
4538
4539 /* run data/file object's finalizers */
4540 for (i = 0; i < heap_allocated_pages; i++) {
4541 struct heap_page *page = heap_pages_sorted[i];
4542 short stride = page->slot_size;
4543
4544 uintptr_t p = (uintptr_t)page->start;
4545 uintptr_t pend = p + page->total_slots * stride;
4546 for (; p < pend; p += stride) {
4547 VALUE vp = (VALUE)p;
4548 void *poisoned = asan_unpoison_object_temporary(vp);
4549 switch (BUILTIN_TYPE(vp)) {
4550 case T_DATA:
4551 if (!DATA_PTR(p) || !RANY(p)->as.data.dfree) break;
4552 if (rb_obj_is_thread(vp)) break;
4553 if (rb_obj_is_mutex(vp)) break;
4554 if (rb_obj_is_fiber(vp)) break;
4555 if (rb_obj_is_main_ractor(vp)) break;
4556 if (RTYPEDDATA_P(vp)) {
4557 RDATA(p)->dfree = RANY(p)->as.typeddata.type->function.dfree;
4558 }
4559 RANY(p)->as.free.flags = 0;
4560 if (RANY(p)->as.data.dfree == RUBY_DEFAULT_FREE) {
4561 xfree(DATA_PTR(p));
4562 }
4563 else if (RANY(p)->as.data.dfree) {
4564 make_zombie(objspace, vp, RANY(p)->as.data.dfree, RANY(p)->as.data.data);
4565 }
4566 break;
4567 case T_FILE:
4568 if (RANY(p)->as.file.fptr) {
4569 make_io_zombie(objspace, vp);
4570 }
4571 break;
4572 default:
4573 break;
4574 }
4575 if (poisoned) {
4576 GC_ASSERT(BUILTIN_TYPE(vp) == T_NONE);
4577 asan_poison_object(vp);
4578 }
4579 }
4580 }
4581
4582 gc_exit(objspace, gc_enter_event_finalizer, &lock_lev);
4583
4584 finalize_deferred_heap_pages(objspace);
4585
4586 st_free_table(finalizer_table);
4587 finalizer_table = 0;
4588 ATOMIC_SET(finalizing, 0);
4589}
4590
4591static inline int
4592is_swept_object(rb_objspace_t *objspace, VALUE ptr)
4593{
4594 struct heap_page *page = GET_HEAP_PAGE(ptr);
4595 return page->flags.before_sweep ? FALSE : TRUE;
4596}
4597
4598/* garbage objects will be collected soon. */
4599static inline int
4600is_garbage_object(rb_objspace_t *objspace, VALUE ptr)
4601{
4602 if (!is_lazy_sweeping(objspace) ||
4603 is_swept_object(objspace, ptr) ||
4604 MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(ptr), ptr)) {
4605
4606 return FALSE;
4607 }
4608 else {
4609 return TRUE;
4610 }
4611}
4612
4613static inline int
4614is_live_object(rb_objspace_t *objspace, VALUE ptr)
4615{
4616 switch (BUILTIN_TYPE(ptr)) {
4617 case T_NONE:
4618 case T_MOVED:
4619 case T_ZOMBIE:
4620 return FALSE;
4621 default:
4622 break;
4623 }
4624
4625 if (!is_garbage_object(objspace, ptr)) {
4626 return TRUE;
4627 }
4628 else {
4629 return FALSE;
4630 }
4631}
4632
4633static inline int
4634is_markable_object(rb_objspace_t *objspace, VALUE obj)
4635{
4636 if (rb_special_const_p(obj)) return FALSE; /* special const is not markable */
4637 check_rvalue_consistency(obj);
4638 return TRUE;
4639}
4640
4641int
4642rb_objspace_markable_object_p(VALUE obj)
4643{
4644 rb_objspace_t *objspace = &rb_objspace;
4645 return is_markable_object(objspace, obj) && is_live_object(objspace, obj);
4646}
4647
4648int
4649rb_objspace_garbage_object_p(VALUE obj)
4650{
4651 rb_objspace_t *objspace = &rb_objspace;
4652 return is_garbage_object(objspace, obj);
4653}
4654
4655static VALUE
4656id2ref_obj_tbl(rb_objspace_t *objspace, VALUE objid)
4657{
4658 VALUE orig;
4659 if (st_lookup(objspace->id_to_obj_tbl, objid, &orig)) {
4660 return orig;
4661 }
4662 else {
4663 return Qundef;
4664 }
4665}
4666
4667/*
4668 * call-seq:
4669 * ObjectSpace._id2ref(object_id) -> an_object
4670 *
4671 * Converts an object id to a reference to the object. May not be
4672 * called on an object id passed as a parameter to a finalizer.
4673 *
4674 * s = "I am a string" #=> "I am a string"
4675 * r = ObjectSpace._id2ref(s.object_id) #=> "I am a string"
4676 * r == s #=> true
4677 *
4678 * In multi-ractor mode, if the object is not shareable, this method
4679 * raises RangeError.
4680 */
4681
4682static VALUE
4683id2ref(VALUE objid)
4684{
4685#if SIZEOF_LONG == SIZEOF_VOIDP
4686#define NUM2PTR(x) NUM2ULONG(x)
4687#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
4688#define NUM2PTR(x) NUM2ULL(x)
4689#endif
4690 rb_objspace_t *objspace = &rb_objspace;
4691 VALUE ptr;
4692 VALUE orig;
4693 void *p0;
4694
4695 objid = rb_to_int(objid);
4696 if (FIXNUM_P(objid) || rb_big_size(objid) <= SIZEOF_VOIDP) {
4697 ptr = NUM2PTR(objid);
4698 if (ptr == Qtrue) return Qtrue;
4699 if (ptr == Qfalse) return Qfalse;
4700 if (NIL_P(ptr)) return Qnil;
4701 if (FIXNUM_P(ptr)) return (VALUE)ptr;
4702 if (FLONUM_P(ptr)) return (VALUE)ptr;
4703
4704 ptr = obj_id_to_ref(objid);
4705 if ((ptr % sizeof(RVALUE)) == (4 << 2)) {
4706 ID symid = ptr / sizeof(RVALUE);
4707 p0 = (void *)ptr;
4708 if (!rb_static_id_valid_p(symid))
4709 rb_raise(rb_eRangeError, "%p is not symbol id value", p0);
4710 return ID2SYM(symid);
4711 }
4712 }
4713
4714 if (!UNDEF_P(orig = id2ref_obj_tbl(objspace, objid)) &&
4715 is_live_object(objspace, orig)) {
4716
4717 if (!rb_multi_ractor_p() || rb_ractor_shareable_p(orig)) {
4718 return orig;
4719 }
4720 else {
4721 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is id of the unshareable object on multi-ractor", rb_int2str(objid, 10));
4722 }
4723 }
4724
4725 if (rb_int_ge(objid, objspace->next_object_id)) {
4726 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not id value", rb_int2str(objid, 10));
4727 }
4728 else {
4729 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is recycled object", rb_int2str(objid, 10));
4730 }
4731}
4732
4733/* :nodoc: */
4734static VALUE
4735os_id2ref(VALUE os, VALUE objid)
4736{
4737 return id2ref(objid);
4738}
4739
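/* Map obj to its object id.  Special constants encode themselves: a static
 * Symbol becomes sym_id * sizeof(RVALUE) + (4 << 2) (the same offset that
 * id2ref() checks for), while flonums and the other immediates use their
 * VALUE bits directly.  Heap objects are delegated to get_heap_object_id
 * (cached_object_id or nonspecial_obj_id_). */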
4740static VALUE
4741rb_find_object_id(VALUE obj, VALUE (*get_heap_object_id)(VALUE))
4742{
4743 if (STATIC_SYM_P(obj)) {
4744 return (SYM2ID(obj) * sizeof(RVALUE) + (4 << 2)) | FIXNUM_FLAG;
4745 }
4746 else if (FLONUM_P(obj)) {
4747#if SIZEOF_LONG == SIZEOF_VOIDP
4748 return LONG2NUM((SIGNED_VALUE)obj);
4749#else
4750 return LL2NUM((SIGNED_VALUE)obj);
4751#endif
4752 }
4753 else if (SPECIAL_CONST_P(obj)) {
4754 return LONG2NUM((SIGNED_VALUE)obj);
4755 }
4756
4757 return get_heap_object_id(obj);
4758}
4759
4760static VALUE
4761cached_object_id(VALUE obj)
4762{
4763 VALUE id;
4764 rb_objspace_t *objspace = &rb_objspace;
4765
4766 RB_VM_LOCK_ENTER();
4767 if (st_lookup(objspace->obj_to_id_tbl, (st_data_t)obj, &id)) {
4768 GC_ASSERT(FL_TEST(obj, FL_SEEN_OBJ_ID));
4769 }
4770 else {
4771 GC_ASSERT(!FL_TEST(obj, FL_SEEN_OBJ_ID));
4772
4773 id = objspace->next_object_id;
4774 objspace->next_object_id = rb_int_plus(id, INT2FIX(OBJ_ID_INCREMENT));
4775
4776 VALUE already_disabled = rb_gc_disable_no_rest();
4777 st_insert(objspace->obj_to_id_tbl, (st_data_t)obj, (st_data_t)id);
4778 st_insert(objspace->id_to_obj_tbl, (st_data_t)id, (st_data_t)obj);
4779 if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
4780 FL_SET(obj, FL_SEEN_OBJ_ID);
4781 }
4782 RB_VM_LOCK_LEAVE();
4783
4784 return id;
4785}
4786
4787static VALUE
4788nonspecial_obj_id_(VALUE obj)
4789{
4790 return nonspecial_obj_id(obj);
4791}
4792
4793
4794VALUE
4795rb_memory_id(VALUE obj)
4796{
4797 return rb_find_object_id(obj, nonspecial_obj_id_);
4798}
4799
4800/*
4801 * Document-method: __id__
4802 * Document-method: object_id
4803 *
4804 * call-seq:
4805 * obj.__id__ -> integer
4806 * obj.object_id -> integer
4807 *
4808 * Returns an integer identifier for +obj+.
4809 *
4810 * The same number will be returned on all calls to +object_id+ for a given
4811 * object, and no two active objects will share an id.
4812 *
4813 * Note that some objects of builtin classes are reused for optimization.
4814 * This is the case for immediate values and frozen string literals.
4815 *
4816 * BasicObject implements +__id__+, Kernel implements +object_id+.
4817 *
4818 * Immediate values are not passed by reference but are passed by value:
4819 * +nil+, +true+, +false+, Fixnums, Symbols, and some Floats.
4820 *
4821 * Object.new.object_id == Object.new.object_id # => false
4822 * (21 * 2).object_id == (21 * 2).object_id # => true
4823 * "hello".object_id == "hello".object_id # => false
4824 * "hi".freeze.object_id == "hi".freeze.object_id # => true
4825 */
4826
4827VALUE
4828rb_obj_id(VALUE obj)
4829{
4830 /*
4831 * 32-bit VALUE space
4832 * MSB ------------------------ LSB
4833 * false 00000000000000000000000000000000
4834 * true 00000000000000000000000000000010
4835 * nil 00000000000000000000000000000100
4836 * undef 00000000000000000000000000000110
4837 * symbol ssssssssssssssssssssssss00001110
4838 * object oooooooooooooooooooooooooooooo00 = 0 (mod sizeof(RVALUE))
4839 * fixnum fffffffffffffffffffffffffffffff1
4840 *
4841 * object_id space
4842 * LSB
4843 * false 00000000000000000000000000000000
4844 * true 00000000000000000000000000000010
4845 * nil 00000000000000000000000000000100
4846 * undef 00000000000000000000000000000110
4847 * symbol 000SSSSSSSSSSSSSSSSSSSSSSSSSSS0 S...S % A = 4 (S...S = s...s * A + 4)
4848 * object oooooooooooooooooooooooooooooo0 o...o % A = 0
4849 * fixnum fffffffffffffffffffffffffffffff1 bignum if required
4850 *
4851 * where A = sizeof(RVALUE)/4
4852 *
4853 * sizeof(RVALUE) is
4854 * 20 if 32-bit, double is 4-byte aligned
4855 * 24 if 32-bit, double is 8-byte aligned
4856 * 40 if 64-bit
4857 */
4858
4859 return rb_find_object_id(obj, cached_object_id);
4860}
4861
4862static enum rb_id_table_iterator_result
4863cc_table_memsize_i(VALUE ccs_ptr, void *data_ptr)
4864{
4865 size_t *total_size = data_ptr;
4866 struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
4867 *total_size += sizeof(*ccs);
4868 *total_size += sizeof(ccs->entries[0]) * ccs->capa;
4869 return ID_TABLE_CONTINUE;
4870}
4871
4872static size_t
4873cc_table_memsize(struct rb_id_table *cc_table)
4874{
4875 size_t total = rb_id_table_memsize(cc_table);
4876 rb_id_table_foreach_values(cc_table, cc_table_memsize_i, &total);
4877 return total;
4878}
4879
4880static size_t
4881obj_memsize_of(VALUE obj, int use_all_types)
4882{
4883 size_t size = 0;
4884
4885 if (SPECIAL_CONST_P(obj)) {
4886 return 0;
4887 }
4888
4889 if (FL_TEST(obj, FL_EXIVAR)) {
4890 size += rb_generic_ivar_memsize(obj);
4891 }
4892
4893 switch (BUILTIN_TYPE(obj)) {
4894 case T_OBJECT:
4895 if (rb_shape_obj_too_complex(obj)) {
4896 size += rb_st_memsize(ROBJECT_IV_HASH(obj));
4897 }
4898 else if (!(RBASIC(obj)->flags & ROBJECT_EMBED)) {
4899 size += ROBJECT_IV_CAPACITY(obj) * sizeof(VALUE);
4900 }
4901 break;
4902 case T_MODULE:
4903 case T_CLASS:
4904 if (RCLASS_EXT(obj)) {
4905 if (RCLASS_M_TBL(obj)) {
4906 size += rb_id_table_memsize(RCLASS_M_TBL(obj));
4907 }
4908 // class IV sizes are allocated as powers of two
4909 size += SIZEOF_VALUE << bit_length(RCLASS_IV_COUNT(obj));
4910 if (RCLASS_CVC_TBL(obj)) {
4911 size += rb_id_table_memsize(RCLASS_CVC_TBL(obj));
4912 }
4913 if (RCLASS_EXT(obj)->const_tbl) {
4914 size += rb_id_table_memsize(RCLASS_EXT(obj)->const_tbl);
4915 }
4916 if (RCLASS_CC_TBL(obj)) {
4917 size += cc_table_memsize(RCLASS_CC_TBL(obj));
4918 }
4919 if (FL_TEST_RAW(obj, RCLASS_SUPERCLASSES_INCLUDE_SELF)) {
4920 size += (RCLASS_SUPERCLASS_DEPTH(obj) + 1) * sizeof(VALUE);
4921 }
4922#if SIZE_POOL_COUNT == 1
4923 size += sizeof(rb_classext_t);
4924#endif
4925 }
4926 break;
4927 case T_ICLASS:
4928 if (RICLASS_OWNS_M_TBL_P(obj)) {
4929 if (RCLASS_M_TBL(obj)) {
4930 size += rb_id_table_memsize(RCLASS_M_TBL(obj));
4931 }
4932 }
4933 if (RCLASS_EXT(obj) && RCLASS_CC_TBL(obj)) {
4934 size += cc_table_memsize(RCLASS_CC_TBL(obj));
4935 }
4936 break;
4937 case T_STRING:
4938 size += rb_str_memsize(obj);
4939 break;
4940 case T_ARRAY:
4941 size += rb_ary_memsize(obj);
4942 break;
4943 case T_HASH:
4944 if (RHASH_AR_TABLE_P(obj)) {
4945 if (RHASH_AR_TABLE(obj) != NULL) {
4946 size_t rb_hash_ar_table_size(void);
4947 size += rb_hash_ar_table_size();
4948 }
4949 }
4950 else {
4951 VM_ASSERT(RHASH_ST_TABLE(obj) != NULL);
4952 size += st_memsize(RHASH_ST_TABLE(obj));
4953 }
4954 break;
4955 case T_REGEXP:
4956 if (RREGEXP_PTR(obj)) {
4957 size += onig_memsize(RREGEXP_PTR(obj));
4958 }
4959 break;
4960 case T_DATA:
4961 if (use_all_types) size += rb_objspace_data_type_memsize(obj);
4962 break;
4963 case T_MATCH:
4964 if (RMATCH(obj)->rmatch) {
4965 struct rmatch *rm = RMATCH(obj)->rmatch;
4966 size += onig_region_memsize(&rm->regs);
4967 size += sizeof(struct rmatch_offset) * rm->char_offset_num_allocated;
4968 size += sizeof(struct rmatch);
4969 }
4970 break;
4971 case T_FILE:
4972 if (RFILE(obj)->fptr) {
4973 size += rb_io_memsize(RFILE(obj)->fptr);
4974 }
4975 break;
4976 case T_RATIONAL:
4977 case T_COMPLEX:
4978 break;
4979 case T_IMEMO:
4980 size += imemo_memsize(obj);
4981 break;
4982
4983 case T_FLOAT:
4984 case T_SYMBOL:
4985 break;
4986
4987 case T_BIGNUM:
4988 if (!(RBASIC(obj)->flags & BIGNUM_EMBED_FLAG) && BIGNUM_DIGITS(obj)) {
4989 size += BIGNUM_LEN(obj) * sizeof(BDIGIT);
4990 }
4991 break;
4992
4993 case T_NODE:
4994 UNEXPECTED_NODE(obj_memsize_of);
4995 break;
4996
4997 case T_STRUCT:
4998 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
4999 RSTRUCT(obj)->as.heap.ptr) {
5000 size += sizeof(VALUE) * RSTRUCT_LEN(obj);
5001 }
5002 break;
5003
5004 case T_ZOMBIE:
5005 case T_MOVED:
5006 break;
5007
5008 default:
5009 rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",
5010 BUILTIN_TYPE(obj), (void*)obj);
5011 }
5012
5013 return size + rb_gc_obj_slot_size(obj);
5014}
5015
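/* Public wrapper around obj_memsize_of() that includes type-specific sizes
 * such as T_DATA; it is what ObjectSpace.memsize_of in the objspace
 * extension reports. */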
5016size_t
5017rb_obj_memsize_of(VALUE obj)
5018{
5019 return obj_memsize_of(obj, TRUE);
5020}
5021
5022static int
5023set_zero(st_data_t key, st_data_t val, st_data_t arg)
5024{
5025 VALUE k = (VALUE)key;
5026 VALUE hash = (VALUE)arg;
5027 rb_hash_aset(hash, k, INT2FIX(0));
5028 return ST_CONTINUE;
5029}
5030
5031static VALUE
5032type_sym(size_t type)
5033{
5034 switch (type) {
5035#define COUNT_TYPE(t) case (t): return ID2SYM(rb_intern(#t)); break;
5036 COUNT_TYPE(T_NONE);
5037 COUNT_TYPE(T_OBJECT);
5038 COUNT_TYPE(T_CLASS);
5039 COUNT_TYPE(T_MODULE);
5040 COUNT_TYPE(T_FLOAT);
5041 COUNT_TYPE(T_STRING);
5042 COUNT_TYPE(T_REGEXP);
5043 COUNT_TYPE(T_ARRAY);
5044 COUNT_TYPE(T_HASH);
5045 COUNT_TYPE(T_STRUCT);
5046 COUNT_TYPE(T_BIGNUM);
5047 COUNT_TYPE(T_FILE);
5048 COUNT_TYPE(T_DATA);
5049 COUNT_TYPE(T_MATCH);
5050 COUNT_TYPE(T_COMPLEX);
5051 COUNT_TYPE(T_RATIONAL);
5052 COUNT_TYPE(T_NIL);
5053 COUNT_TYPE(T_TRUE);
5054 COUNT_TYPE(T_FALSE);
5055 COUNT_TYPE(T_SYMBOL);
5056 COUNT_TYPE(T_FIXNUM);
5057 COUNT_TYPE(T_IMEMO);
5058 COUNT_TYPE(T_UNDEF);
5059 COUNT_TYPE(T_NODE);
5060 COUNT_TYPE(T_ICLASS);
5061 COUNT_TYPE(T_ZOMBIE);
5062 COUNT_TYPE(T_MOVED);
5063#undef COUNT_TYPE
5064 default: return SIZET2NUM(type); break;
5065 }
5066}
5067
5068/*
5069 * call-seq:
5070 * ObjectSpace.count_objects([result_hash]) -> hash
5071 *
5072 * Counts all objects grouped by type.
5073 *
5074 * It returns a hash, such as:
5075 * {
5076 * :TOTAL=>10000,
5077 * :FREE=>3011,
5078 * :T_OBJECT=>6,
5079 * :T_CLASS=>404,
5080 * # ...
5081 * }
5082 *
5083 * The contents of the returned hash are implementation specific
5084 * and may be changed in the future.
5085 *
5086 * The keys starting with +:T_+ mean live objects.
5087 * For example, +:T_ARRAY+ is the number of arrays.
5088 * +:FREE+ means object slots which are not currently used.
5089 * +:TOTAL+ means the sum of the above.
5090 *
5091 * If the optional argument +result_hash+ is given,
5092 * it is overwritten and returned. This is intended to avoid probe effect.
5093 *
5094 * h = {}
5095 * ObjectSpace.count_objects(h)
5096 * puts h
5097 * # => { :TOTAL=>10000, :T_CLASS=>158280, :T_MODULE=>20672, :T_STRING=>527249 }
5098 *
5099 * This method is only expected to work on C Ruby.
5100 *
5101 */
5102
5103static VALUE
5104count_objects(int argc, VALUE *argv, VALUE os)
5105{
5106 rb_objspace_t *objspace = &rb_objspace;
5107 size_t counts[T_MASK+1];
5108 size_t freed = 0;
5109 size_t total = 0;
5110 size_t i;
5111 VALUE hash = Qnil;
5112
5113 if (rb_check_arity(argc, 0, 1) == 1) {
5114 hash = argv[0];
5115 if (!RB_TYPE_P(hash, T_HASH))
5116 rb_raise(rb_eTypeError, "non-hash given");
5117 }
5118
5119 for (i = 0; i <= T_MASK; i++) {
5120 counts[i] = 0;
5121 }
5122
5123 for (i = 0; i < heap_allocated_pages; i++) {
5124 struct heap_page *page = heap_pages_sorted[i];
5125 short stride = page->slot_size;
5126
5127 uintptr_t p = (uintptr_t)page->start;
5128 uintptr_t pend = p + page->total_slots * stride;
5129 for (;p < pend; p += stride) {
5130 VALUE vp = (VALUE)p;
5131 GC_ASSERT((NUM_IN_PAGE(vp) * BASE_SLOT_SIZE) % page->slot_size == 0);
5132
5133 void *poisoned = asan_unpoison_object_temporary(vp);
5134 if (RANY(p)->as.basic.flags) {
5135 counts[BUILTIN_TYPE(vp)]++;
5136 }
5137 else {
5138 freed++;
5139 }
5140 if (poisoned) {
5141 GC_ASSERT(BUILTIN_TYPE(vp) == T_NONE);
5142 asan_poison_object(vp);
5143 }
5144 }
5145 total += page->total_slots;
5146 }
5147
5148 if (NIL_P(hash)) {
5149 hash = rb_hash_new();
5150 }
5151 else if (!RHASH_EMPTY_P(hash)) {
5152 rb_hash_stlike_foreach(hash, set_zero, hash);
5153 }
5154 rb_hash_aset(hash, ID2SYM(rb_intern("TOTAL")), SIZET2NUM(total));
5155 rb_hash_aset(hash, ID2SYM(rb_intern("FREE")), SIZET2NUM(freed));
5156
5157 for (i = 0; i <= T_MASK; i++) {
5158 VALUE type = type_sym(i);
5159 if (counts[i])
5160 rb_hash_aset(hash, type, SIZET2NUM(counts[i]));
5161 }
5162
5163 return hash;
5164}
5165
5166/*
5167 ------------------------ Garbage Collection ------------------------
5168*/
5169
5170/* Sweeping */
5171
5172static size_t
5173objspace_available_slots(rb_objspace_t *objspace)
5174{
5175 size_t total_slots = 0;
5176 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5177 rb_size_pool_t *size_pool = &size_pools[i];
5178 total_slots += SIZE_POOL_EDEN_HEAP(size_pool)->total_slots;
5179 total_slots += SIZE_POOL_TOMB_HEAP(size_pool)->total_slots;
5180 }
5181 return total_slots;
5182}
5183
5184static size_t
5185objspace_live_slots(rb_objspace_t *objspace)
5186{
5187 return (objspace->total_allocated_objects - objspace->profile.total_freed_objects) - heap_pages_final_slots;
5188}
5189
5190static size_t
5191objspace_free_slots(rb_objspace_t *objspace)
5192{
5193 return objspace_available_slots(objspace) - objspace_live_slots(objspace) - heap_pages_final_slots;
5194}
5195
5196static void
5197gc_setup_mark_bits(struct heap_page *page)
5198{
5199 /* copy oldgen bitmap to mark bitmap */
5200 memcpy(&page->mark_bits[0], &page->uncollectible_bits[0], HEAP_PAGE_BITMAP_SIZE);
5201}
5202
5203static int gc_is_moveable_obj(rb_objspace_t *objspace, VALUE obj);
5204static VALUE gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free, size_t src_slot_size, size_t slot_size);
5205
5206#if defined(_WIN32)
5207enum {HEAP_PAGE_LOCK = PAGE_NOACCESS, HEAP_PAGE_UNLOCK = PAGE_READWRITE};
5208
5209static BOOL
5210protect_page_body(struct heap_page_body *body, DWORD protect)
5211{
5212 DWORD old_protect;
5213 return VirtualProtect(body, HEAP_PAGE_SIZE, protect, &old_protect) != 0;
5214}
5215#else
5216enum {HEAP_PAGE_LOCK = PROT_NONE, HEAP_PAGE_UNLOCK = PROT_READ | PROT_WRITE};
5217#define protect_page_body(body, protect) !mprotect((body), HEAP_PAGE_SIZE, (protect))
5218#endif
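/* Read barrier used during compaction: pages that may still hold moved
 * objects are protected with HEAP_PAGE_LOCK (PROT_NONE / PAGE_NOACCESS).
 * Touching such a page faults into read_barrier_handler() below, which
 * unprotects the page and invalidates the moved objects on it. */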
5219
5220static void
5221lock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
5222{
5223 if (!protect_page_body(body, HEAP_PAGE_LOCK)) {
5224 rb_bug("Couldn't protect page %p, errno: %s", (void *)body, strerror(errno));
5225 }
5226 else {
5227 gc_report(5, objspace, "Protecting page in move %p\n", (void *)body);
5228 }
5229}
5230
5231static void
5232unlock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
5233{
5234 if (!protect_page_body(body, HEAP_PAGE_UNLOCK)) {
5235 rb_bug("Couldn't unprotect page %p, errno: %s", (void *)body, strerror(errno));
5236 }
5237 else {
5238 gc_report(5, objspace, "Unprotecting page in move %p\n", (void *)body);
5239 }
5240}
5241
5242static bool
5243try_move(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *free_page, VALUE src)
5244{
5245 GC_ASSERT(gc_is_moveable_obj(objspace, src));
5246
5247 struct heap_page *src_page = GET_HEAP_PAGE(src);
5248 if (!free_page) {
5249 return false;
5250 }
5251
5252 /* We should return true if either src is successfully moved, or src is
5253 * unmoveable. A false return will cause the sweeping cursor to be
5254 * incremented to the next page, and src will attempt to move again */
5255 GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(src), src));
5256
5257 asan_unlock_freelist(free_page);
5258 VALUE dest = (VALUE)free_page->freelist;
5259 asan_lock_freelist(free_page);
5260 asan_unpoison_object(dest, false);
5261 if (!dest) {
5262 /* if we can't get something from the freelist then the page must be
5263 * full */
5264 return false;
5265 }
5266 free_page->freelist = RANY(dest)->as.free.next;
5267
5268 GC_ASSERT(RB_BUILTIN_TYPE(dest) == T_NONE);
5269
5270 if (src_page->slot_size > free_page->slot_size) {
5271 objspace->rcompactor.moved_down_count_table[BUILTIN_TYPE(src)]++;
5272 }
5273 else if (free_page->slot_size > src_page->slot_size) {
5274 objspace->rcompactor.moved_up_count_table[BUILTIN_TYPE(src)]++;
5275 }
5276 objspace->rcompactor.moved_count_table[BUILTIN_TYPE(src)]++;
5277 objspace->rcompactor.total_moved++;
5278
5279 gc_move(objspace, src, dest, src_page->slot_size, free_page->slot_size);
5280 gc_pin(objspace, src);
5281 free_page->free_slots--;
5282
5283 return true;
5284}
5285
5286static void
5287gc_unprotect_pages(rb_objspace_t *objspace, rb_heap_t *heap)
5288{
5289 struct heap_page *cursor = heap->compact_cursor;
5290
5291 while (cursor) {
5292 unlock_page_body(objspace, GET_PAGE_BODY(cursor->start));
5293 cursor = ccan_list_next(&heap->pages, cursor, page_node);
5294 }
5295}
5296
5297static void gc_update_references(rb_objspace_t * objspace);
5298static void invalidate_moved_page(rb_objspace_t *objspace, struct heap_page *page);
5299
5300#ifndef GC_CAN_COMPILE_COMPACTION
5301#if defined(__wasi__) /* WebAssembly doesn't support signals */
5302# define GC_CAN_COMPILE_COMPACTION 0
5303#else
5304# define GC_CAN_COMPILE_COMPACTION 1
5305#endif
5306#endif
5307
5308#if defined(__MINGW32__) || defined(_WIN32)
5309# define GC_COMPACTION_SUPPORTED 1
5310#else
5311/* If we are not on MinGW or Windows and do not have mmap, we cannot use
5312 * mprotect for the read barrier, so we must disable compaction. */
5313# define GC_COMPACTION_SUPPORTED (GC_CAN_COMPILE_COMPACTION && HEAP_PAGE_ALLOC_USE_MMAP)
5314#endif
5315
5316#if GC_CAN_COMPILE_COMPACTION
5317static void
5318read_barrier_handler(uintptr_t original_address)
5319{
5320 VALUE obj;
5321 rb_objspace_t * objspace = &rb_objspace;
5322
5323 /* Calculate address aligned to slots. */
5324 uintptr_t address = original_address - (original_address % BASE_SLOT_SIZE);
5325
5326 obj = (VALUE)address;
5327
5328 struct heap_page_body *page_body = GET_PAGE_BODY(obj);
5329
5330 /* If the page_body is NULL, then mprotect cannot handle it and will crash
5331 * with "Cannot allocate memory". */
5332 if (page_body == NULL) {
5333 rb_bug("read_barrier_handler: segmentation fault at %p", (void *)original_address);
5334 }
5335
5336 RB_VM_LOCK_ENTER();
5337 {
5338 unlock_page_body(objspace, page_body);
5339
5340 objspace->profile.read_barrier_faults++;
5341
5342 invalidate_moved_page(objspace, GET_HEAP_PAGE(obj));
5343 }
5344 RB_VM_LOCK_LEAVE();
5345}
5346#endif
5347
5348#if !GC_CAN_COMPILE_COMPACTION
5349static void
5350uninstall_handlers(void)
5351{
5352 /* no-op */
5353}
5354
5355static void
5356install_handlers(void)
5357{
5358 /* no-op */
5359}
5360#elif defined(_WIN32)
5361static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;
5362typedef void (*signal_handler)(int);
5363static signal_handler old_sigsegv_handler;
5364
5365static LONG WINAPI
5366read_barrier_signal(EXCEPTION_POINTERS * info)
5367{
5368 /* EXCEPTION_ACCESS_VIOLATION is what's raised by access to protected pages */
5369 if (info->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) {
5370 /* > The second array element specifies the virtual address of the inaccessible data.
5371 * https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-exception_record
5372 *
5373 * Use this address to invalidate the page */
5374 read_barrier_handler((uintptr_t)info->ExceptionRecord->ExceptionInformation[1]);
5375 return EXCEPTION_CONTINUE_EXECUTION;
5376 }
5377 else {
5378 return EXCEPTION_CONTINUE_SEARCH;
5379 }
5380}
5381
5382static void
5383uninstall_handlers(void)
5384{
5385 signal(SIGSEGV, old_sigsegv_handler);
5386 SetUnhandledExceptionFilter(old_handler);
5387}
5388
5389static void
5390install_handlers(void)
5391{
5392 /* Remove SEGV handler so that the Unhandled Exception Filter handles it */
5393 old_sigsegv_handler = signal(SIGSEGV, NULL);
5394 /* Unhandled Exception Filter has access to the violation address similar
5395 * to si_addr from sigaction */
5396 old_handler = SetUnhandledExceptionFilter(read_barrier_signal);
5397}
5398#else
5399static struct sigaction old_sigbus_handler;
5400static struct sigaction old_sigsegv_handler;
5401
5402#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
5403static exception_mask_t old_exception_masks[32];
5404static mach_port_t old_exception_ports[32];
5405static exception_behavior_t old_exception_behaviors[32];
5406static thread_state_flavor_t old_exception_flavors[32];
5407static mach_msg_type_number_t old_exception_count;
5408
5409static void
5410disable_mach_bad_access_exc(void)
5411{
5412 old_exception_count = sizeof(old_exception_masks) / sizeof(old_exception_masks[0]);
5413 task_swap_exception_ports(
5414 mach_task_self(), EXC_MASK_BAD_ACCESS,
5415 MACH_PORT_NULL, EXCEPTION_DEFAULT, 0,
5416 old_exception_masks, &old_exception_count,
5417 old_exception_ports, old_exception_behaviors, old_exception_flavors
5418 );
5419}
5420
5421static void
5422restore_mach_bad_access_exc(void)
5423{
5424 for (mach_msg_type_number_t i = 0; i < old_exception_count; i++) {
5425 task_set_exception_ports(
5426 mach_task_self(),
5427 old_exception_masks[i], old_exception_ports[i],
5428 old_exception_behaviors[i], old_exception_flavors[i]
5429 );
5430 }
5431}
5432#endif
5433
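/* While the barrier handler runs, the previously installed SIGBUS/SIGSEGV
 * handlers are reinstated and the signals unblocked, so that a genuine
 * fault inside the handler is not re-caught by this handler. */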
5434static void
5435read_barrier_signal(int sig, siginfo_t * info, void * data)
5436{
5437 // setup SEGV/BUS handlers for errors
5438 struct sigaction prev_sigbus, prev_sigsegv;
5439 sigaction(SIGBUS, &old_sigbus_handler, &prev_sigbus);
5440 sigaction(SIGSEGV, &old_sigsegv_handler, &prev_sigsegv);
5441
5442 // enable SIGBUS/SEGV
5443 sigset_t set, prev_set;
5444 sigemptyset(&set);
5445 sigaddset(&set, SIGBUS);
5446 sigaddset(&set, SIGSEGV);
5447 sigprocmask(SIG_UNBLOCK, &set, &prev_set);
5448#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
5449 disable_mach_bad_access_exc();
5450#endif
5451 // run handler
5452 read_barrier_handler((uintptr_t)info->si_addr);
5453
5454 // reset SEGV/BUS handlers
5455#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
5456 restore_mach_bad_access_exc();
5457#endif
5458 sigaction(SIGBUS, &prev_sigbus, NULL);
5459 sigaction(SIGSEGV, &prev_sigsegv, NULL);
5460 sigprocmask(SIG_SETMASK, &prev_set, NULL);
5461}
5462
5463static void
5464uninstall_handlers(void)
5465{
5466#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
5467 restore_mach_bad_access_exc();
5468#endif
5469 sigaction(SIGBUS, &old_sigbus_handler, NULL);
5470 sigaction(SIGSEGV, &old_sigsegv_handler, NULL);
5471}
5472
5473static void
5474install_handlers(void)
5475{
5476 struct sigaction action;
5477 memset(&action, 0, sizeof(struct sigaction));
5478 sigemptyset(&action.sa_mask);
5479 action.sa_sigaction = read_barrier_signal;
5480 action.sa_flags = SA_SIGINFO | SA_ONSTACK;
5481
5482 sigaction(SIGBUS, &action, &old_sigbus_handler);
5483 sigaction(SIGSEGV, &action, &old_sigsegv_handler);
5484#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
5485 disable_mach_bad_access_exc();
5486#endif
5487}
5488#endif
5489
5490static void
5491revert_stack_objects(VALUE stack_obj, void *ctx)
5492{
5493 rb_objspace_t * objspace = (rb_objspace_t*)ctx;
5494
5495 if (BUILTIN_TYPE(stack_obj) == T_MOVED) {
5496 /* For now we'll revert the whole page if the object made it to the
5497 * stack. I think we can change this to move just the one object
5498 * back though */
5499 invalidate_moved_page(objspace, GET_HEAP_PAGE(stack_obj));
5500 }
5501}
5502
5503static void
5504revert_machine_stack_references(rb_objspace_t *objspace, VALUE v)
5505{
5506 if (is_pointer_to_heap(objspace, (void *)v)) {
5507 if (BUILTIN_TYPE(v) == T_MOVED) {
5508 /* For now we'll revert the whole page if the object made it to the
5509 * stack. I think we can change this to move just the one object
5510 * back though */
5511 invalidate_moved_page(objspace, GET_HEAP_PAGE(v));
5512 }
5513 }
5514}
5515
5516static void each_machine_stack_value(const rb_execution_context_t *ec, void (*cb)(rb_objspace_t *, VALUE));
5517
5518static void
5519check_stack_for_moved(rb_objspace_t *objspace)
5520{
5521 rb_execution_context_t *ec = GET_EC();
5522 rb_vm_t *vm = rb_ec_vm_ptr(ec);
5523 rb_vm_each_stack_value(vm, revert_stack_objects, (void*)objspace);
5524 each_machine_stack_value(ec, revert_machine_stack_references);
5525}
5526
5527static void gc_mode_transition(rb_objspace_t *objspace, enum gc_mode mode);
5528
5529static void
5530gc_compact_finish(rb_objspace_t *objspace)
5531{
5532 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5533 rb_size_pool_t *size_pool = &size_pools[i];
5534 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5535 gc_unprotect_pages(objspace, heap);
5536 }
5537
5538 uninstall_handlers();
5539
5540 /* The mutator is allowed to run during incremental sweeping. T_MOVED
5541 * objects can get pushed on the stack and when the compaction process
5542 * finishes up, it may remove the read barrier before anything has a
5543 * chance to read from the T_MOVED address. To fix this, we scan the stack
5544 * then revert any moved objects that made it to the stack. */
5545 check_stack_for_moved(objspace);
5546
5547 gc_update_references(objspace);
5548 objspace->profile.compact_count++;
5549
5550 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5551 rb_size_pool_t *size_pool = &size_pools[i];
5552 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5553 heap->compact_cursor = NULL;
5554 heap->free_pages = NULL;
5555 heap->compact_cursor_index = 0;
5556 }
5557
5558 if (gc_prof_enabled(objspace)) {
5559 gc_profile_record *record = gc_prof_record(objspace);
5560 record->moved_objects = objspace->rcompactor.total_moved - record->moved_objects;
5561 }
5562 objspace->flags.during_compacting = FALSE;
5563}
5564
5565struct gc_sweep_context {
5566 struct heap_page *page;
5567 int final_slots;
5568 int freed_slots;
5569 int empty_slots;
5570};
5571
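/* Sweep the slots covered by one bitmap word ("plane").  bitset has a 1 bit
 * for every unmarked slot; each such slot is either freed and returned to
 * the page freelist, turned into a zombie awaiting finalization, or counted
 * as already empty (T_NONE / stale T_MOVED). */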
5572static inline void
5573gc_sweep_plane(rb_objspace_t *objspace, rb_heap_t *heap, uintptr_t p, bits_t bitset, struct gc_sweep_context *ctx)
5574{
5575 struct heap_page * sweep_page = ctx->page;
5576 short slot_size = sweep_page->slot_size;
5577 short slot_bits = slot_size / BASE_SLOT_SIZE;
5578 GC_ASSERT(slot_bits > 0);
5579
5580 do {
5581 VALUE vp = (VALUE)p;
5582 GC_ASSERT(vp % BASE_SLOT_SIZE == 0);
5583
5584 asan_unpoison_object(vp, false);
5585 if (bitset & 1) {
5586 switch (BUILTIN_TYPE(vp)) {
5587 default: /* majority case */
5588 gc_report(2, objspace, "page_sweep: free %p\n", (void *)p);
5589#if RGENGC_CHECK_MODE
5590 if (!is_full_marking(objspace)) {
5591 if (RVALUE_OLD_P(vp)) rb_bug("page_sweep: %p - old while minor GC.", (void *)p);
5592 if (rgengc_remembered_sweep(objspace, vp)) rb_bug("page_sweep: %p - remembered.", (void *)p);
5593 }
5594#endif
5595 if (obj_free(objspace, vp)) {
5596 // always add free slots back to the swept page's freelist,
5597 // so that if we're compacting, we can re-use the slots
5598 (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, BASE_SLOT_SIZE);
5599 heap_page_add_freeobj(objspace, sweep_page, vp);
5600 gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(vp));
5601 ctx->freed_slots++;
5602 }
5603 else {
5604 ctx->final_slots++;
5605 }
5606 break;
5607
5608 case T_MOVED:
5609 if (objspace->flags.during_compacting) {
5610 /* The sweep cursor shouldn't have made it to any
5611 * T_MOVED slots while the compact flag is enabled.
5612 * The sweep cursor and compact cursor move in
5613 * opposite directions, and when they meet references will
5614 * get updated and "during_compacting" should get disabled */
5615 rb_bug("T_MOVED shouldn't be seen until compaction is finished\n");
5616 }
5617 gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(vp));
5618 ctx->empty_slots++;
5619 heap_page_add_freeobj(objspace, sweep_page, vp);
5620 break;
5621 case T_ZOMBIE:
5622 /* already counted */
5623 break;
5624 case T_NONE:
5625 ctx->empty_slots++; /* already freed */
5626 break;
5627 }
5628 }
5629 p += slot_size;
5630 bitset >>= slot_bits;
5631 } while (bitset);
5632}
5633
5634static inline void
5635gc_sweep_page(rb_objspace_t *objspace, rb_heap_t *heap, struct gc_sweep_context *ctx)
5636{
5637 struct heap_page *sweep_page = ctx->page;
5638 GC_ASSERT(SIZE_POOL_EDEN_HEAP(sweep_page->size_pool) == heap);
5639
5640 uintptr_t p;
5641 bits_t *bits, bitset;
5642
5643 gc_report(2, objspace, "page_sweep: start.\n");
5644
5645#if RGENGC_CHECK_MODE
5646 if (!objspace->flags.immediate_sweep) {
5647 GC_ASSERT(sweep_page->flags.before_sweep == TRUE);
5648 }
5649#endif
5650 sweep_page->flags.before_sweep = FALSE;
5651 sweep_page->free_slots = 0;
5652
5653 p = (uintptr_t)sweep_page->start;
5654 bits = sweep_page->mark_bits;
5655
5656 int page_rvalue_count = sweep_page->total_slots * (sweep_page->slot_size / BASE_SLOT_SIZE);
5657 int out_of_range_bits = (NUM_IN_PAGE(p) + page_rvalue_count) % BITS_BITLENGTH;
5658 if (out_of_range_bits != 0) { // sizeof(RVALUE) == 64
5659 bits[BITMAP_INDEX(p) + page_rvalue_count / BITS_BITLENGTH] |= ~(((bits_t)1 << out_of_range_bits) - 1);
5660 }
5661
5662 /* The last bitmap plane may not be used if the last plane does not
5663 * have enough space for the slot_size. In that case, the last plane must
5664 * be skipped since none of the bits will be set. */
5665 int bitmap_plane_count = CEILDIV(NUM_IN_PAGE(p) + page_rvalue_count, BITS_BITLENGTH);
5666 GC_ASSERT(bitmap_plane_count == HEAP_PAGE_BITMAP_LIMIT - 1 ||
5667 bitmap_plane_count == HEAP_PAGE_BITMAP_LIMIT);
5668
5669 // Skip out of range slots at the head of the page
5670 bitset = ~bits[0];
5671 bitset >>= NUM_IN_PAGE(p);
5672 if (bitset) {
5673 gc_sweep_plane(objspace, heap, p, bitset, ctx);
5674 }
5675 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
5676
5677 for (int i = 1; i < bitmap_plane_count; i++) {
5678 bitset = ~bits[i];
5679 if (bitset) {
5680 gc_sweep_plane(objspace, heap, p, bitset, ctx);
5681 }
5682 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
5683 }
5684
5685 if (!heap->compact_cursor) {
5686 gc_setup_mark_bits(sweep_page);
5687 }
5688
5689#if GC_PROFILE_MORE_DETAIL
5690 if (gc_prof_enabled(objspace)) {
5691 gc_profile_record *record = gc_prof_record(objspace);
5692 record->removing_objects += ctx->final_slots + ctx->freed_slots;
5693 record->empty_objects += ctx->empty_slots;
5694 }
5695#endif
5696 if (0) fprintf(stderr, "gc_sweep_page(%"PRIdSIZE"): total_slots: %d, freed_slots: %d, empty_slots: %d, final_slots: %d\n",
5697 rb_gc_count(),
5698 sweep_page->total_slots,
5699 ctx->freed_slots, ctx->empty_slots, ctx->final_slots);
5700
5701 sweep_page->free_slots += ctx->freed_slots + ctx->empty_slots;
5702 objspace->profile.total_freed_objects += ctx->freed_slots;
5703
5704 if (heap_pages_deferred_final && !finalizing) {
5705 rb_thread_t *th = GET_THREAD();
5706 if (th) {
5707 gc_finalize_deferred_register(objspace);
5708 }
5709 }
5710
5711#if RGENGC_CHECK_MODE
5712 short freelist_len = 0;
5713 asan_unlock_freelist(sweep_page);
5714 RVALUE *ptr = sweep_page->freelist;
5715 while (ptr) {
5716 freelist_len++;
5717 ptr = ptr->as.free.next;
5718 }
5719 asan_lock_freelist(sweep_page);
5720 if (freelist_len != sweep_page->free_slots) {
5721 rb_bug("inconsistent freelist length: expected %d but was %d", sweep_page->free_slots, freelist_len);
5722 }
5723#endif
5724
5725 gc_report(2, objspace, "page_sweep: end.\n");
5726}
5727
5728#if !USE_RVARGC
5729/* allocate additional minimum page to work */
5730static void
5731gc_heap_prepare_minimum_pages(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
5732{
5733 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5734 if (!heap->free_pages && heap_increment(objspace, size_pool, heap) == FALSE) {
5735 /* there is no free after page_sweep() */
5736 size_pool_allocatable_pages_set(objspace, size_pool, 1);
5737 if (!heap_increment(objspace, size_pool, heap)) { /* can't allocate additional free objects */
5738 rb_memerror();
5739 }
5740 }
5741 }
5742}
5743#endif
5744
5745static const char *
5746gc_mode_name(enum gc_mode mode)
5747{
5748 switch (mode) {
5749 case gc_mode_none: return "none";
5750 case gc_mode_marking: return "marking";
5751 case gc_mode_sweeping: return "sweeping";
5752 case gc_mode_compacting: return "compacting";
5753 default: rb_bug("gc_mode_name: unknown mode: %d", (int)mode);
5754 }
5755}
5756
5757static void
5758gc_mode_transition(rb_objspace_t *objspace, enum gc_mode mode)
5759{
5760#if RGENGC_CHECK_MODE
5761 enum gc_mode prev_mode = gc_mode(objspace);
5762 switch (prev_mode) {
5763 case gc_mode_none: GC_ASSERT(mode == gc_mode_marking); break;
5764 case gc_mode_marking: GC_ASSERT(mode == gc_mode_sweeping); break;
5765 case gc_mode_sweeping: GC_ASSERT(mode == gc_mode_none || mode == gc_mode_compacting); break;
5766 case gc_mode_compacting: GC_ASSERT(mode == gc_mode_none); break;
5767 }
5768#endif
5769 if (0) fprintf(stderr, "gc_mode_transition: %s->%s\n", gc_mode_name(gc_mode(objspace)), gc_mode_name(mode));
5770 gc_mode_set(objspace, mode);
5771}
5772
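/* Append `freelist` to the tail of the page's existing freelist (or install it
 * directly when the page has none), unpoisoning each free slot just long
 * enough to walk the list under ASAN. */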
5773static void
5774heap_page_freelist_append(struct heap_page *page, RVALUE *freelist)
5775{
5776 if (freelist) {
5777 asan_unlock_freelist(page);
5778 if (page->freelist) {
5779 RVALUE *p = page->freelist;
5780 asan_unpoison_object((VALUE)p, false);
5781 while (p->as.free.next) {
5782 RVALUE *prev = p;
5783 p = p->as.free.next;
5784 asan_poison_object((VALUE)prev);
5785 asan_unpoison_object((VALUE)p, false);
5786 }
5787 p->as.free.next = freelist;
5788 asan_poison_object((VALUE)p);
5789 }
5790 else {
5791 page->freelist = freelist;
5792 }
5793 asan_lock_freelist(page);
5794 }
5795}
5796
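/* Reset a heap's sweep state: restart sweeping from the first page, clear the
 * free (and pooled) page lists, and, when sweeping lazily, flag every page as
 * not yet swept. */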
5797static void
5798gc_sweep_start_heap(rb_objspace_t *objspace, rb_heap_t *heap)
5799{
5800 heap->sweeping_page = ccan_list_top(&heap->pages, struct heap_page, page_node);
5801 heap->free_pages = NULL;
5802#if GC_ENABLE_INCREMENTAL_MARK
5803 heap->pooled_pages = NULL;
5804#endif
5805 if (!objspace->flags.immediate_sweep) {
5806 struct heap_page *page = NULL;
5807
5808 ccan_list_for_each(&heap->pages, page, page_node) {
5809 page->flags.before_sweep = TRUE;
5810 }
5811 }
5812}
5813
5814#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 4
5815__attribute__((noinline))
5816#endif
5817static void
5818gc_sweep_start(rb_objspace_t *objspace)
5819{
5820 gc_mode_transition(objspace, gc_mode_sweeping);
5821
5822#if GC_ENABLE_INCREMENTAL_MARK
5823 objspace->rincgc.pooled_slots = 0;
5824#endif
5825
5826 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5827 rb_size_pool_t *size_pool = &size_pools[i];
5828 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5829
5830 gc_sweep_start_heap(objspace, heap);
5831
5832#if USE_RVARGC
5833 /* We should call gc_sweep_finish_size_pool for size pools with no pages. */
5834 if (heap->sweeping_page == NULL) {
5835 GC_ASSERT(heap->total_pages == 0);
5836 GC_ASSERT(heap->total_slots == 0);
5837 gc_sweep_finish_size_pool(objspace, size_pool);
5838 }
5839#endif
5840 }
5841
5842 rb_ractor_t *r = NULL;
5843 ccan_list_for_each(&GET_VM()->ractor.set, r, vmlr_node) {
5844 rb_gc_ractor_newobj_cache_clear(&r->newobj_cache);
5845 }
5846}
5847
5848#if USE_RVARGC
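/* Finish sweeping for one size pool: resurrect tomb pages when too few slots
 * were swept, make sure small pools keep some allocatable pages, and decide
 * whether the heap should grow or a major GC should be requested. */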
5849static void
5850gc_sweep_finish_size_pool(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
5851{
5852 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5853 size_t total_slots = heap->total_slots + SIZE_POOL_TOMB_HEAP(size_pool)->total_slots;
5854 size_t total_pages = heap->total_pages + SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
5855 size_t swept_slots = size_pool->freed_slots + size_pool->empty_slots;
5856
5857 size_t min_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_min_ratio);
5858
5859 /* If we don't have enough slots and we have pages on the tomb heap, move
5860 * pages from the tomb heap to the eden heap. This may prevent page
5861      * creation thrashing (frequently allocating and deallocating pages) and
5862 * GC thrashing (running GC more frequently than required). */
5863 struct heap_page *resurrected_page;
5864 while ((swept_slots < min_free_slots || swept_slots < gc_params.heap_init_slots) &&
5865 (resurrected_page = heap_page_resurrect(objspace, size_pool))) {
5866 swept_slots += resurrected_page->free_slots;
5867
5868 heap_add_page(objspace, size_pool, heap, resurrected_page);
5869 heap_add_freepage(heap, resurrected_page);
5870 }
5871
5872 /* Some size pools may have very few pages (or even no pages). These size pools
5873 * should still have allocatable pages. */
5874 if (min_free_slots < gc_params.heap_init_slots && swept_slots < gc_params.heap_init_slots) {
5875 int multiple = size_pool->slot_size / BASE_SLOT_SIZE;
5876 size_t extra_slots = gc_params.heap_init_slots - swept_slots;
5877 size_t extend_page_count = CEILDIV(extra_slots * multiple, HEAP_PAGE_OBJ_LIMIT);
5878 if (extend_page_count > size_pool->allocatable_pages) {
5879 size_pool_allocatable_pages_set(objspace, size_pool, extend_page_count);
5880 }
5881 }
5882
5883 if (swept_slots < min_free_slots) {
5884 bool grow_heap = is_full_marking(objspace);
5885
5886 if (!is_full_marking(objspace)) {
5887             /* The heap is a growth heap if it freed more slots than it had
5888              * empty slots and has used up all of its allocatable pages. */
5889 bool is_growth_heap = (size_pool->empty_slots == 0 ||
5890 size_pool->freed_slots > size_pool->empty_slots) &&
5891 size_pool->allocatable_pages == 0;
5892
5893 if (objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) {
5894 grow_heap = TRUE;
5895 }
5896 else if (is_growth_heap) { /* Only growth heaps are allowed to start a major GC. */
5897 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_NOFREE;
5898 size_pool->force_major_gc_count++;
5899 }
5900 }
5901
5902 if (grow_heap) {
5903 size_t extend_page_count = heap_extend_pages(objspace, size_pool, swept_slots, total_slots, total_pages);
5904
5905 if (extend_page_count > size_pool->allocatable_pages) {
5906 size_pool_allocatable_pages_set(objspace, size_pool, extend_page_count);
5907 }
5908 }
5909 }
5910}
5911#endif
5912
5913static void
5914gc_sweep_finish(rb_objspace_t *objspace)
5915{
5916 gc_report(1, objspace, "gc_sweep_finish\n");
5917
5918 gc_prof_set_heap_info(objspace);
5919 heap_pages_free_unused_pages(objspace);
5920
5921 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5922 rb_size_pool_t *size_pool = &size_pools[i];
5923
5924 /* if heap_pages has unused pages, then assign them to increment */
5925 size_t tomb_pages = SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
5926 if (size_pool->allocatable_pages < tomb_pages) {
5927 size_pool->allocatable_pages = tomb_pages;
5928 }
5929
5930#if USE_RVARGC
5931 size_pool->freed_slots = 0;
5932 size_pool->empty_slots = 0;
5933
5934#if GC_ENABLE_INCREMENTAL_MARK
5935 if (!will_be_incremental_marking(objspace)) {
5936 rb_heap_t *eden_heap = SIZE_POOL_EDEN_HEAP(size_pool);
5937 struct heap_page *end_page = eden_heap->free_pages;
5938 if (end_page) {
5939 while (end_page->free_next) end_page = end_page->free_next;
5940 end_page->free_next = eden_heap->pooled_pages;
5941 }
5942 else {
5943 eden_heap->free_pages = eden_heap->pooled_pages;
5944 }
5945 eden_heap->pooled_pages = NULL;
5946 objspace->rincgc.pooled_slots = 0;
5947 }
5948#endif
5949#endif
5950 }
5951 heap_pages_expand_sorted(objspace);
5952
5953 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_END_SWEEP, 0);
5954 gc_mode_transition(objspace, gc_mode_none);
5955}
5956
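/* Perform one increment of lazy sweeping on `heap`: sweep pages until one with
 * free slots is produced (or, under incremental marking, until enough slots
 * have been swept), moving completely empty pages to the tomb heap. Returns
 * TRUE if the heap now has free pages. */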
5957static int
5958gc_sweep_step(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
5959{
5960 struct heap_page *sweep_page = heap->sweeping_page;
5961 int unlink_limit = GC_SWEEP_PAGES_FREEABLE_PER_STEP;
5962
5963#if GC_ENABLE_INCREMENTAL_MARK
5964 int swept_slots = 0;
5965#if USE_RVARGC
5966 bool need_pool = TRUE;
5967#else
5968 int need_pool = will_be_incremental_marking(objspace) ? TRUE : FALSE;
5969#endif
5970
5971 gc_report(2, objspace, "gc_sweep_step (need_pool: %d)\n", need_pool);
5972#else
5973 gc_report(2, objspace, "gc_sweep_step\n");
5974#endif
5975
5976 if (sweep_page == NULL) return FALSE;
5977
5978#if GC_ENABLE_LAZY_SWEEP
5979 gc_prof_sweep_timer_start(objspace);
5980#endif
5981
5982 do {
5983 RUBY_DEBUG_LOG("sweep_page:%p", (void *)sweep_page);
5984
5985 struct gc_sweep_context ctx = {
5986 .page = sweep_page,
5987 .final_slots = 0,
5988 .freed_slots = 0,
5989 .empty_slots = 0,
5990 };
5991 gc_sweep_page(objspace, heap, &ctx);
5992 int free_slots = ctx.freed_slots + ctx.empty_slots;
5993
5994 heap->sweeping_page = ccan_list_next(&heap->pages, sweep_page, page_node);
5995
5996 if (sweep_page->final_slots + free_slots == sweep_page->total_slots &&
5997 heap_pages_freeable_pages > 0 &&
5998 unlink_limit > 0) {
5999 heap_pages_freeable_pages--;
6000 unlink_limit--;
6001 /* there are no living objects -> move this page to tomb heap */
6002 heap_unlink_page(objspace, heap, sweep_page);
6003 heap_add_page(objspace, size_pool, SIZE_POOL_TOMB_HEAP(size_pool), sweep_page);
6004 }
6005 else if (free_slots > 0) {
6006#if USE_RVARGC
6007 size_pool->freed_slots += ctx.freed_slots;
6008 size_pool->empty_slots += ctx.empty_slots;
6009#endif
6010
6011#if GC_ENABLE_INCREMENTAL_MARK
6012 if (need_pool) {
6013 heap_add_poolpage(objspace, heap, sweep_page);
6014 need_pool = FALSE;
6015 }
6016 else {
6017 heap_add_freepage(heap, sweep_page);
6018 swept_slots += free_slots;
6019 if (swept_slots > GC_INCREMENTAL_SWEEP_SLOT_COUNT) {
6020 break;
6021 }
6022 }
6023#else
6024 heap_add_freepage(heap, sweep_page);
6025 break;
6026#endif
6027 }
6028 else {
6029 sweep_page->free_next = NULL;
6030 }
6031 } while ((sweep_page = heap->sweeping_page));
6032
6033 if (!heap->sweeping_page) {
6034#if USE_RVARGC
6035 gc_sweep_finish_size_pool(objspace, size_pool);
6036#endif
6037
6038 if (!has_sweeping_pages(objspace)) {
6039 gc_sweep_finish(objspace);
6040 }
6041 }
6042
6043#if GC_ENABLE_LAZY_SWEEP
6044 gc_prof_sweep_timer_stop(objspace);
6045#endif
6046
6047 return heap->free_pages != NULL;
6048}
6049
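/* Sweep all remaining pages in every size pool, finishing lazy sweep. */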
6050static void
6051gc_sweep_rest(rb_objspace_t *objspace)
6052{
6053 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
6054 rb_size_pool_t *size_pool = &size_pools[i];
6055
6056 while (SIZE_POOL_EDEN_HEAP(size_pool)->sweeping_page) {
6057 gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
6058 }
6059 }
6060}
6061
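/* Continue lazy sweeping from an allocation path. Steps the sweep for every
 * size pool; if the requesting pool still has no free slots, either add a new
 * page (when allowed) or finish sweeping outright. */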
6062static void
6063gc_sweep_continue(rb_objspace_t *objspace, rb_size_pool_t *sweep_size_pool, rb_heap_t *heap)
6064{
6065 GC_ASSERT(dont_gc_val() == FALSE);
6066 if (!GC_ENABLE_LAZY_SWEEP) return;
6067
6068 unsigned int lock_lev;
6069 gc_enter(objspace, gc_enter_event_sweep_continue, &lock_lev);
6070
6071 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
6072 rb_size_pool_t *size_pool = &size_pools[i];
6073 if (!gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool))) {
6074#if USE_RVARGC
6075 /* sweep_size_pool requires a free slot but sweeping did not yield any. */
6076 if (size_pool == sweep_size_pool) {
6077 if (size_pool->allocatable_pages > 0) {
6078 heap_increment(objspace, size_pool, heap);
6079 }
6080 else {
6081 /* Not allowed to create a new page so finish sweeping. */
6082 gc_sweep_rest(objspace);
6083 break;
6084 }
6085 }
6086#endif
6087 }
6088 }
6089
6090 gc_exit(objspace, gc_enter_event_sweep_continue, &lock_lev);
6091}
6092
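/* For each T_MOVED forwarding object set in `bitset`, undo the move: copy the
 * object back over the forwarding slot and return the slot it had been moved
 * to back to that page's freelist. */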
6093static void
6094invalidate_moved_plane(rb_objspace_t *objspace, struct heap_page *page, uintptr_t p, bits_t bitset)
6095{
6096 if (bitset) {
6097 do {
6098 if (bitset & 1) {
6099 VALUE forwarding_object = (VALUE)p;
6100 VALUE object;
6101
6102 if (BUILTIN_TYPE(forwarding_object) == T_MOVED) {
6103 GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(forwarding_object), forwarding_object));
6104 GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(forwarding_object), forwarding_object));
6105
6106 CLEAR_IN_BITMAP(GET_HEAP_PINNED_BITS(forwarding_object), forwarding_object);
6107
6108 object = rb_gc_location(forwarding_object);
6109
6110 shape_id_t original_shape_id = 0;
6111 if (RB_TYPE_P(object, T_OBJECT)) {
6112 original_shape_id = RMOVED(forwarding_object)->original_shape_id;
6113 }
6114
6115 gc_move(objspace, object, forwarding_object, GET_HEAP_PAGE(object)->slot_size, page->slot_size);
6116 /* forwarding_object is now our actual object, and "object"
6117 * is the free slot for the original page */
6118
6119 if (original_shape_id) {
6120 ROBJECT_SET_SHAPE_ID(forwarding_object, original_shape_id);
6121 }
6122
6123 struct heap_page *orig_page = GET_HEAP_PAGE(object);
6124 orig_page->free_slots++;
6125 heap_page_add_freeobj(objspace, orig_page, object);
6126
6127 GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(forwarding_object), forwarding_object));
6128 GC_ASSERT(BUILTIN_TYPE(forwarding_object) != T_MOVED);
6129 GC_ASSERT(BUILTIN_TYPE(forwarding_object) != T_NONE);
6130 }
6131 }
6132 p += BASE_SLOT_SIZE;
6133 bitset >>= 1;
6134 } while (bitset);
6135 }
6136}
6137
6138static void
6139invalidate_moved_page(rb_objspace_t *objspace, struct heap_page *page)
6140{
6141 int i;
6142 bits_t *mark_bits, *pin_bits;
6143 bits_t bitset;
6144
6145 mark_bits = page->mark_bits;
6146 pin_bits = page->pinned_bits;
6147
6148 uintptr_t p = page->start;
6149
6150 // Skip out of range slots at the head of the page
6151 bitset = pin_bits[0] & ~mark_bits[0];
6152 bitset >>= NUM_IN_PAGE(p);
6153 invalidate_moved_plane(objspace, page, p, bitset);
6154 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
6155
6156 for (i=1; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
6157 /* Moved objects are pinned but never marked. We reuse the pin bits
6158 * to indicate there is a moved object in this slot. */
6159 bitset = pin_bits[i] & ~mark_bits[i];
6160
6161 invalidate_moved_plane(objspace, page, p, bitset);
6162 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
6163 }
6164}
6165
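/* Prepare for compaction: flag every eden page as unswept, point each heap's
 * compact cursor at its last page, reset the per-type move statistics, and
 * install the read-barrier handlers used for pages holding T_MOVED objects. */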
6166static void
6167gc_compact_start(rb_objspace_t *objspace)
6168{
6169 struct heap_page *page = NULL;
6170 gc_mode_transition(objspace, gc_mode_compacting);
6171
6172 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
6173 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(&size_pools[i]);
6174 ccan_list_for_each(&heap->pages, page, page_node) {
6175 page->flags.before_sweep = TRUE;
6176 }
6177
6178 heap->compact_cursor = ccan_list_tail(&heap->pages, struct heap_page, page_node);
6179 heap->compact_cursor_index = 0;
6180 }
6181
6182 if (gc_prof_enabled(objspace)) {
6183 gc_profile_record *record = gc_prof_record(objspace);
6184 record->moved_objects = objspace->rcompactor.total_moved;
6185 }
6186
6187 memset(objspace->rcompactor.considered_count_table, 0, T_MASK * sizeof(size_t));
6188 memset(objspace->rcompactor.moved_count_table, 0, T_MASK * sizeof(size_t));
6189 memset(objspace->rcompactor.moved_up_count_table, 0, T_MASK * sizeof(size_t));
6190 memset(objspace->rcompactor.moved_down_count_table, 0, T_MASK * sizeof(size_t));
6191
6192 /* Set up read barrier for pages containing MOVED objects */
6193 install_handlers();
6194}
6195
6196static void gc_sweep_compact(rb_objspace_t *objspace);
6197
6198static void
6199gc_sweep(rb_objspace_t *objspace)
6200{
6201 const unsigned int immediate_sweep = objspace->flags.immediate_sweep;
6202
6203 gc_report(1, objspace, "gc_sweep: immediate: %d\n", immediate_sweep);
6204
6205 gc_sweep_start(objspace);
6206 if (objspace->flags.during_compacting) {
6207 gc_sweep_compact(objspace);
6208 }
6209
6210 if (immediate_sweep) {
6211#if !GC_ENABLE_LAZY_SWEEP
6212 gc_prof_sweep_timer_start(objspace);
6213#endif
6214 gc_sweep_rest(objspace);
6215#if !GC_ENABLE_LAZY_SWEEP
6216 gc_prof_sweep_timer_stop(objspace);
6217#endif
6218 }
6219 else {
6220
6221 /* Sweep every size pool. */
6222 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
6223 rb_size_pool_t *size_pool = &size_pools[i];
6224 gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
6225 }
6226 }
6227
6228#if !USE_RVARGC
6229 rb_size_pool_t *size_pool = &size_pools[0];
6230 gc_heap_prepare_minimum_pages(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
6231#endif
6232}
6233
6234/* Marking - Marking stack */
6235
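/* The mark stack is a linked list of fixed-size chunks. Emptied chunks are
 * kept in a small cache so that pushing and popping chunks does not have to
 * call malloc/free every time. */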
6236static stack_chunk_t *
6237stack_chunk_alloc(void)
6238{
6239 stack_chunk_t *res;
6240
6241 res = malloc(sizeof(stack_chunk_t));
6242 if (!res)
6243 rb_memerror();
6244
6245 return res;
6246}
6247
6248static inline int
6249is_mark_stack_empty(mark_stack_t *stack)
6250{
6251 return stack->chunk == NULL;
6252}
6253
6254static size_t
6255mark_stack_size(mark_stack_t *stack)
6256{
6257 size_t size = stack->index;
6258 stack_chunk_t *chunk = stack->chunk ? stack->chunk->next : NULL;
6259
6260 while (chunk) {
6261 size += stack->limit;
6262 chunk = chunk->next;
6263 }
6264 return size;
6265}
6266
6267static void
6268add_stack_chunk_cache(mark_stack_t *stack, stack_chunk_t *chunk)
6269{
6270 chunk->next = stack->cache;
6271 stack->cache = chunk;
6272 stack->cache_size++;
6273}
6274
6275static void
6276shrink_stack_chunk_cache(mark_stack_t *stack)
6277{
6278 stack_chunk_t *chunk;
6279
6280 if (stack->unused_cache_size > (stack->cache_size/2)) {
6281 chunk = stack->cache;
6282 stack->cache = stack->cache->next;
6283 stack->cache_size--;
6284 free(chunk);
6285 }
6286 stack->unused_cache_size = stack->cache_size;
6287}
6288
6289static void
6290push_mark_stack_chunk(mark_stack_t *stack)
6291{
6292 stack_chunk_t *next;
6293
6294 GC_ASSERT(stack->index == stack->limit);
6295
6296 if (stack->cache_size > 0) {
6297 next = stack->cache;
6298 stack->cache = stack->cache->next;
6299 stack->cache_size--;
6300 if (stack->unused_cache_size > stack->cache_size)
6301 stack->unused_cache_size = stack->cache_size;
6302 }
6303 else {
6304 next = stack_chunk_alloc();
6305 }
6306 next->next = stack->chunk;
6307 stack->chunk = next;
6308 stack->index = 0;
6309}
6310
6311static void
6312pop_mark_stack_chunk(mark_stack_t *stack)
6313{
6314 stack_chunk_t *prev;
6315
6316 prev = stack->chunk->next;
6317 GC_ASSERT(stack->index == 0);
6318 add_stack_chunk_cache(stack, stack->chunk);
6319 stack->chunk = prev;
6320 stack->index = stack->limit;
6321}
6322
6323static void
6324mark_stack_chunk_list_free(stack_chunk_t *chunk)
6325{
6326 stack_chunk_t *next = NULL;
6327
6328 while (chunk != NULL) {
6329 next = chunk->next;
6330 free(chunk);
6331 chunk = next;
6332 }
6333}
6334
6335static void
6336free_stack_chunks(mark_stack_t *stack)
6337{
6338 mark_stack_chunk_list_free(stack->chunk);
6339}
6340
6341static void
6342mark_stack_free_cache(mark_stack_t *stack)
6343{
6344 mark_stack_chunk_list_free(stack->cache);
6345 stack->cache_size = 0;
6346 stack->unused_cache_size = 0;
6347}
6348
6349static void
6350push_mark_stack(mark_stack_t *stack, VALUE data)
6351{
6352 VALUE obj = data;
6353 switch (BUILTIN_TYPE(obj)) {
6354 case T_OBJECT:
6355 case T_CLASS:
6356 case T_MODULE:
6357 case T_FLOAT:
6358 case T_STRING:
6359 case T_REGEXP:
6360 case T_ARRAY:
6361 case T_HASH:
6362 case T_STRUCT:
6363 case T_BIGNUM:
6364 case T_FILE:
6365 case T_DATA:
6366 case T_MATCH:
6367 case T_COMPLEX:
6368 case T_RATIONAL:
6369 case T_TRUE:
6370 case T_FALSE:
6371 case T_SYMBOL:
6372 case T_IMEMO:
6373 case T_ICLASS:
6374 if (stack->index == stack->limit) {
6375 push_mark_stack_chunk(stack);
6376 }
6377 stack->chunk->data[stack->index++] = data;
6378 return;
6379
6380 case T_NONE:
6381 case T_NIL:
6382 case T_FIXNUM:
6383 case T_MOVED:
6384 case T_ZOMBIE:
6385 case T_UNDEF:
6386 case T_MASK:
6387 rb_bug("push_mark_stack() called for broken object");
6388 break;
6389
6390 case T_NODE:
6391 UNEXPECTED_NODE(push_mark_stack);
6392 break;
6393 }
6394
6395 rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
6396 BUILTIN_TYPE(obj), (void *)data,
6397 is_pointer_to_heap(&rb_objspace, (void *)data) ? "corrupted object" : "non object");
6398}
6399
6400static int
6401pop_mark_stack(mark_stack_t *stack, VALUE *data)
6402{
6403 if (is_mark_stack_empty(stack)) {
6404 return FALSE;
6405 }
6406 if (stack->index == 1) {
6407 *data = stack->chunk->data[--stack->index];
6408 pop_mark_stack_chunk(stack);
6409 }
6410 else {
6411 *data = stack->chunk->data[--stack->index];
6412 }
6413 return TRUE;
6414}
6415
6416static void
6417init_mark_stack(mark_stack_t *stack)
6418{
6419 int i;
6420
6421 MEMZERO(stack, mark_stack_t, 1);
6422 stack->index = stack->limit = STACK_CHUNK_SIZE;
6423
6424 for (i=0; i < 4; i++) {
6425 add_stack_chunk_cache(stack, stack_chunk_alloc());
6426 }
6427 stack->unused_cache_size = stack->cache_size;
6428}
6429
6430/* Marking */
6431
6432#define SET_STACK_END SET_MACHINE_STACK_END(&ec->machine.stack_end)
6433
6434#define STACK_START (ec->machine.stack_start)
6435#define STACK_END (ec->machine.stack_end)
6436#define STACK_LEVEL_MAX (ec->machine.stack_maxsize/sizeof(VALUE))
6437
6438#if STACK_GROW_DIRECTION < 0
6439# define STACK_LENGTH (size_t)(STACK_START - STACK_END)
6440#elif STACK_GROW_DIRECTION > 0
6441# define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
6442#else
6443# define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
6444 : (size_t)(STACK_END - STACK_START + 1))
6445#endif
6446#if !STACK_GROW_DIRECTION
6447int ruby_stack_grow_direction;
6448int
6449ruby_get_stack_grow_direction(volatile VALUE *addr)
6450{
6451 VALUE *end;
6452 SET_MACHINE_STACK_END(&end);
6453
6454 if (end > addr) return ruby_stack_grow_direction = 1;
6455 return ruby_stack_grow_direction = -1;
6456}
6457#endif
6458
6459size_t
6460 ruby_stack_length(VALUE **p)
6461 {
6462 rb_execution_context_t *ec = GET_EC();
6463 SET_STACK_END;
6464 if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
6465 return STACK_LENGTH;
6466}
6467
6468#define PREVENT_STACK_OVERFLOW 1
6469#ifndef PREVENT_STACK_OVERFLOW
6470#if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
6471# define PREVENT_STACK_OVERFLOW 1
6472#else
6473# define PREVENT_STACK_OVERFLOW 0
6474#endif
6475#endif
6476#if PREVENT_STACK_OVERFLOW && !defined(__EMSCRIPTEN__)
6477static int
6478stack_check(rb_execution_context_t *ec, int water_mark)
6479{
6480 SET_STACK_END;
6481
6482 size_t length = STACK_LENGTH;
6483 size_t maximum_length = STACK_LEVEL_MAX - water_mark;
6484
6485 return length > maximum_length;
6486}
6487#else
6488#define stack_check(ec, water_mark) FALSE
6489#endif
6490
6491#define STACKFRAME_FOR_CALL_CFUNC 2048
6492
6493MJIT_FUNC_EXPORTED int
6494rb_ec_stack_check(rb_execution_context_t *ec)
6495{
6496 return stack_check(ec, STACKFRAME_FOR_CALL_CFUNC);
6497}
6498
6499int
6500 ruby_stack_check(void)
6501 {
6502 return stack_check(GET_EC(), STACKFRAME_FOR_CALL_CFUNC);
6503}
6504
6505ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS(static void each_location(rb_objspace_t *objspace, register const VALUE *x, register long n, void (*cb)(rb_objspace_t *, VALUE)));
6506static void
6507each_location(rb_objspace_t *objspace, register const VALUE *x, register long n, void (*cb)(rb_objspace_t *, VALUE))
6508{
6509 VALUE v;
6510 while (n--) {
6511 v = *x;
6512 cb(objspace, v);
6513 x++;
6514 }
6515}
6516
6517static void
6518gc_mark_locations(rb_objspace_t *objspace, const VALUE *start, const VALUE *end, void (*cb)(rb_objspace_t *, VALUE))
6519{
6520 long n;
6521
6522 if (end <= start) return;
6523 n = end - start;
6524 each_location(objspace, start, n, cb);
6525}
6526
6527void
6528rb_gc_mark_locations(const VALUE *start, const VALUE *end)
6529{
6530 gc_mark_locations(&rb_objspace, start, end, gc_mark_maybe);
6531}
6532
6533static void
6534gc_mark_values(rb_objspace_t *objspace, long n, const VALUE *values)
6535{
6536 long i;
6537
6538 for (i=0; i<n; i++) {
6539 gc_mark(objspace, values[i]);
6540 }
6541}
6542
6543void
6544rb_gc_mark_values(long n, const VALUE *values)
6545{
6546 long i;
6547 rb_objspace_t *objspace = &rb_objspace;
6548
6549 for (i=0; i<n; i++) {
6550 gc_mark_and_pin(objspace, values[i]);
6551 }
6552}
6553
6554static void
6555gc_mark_stack_values(rb_objspace_t *objspace, long n, const VALUE *values)
6556{
6557 long i;
6558
6559 for (i=0; i<n; i++) {
6560 if (is_markable_object(objspace, values[i])) {
6561 gc_mark_and_pin(objspace, values[i]);
6562 }
6563 }
6564}
6565
6566void
6567rb_gc_mark_vm_stack_values(long n, const VALUE *values)
6568{
6569 rb_objspace_t *objspace = &rb_objspace;
6570 gc_mark_stack_values(objspace, n, values);
6571}
6572
6573static int
6574mark_value(st_data_t key, st_data_t value, st_data_t data)
6575{
6576 rb_objspace_t *objspace = (rb_objspace_t *)data;
6577 gc_mark(objspace, (VALUE)value);
6578 return ST_CONTINUE;
6579}
6580
6581static int
6582mark_value_pin(st_data_t key, st_data_t value, st_data_t data)
6583{
6584 rb_objspace_t *objspace = (rb_objspace_t *)data;
6585 gc_mark_and_pin(objspace, (VALUE)value);
6586 return ST_CONTINUE;
6587}
6588
6589static void
6590mark_tbl_no_pin(rb_objspace_t *objspace, st_table *tbl)
6591{
6592 if (!tbl || tbl->num_entries == 0) return;
6593 st_foreach(tbl, mark_value, (st_data_t)objspace);
6594}
6595
6596static void
6597mark_tbl(rb_objspace_t *objspace, st_table *tbl)
6598{
6599 if (!tbl || tbl->num_entries == 0) return;
6600 st_foreach(tbl, mark_value_pin, (st_data_t)objspace);
6601}
6602
6603static int
6604mark_key(st_data_t key, st_data_t value, st_data_t data)
6605{
6606 rb_objspace_t *objspace = (rb_objspace_t *)data;
6607 gc_mark_and_pin(objspace, (VALUE)key);
6608 return ST_CONTINUE;
6609}
6610
6611static void
6612mark_set(rb_objspace_t *objspace, st_table *tbl)
6613{
6614 if (!tbl) return;
6615 st_foreach(tbl, mark_key, (st_data_t)objspace);
6616}
6617
6618static int
6619pin_value(st_data_t key, st_data_t value, st_data_t data)
6620{
6621 rb_objspace_t *objspace = (rb_objspace_t *)data;
6622 gc_mark_and_pin(objspace, (VALUE)value);
6623 return ST_CONTINUE;
6624}
6625
6626static void
6627mark_finalizer_tbl(rb_objspace_t *objspace, st_table *tbl)
6628{
6629 if (!tbl) return;
6630 st_foreach(tbl, pin_value, (st_data_t)objspace);
6631}
6632
6633void
6634rb_mark_set(st_table *tbl)
6635{
6636 mark_set(&rb_objspace, tbl);
6637}
6638
6639static int
6640mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
6641{
6642 rb_objspace_t *objspace = (rb_objspace_t *)data;
6643
6644 gc_mark(objspace, (VALUE)key);
6645 gc_mark(objspace, (VALUE)value);
6646 return ST_CONTINUE;
6647}
6648
6649static int
6650pin_key_pin_value(st_data_t key, st_data_t value, st_data_t data)
6651{
6652 rb_objspace_t *objspace = (rb_objspace_t *)data;
6653
6654 gc_mark_and_pin(objspace, (VALUE)key);
6655 gc_mark_and_pin(objspace, (VALUE)value);
6656 return ST_CONTINUE;
6657}
6658
6659static int
6660pin_key_mark_value(st_data_t key, st_data_t value, st_data_t data)
6661{
6662 rb_objspace_t *objspace = (rb_objspace_t *)data;
6663
6664 gc_mark_and_pin(objspace, (VALUE)key);
6665 gc_mark(objspace, (VALUE)value);
6666 return ST_CONTINUE;
6667}
6668
6669static void
6670mark_hash(rb_objspace_t *objspace, VALUE hash)
6671{
6672 if (rb_hash_compare_by_id_p(hash)) {
6673 rb_hash_stlike_foreach(hash, pin_key_mark_value, (st_data_t)objspace);
6674 }
6675 else {
6676 rb_hash_stlike_foreach(hash, mark_keyvalue, (st_data_t)objspace);
6677 }
6678
6679 if (RHASH_AR_TABLE_P(hash)) {
6680 if (LIKELY(during_gc) && RHASH_TRANSIENT_P(hash)) {
6681 rb_transient_heap_mark(hash, RHASH_AR_TABLE(hash));
6682 }
6683 }
6684 else {
6685 VM_ASSERT(!RHASH_TRANSIENT_P(hash));
6686 }
6687 gc_mark(objspace, RHASH(hash)->ifnone);
6688}
6689
6690static void
6691mark_st(rb_objspace_t *objspace, st_table *tbl)
6692{
6693 if (!tbl) return;
6694 st_foreach(tbl, pin_key_pin_value, (st_data_t)objspace);
6695}
6696
6697void
6698rb_mark_hash(st_table *tbl)
6699{
6700 mark_st(&rb_objspace, tbl);
6701}
6702
6703static void
6704mark_method_entry(rb_objspace_t *objspace, const rb_method_entry_t *me)
6705{
6706 const rb_method_definition_t *def = me->def;
6707
6708 gc_mark(objspace, me->owner);
6709 gc_mark(objspace, me->defined_class);
6710
6711 if (def) {
6712 switch (def->type) {
6713 case VM_METHOD_TYPE_ISEQ:
6714 if (def->body.iseq.iseqptr) gc_mark(objspace, (VALUE)def->body.iseq.iseqptr);
6715 gc_mark(objspace, (VALUE)def->body.iseq.cref);
6716
6717 if (def->iseq_overload && me->defined_class) {
6718 // it can be a key of "overloaded_cme" table
6719 // so it should be pinned.
6720 gc_mark_and_pin(objspace, (VALUE)me);
6721 }
6722 break;
6723 case VM_METHOD_TYPE_ATTRSET:
6724 case VM_METHOD_TYPE_IVAR:
6725 gc_mark(objspace, def->body.attr.location);
6726 break;
6727 case VM_METHOD_TYPE_BMETHOD:
6728 gc_mark(objspace, def->body.bmethod.proc);
6729 if (def->body.bmethod.hooks) rb_hook_list_mark(def->body.bmethod.hooks);
6730 break;
6731 case VM_METHOD_TYPE_ALIAS:
6732 gc_mark(objspace, (VALUE)def->body.alias.original_me);
6733 return;
6734 case VM_METHOD_TYPE_REFINED:
6735 gc_mark(objspace, (VALUE)def->body.refined.orig_me);
6736 gc_mark(objspace, (VALUE)def->body.refined.owner);
6737 break;
6738 case VM_METHOD_TYPE_CFUNC:
6739 case VM_METHOD_TYPE_ZSUPER:
6740 case VM_METHOD_TYPE_MISSING:
6741 case VM_METHOD_TYPE_OPTIMIZED:
6742 case VM_METHOD_TYPE_UNDEF:
6743 case VM_METHOD_TYPE_NOTIMPLEMENTED:
6744 break;
6745 }
6746 }
6747}
6748
6749static enum rb_id_table_iterator_result
6750mark_method_entry_i(VALUE me, void *data)
6751{
6752 rb_objspace_t *objspace = (rb_objspace_t *)data;
6753
6754 gc_mark(objspace, me);
6755 return ID_TABLE_CONTINUE;
6756}
6757
6758static void
6759mark_m_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
6760{
6761 if (tbl) {
6762 rb_id_table_foreach_values(tbl, mark_method_entry_i, objspace);
6763 }
6764}
6765
6766static enum rb_id_table_iterator_result
6767mark_const_entry_i(VALUE value, void *data)
6768{
6769 const rb_const_entry_t *ce = (const rb_const_entry_t *)value;
6770 rb_objspace_t *objspace = data;
6771
6772 gc_mark(objspace, ce->value);
6773 gc_mark(objspace, ce->file);
6774 return ID_TABLE_CONTINUE;
6775}
6776
6777static void
6778mark_const_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
6779{
6780 if (!tbl) return;
6781 rb_id_table_foreach_values(tbl, mark_const_entry_i, objspace);
6782}
6783
6784#if STACK_GROW_DIRECTION < 0
6785#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
6786#elif STACK_GROW_DIRECTION > 0
6787#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
6788#else
6789#define GET_STACK_BOUNDS(start, end, appendix) \
6790 ((STACK_END < STACK_START) ? \
6791 ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
6792#endif
6793
6794static void each_stack_location(rb_objspace_t *objspace, const rb_execution_context_t *ec,
6795 const VALUE *stack_start, const VALUE *stack_end, void (*cb)(rb_objspace_t *, VALUE));
6796
6797#if defined(__wasm__)
6798
6799
6800static VALUE *rb_stack_range_tmp[2];
6801
6802static void
6803rb_mark_locations(void *begin, void *end)
6804{
6805 rb_stack_range_tmp[0] = begin;
6806 rb_stack_range_tmp[1] = end;
6807}
6808
6809# if defined(__EMSCRIPTEN__)
6810
6811static void
6812mark_current_machine_context(rb_objspace_t *objspace, rb_execution_context_t *ec)
6813{
6814 emscripten_scan_stack(rb_mark_locations);
6815 each_stack_location(objspace, ec, rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe);
6816
6817 emscripten_scan_registers(rb_mark_locations);
6818 each_stack_location(objspace, ec, rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe);
6819}
6820# else // use Asyncify version
6821
6822static void
6823mark_current_machine_context(rb_objspace_t *objspace, rb_execution_context_t *ec)
6824{
6825 VALUE *stack_start, *stack_end;
6826 SET_STACK_END;
6827 GET_STACK_BOUNDS(stack_start, stack_end, 1);
6828 each_stack_location(objspace, ec, stack_start, stack_end, gc_mark_maybe);
6829
6830 rb_wasm_scan_locals(rb_mark_locations);
6831 each_stack_location(objspace, ec, rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe);
6832}
6833
6834# endif
6835
6836#else // !defined(__wasm__)
6837
6838static void
6839mark_current_machine_context(rb_objspace_t *objspace, rb_execution_context_t *ec)
6840{
6841 union {
6842 rb_jmp_buf j;
6843 VALUE v[sizeof(rb_jmp_buf) / (sizeof(VALUE))];
6844 } save_regs_gc_mark;
6845 VALUE *stack_start, *stack_end;
6846
6847 FLUSH_REGISTER_WINDOWS;
6848 memset(&save_regs_gc_mark, 0, sizeof(save_regs_gc_mark));
6849 /* This assumes that all registers are saved into the jmp_buf (and stack) */
6850 rb_setjmp(save_regs_gc_mark.j);
6851
6852 /* SET_STACK_END must be called in this function because
6853 * the stack frame of this function may contain
6854 * callee save registers and they should be marked. */
6855 SET_STACK_END;
6856 GET_STACK_BOUNDS(stack_start, stack_end, 1);
6857
6858 each_location(objspace, save_regs_gc_mark.v, numberof(save_regs_gc_mark.v), gc_mark_maybe);
6859
6860 each_stack_location(objspace, ec, stack_start, stack_end, gc_mark_maybe);
6861}
6862#endif
6863
6864static void
6865each_machine_stack_value(const rb_execution_context_t *ec, void (*cb)(rb_objspace_t *, VALUE))
6866{
6867 rb_objspace_t *objspace = &rb_objspace;
6868 VALUE *stack_start, *stack_end;
6869
6870 GET_STACK_BOUNDS(stack_start, stack_end, 0);
6871 each_stack_location(objspace, ec, stack_start, stack_end, cb);
6872}
6873
6874void
6875rb_gc_mark_machine_stack(const rb_execution_context_t *ec)
6876{
6877 each_machine_stack_value(ec, gc_mark_maybe);
6878}
6879
6880static void
6881each_stack_location(rb_objspace_t *objspace, const rb_execution_context_t *ec,
6882 const VALUE *stack_start, const VALUE *stack_end, void (*cb)(rb_objspace_t *, VALUE))
6883{
6884
6885 gc_mark_locations(objspace, stack_start, stack_end, cb);
6886
6887#if defined(__mc68000__)
6888 gc_mark_locations(objspace,
6889 (VALUE*)((char*)stack_start + 2),
6890 (VALUE*)((char*)stack_end - 2), cb);
6891#endif
6892}
6893
6894void
6895rb_mark_tbl(st_table *tbl)
6896{
6897 mark_tbl(&rb_objspace, tbl);
6898}
6899
6900void
6901rb_mark_tbl_no_pin(st_table *tbl)
6902{
6903 mark_tbl_no_pin(&rb_objspace, tbl);
6904}
6905
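/* Conservatively mark a value that may or may not be an object reference:
 * mark (and pin) it only when it points into the GC heap and the slot is not
 * free or a zombie. */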
6906static void
6907gc_mark_maybe(rb_objspace_t *objspace, VALUE obj)
6908{
6909 (void)VALGRIND_MAKE_MEM_DEFINED(&obj, sizeof(obj));
6910
6911 if (is_pointer_to_heap(objspace, (void *)obj)) {
6912 void *ptr = asan_unpoison_object_temporary(obj);
6913
6914 /* Garbage can live on the stack, so do not mark or pin */
6915 switch (BUILTIN_TYPE(obj)) {
6916 case T_ZOMBIE:
6917 case T_NONE:
6918 break;
6919 default:
6920 gc_mark_and_pin(objspace, obj);
6921 break;
6922 }
6923
6924 if (ptr) {
6925 GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
6926 asan_poison_object(obj);
6927 }
6928 }
6929}
6930
6931void
6932rb_gc_mark_maybe(VALUE obj)
6933{
6934 gc_mark_maybe(&rb_objspace, obj);
6935}
6936
6937static inline int
6938gc_mark_set(rb_objspace_t *objspace, VALUE obj)
6939{
6940 ASSERT_vm_locking();
6941 if (RVALUE_MARKED(obj)) return 0;
6942 MARK_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
6943 return 1;
6944}
6945
6946static int
6947gc_remember_unprotected(rb_objspace_t *objspace, VALUE obj)
6948{
6949 struct heap_page *page = GET_HEAP_PAGE(obj);
6950 bits_t *uncollectible_bits = &page->uncollectible_bits[0];
6951
6952 if (!MARKED_IN_BITMAP(uncollectible_bits, obj)) {
6953 page->flags.has_uncollectible_shady_objects = TRUE;
6954 MARK_IN_BITMAP(uncollectible_bits, obj);
6955 objspace->rgengc.uncollectible_wb_unprotected_objects++;
6956
6957#if RGENGC_PROFILE > 0
6958 objspace->profile.total_remembered_shady_object_count++;
6959#if RGENGC_PROFILE >= 2
6960 objspace->profile.remembered_shady_object_count_types[BUILTIN_TYPE(obj)]++;
6961#endif
6962#endif
6963 return TRUE;
6964 }
6965 else {
6966 return FALSE;
6967 }
6968}
6969
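/* Enforce the generational invariant while marking: when the current parent
 * is old, a write-barrier-unprotected child is remembered, and a protected
 * young child is either promoted to old (if already marked) or made a
 * promotion candidate. */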
6970static void
6971rgengc_check_relation(rb_objspace_t *objspace, VALUE obj)
6972{
6973 const VALUE old_parent = objspace->rgengc.parent_object;
6974
6975 if (old_parent) { /* parent object is old */
6976 if (RVALUE_WB_UNPROTECTED(obj)) {
6977 if (gc_remember_unprotected(objspace, obj)) {
6978 gc_report(2, objspace, "relation: (O->S) %s -> %s\n", obj_info(old_parent), obj_info(obj));
6979 }
6980 }
6981 else {
6982 if (!RVALUE_OLD_P(obj)) {
6983 if (RVALUE_MARKED(obj)) {
6984 /* An object pointed from an OLD object should be OLD. */
6985 gc_report(2, objspace, "relation: (O->unmarked Y) %s -> %s\n", obj_info(old_parent), obj_info(obj));
6986 RVALUE_AGE_SET_OLD(objspace, obj);
6987 if (is_incremental_marking(objspace)) {
6988 if (!RVALUE_MARKING(obj)) {
6989 gc_grey(objspace, obj);
6990 }
6991 }
6992 else {
6993 rgengc_remember(objspace, obj);
6994 }
6995 }
6996 else {
6997 gc_report(2, objspace, "relation: (O->Y) %s -> %s\n", obj_info(old_parent), obj_info(obj));
6998 RVALUE_AGE_SET_CANDIDATE(objspace, obj);
6999 }
7000 }
7001 }
7002 }
7003
7004 GC_ASSERT(old_parent == objspace->rgengc.parent_object);
7005}
7006
7007static void
7008gc_grey(rb_objspace_t *objspace, VALUE obj)
7009{
7010#if RGENGC_CHECK_MODE
7011 if (RVALUE_MARKED(obj) == FALSE) rb_bug("gc_grey: %s is not marked.", obj_info(obj));
7012 if (RVALUE_MARKING(obj) == TRUE) rb_bug("gc_grey: %s is marking/remembered.", obj_info(obj));
7013#endif
7014
7015#if GC_ENABLE_INCREMENTAL_MARK
7016 if (is_incremental_marking(objspace)) {
7017 MARK_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
7018 }
7019#endif
7020
7021 push_mark_stack(&objspace->mark_stack, obj);
7022}
7023
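/* Age the object being marked: bump the age of write-barrier-protected young
 * objects, flag already-old objects as uncollectible during a full mark, and
 * count the slot as marked. */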
7024static void
7025gc_aging(rb_objspace_t *objspace, VALUE obj)
7026{
7027 struct heap_page *page = GET_HEAP_PAGE(obj);
7028
7029 GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
7030 check_rvalue_consistency(obj);
7031
7032 if (!RVALUE_PAGE_WB_UNPROTECTED(page, obj)) {
7033 if (!RVALUE_OLD_P(obj)) {
7034 gc_report(3, objspace, "gc_aging: YOUNG: %s\n", obj_info(obj));
7035 RVALUE_AGE_INC(objspace, obj);
7036 }
7037 else if (is_full_marking(objspace)) {
7038 GC_ASSERT(RVALUE_PAGE_UNCOLLECTIBLE(page, obj) == FALSE);
7039 RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, page, obj);
7040 }
7041 }
7042 check_rvalue_consistency(obj);
7043
7044 objspace->marked_slots++;
7045}
7046
7047NOINLINE(static void gc_mark_ptr(rb_objspace_t *objspace, VALUE obj));
7048static void reachable_objects_from_callback(VALUE obj);
7049
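/* Core marking routine. During GC it checks the generational relation with
 * the current parent, sets the mark bit (returning if already marked), ages
 * the object and pushes it onto the mark stack; outside GC it forwards the
 * object to the reachable-objects callback instead. */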
7050static void
7051gc_mark_ptr(rb_objspace_t *objspace, VALUE obj)
7052{
7053 if (LIKELY(during_gc)) {
7054 rgengc_check_relation(objspace, obj);
7055 if (!gc_mark_set(objspace, obj)) return; /* already marked */
7056
7057 if (0) { // for debug GC marking miss
7058 if (objspace->rgengc.parent_object) {
7059 RUBY_DEBUG_LOG("%p (%s) parent:%p (%s)",
7060 (void *)obj, obj_type_name(obj),
7061 (void *)objspace->rgengc.parent_object, obj_type_name(objspace->rgengc.parent_object));
7062 }
7063 else {
7064 RUBY_DEBUG_LOG("%p (%s)", (void *)obj, obj_type_name(obj));
7065 }
7066 }
7067
7068 if (UNLIKELY(RB_TYPE_P(obj, T_NONE))) {
7069 rp(obj);
7070 rb_bug("try to mark T_NONE object"); /* check here will help debugging */
7071 }
7072 gc_aging(objspace, obj);
7073 gc_grey(objspace, obj);
7074 }
7075 else {
7076 reachable_objects_from_callback(obj);
7077 }
7078}
7079
7080static inline void
7081gc_pin(rb_objspace_t *objspace, VALUE obj)
7082{
7083 GC_ASSERT(is_markable_object(objspace, obj));
7084 if (UNLIKELY(objspace->flags.during_compacting)) {
7085 if (LIKELY(during_gc)) {
7086 MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj);
7087 }
7088 }
7089}
7090
7091static inline void
7092gc_mark_and_pin(rb_objspace_t *objspace, VALUE obj)
7093{
7094 if (!is_markable_object(objspace, obj)) return;
7095 gc_pin(objspace, obj);
7096 gc_mark_ptr(objspace, obj);
7097}
7098
7099static inline void
7100gc_mark(rb_objspace_t *objspace, VALUE obj)
7101{
7102 if (!is_markable_object(objspace, obj)) return;
7103 gc_mark_ptr(objspace, obj);
7104}
7105
7106void
7107rb_gc_mark_movable(VALUE ptr)
7108{
7109 gc_mark(&rb_objspace, ptr);
7110}
7111
7112void
7113rb_gc_mark(VALUE ptr)
7114{
7115 gc_mark_and_pin(&rb_objspace, ptr);
7116}
7117
7118 /* CAUTION: THIS FUNCTION IS VALID *ONLY BEFORE* SWEEPING.
7119  * This function is only for GC_END_MARK timing.
7120  */
7121
7122int
7123rb_objspace_marked_object_p(VALUE obj)
7124{
7125 return RVALUE_MARKED(obj) ? TRUE : FALSE;
7126}
7127
7128static inline void
7129gc_mark_set_parent(rb_objspace_t *objspace, VALUE obj)
7130{
7131 if (RVALUE_OLD_P(obj)) {
7132 objspace->rgengc.parent_object = obj;
7133 }
7134 else {
7135 objspace->rgengc.parent_object = Qfalse;
7136 }
7137}
7138
7139static void
7140gc_mark_imemo(rb_objspace_t *objspace, VALUE obj)
7141{
7142 switch (imemo_type(obj)) {
7143 case imemo_env:
7144 {
7145 const rb_env_t *env = (const rb_env_t *)obj;
7146
7147 if (LIKELY(env->ep)) {
7148                 // env->ep can be NULL just after newobj(), hence the guard above.
7149 GC_ASSERT(env->ep[VM_ENV_DATA_INDEX_ENV] == obj);
7150 GC_ASSERT(VM_ENV_ESCAPED_P(env->ep));
7151 gc_mark_values(objspace, (long)env->env_size, env->env);
7152 VM_ENV_FLAGS_SET(env->ep, VM_ENV_FLAG_WB_REQUIRED);
7153 gc_mark(objspace, (VALUE)rb_vm_env_prev_env(env));
7154 gc_mark(objspace, (VALUE)env->iseq);
7155 }
7156 }
7157 return;
7158 case imemo_cref:
7159 gc_mark(objspace, RANY(obj)->as.imemo.cref.klass_or_self);
7160 gc_mark(objspace, (VALUE)RANY(obj)->as.imemo.cref.next);
7161 gc_mark(objspace, RANY(obj)->as.imemo.cref.refinements);
7162 return;
7163 case imemo_svar:
7164 gc_mark(objspace, RANY(obj)->as.imemo.svar.cref_or_me);
7165 gc_mark(objspace, RANY(obj)->as.imemo.svar.lastline);
7166 gc_mark(objspace, RANY(obj)->as.imemo.svar.backref);
7167 gc_mark(objspace, RANY(obj)->as.imemo.svar.others);
7168 return;
7169 case imemo_throw_data:
7170 gc_mark(objspace, RANY(obj)->as.imemo.throw_data.throw_obj);
7171 return;
7172 case imemo_ifunc:
7173 gc_mark_maybe(objspace, (VALUE)RANY(obj)->as.imemo.ifunc.data);
7174 return;
7175 case imemo_memo:
7176 gc_mark(objspace, RANY(obj)->as.imemo.memo.v1);
7177 gc_mark(objspace, RANY(obj)->as.imemo.memo.v2);
7178 gc_mark_maybe(objspace, RANY(obj)->as.imemo.memo.u3.value);
7179 return;
7180 case imemo_ment:
7181 mark_method_entry(objspace, &RANY(obj)->as.imemo.ment);
7182 return;
7183 case imemo_iseq:
7184 rb_iseq_mark((rb_iseq_t *)obj);
7185 return;
7186 case imemo_tmpbuf:
7187 {
7188 const rb_imemo_tmpbuf_t *m = &RANY(obj)->as.imemo.alloc;
7189 do {
7190 rb_gc_mark_locations(m->ptr, m->ptr + m->cnt);
7191 } while ((m = m->next) != NULL);
7192 }
7193 return;
7194 case imemo_ast:
7195 rb_ast_mark(&RANY(obj)->as.imemo.ast);
7196 return;
7197 case imemo_parser_strterm:
7198 rb_strterm_mark(obj);
7199 return;
7200 case imemo_callinfo:
7201 return;
7202 case imemo_callcache:
7203 {
7204 const struct rb_callcache *cc = (const struct rb_callcache *)obj;
7205 // should not mark klass here
7206 gc_mark(objspace, (VALUE)vm_cc_cme(cc));
7207 }
7208 return;
7209 case imemo_constcache:
7210 {
7211             const struct iseq_inline_constant_cache_entry *ice = (struct iseq_inline_constant_cache_entry *)obj;
7212             gc_mark(objspace, ice->value);
7213 }
7214 return;
7215#if VM_CHECK_MODE > 0
7216 default:
7217 VM_UNREACHABLE(gc_mark_imemo);
7218#endif
7219 }
7220}
7221
7222static void mark_cvc_tbl(rb_objspace_t *objspace, VALUE klass);
7223
7224static void
7225gc_mark_children(rb_objspace_t *objspace, VALUE obj)
7226{
7227 register RVALUE *any = RANY(obj);
7228 gc_mark_set_parent(objspace, obj);
7229
7230 if (FL_TEST(obj, FL_EXIVAR)) {
7231 rb_mark_generic_ivar(obj);
7232 }
7233
7234 switch (BUILTIN_TYPE(obj)) {
7235 case T_FLOAT:
7236 case T_BIGNUM:
7237 case T_SYMBOL:
7238         /* Not immediates, but they have no references and no singleton
7239          * class */
7240 return;
7241
7242 case T_NIL:
7243 case T_FIXNUM:
7244 rb_bug("rb_gc_mark() called for broken object");
7245 break;
7246
7247 case T_NODE:
7248 UNEXPECTED_NODE(rb_gc_mark);
7249 break;
7250
7251 case T_IMEMO:
7252 gc_mark_imemo(objspace, obj);
7253 return;
7254
7255 default:
7256 break;
7257 }
7258
7259 gc_mark(objspace, any->as.basic.klass);
7260
7261 switch (BUILTIN_TYPE(obj)) {
7262 case T_CLASS:
7263 case T_MODULE:
7264 if (RCLASS_SUPER(obj)) {
7265 gc_mark(objspace, RCLASS_SUPER(obj));
7266 }
7267 if (!RCLASS_EXT(obj)) break;
7268
7269 mark_m_tbl(objspace, RCLASS_M_TBL(obj));
7270 mark_cvc_tbl(objspace, obj);
7271 cc_table_mark(objspace, obj);
7272 for (attr_index_t i = 0; i < RCLASS_IV_COUNT(obj); i++) {
7273 gc_mark(objspace, RCLASS_IVPTR(obj)[i]);
7274 }
7275 mark_const_tbl(objspace, RCLASS_CONST_TBL(obj));
7276 break;
7277
7278 case T_ICLASS:
7279 if (RICLASS_OWNS_M_TBL_P(obj)) {
7280 mark_m_tbl(objspace, RCLASS_M_TBL(obj));
7281 }
7282 if (RCLASS_SUPER(obj)) {
7283 gc_mark(objspace, RCLASS_SUPER(obj));
7284 }
7285 if (!RCLASS_EXT(obj)) break;
7286
7287 if (RCLASS_INCLUDER(obj)) {
7288 gc_mark(objspace, RCLASS_INCLUDER(obj));
7289 }
7290 mark_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
7291 cc_table_mark(objspace, obj);
7292 break;
7293
7294 case T_ARRAY:
7295 if (ARY_SHARED_P(obj)) {
7296 VALUE root = ARY_SHARED_ROOT(obj);
7297 gc_mark(objspace, root);
7298 }
7299 else {
7300 long i, len = RARRAY_LEN(obj);
7301 const VALUE *ptr = RARRAY_CONST_PTR_TRANSIENT(obj);
7302 for (i=0; i < len; i++) {
7303 gc_mark(objspace, ptr[i]);
7304 }
7305
7306 if (LIKELY(during_gc)) {
7307 if (!ARY_EMBED_P(obj) && RARRAY_TRANSIENT_P(obj)) {
7308 rb_transient_heap_mark(obj, ptr);
7309 }
7310 }
7311 }
7312 break;
7313
7314 case T_HASH:
7315 mark_hash(objspace, obj);
7316 break;
7317
7318 case T_STRING:
7319 if (STR_SHARED_P(obj)) {
7320 gc_mark(objspace, any->as.string.as.heap.aux.shared);
7321 }
7322 break;
7323
7324 case T_DATA:
7325 {
7326 void *const ptr = DATA_PTR(obj);
7327 if (ptr) {
7328 RUBY_DATA_FUNC mark_func = RTYPEDDATA_P(obj) ?
7329 any->as.typeddata.type->function.dmark :
7330 any->as.data.dmark;
7331 if (mark_func) (*mark_func)(ptr);
7332 }
7333 }
7334 break;
7335
7336 case T_OBJECT:
7337 {
7338 rb_shape_t *shape = rb_shape_get_shape_by_id(ROBJECT_SHAPE_ID(obj));
7339 if (rb_shape_obj_too_complex(obj)) {
7340 mark_tbl_no_pin(objspace, ROBJECT_IV_HASH(obj));
7341 }
7342 else {
7343 const VALUE * const ptr = ROBJECT_IVPTR(obj);
7344
7345 uint32_t i, len = ROBJECT_IV_COUNT(obj);
7346 for (i = 0; i < len; i++) {
7347 gc_mark(objspace, ptr[i]);
7348 }
7349
7350 if (LIKELY(during_gc) &&
7351 ROBJ_TRANSIENT_P(obj)) {
7352 rb_transient_heap_mark(obj, ptr);
7353 }
7354 }
7355 if (shape) {
7356 VALUE klass = RBASIC_CLASS(obj);
7357
7358 // Increment max_iv_count if applicable, used to determine size pool allocation
7359 uint32_t num_of_ivs = shape->next_iv_index;
7360 if (RCLASS_EXT(klass)->max_iv_count < num_of_ivs) {
7361 RCLASS_EXT(klass)->max_iv_count = num_of_ivs;
7362 }
7363 }
7364 }
7365 break;
7366
7367 case T_FILE:
7368 if (any->as.file.fptr) {
7369 gc_mark(objspace, any->as.file.fptr->self);
7370 gc_mark(objspace, any->as.file.fptr->pathv);
7371 gc_mark(objspace, any->as.file.fptr->tied_io_for_writing);
7372 gc_mark(objspace, any->as.file.fptr->writeconv_asciicompat);
7373 gc_mark(objspace, any->as.file.fptr->writeconv_pre_ecopts);
7374 gc_mark(objspace, any->as.file.fptr->encs.ecopts);
7375 gc_mark(objspace, any->as.file.fptr->write_lock);
7376 gc_mark(objspace, any->as.file.fptr->timeout);
7377 }
7378 break;
7379
7380 case T_REGEXP:
7381 gc_mark(objspace, any->as.regexp.src);
7382 break;
7383
7384 case T_MATCH:
7385 gc_mark(objspace, any->as.match.regexp);
7386 if (any->as.match.str) {
7387 gc_mark(objspace, any->as.match.str);
7388 }
7389 break;
7390
7391 case T_RATIONAL:
7392 gc_mark(objspace, any->as.rational.num);
7393 gc_mark(objspace, any->as.rational.den);
7394 break;
7395
7396 case T_COMPLEX:
7397 gc_mark(objspace, any->as.complex.real);
7398 gc_mark(objspace, any->as.complex.imag);
7399 break;
7400
7401 case T_STRUCT:
7402 {
7403 long i;
7404 const long len = RSTRUCT_LEN(obj);
7405 const VALUE * const ptr = RSTRUCT_CONST_PTR(obj);
7406
7407 for (i=0; i<len; i++) {
7408 gc_mark(objspace, ptr[i]);
7409 }
7410
7411 if (LIKELY(during_gc) &&
7412 RSTRUCT_TRANSIENT_P(obj)) {
7413 rb_transient_heap_mark(obj, ptr);
7414 }
7415 }
7416 break;
7417
7418 default:
7419#if GC_DEBUG
7420 rb_gcdebug_print_obj_condition((VALUE)obj);
7421#endif
7422 if (BUILTIN_TYPE(obj) == T_MOVED) rb_bug("rb_gc_mark(): %p is T_MOVED", (void *)obj);
7423 if (BUILTIN_TYPE(obj) == T_NONE) rb_bug("rb_gc_mark(): %p is T_NONE", (void *)obj);
7424 if (BUILTIN_TYPE(obj) == T_ZOMBIE) rb_bug("rb_gc_mark(): %p is T_ZOMBIE", (void *)obj);
7425 rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
7426 BUILTIN_TYPE(obj), (void *)any,
7427 is_pointer_to_heap(objspace, any) ? "corrupted object" : "non object");
7428 }
7429}
7430
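/* Drain the mark stack: pop objects and mark their children. When
 * `incremental` is true, clear each object's marking bit and stop once about
 * `count` objects have been processed. Returns TRUE when the stack is empty. */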
7435static inline int
7436gc_mark_stacked_objects(rb_objspace_t *objspace, int incremental, size_t count)
7437{
7438 mark_stack_t *mstack = &objspace->mark_stack;
7439 VALUE obj;
7440#if GC_ENABLE_INCREMENTAL_MARK
7441 size_t marked_slots_at_the_beginning = objspace->marked_slots;
7442 size_t popped_count = 0;
7443#endif
7444
7445 while (pop_mark_stack(mstack, &obj)) {
7446 if (UNDEF_P(obj)) continue; /* skip */
7447
7448 if (RGENGC_CHECK_MODE && !RVALUE_MARKED(obj)) {
7449 rb_bug("gc_mark_stacked_objects: %s is not marked.", obj_info(obj));
7450 }
7451 gc_mark_children(objspace, obj);
7452
7453#if GC_ENABLE_INCREMENTAL_MARK
7454 if (incremental) {
7455 if (RGENGC_CHECK_MODE && !RVALUE_MARKING(obj)) {
7456 rb_bug("gc_mark_stacked_objects: incremental, but marking bit is 0");
7457 }
7458 CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
7459 popped_count++;
7460
7461 if (popped_count + (objspace->marked_slots - marked_slots_at_the_beginning) > count) {
7462 break;
7463 }
7464 }
7465 else {
7466 /* just ignore marking bits */
7467 }
7468#endif
7469 }
7470
7471 if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);
7472
7473 if (is_mark_stack_empty(mstack)) {
7474 shrink_stack_chunk_cache(mstack);
7475 return TRUE;
7476 }
7477 else {
7478 return FALSE;
7479 }
7480}
7481
7482static int
7483gc_mark_stacked_objects_incremental(rb_objspace_t *objspace, size_t count)
7484{
7485 return gc_mark_stacked_objects(objspace, TRUE, count);
7486}
7487
7488static int
7489gc_mark_stacked_objects_all(rb_objspace_t *objspace)
7490{
7491 return gc_mark_stacked_objects(objspace, FALSE, 0);
7492}
7493
7494#if PRINT_ROOT_TICKS
7495#define MAX_TICKS 0x100
7496static tick_t mark_ticks[MAX_TICKS];
7497static const char *mark_ticks_categories[MAX_TICKS];
7498
7499static void
7500show_mark_ticks(void)
7501{
7502 int i;
7503 fprintf(stderr, "mark ticks result:\n");
7504 for (i=0; i<MAX_TICKS; i++) {
7505 const char *category = mark_ticks_categories[i];
7506 if (category) {
7507 fprintf(stderr, "%s\t%8lu\n", category, (unsigned long)mark_ticks[i]);
7508 }
7509 else {
7510 break;
7511 }
7512 }
7513}
7514
7515#endif /* PRINT_ROOT_TICKS */
7516
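/* Mark every GC root set (VM structures, the finalizer table, the machine
 * context, global variable lists and the object-id tables), updating
 * *categoryp with the name of the root set currently being marked. */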
7517static void
7518gc_mark_roots(rb_objspace_t *objspace, const char **categoryp)
7519{
7520 struct gc_list *list;
7521 rb_execution_context_t *ec = GET_EC();
7522 rb_vm_t *vm = rb_ec_vm_ptr(ec);
7523
7524#if PRINT_ROOT_TICKS
7525 tick_t start_tick = tick();
7526 int tick_count = 0;
7527 const char *prev_category = 0;
7528
7529 if (mark_ticks_categories[0] == 0) {
7530 atexit(show_mark_ticks);
7531 }
7532#endif
7533
7534 if (categoryp) *categoryp = "xxx";
7535
7536 objspace->rgengc.parent_object = Qfalse;
7537
7538#if PRINT_ROOT_TICKS
7539#define MARK_CHECKPOINT_PRINT_TICK(category) do { \
7540 if (prev_category) { \
7541 tick_t t = tick(); \
7542 mark_ticks[tick_count] = t - start_tick; \
7543 mark_ticks_categories[tick_count] = prev_category; \
7544 tick_count++; \
7545 } \
7546 prev_category = category; \
7547 start_tick = tick(); \
7548} while (0)
7549#else /* PRINT_ROOT_TICKS */
7550#define MARK_CHECKPOINT_PRINT_TICK(category)
7551#endif
7552
7553#define MARK_CHECKPOINT(category) do { \
7554 if (categoryp) *categoryp = category; \
7555 MARK_CHECKPOINT_PRINT_TICK(category); \
7556} while (0)
7557
7558 MARK_CHECKPOINT("vm");
7559 SET_STACK_END;
7560 rb_vm_mark(vm);
7561 if (vm->self) gc_mark(objspace, vm->self);
7562
7563 MARK_CHECKPOINT("finalizers");
7564 mark_finalizer_tbl(objspace, finalizer_table);
7565
7566 MARK_CHECKPOINT("machine_context");
7567 mark_current_machine_context(objspace, ec);
7568
7569 /* mark protected global variables */
7570 MARK_CHECKPOINT("global_list");
7571 for (list = global_list; list; list = list->next) {
7572 gc_mark_maybe(objspace, *list->varptr);
7573 }
7574
7575 MARK_CHECKPOINT("end_proc");
7576 rb_mark_end_proc();
7577
7578 MARK_CHECKPOINT("global_tbl");
7579 rb_gc_mark_global_tbl();
7580
7581 MARK_CHECKPOINT("object_id");
7582 rb_gc_mark(objspace->next_object_id);
7583 mark_tbl_no_pin(objspace, objspace->obj_to_id_tbl); /* Only mark ids */
7584
7585 if (stress_to_class) rb_gc_mark(stress_to_class);
7586
7587 MARK_CHECKPOINT("finish");
7588#undef MARK_CHECKPOINT
7589}
7590
7591#if RGENGC_CHECK_MODE >= 4
7592
7593#define MAKE_ROOTSIG(obj) (((VALUE)(obj) << 1) | 0x01)
7594#define IS_ROOTSIG(obj) ((VALUE)(obj) & 0x01)
7595#define GET_ROOTSIG(obj) ((const char *)((VALUE)(obj) >> 1))
7596
7597struct reflist {
7598 VALUE *list;
7599 int pos;
7600 int size;
7601};
7602
7603static struct reflist *
7604reflist_create(VALUE obj)
7605{
7606 struct reflist *refs = xmalloc(sizeof(struct reflist));
7607 refs->size = 1;
7608 refs->list = ALLOC_N(VALUE, refs->size);
7609 refs->list[0] = obj;
7610 refs->pos = 1;
7611 return refs;
7612}
7613
7614static void
7615reflist_destruct(struct reflist *refs)
7616{
7617 xfree(refs->list);
7618 xfree(refs);
7619}
7620
7621static void
7622reflist_add(struct reflist *refs, VALUE obj)
7623{
7624 if (refs->pos == refs->size) {
7625 refs->size *= 2;
7626 SIZED_REALLOC_N(refs->list, VALUE, refs->size, refs->size/2);
7627 }
7628
7629 refs->list[refs->pos++] = obj;
7630}
7631
7632static void
7633reflist_dump(struct reflist *refs)
7634{
7635 int i;
7636 for (i=0; i<refs->pos; i++) {
7637 VALUE obj = refs->list[i];
7638 if (IS_ROOTSIG(obj)) { /* root */
7639 fprintf(stderr, "<root@%s>", GET_ROOTSIG(obj));
7640 }
7641 else {
7642 fprintf(stderr, "<%s>", obj_info(obj));
7643 }
7644 if (i+1 < refs->pos) fprintf(stderr, ", ");
7645 }
7646}
7647
7648static int
7649reflist_referred_from_machine_context(struct reflist *refs)
7650{
7651 int i;
7652 for (i=0; i<refs->pos; i++) {
7653 VALUE obj = refs->list[i];
7654 if (IS_ROOTSIG(obj) && strcmp(GET_ROOTSIG(obj), "machine_context") == 0) return 1;
7655 }
7656 return 0;
7657}
7658
7659struct allrefs {
7660 rb_objspace_t *objspace;
7661 /* a -> obj1
7662 * b -> obj1
7663 * c -> obj1
7664 * c -> obj2
7665 * d -> obj3
7666 * #=> {obj1 => [a, b, c], obj2 => [c, d]}
7667 */
7668 struct st_table *references;
7669 const char *category;
7670 VALUE root_obj;
7671     mark_stack_t mark_stack;
7672 };
7673
7674static int
7675allrefs_add(struct allrefs *data, VALUE obj)
7676{
7677 struct reflist *refs;
7678 st_data_t r;
7679
7680 if (st_lookup(data->references, obj, &r)) {
7681 refs = (struct reflist *)r;
7682 reflist_add(refs, data->root_obj);
7683 return 0;
7684 }
7685 else {
7686 refs = reflist_create(data->root_obj);
7687 st_insert(data->references, obj, (st_data_t)refs);
7688 return 1;
7689 }
7690}
7691
7692static void
7693allrefs_i(VALUE obj, void *ptr)
7694{
7695 struct allrefs *data = (struct allrefs *)ptr;
7696
7697 if (allrefs_add(data, obj)) {
7698 push_mark_stack(&data->mark_stack, obj);
7699 }
7700}
7701
7702static void
7703allrefs_roots_i(VALUE obj, void *ptr)
7704{
7705 struct allrefs *data = (struct allrefs *)ptr;
7706 if (strlen(data->category) == 0) rb_bug("!!!");
7707 data->root_obj = MAKE_ROOTSIG(data->category);
7708
7709 if (allrefs_add(data, obj)) {
7710 push_mark_stack(&data->mark_stack, obj);
7711 }
7712}
7713#define PUSH_MARK_FUNC_DATA(v) do { \
7714 struct gc_mark_func_data_struct *prev_mark_func_data = GET_RACTOR()->mfd; \
7715 GET_RACTOR()->mfd = (v);
7716
7717#define POP_MARK_FUNC_DATA() GET_RACTOR()->mfd = prev_mark_func_data;} while (0)
7718
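/* Build a table mapping every reachable object to the roots and objects that
 * reference it, by re-running root marking and a full transitive traversal
 * with GC disabled. Used by the RGENGC_CHECK_MODE >= 4 consistency checks. */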
7719static st_table *
7720objspace_allrefs(rb_objspace_t *objspace)
7721{
7722 struct allrefs data;
7723 struct gc_mark_func_data_struct mfd;
7724 VALUE obj;
7725 int prev_dont_gc = dont_gc_val();
7726 dont_gc_on();
7727
7728 data.objspace = objspace;
7729 data.references = st_init_numtable();
7730 init_mark_stack(&data.mark_stack);
7731
7732 mfd.mark_func = allrefs_roots_i;
7733 mfd.data = &data;
7734
7735 /* traverse root objects */
7736 PUSH_MARK_FUNC_DATA(&mfd);
7737 GET_RACTOR()->mfd = &mfd;
7738 gc_mark_roots(objspace, &data.category);
7739 POP_MARK_FUNC_DATA();
7740
7741 /* traverse rest objects reachable from root objects */
7742 while (pop_mark_stack(&data.mark_stack, &obj)) {
7743 rb_objspace_reachable_objects_from(data.root_obj = obj, allrefs_i, &data);
7744 }
7745 free_stack_chunks(&data.mark_stack);
7746
7747 dont_gc_set(prev_dont_gc);
7748 return data.references;
7749}
7750
7751static int
7752objspace_allrefs_destruct_i(st_data_t key, st_data_t value, st_data_t ptr)
7753{
7754 struct reflist *refs = (struct reflist *)value;
7755 reflist_destruct(refs);
7756 return ST_CONTINUE;
7757}
7758
7759static void
7760objspace_allrefs_destruct(struct st_table *refs)
7761{
7762 st_foreach(refs, objspace_allrefs_destruct_i, 0);
7763 st_free_table(refs);
7764}
7765
7766#if RGENGC_CHECK_MODE >= 5
7767static int
7768allrefs_dump_i(st_data_t k, st_data_t v, st_data_t ptr)
7769{
7770 VALUE obj = (VALUE)k;
7771 struct reflist *refs = (struct reflist *)v;
7772 fprintf(stderr, "[allrefs_dump_i] %s <- ", obj_info(obj));
7773 reflist_dump(refs);
7774 fprintf(stderr, "\n");
7775 return ST_CONTINUE;
7776}
7777
7778static void
7779allrefs_dump(rb_objspace_t *objspace)
7780{
7781 VALUE size = objspace->rgengc.allrefs_table->num_entries;
7782 fprintf(stderr, "[all refs] (size: %"PRIuVALUE")\n", size);
7783 st_foreach(objspace->rgengc.allrefs_table, allrefs_dump_i, 0);
7784}
7785#endif
7786
7787static int
7788gc_check_after_marks_i(st_data_t k, st_data_t v, st_data_t ptr)
7789{
7790 VALUE obj = k;
7791 struct reflist *refs = (struct reflist *)v;
7792 rb_objspace_t *objspace = (rb_objspace_t *)ptr;
7793
7794 /* object should be marked or oldgen */
7795 if (!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj)) {
7796 fprintf(stderr, "gc_check_after_marks_i: %s is not marked and not oldgen.\n", obj_info(obj));
7797 fprintf(stderr, "gc_check_after_marks_i: %p is referred from ", (void *)obj);
7798 reflist_dump(refs);
7799
7800 if (reflist_referred_from_machine_context(refs)) {
7801 fprintf(stderr, " (marked from machine stack).\n");
7802 /* marked from machine context can be false positive */
7803 }
7804 else {
7805 objspace->rgengc.error_count++;
7806 fprintf(stderr, "\n");
7807 }
7808 }
7809 return ST_CONTINUE;
7810}
7811
7812static void
7813gc_marks_check(rb_objspace_t *objspace, st_foreach_callback_func *checker_func, const char *checker_name)
7814{
7815 size_t saved_malloc_increase = objspace->malloc_params.increase;
7816#if RGENGC_ESTIMATE_OLDMALLOC
7817 size_t saved_oldmalloc_increase = objspace->rgengc.oldmalloc_increase;
7818#endif
7819 VALUE already_disabled = rb_objspace_gc_disable(objspace);
7820
7821 objspace->rgengc.allrefs_table = objspace_allrefs(objspace);
7822
7823 if (checker_func) {
7824 st_foreach(objspace->rgengc.allrefs_table, checker_func, (st_data_t)objspace);
7825 }
7826
7827 if (objspace->rgengc.error_count > 0) {
7828#if RGENGC_CHECK_MODE >= 5
7829 allrefs_dump(objspace);
7830#endif
7831 if (checker_name) rb_bug("%s: GC has a problem.", checker_name);
7832 }
7833
7834 objspace_allrefs_destruct(objspace->rgengc.allrefs_table);
7835 objspace->rgengc.allrefs_table = 0;
7836
7837 if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
7838 objspace->malloc_params.increase = saved_malloc_increase;
7839#if RGENGC_ESTIMATE_OLDMALLOC
7840 objspace->rgengc.oldmalloc_increase = saved_oldmalloc_increase;
7841#endif
7842}
7843#endif /* RGENGC_CHECK_MODE >= 4 */
7844
7845struct verify_internal_consistency_struct {
7846 rb_objspace_t *objspace;
7847 int err_count;
7848 size_t live_object_count;
7849 size_t zombie_object_count;
7850
7851 VALUE parent;
7852 size_t old_object_count;
7853 size_t remembered_shady_count;
7854};
7855
7856static void
7857check_generation_i(const VALUE child, void *ptr)
7858{
7859 struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
7860 const VALUE parent = data->parent;
7861
7862 if (RGENGC_CHECK_MODE) GC_ASSERT(RVALUE_OLD_P(parent));
7863
7864 if (!RVALUE_OLD_P(child)) {
7865 if (!RVALUE_REMEMBERED(parent) &&
7866 !RVALUE_REMEMBERED(child) &&
7867 !RVALUE_UNCOLLECTIBLE(child)) {
7868 fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (O->Y) %s -> %s\n", obj_info(parent), obj_info(child));
7869 data->err_count++;
7870 }
7871 }
7872}
7873
7874static void
7875check_color_i(const VALUE child, void *ptr)
7876{
7877 struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
7878 const VALUE parent = data->parent;
7879
7880 if (!RVALUE_WB_UNPROTECTED(parent) && RVALUE_WHITE_P(child)) {
7881 fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (B->W) - %s -> %s\n",
7882 obj_info(parent), obj_info(child));
7883 data->err_count++;
7884 }
7885}
7886
7887static void
7888check_children_i(const VALUE child, void *ptr)
7889{
7890 struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
7891 if (check_rvalue_consistency_force(child, FALSE) != 0) {
7892 fprintf(stderr, "check_children_i: %s has error (referenced from %s)",
7893 obj_info(child), obj_info(data->parent));
7894 rb_print_backtrace(); /* C backtrace will help to debug */
7895
7896 data->err_count++;
7897 }
7898}
7899
7900static int
7901verify_internal_consistency_i(void *page_start, void *page_end, size_t stride,
7902 struct verify_internal_consistency_struct *data)
7903{
7904 VALUE obj;
7905 rb_objspace_t *objspace = data->objspace;
7906
7907 for (obj = (VALUE)page_start; obj != (VALUE)page_end; obj += stride) {
7908 void *poisoned = asan_unpoison_object_temporary(obj);
7909
7910 if (is_live_object(objspace, obj)) {
7911 /* count objects */
7912 data->live_object_count++;
7913 data->parent = obj;
7914
7915 /* Normally, we don't expect T_MOVED objects to be in the heap.
7916 * But they can stay alive on the stack. */
7917 if (!gc_object_moved_p(objspace, obj)) {
7918 /* moved slots don't have children */
7919 rb_objspace_reachable_objects_from(obj, check_children_i, (void *)data);
7920 }
7921
7922 /* check health of children */
7923 if (RVALUE_OLD_P(obj)) data->old_object_count++;
7924 if (RVALUE_WB_UNPROTECTED(obj) && RVALUE_UNCOLLECTIBLE(obj)) data->remembered_shady_count++;
7925
7926 if (!is_marking(objspace) && RVALUE_OLD_P(obj)) {
7927 /* reachable objects from an oldgen object should be old or (young with remember) */
7928 data->parent = obj;
7929 rb_objspace_reachable_objects_from(obj, check_generation_i, (void *)data);
7930 }
7931
7932 if (is_incremental_marking(objspace)) {
7933 if (RVALUE_BLACK_P(obj)) {
7934 /* reachable objects from black objects should be black or grey objects */
7935 data->parent = obj;
7936 rb_objspace_reachable_objects_from(obj, check_color_i, (void *)data);
7937 }
7938 }
7939 }
7940 else {
7941 if (BUILTIN_TYPE(obj) == T_ZOMBIE) {
7942 GC_ASSERT((RBASIC(obj)->flags & ~FL_SEEN_OBJ_ID) == T_ZOMBIE);
7943 data->zombie_object_count++;
7944 }
7945 }
7946 if (poisoned) {
7947 GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
7948 asan_poison_object(obj);
7949 }
7950 }
7951
7952 return 0;
7953}
7954
7955static int
7956gc_verify_heap_page(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
7957{
7958 unsigned int has_remembered_shady = FALSE;
7959 unsigned int has_remembered_old = FALSE;
7960 int remembered_old_objects = 0;
7961 int free_objects = 0;
7962 int zombie_objects = 0;
7963
7964 short slot_size = page->slot_size;
7965 uintptr_t start = (uintptr_t)page->start;
7966 uintptr_t end = start + page->total_slots * slot_size;
7967
7968 for (uintptr_t ptr = start; ptr < end; ptr += slot_size) {
7969 VALUE val = (VALUE)ptr;
7970 void *poisoned = asan_unpoison_object_temporary(val);
7971 enum ruby_value_type type = BUILTIN_TYPE(val);
7972
7973 if (type == T_NONE) free_objects++;
7974 if (type == T_ZOMBIE) zombie_objects++;
7975 if (RVALUE_PAGE_UNCOLLECTIBLE(page, val) && RVALUE_PAGE_WB_UNPROTECTED(page, val)) {
7976 has_remembered_shady = TRUE;
7977 }
7978 if (RVALUE_PAGE_MARKING(page, val)) {
7979 has_remembered_old = TRUE;
7980 remembered_old_objects++;
7981 }
7982
7983 if (poisoned) {
7984 GC_ASSERT(BUILTIN_TYPE(val) == T_NONE);
7985 asan_poison_object(val);
7986 }
7987 }
7988
7989 if (!is_incremental_marking(objspace) &&
7990 page->flags.has_remembered_objects == FALSE && has_remembered_old == TRUE) {
7991
7992 for (uintptr_t ptr = start; ptr < end; ptr += slot_size) {
7993 VALUE val = (VALUE)ptr;
7994 if (RVALUE_PAGE_MARKING(page, val)) {
7995 fprintf(stderr, "marking -> %s\n", obj_info(val));
7996 }
7997 }
7998 rb_bug("page %p's has_remembered_objects should be false, but there are remembered old objects (%d). %s",
7999 (void *)page, remembered_old_objects, obj ? obj_info(obj) : "");
8000 }
8001
8002 if (page->flags.has_uncollectible_shady_objects == FALSE && has_remembered_shady == TRUE) {
8003 rb_bug("page %p's has_remembered_shady should be false, but there are remembered shady objects. %s",
8004 (void *)page, obj ? obj_info(obj) : "");
8005 }
8006
8007 if (0) {
8008 /* free_slots may not equal free_objects */
8009 if (page->free_slots != free_objects) {
8010 rb_bug("page %p's free_slots should be %d, but %d\n", (void *)page, page->free_slots, free_objects);
8011 }
8012 }
8013 if (page->final_slots != zombie_objects) {
8014 rb_bug("page %p's final_slots should be %d, but %d\n", (void *)page, page->final_slots, zombie_objects);
8015 }
8016
8017 return remembered_old_objects;
8018}
8019
8020static int
8021gc_verify_heap_pages_(rb_objspace_t *objspace, struct ccan_list_head *head)
8022{
8023 int remembered_old_objects = 0;
8024 struct heap_page *page = 0;
8025
8026 ccan_list_for_each(head, page, page_node) {
8027 asan_unlock_freelist(page);
8028 RVALUE *p = page->freelist;
8029 while (p) {
8030 VALUE vp = (VALUE)p;
8031 VALUE prev = vp;
8032 asan_unpoison_object(vp, false);
8033 if (BUILTIN_TYPE(vp) != T_NONE) {
8034 fprintf(stderr, "freelist slot expected to be T_NONE but was: %s\n", obj_info(vp));
8035 }
8036 p = p->as.free.next;
8037 asan_poison_object(prev);
8038 }
8039 asan_lock_freelist(page);
8040
8041 if (page->flags.has_remembered_objects == FALSE) {
8042 remembered_old_objects += gc_verify_heap_page(objspace, page, Qfalse);
8043 }
8044 }
8045
8046 return remembered_old_objects;
8047}
8048
8049static int
8050gc_verify_heap_pages(rb_objspace_t *objspace)
8051{
8052 int remembered_old_objects = 0;
8053 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8054 remembered_old_objects += gc_verify_heap_pages_(objspace, &(SIZE_POOL_EDEN_HEAP(&size_pools[i])->pages));
8055 remembered_old_objects += gc_verify_heap_pages_(objspace, &(SIZE_POOL_TOMB_HEAP(&size_pools[i])->pages));
8056 }
8057 return remembered_old_objects;
8058}
8059
8060/*
8061 * call-seq:
8062 * GC.verify_internal_consistency -> nil
8063 *
8064 * Verify internal consistency.
8065 *
8066 * This method is implementation specific.
8067 * Currently, it checks generational consistency
8068 * when RGenGC is supported.
8069 */
8070static VALUE
8071gc_verify_internal_consistency_m(VALUE dummy)
8072{
8073 gc_verify_internal_consistency(&rb_objspace);
8074 return Qnil;
8075}
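/* Ruby-level sketch (illustrative): this is a debugging aid, so it either
 * returns nil or aborts the process via rb_bug() when an inconsistency is
 * found:
 *
 *     GC.verify_internal_consistency  #=> nil
 */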
8076
8077static void
8078gc_verify_internal_consistency_(rb_objspace_t *objspace)
8079{
8080 struct verify_internal_consistency_struct data = {0};
8081
8082 data.objspace = objspace;
8083 gc_report(5, objspace, "gc_verify_internal_consistency: start\n");
8084
8085 /* check relations */
8086 for (size_t i = 0; i < heap_allocated_pages; i++) {
8087 struct heap_page *page = heap_pages_sorted[i];
8088 short slot_size = page->slot_size;
8089
8090 uintptr_t start = (uintptr_t)page->start;
8091 uintptr_t end = start + page->total_slots * slot_size;
8092
8093 verify_internal_consistency_i((void *)start, (void *)end, slot_size, &data);
8094 }
8095
8096 if (data.err_count != 0) {
8097#if RGENGC_CHECK_MODE >= 5
8098 objspace->rgengc.error_count = data.err_count;
8099 gc_marks_check(objspace, NULL, NULL);
8100 allrefs_dump(objspace);
8101#endif
8102 rb_bug("gc_verify_internal_consistency: found internal inconsistency.");
8103 }
8104
8105 /* check heap_page status */
8106 gc_verify_heap_pages(objspace);
8107
8108 /* check counters */
8109
8110 if (!is_lazy_sweeping(objspace) &&
8111 !finalizing &&
8112 ruby_single_main_ractor != NULL) {
8113 if (objspace_live_slots(objspace) != data.live_object_count) {
8114 fprintf(stderr, "heap_pages_final_slots: %"PRIdSIZE", "
8115 "objspace->profile.total_freed_objects: %"PRIdSIZE"\n",
8116 heap_pages_final_slots, objspace->profile.total_freed_objects);
8117 rb_bug("inconsistent live slot number: expect %"PRIuSIZE", but %"PRIuSIZE".",
8118 objspace_live_slots(objspace), data.live_object_count);
8119 }
8120 }
8121
8122 if (!is_marking(objspace)) {
8123 if (objspace->rgengc.old_objects != data.old_object_count) {
8124 rb_bug("inconsistent old slot number: expect %"PRIuSIZE", but %"PRIuSIZE".",
8125 objspace->rgengc.old_objects, data.old_object_count);
8126 }
8127 if (objspace->rgengc.uncollectible_wb_unprotected_objects != data.remembered_shady_count) {
8128 rb_bug("inconsistent number of wb unprotected objects: expect %"PRIuSIZE", but %"PRIuSIZE".",
8129 objspace->rgengc.uncollectible_wb_unprotected_objects, data.remembered_shady_count);
8130 }
8131 }
8132
8133 if (!finalizing) {
8134 size_t list_count = 0;
8135
8136 {
8137 VALUE z = heap_pages_deferred_final;
8138 while (z) {
8139 list_count++;
8140 z = RZOMBIE(z)->next;
8141 }
8142 }
8143
8144 if (heap_pages_final_slots != data.zombie_object_count ||
8145 heap_pages_final_slots != list_count) {
8146
8147 rb_bug("inconsistent finalizing object count:\n"
8148 " expect %"PRIuSIZE"\n"
8149 " but %"PRIuSIZE" zombies\n"
8150 " heap_pages_deferred_final list has %"PRIuSIZE" items.",
8151 heap_pages_final_slots,
8152 data.zombie_object_count,
8153 list_count);
8154 }
8155 }
8156
8157 gc_report(5, objspace, "gc_verify_internal_consistency: OK\n");
8158}
8159
8160static void
8161gc_verify_internal_consistency(rb_objspace_t *objspace)
8162{
8163 RB_VM_LOCK_ENTER();
8164 {
8165 rb_vm_barrier(); // stop other ractors
8166
8167 unsigned int prev_during_gc = during_gc;
8168 during_gc = FALSE; // stop gc here
8169 {
8170 gc_verify_internal_consistency_(objspace);
8171 }
8172 during_gc = prev_during_gc;
8173 }
8174 RB_VM_LOCK_LEAVE();
8175}
8176
8177void
8178rb_gc_verify_internal_consistency(void)
8179{
8180 gc_verify_internal_consistency(&rb_objspace);
8181}
8182
8183static VALUE
8184gc_verify_transient_heap_internal_consistency(VALUE dmy)
8185{
8186 rb_transient_heap_verify();
8187 return Qnil;
8188}
8189
8190#if GC_ENABLE_INCREMENTAL_MARK
8191static void
8192heap_move_pooled_pages_to_free_pages(rb_heap_t *heap)
8193{
8194 if (heap->pooled_pages) {
8195 if (heap->free_pages) {
8196 struct heap_page *free_pages_tail = heap->free_pages;
8197 while (free_pages_tail->free_next) {
8198 free_pages_tail = free_pages_tail->free_next;
8199 }
8200 free_pages_tail->free_next = heap->pooled_pages;
8201 }
8202 else {
8203 heap->free_pages = heap->pooled_pages;
8204 }
8205
8206 heap->pooled_pages = NULL;
8207 }
8208}
8209#endif
8210
8211/* marks */
8212
8213static void
8214gc_marks_start(rb_objspace_t *objspace, int full_mark)
8215{
8216 /* start marking */
8217 gc_report(1, objspace, "gc_marks_start: (%s)\n", full_mark ? "full" : "minor");
8218 gc_mode_transition(objspace, gc_mode_marking);
8219
8220 if (full_mark) {
8221#if GC_ENABLE_INCREMENTAL_MARK
8222 size_t incremental_marking_steps = (objspace->rincgc.pooled_slots / INCREMENTAL_MARK_STEP_ALLOCATIONS) + 1;
8223 objspace->rincgc.step_slots = (objspace->marked_slots * 2) / incremental_marking_steps;
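/* Worked example with illustrative numbers (INCREMENTAL_MARK_STEP_ALLOCATIONS
 * is assumed to be 500 here): pooled_slots = 2048 gives
 * incremental_marking_steps = 2048 / 500 + 1 = 5, and with
 * marked_slots = 10000 from the previous cycle,
 * step_slots = 10000 * 2 / 5 = 4000 slots are marked per incremental step. */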
8224
8225 if (0) fprintf(stderr, "objspace->marked_slots: %"PRIdSIZE", "
8226 "objspace->rincgc.pooled_page_num: %"PRIdSIZE", "
8227 "objspace->rincgc.step_slots: %"PRIdSIZE", \n",
8228 objspace->marked_slots, objspace->rincgc.pooled_slots, objspace->rincgc.step_slots);
8229#endif
8230 objspace->flags.during_minor_gc = FALSE;
8231 if (ruby_enable_autocompact) {
8232 objspace->flags.during_compacting |= TRUE;
8233 }
8234 objspace->profile.major_gc_count++;
8235 objspace->rgengc.uncollectible_wb_unprotected_objects = 0;
8236 objspace->rgengc.old_objects = 0;
8237 objspace->rgengc.last_major_gc = objspace->profile.count;
8238 objspace->marked_slots = 0;
8239
8240 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8241 rb_size_pool_t *size_pool = &size_pools[i];
8242 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
8243 rgengc_mark_and_rememberset_clear(objspace, heap);
8244 heap_move_pooled_pages_to_free_pages(heap);
8245 }
8246 }
8247 else {
8248 objspace->flags.during_minor_gc = TRUE;
8249 objspace->marked_slots =
8250 objspace->rgengc.old_objects + objspace->rgengc.uncollectible_wb_unprotected_objects; /* uncollectible objects are marked already */
8251 objspace->profile.minor_gc_count++;
8252
8253 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8254 rgengc_rememberset_mark(objspace, SIZE_POOL_EDEN_HEAP(&size_pools[i]));
8255 }
8256 }
8257
8258 gc_mark_roots(objspace, NULL);
8259
8260 gc_report(1, objspace, "gc_marks_start: (%s) end, stack in %"PRIdSIZE"\n",
8261 full_mark ? "full" : "minor", mark_stack_size(&objspace->mark_stack));
8262}
8263
8264#if GC_ENABLE_INCREMENTAL_MARK
8265static inline void
8266gc_marks_wb_unprotected_objects_plane(rb_objspace_t *objspace, uintptr_t p, bits_t bits)
8267{
8268 if (bits) {
8269 do {
8270 if (bits & 1) {
8271 gc_report(2, objspace, "gc_marks_wb_unprotected_objects: marked shady: %s\n", obj_info((VALUE)p));
8272 GC_ASSERT(RVALUE_WB_UNPROTECTED((VALUE)p));
8273 GC_ASSERT(RVALUE_MARKED((VALUE)p));
8274 gc_mark_children(objspace, (VALUE)p);
8275 }
8276 p += BASE_SLOT_SIZE;
8277 bits >>= 1;
8278 } while (bits);
8279 }
8280}
8281
8282static void
8283gc_marks_wb_unprotected_objects(rb_objspace_t *objspace, rb_heap_t *heap)
8284{
8285 struct heap_page *page = 0;
8286
8287 ccan_list_for_each(&heap->pages, page, page_node) {
8288 bits_t *mark_bits = page->mark_bits;
8289 bits_t *wbun_bits = page->wb_unprotected_bits;
8290 uintptr_t p = page->start;
8291 size_t j;
8292
8293 bits_t bits = mark_bits[0] & wbun_bits[0];
8294 bits >>= NUM_IN_PAGE(p);
8295 gc_marks_wb_unprotected_objects_plane(objspace, p, bits);
8296 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
8297
8298 for (j=1; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
8299 bits_t bits = mark_bits[j] & wbun_bits[j];
8300
8301 gc_marks_wb_unprotected_objects_plane(objspace, p, bits);
8302 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
8303 }
8304 }
8305
8306 gc_mark_stacked_objects_all(objspace);
8307}
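/* Note on the loop shape above (the same pattern appears in
 * rgengc_rememberset_mark() and gc_compact_page() below): page bitmaps are
 * arrays of bits_t words ("planes"). The first plane is right-shifted by
 * NUM_IN_PAGE(p) so that bit 0 lines up with the first slot of the page, and
 * each following plane covers exactly BITS_BITLENGTH slots, hence the two
 * different increments of p. */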
8308#endif
8309
8310static void
8311gc_marks_finish(rb_objspace_t *objspace)
8312{
8313#if GC_ENABLE_INCREMENTAL_MARK
8314 /* finish incremental GC */
8315 if (is_incremental_marking(objspace)) {
8316 if (RGENGC_CHECK_MODE && is_mark_stack_empty(&objspace->mark_stack) == 0) {
8317 rb_bug("gc_marks_finish: mark stack is not empty (%"PRIdSIZE").",
8318 mark_stack_size(&objspace->mark_stack));
8319 }
8320
8321 gc_mark_roots(objspace, 0);
8322 while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == false);
8323
8324#if RGENGC_CHECK_MODE >= 2
8325 if (gc_verify_heap_pages(objspace) != 0) {
8326 rb_bug("gc_marks_finish (incremental): there are remembered old objects.");
8327 }
8328#endif
8329
8330 objspace->flags.during_incremental_marking = FALSE;
8331 /* check children of all marked wb-unprotected objects */
8332 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8333 gc_marks_wb_unprotected_objects(objspace, SIZE_POOL_EDEN_HEAP(&size_pools[i]));
8334 }
8335 }
8336#endif /* GC_ENABLE_INCREMENTAL_MARK */
8337
8338#if RGENGC_CHECK_MODE >= 2
8339 gc_verify_internal_consistency(objspace);
8340#endif
8341
8342 if (is_full_marking(objspace)) {
8343 /* See the comment about RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR */
8344 const double r = gc_params.oldobject_limit_factor;
8345 objspace->rgengc.uncollectible_wb_unprotected_objects_limit = (size_t)(objspace->rgengc.uncollectible_wb_unprotected_objects * r);
8346 objspace->rgengc.old_objects_limit = (size_t)(objspace->rgengc.old_objects * r);
8347 }
8348
8349#if RGENGC_CHECK_MODE >= 4
8350 during_gc = FALSE;
8351 gc_marks_check(objspace, gc_check_after_marks_i, "after_marks");
8352 during_gc = TRUE;
8353#endif
8354
8355 {
8356 /* decide full GC is needed or not */
8357 size_t total_slots = heap_allocatable_slots(objspace) + heap_eden_total_slots(objspace);
8358 size_t sweep_slots = total_slots - objspace->marked_slots; /* will be swept slots */
8359 size_t max_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_max_ratio);
8360 size_t min_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_min_ratio);
8361 int full_marking = is_full_marking(objspace);
8362 const int r_cnt = GET_VM()->ractor.cnt;
8363 const int r_mul = r_cnt > 8 ? 8 : r_cnt; // up to 8
8364
8365 GC_ASSERT(heap_eden_total_slots(objspace) >= objspace->marked_slots);
8366
8367 /* setup free-able page counts */
8368 if (max_free_slots < gc_params.heap_init_slots * r_mul) {
8369 max_free_slots = gc_params.heap_init_slots * r_mul;
8370 }
8371
8372 if (sweep_slots > max_free_slots) {
8373 heap_pages_freeable_pages = (sweep_slots - max_free_slots) / HEAP_PAGE_OBJ_LIMIT;
8374 }
8375 else {
8376 heap_pages_freeable_pages = 0;
8377 }
8378
8379 /* check free_min */
8380 if (min_free_slots < gc_params.heap_free_slots * r_mul) {
8381 min_free_slots = gc_params.heap_free_slots * r_mul;
8382 }
8383
8384 if (sweep_slots < min_free_slots) {
8385 if (!full_marking) {
8386 if (objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) {
8387 full_marking = TRUE;
8388 /* do not update last_major_gc, because full marking is not done. */
8389 /* goto increment; */
8390 }
8391 else {
8392 gc_report(1, objspace, "gc_marks_finish: next is full GC!!\n");
8393 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_NOFREE;
8394 }
8395 }
8396
8397#if !USE_RVARGC
8398 if (full_marking) {
8399 /* increment: */
8400 gc_report(1, objspace, "gc_marks_finish: heap_set_increment!!\n");
8401 rb_size_pool_t *size_pool = &size_pools[0];
8402 size_pool_allocatable_pages_set(objspace, size_pool, heap_extend_pages(objspace, size_pool, sweep_slots, total_slots, heap_allocated_pages + heap_allocatable_pages(objspace)));
8403
8404 heap_increment(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
8405 }
8406#endif
8407 }
8408
8409 if (full_marking) {
8410 /* See the comment about RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR */
8411 const double r = gc_params.oldobject_limit_factor;
8412 objspace->rgengc.uncollectible_wb_unprotected_objects_limit = (size_t)(objspace->rgengc.uncollectible_wb_unprotected_objects * r);
8413 objspace->rgengc.old_objects_limit = (size_t)(objspace->rgengc.old_objects * r);
8414 }
8415
8416 if (objspace->rgengc.uncollectible_wb_unprotected_objects > objspace->rgengc.uncollectible_wb_unprotected_objects_limit) {
8417 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_SHADY;
8418 }
8419 if (objspace->rgengc.old_objects > objspace->rgengc.old_objects_limit) {
8420 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_OLDGEN;
8421 }
8422 if (RGENGC_FORCE_MAJOR_GC) {
8423 objspace->rgengc.need_major_gc = GPR_FLAG_MAJOR_BY_FORCE;
8424 }
8425
8426 gc_report(1, objspace, "gc_marks_finish (marks %"PRIdSIZE" objects, "
8427 "old %"PRIdSIZE" objects, total %"PRIdSIZE" slots, "
8428 "sweep %"PRIdSIZE" slots, increment: %"PRIdSIZE", next GC: %s)\n",
8429 objspace->marked_slots, objspace->rgengc.old_objects, heap_eden_total_slots(objspace), sweep_slots, heap_allocatable_pages(objspace),
8430 objspace->rgengc.need_major_gc ? "major" : "minor");
8431 }
8432
8433 rb_transient_heap_finish_marking();
8434 rb_ractor_finish_marking();
8435
8436 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_END_MARK, 0);
8437}
8438
8439#if GC_ENABLE_INCREMENTAL_MARK
8440static void
8441gc_marks_step(rb_objspace_t *objspace, size_t slots)
8442{
8443 GC_ASSERT(is_marking(objspace));
8444
8445 if (gc_mark_stacked_objects_incremental(objspace, slots)) {
8446 gc_marks_finish(objspace);
8447 gc_sweep(objspace);
8448 }
8449 if (0) fprintf(stderr, "objspace->marked_slots: %"PRIdSIZE"\n", objspace->marked_slots);
8450}
8451#endif
8452
8453static bool
8454gc_compact_heap_cursors_met_p(rb_heap_t *heap)
8455{
8456 return heap->sweeping_page == heap->compact_cursor;
8457}
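/* Compaction runs two cursors over each heap's page list: sweeping_page moves
 * forward (ccan_list_next in gc_compact_move) while compact_cursor moves
 * backward (ccan_list_prev in gc_sweep_compact below). Once they point at the
 * same page there is nowhere left to move objects to, so compaction of that
 * heap is finished. */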
8458
8459static rb_size_pool_t *
8460gc_compact_destination_pool(rb_objspace_t *objspace, rb_size_pool_t *src_pool, VALUE src)
8461{
8462 size_t obj_size;
8463 size_t idx = 0;
8464
8465 switch (BUILTIN_TYPE(src)) {
8466 case T_ARRAY:
8467 obj_size = rb_ary_size_as_embedded(src);
8468 break;
8469
8470 case T_OBJECT:
8471 if (rb_shape_obj_too_complex(src)) {
8472 return &size_pools[0];
8473 }
8474 else {
8475 obj_size = rb_obj_embedded_size(ROBJECT_IV_CAPACITY(src));
8476 }
8477 break;
8478
8479 case T_STRING:
8480 obj_size = rb_str_size_as_embedded(src);
8481 break;
8482
8483 default:
8484 return src_pool;
8485 }
8486
8487 if (rb_gc_size_allocatable_p(obj_size)){
8488 idx = size_pool_idx_for_size(obj_size);
8489 }
8490 return &size_pools[idx];
8491}
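/* Illustrative example (the slot sizes are assumptions for a 64-bit build
 * with the default five size pools of 40/80/160/320/640 bytes): a T_STRING
 * whose embedded representation needs 120 bytes is routed to the 160-byte
 * pool via size_pool_idx_for_size(); if the embedded size fits no pool at
 * all, rb_gc_size_allocatable_p() is false and idx stays 0. */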
8492
8493static bool
8494gc_compact_move(rb_objspace_t *objspace, rb_heap_t *heap, rb_size_pool_t *size_pool, VALUE src)
8495{
8496 GC_ASSERT(BUILTIN_TYPE(src) != T_MOVED);
8497 GC_ASSERT(gc_is_moveable_obj(objspace, src));
8498
8499 rb_size_pool_t *dest_pool = gc_compact_destination_pool(objspace, size_pool, src);
8500 rb_heap_t *dheap = SIZE_POOL_EDEN_HEAP(dest_pool);
8501 rb_shape_t *new_shape = NULL;
8502 rb_shape_t *orig_shape = NULL;
8503
8504 if (gc_compact_heap_cursors_met_p(dheap)) {
8505 return dheap != heap;
8506 }
8507
8508 if (RB_TYPE_P(src, T_OBJECT)) {
8509 orig_shape = rb_shape_get_shape(src);
8510 if (dheap != heap && !rb_shape_obj_too_complex(src)) {
8511 rb_shape_t *initial_shape = rb_shape_get_shape_by_id((shape_id_t)((dest_pool - size_pools) + SIZE_POOL_COUNT));
8512 new_shape = rb_shape_traverse_from_new_root(initial_shape, orig_shape);
8513
8514 if (!new_shape) {
8515 dest_pool = size_pool;
8516 dheap = heap;
8517 }
8518 }
8519 }
8520
8521 while (!try_move(objspace, dheap, dheap->free_pages, src)) {
8522 struct gc_sweep_context ctx = {
8523 .page = dheap->sweeping_page,
8524 .final_slots = 0,
8525 .freed_slots = 0,
8526 .empty_slots = 0,
8527 };
8528
8529 /* The page of src could be partially compacted, so it may contain
8530 * T_MOVED. Sweeping a page may read objects on this page, so we
8531 * need to lock the page. */
8532 lock_page_body(objspace, GET_PAGE_BODY(src));
8533 gc_sweep_page(objspace, dheap, &ctx);
8534 unlock_page_body(objspace, GET_PAGE_BODY(src));
8535
8536 if (dheap->sweeping_page->free_slots > 0) {
8537 heap_add_freepage(dheap, dheap->sweeping_page);
8538 };
8539
8540 dheap->sweeping_page = ccan_list_next(&dheap->pages, dheap->sweeping_page, page_node);
8541 if (gc_compact_heap_cursors_met_p(dheap)) {
8542 return dheap != heap;
8543 }
8544 }
8545
8546 if (orig_shape) {
8547 if (new_shape) {
8548 VALUE dest = rb_gc_location(src);
8549 rb_shape_set_shape(dest, new_shape);
8550 }
8551 RMOVED(src)->original_shape_id = rb_shape_id(orig_shape);
8552 }
8553
8554 return true;
8555}
8556
8557static bool
8558gc_compact_plane(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap, uintptr_t p, bits_t bitset, struct heap_page *page)
8559{
8560 short slot_size = page->slot_size;
8561 short slot_bits = slot_size / BASE_SLOT_SIZE;
8562 GC_ASSERT(slot_bits > 0);
8563
8564 do {
8565 VALUE vp = (VALUE)p;
8566 GC_ASSERT(vp % sizeof(RVALUE) == 0);
8567
8568 if (bitset & 1) {
8569 objspace->rcompactor.considered_count_table[BUILTIN_TYPE(vp)]++;
8570
8571 if (gc_is_moveable_obj(objspace, vp)) {
8572 if (!gc_compact_move(objspace, heap, size_pool, vp)) {
8573 // the cursors met; bubble up
8574 return false;
8575 }
8576 }
8577 }
8578 p += slot_size;
8579 bitset >>= slot_bits;
8580 } while (bitset);
8581
8582 return true;
8583}
8584
8585// Iterate over all the objects in the page, moving them to where they want to go
8586static bool
8587gc_compact_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap, struct heap_page *page)
8588{
8589 GC_ASSERT(page == heap->compact_cursor);
8590
8591 bits_t *mark_bits, *pin_bits;
8592 bits_t bitset;
8593 uintptr_t p = page->start;
8594
8595 mark_bits = page->mark_bits;
8596 pin_bits = page->pinned_bits;
8597
8598 // objects that can be moved are marked and not pinned
8599 bitset = (mark_bits[0] & ~pin_bits[0]);
8600 bitset >>= NUM_IN_PAGE(p);
8601 if (bitset) {
8602 if (!gc_compact_plane(objspace, size_pool, heap, (uintptr_t)p, bitset, page))
8603 return false;
8604 }
8605 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
8606
8607 for (int j = 1; j < HEAP_PAGE_BITMAP_LIMIT; j++) {
8608 bitset = (mark_bits[j] & ~pin_bits[j]);
8609 if (bitset) {
8610 if (!gc_compact_plane(objspace, size_pool, heap, (uintptr_t)p, bitset, page))
8611 return false;
8612 }
8613 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
8614 }
8615
8616 return true;
8617}
8618
8619static bool
8620gc_compact_all_compacted_p(rb_objspace_t *objspace)
8621{
8622 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8623 rb_size_pool_t *size_pool = &size_pools[i];
8624 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
8625
8626 if (heap->total_pages > 0 &&
8627 !gc_compact_heap_cursors_met_p(heap)) {
8628 return false;
8629 }
8630 }
8631
8632 return true;
8633}
8634
8635static void
8636gc_sweep_compact(rb_objspace_t *objspace)
8637{
8638 gc_compact_start(objspace);
8639#if RGENGC_CHECK_MODE >= 2
8640 gc_verify_internal_consistency(objspace);
8641#endif
8642
8643 while (!gc_compact_all_compacted_p(objspace)) {
8644 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8645 rb_size_pool_t *size_pool = &size_pools[i];
8646 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
8647
8648 if (gc_compact_heap_cursors_met_p(heap)) {
8649 continue;
8650 }
8651
8652 struct heap_page *start_page = heap->compact_cursor;
8653
8654 if (!gc_compact_page(objspace, size_pool, heap, start_page)) {
8655 lock_page_body(objspace, GET_PAGE_BODY(start_page->start));
8656
8657 continue;
8658 }
8659
8660 // If we get here, we've finished moving all objects on the compact_cursor page
8661 // So we can lock it and move the cursor on to the next one.
8662 lock_page_body(objspace, GET_PAGE_BODY(start_page->start));
8663 heap->compact_cursor = ccan_list_prev(&heap->pages, heap->compact_cursor, page_node);
8664 }
8665 }
8666
8667 gc_compact_finish(objspace);
8668
8669#if RGENGC_CHECK_MODE >= 2
8670 gc_verify_internal_consistency(objspace);
8671#endif
8672}
8673
8674static void
8675gc_marks_rest(rb_objspace_t *objspace)
8676{
8677 gc_report(1, objspace, "gc_marks_rest\n");
8678
8679#if GC_ENABLE_INCREMENTAL_MARK
8680 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8681 SIZE_POOL_EDEN_HEAP(&size_pools[i])->pooled_pages = NULL;
8682 }
8683#endif
8684
8685 if (is_incremental_marking(objspace)) {
8686 while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == FALSE);
8687 }
8688 else {
8689 gc_mark_stacked_objects_all(objspace);
8690 }
8691
8692 gc_marks_finish(objspace);
8693
8694 /* move to sweep */
8695 gc_sweep(objspace);
8696}
8697
8698static void
8699gc_marks_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
8700{
8701 GC_ASSERT(dont_gc_val() == FALSE);
8702#if GC_ENABLE_INCREMENTAL_MARK
8703
8704 unsigned int lock_lev;
8705 gc_enter(objspace, gc_enter_event_mark_continue, &lock_lev);
8706
8707 if (heap->free_pages) {
8708 gc_report(2, objspace, "gc_marks_continue: has pooled pages");
8709 gc_marks_step(objspace, objspace->rincgc.step_slots);
8710 }
8711 else {
8712 gc_report(2, objspace, "gc_marks_continue: no more pooled pages (stack depth: %"PRIdSIZE").\n",
8713 mark_stack_size(&objspace->mark_stack));
8714 gc_marks_rest(objspace);
8715 }
8716
8717 gc_exit(objspace, gc_enter_event_mark_continue, &lock_lev);
8718#endif
8719}
8720
8721static void
8722gc_marks(rb_objspace_t *objspace, int full_mark)
8723{
8724 gc_prof_mark_timer_start(objspace);
8725
8726 /* setup marking */
8727
8728 gc_marks_start(objspace, full_mark);
8729 if (!is_incremental_marking(objspace)) {
8730 gc_marks_rest(objspace);
8731 }
8732
8733#if RGENGC_PROFILE > 0
8734 if (gc_prof_record(objspace)) {
8735 gc_profile_record *record = gc_prof_record(objspace);
8736 record->old_objects = objspace->rgengc.old_objects;
8737 }
8738#endif
8739 gc_prof_mark_timer_stop(objspace);
8740}
8741
8742/* RGENGC */
8743
8744static void
8745gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...)
8746{
8747 if (level <= RGENGC_DEBUG) {
8748 char buf[1024];
8749 FILE *out = stderr;
8750 va_list args;
8751 const char *status = " ";
8752
8753 if (during_gc) {
8754 status = is_full_marking(objspace) ? "+" : "-";
8755 }
8756 else {
8757 if (is_lazy_sweeping(objspace)) {
8758 status = "S";
8759 }
8760 if (is_incremental_marking(objspace)) {
8761 status = "M";
8762 }
8763 }
8764
8765 va_start(args, fmt);
8766 vsnprintf(buf, 1024, fmt, args);
8767 va_end(args);
8768
8769 fprintf(out, "%s|", status);
8770 fputs(buf, out);
8771 }
8772}
8773
8774/* bit operations */
8775
8776static int
8777rgengc_remembersetbits_get(rb_objspace_t *objspace, VALUE obj)
8778{
8779 return RVALUE_REMEMBERED(obj);
8780}
8781
8782static int
8783rgengc_remembersetbits_set(rb_objspace_t *objspace, VALUE obj)
8784{
8785 struct heap_page *page = GET_HEAP_PAGE(obj);
8786 bits_t *bits = &page->marking_bits[0];
8787
8788 GC_ASSERT(!is_incremental_marking(objspace));
8789
8790 if (MARKED_IN_BITMAP(bits, obj)) {
8791 return FALSE;
8792 }
8793 else {
8794 page->flags.has_remembered_objects = TRUE;
8795 MARK_IN_BITMAP(bits, obj);
8796 return TRUE;
8797 }
8798}
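/* Note: outside incremental marking the per-page marking_bits double as the
 * remembered set, so "remembering" an object is just setting its marking bit
 * and flagging the page with has_remembered_objects; the next minor GC then
 * rescans those pages in rgengc_rememberset_mark(). */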
8799
8800/* wb, etc */
8801
8802/* return FALSE if already remembered */
8803static int
8804rgengc_remember(rb_objspace_t *objspace, VALUE obj)
8805{
8806 gc_report(6, objspace, "rgengc_remember: %s %s\n", obj_info(obj),
8807 rgengc_remembersetbits_get(objspace, obj) ? "was already remembered" : "is remembered now");
8808
8809 check_rvalue_consistency(obj);
8810
8811 if (RGENGC_CHECK_MODE) {
8812 if (RVALUE_WB_UNPROTECTED(obj)) rb_bug("rgengc_remember: %s is not wb protected.", obj_info(obj));
8813 }
8814
8815#if RGENGC_PROFILE > 0
8816 if (!rgengc_remembered(objspace, obj)) {
8817 if (RVALUE_WB_UNPROTECTED(obj) == 0) {
8818 objspace->profile.total_remembered_normal_object_count++;
8819#if RGENGC_PROFILE >= 2
8820 objspace->profile.remembered_normal_object_count_types[BUILTIN_TYPE(obj)]++;
8821#endif
8822 }
8823 }
8824#endif /* RGENGC_PROFILE > 0 */
8825
8826 return rgengc_remembersetbits_set(objspace, obj);
8827}
8828
8829static int
8830rgengc_remembered_sweep(rb_objspace_t *objspace, VALUE obj)
8831{
8832 int result = rgengc_remembersetbits_get(objspace, obj);
8833 check_rvalue_consistency(obj);
8834 return result;
8835}
8836
8837static int
8838rgengc_remembered(rb_objspace_t *objspace, VALUE obj)
8839{
8840 gc_report(6, objspace, "rgengc_remembered: %s\n", obj_info(obj));
8841 return rgengc_remembered_sweep(objspace, obj);
8842}
8843
8844#ifndef PROFILE_REMEMBERSET_MARK
8845#define PROFILE_REMEMBERSET_MARK 0
8846#endif
8847
8848static inline void
8849rgengc_rememberset_mark_plane(rb_objspace_t *objspace, uintptr_t p, bits_t bitset)
8850{
8851 if (bitset) {
8852 do {
8853 if (bitset & 1) {
8854 VALUE obj = (VALUE)p;
8855 gc_report(2, objspace, "rgengc_rememberset_mark: mark %s\n", obj_info(obj));
8856 GC_ASSERT(RVALUE_UNCOLLECTIBLE(obj));
8857 GC_ASSERT(RVALUE_OLD_P(obj) || RVALUE_WB_UNPROTECTED(obj));
8858
8859 gc_mark_children(objspace, obj);
8860 }
8861 p += BASE_SLOT_SIZE;
8862 bitset >>= 1;
8863 } while (bitset);
8864 }
8865}
8866
8867static void
8868rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap)
8869{
8870 size_t j;
8871 struct heap_page *page = 0;
8872#if PROFILE_REMEMBERSET_MARK
8873 int has_old = 0, has_shady = 0, has_both = 0, skip = 0;
8874#endif
8875 gc_report(1, objspace, "rgengc_rememberset_mark: start\n");
8876
8877 ccan_list_for_each(&heap->pages, page, page_node) {
8878 if (page->flags.has_remembered_objects | page->flags.has_uncollectible_shady_objects) {
8879 uintptr_t p = page->start;
8880 bits_t bitset, bits[HEAP_PAGE_BITMAP_LIMIT];
8881 bits_t *marking_bits = page->marking_bits;
8882 bits_t *uncollectible_bits = page->uncollectible_bits;
8883 bits_t *wb_unprotected_bits = page->wb_unprotected_bits;
8884#if PROFILE_REMEMBERSET_MARK
8885 if (page->flags.has_remembered_objects && page->flags.has_uncollectible_shady_objects) has_both++;
8886 else if (page->flags.has_remembered_objects) has_old++;
8887 else if (page->flags.has_uncollectible_shady_objects) has_shady++;
8888#endif
8889 for (j=0; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
8890 bits[j] = marking_bits[j] | (uncollectible_bits[j] & wb_unprotected_bits[j]);
8891 marking_bits[j] = 0;
8892 }
8893 page->flags.has_remembered_objects = FALSE;
8894
8895 bitset = bits[0];
8896 bitset >>= NUM_IN_PAGE(p);
8897 rgengc_rememberset_mark_plane(objspace, p, bitset);
8898 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
8899
8900 for (j=1; j < HEAP_PAGE_BITMAP_LIMIT; j++) {
8901 bitset = bits[j];
8902 rgengc_rememberset_mark_plane(objspace, p, bitset);
8903 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
8904 }
8905 }
8906#if PROFILE_REMEMBERSET_MARK
8907 else {
8908 skip++;
8909 }
8910#endif
8911 }
8912
8913#if PROFILE_REMEMBERSET_MARK
8914 fprintf(stderr, "%d\t%d\t%d\t%d\n", has_both, has_old, has_shady, skip);
8915#endif
8916 gc_report(1, objspace, "rgengc_rememberset_mark: finished\n");
8917}
8918
8919static void
8920rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap)
8921{
8922 struct heap_page *page = 0;
8923
8924 ccan_list_for_each(&heap->pages, page, page_node) {
8925 memset(&page->mark_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
8926 memset(&page->uncollectible_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
8927 memset(&page->marking_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
8928 memset(&page->pinned_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
8929 page->flags.has_uncollectible_shady_objects = FALSE;
8930 page->flags.has_remembered_objects = FALSE;
8931 }
8932}
8933
8934/* RGENGC: APIs */
8935
8936NOINLINE(static void gc_writebarrier_generational(VALUE a, VALUE b, rb_objspace_t *objspace));
8937
8938static void
8939gc_writebarrier_generational(VALUE a, VALUE b, rb_objspace_t *objspace)
8940{
8941 if (RGENGC_CHECK_MODE) {
8942 if (!RVALUE_OLD_P(a)) rb_bug("gc_writebarrier_generational: %s is not an old object.", obj_info(a));
8943 if ( RVALUE_OLD_P(b)) rb_bug("gc_writebarrier_generational: %s is an old object.", obj_info(b));
8944 if (is_incremental_marking(objspace)) rb_bug("gc_writebarrier_generational: called while incremental marking: %s -> %s", obj_info(a), obj_info(b));
8945 }
8946
8947#if 1
8948 /* mark `a' and remember (default behavior) */
8949 if (!rgengc_remembered(objspace, a)) {
8950 RB_VM_LOCK_ENTER_NO_BARRIER();
8951 {
8952 rgengc_remember(objspace, a);
8953 }
8954 RB_VM_LOCK_LEAVE_NO_BARRIER();
8955 gc_report(1, objspace, "gc_writebarrier_generational: %s (remembered) -> %s\n", obj_info(a), obj_info(b));
8956 }
8957#else
8958 /* mark `b' and remember */
8959 MARK_IN_BITMAP(GET_HEAP_MARK_BITS(b), b);
8960 if (RVALUE_WB_UNPROTECTED(b)) {
8961 gc_remember_unprotected(objspace, b);
8962 }
8963 else {
8964 RVALUE_AGE_SET_OLD(objspace, b);
8965 rgengc_remember(objspace, b);
8966 }
8967
8968 gc_report(1, objspace, "gc_writebarrier_generational: %s -> %s (remembered)\n", obj_info(a), obj_info(b));
8969#endif
8970
8971 check_rvalue_consistency(a);
8972 check_rvalue_consistency(b);
8973}
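/* Ruby-level sketch of when this path is taken (illustrative only): if `ary`
 * has already been promoted to the old generation, then `ary << Object.new`
 * stores a young object into an old one. The interpreter's write barrier ends
 * up here, the old parent `a` is remembered, and the next minor GC rescans it
 * so the young child is not missed. */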
8974
8975#if GC_ENABLE_INCREMENTAL_MARK
8976static void
8977gc_mark_from(rb_objspace_t *objspace, VALUE obj, VALUE parent)
8978{
8979 gc_mark_set_parent(objspace, parent);
8980 rgengc_check_relation(objspace, obj);
8981 if (gc_mark_set(objspace, obj) == FALSE) return;
8982 gc_aging(objspace, obj);
8983 gc_grey(objspace, obj);
8984}
8985
8986NOINLINE(static void gc_writebarrier_incremental(VALUE a, VALUE b, rb_objspace_t *objspace));
8987
8988static void
8989gc_writebarrier_incremental(VALUE a, VALUE b, rb_objspace_t *objspace)
8990{
8991 gc_report(2, objspace, "gc_writebarrier_incremental: [LG] %p -> %s\n", (void *)a, obj_info(b));
8992
8993 if (RVALUE_BLACK_P(a)) {
8994 if (RVALUE_WHITE_P(b)) {
8995 if (!RVALUE_WB_UNPROTECTED(a)) {
8996 gc_report(2, objspace, "gc_writebarrier_incremental: [IN] %p -> %s\n", (void *)a, obj_info(b));
8997 gc_mark_from(objspace, b, a);
8998 }
8999 }
9000 else if (RVALUE_OLD_P(a) && !RVALUE_OLD_P(b)) {
9001 if (!RVALUE_WB_UNPROTECTED(b)) {
9002 gc_report(1, objspace, "gc_writebarrier_incremental: [GN] %p -> %s\n", (void *)a, obj_info(b));
9003 RVALUE_AGE_SET_OLD(objspace, b);
9004
9005 if (RVALUE_BLACK_P(b)) {
9006 gc_grey(objspace, b);
9007 }
9008 }
9009 else {
9010 gc_report(1, objspace, "gc_writebarrier_incremental: [LL] %p -> %s\n", (void *)a, obj_info(b));
9011 gc_remember_unprotected(objspace, b);
9012 }
9013 }
9014
9015 if (UNLIKELY(objspace->flags.during_compacting)) {
9016 MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(b), b);
9017 }
9018 }
9019}
9020#else
9021#define gc_writebarrier_incremental(a, b, objspace)
9022#endif
9023
9024void
9025rb_gc_writebarrier(VALUE a, VALUE b)
9026{
9027 rb_objspace_t *objspace = &rb_objspace;
9028
9029 if (RGENGC_CHECK_MODE) {
9030 if (SPECIAL_CONST_P(a)) rb_bug("rb_gc_writebarrier: a is special const: %"PRIxVALUE, a);
9031 if (SPECIAL_CONST_P(b)) rb_bug("rb_gc_writebarrier: b is special const: %"PRIxVALUE, b);
9032 }
9033
9034 retry:
9035 if (!is_incremental_marking(objspace)) {
9036 if (!RVALUE_OLD_P(a) || RVALUE_OLD_P(b)) {
9037 // do nothing
9038 }
9039 else {
9040 gc_writebarrier_generational(a, b, objspace);
9041 }
9042 }
9043 else {
9044 bool retry = false;
9045 /* slow path */
9046 RB_VM_LOCK_ENTER_NO_BARRIER();
9047 {
9048 if (is_incremental_marking(objspace)) {
9049 gc_writebarrier_incremental(a, b, objspace);
9050 }
9051 else {
9052 retry = true;
9053 }
9054 }
9055 RB_VM_LOCK_LEAVE_NO_BARRIER();
9056
9057 if (retry) goto retry;
9058 }
9059 return;
9060}
9061
9062void
9063rb_gc_writebarrier_unprotect(VALUE obj)
9064{
9065 if (RVALUE_WB_UNPROTECTED(obj)) {
9066 return;
9067 }
9068 else {
9069 rb_objspace_t *objspace = &rb_objspace;
9070
9071 gc_report(2, objspace, "rb_gc_writebarrier_unprotect: %s %s\n", obj_info(obj),
9072 rgengc_remembered(objspace, obj) ? " (already remembered)" : "");
9073
9074 RB_VM_LOCK_ENTER_NO_BARRIER();
9075 {
9076 if (RVALUE_OLD_P(obj)) {
9077 gc_report(1, objspace, "rb_gc_writebarrier_unprotect: %s\n", obj_info(obj));
9078 RVALUE_DEMOTE(objspace, obj);
9079 gc_mark_set(objspace, obj);
9080 gc_remember_unprotected(objspace, obj);
9081
9082#if RGENGC_PROFILE
9083 objspace->profile.total_shade_operation_count++;
9084#if RGENGC_PROFILE >= 2
9085 objspace->profile.shade_operation_count_types[BUILTIN_TYPE(obj)]++;
9086#endif /* RGENGC_PROFILE >= 2 */
9087#endif /* RGENGC_PROFILE */
9088 }
9089 else {
9090 RVALUE_AGE_RESET(obj);
9091 }
9092
9093 RB_DEBUG_COUNTER_INC(obj_wb_unprotect);
9094 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
9095 }
9096 RB_VM_LOCK_LEAVE_NO_BARRIER();
9097 }
9098}
9099
9100/*
9101 * remember `obj' if needed.
9102 */
9103MJIT_FUNC_EXPORTED void
9104rb_gc_writebarrier_remember(VALUE obj)
9105{
9106 rb_objspace_t *objspace = &rb_objspace;
9107
9108 gc_report(1, objspace, "rb_gc_writebarrier_remember: %s\n", obj_info(obj));
9109
9110 if (is_incremental_marking(objspace)) {
9111 if (RVALUE_BLACK_P(obj)) {
9112 gc_grey(objspace, obj);
9113 }
9114 }
9115 else {
9116 if (RVALUE_OLD_P(obj)) {
9117 rgengc_remember(objspace, obj);
9118 }
9119 }
9120}
9121
9122static st_table *rgengc_unprotect_logging_table;
9123
9124static int
9125rgengc_unprotect_logging_exit_func_i(st_data_t key, st_data_t val, st_data_t arg)
9126{
9127 fprintf(stderr, "%s\t%"PRIuVALUE"\n", (char *)key, (VALUE)val);
9128 return ST_CONTINUE;
9129}
9130
9131static void
9132rgengc_unprotect_logging_exit_func(void)
9133{
9134 st_foreach(rgengc_unprotect_logging_table, rgengc_unprotect_logging_exit_func_i, 0);
9135}
9136
9137void
9138rb_gc_unprotect_logging(void *objptr, const char *filename, int line)
9139{
9140 VALUE obj = (VALUE)objptr;
9141
9142 if (rgengc_unprotect_logging_table == 0) {
9143 rgengc_unprotect_logging_table = st_init_strtable();
9144 atexit(rgengc_unprotect_logging_exit_func);
9145 }
9146
9147 if (RVALUE_WB_UNPROTECTED(obj) == 0) {
9148 char buff[0x100];
9149 st_data_t cnt = 1;
9150 char *ptr = buff;
9151
9152 snprintf(ptr, 0x100 - 1, "%s|%s:%d", obj_info(obj), filename, line);
9153
9154 if (st_lookup(rgengc_unprotect_logging_table, (st_data_t)ptr, &cnt)) {
9155 cnt++;
9156 }
9157 else {
9158 ptr = (strdup)(buff);
9159 if (!ptr) rb_memerror();
9160 }
9161 st_insert(rgengc_unprotect_logging_table, (st_data_t)ptr, cnt);
9162 }
9163}
9164
9165void
9166rb_copy_wb_protected_attribute(VALUE dest, VALUE obj)
9167{
9168 rb_objspace_t *objspace = &rb_objspace;
9169
9170 if (RVALUE_WB_UNPROTECTED(obj) && !RVALUE_WB_UNPROTECTED(dest)) {
9171 if (!RVALUE_OLD_P(dest)) {
9172 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(dest), dest);
9173 RVALUE_AGE_RESET_RAW(dest);
9174 }
9175 else {
9176 RVALUE_DEMOTE(objspace, dest);
9177 }
9178 }
9179
9180 check_rvalue_consistency(dest);
9181}
9182
9183/* RGENGC analysis information */
9184
9185VALUE
9186rb_obj_rgengc_writebarrier_protected_p(VALUE obj)
9187{
9188 return RBOOL(!RVALUE_WB_UNPROTECTED(obj));
9189}
9190
9191VALUE
9192rb_obj_rgengc_promoted_p(VALUE obj)
9193{
9194 return RBOOL(OBJ_PROMOTED(obj));
9195}
9196
9197size_t
9198rb_obj_gc_flags(VALUE obj, ID* flags, size_t max)
9199{
9200 size_t n = 0;
9201 static ID ID_marked;
9202 static ID ID_wb_protected, ID_old, ID_marking, ID_uncollectible, ID_pinned;
9203
9204 if (!ID_marked) {
9205#define I(s) ID_##s = rb_intern(#s);
9206 I(marked);
9207 I(wb_protected);
9208 I(old);
9209 I(marking);
9210 I(uncollectible);
9211 I(pinned);
9212#undef I
9213 }
9214
9215 if (RVALUE_WB_UNPROTECTED(obj) == 0 && n<max) flags[n++] = ID_wb_protected;
9216 if (RVALUE_OLD_P(obj) && n<max) flags[n++] = ID_old;
9217 if (RVALUE_UNCOLLECTIBLE(obj) && n<max) flags[n++] = ID_uncollectible;
9218 if (MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj) && n<max) flags[n++] = ID_marking;
9219 if (MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) && n<max) flags[n++] = ID_marked;
9220 if (MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj) && n<max) flags[n++] = ID_pinned;
9221 return n;
9222}
9223
9224/* GC */
9225
9226void
9227rb_gc_ractor_newobj_cache_clear(rb_ractor_newobj_cache_t *newobj_cache)
9228{
9229#if GC_ENABLE_INCREMENTAL_MARK
9230 newobj_cache->incremental_mark_step_allocated_slots = 0;
9231#endif
9232
9233 for (size_t size_pool_idx = 0; size_pool_idx < SIZE_POOL_COUNT; size_pool_idx++) {
9234 rb_ractor_newobj_size_pool_cache_t *cache = &newobj_cache->size_pool_caches[size_pool_idx];
9235
9236 struct heap_page *page = cache->using_page;
9237 RVALUE *freelist = cache->freelist;
9238 RUBY_DEBUG_LOG("ractor using_page:%p freelist:%p", (void *)page, (void *)freelist);
9239
9240 heap_page_freelist_append(page, freelist);
9241
9242 cache->using_page = NULL;
9243 cache->freelist = NULL;
9244 }
9245}
9246
9247void
9248rb_gc_force_recycle(VALUE obj)
9249{
9250 /* no-op */
9251}
9252
9253#ifndef MARK_OBJECT_ARY_BUCKET_SIZE
9254#define MARK_OBJECT_ARY_BUCKET_SIZE 1024
9255#endif
9256
9257void
9258rb_gc_register_mark_object(VALUE obj)
9259{
9260 if (!is_pointer_to_heap(&rb_objspace, (void *)obj))
9261 return;
9262
9263 RB_VM_LOCK_ENTER();
9264 {
9265 VALUE ary_ary = GET_VM()->mark_object_ary;
9266 VALUE ary = rb_ary_last(0, 0, ary_ary);
9267
9268 if (NIL_P(ary) || RARRAY_LEN(ary) >= MARK_OBJECT_ARY_BUCKET_SIZE) {
9269 ary = rb_ary_hidden_new(MARK_OBJECT_ARY_BUCKET_SIZE);
9270 rb_ary_push(ary_ary, ary);
9271 }
9272
9273 rb_ary_push(ary, obj);
9274 }
9275 RB_VM_LOCK_LEAVE();
9276}
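/* Data-structure note: mark_object_ary is an array of hidden bucket arrays,
 * each holding up to MARK_OBJECT_ARY_BUCKET_SIZE registered objects, so a new
 * bucket is appended whenever the last one fills up; presumably this avoids
 * repeatedly reallocating one very large array. */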
9277
9278void
9279rb_gc_register_address(VALUE *addr)
9280{
9281 rb_objspace_t *objspace = &rb_objspace;
9282 struct gc_list *tmp;
9283
9284 tmp = ALLOC(struct gc_list);
9285 tmp->next = global_list;
9286 tmp->varptr = addr;
9287 global_list = tmp;
9288}
9289
9290void
9291rb_gc_unregister_address(VALUE *addr)
9292{
9293 rb_objspace_t *objspace = &rb_objspace;
9294 struct gc_list *tmp = global_list;
9295
9296 if (tmp->varptr == addr) {
9297 global_list = tmp->next;
9298 xfree(tmp);
9299 return;
9300 }
9301 while (tmp->next) {
9302 if (tmp->next->varptr == addr) {
9303 struct gc_list *t = tmp->next;
9304
9305 tmp->next = tmp->next->next;
9306 xfree(t);
9307 break;
9308 }
9309 tmp = tmp->next;
9310 }
9311}
9312
9313void
9314rb_global_variable(VALUE *var)
9315{
9316 rb_gc_register_address(var);
9317}
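/* Usage sketch for C extensions (illustrative; `Init_myext` and `cached_str`
 * are made-up names): a VALUE held only in static storage is invisible to the
 * conservative stack scan, so it must be registered to stay alive:
 *
 *     static VALUE cached_str;
 *
 *     void Init_myext(void) {
 *         cached_str = rb_str_new_cstr("cached");
 *         rb_global_variable(&cached_str);   // or rb_gc_register_address()
 *     }
 */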
9318
9319#define GC_NOTIFY 0
9320
9321enum {
9322 gc_stress_no_major,
9323 gc_stress_no_immediate_sweep,
9324 gc_stress_full_mark_after_malloc,
9325 gc_stress_max
9326};
9327
9328#define gc_stress_full_mark_after_malloc_p() \
9329 (FIXNUM_P(ruby_gc_stress_mode) && (FIX2LONG(ruby_gc_stress_mode) & (1<<gc_stress_full_mark_after_malloc)))
9330
9331static void
9332heap_ready_to_gc(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
9333{
9334 if (!heap->free_pages) {
9335 if (!heap_increment(objspace, size_pool, heap)) {
9336 size_pool_allocatable_pages_set(objspace, size_pool, 1);
9337 heap_increment(objspace, size_pool, heap);
9338 }
9339 }
9340}
9341
9342static int
9343ready_to_gc(rb_objspace_t *objspace)
9344{
9345 if (dont_gc_val() || during_gc || ruby_disable_gc) {
9346 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
9347 rb_size_pool_t *size_pool = &size_pools[i];
9348 heap_ready_to_gc(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
9349 }
9350 return FALSE;
9351 }
9352 else {
9353 return TRUE;
9354 }
9355}
9356
9357static void
9358gc_reset_malloc_info(rb_objspace_t *objspace, bool full_mark)
9359{
9360 gc_prof_set_malloc_info(objspace);
9361 {
9362 size_t inc = ATOMIC_SIZE_EXCHANGE(malloc_increase, 0);
9363 size_t old_limit = malloc_limit;
9364
9365 if (inc > malloc_limit) {
9366 malloc_limit = (size_t)(inc * gc_params.malloc_limit_growth_factor);
9367 if (malloc_limit > gc_params.malloc_limit_max) {
9368 malloc_limit = gc_params.malloc_limit_max;
9369 }
9370 }
9371 else {
9372 malloc_limit = (size_t)(malloc_limit * 0.98); /* magic number */
9373 if (malloc_limit < gc_params.malloc_limit_min) {
9374 malloc_limit = gc_params.malloc_limit_min;
9375 }
9376 }
9377
9378 if (0) {
9379 if (old_limit != malloc_limit) {
9380 fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: %"PRIuSIZE" -> %"PRIuSIZE"\n",
9381 rb_gc_count(), old_limit, malloc_limit);
9382 }
9383 else {
9384 fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: not changed (%"PRIuSIZE")\n",
9385 rb_gc_count(), malloc_limit);
9386 }
9387 }
9388 }
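/* Worked example (the concrete defaults are assumptions, not taken from this
 * file): with malloc_limit = 16 MiB and inc = 24 MiB allocated since the last
 * GC, the new limit becomes 24 MiB * growth factor (say 1.4) = 33.6 MiB,
 * clamped to malloc_limit_max; if inc had stayed below the limit instead, the
 * limit would decay by 2% per GC down to malloc_limit_min. */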
9389
9390 /* reset oldmalloc info */
9391#if RGENGC_ESTIMATE_OLDMALLOC
9392 if (!full_mark) {
9393 if (objspace->rgengc.oldmalloc_increase > objspace->rgengc.oldmalloc_increase_limit) {
9394 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_OLDMALLOC;
9395 objspace->rgengc.oldmalloc_increase_limit =
9396 (size_t)(objspace->rgengc.oldmalloc_increase_limit * gc_params.oldmalloc_limit_growth_factor);
9397
9398 if (objspace->rgengc.oldmalloc_increase_limit > gc_params.oldmalloc_limit_max) {
9399 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_max;
9400 }
9401 }
9402
9403 if (0) fprintf(stderr, "%"PRIdSIZE"\t%d\t%"PRIuSIZE"\t%"PRIuSIZE"\t%"PRIdSIZE"\n",
9404 rb_gc_count(),
9405 objspace->rgengc.need_major_gc,
9406 objspace->rgengc.oldmalloc_increase,
9407 objspace->rgengc.oldmalloc_increase_limit,
9408 gc_params.oldmalloc_limit_max);
9409 }
9410 else {
9411 /* major GC */
9412 objspace->rgengc.oldmalloc_increase = 0;
9413
9414 if ((objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_BY_OLDMALLOC) == 0) {
9415 objspace->rgengc.oldmalloc_increase_limit =
9416 (size_t)(objspace->rgengc.oldmalloc_increase_limit / ((gc_params.oldmalloc_limit_growth_factor - 1)/10 + 1));
9417 if (objspace->rgengc.oldmalloc_increase_limit < gc_params.oldmalloc_limit_min) {
9418 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
9419 }
9420 }
9421 }
9422#endif
9423}
9424
9425static int
9426garbage_collect(rb_objspace_t *objspace, unsigned int reason)
9427{
9428 int ret;
9429
9430 RB_VM_LOCK_ENTER();
9431 {
9432#if GC_PROFILE_MORE_DETAIL
9433 objspace->profile.prepare_time = getrusage_time();
9434#endif
9435
9436 gc_rest(objspace);
9437
9438#if GC_PROFILE_MORE_DETAIL
9439 objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;
9440#endif
9441
9442 ret = gc_start(objspace, reason);
9443 }
9444 RB_VM_LOCK_LEAVE();
9445
9446 return ret;
9447}
9448
9449static int
9450gc_start(rb_objspace_t *objspace, unsigned int reason)
9451{
9452 unsigned int do_full_mark = !!(reason & GPR_FLAG_FULL_MARK);
9453#if GC_ENABLE_INCREMENTAL_MARK
9454 unsigned int immediate_mark = reason & GPR_FLAG_IMMEDIATE_MARK;
9455#endif
9456
9457 /* reason may be clobbered later, so set immediate_sweep here */
9458 objspace->flags.immediate_sweep = !!(reason & GPR_FLAG_IMMEDIATE_SWEEP);
9459
9460 /* Explicitly enable compaction (GC.compact) */
9461 if (do_full_mark && ruby_enable_autocompact) {
9462 objspace->flags.during_compacting = TRUE;
9463 }
9464 else {
9465 objspace->flags.during_compacting = !!(reason & GPR_FLAG_COMPACT);
9466 }
9467
9468 if (!heap_allocated_pages) return FALSE; /* heap is not ready */
9469 if (!(reason & GPR_FLAG_METHOD) && !ready_to_gc(objspace)) return TRUE; /* GC is not allowed */
9470
9471 GC_ASSERT(gc_mode(objspace) == gc_mode_none);
9472 GC_ASSERT(!is_lazy_sweeping(objspace));
9473 GC_ASSERT(!is_incremental_marking(objspace));
9474
9475 unsigned int lock_lev;
9476 gc_enter(objspace, gc_enter_event_start, &lock_lev);
9477
9478#if RGENGC_CHECK_MODE >= 2
9479 gc_verify_internal_consistency(objspace);
9480#endif
9481
9482 if (ruby_gc_stressful) {
9483 int flag = FIXNUM_P(ruby_gc_stress_mode) ? FIX2INT(ruby_gc_stress_mode) : 0;
9484
9485 if ((flag & (1<<gc_stress_no_major)) == 0) {
9486 do_full_mark = TRUE;
9487 }
9488
9489 objspace->flags.immediate_sweep = !(flag & (1<<gc_stress_no_immediate_sweep));
9490 }
9491 else {
9492 if (objspace->rgengc.need_major_gc) {
9493 reason |= objspace->rgengc.need_major_gc;
9494 do_full_mark = TRUE;
9495 }
9496 else if (RGENGC_FORCE_MAJOR_GC) {
9497 reason = GPR_FLAG_MAJOR_BY_FORCE;
9498 do_full_mark = TRUE;
9499 }
9500
9501 objspace->rgengc.need_major_gc = GPR_FLAG_NONE;
9502 }
9503
9504 if (do_full_mark && (reason & GPR_FLAG_MAJOR_MASK) == 0) {
9505 reason |= GPR_FLAG_MAJOR_BY_FORCE; /* GC by CAPI, METHOD, and so on. */
9506 }
9507
9508#if GC_ENABLE_INCREMENTAL_MARK
9509 if (!GC_ENABLE_INCREMENTAL_MARK || objspace->flags.dont_incremental || immediate_mark) {
9510 objspace->flags.during_incremental_marking = FALSE;
9511 }
9512 else {
9513 objspace->flags.during_incremental_marking = do_full_mark;
9514 }
9515#endif
9516
9517 if (!GC_ENABLE_LAZY_SWEEP || objspace->flags.dont_incremental) {
9518 objspace->flags.immediate_sweep = TRUE;
9519 }
9520
9521 if (objspace->flags.immediate_sweep) reason |= GPR_FLAG_IMMEDIATE_SWEEP;
9522
9523 gc_report(1, objspace, "gc_start(reason: %x) => %u, %d, %d\n",
9524 reason,
9525 do_full_mark, !is_incremental_marking(objspace), objspace->flags.immediate_sweep);
9526
9527#if USE_DEBUG_COUNTER
9528 RB_DEBUG_COUNTER_INC(gc_count);
9529
9530 if (reason & GPR_FLAG_MAJOR_MASK) {
9531 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_nofree, reason & GPR_FLAG_MAJOR_BY_NOFREE);
9532 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldgen, reason & GPR_FLAG_MAJOR_BY_OLDGEN);
9533 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_shady, reason & GPR_FLAG_MAJOR_BY_SHADY);
9534 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_force, reason & GPR_FLAG_MAJOR_BY_FORCE);
9535#if RGENGC_ESTIMATE_OLDMALLOC
9536 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldmalloc, reason & GPR_FLAG_MAJOR_BY_OLDMALLOC);
9537#endif
9538 }
9539 else {
9540 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_newobj, reason & GPR_FLAG_NEWOBJ);
9541 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_malloc, reason & GPR_FLAG_MALLOC);
9542 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_method, reason & GPR_FLAG_METHOD);
9543 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_capi, reason & GPR_FLAG_CAPI);
9544 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_stress, reason & GPR_FLAG_STRESS);
9545 }
9546#endif
9547
9548 objspace->profile.count++;
9549 objspace->profile.latest_gc_info = reason;
9550 objspace->profile.total_allocated_objects_at_gc_start = objspace->total_allocated_objects;
9551 objspace->profile.heap_used_at_gc_start = heap_allocated_pages;
9552 gc_prof_setup_new_record(objspace, reason);
9553 gc_reset_malloc_info(objspace, do_full_mark);
9554 rb_transient_heap_start_marking(do_full_mark);
9555
9556 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_START, 0 /* TODO: pass minor/immediate flag? */);
9557 GC_ASSERT(during_gc);
9558
9559 gc_prof_timer_start(objspace);
9560 {
9561 gc_marks(objspace, do_full_mark);
9562 }
9563 gc_prof_timer_stop(objspace);
9564
9565 gc_exit(objspace, gc_enter_event_start, &lock_lev);
9566 return TRUE;
9567}
9568
9569static void
9570gc_rest(rb_objspace_t *objspace)
9571{
9572 int marking = is_incremental_marking(objspace);
9573 int sweeping = is_lazy_sweeping(objspace);
9574
9575 if (marking || sweeping) {
9576 unsigned int lock_lev;
9577 gc_enter(objspace, gc_enter_event_rest, &lock_lev);
9578
9579 if (RGENGC_CHECK_MODE >= 2) gc_verify_internal_consistency(objspace);
9580
9581 if (is_incremental_marking(objspace)) {
9582 gc_marks_rest(objspace);
9583 }
9584 if (is_lazy_sweeping(objspace)) {
9585 gc_sweep_rest(objspace);
9586 }
9587 gc_exit(objspace, gc_enter_event_rest, &lock_lev);
9588 }
9589}
9590
9591struct objspace_and_reason {
9592 rb_objspace_t *objspace;
9593 unsigned int reason;
9594};
9595
9596static void
9597gc_current_status_fill(rb_objspace_t *objspace, char *buff)
9598{
9599 int i = 0;
9600 if (is_marking(objspace)) {
9601 buff[i++] = 'M';
9602 if (is_full_marking(objspace)) buff[i++] = 'F';
9603#if GC_ENABLE_INCREMENTAL_MARK
9604 if (is_incremental_marking(objspace)) buff[i++] = 'I';
9605#endif
9606 }
9607 else if (is_sweeping(objspace)) {
9608 buff[i++] = 'S';
9609 if (is_lazy_sweeping(objspace)) buff[i++] = 'L';
9610 }
9611 else {
9612 buff[i++] = 'N';
9613 }
9614 buff[i] = '\0';
9615}
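/* Example status strings produced above: "N" (no GC in progress), "SL" (lazy
 * sweeping), "MFI" (full, incremental marking). */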
9616
9617static const char *
9618gc_current_status(rb_objspace_t *objspace)
9619{
9620 static char buff[0x10];
9621 gc_current_status_fill(objspace, buff);
9622 return buff;
9623}
9624
9625#if PRINT_ENTER_EXIT_TICK
9626
9627static tick_t last_exit_tick;
9628static tick_t enter_tick;
9629static int enter_count = 0;
9630static char last_gc_status[0x10];
9631
9632static inline void
9633gc_record(rb_objspace_t *objspace, int direction, const char *event)
9634{
9635 if (direction == 0) { /* enter */
9636 enter_count++;
9637 enter_tick = tick();
9638 gc_current_status_fill(objspace, last_gc_status);
9639 }
9640 else { /* exit */
9641 tick_t exit_tick = tick();
9642 char current_gc_status[0x10];
9643 gc_current_status_fill(objspace, current_gc_status);
9644#if 1
9645 /* [last mutator time] [gc time] [event] */
9646 fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
9647 enter_tick - last_exit_tick,
9648 exit_tick - enter_tick,
9649 event,
9650 last_gc_status, current_gc_status,
9651 (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
9652 last_exit_tick = exit_tick;
9653#else
9654 /* [enter_tick] [gc time] [event] */
9655 fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
9656 enter_tick,
9657 exit_tick - enter_tick,
9658 event,
9659 last_gc_status, current_gc_status,
9660 (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
9661#endif
9662 }
9663}
9664#else /* PRINT_ENTER_EXIT_TICK */
9665static inline void
9666gc_record(rb_objspace_t *objspace, int direction, const char *event)
9667{
9668 /* null */
9669}
9670#endif /* PRINT_ENTER_EXIT_TICK */
9671
9672static const char *
9673gc_enter_event_cstr(enum gc_enter_event event)
9674{
9675 switch (event) {
9676 case gc_enter_event_start: return "start";
9677 case gc_enter_event_mark_continue: return "mark_continue";
9678 case gc_enter_event_sweep_continue: return "sweep_continue";
9679 case gc_enter_event_rest: return "rest";
9680 case gc_enter_event_finalizer: return "finalizer";
9681 case gc_enter_event_rb_memerror: return "rb_memerror";
9682 }
9683 return NULL;
9684}
9685
9686static void
9687gc_enter_count(enum gc_enter_event event)
9688{
9689 switch (event) {
9690 case gc_enter_event_start: RB_DEBUG_COUNTER_INC(gc_enter_start); break;
9691 case gc_enter_event_mark_continue: RB_DEBUG_COUNTER_INC(gc_enter_mark_continue); break;
9692 case gc_enter_event_sweep_continue: RB_DEBUG_COUNTER_INC(gc_enter_sweep_continue); break;
9693 case gc_enter_event_rest: RB_DEBUG_COUNTER_INC(gc_enter_rest); break;
9694 case gc_enter_event_finalizer: RB_DEBUG_COUNTER_INC(gc_enter_finalizer); break;
9695 case gc_enter_event_rb_memerror: /* nothing */ break;
9696 }
9697}
9698
9699#ifndef MEASURE_GC
9700#define MEASURE_GC (objspace->flags.measure_gc)
9701#endif
9702
9703static bool
9704gc_enter_event_measure_p(rb_objspace_t *objspace, enum gc_enter_event event)
9705{
9706 if (!MEASURE_GC) return false;
9707
9708 switch (event) {
9709 case gc_enter_event_start:
9710 case gc_enter_event_mark_continue:
9711 case gc_enter_event_sweep_continue:
9712 case gc_enter_event_rest:
9713 return true;
9714
9715 default:
9716 // case gc_enter_event_finalizer:
9717 // case gc_enter_event_rb_memerror:
9718 return false;
9719 }
9720}
9721
9722static bool current_process_time(struct timespec *ts);
9723
9724static void
9725gc_enter_clock(rb_objspace_t *objspace, enum gc_enter_event event)
9726{
9727 if (gc_enter_event_measure_p(objspace, event)) {
9728 if (!current_process_time(&objspace->profile.start_time)) {
9729 objspace->profile.start_time.tv_sec = 0;
9730 objspace->profile.start_time.tv_nsec = 0;
9731 }
9732 }
9733}
9734
9735static void
9736gc_exit_clock(rb_objspace_t *objspace, enum gc_enter_event event)
9737{
9738 if (gc_enter_event_measure_p(objspace, event)) {
9739 struct timespec end_time;
9740
9741 if ((objspace->profile.start_time.tv_sec > 0 ||
9742 objspace->profile.start_time.tv_nsec > 0) &&
9743 current_process_time(&end_time)) {
9744
9745 if (end_time.tv_sec < objspace->profile.start_time.tv_sec) {
9746 return; // ignore
9747 }
9748 else {
9749 uint64_t ns =
9750 (uint64_t)(end_time.tv_sec - objspace->profile.start_time.tv_sec) * (1000 * 1000 * 1000) +
9751 (end_time.tv_nsec - objspace->profile.start_time.tv_nsec);
9752 objspace->profile.total_time_ns += ns;
9753 }
9754 }
9755 }
9756}
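
/* The nanoseconds accumulated in objspace->profile.total_time_ns above are what
 * GC.stat(:time) reports, converted to milliseconds in gc_stat_internal() below. */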
9757
9758static inline void
9759gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev)
9760{
9761 RB_VM_LOCK_ENTER_LEV(lock_lev);
9762
9763 gc_enter_clock(objspace, event);
9764
9765 switch (event) {
9766 case gc_enter_event_rest:
9767 if (!is_marking(objspace)) break;
9768 // fall through
9769 case gc_enter_event_start:
9770 case gc_enter_event_mark_continue:
9771 // stop other ractors
9772 rb_vm_barrier();
9773 break;
9774 default:
9775 break;
9776 }
9777
9778 gc_enter_count(event);
9779 if (UNLIKELY(during_gc != 0)) rb_bug("during_gc != 0");
9780 if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);
9781
9782 during_gc = TRUE;
9783 RUBY_DEBUG_LOG("%s (%s)",gc_enter_event_cstr(event), gc_current_status(objspace));
9784 gc_report(1, objspace, "gc_enter: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
9785 gc_record(objspace, 0, gc_enter_event_cstr(event));
9786 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_ENTER, 0); /* TODO: which parameter should be passed? */
9787}
9788
9789static inline void
9790gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev)
9791{
9792 GC_ASSERT(during_gc != 0);
9793
 9794    gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_EXIT, 0); /* TODO: which parameter should be passed? */
9795 gc_record(objspace, 1, gc_enter_event_cstr(event));
9796 RUBY_DEBUG_LOG("%s (%s)", gc_enter_event_cstr(event), gc_current_status(objspace));
9797 gc_report(1, objspace, "gc_exit: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
9798 during_gc = FALSE;
9799
9800 gc_exit_clock(objspace, event);
9801 RB_VM_LOCK_LEAVE_LEV(lock_lev);
9802
9803#if RGENGC_CHECK_MODE >= 2
9804 if (event == gc_enter_event_sweep_continue && gc_mode(objspace) == gc_mode_none) {
9805 GC_ASSERT(!during_gc);
9806 // sweep finished
9807 gc_verify_internal_consistency(objspace);
9808 }
9809#endif
9810}
9811
9812static void *
9813gc_with_gvl(void *ptr)
9814{
9815 struct objspace_and_reason *oar = (struct objspace_and_reason *)ptr;
9816 return (void *)(VALUE)garbage_collect(oar->objspace, oar->reason);
9817}
9818
9819static int
9820garbage_collect_with_gvl(rb_objspace_t *objspace, unsigned int reason)
9821{
9822 if (dont_gc_val()) return TRUE;
9823 if (ruby_thread_has_gvl_p()) {
9824 return garbage_collect(objspace, reason);
9825 }
9826 else {
9827 if (ruby_native_thread_p()) {
9828 struct objspace_and_reason oar;
9829 oar.objspace = objspace;
9830 oar.reason = reason;
9831 return (int)(VALUE)rb_thread_call_with_gvl(gc_with_gvl, (void *)&oar);
9832 }
9833 else {
9834 /* no ruby thread */
9835 fprintf(stderr, "[FATAL] failed to allocate memory\n");
9836 exit(EXIT_FAILURE);
9837 }
9838 }
9839}
9840
9841static VALUE
9842gc_start_internal(rb_execution_context_t *ec, VALUE self, VALUE full_mark, VALUE immediate_mark, VALUE immediate_sweep, VALUE compact)
9843{
9844 rb_objspace_t *objspace = &rb_objspace;
9845 unsigned int reason = (GPR_FLAG_FULL_MARK |
9846 GPR_FLAG_IMMEDIATE_MARK |
9847 GPR_FLAG_IMMEDIATE_SWEEP |
9848 GPR_FLAG_METHOD);
9849
9850 /* For now, compact implies full mark / sweep, so ignore other flags */
9851 if (RTEST(compact)) {
9852 GC_ASSERT(GC_COMPACTION_SUPPORTED);
9853
9854 reason |= GPR_FLAG_COMPACT;
9855 }
9856 else {
9857 if (!RTEST(full_mark)) reason &= ~GPR_FLAG_FULL_MARK;
9858 if (!RTEST(immediate_mark)) reason &= ~GPR_FLAG_IMMEDIATE_MARK;
9859 if (!RTEST(immediate_sweep)) reason &= ~GPR_FLAG_IMMEDIATE_SWEEP;
9860 }
9861
9862 garbage_collect(objspace, reason);
9863 gc_finalize_deferred(objspace);
9864
9865 return Qnil;
9866}
9867
9868static int
9869gc_is_moveable_obj(rb_objspace_t *objspace, VALUE obj)
9870{
9871 GC_ASSERT(!SPECIAL_CONST_P(obj));
9872
9873 switch (BUILTIN_TYPE(obj)) {
9874 case T_NONE:
9875 case T_NIL:
9876 case T_MOVED:
9877 case T_ZOMBIE:
9878 return FALSE;
9879 case T_SYMBOL:
9880 if (DYNAMIC_SYM_P(obj) && (RSYMBOL(obj)->id & ~ID_SCOPE_MASK)) {
9881 return FALSE;
9882 }
9883 /* fall through */
9884 case T_STRING:
9885 case T_OBJECT:
9886 case T_FLOAT:
9887 case T_IMEMO:
9888 case T_ARRAY:
9889 case T_BIGNUM:
9890 case T_ICLASS:
9891 case T_MODULE:
9892 case T_REGEXP:
9893 case T_DATA:
9894 case T_MATCH:
9895 case T_STRUCT:
9896 case T_HASH:
9897 case T_FILE:
9898 case T_COMPLEX:
9899 case T_RATIONAL:
9900 case T_NODE:
9901 case T_CLASS:
9902 if (FL_TEST(obj, FL_FINALIZE)) {
9903 /* The finalizer table is a numtable. It looks up objects by address.
9904 * We can't mark the keys in the finalizer table because that would
9905 * prevent the objects from being collected. This check prevents
9906 * objects that are keys in the finalizer table from being moved
9907 * without directly pinning them. */
9908 if (st_is_member(finalizer_table, obj)) {
9909 return FALSE;
9910 }
9911 }
9912 GC_ASSERT(RVALUE_MARKED(obj));
9913 GC_ASSERT(!RVALUE_PINNED(obj));
9914
9915 return TRUE;
9916
9917 default:
9918 rb_bug("gc_is_moveable_obj: unreachable (%d)", (int)BUILTIN_TYPE(obj));
9919 break;
9920 }
9921
9922 return FALSE;
9923}
9924
9925/* Used in places that could malloc, which can cause the GC to run. We need to
9926 * temporarily disable the GC to allow the malloc to happen. */
9927#define COULD_MALLOC_REGION_START() \
9928 GC_ASSERT(during_gc); \
9929 VALUE _already_disabled = rb_gc_disable_no_rest(); \
9930 during_gc = false;
9931
9932#define COULD_MALLOC_REGION_END() \
9933 during_gc = true; \
9934 if (_already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
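
/* Typical usage (see gc_move() below): wrap any table update that may allocate
 * while compaction is running, e.g.
 *
 *     COULD_MALLOC_REGION_START();
 *     {
 *         st_insert(objspace->obj_to_id_tbl, (st_data_t)dest, id);
 *     }
 *     COULD_MALLOC_REGION_END();
 */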
9935
9936static VALUE
9937gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free, size_t src_slot_size, size_t slot_size)
9938{
9939 int marked;
9940 int wb_unprotected;
9941 int uncollectible;
9942 int marking;
9943 RVALUE *dest = (RVALUE *)free;
9944 RVALUE *src = (RVALUE *)scan;
9945
9946 gc_report(4, objspace, "Moving object: %p -> %p\n", (void*)scan, (void *)free);
9947
9948 GC_ASSERT(BUILTIN_TYPE(scan) != T_NONE);
9949 GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(free), free));
9950
9951 /* Save off bits for current object. */
9952 marked = rb_objspace_marked_object_p((VALUE)src);
9953 wb_unprotected = RVALUE_WB_UNPROTECTED((VALUE)src);
9954 uncollectible = RVALUE_UNCOLLECTIBLE((VALUE)src);
9955 marking = RVALUE_MARKING((VALUE)src);
9956
9957 /* Clear bits for eventual T_MOVED */
9958 CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)src), (VALUE)src);
9959 CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)src), (VALUE)src);
9960 CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)src), (VALUE)src);
9961 CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS((VALUE)src), (VALUE)src);
9962
9963 if (FL_TEST((VALUE)src, FL_EXIVAR)) {
9964 /* Resizing the st table could cause a malloc */
9965 COULD_MALLOC_REGION_START();
9966 {
9967 rb_mv_generic_ivar((VALUE)src, (VALUE)dest);
9968 }
9969 COULD_MALLOC_REGION_END();
9970 }
9971
9972 st_data_t srcid = (st_data_t)src, id;
9973
9974 /* If the source object's object_id has been seen, we need to update
9975 * the object to object id mapping. */
9976 if (st_lookup(objspace->obj_to_id_tbl, srcid, &id)) {
9977 gc_report(4, objspace, "Moving object with seen id: %p -> %p\n", (void *)src, (void *)dest);
9978 /* Resizing the st table could cause a malloc */
9979 COULD_MALLOC_REGION_START();
9980 {
9981 st_delete(objspace->obj_to_id_tbl, &srcid, 0);
9982 st_insert(objspace->obj_to_id_tbl, (st_data_t)dest, id);
9983 }
9984 COULD_MALLOC_REGION_END();
9985 }
9986
9987 /* Move the object */
9988 memcpy(dest, src, MIN(src_slot_size, slot_size));
9989
9990 if (RVALUE_OVERHEAD > 0) {
9991 void *dest_overhead = (void *)(((uintptr_t)dest) + slot_size - RVALUE_OVERHEAD);
9992 void *src_overhead = (void *)(((uintptr_t)src) + src_slot_size - RVALUE_OVERHEAD);
9993
9994 memcpy(dest_overhead, src_overhead, RVALUE_OVERHEAD);
9995 }
9996
9997 memset(src, 0, src_slot_size);
9998
9999 /* Set bits for object in new location */
10000 if (marking) {
10001 MARK_IN_BITMAP(GET_HEAP_MARKING_BITS((VALUE)dest), (VALUE)dest);
10002 }
10003 else {
10004 CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS((VALUE)dest), (VALUE)dest);
10005 }
10006
10007 if (marked) {
10008 MARK_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)dest), (VALUE)dest);
10009 }
10010 else {
10011 CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)dest), (VALUE)dest);
10012 }
10013
10014 if (wb_unprotected) {
10015 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)dest), (VALUE)dest);
10016 }
10017 else {
10018 CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)dest), (VALUE)dest);
10019 }
10020
10021 if (uncollectible) {
10022 MARK_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)dest), (VALUE)dest);
10023 }
10024 else {
10025 CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)dest), (VALUE)dest);
10026 }
10027
10028 /* Assign forwarding address */
10029 src->as.moved.flags = T_MOVED;
10030 src->as.moved.dummy = Qundef;
10031 src->as.moved.destination = (VALUE)dest;
10032 GC_ASSERT(BUILTIN_TYPE((VALUE)dest) != T_NONE);
10033
10034 return (VALUE)src;
10035}
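
/* gc_move() leaves a T_MOVED forwarding cell in the source slot; rb_gc_location()
 * (below) follows its destination field to resolve references to the new address. */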
10036
10037#if GC_CAN_COMPILE_COMPACTION
10038static int
10039compare_free_slots(const void *left, const void *right, void *dummy)
10040{
10041 struct heap_page *left_page;
10042 struct heap_page *right_page;
10043
10044 left_page = *(struct heap_page * const *)left;
10045 right_page = *(struct heap_page * const *)right;
10046
10047 return left_page->free_slots - right_page->free_slots;
10048}
10049
10050static void
10051gc_sort_heap_by_empty_slots(rb_objspace_t *objspace)
10052{
10053 for (int j = 0; j < SIZE_POOL_COUNT; j++) {
10054 rb_size_pool_t *size_pool = &size_pools[j];
10055
10056 size_t total_pages = SIZE_POOL_EDEN_HEAP(size_pool)->total_pages;
10057 size_t size = size_mul_or_raise(total_pages, sizeof(struct heap_page *), rb_eRuntimeError);
10058 struct heap_page *page = 0, **page_list = malloc(size);
10059 size_t i = 0;
10060
10061 SIZE_POOL_EDEN_HEAP(size_pool)->free_pages = NULL;
10062 ccan_list_for_each(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node) {
10063 page_list[i++] = page;
10064 GC_ASSERT(page);
10065 }
10066
10067 GC_ASSERT((size_t)i == total_pages);
10068
10069 /* Sort the heap so "filled pages" are first. `heap_add_page` adds to the
10070 * head of the list, so empty pages will end up at the start of the heap */
10071 ruby_qsort(page_list, total_pages, sizeof(struct heap_page *), compare_free_slots, NULL);
10072
10073 /* Reset the eden heap */
10074 ccan_list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages);
10075
10076 for (i = 0; i < total_pages; i++) {
10077 ccan_list_add(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, &page_list[i]->page_node);
10078 if (page_list[i]->free_slots != 0) {
10079 heap_add_freepage(SIZE_POOL_EDEN_HEAP(size_pool), page_list[i]);
10080 }
10081 }
10082
10083 free(page_list);
10084 }
10085}
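
/* Only used by gc_verify_compaction_references() below, when its toward_empty
 * argument is set, so that compaction runs against a heap whose emptiest pages
 * come first. */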
10086#endif
10087
10088static void
10089gc_ref_update_array(rb_objspace_t * objspace, VALUE v)
10090{
10091 if (ARY_SHARED_P(v)) {
10092#if USE_RVARGC
10093 VALUE old_root = RARRAY(v)->as.heap.aux.shared_root;
10094#endif
10095
10096 UPDATE_IF_MOVED(objspace, RARRAY(v)->as.heap.aux.shared_root);
10097
10098#if USE_RVARGC
10099 VALUE new_root = RARRAY(v)->as.heap.aux.shared_root;
10100 // If the root is embedded and its location has changed
10101 if (ARY_EMBED_P(new_root) && new_root != old_root) {
10102 size_t offset = (size_t)(RARRAY(v)->as.heap.ptr - RARRAY(old_root)->as.ary);
10103 GC_ASSERT(RARRAY(v)->as.heap.ptr >= RARRAY(old_root)->as.ary);
10104 RARRAY(v)->as.heap.ptr = RARRAY(new_root)->as.ary + offset;
10105 }
10106#endif
10107 }
10108 else {
10109 long len = RARRAY_LEN(v);
10110
10111 if (len > 0) {
10112            VALUE *ptr = (VALUE *)RARRAY_CONST_PTR_TRANSIENT(v);
10113            for (long i = 0; i < len; i++) {
10114 UPDATE_IF_MOVED(objspace, ptr[i]);
10115 }
10116 }
10117
10118#if USE_RVARGC
10119 if (rb_gc_obj_slot_size(v) >= rb_ary_size_as_embedded(v)) {
10120 if (rb_ary_embeddable_p(v)) {
10121 rb_ary_make_embedded(v);
10122 }
10123 }
10124#endif
10125 }
10126}
10127
10128static void
10129gc_ref_update_object(rb_objspace_t *objspace, VALUE v)
10130{
10131 VALUE *ptr = ROBJECT_IVPTR(v);
10132
10133 if (rb_shape_obj_too_complex(v)) {
10134 rb_gc_update_tbl_refs(ROBJECT_IV_HASH(v));
10135 return;
10136 }
10137
10138#if USE_RVARGC
10139 size_t slot_size = rb_gc_obj_slot_size(v);
10140 size_t embed_size = rb_obj_embedded_size(ROBJECT_IV_CAPACITY(v));
10141 if (slot_size >= embed_size && !RB_FL_TEST_RAW(v, ROBJECT_EMBED)) {
10142 // Object can be re-embedded
10143 memcpy(ROBJECT(v)->as.ary, ptr, sizeof(VALUE) * ROBJECT_IV_COUNT(v));
10144 RB_FL_SET_RAW(v, ROBJECT_EMBED);
10145 if (ROBJ_TRANSIENT_P(v)) {
10146 ROBJ_TRANSIENT_UNSET(v);
10147 }
10148 else {
10149 xfree(ptr);
10150 }
10151 ptr = ROBJECT(v)->as.ary;
10152 }
10153#endif
10154
10155 for (uint32_t i = 0; i < ROBJECT_IV_COUNT(v); i++) {
10156 UPDATE_IF_MOVED(objspace, ptr[i]);
10157 }
10158}
10159
10160static int
10161hash_replace_ref(st_data_t *key, st_data_t *value, st_data_t argp, int existing)
10162{
10163 rb_objspace_t *objspace = (rb_objspace_t *)argp;
10164
10165 if (gc_object_moved_p(objspace, (VALUE)*key)) {
10166 *key = rb_gc_location((VALUE)*key);
10167 }
10168
10169 if (gc_object_moved_p(objspace, (VALUE)*value)) {
10170 *value = rb_gc_location((VALUE)*value);
10171 }
10172
10173 return ST_CONTINUE;
10174}
10175
10176static int
10177hash_foreach_replace(st_data_t key, st_data_t value, st_data_t argp, int error)
10178{
10179 rb_objspace_t *objspace;
10180
10181 objspace = (rb_objspace_t *)argp;
10182
10183 if (gc_object_moved_p(objspace, (VALUE)key)) {
10184 return ST_REPLACE;
10185 }
10186
10187 if (gc_object_moved_p(objspace, (VALUE)value)) {
10188 return ST_REPLACE;
10189 }
10190 return ST_CONTINUE;
10191}
10192
10193static int
10194hash_replace_ref_value(st_data_t *key, st_data_t *value, st_data_t argp, int existing)
10195{
10196 rb_objspace_t *objspace = (rb_objspace_t *)argp;
10197
10198 if (gc_object_moved_p(objspace, (VALUE)*value)) {
10199 *value = rb_gc_location((VALUE)*value);
10200 }
10201
10202 return ST_CONTINUE;
10203}
10204
10205static int
10206hash_foreach_replace_value(st_data_t key, st_data_t value, st_data_t argp, int error)
10207{
10208 rb_objspace_t *objspace;
10209
10210 objspace = (rb_objspace_t *)argp;
10211
10212 if (gc_object_moved_p(objspace, (VALUE)value)) {
10213 return ST_REPLACE;
10214 }
10215 return ST_CONTINUE;
10216}
10217
10218static void
10219gc_update_tbl_refs(rb_objspace_t * objspace, st_table *tbl)
10220{
10221 if (!tbl || tbl->num_entries == 0) return;
10222
10223 if (st_foreach_with_replace(tbl, hash_foreach_replace_value, hash_replace_ref_value, (st_data_t)objspace)) {
10224 rb_raise(rb_eRuntimeError, "hash modified during iteration");
10225 }
10226}
10227
10228static void
10229gc_update_table_refs(rb_objspace_t * objspace, st_table *tbl)
10230{
10231 if (!tbl || tbl->num_entries == 0) return;
10232
10233 if (st_foreach_with_replace(tbl, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace)) {
10234 rb_raise(rb_eRuntimeError, "hash modified during iteration");
10235 }
10236}
10237
10238/* Update MOVED references in an st_table */
10239void
10240rb_gc_update_tbl_refs(st_table *ptr)
10241{
10242 rb_objspace_t *objspace = &rb_objspace;
10243 gc_update_table_refs(objspace, ptr);
10244}
10245
10246static void
10247gc_ref_update_hash(rb_objspace_t * objspace, VALUE v)
10248{
10249 rb_hash_stlike_foreach_with_replace(v, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace);
10250}
10251
10252static void
10253gc_ref_update_method_entry(rb_objspace_t *objspace, rb_method_entry_t *me)
10254{
10255 rb_method_definition_t *def = me->def;
10256
10257 UPDATE_IF_MOVED(objspace, me->owner);
10258 UPDATE_IF_MOVED(objspace, me->defined_class);
10259
10260 if (def) {
10261 switch (def->type) {
10262 case VM_METHOD_TYPE_ISEQ:
10263 if (def->body.iseq.iseqptr) {
10264 TYPED_UPDATE_IF_MOVED(objspace, rb_iseq_t *, def->body.iseq.iseqptr);
10265 }
10266 TYPED_UPDATE_IF_MOVED(objspace, rb_cref_t *, def->body.iseq.cref);
10267 break;
10268 case VM_METHOD_TYPE_ATTRSET:
10269 case VM_METHOD_TYPE_IVAR:
10270 UPDATE_IF_MOVED(objspace, def->body.attr.location);
10271 break;
10272 case VM_METHOD_TYPE_BMETHOD:
10273 UPDATE_IF_MOVED(objspace, def->body.bmethod.proc);
10274 break;
10275 case VM_METHOD_TYPE_ALIAS:
10276 TYPED_UPDATE_IF_MOVED(objspace, struct rb_method_entry_struct *, def->body.alias.original_me);
10277 return;
10278 case VM_METHOD_TYPE_REFINED:
10279 TYPED_UPDATE_IF_MOVED(objspace, struct rb_method_entry_struct *, def->body.refined.orig_me);
10280 UPDATE_IF_MOVED(objspace, def->body.refined.owner);
10281 break;
10282 case VM_METHOD_TYPE_CFUNC:
10283 case VM_METHOD_TYPE_ZSUPER:
10284 case VM_METHOD_TYPE_MISSING:
10285 case VM_METHOD_TYPE_OPTIMIZED:
10286 case VM_METHOD_TYPE_UNDEF:
10287 case VM_METHOD_TYPE_NOTIMPLEMENTED:
10288 break;
10289 }
10290 }
10291}
10292
10293static void
10294gc_update_values(rb_objspace_t *objspace, long n, VALUE *values)
10295{
10296 long i;
10297
10298 for (i=0; i<n; i++) {
10299 UPDATE_IF_MOVED(objspace, values[i]);
10300 }
10301}
10302
10303static void
10304gc_ref_update_imemo(rb_objspace_t *objspace, VALUE obj)
10305{
10306 switch (imemo_type(obj)) {
10307 case imemo_env:
10308 {
10309 rb_env_t *env = (rb_env_t *)obj;
10310 if (LIKELY(env->ep)) {
10311                // env->ep can be NULL just after newobj().
10312 TYPED_UPDATE_IF_MOVED(objspace, rb_iseq_t *, env->iseq);
10313 UPDATE_IF_MOVED(objspace, env->ep[VM_ENV_DATA_INDEX_ENV]);
10314 gc_update_values(objspace, (long)env->env_size, (VALUE *)env->env);
10315 }
10316 }
10317 break;
10318 case imemo_cref:
10319 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.cref.klass_or_self);
10320 TYPED_UPDATE_IF_MOVED(objspace, struct rb_cref_struct *, RANY(obj)->as.imemo.cref.next);
10321 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.cref.refinements);
10322 break;
10323 case imemo_svar:
10324 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.cref_or_me);
10325 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.lastline);
10326 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.backref);
10327 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.others);
10328 break;
10329 case imemo_throw_data:
10330 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.throw_data.throw_obj);
10331 break;
10332 case imemo_ifunc:
10333 break;
10334 case imemo_memo:
10335 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.memo.v1);
10336 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.memo.v2);
10337 break;
10338 case imemo_ment:
10339 gc_ref_update_method_entry(objspace, &RANY(obj)->as.imemo.ment);
10340 break;
10341 case imemo_iseq:
10342 rb_iseq_update_references((rb_iseq_t *)obj);
10343 break;
10344 case imemo_ast:
10345 rb_ast_update_references((rb_ast_t *)obj);
10346 break;
10347 case imemo_callcache:
10348 {
10349 const struct rb_callcache *cc = (const struct rb_callcache *)obj;
10350 if (cc->klass) {
10351 UPDATE_IF_MOVED(objspace, cc->klass);
10352 if (!is_live_object(objspace, cc->klass)) {
10353 *((VALUE *)(&cc->klass)) = (VALUE)0;
10354 }
10355 }
10356
10357 if (cc->cme_) {
10358 TYPED_UPDATE_IF_MOVED(objspace, struct rb_callable_method_entry_struct *, cc->cme_);
10359 if (!is_live_object(objspace, (VALUE)cc->cme_)) {
10360 *((struct rb_callable_method_entry_struct **)(&cc->cme_)) = (struct rb_callable_method_entry_struct *)0;
10361 }
10362 }
10363 }
10364 break;
10365 case imemo_constcache:
10366 {
10367            const struct iseq_inline_constant_cache_entry *ice = (struct iseq_inline_constant_cache_entry *)obj;
10368            UPDATE_IF_MOVED(objspace, ice->value);
10369 }
10370 break;
10371 case imemo_parser_strterm:
10372 case imemo_tmpbuf:
10373 case imemo_callinfo:
10374 break;
10375 default:
10376 rb_bug("not reachable %d", imemo_type(obj));
10377 break;
10378 }
10379}
10380
10381static enum rb_id_table_iterator_result
10382check_id_table_move(VALUE value, void *data)
10383{
10384 rb_objspace_t *objspace = (rb_objspace_t *)data;
10385
10386 if (gc_object_moved_p(objspace, (VALUE)value)) {
10387 return ID_TABLE_REPLACE;
10388 }
10389
10390 return ID_TABLE_CONTINUE;
10391}
10392
10393/* Returns the new location of an object, if it moved. Otherwise returns
10394 * the existing location. */
10395VALUE
10396rb_gc_location(VALUE value)
10397{
10398
10399 VALUE destination;
10400
10401 if (!SPECIAL_CONST_P(value)) {
10402 void *poisoned = asan_unpoison_object_temporary(value);
10403
10404 if (BUILTIN_TYPE(value) == T_MOVED) {
10405 destination = (VALUE)RMOVED(value)->destination;
10406 GC_ASSERT(BUILTIN_TYPE(destination) != T_NONE);
10407 }
10408 else {
10409 destination = value;
10410 }
10411
10412 /* Re-poison slot if it's not the one we want */
10413 if (poisoned) {
10414 GC_ASSERT(BUILTIN_TYPE(value) == T_NONE);
10415 asan_poison_object(value);
10416 }
10417 }
10418 else {
10419 destination = value;
10420 }
10421
10422 return destination;
10423}
10424
10425static enum rb_id_table_iterator_result
10426update_id_table(VALUE *value, void *data, int existing)
10427{
10428 rb_objspace_t *objspace = (rb_objspace_t *)data;
10429
10430 if (gc_object_moved_p(objspace, (VALUE)*value)) {
10431 *value = rb_gc_location((VALUE)*value);
10432 }
10433
10434 return ID_TABLE_CONTINUE;
10435}
10436
10437static void
10438update_m_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
10439{
10440 if (tbl) {
10441 rb_id_table_foreach_values_with_replace(tbl, check_id_table_move, update_id_table, objspace);
10442 }
10443}
10444
10445static enum rb_id_table_iterator_result
10446update_cc_tbl_i(VALUE ccs_ptr, void *data)
10447{
10448 rb_objspace_t *objspace = (rb_objspace_t *)data;
10449 struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
10450 VM_ASSERT(vm_ccs_p(ccs));
10451
10452 if (gc_object_moved_p(objspace, (VALUE)ccs->cme)) {
10453 ccs->cme = (const rb_callable_method_entry_t *)rb_gc_location((VALUE)ccs->cme);
10454 }
10455
10456 for (int i=0; i<ccs->len; i++) {
10457 if (gc_object_moved_p(objspace, (VALUE)ccs->entries[i].ci)) {
10458 ccs->entries[i].ci = (struct rb_callinfo *)rb_gc_location((VALUE)ccs->entries[i].ci);
10459 }
10460 if (gc_object_moved_p(objspace, (VALUE)ccs->entries[i].cc)) {
10461 ccs->entries[i].cc = (struct rb_callcache *)rb_gc_location((VALUE)ccs->entries[i].cc);
10462 }
10463 }
10464
10465 // do not replace
10466 return ID_TABLE_CONTINUE;
10467}
10468
10469static void
10470update_cc_tbl(rb_objspace_t *objspace, VALUE klass)
10471{
10472 struct rb_id_table *tbl = RCLASS_CC_TBL(klass);
10473 if (tbl) {
10474 rb_id_table_foreach_values(tbl, update_cc_tbl_i, objspace);
10475 }
10476}
10477
10478static enum rb_id_table_iterator_result
10479update_cvc_tbl_i(VALUE cvc_entry, void *data)
10480{
10481 struct rb_cvar_class_tbl_entry *entry;
10482 rb_objspace_t * objspace = (rb_objspace_t *)data;
10483
10484 entry = (struct rb_cvar_class_tbl_entry *)cvc_entry;
10485
10486 if (entry->cref) {
10487 TYPED_UPDATE_IF_MOVED(objspace, rb_cref_t *, entry->cref);
10488 }
10489
10490 entry->class_value = rb_gc_location(entry->class_value);
10491
10492 return ID_TABLE_CONTINUE;
10493}
10494
10495static void
10496update_cvc_tbl(rb_objspace_t *objspace, VALUE klass)
10497{
10498 struct rb_id_table *tbl = RCLASS_CVC_TBL(klass);
10499 if (tbl) {
10500 rb_id_table_foreach_values(tbl, update_cvc_tbl_i, objspace);
10501 }
10502}
10503
10504static enum rb_id_table_iterator_result
10505mark_cvc_tbl_i(VALUE cvc_entry, void *data)
10506{
10507 struct rb_cvar_class_tbl_entry *entry;
10508
10509 entry = (struct rb_cvar_class_tbl_entry *)cvc_entry;
10510
10511 RUBY_ASSERT(entry->cref == 0 || (BUILTIN_TYPE((VALUE)entry->cref) == T_IMEMO && IMEMO_TYPE_P(entry->cref, imemo_cref)));
10512 rb_gc_mark((VALUE) entry->cref);
10513
10514 return ID_TABLE_CONTINUE;
10515}
10516
10517static void
10518mark_cvc_tbl(rb_objspace_t *objspace, VALUE klass)
10519{
10520 struct rb_id_table *tbl = RCLASS_CVC_TBL(klass);
10521 if (tbl) {
10522 rb_id_table_foreach_values(tbl, mark_cvc_tbl_i, objspace);
10523 }
10524}
10525
10526static enum rb_id_table_iterator_result
10527update_const_table(VALUE value, void *data)
10528{
10529 rb_const_entry_t *ce = (rb_const_entry_t *)value;
10530 rb_objspace_t * objspace = (rb_objspace_t *)data;
10531
10532 if (gc_object_moved_p(objspace, ce->value)) {
10533 ce->value = rb_gc_location(ce->value);
10534 }
10535
10536 if (gc_object_moved_p(objspace, ce->file)) {
10537 ce->file = rb_gc_location(ce->file);
10538 }
10539
10540 return ID_TABLE_CONTINUE;
10541}
10542
10543static void
10544update_const_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
10545{
10546 if (!tbl) return;
10547 rb_id_table_foreach_values(tbl, update_const_table, objspace);
10548}
10549
10550static void
10551update_subclass_entries(rb_objspace_t *objspace, rb_subclass_entry_t *entry)
10552{
10553 while (entry) {
10554 UPDATE_IF_MOVED(objspace, entry->klass);
10555 entry = entry->next;
10556 }
10557}
10558
10559static void
10560update_class_ext(rb_objspace_t *objspace, rb_classext_t *ext)
10561{
10562 UPDATE_IF_MOVED(objspace, ext->origin_);
10563 UPDATE_IF_MOVED(objspace, ext->includer);
10564 UPDATE_IF_MOVED(objspace, ext->refined_class);
10565 update_subclass_entries(objspace, ext->subclasses);
10566}
10567
10568static void
10569update_superclasses(rb_objspace_t *objspace, VALUE obj)
10570{
10571 if (FL_TEST_RAW(obj, RCLASS_SUPERCLASSES_INCLUDE_SELF)) {
10572 for (size_t i = 0; i < RCLASS_SUPERCLASS_DEPTH(obj) + 1; i++) {
10573 UPDATE_IF_MOVED(objspace, RCLASS_SUPERCLASSES(obj)[i]);
10574 }
10575 }
10576}
10577
10578static void
10579gc_update_object_references(rb_objspace_t *objspace, VALUE obj)
10580{
10581 RVALUE *any = RANY(obj);
10582
10583 gc_report(4, objspace, "update-refs: %p ->\n", (void *)obj);
10584
10585 switch (BUILTIN_TYPE(obj)) {
10586 case T_CLASS:
10587 case T_MODULE:
10588 if (RCLASS_SUPER((VALUE)obj)) {
10589 UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
10590 }
10591 if (!RCLASS_EXT(obj)) break;
10592 update_m_tbl(objspace, RCLASS_M_TBL(obj));
10593 update_cc_tbl(objspace, obj);
10594 update_cvc_tbl(objspace, obj);
10595 update_superclasses(objspace, obj);
10596
10597 for (attr_index_t i = 0; i < RCLASS_IV_COUNT(obj); i++) {
10598 UPDATE_IF_MOVED(objspace, RCLASS_IVPTR(obj)[i]);
10599 }
10600
10601 update_class_ext(objspace, RCLASS_EXT(obj));
10602 update_const_tbl(objspace, RCLASS_CONST_TBL(obj));
10603 break;
10604
10605 case T_ICLASS:
10606 if (FL_TEST(obj, RICLASS_IS_ORIGIN) &&
10607 !FL_TEST(obj, RICLASS_ORIGIN_SHARED_MTBL)) {
10608 update_m_tbl(objspace, RCLASS_M_TBL(obj));
10609 }
10610 if (RCLASS_SUPER((VALUE)obj)) {
10611 UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
10612 }
10613 if (!RCLASS_EXT(obj)) break;
10614 update_class_ext(objspace, RCLASS_EXT(obj));
10615 update_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
10616 update_cc_tbl(objspace, obj);
10617 break;
10618
10619 case T_IMEMO:
10620 gc_ref_update_imemo(objspace, obj);
10621 return;
10622
10623 case T_NIL:
10624 case T_FIXNUM:
10625 case T_NODE:
10626 case T_MOVED:
10627 case T_NONE:
10628 /* These can't move */
10629 return;
10630
10631 case T_ARRAY:
10632 gc_ref_update_array(objspace, obj);
10633 break;
10634
10635 case T_HASH:
10636 gc_ref_update_hash(objspace, obj);
10637 UPDATE_IF_MOVED(objspace, any->as.hash.ifnone);
10638 break;
10639
10640 case T_STRING:
10641 {
10642#if USE_RVARGC
10643#endif
10644
10645 if (STR_SHARED_P(obj)) {
10646#if USE_RVARGC
10647 VALUE old_root = any->as.string.as.heap.aux.shared;
10648#endif
10649 UPDATE_IF_MOVED(objspace, any->as.string.as.heap.aux.shared);
10650#if USE_RVARGC
10651 VALUE new_root = any->as.string.as.heap.aux.shared;
10652 rb_str_update_shared_ary(obj, old_root, new_root);
10653#endif
10654 }
10655
10656#if USE_RVARGC
10657 /* If, after move the string is not embedded, and can fit in the
10658 * slot it's been placed in, then re-embed it. */
10659 if (rb_gc_obj_slot_size(obj) >= rb_str_size_as_embedded(obj)) {
10660 if (!STR_EMBED_P(obj) && rb_str_reembeddable_p(obj)) {
10661 rb_str_make_embedded(obj);
10662 }
10663 }
10664#endif
10665
10666 break;
10667 }
10668 case T_DATA:
10669 /* Call the compaction callback, if it exists */
10670 {
10671 void *const ptr = DATA_PTR(obj);
10672 if (ptr) {
10673 if (RTYPEDDATA_P(obj)) {
10674 RUBY_DATA_FUNC compact_func = any->as.typeddata.type->function.dcompact;
10675 if (compact_func) (*compact_func)(ptr);
10676 }
10677 }
10678 }
10679 break;
10680
10681 case T_OBJECT:
10682 gc_ref_update_object(objspace, obj);
10683 break;
10684
10685 case T_FILE:
10686 if (any->as.file.fptr) {
10687 UPDATE_IF_MOVED(objspace, any->as.file.fptr->self);
10688 UPDATE_IF_MOVED(objspace, any->as.file.fptr->pathv);
10689 UPDATE_IF_MOVED(objspace, any->as.file.fptr->tied_io_for_writing);
10690 UPDATE_IF_MOVED(objspace, any->as.file.fptr->writeconv_asciicompat);
10691 UPDATE_IF_MOVED(objspace, any->as.file.fptr->writeconv_pre_ecopts);
10692 UPDATE_IF_MOVED(objspace, any->as.file.fptr->encs.ecopts);
10693 UPDATE_IF_MOVED(objspace, any->as.file.fptr->write_lock);
10694 }
10695 break;
10696 case T_REGEXP:
10697 UPDATE_IF_MOVED(objspace, any->as.regexp.src);
10698 break;
10699
10700 case T_SYMBOL:
10701 if (DYNAMIC_SYM_P((VALUE)any)) {
10702 UPDATE_IF_MOVED(objspace, RSYMBOL(any)->fstr);
10703 }
10704 break;
10705
10706 case T_FLOAT:
10707 case T_BIGNUM:
10708 break;
10709
10710 case T_MATCH:
10711 UPDATE_IF_MOVED(objspace, any->as.match.regexp);
10712
10713 if (any->as.match.str) {
10714 UPDATE_IF_MOVED(objspace, any->as.match.str);
10715 }
10716 break;
10717
10718 case T_RATIONAL:
10719 UPDATE_IF_MOVED(objspace, any->as.rational.num);
10720 UPDATE_IF_MOVED(objspace, any->as.rational.den);
10721 break;
10722
10723 case T_COMPLEX:
10724 UPDATE_IF_MOVED(objspace, any->as.complex.real);
10725 UPDATE_IF_MOVED(objspace, any->as.complex.imag);
10726
10727 break;
10728
10729 case T_STRUCT:
10730 {
10731 long i, len = RSTRUCT_LEN(obj);
10732 VALUE *ptr = (VALUE *)RSTRUCT_CONST_PTR(obj);
10733
10734 for (i = 0; i < len; i++) {
10735 UPDATE_IF_MOVED(objspace, ptr[i]);
10736 }
10737 }
10738 break;
10739 default:
10740#if GC_DEBUG
10741 rb_gcdebug_print_obj_condition((VALUE)obj);
10742 rb_obj_info_dump(obj);
10743 rb_bug("unreachable");
10744#endif
10745 break;
10746
10747 }
10748
10749 UPDATE_IF_MOVED(objspace, RBASIC(obj)->klass);
10750
10751 gc_report(4, objspace, "update-refs: %p <-\n", (void *)obj);
10752}
10753
10754static int
10755gc_ref_update(void *vstart, void *vend, size_t stride, rb_objspace_t * objspace, struct heap_page *page)
10756{
10757 VALUE v = (VALUE)vstart;
10758 asan_unlock_freelist(page);
10759 asan_lock_freelist(page);
10760 page->flags.has_uncollectible_shady_objects = FALSE;
10761 page->flags.has_remembered_objects = FALSE;
10762
10763 /* For each object on the page */
10764 for (; v != (VALUE)vend; v += stride) {
10765 void *poisoned = asan_unpoison_object_temporary(v);
10766
10767 switch (BUILTIN_TYPE(v)) {
10768 case T_NONE:
10769 case T_MOVED:
10770 case T_ZOMBIE:
10771 break;
10772 default:
10773 if (RVALUE_WB_UNPROTECTED(v)) {
10774 page->flags.has_uncollectible_shady_objects = TRUE;
10775 }
10776 if (RVALUE_PAGE_MARKING(page, v)) {
10777 page->flags.has_remembered_objects = TRUE;
10778 }
10779 if (page->flags.before_sweep) {
10780 if (RVALUE_MARKED(v)) {
10781 gc_update_object_references(objspace, v);
10782 }
10783 }
10784 else {
10785 gc_update_object_references(objspace, v);
10786 }
10787 }
10788
10789 if (poisoned) {
10790 asan_poison_object(v);
10791 }
10792 }
10793
10794 return 0;
10795}
10796
10797extern rb_symbols_t ruby_global_symbols;
10798#define global_symbols ruby_global_symbols
10799
10800static void
10801gc_update_references(rb_objspace_t *objspace)
10802{
10803 rb_execution_context_t *ec = GET_EC();
10804 rb_vm_t *vm = rb_ec_vm_ptr(ec);
10805
10806 struct heap_page *page = NULL;
10807
10808 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
10809 bool should_set_mark_bits = TRUE;
10810 rb_size_pool_t *size_pool = &size_pools[i];
10811 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
10812
10813 ccan_list_for_each(&heap->pages, page, page_node) {
10814 uintptr_t start = (uintptr_t)page->start;
10815 uintptr_t end = start + (page->total_slots * size_pool->slot_size);
10816
10817 gc_ref_update((void *)start, (void *)end, size_pool->slot_size, objspace, page);
10818 if (page == heap->sweeping_page) {
10819 should_set_mark_bits = FALSE;
10820 }
10821 if (should_set_mark_bits) {
10822 gc_setup_mark_bits(page);
10823 }
10824 }
10825 }
10826 rb_vm_update_references(vm);
10827 rb_transient_heap_update_references();
10828 rb_gc_update_global_tbl();
10829 global_symbols.ids = rb_gc_location(global_symbols.ids);
10830 global_symbols.dsymbol_fstr_hash = rb_gc_location(global_symbols.dsymbol_fstr_hash);
10831 gc_update_tbl_refs(objspace, objspace->obj_to_id_tbl);
10832 gc_update_table_refs(objspace, objspace->id_to_obj_tbl);
10833 gc_update_table_refs(objspace, global_symbols.str_sym);
10834 gc_update_table_refs(objspace, finalizer_table);
10835}
10836
10837#if GC_CAN_COMPILE_COMPACTION
10838/*
10839 * call-seq:
10840 * GC.latest_compact_info -> hash
10841 *
 10842 * Returns information about objects moved in the most recent \GC compaction.
 10843 *
 10844 * The returned hash has the keys :considered, :moved, :moved_up and :moved_down.
 10845 * The :considered hash lists the number of objects that were considered for
 10846 * movement by the compactor, and the :moved hash lists the number of objects
 10847 * that were actually moved. Some objects can't be moved (maybe they were
 10848 * pinned), so these numbers can be used to calculate compaction efficiency.
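 *
 * For example (the types and counts below are illustrative only):
 *
 *    GC.latest_compact_info
 *    #=> {:considered=>{:T_STRING=>242, ...}, :moved=>{:T_STRING=>190, ...},
 *        :moved_up=>{}, :moved_down=>{}}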
10849 */
10850static VALUE
10851gc_compact_stats(VALUE self)
10852{
10853 size_t i;
10854 rb_objspace_t *objspace = &rb_objspace;
10855 VALUE h = rb_hash_new();
10856 VALUE considered = rb_hash_new();
10857 VALUE moved = rb_hash_new();
10858 VALUE moved_up = rb_hash_new();
10859 VALUE moved_down = rb_hash_new();
10860
10861 for (i=0; i<T_MASK; i++) {
10862 if (objspace->rcompactor.considered_count_table[i]) {
10863 rb_hash_aset(considered, type_sym(i), SIZET2NUM(objspace->rcompactor.considered_count_table[i]));
10864 }
10865
10866 if (objspace->rcompactor.moved_count_table[i]) {
10867 rb_hash_aset(moved, type_sym(i), SIZET2NUM(objspace->rcompactor.moved_count_table[i]));
10868 }
10869
10870 if (objspace->rcompactor.moved_up_count_table[i]) {
10871 rb_hash_aset(moved_up, type_sym(i), SIZET2NUM(objspace->rcompactor.moved_up_count_table[i]));
10872 }
10873
10874 if (objspace->rcompactor.moved_down_count_table[i]) {
10875 rb_hash_aset(moved_down, type_sym(i), SIZET2NUM(objspace->rcompactor.moved_down_count_table[i]));
10876 }
10877 }
10878
10879 rb_hash_aset(h, ID2SYM(rb_intern("considered")), considered);
10880 rb_hash_aset(h, ID2SYM(rb_intern("moved")), moved);
10881 rb_hash_aset(h, ID2SYM(rb_intern("moved_up")), moved_up);
10882 rb_hash_aset(h, ID2SYM(rb_intern("moved_down")), moved_down);
10883
10884 return h;
10885}
10886#else
10887# define gc_compact_stats rb_f_notimplement
10888#endif
10889
10890#if GC_CAN_COMPILE_COMPACTION
10891static void
10892root_obj_check_moved_i(const char *category, VALUE obj, void *data)
10893{
10894 if (gc_object_moved_p(&rb_objspace, obj)) {
10895 rb_bug("ROOT %s points to MOVED: %p -> %s\n", category, (void *)obj, obj_info(rb_gc_location(obj)));
10896 }
10897}
10898
10899static void
10900reachable_object_check_moved_i(VALUE ref, void *data)
10901{
10902 VALUE parent = (VALUE)data;
10903 if (gc_object_moved_p(&rb_objspace, ref)) {
10904 rb_bug("Object %s points to MOVED: %p -> %s\n", obj_info(parent), (void *)ref, obj_info(rb_gc_location(ref)));
10905 }
10906}
10907
10908static int
10909heap_check_moved_i(void *vstart, void *vend, size_t stride, void *data)
10910{
10911 VALUE v = (VALUE)vstart;
10912 for (; v != (VALUE)vend; v += stride) {
10913 if (gc_object_moved_p(&rb_objspace, v)) {
10914 /* Moved object still on the heap, something may have a reference. */
10915 }
10916 else {
10917 void *poisoned = asan_unpoison_object_temporary(v);
10918
10919 switch (BUILTIN_TYPE(v)) {
10920 case T_NONE:
10921 case T_ZOMBIE:
10922 break;
10923 default:
10924 if (!rb_objspace_garbage_object_p(v)) {
10925 rb_objspace_reachable_objects_from(v, reachable_object_check_moved_i, (void *)v);
10926 }
10927 }
10928
10929 if (poisoned) {
10930 GC_ASSERT(BUILTIN_TYPE(v) == T_NONE);
10931 asan_poison_object(v);
10932 }
10933 }
10934 }
10935
10936 return 0;
10937}
10938
10939/*
10940 * call-seq:
10941 * GC.compact
10942 *
10943 * This function compacts objects together in Ruby's heap. It eliminates
 10944 * unused space (or fragmentation) in the heap by moving objects into that
 10945 * unused space. This function returns a hash which contains statistics about
 10946 * which objects were moved. See <tt>GC.latest_compact_info</tt> for details about
10947 * compaction statistics.
10948 *
10949 * This method is implementation specific and not expected to be implemented
10950 * in any implementation besides MRI.
10951 *
10952 * To test whether \GC compaction is supported, use the idiom:
10953 *
10954 * GC.respond_to?(:compact)
10955 */
10956static VALUE
10957gc_compact(VALUE self)
10958{
10959 /* Run GC with compaction enabled */
10960 gc_start_internal(NULL, self, Qtrue, Qtrue, Qtrue, Qtrue);
10961
10962 return gc_compact_stats(self);
10963}
10964#else
10965# define gc_compact rb_f_notimplement
10966#endif
10967
10968#if GC_CAN_COMPILE_COMPACTION
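/* Verification helper: grows the heap (double_heap/expand_heap), optionally
 * sorts pages so the emptiest come first (toward_empty), runs a compacting GC,
 * and then checks that no root or heap object still references a T_MOVED slot. */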
10969static VALUE
10970gc_verify_compaction_references(rb_execution_context_t *ec, VALUE self, VALUE double_heap, VALUE expand_heap, VALUE toward_empty)
10971{
10972 rb_objspace_t *objspace = &rb_objspace;
10973
10974 /* Clear the heap. */
10975 gc_start_internal(NULL, self, Qtrue, Qtrue, Qtrue, Qfalse);
10976 size_t growth_slots = gc_params.heap_init_slots;
10977
10978 if (RTEST(double_heap)) {
10979 rb_warn("double_heap is deprecated, please use expand_heap instead");
10980 }
10981
10982 RB_VM_LOCK_ENTER();
10983 {
10984 gc_rest(objspace);
10985
10986 /* if both double_heap and expand_heap are set, expand_heap takes precedence */
10987 if (RTEST(double_heap) || RTEST(expand_heap)) {
10988 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
10989 rb_size_pool_t *size_pool = &size_pools[i];
10990 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
10991
10992 if (RTEST(expand_heap)) {
10993 size_t required_pages = growth_slots / size_pool->slot_size;
10994 heap_add_pages(objspace, size_pool, heap, MAX(required_pages, heap->total_pages));
10995 }
10996 else {
10997 heap_add_pages(objspace, size_pool, heap, heap->total_pages);
10998 }
10999 }
11000 }
11001
11002 if (RTEST(toward_empty)) {
11003 gc_sort_heap_by_empty_slots(objspace);
11004 }
11005 }
11006 RB_VM_LOCK_LEAVE();
11007
11008 gc_start_internal(NULL, self, Qtrue, Qtrue, Qtrue, Qtrue);
11009
11010 objspace_reachable_objects_from_root(objspace, root_obj_check_moved_i, NULL);
11011 objspace_each_objects(objspace, heap_check_moved_i, NULL, TRUE);
11012
11013 return gc_compact_stats(self);
11014}
11015#else
11016# define gc_verify_compaction_references (rb_builtin_arity3_function_type)rb_f_notimplement
11017#endif
11018
11019VALUE
11020rb_gc_start(void)
11021{
11022 rb_gc();
11023 return Qnil;
11024}
11025
11026void
11027rb_gc(void)
11028{
11029 rb_objspace_t *objspace = &rb_objspace;
11030 unsigned int reason = GPR_DEFAULT_REASON;
11031 garbage_collect(objspace, reason);
11032}
11033
11034int
11035rb_during_gc(void)
11036{
11037 rb_objspace_t *objspace = &rb_objspace;
11038 return during_gc;
11039}
11040
11041#if RGENGC_PROFILE >= 2
11042
11043static const char *type_name(int type, VALUE obj);
11044
11045static void
11046gc_count_add_each_types(VALUE hash, const char *name, const size_t *types)
11047{
11048 VALUE result = rb_hash_new_with_size(T_MASK);
11049 int i;
11050 for (i=0; i<T_MASK; i++) {
11051 const char *type = type_name(i, 0);
11052 rb_hash_aset(result, ID2SYM(rb_intern(type)), SIZET2NUM(types[i]));
11053 }
11054 rb_hash_aset(hash, ID2SYM(rb_intern(name)), result);
11055}
11056#endif
11057
11058size_t
11059rb_gc_count(void)
11060{
11061 return rb_objspace.profile.count;
11062}
11063
11064static VALUE
11065gc_count(rb_execution_context_t *ec, VALUE self)
11066{
11067 return SIZET2NUM(rb_gc_count());
11068}
11069
11070static VALUE
11071gc_info_decode(rb_objspace_t *objspace, const VALUE hash_or_key, const unsigned int orig_flags)
11072{
11073 static VALUE sym_major_by = Qnil, sym_gc_by, sym_immediate_sweep, sym_have_finalizer, sym_state, sym_need_major_by;
11074 static VALUE sym_nofree, sym_oldgen, sym_shady, sym_force, sym_stress;
11075#if RGENGC_ESTIMATE_OLDMALLOC
11076 static VALUE sym_oldmalloc;
11077#endif
11078 static VALUE sym_newobj, sym_malloc, sym_method, sym_capi;
11079 static VALUE sym_none, sym_marking, sym_sweeping;
11080 VALUE hash = Qnil, key = Qnil;
11081 VALUE major_by, need_major_by;
11082 unsigned int flags = orig_flags ? orig_flags : objspace->profile.latest_gc_info;
11083
11084 if (SYMBOL_P(hash_or_key)) {
11085 key = hash_or_key;
11086 }
11087 else if (RB_TYPE_P(hash_or_key, T_HASH)) {
11088 hash = hash_or_key;
11089 }
11090 else {
11091 rb_raise(rb_eTypeError, "non-hash or symbol given");
11092 }
11093
11094 if (NIL_P(sym_major_by)) {
11095#define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
11096 S(major_by);
11097 S(gc_by);
11098 S(immediate_sweep);
11099 S(have_finalizer);
11100 S(state);
11101 S(need_major_by);
11102
11103 S(stress);
11104 S(nofree);
11105 S(oldgen);
11106 S(shady);
11107 S(force);
11108#if RGENGC_ESTIMATE_OLDMALLOC
11109 S(oldmalloc);
11110#endif
11111 S(newobj);
11112 S(malloc);
11113 S(method);
11114 S(capi);
11115
11116 S(none);
11117 S(marking);
11118 S(sweeping);
11119#undef S
11120 }
11121
11122#define SET(name, attr) \
11123 if (key == sym_##name) \
11124 return (attr); \
11125 else if (hash != Qnil) \
11126 rb_hash_aset(hash, sym_##name, (attr));
11127
11128 major_by =
11129 (flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
11130 (flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
11131 (flags & GPR_FLAG_MAJOR_BY_SHADY) ? sym_shady :
11132 (flags & GPR_FLAG_MAJOR_BY_FORCE) ? sym_force :
11133#if RGENGC_ESTIMATE_OLDMALLOC
11134 (flags & GPR_FLAG_MAJOR_BY_OLDMALLOC) ? sym_oldmalloc :
11135#endif
11136 Qnil;
11137 SET(major_by, major_by);
11138
11139 if (orig_flags == 0) { /* set need_major_by only if flags not set explicitly */
11140 unsigned int need_major_flags = objspace->rgengc.need_major_gc;
11141 need_major_by =
11142 (need_major_flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
11143 (need_major_flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
11144 (need_major_flags & GPR_FLAG_MAJOR_BY_SHADY) ? sym_shady :
11145 (need_major_flags & GPR_FLAG_MAJOR_BY_FORCE) ? sym_force :
11146#if RGENGC_ESTIMATE_OLDMALLOC
11147 (need_major_flags & GPR_FLAG_MAJOR_BY_OLDMALLOC) ? sym_oldmalloc :
11148#endif
11149 Qnil;
11150 SET(need_major_by, need_major_by);
11151 }
11152
11153 SET(gc_by,
11154 (flags & GPR_FLAG_NEWOBJ) ? sym_newobj :
11155 (flags & GPR_FLAG_MALLOC) ? sym_malloc :
11156 (flags & GPR_FLAG_METHOD) ? sym_method :
11157 (flags & GPR_FLAG_CAPI) ? sym_capi :
11158 (flags & GPR_FLAG_STRESS) ? sym_stress :
11159 Qnil
11160 );
11161
11162 SET(have_finalizer, RBOOL(flags & GPR_FLAG_HAVE_FINALIZE));
11163 SET(immediate_sweep, RBOOL(flags & GPR_FLAG_IMMEDIATE_SWEEP));
11164
11165 if (orig_flags == 0) {
11166 SET(state, gc_mode(objspace) == gc_mode_none ? sym_none :
11167 gc_mode(objspace) == gc_mode_marking ? sym_marking : sym_sweeping);
11168 }
11169#undef SET
11170
11171 if (!NIL_P(key)) {/* matched key should return above */
11172 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
11173 }
11174
11175 return hash;
11176}
11177
11178VALUE
11179rb_gc_latest_gc_info(VALUE key)
11180{
11181 rb_objspace_t *objspace = &rb_objspace;
11182 return gc_info_decode(objspace, key, 0);
11183}
11184
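/* Backs GC.latest_gc_info. Illustrative calls: GC.latest_gc_info(:gc_by)
 * #=> :newobj, GC.latest_gc_info(:state) #=> :marking, and a call with no
 * argument (or with a hash) returns/fills the full info hash. */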
11185static VALUE
11186gc_latest_gc_info(rb_execution_context_t *ec, VALUE self, VALUE arg)
11187{
11188 rb_objspace_t *objspace = &rb_objspace;
11189
11190 if (NIL_P(arg)) {
11191 arg = rb_hash_new();
11192 }
11193 else if (!SYMBOL_P(arg) && !RB_TYPE_P(arg, T_HASH)) {
11194 rb_raise(rb_eTypeError, "non-hash or symbol given");
11195 }
11196
11197 return gc_info_decode(objspace, arg, 0);
11198}
11199
11200enum gc_stat_sym {
11201 gc_stat_sym_count,
11202 gc_stat_sym_time,
11203 gc_stat_sym_heap_allocated_pages,
11204 gc_stat_sym_heap_sorted_length,
11205 gc_stat_sym_heap_allocatable_pages,
11206 gc_stat_sym_heap_available_slots,
11207 gc_stat_sym_heap_live_slots,
11208 gc_stat_sym_heap_free_slots,
11209 gc_stat_sym_heap_final_slots,
11210 gc_stat_sym_heap_marked_slots,
11211 gc_stat_sym_heap_eden_pages,
11212 gc_stat_sym_heap_tomb_pages,
11213 gc_stat_sym_total_allocated_pages,
11214 gc_stat_sym_total_freed_pages,
11215 gc_stat_sym_total_allocated_objects,
11216 gc_stat_sym_total_freed_objects,
11217 gc_stat_sym_malloc_increase_bytes,
11218 gc_stat_sym_malloc_increase_bytes_limit,
11219 gc_stat_sym_minor_gc_count,
11220 gc_stat_sym_major_gc_count,
11221 gc_stat_sym_compact_count,
11222 gc_stat_sym_read_barrier_faults,
11223 gc_stat_sym_total_moved_objects,
11224 gc_stat_sym_remembered_wb_unprotected_objects,
11225 gc_stat_sym_remembered_wb_unprotected_objects_limit,
11226 gc_stat_sym_old_objects,
11227 gc_stat_sym_old_objects_limit,
11228#if RGENGC_ESTIMATE_OLDMALLOC
11229 gc_stat_sym_oldmalloc_increase_bytes,
11230 gc_stat_sym_oldmalloc_increase_bytes_limit,
11231#endif
11232#if RGENGC_PROFILE
11233 gc_stat_sym_total_generated_normal_object_count,
11234 gc_stat_sym_total_generated_shady_object_count,
11235 gc_stat_sym_total_shade_operation_count,
11236 gc_stat_sym_total_promoted_count,
11237 gc_stat_sym_total_remembered_normal_object_count,
11238 gc_stat_sym_total_remembered_shady_object_count,
11239#endif
11240 gc_stat_sym_last
11241};
11242
11243static VALUE gc_stat_symbols[gc_stat_sym_last];
11244
11245static void
11246setup_gc_stat_symbols(void)
11247{
11248 if (gc_stat_symbols[0] == 0) {
11249#define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s))
11250 S(count);
11251 S(time);
11252 S(heap_allocated_pages);
11253 S(heap_sorted_length);
11254 S(heap_allocatable_pages);
11255 S(heap_available_slots);
11256 S(heap_live_slots);
11257 S(heap_free_slots);
11258 S(heap_final_slots);
11259 S(heap_marked_slots);
11260 S(heap_eden_pages);
11261 S(heap_tomb_pages);
11262 S(total_allocated_pages);
11263 S(total_freed_pages);
11264 S(total_allocated_objects);
11265 S(total_freed_objects);
11266 S(malloc_increase_bytes);
11267 S(malloc_increase_bytes_limit);
11268 S(minor_gc_count);
11269 S(major_gc_count);
11270 S(compact_count);
11271 S(read_barrier_faults);
11272 S(total_moved_objects);
11273 S(remembered_wb_unprotected_objects);
11274 S(remembered_wb_unprotected_objects_limit);
11275 S(old_objects);
11276 S(old_objects_limit);
11277#if RGENGC_ESTIMATE_OLDMALLOC
11278 S(oldmalloc_increase_bytes);
11279 S(oldmalloc_increase_bytes_limit);
11280#endif
11281#if RGENGC_PROFILE
11282 S(total_generated_normal_object_count);
11283 S(total_generated_shady_object_count);
11284 S(total_shade_operation_count);
11285 S(total_promoted_count);
11286 S(total_remembered_normal_object_count);
11287 S(total_remembered_shady_object_count);
11288#endif /* RGENGC_PROFILE */
11289#undef S
11290 }
11291}
11292
11293static size_t
11294gc_stat_internal(VALUE hash_or_sym)
11295{
11296 rb_objspace_t *objspace = &rb_objspace;
11297 VALUE hash = Qnil, key = Qnil;
11298
11299 setup_gc_stat_symbols();
11300
11301 if (RB_TYPE_P(hash_or_sym, T_HASH)) {
11302 hash = hash_or_sym;
11303 }
11304 else if (SYMBOL_P(hash_or_sym)) {
11305 key = hash_or_sym;
11306 }
11307 else {
11308 rb_raise(rb_eTypeError, "non-hash or symbol argument");
11309 }
11310
11311#define SET(name, attr) \
11312 if (key == gc_stat_symbols[gc_stat_sym_##name]) \
11313 return attr; \
11314 else if (hash != Qnil) \
11315 rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr));
11316
11317 SET(count, objspace->profile.count);
11318 SET(time, (size_t) (objspace->profile.total_time_ns / (1000 * 1000) /* ns -> ms */)); // TODO: UINT64T2NUM
11319
11320 /* implementation dependent counters */
11321 SET(heap_allocated_pages, heap_allocated_pages);
11322 SET(heap_sorted_length, heap_pages_sorted_length);
11323 SET(heap_allocatable_pages, heap_allocatable_pages(objspace));
11324 SET(heap_available_slots, objspace_available_slots(objspace));
11325 SET(heap_live_slots, objspace_live_slots(objspace));
11326 SET(heap_free_slots, objspace_free_slots(objspace));
11327 SET(heap_final_slots, heap_pages_final_slots);
11328 SET(heap_marked_slots, objspace->marked_slots);
11329 SET(heap_eden_pages, heap_eden_total_pages(objspace));
11330 SET(heap_tomb_pages, heap_tomb_total_pages(objspace));
11331 SET(total_allocated_pages, total_allocated_pages(objspace));
11332 SET(total_freed_pages, total_freed_pages(objspace));
11333 SET(total_allocated_objects, objspace->total_allocated_objects);
11334 SET(total_freed_objects, objspace->profile.total_freed_objects);
11335 SET(malloc_increase_bytes, malloc_increase);
11336 SET(malloc_increase_bytes_limit, malloc_limit);
11337 SET(minor_gc_count, objspace->profile.minor_gc_count);
11338 SET(major_gc_count, objspace->profile.major_gc_count);
11339 SET(compact_count, objspace->profile.compact_count);
11340 SET(read_barrier_faults, objspace->profile.read_barrier_faults);
11341 SET(total_moved_objects, objspace->rcompactor.total_moved);
11342 SET(remembered_wb_unprotected_objects, objspace->rgengc.uncollectible_wb_unprotected_objects);
11343 SET(remembered_wb_unprotected_objects_limit, objspace->rgengc.uncollectible_wb_unprotected_objects_limit);
11344 SET(old_objects, objspace->rgengc.old_objects);
11345 SET(old_objects_limit, objspace->rgengc.old_objects_limit);
11346#if RGENGC_ESTIMATE_OLDMALLOC
11347 SET(oldmalloc_increase_bytes, objspace->rgengc.oldmalloc_increase);
11348 SET(oldmalloc_increase_bytes_limit, objspace->rgengc.oldmalloc_increase_limit);
11349#endif
11350
11351#if RGENGC_PROFILE
11352 SET(total_generated_normal_object_count, objspace->profile.total_generated_normal_object_count);
11353 SET(total_generated_shady_object_count, objspace->profile.total_generated_shady_object_count);
11354 SET(total_shade_operation_count, objspace->profile.total_shade_operation_count);
11355 SET(total_promoted_count, objspace->profile.total_promoted_count);
11356 SET(total_remembered_normal_object_count, objspace->profile.total_remembered_normal_object_count);
11357 SET(total_remembered_shady_object_count, objspace->profile.total_remembered_shady_object_count);
11358#endif /* RGENGC_PROFILE */
11359#undef SET
11360
11361 if (!NIL_P(key)) { /* matched key should return above */
11362 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
11363 }
11364
11365#if defined(RGENGC_PROFILE) && RGENGC_PROFILE >= 2
11366 if (hash != Qnil) {
11367 gc_count_add_each_types(hash, "generated_normal_object_count_types", objspace->profile.generated_normal_object_count_types);
11368 gc_count_add_each_types(hash, "generated_shady_object_count_types", objspace->profile.generated_shady_object_count_types);
11369 gc_count_add_each_types(hash, "shade_operation_count_types", objspace->profile.shade_operation_count_types);
11370 gc_count_add_each_types(hash, "promoted_types", objspace->profile.promoted_types);
11371 gc_count_add_each_types(hash, "remembered_normal_object_count_types", objspace->profile.remembered_normal_object_count_types);
11372 gc_count_add_each_types(hash, "remembered_shady_object_count_types", objspace->profile.remembered_shady_object_count_types);
11373 }
11374#endif
11375
11376 return 0;
11377}
11378
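/* Backs GC.stat: GC.stat returns the full statistics hash, GC.stat(:count)
 * returns a single counter, and GC.stat(hash) fills a caller-supplied hash
 * in place. */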
11379static VALUE
11380gc_stat(rb_execution_context_t *ec, VALUE self, VALUE arg) // arg is (nil || hash || symbol)
11381{
11382 if (NIL_P(arg)) {
11383 arg = rb_hash_new();
11384 }
11385 else if (SYMBOL_P(arg)) {
11386 size_t value = gc_stat_internal(arg);
11387 return SIZET2NUM(value);
11388 }
11389 else if (RB_TYPE_P(arg, T_HASH)) {
11390 // ok
11391 }
11392 else {
11393 rb_raise(rb_eTypeError, "non-hash or symbol given");
11394 }
11395
11396 gc_stat_internal(arg);
11397 return arg;
11398}
11399
11400size_t
11401rb_gc_stat(VALUE key)
11402{
11403 if (SYMBOL_P(key)) {
11404 size_t value = gc_stat_internal(key);
11405 return value;
11406 }
11407 else {
11408 gc_stat_internal(key);
11409 return 0;
11410 }
11411}
11412
11413
11414enum gc_stat_heap_sym {
11415 gc_stat_heap_sym_slot_size,
11416 gc_stat_heap_sym_heap_allocatable_pages,
11417 gc_stat_heap_sym_heap_eden_pages,
11418 gc_stat_heap_sym_heap_eden_slots,
11419 gc_stat_heap_sym_heap_tomb_pages,
11420 gc_stat_heap_sym_heap_tomb_slots,
11421 gc_stat_heap_sym_total_allocated_pages,
11422 gc_stat_heap_sym_total_freed_pages,
11423 gc_stat_heap_sym_force_major_gc_count,
11424 gc_stat_heap_sym_last
11425};
11426
11427static VALUE gc_stat_heap_symbols[gc_stat_heap_sym_last];
11428
11429static void
11430setup_gc_stat_heap_symbols(void)
11431{
11432 if (gc_stat_heap_symbols[0] == 0) {
11433#define S(s) gc_stat_heap_symbols[gc_stat_heap_sym_##s] = ID2SYM(rb_intern_const(#s))
11434 S(slot_size);
11435 S(heap_allocatable_pages);
11436 S(heap_eden_pages);
11437 S(heap_eden_slots);
11438 S(heap_tomb_pages);
11439 S(heap_tomb_slots);
11440 S(total_allocated_pages);
11441 S(total_freed_pages);
11442 S(force_major_gc_count);
11443#undef S
11444 }
11445}
11446
11447static size_t
11448gc_stat_heap_internal(int size_pool_idx, VALUE hash_or_sym)
11449{
11450 rb_objspace_t *objspace = &rb_objspace;
11451 VALUE hash = Qnil, key = Qnil;
11452
11453 setup_gc_stat_heap_symbols();
11454
11455 if (RB_TYPE_P(hash_or_sym, T_HASH)) {
11456 hash = hash_or_sym;
11457 }
11458 else if (SYMBOL_P(hash_or_sym)) {
11459 key = hash_or_sym;
11460 }
11461 else {
11462 rb_raise(rb_eTypeError, "non-hash or symbol argument");
11463 }
11464
11465 if (size_pool_idx < 0 || size_pool_idx >= SIZE_POOL_COUNT) {
11466 rb_raise(rb_eArgError, "size pool index out of range");
11467 }
11468
11469 rb_size_pool_t *size_pool = &size_pools[size_pool_idx];
11470
11471#define SET(name, attr) \
11472 if (key == gc_stat_heap_symbols[gc_stat_heap_sym_##name]) \
11473 return attr; \
11474 else if (hash != Qnil) \
11475 rb_hash_aset(hash, gc_stat_heap_symbols[gc_stat_heap_sym_##name], SIZET2NUM(attr));
11476
11477 SET(slot_size, size_pool->slot_size);
11478 SET(heap_allocatable_pages, size_pool->allocatable_pages);
11479 SET(heap_eden_pages, SIZE_POOL_EDEN_HEAP(size_pool)->total_pages);
11480 SET(heap_eden_slots, SIZE_POOL_EDEN_HEAP(size_pool)->total_slots);
11481 SET(heap_tomb_pages, SIZE_POOL_TOMB_HEAP(size_pool)->total_pages);
11482 SET(heap_tomb_slots, SIZE_POOL_TOMB_HEAP(size_pool)->total_slots);
11483 SET(total_allocated_pages, size_pool->total_allocated_pages);
11484 SET(total_freed_pages, size_pool->total_freed_pages);
11485 SET(force_major_gc_count, size_pool->force_major_gc_count);
11486#undef SET
11487
11488 if (!NIL_P(key)) { /* matched key should return above */
11489 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
11490 }
11491
11492 return 0;
11493}
11494
11495static VALUE
11496gc_stat_heap(rb_execution_context_t *ec, VALUE self, VALUE heap_name, VALUE arg)
11497{
11498 if (NIL_P(heap_name)) {
11499 if (NIL_P(arg)) {
11500 arg = rb_hash_new();
11501 }
11502 else if (RB_TYPE_P(arg, T_HASH)) {
11503 // ok
11504 }
11505 else {
11506 rb_raise(rb_eTypeError, "non-hash given");
11507 }
11508
11509 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
11510 VALUE hash = rb_hash_aref(arg, INT2FIX(i));
11511 if (NIL_P(hash)) {
11512 hash = rb_hash_new();
11513 rb_hash_aset(arg, INT2FIX(i), hash);
11514 }
11515 gc_stat_heap_internal(i, hash);
11516 }
11517 }
11518 else if (FIXNUM_P(heap_name)) {
11519 int size_pool_idx = FIX2INT(heap_name);
11520
11521 if (NIL_P(arg)) {
11522 arg = rb_hash_new();
11523 }
11524 else if (SYMBOL_P(arg)) {
11525 size_t value = gc_stat_heap_internal(size_pool_idx, arg);
11526 return SIZET2NUM(value);
11527 }
11528 else if (RB_TYPE_P(arg, T_HASH)) {
11529 // ok
11530 }
11531 else {
11532 rb_raise(rb_eTypeError, "non-hash or symbol given");
11533 }
11534
11535 gc_stat_heap_internal(size_pool_idx, arg);
11536 }
11537 else {
11538 rb_raise(rb_eTypeError, "heap_name must be nil or an Integer");
11539 }
11540
11541 return arg;
11542}
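/*
 * Illustrative usage (editorial note, not part of the original source):
 * gc_stat_heap() backs GC.stat_heap.  Passing nil collects per-size-pool
 * hashes keyed by pool index; passing an Integer and a Symbol returns a
 * single per-pool counter, with keys mirroring the SET() list above:
 *
 *   GC.stat_heap                  #=> {0=>{:slot_size=>40, ...}, 1=>{:slot_size=>80, ...}, ...}
 *   GC.stat_heap(0, :slot_size)   #=> 40
 *
 * The slot sizes shown are typical of 64-bit builds but are illustrative only.
 */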
11543
11544static VALUE
11545gc_stress_get(rb_execution_context_t *ec, VALUE self)
11546{
11547 rb_objspace_t *objspace = &rb_objspace;
11548 return ruby_gc_stress_mode;
11549}
11550
11551static void
11552gc_stress_set(rb_objspace_t *objspace, VALUE flag)
11553{
11554 objspace->flags.gc_stressful = RTEST(flag);
11555 objspace->gc_stress_mode = flag;
11556}
11557
11558static VALUE
11559gc_stress_set_m(rb_execution_context_t *ec, VALUE self, VALUE flag)
11560{
11561 rb_objspace_t *objspace = &rb_objspace;
11562 gc_stress_set(objspace, flag);
11563 return flag;
11564}
11565
11566VALUE
11567rb_gc_enable(void)
11568{
11569 rb_objspace_t *objspace = &rb_objspace;
11570 return rb_objspace_gc_enable(objspace);
11571}
11572
11573VALUE
11574rb_objspace_gc_enable(rb_objspace_t *objspace)
11575{
11576 int old = dont_gc_val();
11577
11578 dont_gc_off();
11579 return RBOOL(old);
11580}
11581
11582static VALUE
11583gc_enable(rb_execution_context_t *ec, VALUE _)
11584{
11585 return rb_gc_enable();
11586}
11587
11588VALUE
11589rb_gc_disable_no_rest(void)
11590{
11591 rb_objspace_t *objspace = &rb_objspace;
11592 return gc_disable_no_rest(objspace);
11593}
11594
11595static VALUE
11596gc_disable_no_rest(rb_objspace_t *objspace)
11597{
11598 int old = dont_gc_val();
11599 dont_gc_on();
11600 return RBOOL(old);
11601}
11602
11603VALUE
11604rb_gc_disable(void)
11605{
11606 rb_objspace_t *objspace = &rb_objspace;
11607 return rb_objspace_gc_disable(objspace);
11608}
11609
11610VALUE
11611rb_objspace_gc_disable(rb_objspace_t *objspace)
11612{
11613 gc_rest(objspace);
11614 return gc_disable_no_rest(objspace);
11615}
11616
11617static VALUE
11618gc_disable(rb_execution_context_t *ec, VALUE _)
11619{
11620 return rb_gc_disable();
11621}
11622
11623#if GC_CAN_COMPILE_COMPACTION
11624/*
11625 * call-seq:
11626 * GC.auto_compact = flag
11627 *
11628 * Updates automatic compaction mode.
11629 *
11630 * When enabled, the compactor will execute on every major collection.
11631 *
11632 * Enabling compaction will degrade performance on major collections.
11633 */
11634static VALUE
11635gc_set_auto_compact(VALUE _, VALUE v)
11636{
11637 GC_ASSERT(GC_COMPACTION_SUPPORTED);
11638
11639 ruby_enable_autocompact = RTEST(v);
11640 return v;
11641}
11642#else
11643# define gc_set_auto_compact rb_f_notimplement
11644#endif
11645
11646#if GC_CAN_COMPILE_COMPACTION
11647/*
11648 * call-seq:
11649 * GC.auto_compact -> true or false
11650 *
11651 * Returns whether or not automatic compaction has been enabled.
11652 */
11653static VALUE
11654gc_get_auto_compact(VALUE _)
11655{
11656 return RBOOL(ruby_enable_autocompact);
11657}
11658#else
11659# define gc_get_auto_compact rb_f_notimplement
11660#endif
11661
11662static int
11663get_envparam_size(const char *name, size_t *default_value, size_t lower_bound)
11664{
11665 const char *ptr = getenv(name);
11666 ssize_t val;
11667
11668 if (ptr != NULL && *ptr) {
11669 size_t unit = 0;
11670 char *end;
11671#if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
11672 val = strtoll(ptr, &end, 0);
11673#else
11674 val = strtol(ptr, &end, 0);
11675#endif
11676 switch (*end) {
11677 case 'k': case 'K':
11678 unit = 1024;
11679 ++end;
11680 break;
11681 case 'm': case 'M':
11682 unit = 1024*1024;
11683 ++end;
11684 break;
11685 case 'g': case 'G':
11686 unit = 1024*1024*1024;
11687 ++end;
11688 break;
11689 }
11690 while (*end && isspace((unsigned char)*end)) end++;
11691 if (*end) {
11692 if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
11693 return 0;
11694 }
11695 if (unit > 0) {
11696 if (val < -(ssize_t)(SIZE_MAX / 2 / unit) || (ssize_t)(SIZE_MAX / 2 / unit) < val) {
11697 if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%s is ignored because it overflows\n", name, ptr);
11698 return 0;
11699 }
11700 val *= unit;
11701 }
11702 if (val > 0 && (size_t)val > lower_bound) {
11703 if (RTEST(ruby_verbose)) {
11704 fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE")\n", name, val, *default_value);
11705 }
11706 *default_value = (size_t)val;
11707 return 1;
11708 }
11709 else {
11710 if (RTEST(ruby_verbose)) {
11711 fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE") is ignored because it must be greater than %"PRIuSIZE".\n",
11712 name, val, *default_value, lower_bound);
11713 }
11714 return 0;
11715 }
11716 }
11717 return 0;
11718}
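/*
 * Editorial note: get_envparam_size() accepts an optional k/K, m/M or g/G
 * suffix, so for example RUBY_GC_MALLOC_LIMIT=128M is parsed as
 * 128 * 1024 * 1024 bytes.  A value is ignored (and reported when
 * ruby_verbose is set) unless it is strictly greater than `lower_bound`.
 */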
11719
11720static int
11721get_envparam_double(const char *name, double *default_value, double lower_bound, double upper_bound, int accept_zero)
11722{
11723 const char *ptr = getenv(name);
11724 double val;
11725
11726 if (ptr != NULL && *ptr) {
11727 char *end;
11728 val = strtod(ptr, &end);
11729 if (!*ptr || *end) {
11730 if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
11731 return 0;
11732 }
11733
11734 if (accept_zero && val == 0.0) {
11735 goto accept;
11736 }
11737 else if (val <= lower_bound) {
11738 if (RTEST(ruby_verbose)) {
11739 fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be greater than %f.\n",
11740 name, val, *default_value, lower_bound);
11741 }
11742 }
11743 else if (upper_bound != 0.0 && /* ignore upper_bound if it is 0.0 */
11744 val > upper_bound) {
11745 if (RTEST(ruby_verbose)) {
11746 fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be lower than %f.\n",
11747 name, val, *default_value, upper_bound);
11748 }
11749 }
11750 else {
11751 goto accept;
11752 }
11753 }
11754 return 0;
11755
11756 accept:
11757 if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%f (default value: %f)\n", name, val, *default_value);
11758 *default_value = val;
11759 return 1;
11760}
11761
11762static void
11763gc_set_initial_pages(rb_objspace_t *objspace)
11764{
11765 gc_rest(objspace);
11766
11767 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
11768 rb_size_pool_t *size_pool = &size_pools[i];
11769
11770 if (gc_params.heap_init_slots > size_pool->eden_heap.total_slots) {
11771 size_t slots = gc_params.heap_init_slots - size_pool->eden_heap.total_slots;
11772 int multiple = size_pool->slot_size / BASE_SLOT_SIZE;
11773 size_pool->allocatable_pages = slots * multiple / HEAP_PAGE_OBJ_LIMIT;
11774 }
11775 else {
11776 /* We already have more slots than heap_init_slots allows, so
11777 * prevent creating more pages. */
11778 size_pool->allocatable_pages = 0;
11779 }
11780 }
11781 heap_pages_expand_sorted(objspace);
11782}
11783
11784/*
11785 * GC tuning environment variables
11786 *
11787 * * RUBY_GC_HEAP_INIT_SLOTS
11788 * - Initial allocation slots.
11789 * * RUBY_GC_HEAP_FREE_SLOTS
11790 * - Prepare at least this amount of slots after GC.
11791 * - Allocate slots if there are not enough slots.
11792 * * RUBY_GC_HEAP_GROWTH_FACTOR (new from 2.1)
11793 * - Allocate slots by this factor.
11794 * - (next slots number) = (current slots number) * (this factor)
11795 * * RUBY_GC_HEAP_GROWTH_MAX_SLOTS (new from 2.1)
11796 * - Allocation rate is limited to this number of slots.
11797 * * RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO (new from 2.4)
11798 * - Allocate additional pages when the number of free slots is
11799 * lower than the value (total_slots * (this ratio)).
11800 * * RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO (new from 2.4)
11801 * - Allocate slots to satisfy this formula:
11802 * free_slots = total_slots * goal_ratio
11803 * - In other words, prepare (total_slots * goal_ratio) free slots.
11804 * - if this value is 0.0, then use RUBY_GC_HEAP_GROWTH_FACTOR directly.
11805 * * RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO (new from 2.4)
11806 *   - Allow pages to be freed when the number of free slots is
11807 * greater than the value (total_slots * (this ratio)).
11808 * * RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR (new from 2.1.1)
11809 * - Do full GC when the number of old objects is more than R * N
11810 * where R is this factor and
11811 * N is the number of old objects just after last full GC.
11812 *
11813 * * obsolete
11814 * * RUBY_FREE_MIN -> RUBY_GC_HEAP_FREE_SLOTS (from 2.1)
11815 * * RUBY_HEAP_MIN_SLOTS -> RUBY_GC_HEAP_INIT_SLOTS (from 2.1)
11816 *
11817 * * RUBY_GC_MALLOC_LIMIT
11818 * * RUBY_GC_MALLOC_LIMIT_MAX (new from 2.1)
11819 * * RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR (new from 2.1)
11820 *
11821 * * RUBY_GC_OLDMALLOC_LIMIT (new from 2.1)
11822 * * RUBY_GC_OLDMALLOC_LIMIT_MAX (new from 2.1)
11823 * * RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR (new from 2.1)
11824 */
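/*
 * Illustrative usage (editorial note, not part of the original source): the
 * variables listed above are ordinary environment variables read once at
 * startup by ruby_gc_set_params() below, e.g.
 *
 *   $ RUBY_GC_HEAP_INIT_SLOTS=600000 RUBY_GC_HEAP_GROWTH_FACTOR=1.25 ruby app.rb
 *
 * The particular values are arbitrary examples, not tuning recommendations.
 */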
11825
11826void
11827ruby_gc_set_params(void)
11828{
11829 rb_objspace_t *objspace = &rb_objspace;
11830 /* RUBY_GC_HEAP_FREE_SLOTS */
11831 if (get_envparam_size("RUBY_GC_HEAP_FREE_SLOTS", &gc_params.heap_free_slots, 0)) {
11832 /* ok */
11833 }
11834
11835 /* RUBY_GC_HEAP_INIT_SLOTS */
11836 if (get_envparam_size("RUBY_GC_HEAP_INIT_SLOTS", &gc_params.heap_init_slots, 0)) {
11837 gc_set_initial_pages(objspace);
11838 }
11839
11840 get_envparam_double("RUBY_GC_HEAP_GROWTH_FACTOR", &gc_params.growth_factor, 1.0, 0.0, FALSE);
11841 get_envparam_size ("RUBY_GC_HEAP_GROWTH_MAX_SLOTS", &gc_params.growth_max_slots, 0);
11842 get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO", &gc_params.heap_free_slots_min_ratio,
11843 0.0, 1.0, FALSE);
11844 get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO", &gc_params.heap_free_slots_max_ratio,
11845 gc_params.heap_free_slots_min_ratio, 1.0, FALSE);
11846 get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO", &gc_params.heap_free_slots_goal_ratio,
11847 gc_params.heap_free_slots_min_ratio, gc_params.heap_free_slots_max_ratio, TRUE);
11848 get_envparam_double("RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR", &gc_params.oldobject_limit_factor, 0.0, 0.0, TRUE);
11849
11850 if (get_envparam_size("RUBY_GC_MALLOC_LIMIT", &gc_params.malloc_limit_min, 0)) {
11851 malloc_limit = gc_params.malloc_limit_min;
11852 }
11853 get_envparam_size ("RUBY_GC_MALLOC_LIMIT_MAX", &gc_params.malloc_limit_max, 0);
11854 if (!gc_params.malloc_limit_max) { /* ignore max-check if 0 */
11855 gc_params.malloc_limit_max = SIZE_MAX;
11856 }
11857 get_envparam_double("RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR", &gc_params.malloc_limit_growth_factor, 1.0, 0.0, FALSE);
11858
11859#if RGENGC_ESTIMATE_OLDMALLOC
11860 if (get_envparam_size("RUBY_GC_OLDMALLOC_LIMIT", &gc_params.oldmalloc_limit_min, 0)) {
11861 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
11862 }
11863 get_envparam_size ("RUBY_GC_OLDMALLOC_LIMIT_MAX", &gc_params.oldmalloc_limit_max, 0);
11864 get_envparam_double("RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR", &gc_params.oldmalloc_limit_growth_factor, 1.0, 0.0, FALSE);
11865#endif
11866}
11867
11868static void
11869reachable_objects_from_callback(VALUE obj)
11870{
11871 rb_ractor_t *cr = GET_RACTOR();
11872 cr->mfd->mark_func(obj, cr->mfd->data);
11873}
11874
11875void
11876rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data)
11877{
11878 rb_objspace_t *objspace = &rb_objspace;
11879
11880 RB_VM_LOCK_ENTER();
11881 {
11882 if (during_gc) rb_bug("rb_objspace_reachable_objects_from() is not supported while during_gc == true");
11883
11884 if (is_markable_object(objspace, obj)) {
11885 rb_ractor_t *cr = GET_RACTOR();
11886 struct gc_mark_func_data_struct mfd = {
11887 .mark_func = func,
11888 .data = data,
11889 }, *prev_mfd = cr->mfd;
11890
11891 cr->mfd = &mfd;
11892 gc_mark_children(objspace, obj);
11893 cr->mfd = prev_mfd;
11894 }
11895 }
11896 RB_VM_LOCK_LEAVE();
11897}
11898
11899struct root_objects_data {
11900    const char *category;
11901 void (*func)(const char *category, VALUE, void *);
11902 void *data;
11903};
11904
11905static void
11906root_objects_from(VALUE obj, void *ptr)
11907{
11908 const struct root_objects_data *data = (struct root_objects_data *)ptr;
11909 (*data->func)(data->category, obj, data->data);
11910}
11911
11912void
11913rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *passing_data)
11914{
11915 rb_objspace_t *objspace = &rb_objspace;
11916 objspace_reachable_objects_from_root(objspace, func, passing_data);
11917}
11918
11919static void
11920objspace_reachable_objects_from_root(rb_objspace_t *objspace, void (func)(const char *category, VALUE, void *), void *passing_data)
11921{
11922 if (during_gc) rb_bug("objspace_reachable_objects_from_root() is not supported while during_gc == true");
11923
11924 rb_ractor_t *cr = GET_RACTOR();
11925 struct root_objects_data data = {
11926 .func = func,
11927 .data = passing_data,
11928 };
11929 struct gc_mark_func_data_struct mfd = {
11930 .mark_func = root_objects_from,
11931 .data = &data,
11932 }, *prev_mfd = cr->mfd;
11933
11934 cr->mfd = &mfd;
11935 gc_mark_roots(objspace, &data.category);
11936 cr->mfd = prev_mfd;
11937}
11938
11939/*
11940 ------------------------ Extended allocator ------------------------
11941*/
11942
11943struct gc_raise_tag {
11944    VALUE exc;
11945 const char *fmt;
11946 va_list *ap;
11947};
11948
11949static void *
11950gc_vraise(void *ptr)
11951{
11952 struct gc_raise_tag *argv = ptr;
11953 rb_vraise(argv->exc, argv->fmt, *argv->ap);
11954 UNREACHABLE_RETURN(NULL);
11955}
11956
11957static void
11958gc_raise(VALUE exc, const char *fmt, ...)
11959{
11960 va_list ap;
11961 va_start(ap, fmt);
11962 struct gc_raise_tag argv = {
11963 exc, fmt, &ap,
11964 };
11965
11966 if (ruby_thread_has_gvl_p()) {
11967 gc_vraise(&argv);
11968        UNREACHABLE;
11969    }
11970 else if (ruby_native_thread_p()) {
11971 rb_thread_call_with_gvl(gc_vraise, &argv);
11972        UNREACHABLE;
11973    }
11974 else {
11975 /* Not in a ruby thread */
11976 fprintf(stderr, "%s", "[FATAL] ");
11977 vfprintf(stderr, fmt, ap);
11978 }
11979
11980 va_end(ap);
11981 abort();
11982}
11983
11984static void objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t size);
11985
11986static void
11987negative_size_allocation_error(const char *msg)
11988{
11989 gc_raise(rb_eNoMemError, "%s", msg);
11990}
11991
11992static void *
11993ruby_memerror_body(void *dummy)
11994{
11995 rb_memerror();
11996 return 0;
11997}
11998
11999NORETURN(static void ruby_memerror(void));
12001static void
12002ruby_memerror(void)
12003{
12004 if (ruby_thread_has_gvl_p()) {
12005 rb_memerror();
12006 }
12007 else {
12008 if (ruby_native_thread_p()) {
12009 rb_thread_call_with_gvl(ruby_memerror_body, 0);
12010 }
12011 else {
12012 /* no ruby thread */
12013 fprintf(stderr, "[FATAL] failed to allocate memory\n");
12014 }
12015 }
12016 exit(EXIT_FAILURE);
12017}
12018
12019void
12020rb_memerror(void)
12021{
12022 rb_execution_context_t *ec = GET_EC();
12023 rb_objspace_t *objspace = rb_objspace_of(rb_ec_vm_ptr(ec));
12024 VALUE exc;
12025
12026 if (0) {
12027 // Print out pid, sleep, so you can attach debugger to see what went wrong:
12028 fprintf(stderr, "rb_memerror pid=%"PRI_PIDT_PREFIX"d\n", getpid());
12029 sleep(60);
12030 }
12031
12032 if (during_gc) {
12033 // TODO: OMG!! How to implement it?
12034 gc_exit(objspace, gc_enter_event_rb_memerror, NULL);
12035 }
12036
12037 exc = nomem_error;
12038 if (!exc ||
12039 rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
12040 fprintf(stderr, "[FATAL] failed to allocate memory\n");
12041 exit(EXIT_FAILURE);
12042 }
12043 if (rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
12044 rb_ec_raised_clear(ec);
12045 }
12046 else {
12047 rb_ec_raised_set(ec, RAISED_NOMEMORY);
12048 exc = ruby_vm_special_exception_copy(exc);
12049 }
12050 ec->errinfo = exc;
12051 EC_JUMP_TAG(ec, TAG_RAISE);
12052}
12053
12054void *
12055rb_aligned_malloc(size_t alignment, size_t size)
12056{
12057 /* alignment must be a power of 2 */
12058 GC_ASSERT(((alignment - 1) & alignment) == 0);
12059 GC_ASSERT(alignment % sizeof(void*) == 0);
12060
12061 void *res;
12062
12063#if defined __MINGW32__
12064 res = __mingw_aligned_malloc(size, alignment);
12065#elif defined _WIN32
12066 void *_aligned_malloc(size_t, size_t);
12067 res = _aligned_malloc(size, alignment);
12068#elif defined(HAVE_POSIX_MEMALIGN)
12069 if (posix_memalign(&res, alignment, size) != 0) {
12070 return NULL;
12071 }
12072#elif defined(HAVE_MEMALIGN)
12073 res = memalign(alignment, size);
12074#else
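    /* Editorial note: generic fallback.  Over-allocate by
     * (alignment + sizeof(void*)), step past that much, round the address
     * down to an alignment boundary, and stash the pointer malloc() really
     * returned in the slot just before the aligned address so that
     * rb_aligned_free() below can recover and free it. */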
12075 char* aligned;
12076 res = malloc(alignment + size + sizeof(void*));
12077 aligned = (char*)res + alignment + sizeof(void*);
12078 aligned -= ((VALUE)aligned & (alignment - 1));
12079 ((void**)aligned)[-1] = res;
12080 res = (void*)aligned;
12081#endif
12082
12083 GC_ASSERT((uintptr_t)res % alignment == 0);
12084
12085 return res;
12086}
12087
12088static void
12089rb_aligned_free(void *ptr, size_t size)
12090{
12091#if defined __MINGW32__
12092 __mingw_aligned_free(ptr);
12093#elif defined _WIN32
12094 _aligned_free(ptr);
12095#elif defined(HAVE_POSIX_MEMALIGN) || defined(HAVE_MEMALIGN)
12096 free(ptr);
12097#else
12098 free(((void**)ptr)[-1]);
12099#endif
12100}
12101
12102static inline size_t
12103objspace_malloc_size(rb_objspace_t *objspace, void *ptr, size_t hint)
12104{
12105#ifdef HAVE_MALLOC_USABLE_SIZE
12106 return malloc_usable_size(ptr);
12107#else
12108 return hint;
12109#endif
12110}
12111
12112enum memop_type {
12113 MEMOP_TYPE_MALLOC = 0,
12114 MEMOP_TYPE_FREE,
12115 MEMOP_TYPE_REALLOC
12116};
12117
12118static inline void
12119atomic_sub_nounderflow(size_t *var, size_t sub)
12120{
12121 if (sub == 0) return;
12122
12123 while (1) {
12124 size_t val = *var;
12125 if (val < sub) sub = val;
12126 if (ATOMIC_SIZE_CAS(*var, val, val-sub) == val) break;
12127 }
12128}
12129
12130static void
12131objspace_malloc_gc_stress(rb_objspace_t *objspace)
12132{
12133 if (ruby_gc_stressful && ruby_native_thread_p()) {
12134 unsigned int reason = (GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP |
12135 GPR_FLAG_STRESS | GPR_FLAG_MALLOC);
12136
12137 if (gc_stress_full_mark_after_malloc_p()) {
12138 reason |= GPR_FLAG_FULL_MARK;
12139 }
12140 garbage_collect_with_gvl(objspace, reason);
12141 }
12142}
12143
12144static inline bool
12145objspace_malloc_increase_report(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
12146{
12147 if (0) fprintf(stderr, "increase - ptr: %p, type: %s, new_size: %"PRIdSIZE", old_size: %"PRIdSIZE"\n",
12148 mem,
12149 type == MEMOP_TYPE_MALLOC ? "malloc" :
12150 type == MEMOP_TYPE_FREE ? "free " :
12151 type == MEMOP_TYPE_REALLOC ? "realloc": "error",
12152 new_size, old_size);
12153 return false;
12154}
12155
12156static bool
12157objspace_malloc_increase_body(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
12158{
12159 if (new_size > old_size) {
12160 ATOMIC_SIZE_ADD(malloc_increase, new_size - old_size);
12161#if RGENGC_ESTIMATE_OLDMALLOC
12162 ATOMIC_SIZE_ADD(objspace->rgengc.oldmalloc_increase, new_size - old_size);
12163#endif
12164 }
12165 else {
12166 atomic_sub_nounderflow(&malloc_increase, old_size - new_size);
12167#if RGENGC_ESTIMATE_OLDMALLOC
12168 atomic_sub_nounderflow(&objspace->rgengc.oldmalloc_increase, old_size - new_size);
12169#endif
12170 }
12171
12172 if (type == MEMOP_TYPE_MALLOC) {
12173 retry:
12174 if (malloc_increase > malloc_limit && ruby_native_thread_p() && !dont_gc_val()) {
12175 if (ruby_thread_has_gvl_p() && is_lazy_sweeping(objspace)) {
12176 gc_rest(objspace); /* gc_rest can reduce malloc_increase */
12177 goto retry;
12178 }
12179 garbage_collect_with_gvl(objspace, GPR_FLAG_MALLOC);
12180 }
12181 }
12182
12183#if MALLOC_ALLOCATED_SIZE
12184 if (new_size >= old_size) {
12185 ATOMIC_SIZE_ADD(objspace->malloc_params.allocated_size, new_size - old_size);
12186 }
12187 else {
12188 size_t dec_size = old_size - new_size;
12189 size_t allocated_size = objspace->malloc_params.allocated_size;
12190
12191#if MALLOC_ALLOCATED_SIZE_CHECK
12192 if (allocated_size < dec_size) {
12193 rb_bug("objspace_malloc_increase: underflow malloc_params.allocated_size.");
12194 }
12195#endif
12196 atomic_sub_nounderflow(&objspace->malloc_params.allocated_size, dec_size);
12197 }
12198
12199 switch (type) {
12200 case MEMOP_TYPE_MALLOC:
12201 ATOMIC_SIZE_INC(objspace->malloc_params.allocations);
12202 break;
12203 case MEMOP_TYPE_FREE:
12204 {
12205 size_t allocations = objspace->malloc_params.allocations;
12206 if (allocations > 0) {
12207 atomic_sub_nounderflow(&objspace->malloc_params.allocations, 1);
12208 }
12209#if MALLOC_ALLOCATED_SIZE_CHECK
12210 else {
12211 GC_ASSERT(objspace->malloc_params.allocations > 0);
12212 }
12213#endif
12214 }
12215 break;
12216 case MEMOP_TYPE_REALLOC: /* ignore */ break;
12217 }
12218#endif
12219 return true;
12220}
12221
12222#define objspace_malloc_increase(...) \
12223 for (bool malloc_increase_done = objspace_malloc_increase_report(__VA_ARGS__); \
12224 !malloc_increase_done; \
12225 malloc_increase_done = objspace_malloc_increase_body(__VA_ARGS__))
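/*
 * Editorial note: the for-loop above is an idiom that lets the macro be used
 * either as a plain statement or with an attached block (as in objspace_xfree()
 * below).  objspace_malloc_increase_report() always returns false, so the
 * attached block runs exactly once, and objspace_malloc_increase_body() then
 * runs as the loop's final "increment" expression, terminating the loop.
 */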
12226
12227struct malloc_obj_info { /* 4 words */
12228 size_t size;
12229#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12230 size_t gen;
12231 const char *file;
12232 size_t line;
12233#endif
12234};
12235
12236#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12237const char *ruby_malloc_info_file;
12238int ruby_malloc_info_line;
12239#endif
12240
12241static inline size_t
12242objspace_malloc_prepare(rb_objspace_t *objspace, size_t size)
12243{
12244 if (size == 0) size = 1;
12245
12246#if CALC_EXACT_MALLOC_SIZE
12247 size += sizeof(struct malloc_obj_info);
12248#endif
12249
12250 return size;
12251}
12252
12253static bool
12254malloc_during_gc_p(rb_objspace_t *objspace)
12255{
12256 /* malloc is not allowed during GC when we're not using multiple ractors
12257 * (since ractors can run while another thread is sweeping) and when we
12258 * have the GVL (since if we don't have the GVL, we'll try to acquire the
12259 * GVL which will block and ensure the other thread finishes GC). */
12260 return during_gc && !rb_multi_ractor_p() && ruby_thread_has_gvl_p();
12261}
12262
12263static inline void *
12264objspace_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
12265{
12266 size = objspace_malloc_size(objspace, mem, size);
12267 objspace_malloc_increase(objspace, mem, size, 0, MEMOP_TYPE_MALLOC);
12268
12269#if CALC_EXACT_MALLOC_SIZE
12270 {
12271 struct malloc_obj_info *info = mem;
12272 info->size = size;
12273#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12274 info->gen = objspace->profile.count;
12275 info->file = ruby_malloc_info_file;
12276 info->line = info->file ? ruby_malloc_info_line : 0;
12277#endif
12278 mem = info + 1;
12279 }
12280#endif
12281
12282 return mem;
12283}
12284
12285#if defined(__GNUC__) && RUBY_DEBUG
12286#define RB_BUG_INSTEAD_OF_RB_MEMERROR 1
12287#endif
12288
12289#ifndef RB_BUG_INSTEAD_OF_RB_MEMERROR
12290# define RB_BUG_INSTEAD_OF_RB_MEMERROR 0
12291#endif
12292
12293#define GC_MEMERROR(...) \
12294 ((RB_BUG_INSTEAD_OF_RB_MEMERROR+0) ? rb_bug("" __VA_ARGS__) : rb_memerror())
12295
12296#define TRY_WITH_GC(siz, expr) do { \
12297 const gc_profile_record_flag gpr = \
12298 GPR_FLAG_FULL_MARK | \
12299 GPR_FLAG_IMMEDIATE_MARK | \
12300 GPR_FLAG_IMMEDIATE_SWEEP | \
12301 GPR_FLAG_MALLOC; \
12302 objspace_malloc_gc_stress(objspace); \
12303 \
12304 if (LIKELY((expr))) { \
12305 /* Success on 1st try */ \
12306 } \
12307 else if (!garbage_collect_with_gvl(objspace, gpr)) { \
12308 /* @shyouhei thinks this doesn't happen */ \
12309 GC_MEMERROR("TRY_WITH_GC: could not GC"); \
12310 } \
12311 else if ((expr)) { \
12312 /* Success on 2nd try */ \
12313 } \
12314 else { \
12315 GC_MEMERROR("TRY_WITH_GC: could not allocate:" \
12316 "%"PRIdSIZE" bytes for %s", \
12317 siz, # expr); \
12318 } \
12319 } while (0)
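/*
 * Editorial note: TRY_WITH_GC evaluates `expr` (an allocation such as
 * `mem = malloc(size)`) once and, only if that yields false/NULL, forces a
 * full immediate-mark/immediate-sweep GC and evaluates `expr` a second time.
 * If either the GC or the second attempt fails, GC_MEMERROR() reports the
 * out-of-memory condition (rb_memerror(), or rb_bug() in debug builds).
 */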
12320
12321/* these shouldn't be called directly.
12322 * objspace_* functions do not check allocation size.
12323 */
12324static void *
12325objspace_xmalloc0(rb_objspace_t *objspace, size_t size)
12326{
12327 if (UNLIKELY(malloc_during_gc_p(objspace))) {
12328 rb_warn("malloc during GC detected, this could cause crashes if it triggers another GC");
12329#if RGENGC_CHECK_MODE || RUBY_DEBUG
12330 rb_bug("Cannot malloc during GC");
12331#endif
12332 }
12333
12334 void *mem;
12335
12336 size = objspace_malloc_prepare(objspace, size);
12337 TRY_WITH_GC(size, mem = malloc(size));
12338 RB_DEBUG_COUNTER_INC(heap_xmalloc);
12339 return objspace_malloc_fixup(objspace, mem, size);
12340}
12341
12342static inline size_t
12343xmalloc2_size(const size_t count, const size_t elsize)
12344{
12345 return size_mul_or_raise(count, elsize, rb_eArgError);
12346}
12347
12348static void *
12349objspace_xrealloc(rb_objspace_t *objspace, void *ptr, size_t new_size, size_t old_size)
12350{
12351 if (UNLIKELY(malloc_during_gc_p(objspace))) {
12352 rb_warn("realloc during GC detected, this could cause crashes if it triggers another GC");
12353#if RGENGC_CHECK_MODE || RUBY_DEBUG
12354 rb_bug("Cannot realloc during GC");
12355#endif
12356 }
12357
12358 void *mem;
12359
12360 if (!ptr) return objspace_xmalloc0(objspace, new_size);
12361
12362 /*
12363 * The behavior of realloc(ptr, 0) is implementation defined.
12364     * Therefore we don't use realloc(ptr, 0), for portability reasons.
12365 * see http://www.open-std.org/jtc1/sc22/wg14/www/docs/dr_400.htm
12366 */
12367 if (new_size == 0) {
12368 if ((mem = objspace_xmalloc0(objspace, 0)) != NULL) {
12369 /*
12370 * - OpenBSD's malloc(3) man page says that when 0 is passed, it
12371 * returns a non-NULL pointer to an access-protected memory page.
12372 * The returned pointer cannot be read / written at all, but
12373 * still be a valid argument of free().
12374 *
12375 * https://man.openbsd.org/malloc.3
12376 *
12377     * - Linux's malloc(3) man page says that it may return a non-NULL
12378     *   pointer when its argument is 0.  That return value is safe (and
12379     *   is expected) to be passed to free().
12380     *
12381     *   https://man7.org/linux/man-pages/man3/malloc.3.html
12382     *
12383     * - Reading its implementation, jemalloc's malloc() returns a fully
12384     *   normal 16-byte memory region when its argument is 0.
12385     *
12386     * - Reading its implementation, musl libc's malloc() returns a fully
12387     *   normal 32-byte memory region when its argument is 0.
12388 *
12389 * - Other malloc implementations can also return non-NULL.
12390 */
12391 objspace_xfree(objspace, ptr, old_size);
12392 return mem;
12393 }
12394 else {
12395 /*
12396 * It is dangerous to return NULL here, because that could lead to
12397             * RCE. Fall back to 1 byte instead of zero.
12398 *
12399 * https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-11932
12400 */
12401 new_size = 1;
12402 }
12403 }
12404
12405#if CALC_EXACT_MALLOC_SIZE
12406 {
12407 struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
12408 new_size += sizeof(struct malloc_obj_info);
12409 ptr = info;
12410 old_size = info->size;
12411 }
12412#endif
12413
12414 old_size = objspace_malloc_size(objspace, ptr, old_size);
12415 TRY_WITH_GC(new_size, mem = RB_GNUC_EXTENSION_BLOCK(realloc(ptr, new_size)));
12416 new_size = objspace_malloc_size(objspace, mem, new_size);
12417
12418#if CALC_EXACT_MALLOC_SIZE
12419 {
12420 struct malloc_obj_info *info = mem;
12421 info->size = new_size;
12422 mem = info + 1;
12423 }
12424#endif
12425
12426 objspace_malloc_increase(objspace, mem, new_size, old_size, MEMOP_TYPE_REALLOC);
12427
12428 RB_DEBUG_COUNTER_INC(heap_xrealloc);
12429 return mem;
12430}
12431
12432#if CALC_EXACT_MALLOC_SIZE && USE_GC_MALLOC_OBJ_INFO_DETAILS
12433
12434#define MALLOC_INFO_GEN_SIZE 100
12435#define MALLOC_INFO_SIZE_SIZE 10
12436static size_t malloc_info_gen_cnt[MALLOC_INFO_GEN_SIZE];
12437static size_t malloc_info_gen_size[MALLOC_INFO_GEN_SIZE];
12438static size_t malloc_info_size[MALLOC_INFO_SIZE_SIZE+1];
12439static st_table *malloc_info_file_table;
12440
12441static int
12442mmalloc_info_file_i(st_data_t key, st_data_t val, st_data_t dmy)
12443{
12444 const char *file = (void *)key;
12445 const size_t *data = (void *)val;
12446
12447 fprintf(stderr, "%s\t%"PRIdSIZE"\t%"PRIdSIZE"\n", file, data[0], data[1]);
12448
12449 return ST_CONTINUE;
12450}
12451
12452__attribute__((destructor))
12453void
12454rb_malloc_info_show_results(void)
12455{
12456 int i;
12457
12458 fprintf(stderr, "* malloc_info gen statistics\n");
12459 for (i=0; i<MALLOC_INFO_GEN_SIZE; i++) {
12460 if (i == MALLOC_INFO_GEN_SIZE-1) {
12461 fprintf(stderr, "more\t%"PRIdSIZE"\t%"PRIdSIZE"\n", malloc_info_gen_cnt[i], malloc_info_gen_size[i]);
12462 }
12463 else {
12464 fprintf(stderr, "%d\t%"PRIdSIZE"\t%"PRIdSIZE"\n", i, malloc_info_gen_cnt[i], malloc_info_gen_size[i]);
12465 }
12466 }
12467
12468 fprintf(stderr, "* malloc_info size statistics\n");
12469 for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
12470 int s = 16 << i;
12471 fprintf(stderr, "%d\t%"PRIdSIZE"\n", s, malloc_info_size[i]);
12472 }
12473 fprintf(stderr, "more\t%"PRIdSIZE"\n", malloc_info_size[i]);
12474
12475 if (malloc_info_file_table) {
12476 fprintf(stderr, "* malloc_info file statistics\n");
12477 st_foreach(malloc_info_file_table, mmalloc_info_file_i, 0);
12478 }
12479}
12480#else
12481void
12482rb_malloc_info_show_results(void)
12483{
12484}
12485#endif
12486
12487static void
12488objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t old_size)
12489{
12490 if (!ptr) {
12491 /*
12492 * ISO/IEC 9899 says "If ptr is a null pointer, no action occurs" since
12493         * its first version. We had better follow suit.
12494 */
12495 return;
12496 }
12497#if CALC_EXACT_MALLOC_SIZE
12498 struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
12499 ptr = info;
12500 old_size = info->size;
12501
12502#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12503 {
12504 int gen = (int)(objspace->profile.count - info->gen);
12505 int gen_index = gen >= MALLOC_INFO_GEN_SIZE ? MALLOC_INFO_GEN_SIZE-1 : gen;
12506 int i;
12507
12508 malloc_info_gen_cnt[gen_index]++;
12509 malloc_info_gen_size[gen_index] += info->size;
12510
12511 for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
12512 size_t s = 16 << i;
12513 if (info->size <= s) {
12514 malloc_info_size[i]++;
12515 goto found;
12516 }
12517 }
12518 malloc_info_size[i]++;
12519 found:;
12520
12521 {
12522 st_data_t key = (st_data_t)info->file, d;
12523 size_t *data;
12524
12525 if (malloc_info_file_table == NULL) {
12526 malloc_info_file_table = st_init_numtable_with_size(1024);
12527 }
12528 if (st_lookup(malloc_info_file_table, key, &d)) {
12529 /* hit */
12530 data = (size_t *)d;
12531 }
12532 else {
12533 data = malloc(xmalloc2_size(2, sizeof(size_t)));
12534 if (data == NULL) rb_bug("objspace_xfree: can not allocate memory");
12535 data[0] = data[1] = 0;
12536 st_insert(malloc_info_file_table, key, (st_data_t)data);
12537 }
12538 data[0] ++;
12539 data[1] += info->size;
12540 };
12541 if (0 && gen >= 2) { /* verbose output */
12542 if (info->file) {
12543 fprintf(stderr, "free - size:%"PRIdSIZE", gen:%d, pos: %s:%"PRIdSIZE"\n",
12544 info->size, gen, info->file, info->line);
12545 }
12546 else {
12547 fprintf(stderr, "free - size:%"PRIdSIZE", gen:%d\n",
12548 info->size, gen);
12549 }
12550 }
12551 }
12552#endif
12553#endif
12554 old_size = objspace_malloc_size(objspace, ptr, old_size);
12555
12556 objspace_malloc_increase(objspace, ptr, 0, old_size, MEMOP_TYPE_FREE) {
12557 free(ptr);
12558 ptr = NULL;
12559 RB_DEBUG_COUNTER_INC(heap_xfree);
12560 }
12561}
12562
12563static void *
12564ruby_xmalloc0(size_t size)
12565{
12566 return objspace_xmalloc0(&rb_objspace, size);
12567}
12568
12569void *
12570ruby_xmalloc_body(size_t size)
12571{
12572 if ((ssize_t)size < 0) {
12573 negative_size_allocation_error("too large allocation size");
12574 }
12575 return ruby_xmalloc0(size);
12576}
12577
12578void
12579ruby_malloc_size_overflow(size_t count, size_t elsize)
12580{
12581    rb_raise(rb_eArgError,
12582             "malloc: possible integer overflow (%"PRIuSIZE"*%"PRIuSIZE")",
12583 count, elsize);
12584}
12585
12586void *
12587ruby_xmalloc2_body(size_t n, size_t size)
12588{
12589 return objspace_xmalloc0(&rb_objspace, xmalloc2_size(n, size));
12590}
12591
12592static void *
12593objspace_xcalloc(rb_objspace_t *objspace, size_t size)
12594{
12595 if (UNLIKELY(malloc_during_gc_p(objspace))) {
12596 rb_warn("calloc during GC detected, this could cause crashes if it triggers another GC");
12597#if RGENGC_CHECK_MODE || RUBY_DEBUG
12598 rb_bug("Cannot calloc during GC");
12599#endif
12600 }
12601
12602 void *mem;
12603
12604 size = objspace_malloc_prepare(objspace, size);
12605 TRY_WITH_GC(size, mem = calloc1(size));
12606 return objspace_malloc_fixup(objspace, mem, size);
12607}
12608
12609void *
12610ruby_xcalloc_body(size_t n, size_t size)
12611{
12612 return objspace_xcalloc(&rb_objspace, xmalloc2_size(n, size));
12613}
12614
12615#ifdef ruby_sized_xrealloc
12616#undef ruby_sized_xrealloc
12617#endif
12618void *
12619ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size)
12620{
12621 if ((ssize_t)new_size < 0) {
12622 negative_size_allocation_error("too large allocation size");
12623 }
12624
12625 return objspace_xrealloc(&rb_objspace, ptr, new_size, old_size);
12626}
12627
12628void *
12629ruby_xrealloc_body(void *ptr, size_t new_size)
12630{
12631 return ruby_sized_xrealloc(ptr, new_size, 0);
12632}
12633
12634#ifdef ruby_sized_xrealloc2
12635#undef ruby_sized_xrealloc2
12636#endif
12637void *
12638ruby_sized_xrealloc2(void *ptr, size_t n, size_t size, size_t old_n)
12639{
12640 size_t len = xmalloc2_size(n, size);
12641 return objspace_xrealloc(&rb_objspace, ptr, len, old_n * size);
12642}
12643
12644void *
12645ruby_xrealloc2_body(void *ptr, size_t n, size_t size)
12646{
12647 return ruby_sized_xrealloc2(ptr, n, size, 0);
12648}
12649
12650#ifdef ruby_sized_xfree
12651#undef ruby_sized_xfree
12652#endif
12653void
12654ruby_sized_xfree(void *x, size_t size)
12655{
12656 if (LIKELY(x)) {
12657 /* It's possible for a C extension's pthread destructor function set by pthread_key_create
12658 * to be called after ruby_vm_destruct and attempt to free memory. Fall back to mimfree in
12659 * that case. */
12660 if (LIKELY(GET_VM())) {
12661 objspace_xfree(&rb_objspace, x, size);
12662 }
12663 else {
12664 ruby_mimfree(x);
12665 }
12666 }
12667}
12668
12669void
12670ruby_xfree(void *x)
12671{
12672 ruby_sized_xfree(x, 0);
12673}
12674
12675void *
12676rb_xmalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
12677{
12678 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
12679 return ruby_xmalloc(w);
12680}
12681
12682void *
12683rb_xcalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
12684{
12685 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
12686 return ruby_xcalloc(w, 1);
12687}
12688
12689void *
12690rb_xrealloc_mul_add(const void *p, size_t x, size_t y, size_t z) /* x * y + z */
12691{
12692 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
12693 return ruby_xrealloc((void *)p, w);
12694}
12695
12696void *
12697rb_xmalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
12698{
12699 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
12700 return ruby_xmalloc(u);
12701}
12702
12703void *
12704rb_xcalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
12705{
12706 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
12707 return ruby_xcalloc(u, 1);
12708}
12709
12710/* Mimics ruby_xmalloc, but does not require rb_objspace.
12711 * Should return a pointer suitable for ruby_xfree.
12712 */
12713void *
12714ruby_mimmalloc(size_t size)
12715{
12716 void *mem;
12717#if CALC_EXACT_MALLOC_SIZE
12718 size += sizeof(struct malloc_obj_info);
12719#endif
12720 mem = malloc(size);
12721#if CALC_EXACT_MALLOC_SIZE
12722 if (!mem) {
12723 return NULL;
12724 }
12725 else
12726 /* set 0 for consistency of allocated_size/allocations */
12727 {
12728 struct malloc_obj_info *info = mem;
12729 info->size = 0;
12730#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12731 info->gen = 0;
12732 info->file = NULL;
12733 info->line = 0;
12734#endif
12735 mem = info + 1;
12736 }
12737#endif
12738 return mem;
12739}
12740
12741void
12742ruby_mimfree(void *ptr)
12743{
12744#if CALC_EXACT_MALLOC_SIZE
12745 struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
12746 ptr = info;
12747#endif
12748 free(ptr);
12749}
12750
12751void *
12752rb_alloc_tmp_buffer_with_count(volatile VALUE *store, size_t size, size_t cnt)
12753{
12754 void *ptr;
12755 VALUE imemo;
12756 rb_imemo_tmpbuf_t *tmpbuf;
12757
12758    /* Keep this order: allocate an empty imemo first, then xmalloc, to
12759     * avoid a potential memory leak */
12760 imemo = rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(NULL, 0);
12761 *store = imemo;
12762 ptr = ruby_xmalloc0(size);
12763 tmpbuf = (rb_imemo_tmpbuf_t *)imemo;
12764 tmpbuf->ptr = ptr;
12765 tmpbuf->cnt = cnt;
12766 return ptr;
12767}
12768
12769void *
12770rb_alloc_tmp_buffer(volatile VALUE *store, long len)
12771{
12772 long cnt;
12773
12774 if (len < 0 || (cnt = (long)roomof(len, sizeof(VALUE))) < 0) {
12775 rb_raise(rb_eArgError, "negative buffer size (or size too big)");
12776 }
12777
12778 return rb_alloc_tmp_buffer_with_count(store, len, cnt);
12779}
12780
12781void
12782rb_free_tmp_buffer(volatile VALUE *store)
12783{
12784 rb_imemo_tmpbuf_t *s = (rb_imemo_tmpbuf_t*)ATOMIC_VALUE_EXCHANGE(*store, 0);
12785 if (s) {
12786 void *ptr = ATOMIC_PTR_EXCHANGE(s->ptr, 0);
12787 s->cnt = 0;
12788 ruby_xfree(ptr);
12789 }
12790}
12791
12792#if MALLOC_ALLOCATED_SIZE
12793/*
12794 * call-seq:
12795 * GC.malloc_allocated_size -> Integer
12796 *
12797 * Returns the size of memory allocated by malloc().
12798 *
12799 * Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
12800 */
12801
12802static VALUE
12803gc_malloc_allocated_size(VALUE self)
12804{
12805 return UINT2NUM(rb_objspace.malloc_params.allocated_size);
12806}
12807
12808/*
12809 * call-seq:
12810 * GC.malloc_allocations -> Integer
12811 *
12812 * Returns the number of malloc() allocations.
12813 *
12814 * Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
12815 */
12816
12817static VALUE
12818gc_malloc_allocations(VALUE self)
12819{
12820 return UINT2NUM(rb_objspace.malloc_params.allocations);
12821}
12822#endif
12823
12824void
12825rb_gc_adjust_memory_usage(ssize_t diff)
12826{
12827 rb_objspace_t *objspace = &rb_objspace;
12828 if (diff > 0) {
12829 objspace_malloc_increase(objspace, 0, diff, 0, MEMOP_TYPE_REALLOC);
12830 }
12831 else if (diff < 0) {
12832 objspace_malloc_increase(objspace, 0, 0, -diff, MEMOP_TYPE_REALLOC);
12833 }
12834}
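/*
 * Illustrative use from a C extension (editorial note, not part of the
 * original source): after allocating a large buffer outside the Ruby heap,
 * report it so the malloc-based GC triggers stay roughly accurate, and
 * report the release symmetrically.  `n` and `buf` are hypothetical:
 *
 *   void *buf = malloc(n);
 *   rb_gc_adjust_memory_usage((ssize_t)n);
 *   ...
 *   free(buf);
 *   rb_gc_adjust_memory_usage(-(ssize_t)n);
 */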
12835
12836/*
12837 ------------------------------ WeakMap ------------------------------
12838*/
12839
12840struct weakmap {
12841 st_table *obj2wmap; /* obj -> [ref,...] */
12842 st_table *wmap2obj; /* ref -> obj */
12843 VALUE final;
12844};
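/*
 * Editorial note: wmap2obj maps each key to its value, while obj2wmap maps a
 * value back to the keys that currently reference it.  Each obj2wmap entry is
 * a heap-allocated VALUE array laid out as
 *
 *   values[0]     number of back-references N
 *   values[1..N]  the keys mapping to this value
 *
 * (see wmap_aset_update() and wmap_remove_inverse_ref() below).
 */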
12845
12846#define WMAP_DELETE_DEAD_OBJECT_IN_MARK 0
12847
12848#if WMAP_DELETE_DEAD_OBJECT_IN_MARK
12849static int
12850wmap_mark_map(st_data_t key, st_data_t val, st_data_t arg)
12851{
12852 rb_objspace_t *objspace = (rb_objspace_t *)arg;
12853 VALUE obj = (VALUE)val;
12854 if (!is_live_object(objspace, obj)) return ST_DELETE;
12855 return ST_CONTINUE;
12856}
12857#endif
12858
12859static int
12860wmap_replace_ref(st_data_t *key, st_data_t *value, st_data_t _argp, int existing)
12861{
12862 *key = rb_gc_location((VALUE)*key);
12863
12864 VALUE *values = (VALUE *)*value;
12865 VALUE size = values[0];
12866
12867 for (VALUE index = 1; index <= size; index++) {
12868 values[index] = rb_gc_location(values[index]);
12869 }
12870
12871 return ST_CONTINUE;
12872}
12873
12874static int
12875wmap_foreach_replace(st_data_t key, st_data_t value, st_data_t _argp, int error)
12876{
12877 if (rb_gc_location((VALUE)key) != (VALUE)key) {
12878 return ST_REPLACE;
12879 }
12880
12881 VALUE *values = (VALUE *)value;
12882 VALUE size = values[0];
12883
12884 for (VALUE index = 1; index <= size; index++) {
12885 VALUE val = values[index];
12886 if (rb_gc_location(val) != val) {
12887 return ST_REPLACE;
12888 }
12889 }
12890
12891 return ST_CONTINUE;
12892}
12893
12894static void
12895wmap_compact(void *ptr)
12896{
12897 struct weakmap *w = ptr;
12898 if (w->wmap2obj) rb_gc_update_tbl_refs(w->wmap2obj);
12899 if (w->obj2wmap) st_foreach_with_replace(w->obj2wmap, wmap_foreach_replace, wmap_replace_ref, (st_data_t)NULL);
12900 w->final = rb_gc_location(w->final);
12901}
12902
12903static void
12904wmap_mark(void *ptr)
12905{
12906 struct weakmap *w = ptr;
12907#if WMAP_DELETE_DEAD_OBJECT_IN_MARK
12908 if (w->obj2wmap) st_foreach(w->obj2wmap, wmap_mark_map, (st_data_t)&rb_objspace);
12909#endif
12910 rb_gc_mark_movable(w->final);
12911}
12912
12913static int
12914wmap_free_map(st_data_t key, st_data_t val, st_data_t arg)
12915{
12916 VALUE *ptr = (VALUE *)val;
12917 ruby_sized_xfree(ptr, (ptr[0] + 1) * sizeof(VALUE));
12918 return ST_CONTINUE;
12919}
12920
12921static void
12922wmap_free(void *ptr)
12923{
12924 struct weakmap *w = ptr;
12925 st_foreach(w->obj2wmap, wmap_free_map, 0);
12926 st_free_table(w->obj2wmap);
12927 st_free_table(w->wmap2obj);
12928 xfree(w);
12929}
12930
12931static int
12932wmap_memsize_map(st_data_t key, st_data_t val, st_data_t arg)
12933{
12934 VALUE *ptr = (VALUE *)val;
12935 *(size_t *)arg += (ptr[0] + 1) * sizeof(VALUE);
12936 return ST_CONTINUE;
12937}
12938
12939static size_t
12940wmap_memsize(const void *ptr)
12941{
12942 size_t size;
12943 const struct weakmap *w = ptr;
12944 size = sizeof(*w);
12945 size += st_memsize(w->obj2wmap);
12946 size += st_memsize(w->wmap2obj);
12947 st_foreach(w->obj2wmap, wmap_memsize_map, (st_data_t)&size);
12948 return size;
12949}
12950
12951static const rb_data_type_t weakmap_type = {
12952 "weakmap",
12953 {
12954 wmap_mark,
12955 wmap_free,
12956 wmap_memsize,
12957 wmap_compact,
12958 },
12959 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
12960};
12961
12962static VALUE wmap_finalize(RB_BLOCK_CALL_FUNC_ARGLIST(objid, self));
12963
12964static VALUE
12965wmap_allocate(VALUE klass)
12966{
12967 struct weakmap *w;
12968 VALUE obj = TypedData_Make_Struct(klass, struct weakmap, &weakmap_type, w);
12969 w->obj2wmap = rb_init_identtable();
12970 w->wmap2obj = rb_init_identtable();
12971 w->final = rb_func_lambda_new(wmap_finalize, obj, 1, 1);
12972 return obj;
12973}
12974
12975static int
12976wmap_live_p(rb_objspace_t *objspace, VALUE obj)
12977{
12978 if (SPECIAL_CONST_P(obj)) return TRUE;
12979 /* If is_pointer_to_heap returns false, the page could be in the tomb heap
12980 * or have already been freed. */
12981 if (!is_pointer_to_heap(objspace, (void *)obj)) return FALSE;
12982
12983 void *poisoned = asan_unpoison_object_temporary(obj);
12984
12985 enum ruby_value_type t = BUILTIN_TYPE(obj);
12986 int ret = (!(t == T_NONE || t >= T_FIXNUM || t == T_ICLASS) &&
12987 is_live_object(objspace, obj));
12988
12989 if (poisoned) {
12990 asan_poison_object(obj);
12991 }
12992
12993 return ret;
12994}
12995
12996static int
12997wmap_remove_inverse_ref(st_data_t *key, st_data_t *val, st_data_t arg, int existing)
12998{
12999 if (!existing) return ST_STOP;
13000
13001 VALUE old_ref = (VALUE)arg;
13002
13003 VALUE *values = (VALUE *)*val;
13004 VALUE size = values[0];
13005
13006 if (size == 1) {
13007 // fast path, we only had one backref
13008 RUBY_ASSERT(values[1] == old_ref);
13009 ruby_sized_xfree(values, 2 * sizeof(VALUE));
13010 return ST_DELETE;
13011 }
13012
13013 bool found = false;
13014 VALUE index = 1;
13015 for (; index <= size; index++) {
13016 if (values[index] == old_ref) {
13017 found = true;
13018 break;
13019 }
13020 }
13021 if (!found) return ST_STOP;
13022
13023 if (size > index) {
13024 MEMMOVE(&values[index], &values[index + 1], VALUE, size - index);
13025 }
13026
13027 size -= 1;
13028 values[0] = size;
13029 SIZED_REALLOC_N(values, VALUE, size + 1, size + 2);
13030 *val = (st_data_t)values;
13031 return ST_CONTINUE;
13032}
13033
13034/* :nodoc: */
13035static VALUE
13036wmap_finalize(RB_BLOCK_CALL_FUNC_ARGLIST(objid, self))
13037{
13038 st_data_t orig, wmap, data;
13039 VALUE obj, *rids, i, size;
13040 struct weakmap *w;
13041
13042 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
13043 /* Get reference from object id. */
13044 if (UNDEF_P(obj = id2ref_obj_tbl(&rb_objspace, objid))) {
13045 rb_bug("wmap_finalize: objid is not found.");
13046 }
13047
13048 /* obj is original referenced object and/or weak reference. */
13049 orig = (st_data_t)obj;
13050 if (st_delete(w->obj2wmap, &orig, &data)) {
13051 rids = (VALUE *)data;
13052 size = *rids++;
13053 for (i = 0; i < size; ++i) {
13054 wmap = (st_data_t)rids[i];
13055 st_delete(w->wmap2obj, &wmap, NULL);
13056 }
13057 ruby_sized_xfree((VALUE *)data, (size + 1) * sizeof(VALUE));
13058 }
13059
13060 wmap = (st_data_t)obj;
13061 if (st_delete(w->wmap2obj, &wmap, &orig)) {
13062 wmap = (st_data_t)obj;
13063 st_update(w->obj2wmap, orig, wmap_remove_inverse_ref, wmap);
13064 }
13065 return self;
13066}
13067
13068struct wmap_iter_arg {
13069    rb_objspace_t *objspace;
13070 VALUE value;
13071};
13072
13073static VALUE
13074wmap_inspect_append(rb_objspace_t *objspace, VALUE str, VALUE obj)
13075{
13076 if (SPECIAL_CONST_P(obj)) {
13077 return rb_str_append(str, rb_inspect(obj));
13078 }
13079 else if (wmap_live_p(objspace, obj)) {
13080 return rb_str_append(str, rb_any_to_s(obj));
13081 }
13082 else {
13083 return rb_str_catf(str, "#<collected:%p>", (void*)obj);
13084 }
13085}
13086
13087static int
13088wmap_inspect_i(st_data_t key, st_data_t val, st_data_t arg)
13089{
13090 struct wmap_iter_arg *argp = (struct wmap_iter_arg *)arg;
13091 rb_objspace_t *objspace = argp->objspace;
13092 VALUE str = argp->value;
13093 VALUE k = (VALUE)key, v = (VALUE)val;
13094
13095 if (RSTRING_PTR(str)[0] == '#') {
13096 rb_str_cat2(str, ", ");
13097 }
13098 else {
13099 rb_str_cat2(str, ": ");
13100 RSTRING_PTR(str)[0] = '#';
13101 }
13102 wmap_inspect_append(objspace, str, k);
13103 rb_str_cat2(str, " => ");
13104 wmap_inspect_append(objspace, str, v);
13105
13106 return ST_CONTINUE;
13107}
13108
13109static VALUE
13110wmap_inspect(VALUE self)
13111{
13112 VALUE str;
13113 VALUE c = rb_class_name(CLASS_OF(self));
13114 struct weakmap *w;
13115 struct wmap_iter_arg args;
13116
13117 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
13118 str = rb_sprintf("-<%"PRIsVALUE":%p", c, (void *)self);
13119 if (w->wmap2obj) {
13120 args.objspace = &rb_objspace;
13121 args.value = str;
13122 st_foreach(w->wmap2obj, wmap_inspect_i, (st_data_t)&args);
13123 }
13124 RSTRING_PTR(str)[0] = '#';
13125 rb_str_cat2(str, ">");
13126 return str;
13127}
13128
13129static inline bool
13130wmap_live_entry_p(rb_objspace_t *objspace, st_data_t key, st_data_t val)
13131{
13132 return wmap_live_p(objspace, (VALUE)key) && wmap_live_p(objspace, (VALUE)val);
13133}
13134
13135static int
13136wmap_each_i(st_data_t key, st_data_t val, st_data_t arg)
13137{
13138 rb_objspace_t *objspace = (rb_objspace_t *)arg;
13139
13140 if (wmap_live_entry_p(objspace, key, val)) {
13141 rb_yield_values(2, (VALUE)key, (VALUE)val);
13142 return ST_CONTINUE;
13143 }
13144 else {
13145 return ST_DELETE;
13146 }
13147}
13148
13149/* Iterates over the map, yielding each live key-value pair */
13150static VALUE
13151wmap_each(VALUE self)
13152{
13153 struct weakmap *w;
13154 rb_objspace_t *objspace = &rb_objspace;
13155
13156 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
13157 st_foreach(w->wmap2obj, wmap_each_i, (st_data_t)objspace);
13158 return self;
13159}
13160
13161static int
13162wmap_each_key_i(st_data_t key, st_data_t val, st_data_t arg)
13163{
13164 rb_objspace_t *objspace = (rb_objspace_t *)arg;
13165
13166 if (wmap_live_entry_p(objspace, key, val)) {
13167 rb_yield((VALUE)key);
13168 return ST_CONTINUE;
13169 }
13170 else {
13171 return ST_DELETE;
13172 }
13173}
13174
13175/* Iterates over the keys, yielding each live key */
13176static VALUE
13177wmap_each_key(VALUE self)
13178{
13179 struct weakmap *w;
13180 rb_objspace_t *objspace = &rb_objspace;
13181
13182 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
13183 st_foreach(w->wmap2obj, wmap_each_key_i, (st_data_t)objspace);
13184 return self;
13185}
13186
13187static int
13188wmap_each_value_i(st_data_t key, st_data_t val, st_data_t arg)
13189{
13190 rb_objspace_t *objspace = (rb_objspace_t *)arg;
13191
13192 if (wmap_live_entry_p(objspace, key, val)) {
13193 rb_yield((VALUE)val);
13194 return ST_CONTINUE;
13195 }
13196 else {
13197 return ST_DELETE;
13198 }
13199}
13200
13201/* Iterates over the values, yielding each live value */
13202static VALUE
13203wmap_each_value(VALUE self)
13204{
13205 struct weakmap *w;
13206 rb_objspace_t *objspace = &rb_objspace;
13207
13208 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
13209 st_foreach(w->wmap2obj, wmap_each_value_i, (st_data_t)objspace);
13210 return self;
13211}
13212
13213static int
13214wmap_keys_i(st_data_t key, st_data_t val, st_data_t arg)
13215{
13216 struct wmap_iter_arg *argp = (struct wmap_iter_arg *)arg;
13217 rb_objspace_t *objspace = argp->objspace;
13218 VALUE ary = argp->value;
13219
13220 if (wmap_live_entry_p(objspace, key, val)) {
13221 rb_ary_push(ary, (VALUE)key);
13222 return ST_CONTINUE;
13223 }
13224 else {
13225 return ST_DELETE;
13226 }
13227}
13228
13229/* Returns an array of the live keys */
13230static VALUE
13231wmap_keys(VALUE self)
13232{
13233 struct weakmap *w;
13234 struct wmap_iter_arg args;
13235
13236 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
13237 args.objspace = &rb_objspace;
13238 args.value = rb_ary_new();
13239 st_foreach(w->wmap2obj, wmap_keys_i, (st_data_t)&args);
13240 return args.value;
13241}
13242
13243static int
13244wmap_values_i(st_data_t key, st_data_t val, st_data_t arg)
13245{
13246 struct wmap_iter_arg *argp = (struct wmap_iter_arg *)arg;
13247 rb_objspace_t *objspace = argp->objspace;
13248 VALUE ary = argp->value;
13249
13250 if (wmap_live_entry_p(objspace, key, val)) {
13251 rb_ary_push(ary, (VALUE)val);
13252 return ST_CONTINUE;
13253 }
13254 else {
13255 return ST_DELETE;
13256 }
13257}
13258
13259/* Returns an array of the live values */
13260static VALUE
13261wmap_values(VALUE self)
13262{
13263 struct weakmap *w;
13264 struct wmap_iter_arg args;
13265
13266 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
13267 args.objspace = &rb_objspace;
13268 args.value = rb_ary_new();
13269 st_foreach(w->wmap2obj, wmap_values_i, (st_data_t)&args);
13270 return args.value;
13271}
13272
13273static int
13274wmap_aset_update(st_data_t *key, st_data_t *val, st_data_t arg, int existing)
13275{
13276 VALUE size, *ptr, *optr;
13277 if (existing) {
13278 size = (ptr = optr = (VALUE *)*val)[0];
13279
13280 for (VALUE index = 1; index <= size; index++) {
13281 if (ptr[index] == (VALUE)arg) {
13282 // The reference was already registered.
13283 return ST_STOP;
13284 }
13285 }
13286
13287 ++size;
13288 SIZED_REALLOC_N(ptr, VALUE, size + 1, size);
13289 }
13290 else {
13291 optr = 0;
13292 size = 1;
13293 ptr = ruby_xmalloc0(2 * sizeof(VALUE));
13294 }
13295 ptr[0] = size;
13296 ptr[size] = (VALUE)arg;
13297 if (ptr == optr) return ST_STOP;
13298 *val = (st_data_t)ptr;
13299 return ST_CONTINUE;
13300}
13301
13302struct wmap_aset_replace_args {
13303    VALUE new_value;
13304 VALUE old_value;
13305};
13306
13307static int
13308wmap_aset_replace_value(st_data_t *key, st_data_t *val, st_data_t _args, int existing)
13309{
13310 struct wmap_aset_replace_args *args = (struct wmap_aset_replace_args *)_args;
13311
13312 if (existing) {
13313 args->old_value = *val;
13314 }
13315 *val = (st_data_t)args->new_value;
13316 return ST_CONTINUE;
13317}
13318
13319/* Creates a weak reference from the given key to the given value */
13320static VALUE
13321wmap_aset(VALUE self, VALUE key, VALUE value)
13322{
13323 struct weakmap *w;
13324
13325 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
13326 if (FL_ABLE(value)) {
13327 define_final0(value, w->final);
13328 }
13329 if (FL_ABLE(key)) {
13330 define_final0(key, w->final);
13331 }
13332
13333 struct wmap_aset_replace_args aset_args = {
13334 .new_value = value,
13335 .old_value = Qundef,
13336 };
13337 st_update(w->wmap2obj, (st_data_t)key, wmap_aset_replace_value, (st_data_t)&aset_args);
13338
13339 // If the value is unchanged, we have nothing to do.
13340 if (value != aset_args.old_value) {
13341 if (!UNDEF_P(aset_args.old_value) && FL_ABLE(aset_args.old_value)) {
13342            // The key already existed with a different value, so clear the outdated inverse reference.
13343 st_update(w->obj2wmap, (st_data_t)aset_args.old_value, wmap_remove_inverse_ref, key);
13344 }
13345
13346 if (FL_ABLE(value)) {
13347 // If the value has no finalizer, we don't need to keep the inverse reference
13348 st_update(w->obj2wmap, (st_data_t)value, wmap_aset_update, key);
13349 }
13350 }
13351
13352 return nonspecial_obj_id(value);
13353}
13354
13355/* Retrieves a weakly referenced object with the given key */
13356static VALUE
13357wmap_lookup(VALUE self, VALUE key)
13358{
13359 st_data_t data;
13360 VALUE obj;
13361 struct weakmap *w;
13362 rb_objspace_t *objspace = &rb_objspace;
13363 GC_ASSERT(wmap_live_p(objspace, key));
13364
13365 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
13366 if (!st_lookup(w->wmap2obj, (st_data_t)key, &data)) return Qundef;
13367 obj = (VALUE)data;
13368 if (!wmap_live_p(objspace, obj)) return Qundef;
13369 return obj;
13370}
13371
13372/* Retrieves a weakly referenced object with the given key */
13373static VALUE
13374wmap_aref(VALUE self, VALUE key)
13375{
13376 VALUE obj = wmap_lookup(self, key);
13377 return !UNDEF_P(obj) ? obj : Qnil;
13378}
13379
13380/* Returns +true+ if +key+ is registered */
13381static VALUE
13382wmap_has_key(VALUE self, VALUE key)
13383{
13384 return RBOOL(!UNDEF_P(wmap_lookup(self, key)));
13385}
13386
13387/* Returns the number of referenced objects */
13388static VALUE
13389wmap_size(VALUE self)
13390{
13391 struct weakmap *w;
13392 st_index_t n;
13393
13394 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
13395 n = w->wmap2obj->num_entries;
13396#if SIZEOF_ST_INDEX_T <= SIZEOF_LONG
13397 return ULONG2NUM(n);
13398#else
13399 return ULL2NUM(n);
13400#endif
13401}
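/*
 * Illustrative Ruby-level usage of the methods above (editorial note, not
 * part of the original source):
 *
 *   wm = ObjectSpace::WeakMap.new
 *   key = Object.new
 *   wm[key] = "payload"
 *   wm[key]        #=> "payload"
 *   wm.key?(key)   #=> true
 *   # once key (or the value) has been garbage collected, the entry is gone
 *
 * Exactly when an entry disappears depends on when GC runs.
 */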
13402
13403/*
13404 ------------------------------ GC profiler ------------------------------
13405*/
13406
13407#define GC_PROFILE_RECORD_DEFAULT_SIZE 100
13408
13409static bool
13410current_process_time(struct timespec *ts)
13411{
13412#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_PROCESS_CPUTIME_ID)
13413 {
13414 static int try_clock_gettime = 1;
13415 if (try_clock_gettime && clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ts) == 0) {
13416 return true;
13417 }
13418 else {
13419 try_clock_gettime = 0;
13420 }
13421 }
13422#endif
13423
13424#ifdef RUSAGE_SELF
13425 {
13426 struct rusage usage;
13427 struct timeval time;
13428 if (getrusage(RUSAGE_SELF, &usage) == 0) {
13429 time = usage.ru_utime;
13430 ts->tv_sec = time.tv_sec;
13431 ts->tv_nsec = (int32_t)time.tv_usec * 1000;
13432 return true;
13433 }
13434 }
13435#endif
13436
13437#ifdef _WIN32
13438 {
13439 FILETIME creation_time, exit_time, kernel_time, user_time;
13440 ULARGE_INTEGER ui;
13441
13442 if (GetProcessTimes(GetCurrentProcess(),
13443 &creation_time, &exit_time, &kernel_time, &user_time) != 0) {
13444 memcpy(&ui, &user_time, sizeof(FILETIME));
13445#define PER100NSEC (uint64_t)(1000 * 1000 * 10)
13446 ts->tv_nsec = (long)(ui.QuadPart % PER100NSEC);
13447 ts->tv_sec = (time_t)(ui.QuadPart / PER100NSEC);
13448 return true;
13449 }
13450 }
13451#endif
13452
13453 return false;
13454}
13455
13456static double
13457getrusage_time(void)
13458{
13459 struct timespec ts;
13460 if (current_process_time(&ts)) {
13461 return ts.tv_sec + ts.tv_nsec * 1e-9;
13462 }
13463 else {
13464 return 0.0;
13465 }
13466}
13467
13468
13469static inline void
13470gc_prof_setup_new_record(rb_objspace_t *objspace, unsigned int reason)
13471{
13472 if (objspace->profile.run) {
13473 size_t index = objspace->profile.next_index;
13474 gc_profile_record *record;
13475
13476 /* create new record */
13477 objspace->profile.next_index++;
13478
13479 if (!objspace->profile.records) {
13480 objspace->profile.size = GC_PROFILE_RECORD_DEFAULT_SIZE;
13481 objspace->profile.records = malloc(xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));
13482 }
13483 if (index >= objspace->profile.size) {
13484 void *ptr;
13485 objspace->profile.size += 1000;
13486 ptr = realloc(objspace->profile.records, xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));
13487 if (!ptr) rb_memerror();
13488 objspace->profile.records = ptr;
13489 }
13490 if (!objspace->profile.records) {
13491 rb_bug("gc_profile malloc or realloc miss");
13492 }
13493 record = objspace->profile.current_record = &objspace->profile.records[objspace->profile.next_index - 1];
13494 MEMZERO(record, gc_profile_record, 1);
13495
13496 /* setup before-GC parameter */
13497 record->flags = reason | (ruby_gc_stressful ? GPR_FLAG_STRESS : 0);
13498#if MALLOC_ALLOCATED_SIZE
13499 record->allocated_size = malloc_allocated_size;
13500#endif
13501#if GC_PROFILE_MORE_DETAIL && GC_PROFILE_DETAIL_MEMORY
13502#ifdef RUSAGE_SELF
13503 {
13504 struct rusage usage;
13505 if (getrusage(RUSAGE_SELF, &usage) == 0) {
13506 record->maxrss = usage.ru_maxrss;
13507 record->minflt = usage.ru_minflt;
13508 record->majflt = usage.ru_majflt;
13509 }
13510 }
13511#endif
13512#endif
13513 }
13514}
13515
13516static inline void
13517gc_prof_timer_start(rb_objspace_t *objspace)
13518{
13519 if (gc_prof_enabled(objspace)) {
13520 gc_profile_record *record = gc_prof_record(objspace);
13521#if GC_PROFILE_MORE_DETAIL
13522 record->prepare_time = objspace->profile.prepare_time;
13523#endif
13524 record->gc_time = 0;
13525 record->gc_invoke_time = getrusage_time();
13526 }
13527}
13528
13529static double
13530elapsed_time_from(double time)
13531{
13532 double now = getrusage_time();
13533 if (now > time) {
13534 return now - time;
13535 }
13536 else {
13537 return 0;
13538 }
13539}
13540
13541static inline void
13542gc_prof_timer_stop(rb_objspace_t *objspace)
13543{
13544 if (gc_prof_enabled(objspace)) {
13545 gc_profile_record *record = gc_prof_record(objspace);
13546 record->gc_time = elapsed_time_from(record->gc_invoke_time);
13547 record->gc_invoke_time -= objspace->profile.invoke_time;
13548 }
13549}
13550
13551#define RUBY_DTRACE_GC_HOOK(name) \
13552 do {if (RUBY_DTRACE_GC_##name##_ENABLED()) RUBY_DTRACE_GC_##name();} while (0)
13553static inline void
13554gc_prof_mark_timer_start(rb_objspace_t *objspace)
13555{
13556 RUBY_DTRACE_GC_HOOK(MARK_BEGIN);
13557#if GC_PROFILE_MORE_DETAIL
13558 if (gc_prof_enabled(objspace)) {
13559 gc_prof_record(objspace)->gc_mark_time = getrusage_time();
13560 }
13561#endif
13562}
13563
13564static inline void
13565gc_prof_mark_timer_stop(rb_objspace_t *objspace)
13566{
13567 RUBY_DTRACE_GC_HOOK(MARK_END);
13568#if GC_PROFILE_MORE_DETAIL
13569 if (gc_prof_enabled(objspace)) {
13570 gc_profile_record *record = gc_prof_record(objspace);
13571 record->gc_mark_time = elapsed_time_from(record->gc_mark_time);
13572 }
13573#endif
13574}
13575
13576static inline void
13577gc_prof_sweep_timer_start(rb_objspace_t *objspace)
13578{
13579 RUBY_DTRACE_GC_HOOK(SWEEP_BEGIN);
13580 if (gc_prof_enabled(objspace)) {
13581 gc_profile_record *record = gc_prof_record(objspace);
13582
13583 if (record->gc_time > 0 || GC_PROFILE_MORE_DETAIL) {
13584 objspace->profile.gc_sweep_start_time = getrusage_time();
13585 }
13586 }
13587}
13588
13589static inline void
13590gc_prof_sweep_timer_stop(rb_objspace_t *objspace)
13591{
13592 RUBY_DTRACE_GC_HOOK(SWEEP_END);
13593
13594 if (gc_prof_enabled(objspace)) {
13595 double sweep_time;
13596 gc_profile_record *record = gc_prof_record(objspace);
13597
13598 if (record->gc_time > 0) {
13599 sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
13600 /* need to accumulate GC time for lazy sweep after gc() */
13601 record->gc_time += sweep_time;
13602 }
13603 else if (GC_PROFILE_MORE_DETAIL) {
13604 sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
13605 }
13606
13607#if GC_PROFILE_MORE_DETAIL
13608 record->gc_sweep_time += sweep_time;
13609 if (heap_pages_deferred_final) record->flags |= GPR_FLAG_HAVE_FINALIZE;
13610#endif
13611 if (heap_pages_deferred_final) objspace->profile.latest_gc_info |= GPR_FLAG_HAVE_FINALIZE;
13612 }
13613}
13614
13615static inline void
13616gc_prof_set_malloc_info(rb_objspace_t *objspace)
13617{
13618#if GC_PROFILE_MORE_DETAIL
13619 if (gc_prof_enabled(objspace)) {
13620 gc_profile_record *record = gc_prof_record(objspace);
13621 record->allocate_increase = malloc_increase;
13622 record->allocate_limit = malloc_limit;
13623 }
13624#endif
13625}
13626
13627static inline void
13628gc_prof_set_heap_info(rb_objspace_t *objspace)
13629{
13630 if (gc_prof_enabled(objspace)) {
13631 gc_profile_record *record = gc_prof_record(objspace);
13632 size_t live = objspace->profile.total_allocated_objects_at_gc_start - objspace->profile.total_freed_objects;
13633 size_t total = objspace->profile.heap_used_at_gc_start * HEAP_PAGE_OBJ_LIMIT;
13634
13635#if GC_PROFILE_MORE_DETAIL
13636 record->heap_use_pages = objspace->profile.heap_used_at_gc_start;
13637 record->heap_live_objects = live;
13638 record->heap_free_objects = total - live;
13639#endif
13640
13641 record->heap_total_objects = total;
13642 record->heap_use_size = live * sizeof(RVALUE);
13643 record->heap_total_size = total * sizeof(RVALUE);
13644 }
13645}
13646
13647/*
13648 * call-seq:
13649 * GC::Profiler.clear -> nil
13650 *
13651 * Clears the \GC profiler data.
13652 *
13653 */
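/*
 * (Editorial usage sketch; the record count shown is illustrative.)
 *
 *   GC::Profiler.enable
 *   GC.start
 *   GC::Profiler.raw_data.size   #=> 1 or more
 *   GC::Profiler.clear
 *   GC::Profiler.raw_data        #=> []
 */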
13654
13655static VALUE
13656gc_profile_clear(VALUE _)
13657{
13658 rb_objspace_t *objspace = &rb_objspace;
13659 void *p = objspace->profile.records;
13660 objspace->profile.records = NULL;
13661 objspace->profile.size = 0;
13662 objspace->profile.next_index = 0;
13663 objspace->profile.current_record = 0;
13664 if (p) {
13665 free(p);
13666 }
13667 return Qnil;
13668}
13669
13670/*
13671 * call-seq:
13672 * GC::Profiler.raw_data -> [Hash, ...]
13673 *
13674 * Returns an Array of individual raw profile data Hashes ordered
13675 * from earliest to latest by +:GC_INVOKE_TIME+.
13676 *
13677 * For example:
13678 *
13679 * [
13680 * {
13681 * :GC_TIME=>1.3000000000000858e-05,
13682 * :GC_INVOKE_TIME=>0.010634999999999999,
13683 * :HEAP_USE_SIZE=>289640,
13684 * :HEAP_TOTAL_SIZE=>588960,
13685 * :HEAP_TOTAL_OBJECTS=>14724,
13686 * :GC_IS_MARKED=>false
13687 * },
13688 * # ...
13689 * ]
13690 *
13691 * The keys mean:
13692 *
13693 * +:GC_TIME+::
13694 * Time elapsed in seconds for this GC run
13695 * +:GC_INVOKE_TIME+::
13696 * Time elapsed in seconds from startup to when the GC was invoked
13697 * +:HEAP_USE_SIZE+::
13698 * Total bytes of heap used
13699 * +:HEAP_TOTAL_SIZE+::
13700 * Total size of heap in bytes
13701 * +:HEAP_TOTAL_OBJECTS+::
13702 * Total number of objects
13703 * +:GC_IS_MARKED+::
13704 * Returns +true+ if the GC is in mark phase
13705 *
13706 * If ruby was built with +GC_PROFILE_MORE_DETAIL+, you will also have access
13707 * to the following hash keys:
13708 *
13709 * +:GC_MARK_TIME+::
13710 * +:GC_SWEEP_TIME+::
13711 * +:ALLOCATE_INCREASE+::
13712 * +:ALLOCATE_LIMIT+::
13713 * +:HEAP_USE_PAGES+::
13714 * +:HEAP_LIVE_OBJECTS+::
13715 * +:HEAP_FREE_OBJECTS+::
13716 * +:HAVE_FINALIZE+::
13717 *
13718 */
13719
13720static VALUE
13721gc_profile_record_get(VALUE _)
13722{
13723 VALUE prof;
13724 VALUE gc_profile = rb_ary_new();
13725 size_t i;
13726 rb_objspace_t *objspace = (&rb_objspace);
13727
13728 if (!objspace->profile.run) {
13729 return Qnil;
13730 }
13731
13732 for (i = 0; i < objspace->profile.next_index; i++) {
13733 gc_profile_record *record = &objspace->profile.records[i];
13734
13735 prof = rb_hash_new();
13736 rb_hash_aset(prof, ID2SYM(rb_intern("GC_FLAGS")), gc_info_decode(0, rb_hash_new(), record->flags));
13737 rb_hash_aset(prof, ID2SYM(rb_intern("GC_TIME")), DBL2NUM(record->gc_time));
13738 rb_hash_aset(prof, ID2SYM(rb_intern("GC_INVOKE_TIME")), DBL2NUM(record->gc_invoke_time));
13739 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SIZE")), SIZET2NUM(record->heap_use_size));
13740 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_SIZE")), SIZET2NUM(record->heap_total_size));
13741 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_OBJECTS")), SIZET2NUM(record->heap_total_objects));
13742 rb_hash_aset(prof, ID2SYM(rb_intern("MOVED_OBJECTS")), SIZET2NUM(record->moved_objects));
13743 rb_hash_aset(prof, ID2SYM(rb_intern("GC_IS_MARKED")), Qtrue);
13744#if GC_PROFILE_MORE_DETAIL
13745 rb_hash_aset(prof, ID2SYM(rb_intern("GC_MARK_TIME")), DBL2NUM(record->gc_mark_time));
13746 rb_hash_aset(prof, ID2SYM(rb_intern("GC_SWEEP_TIME")), DBL2NUM(record->gc_sweep_time));
13747 rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_INCREASE")), SIZET2NUM(record->allocate_increase));
13748 rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_LIMIT")), SIZET2NUM(record->allocate_limit));
13749 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_PAGES")), SIZET2NUM(record->heap_use_pages));
13750 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_LIVE_OBJECTS")), SIZET2NUM(record->heap_live_objects));
13751 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_FREE_OBJECTS")), SIZET2NUM(record->heap_free_objects));
13752
13753 rb_hash_aset(prof, ID2SYM(rb_intern("REMOVING_OBJECTS")), SIZET2NUM(record->removing_objects));
13754 rb_hash_aset(prof, ID2SYM(rb_intern("EMPTY_OBJECTS")), SIZET2NUM(record->empty_objects));
13755
13756 rb_hash_aset(prof, ID2SYM(rb_intern("HAVE_FINALIZE")), RBOOL(record->flags & GPR_FLAG_HAVE_FINALIZE));
13757#endif
13758
13759#if RGENGC_PROFILE > 0
13760 rb_hash_aset(prof, ID2SYM(rb_intern("OLD_OBJECTS")), SIZET2NUM(record->old_objects));
13761 rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_NORMAL_OBJECTS")), SIZET2NUM(record->remembered_normal_objects));
13762 rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_SHADY_OBJECTS")), SIZET2NUM(record->remembered_shady_objects));
13763#endif
13764 rb_ary_push(gc_profile, prof);
13765 }
13766
13767 return gc_profile;
13768}
13769
13770#if GC_PROFILE_MORE_DETAIL
13771#define MAJOR_REASON_MAX 0x10
13772
13773static char *
13774gc_profile_dump_major_reason(unsigned int flags, char *buff)
13775{
13776 unsigned int reason = flags & GPR_FLAG_MAJOR_MASK;
13777 int i = 0;
13778
13779 if (reason == GPR_FLAG_NONE) {
13780 buff[0] = '-';
13781 buff[1] = 0;
13782 }
13783 else {
13784#define C(x, s) \
13785 if (reason & GPR_FLAG_MAJOR_BY_##x) { \
13786 buff[i++] = #x[0]; \
13787 if (i >= MAJOR_REASON_MAX) rb_bug("gc_profile_dump_major_reason: overflow"); \
13788 buff[i] = 0; \
13789 }
13790 C(NOFREE, N);
13791 C(OLDGEN, O);
13792 C(SHADY, S);
13793#if RGENGC_ESTIMATE_OLDMALLOC
13794 C(OLDMALLOC, M);
13795#endif
13796#undef C
13797 }
13798 return buff;
13799}
13800#endif
13801
13802static void
13803gc_profile_dump_on(VALUE out, VALUE (*append)(VALUE, VALUE))
13804{
13805 rb_objspace_t *objspace = &rb_objspace;
13806 size_t count = objspace->profile.next_index;
13807#ifdef MAJOR_REASON_MAX
13808 char reason_str[MAJOR_REASON_MAX];
13809#endif
13810
13811 if (objspace->profile.run && count /* > 1 */) {
13812 size_t i;
13813 const gc_profile_record *record;
13814
13815 append(out, rb_sprintf("GC %"PRIuSIZE" invokes.\n", objspace->profile.count));
13816 append(out, rb_str_new_cstr("Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC Time(ms)\n"));
13817
13818 for (i = 0; i < count; i++) {
13819 record = &objspace->profile.records[i];
13820 append(out, rb_sprintf("%5"PRIuSIZE" %19.3f %20"PRIuSIZE" %20"PRIuSIZE" %20"PRIuSIZE" %30.20f\n",
13821 i+1, record->gc_invoke_time, record->heap_use_size,
13822 record->heap_total_size, record->heap_total_objects, record->gc_time*1000));
13823 }
13824
13825#if GC_PROFILE_MORE_DETAIL
13826 const char *str = "\n\n" \
13827 "More detail.\n" \
13828 "Prepare Time = Previous GC's rest sweep time\n"
13829 "Index Flags Allocate Inc. Allocate Limit"
13830#if CALC_EXACT_MALLOC_SIZE
13831 " Allocated Size"
13832#endif
13833 " Use Page Mark Time(ms) Sweep Time(ms) Prepare Time(ms) LivingObj FreeObj RemovedObj EmptyObj"
13834#if RGENGC_PROFILE
13835 " OldgenObj RemNormObj RemShadObj"
13836#endif
13837#if GC_PROFILE_DETAIL_MEMORY
13838 " MaxRSS(KB) MinorFLT MajorFLT"
13839#endif
13840 "\n";
13841 append(out, rb_str_new_cstr(str));
13842
13843 for (i = 0; i < count; i++) {
13844 record = &objspace->profile.records[i];
13845 append(out, rb_sprintf("%5"PRIuSIZE" %4s/%c/%6s%c %13"PRIuSIZE" %15"PRIuSIZE
13846#if CALC_EXACT_MALLOC_SIZE
13847 " %15"PRIuSIZE
13848#endif
13849 " %9"PRIuSIZE" %17.12f %17.12f %17.12f %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
13850#if RGENGC_PROFILE
13851 "%10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
13852#endif
13853#if GC_PROFILE_DETAIL_MEMORY
13854 "%11ld %8ld %8ld"
13855#endif
13856
13857 "\n",
13858 i+1,
13859 gc_profile_dump_major_reason(record->flags, reason_str),
13860 (record->flags & GPR_FLAG_HAVE_FINALIZE) ? 'F' : '.',
13861 (record->flags & GPR_FLAG_NEWOBJ) ? "NEWOBJ" :
13862 (record->flags & GPR_FLAG_MALLOC) ? "MALLOC" :
13863 (record->flags & GPR_FLAG_METHOD) ? "METHOD" :
13864 (record->flags & GPR_FLAG_CAPI) ? "CAPI__" : "??????",
13865 (record->flags & GPR_FLAG_STRESS) ? '!' : ' ',
13866 record->allocate_increase, record->allocate_limit,
13867#if CALC_EXACT_MALLOC_SIZE
13868 record->allocated_size,
13869#endif
13870 record->heap_use_pages,
13871 record->gc_mark_time*1000,
13872 record->gc_sweep_time*1000,
13873 record->prepare_time*1000,
13874
13875 record->heap_live_objects,
13876 record->heap_free_objects,
13877 record->removing_objects,
13878 record->empty_objects
13879#if RGENGC_PROFILE
13880 ,
13881 record->old_objects,
13882 record->remembered_normal_objects,
13883 record->remembered_shady_objects
13884#endif
13885#if GC_PROFILE_DETAIL_MEMORY
13886 ,
13887 record->maxrss / 1024,
13888 record->minflt,
13889 record->majflt
13890#endif
13891
13892 ));
13893 }
13894#endif
13895 }
13896}
13897
13898/*
13899 * call-seq:
13900 * GC::Profiler.result -> String
13901 *
13902 * Returns a profile data report such as:
13903 *
13904 * GC 1 invokes.
13905 * Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC time(ms)
13906 * 1 0.012 159240 212940 10647 0.00000000000001530000
13907 */
13908
13909static VALUE
13910gc_profile_result(VALUE _)
13911{
13912 VALUE str = rb_str_buf_new(0);
13913 gc_profile_dump_on(str, rb_str_buf_append);
13914 return str;
13915}
13916
13917/*
13918 * call-seq:
13919 * GC::Profiler.report
13920 * GC::Profiler.report(io)
13921 *
13922 * Writes the GC::Profiler.result to <tt>$stdout</tt> or the given IO object.
13923 *
13924 */
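/*
 * (Editorial usage sketch; the file name below is hypothetical.)
 *
 *   GC::Profiler.enable
 *   # ... run the workload of interest ...
 *   GC::Profiler.report                                    # writes to $stdout
 *   File.open("gc_profile.txt", "w") {|io| GC::Profiler.report(io) }
 */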
13925
13926static VALUE
13927gc_profile_report(int argc, VALUE *argv, VALUE self)
13928{
13929 VALUE out;
13930
13931 out = (!rb_check_arity(argc, 0, 1) ? rb_stdout : argv[0]);
13932 gc_profile_dump_on(out, rb_io_write);
13933
13934 return Qnil;
13935}
13936
13937/*
13938 * call-seq:
13939 * GC::Profiler.total_time -> float
13940 *
13941 * The total time used for garbage collection in seconds
13942 */
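/*
 * (Editorial sketch; the printed value is illustrative and varies per run.)
 *
 *   GC::Profiler.enable
 *   100_000.times { Object.new }
 *   GC.start
 *   GC::Profiler.total_time   #=> 0.0007
 */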
13943
13944static VALUE
13945gc_profile_total_time(VALUE self)
13946{
13947 double time = 0;
13948 rb_objspace_t *objspace = &rb_objspace;
13949
13950 if (objspace->profile.run && objspace->profile.next_index > 0) {
13951 size_t i;
13952 size_t count = objspace->profile.next_index;
13953
13954 for (i = 0; i < count; i++) {
13955 time += objspace->profile.records[i].gc_time;
13956 }
13957 }
13958 return DBL2NUM(time);
13959}
13960
13961/*
13962 * call-seq:
13963 * GC::Profiler.enabled? -> true or false
13964 *
13965 * The current status of \GC profile mode.
13966 */
13967
13968static VALUE
13969gc_profile_enable_get(VALUE self)
13970{
13971 rb_objspace_t *objspace = &rb_objspace;
13972 return RBOOL(objspace->profile.run);
13973}
13974
13975/*
13976 * call-seq:
13977 * GC::Profiler.enable -> nil
13978 *
13979 * Starts the \GC profiler.
13980 *
13981 */
13982
13983static VALUE
13984gc_profile_enable(VALUE _)
13985{
13986 rb_objspace_t *objspace = &rb_objspace;
13987 objspace->profile.run = TRUE;
13988 objspace->profile.current_record = 0;
13989 return Qnil;
13990}
13991
13992/*
13993 * call-seq:
13994 * GC::Profiler.disable -> nil
13995 *
13996 * Stops the \GC profiler.
13997 *
13998 */
13999
14000static VALUE
14001gc_profile_disable(VALUE _)
14002{
14003 rb_objspace_t *objspace = &rb_objspace;
14004
14005 objspace->profile.run = FALSE;
14006 objspace->profile.current_record = 0;
14007 return Qnil;
14008}
14009
14010/*
14011 ------------------------------ DEBUG ------------------------------
14012*/
14013
14014static const char *
14015type_name(int type, VALUE obj)
14016{
14017 switch (type) {
14018#define TYPE_NAME(t) case (t): return #t;
14019 TYPE_NAME(T_NONE);
14020 TYPE_NAME(T_OBJECT);
14021 TYPE_NAME(T_CLASS);
14022 TYPE_NAME(T_MODULE);
14023 TYPE_NAME(T_FLOAT);
14024 TYPE_NAME(T_STRING);
14025 TYPE_NAME(T_REGEXP);
14026 TYPE_NAME(T_ARRAY);
14027 TYPE_NAME(T_HASH);
14028 TYPE_NAME(T_STRUCT);
14029 TYPE_NAME(T_BIGNUM);
14030 TYPE_NAME(T_FILE);
14031 TYPE_NAME(T_MATCH);
14032 TYPE_NAME(T_COMPLEX);
14033 TYPE_NAME(T_RATIONAL);
14034 TYPE_NAME(T_NIL);
14035 TYPE_NAME(T_TRUE);
14036 TYPE_NAME(T_FALSE);
14037 TYPE_NAME(T_SYMBOL);
14038 TYPE_NAME(T_FIXNUM);
14039 TYPE_NAME(T_UNDEF);
14040 TYPE_NAME(T_IMEMO);
14041 TYPE_NAME(T_ICLASS);
14042 TYPE_NAME(T_MOVED);
14043 TYPE_NAME(T_ZOMBIE);
14044 case T_DATA:
14045 if (obj && rb_objspace_data_type_name(obj)) {
14046 return rb_objspace_data_type_name(obj);
14047 }
14048 return "T_DATA";
14049#undef TYPE_NAME
14050 }
14051 return "unknown";
14052}
14053
14054static const char *
14055obj_type_name(VALUE obj)
14056{
14057 return type_name(TYPE(obj), obj);
14058}
14059
14060const char *
14061rb_method_type_name(rb_method_type_t type)
14062{
14063 switch (type) {
14064 case VM_METHOD_TYPE_ISEQ: return "iseq";
14065 case VM_METHOD_TYPE_ATTRSET: return "attrset";
14066 case VM_METHOD_TYPE_IVAR: return "ivar";
14067 case VM_METHOD_TYPE_BMETHOD: return "bmethod";
14068 case VM_METHOD_TYPE_ALIAS: return "alias";
14069 case VM_METHOD_TYPE_REFINED: return "refined";
14070 case VM_METHOD_TYPE_CFUNC: return "cfunc";
14071 case VM_METHOD_TYPE_ZSUPER: return "zsuper";
14072 case VM_METHOD_TYPE_MISSING: return "missing";
14073 case VM_METHOD_TYPE_OPTIMIZED: return "optimized";
14074 case VM_METHOD_TYPE_UNDEF: return "undef";
14075 case VM_METHOD_TYPE_NOTIMPLEMENTED: return "notimplemented";
14076 }
14077 rb_bug("rb_method_type_name: unreachable (type: %d)", type);
14078}
14079
14080static void
14081rb_raw_iseq_info(char *const buff, const size_t buff_size, const rb_iseq_t *iseq)
14082{
14083 if (buff_size > 0 && ISEQ_BODY(iseq) && ISEQ_BODY(iseq)->location.label && !RB_TYPE_P(ISEQ_BODY(iseq)->location.pathobj, T_MOVED)) {
14084 VALUE path = rb_iseq_path(iseq);
14085 int n = ISEQ_BODY(iseq)->location.first_lineno;
14086 snprintf(buff, buff_size, " %s@%s:%d",
14087 RSTRING_PTR(ISEQ_BODY(iseq)->location.label),
14088 RSTRING_PTR(path), n);
14089 }
14090}
14091
14092static int
14093str_len_no_raise(VALUE str)
14094{
14095 long len = RSTRING_LEN(str);
14096 if (len < 0) return 0;
14097 if (len > INT_MAX) return INT_MAX;
14098 return (int)len;
14099}
14100
14101#define BUFF_ARGS buff + pos, buff_size - pos
14102#define APPEND_F(...) if ((pos += snprintf(BUFF_ARGS, "" __VA_ARGS__)) >= buff_size) goto end
14103#define APPEND_S(s) do { \
14104 if ((pos + (int)rb_strlen_lit(s)) >= buff_size) { \
14105 goto end; \
14106 } \
14107 else { \
14108 memcpy(buff + pos, (s), rb_strlen_lit(s) + 1); \
14109 } \
14110 } while (0)
14111#define TF(c) ((c) != 0 ? "true" : "false")
14112#define C(c, s) ((c) != 0 ? (s) : " ")
14113
14114static size_t
14115rb_raw_obj_info_common(char *const buff, const size_t buff_size, const VALUE obj)
14116{
14117 size_t pos = 0;
14118
14119 if (SPECIAL_CONST_P(obj)) {
14120 APPEND_F("%s", obj_type_name(obj));
14121
14122 if (FIXNUM_P(obj)) {
14123 APPEND_F(" %ld", FIX2LONG(obj));
14124 }
14125 else if (SYMBOL_P(obj)) {
14126 APPEND_F(" %s", rb_id2name(SYM2ID(obj)));
14127 }
14128 }
14129 else {
14130 const int age = RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
14131
14132 if (is_pointer_to_heap(&rb_objspace, (void *)obj)) {
14133 APPEND_F("%p [%d%s%s%s%s%s%s] %s ",
14134 (void *)obj, age,
14135 C(RVALUE_UNCOLLECTIBLE_BITMAP(obj), "L"),
14136 C(RVALUE_MARK_BITMAP(obj), "M"),
14137 C(RVALUE_PIN_BITMAP(obj), "P"),
14138 C(RVALUE_MARKING_BITMAP(obj), "R"),
14139 C(RVALUE_WB_UNPROTECTED_BITMAP(obj), "U"),
14140 C(rb_objspace_garbage_object_p(obj), "G"),
14141 obj_type_name(obj));
14142 }
14143 else {
14144 /* fake */
14145 APPEND_F("%p [%dXXXX] %s",
14146 (void *)obj, age,
14147 obj_type_name(obj));
14148 }
14149
14150 if (internal_object_p(obj)) {
14151 /* ignore */
14152 }
14153 else if (RBASIC(obj)->klass == 0) {
14154 APPEND_S("(temporary internal)");
14155 }
14156 else if (RTEST(RBASIC(obj)->klass)) {
14157 VALUE class_path = rb_class_path_cached(RBASIC(obj)->klass);
14158 if (!NIL_P(class_path)) {
14159 APPEND_F("(%s)", RSTRING_PTR(class_path));
14160 }
14161 }
14162
14163#if GC_DEBUG
14164 APPEND_F("@%s:%d", RANY(obj)->file, RANY(obj)->line);
14165#endif
14166 }
14167 end:
14168
14169 return pos;
14170}
14171
14172static size_t
14173rb_raw_obj_info_buitin_type(char *const buff, const size_t buff_size, const VALUE obj, size_t pos)
14174{
14175 if (LIKELY(pos < buff_size) && !SPECIAL_CONST_P(obj)) {
14176 const enum ruby_value_type type = BUILTIN_TYPE(obj);
14177
14178 switch (type) {
14179 case T_NODE:
14180 UNEXPECTED_NODE(rb_raw_obj_info);
14181 break;
14182 case T_ARRAY:
14183 if (ARY_SHARED_P(obj)) {
14184 APPEND_S("shared -> ");
14185 rb_raw_obj_info(BUFF_ARGS, ARY_SHARED_ROOT(obj));
14186 }
14187 else if (ARY_EMBED_P(obj)) {
14188 APPEND_F("[%s%s] len: %ld (embed)",
14189 C(ARY_EMBED_P(obj), "E"),
14190 C(ARY_SHARED_P(obj), "S"),
14191 RARRAY_LEN(obj));
14192 }
14193 else {
14194 APPEND_F("[%s%s%s] len: %ld, capa:%ld ptr:%p",
14195 C(ARY_EMBED_P(obj), "E"),
14196 C(ARY_SHARED_P(obj), "S"),
14197 C(RARRAY_TRANSIENT_P(obj), "T"),
14198 RARRAY_LEN(obj),
14199 ARY_EMBED_P(obj) ? -1L : RARRAY(obj)->as.heap.aux.capa,
14200 (void *)RARRAY_CONST_PTR_TRANSIENT(obj));
14201 }
14202 break;
14203 case T_STRING: {
14204 if (STR_SHARED_P(obj)) {
14205 APPEND_F(" [shared] len: %ld", RSTRING_LEN(obj));
14206 }
14207 else {
14208 if (STR_EMBED_P(obj)) APPEND_S(" [embed]");
14209
14210 APPEND_F(" len: %ld, capa: %" PRIdSIZE, RSTRING_LEN(obj), rb_str_capacity(obj));
14211 }
14212 APPEND_F(" \"%.*s\"", str_len_no_raise(obj), RSTRING_PTR(obj));
14213 break;
14214 }
14215 case T_SYMBOL: {
14216 VALUE fstr = RSYMBOL(obj)->fstr;
14217 ID id = RSYMBOL(obj)->id;
14218 if (RB_TYPE_P(fstr, T_STRING)) {
14219 APPEND_F(":%s id:%d", RSTRING_PTR(fstr), (unsigned int)id);
14220 }
14221 else {
14222 APPEND_F("(%p) id:%d", (void *)fstr, (unsigned int)id);
14223 }
14224 break;
14225 }
14226 case T_MOVED: {
14227 APPEND_F("-> %p", (void*)rb_gc_location(obj));
14228 break;
14229 }
14230 case T_HASH: {
14231 APPEND_F("[%c%c] %"PRIdSIZE,
14232 RHASH_AR_TABLE_P(obj) ? 'A' : 'S',
14233 RHASH_TRANSIENT_P(obj) ? 'T' : ' ',
14234 RHASH_SIZE(obj));
14235 break;
14236 }
14237 case T_CLASS:
14238 case T_MODULE:
14239 {
14240 VALUE class_path = rb_class_path_cached(obj);
14241 if (!NIL_P(class_path)) {
14242 APPEND_F("%s", RSTRING_PTR(class_path));
14243 }
14244 else {
14245 APPEND_S("(anon)");
14246 }
14247 break;
14248 }
14249 case T_ICLASS:
14250 {
14251 VALUE class_path = rb_class_path_cached(RBASIC_CLASS(obj));
14252 if (!NIL_P(class_path)) {
14253 APPEND_F("src:%s", RSTRING_PTR(class_path));
14254 }
14255 break;
14256 }
14257 case T_OBJECT:
14258 {
14259 uint32_t len = ROBJECT_IV_CAPACITY(obj);
14260
14261 if (RANY(obj)->as.basic.flags & ROBJECT_EMBED) {
14262 APPEND_F("(embed) len:%d", len);
14263 }
14264 else {
14265 VALUE *ptr = ROBJECT_IVPTR(obj);
14266 APPEND_F("len:%d ptr:%p", len, (void *)ptr);
14267 }
14268 }
14269 break;
14270 case T_DATA: {
14271 const struct rb_block *block;
14272 const rb_iseq_t *iseq;
14273 if (rb_obj_is_proc(obj) &&
14274 (block = vm_proc_block(obj)) != NULL &&
14275 (vm_block_type(block) == block_type_iseq) &&
14276 (iseq = vm_block_iseq(block)) != NULL) {
14277 rb_raw_iseq_info(BUFF_ARGS, iseq);
14278 }
14279 else if (rb_ractor_p(obj)) {
14280 rb_ractor_t *r = (void *)DATA_PTR(obj);
14281 if (r) {
14282 APPEND_F("r:%d", r->pub.id);
14283 }
14284 }
14285 else {
14286 const char * const type_name = rb_objspace_data_type_name(obj);
14287 if (type_name) {
14288 APPEND_F("%s", type_name);
14289 }
14290 }
14291 break;
14292 }
14293 case T_IMEMO: {
14294 APPEND_F("<%s> ", rb_imemo_name(imemo_type(obj)));
14295
14296 switch (imemo_type(obj)) {
14297 case imemo_ment:
14298 {
14299 const rb_method_entry_t *me = &RANY(obj)->as.imemo.ment;
14300
14301 APPEND_F(":%s (%s%s%s%s) type:%s aliased:%d owner:%p defined_class:%p",
14302 rb_id2name(me->called_id),
14303 METHOD_ENTRY_VISI(me) == METHOD_VISI_PUBLIC ? "pub" :
14304 METHOD_ENTRY_VISI(me) == METHOD_VISI_PRIVATE ? "pri" : "pro",
14305 METHOD_ENTRY_COMPLEMENTED(me) ? ",cmp" : "",
14306 METHOD_ENTRY_CACHED(me) ? ",cc" : "",
14307 METHOD_ENTRY_INVALIDATED(me) ? ",inv" : "",
14308 me->def ? rb_method_type_name(me->def->type) : "NULL",
14309 me->def ? me->def->aliased : -1,
14310 (void *)me->owner, // obj_info(me->owner),
14311 (void *)me->defined_class); //obj_info(me->defined_class)));
14312
14313 if (me->def) {
14314 switch (me->def->type) {
14315 case VM_METHOD_TYPE_ISEQ:
14316 APPEND_S(" (iseq:");
14317 rb_raw_obj_info(BUFF_ARGS, (VALUE)me->def->body.iseq.iseqptr);
14318 APPEND_S(")");
14319 break;
14320 default:
14321 break;
14322 }
14323 }
14324
14325 break;
14326 }
14327 case imemo_iseq: {
14328 const rb_iseq_t *iseq = (const rb_iseq_t *)obj;
14329 rb_raw_iseq_info(BUFF_ARGS, iseq);
14330 break;
14331 }
14332 case imemo_callinfo:
14333 {
14334 const struct rb_callinfo *ci = (const struct rb_callinfo *)obj;
14335 APPEND_F("(mid:%s, flag:%x argc:%d, kwarg:%s)",
14336 rb_id2name(vm_ci_mid(ci)),
14337 vm_ci_flag(ci),
14338 vm_ci_argc(ci),
14339 vm_ci_kwarg(ci) ? "available" : "NULL");
14340 break;
14341 }
14342 case imemo_callcache:
14343 {
14344 const struct rb_callcache *cc = (const struct rb_callcache *)obj;
14345 VALUE class_path = cc->klass ? rb_class_path_cached(cc->klass) : Qnil;
14346 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
14347
14348 APPEND_F("(klass:%s cme:%s%s (%p) call:%p",
14349 NIL_P(class_path) ? (cc->klass ? "??" : "<NULL>") : RSTRING_PTR(class_path),
14350 cme ? rb_id2name(cme->called_id) : "<NULL>",
14351 cme ? (METHOD_ENTRY_INVALIDATED(cme) ? " [inv]" : "") : "",
14352 (void *)cme,
14353 (void *)vm_cc_call(cc));
14354 break;
14355 }
14356 default:
14357 break;
14358 }
14359 }
14360 default:
14361 break;
14362 }
14363 }
14364 end:
14365
14366 return pos;
14367}
14368
14369#undef TF
14370#undef C
14371
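/* Editorial note: rb_raw_obj_info() below writes a one-line, human-readable
 * summary of obj into buff (at most buff_size bytes, truncating on overflow)
 * and returns buff. It backs the obj_info()/rb_obj_info_dump() debug helpers
 * further down in this file. */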
14372const char *
14373rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj)
14374{
14375 asan_unpoisoning_object(obj) {
14376 size_t pos = rb_raw_obj_info_common(buff, buff_size, obj);
14377 pos = rb_raw_obj_info_buitin_type(buff, buff_size, obj, pos);
14378 if (pos >= buff_size) {} // truncated
14379 }
14380
14381 return buff;
14382}
14383
14384#undef APPEND_S
14385#undef APPEND_F
14386#undef BUFF_ARGS
14387
14388#if RGENGC_OBJ_INFO
14389#define OBJ_INFO_BUFFERS_NUM 10
14390#define OBJ_INFO_BUFFERS_SIZE 0x100
14391static rb_atomic_t obj_info_buffers_index = 0;
14392static char obj_info_buffers[OBJ_INFO_BUFFERS_NUM][OBJ_INFO_BUFFERS_SIZE];
14393
14394/* Increments *var atomically and resets *var to 0 when maxval is
14395 * reached. Returns the wraparound old *var value (0...maxval). */
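/* Editorial example: with maxval == 4, successive calls return
 * 0, 1, 2, 3, 0, 1, ...; in this file it rotates through the
 * OBJ_INFO_BUFFERS_NUM static buffers used by obj_info(). */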
14396static rb_atomic_t
14397atomic_inc_wraparound(rb_atomic_t *var, const rb_atomic_t maxval)
14398{
14399 rb_atomic_t oldval = RUBY_ATOMIC_FETCH_ADD(*var, 1);
14400 if (UNLIKELY(oldval >= maxval - 1)) { // wraparound *var
14401 const rb_atomic_t newval = oldval + 1;
14402 RUBY_ATOMIC_CAS(*var, newval, newval % maxval);
14403 oldval %= maxval;
14404 }
14405 return oldval;
14406}
14407
14408static const char *
14409obj_info(VALUE obj)
14410{
14411 rb_atomic_t index = atomic_inc_wraparound(&obj_info_buffers_index, OBJ_INFO_BUFFERS_NUM);
14412 char *const buff = obj_info_buffers[index];
14413 return rb_raw_obj_info(buff, OBJ_INFO_BUFFERS_SIZE, obj);
14414}
14415#else
14416static const char *
14417obj_info(VALUE obj)
14418{
14419 return obj_type_name(obj);
14420}
14421#endif
14422
14423MJIT_FUNC_EXPORTED const char *
14424rb_obj_info(VALUE obj)
14425{
14426 return obj_info(obj);
14427}
14428
14429void
14430rb_obj_info_dump(VALUE obj)
14431{
14432 char buff[0x100];
14433 fprintf(stderr, "rb_obj_info_dump: %s\n", rb_raw_obj_info(buff, 0x100, obj));
14434}
14435
14436MJIT_FUNC_EXPORTED void
14437rb_obj_info_dump_loc(VALUE obj, const char *file, int line, const char *func)
14438{
14439 char buff[0x100];
14440 fprintf(stderr, "<OBJ_INFO:%s@%s:%d> %s\n", func, file, line, rb_raw_obj_info(buff, 0x100, obj));
14441}
14442
14443#if GC_DEBUG
14444
14445void
14446rb_gcdebug_print_obj_condition(VALUE obj)
14447{
14448 rb_objspace_t *objspace = &rb_objspace;
14449
14450 fprintf(stderr, "created at: %s:%d\n", RANY(obj)->file, RANY(obj)->line);
14451
14452 if (BUILTIN_TYPE(obj) == T_MOVED) {
14453 fprintf(stderr, "moved?: true\n");
14454 }
14455 else {
14456 fprintf(stderr, "moved?: false\n");
14457 }
14458 if (is_pointer_to_heap(objspace, (void *)obj)) {
14459 fprintf(stderr, "pointer to heap?: true\n");
14460 }
14461 else {
14462 fprintf(stderr, "pointer to heap?: false\n");
14463 return;
14464 }
14465
14466 fprintf(stderr, "marked? : %s\n", MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) ? "true" : "false");
14467 fprintf(stderr, "pinned? : %s\n", MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj) ? "true" : "false");
14468 fprintf(stderr, "age? : %d\n", RVALUE_AGE(obj));
14469 fprintf(stderr, "old? : %s\n", RVALUE_OLD_P(obj) ? "true" : "false");
14470 fprintf(stderr, "WB-protected?: %s\n", RVALUE_WB_UNPROTECTED(obj) ? "false" : "true");
14471 fprintf(stderr, "remembered? : %s\n", RVALUE_REMEMBERED(obj) ? "true" : "false");
14472
14473 if (is_lazy_sweeping(objspace)) {
14474 fprintf(stderr, "lazy sweeping?: true\n");
14475 fprintf(stderr, "swept?: %s\n", is_swept_object(objspace, obj) ? "done" : "not yet");
14476 }
14477 else {
14478 fprintf(stderr, "lazy sweeping?: false\n");
14479 }
14480}
14481
14482static VALUE
14483gcdebug_sentinel(RB_BLOCK_CALL_FUNC_ARGLIST(obj, name))
14484{
14485 fprintf(stderr, "WARNING: object %s(%p) was inadvertently collected\n", (char *)name, (void *)obj);
14486 return Qnil;
14487}
14488
14489void
14490rb_gcdebug_sentinel(VALUE obj, const char *name)
14491{
14492 rb_define_finalizer(obj, rb_proc_new(gcdebug_sentinel, (VALUE)name));
14493}
14494
14495#endif /* GC_DEBUG */
14496
14497#if GC_DEBUG_STRESS_TO_CLASS
14498/*
14499 * call-seq:
14500 * GC.add_stress_to_class(class[, ...])
14501 *
14502 * Raises NoMemoryError when allocating an instance of the given classes.
14503 *
14504 */
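/*
 * (Editorial sketch; only available in builds where GC_DEBUG_STRESS_TO_CLASS
 * is enabled, otherwise this singleton method is not defined.)
 *
 *   GC.add_stress_to_class(String)
 *   String.new("x")                  # raises NoMemoryError
 *   GC.remove_stress_to_class(String)
 */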
14505static VALUE
14506rb_gcdebug_add_stress_to_class(int argc, VALUE *argv, VALUE self)
14507{
14508 rb_objspace_t *objspace = &rb_objspace;
14509
14510 if (!stress_to_class) {
14511 stress_to_class = rb_ary_hidden_new(argc);
14512 }
14513 rb_ary_cat(stress_to_class, argv, argc);
14514 return self;
14515}
14516
14517/*
14518 * call-seq:
14519 * GC.remove_stress_to_class(class[, ...])
14520 *
14521 * No longer raises NoMemoryError when allocating an instance of the
14522 * given classes.
14523 *
14524 */
14525static VALUE
14526rb_gcdebug_remove_stress_to_class(int argc, VALUE *argv, VALUE self)
14527{
14528 rb_objspace_t *objspace = &rb_objspace;
14529 int i;
14530
14531 if (stress_to_class) {
14532 for (i = 0; i < argc; ++i) {
14533 rb_ary_delete_same(stress_to_class, argv[i]);
14534 }
14535 if (RARRAY_LEN(stress_to_class) == 0) {
14536 stress_to_class = 0;
14537 }
14538 }
14539 return Qnil;
14540}
14541#endif
14542
14543/*
14544 * Document-module: ObjectSpace
14545 *
14546 * The ObjectSpace module contains a number of routines
14547 * that interact with the garbage collection facility and allow you to
14548 * traverse all living objects with an iterator.
14549 *
14550 * ObjectSpace also provides support for object finalizers, procs that will be
14551 * called when a specific object is about to be destroyed by garbage
14552 * collection. See the documentation for
14553 * <code>ObjectSpace.define_finalizer</code> for important information on
14554 * how to use this method correctly.
14555 *
14556 * a = "A"
14557 * b = "B"
14558 *
14559 * ObjectSpace.define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" })
14560 * ObjectSpace.define_finalizer(b, proc {|id| puts "Finalizer two on #{id}" })
14561 *
14562 * a = nil
14563 * b = nil
14564 *
14565 * _produces:_
14566 *
14567 * Finalizer two on 537763470
14568 * Finalizer one on 537763480
14569 */
14570
14571/*
14572 * Document-class: ObjectSpace::WeakMap
14573 *
14574 * An ObjectSpace::WeakMap object holds references to objects without
14575 * preventing those objects from being garbage collected.
14576 *
14577 * This class is mostly used internally by WeakRef; please use
14578 * +lib/weakref.rb+ for the public interface.
14579 */
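/*
 * (Editorial usage sketch.)
 *
 *   map = ObjectSpace::WeakMap.new
 *   key = "foo"
 *   val = Object.new
 *   map[key] = val
 *   map[key]        #=> val
 *   map.key?(key)   #=> true
 *   val = nil
 *   # Once the value has been garbage collected, map[key] returns nil and
 *   # the entry no longer counts towards map.size.
 */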
14580
14581/* Document-class: GC::Profiler
14582 *
14583 * The GC profiler provides access to information on GC runs including time,
14584 * length and object space size.
14585 *
14586 * Example:
14587 *
14588 * GC::Profiler.enable
14589 *
14590 * require 'rdoc/rdoc'
14591 *
14592 * GC::Profiler.report
14593 *
14594 * GC::Profiler.disable
14595 *
14596 * See also GC.count, GC.malloc_allocated_size and GC.malloc_allocations
14597 */
14598
14599#include "gc.rbinc"
14600/*
14601 * call-seq:
14602 * GC.using_rvargc? -> true or false
14603 *
14604 * Returns true if the experimental Variable Width Allocation feature is
14605 * enabled, false otherwise.
14606 */
14607static VALUE
14608gc_using_rvargc_p(VALUE mod)
14609{
14610#if USE_RVARGC
14611 return Qtrue;
14612#else
14613 return Qfalse;
14614#endif
14615}
14616
14617void
14618Init_GC(void)
14619{
14620#undef rb_intern
14621 VALUE rb_mObjSpace;
14622 VALUE rb_mProfiler;
14623 VALUE gc_constants;
14624
14625 rb_mGC = rb_define_module("GC");
14626
14627 gc_constants = rb_hash_new();
14628 rb_hash_aset(gc_constants, ID2SYM(rb_intern("DEBUG")), RBOOL(GC_DEBUG));
14629 rb_hash_aset(gc_constants, ID2SYM(rb_intern("BASE_SLOT_SIZE")), SIZET2NUM(BASE_SLOT_SIZE - RVALUE_OVERHEAD));
14630 rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_OVERHEAD")), SIZET2NUM(RVALUE_OVERHEAD));
14631 rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_SIZE")), SIZET2NUM(sizeof(RVALUE)));
14632 rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_OBJ_LIMIT")), SIZET2NUM(HEAP_PAGE_OBJ_LIMIT));
14633 rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_BITMAP_SIZE")), SIZET2NUM(HEAP_PAGE_BITMAP_SIZE));
14634 rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_SIZE")), SIZET2NUM(HEAP_PAGE_SIZE));
14635 rb_hash_aset(gc_constants, ID2SYM(rb_intern("SIZE_POOL_COUNT")), LONG2FIX(SIZE_POOL_COUNT));
14636 rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVARGC_MAX_ALLOCATE_SIZE")), LONG2FIX(size_pool_slot_size(SIZE_POOL_COUNT - 1)));
14637 OBJ_FREEZE(gc_constants);
14638 /* internal constants */
14639 rb_define_const(rb_mGC, "INTERNAL_CONSTANTS", gc_constants);
14640
14641 rb_mProfiler = rb_define_module_under(rb_mGC, "Profiler");
14642 rb_define_singleton_method(rb_mProfiler, "enabled?", gc_profile_enable_get, 0);
14643 rb_define_singleton_method(rb_mProfiler, "enable", gc_profile_enable, 0);
14644 rb_define_singleton_method(rb_mProfiler, "raw_data", gc_profile_record_get, 0);
14645 rb_define_singleton_method(rb_mProfiler, "disable", gc_profile_disable, 0);
14646 rb_define_singleton_method(rb_mProfiler, "clear", gc_profile_clear, 0);
14647 rb_define_singleton_method(rb_mProfiler, "result", gc_profile_result, 0);
14648 rb_define_singleton_method(rb_mProfiler, "report", gc_profile_report, -1);
14649 rb_define_singleton_method(rb_mProfiler, "total_time", gc_profile_total_time, 0);
14650
14651 rb_mObjSpace = rb_define_module("ObjectSpace");
14652
14653 rb_define_module_function(rb_mObjSpace, "each_object", os_each_obj, -1);
14654
14655 rb_define_module_function(rb_mObjSpace, "define_finalizer", define_final, -1);
14656 rb_define_module_function(rb_mObjSpace, "undefine_finalizer", undefine_final, 1);
14657
14658 rb_define_module_function(rb_mObjSpace, "_id2ref", os_id2ref, 1);
14659
14660 rb_vm_register_special_exception(ruby_error_nomemory, rb_eNoMemError, "failed to allocate memory");
14661
14662 rb_define_method(rb_cBasicObject, "__id__", rb_obj_id, 0);
14663 rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0);
14664
14665 rb_define_module_function(rb_mObjSpace, "count_objects", count_objects, -1);
14666
14667 {
14668 VALUE rb_cWeakMap = rb_define_class_under(rb_mObjSpace, "WeakMap", rb_cObject);
14669 rb_define_alloc_func(rb_cWeakMap, wmap_allocate);
14670 rb_define_method(rb_cWeakMap, "[]=", wmap_aset, 2);
14671 rb_define_method(rb_cWeakMap, "[]", wmap_aref, 1);
14672 rb_define_method(rb_cWeakMap, "include?", wmap_has_key, 1);
14673 rb_define_method(rb_cWeakMap, "member?", wmap_has_key, 1);
14674 rb_define_method(rb_cWeakMap, "key?", wmap_has_key, 1);
14675 rb_define_method(rb_cWeakMap, "inspect", wmap_inspect, 0);
14676 rb_define_method(rb_cWeakMap, "each", wmap_each, 0);
14677 rb_define_method(rb_cWeakMap, "each_pair", wmap_each, 0);
14678 rb_define_method(rb_cWeakMap, "each_key", wmap_each_key, 0);
14679 rb_define_method(rb_cWeakMap, "each_value", wmap_each_value, 0);
14680 rb_define_method(rb_cWeakMap, "keys", wmap_keys, 0);
14681 rb_define_method(rb_cWeakMap, "values", wmap_values, 0);
14682 rb_define_method(rb_cWeakMap, "size", wmap_size, 0);
14683 rb_define_method(rb_cWeakMap, "length", wmap_size, 0);
14684 rb_include_module(rb_cWeakMap, rb_mEnumerable);
14685 }
14686
14687 /* internal methods */
14688 rb_define_singleton_method(rb_mGC, "verify_internal_consistency", gc_verify_internal_consistency_m, 0);
14689 rb_define_singleton_method(rb_mGC, "verify_transient_heap_internal_consistency", gc_verify_transient_heap_internal_consistency, 0);
14690#if MALLOC_ALLOCATED_SIZE
14691 rb_define_singleton_method(rb_mGC, "malloc_allocated_size", gc_malloc_allocated_size, 0);
14692 rb_define_singleton_method(rb_mGC, "malloc_allocations", gc_malloc_allocations, 0);
14693#endif
14694
14695 rb_define_singleton_method(rb_mGC, "using_rvargc?", gc_using_rvargc_p, 0);
14696
14697 if (GC_COMPACTION_SUPPORTED) {
14698 rb_define_singleton_method(rb_mGC, "compact", gc_compact, 0);
14699 rb_define_singleton_method(rb_mGC, "auto_compact", gc_get_auto_compact, 0);
14700 rb_define_singleton_method(rb_mGC, "auto_compact=", gc_set_auto_compact, 1);
14701 rb_define_singleton_method(rb_mGC, "latest_compact_info", gc_compact_stats, 0);
14702 }
14703 else {
14704 rb_define_singleton_method(rb_mGC, "compact", rb_f_notimplement, 0);
14705 rb_define_singleton_method(rb_mGC, "auto_compact", rb_f_notimplement, 0);
14706 rb_define_singleton_method(rb_mGC, "auto_compact=", rb_f_notimplement, 1);
14707 rb_define_singleton_method(rb_mGC, "latest_compact_info", rb_f_notimplement, 0);
14708 /* When !GC_COMPACTION_SUPPORTED, this method is not defined in gc.rb */
14709 rb_define_singleton_method(rb_mGC, "verify_compaction_references", rb_f_notimplement, -1);
14710 }
14711
14712#if GC_DEBUG_STRESS_TO_CLASS
14713 rb_define_singleton_method(rb_mGC, "add_stress_to_class", rb_gcdebug_add_stress_to_class, -1);
14714 rb_define_singleton_method(rb_mGC, "remove_stress_to_class", rb_gcdebug_remove_stress_to_class, -1);
14715#endif
14716
14717 {
14718 VALUE opts;
14719 /* \GC build options */
14720 rb_define_const(rb_mGC, "OPTS", opts = rb_ary_new());
14721#define OPT(o) if (o) rb_ary_push(opts, rb_fstring_lit(#o))
14722 OPT(GC_DEBUG);
14723 OPT(USE_RGENGC);
14724 OPT(RGENGC_DEBUG);
14725 OPT(RGENGC_CHECK_MODE);
14726 OPT(RGENGC_PROFILE);
14727 OPT(RGENGC_ESTIMATE_OLDMALLOC);
14728 OPT(GC_PROFILE_MORE_DETAIL);
14729 OPT(GC_ENABLE_LAZY_SWEEP);
14730 OPT(CALC_EXACT_MALLOC_SIZE);
14731 OPT(MALLOC_ALLOCATED_SIZE);
14732 OPT(MALLOC_ALLOCATED_SIZE_CHECK);
14733 OPT(GC_PROFILE_DETAIL_MEMORY);
14734 OPT(GC_COMPACTION_SUPPORTED);
14735#undef OPT
14736 OBJ_FREEZE(opts);
14737 }
14738}
14739
14740#ifdef ruby_xmalloc
14741#undef ruby_xmalloc
14742#endif
14743#ifdef ruby_xmalloc2
14744#undef ruby_xmalloc2
14745#endif
14746#ifdef ruby_xcalloc
14747#undef ruby_xcalloc
14748#endif
14749#ifdef ruby_xrealloc
14750#undef ruby_xrealloc
14751#endif
14752#ifdef ruby_xrealloc2
14753#undef ruby_xrealloc2
14754#endif
14755
14756void *
14757ruby_xmalloc(size_t size)
14758{
14759#if USE_GC_MALLOC_OBJ_INFO_DETAILS
14760 ruby_malloc_info_file = __FILE__;
14761 ruby_malloc_info_line = __LINE__;
14762#endif
14763 return ruby_xmalloc_body(size);
14764}
14765
14766void *
14767ruby_xmalloc2(size_t n, size_t size)
14768{
14769#if USE_GC_MALLOC_OBJ_INFO_DETAILS
14770 ruby_malloc_info_file = __FILE__;
14771 ruby_malloc_info_line = __LINE__;
14772#endif
14773 return ruby_xmalloc2_body(n, size);
14774}
14775
14776void *
14777ruby_xcalloc(size_t n, size_t size)
14778{
14779#if USE_GC_MALLOC_OBJ_INFO_DETAILS
14780 ruby_malloc_info_file = __FILE__;
14781 ruby_malloc_info_line = __LINE__;
14782#endif
14783 return ruby_xcalloc_body(n, size);
14784}
14785
14786void *
14787ruby_xrealloc(void *ptr, size_t new_size)
14788{
14789#if USE_GC_MALLOC_OBJ_INFO_DETAILS
14790 ruby_malloc_info_file = __FILE__;
14791 ruby_malloc_info_line = __LINE__;
14792#endif
14793 return ruby_xrealloc_body(ptr, new_size);
14794}
14795
14796void *
14797ruby_xrealloc2(void *ptr, size_t n, size_t new_size)
14798{
14799#if USE_GC_MALLOC_OBJ_INFO_DETAILS
14800 ruby_malloc_info_file = __FILE__;
14801 ruby_malloc_info_line = __LINE__;
14802#endif
14803 return ruby_xrealloc2_body(ptr, n, new_size);
14804}
#define RUBY_ASSERT(expr)
Asserts that the given expression is truthy if and only if RUBY_DEBUG is truthy.
Definition assert.h:177
#define RUBY_ASSERT_ALWAYS(expr)
A variant of RUBY_ASSERT that does not interface with RUBY_DEBUG.
Definition assert.h:167
#define RUBY_ATOMIC_VALUE_CAS(var, oldval, newval)
Identical to RUBY_ATOMIC_CAS, except it expects its arguments are VALUE.
Definition atomic.h:321
#define RUBY_ATOMIC_CAS(var, oldval, newval)
Atomic compare-and-swap.
Definition atomic.h:138
std::atomic< unsigned > rb_atomic_t
Type that is eligible for atomic operations.
Definition atomic.h:69
#define RUBY_ATOMIC_FETCH_ADD(var, val)
Atomically replaces the value pointed by var with the result of addition of val to the old value of v...
Definition atomic.h:91
#define RUBY_ALIGNOF
Wraps (or simulates) alignof.
Definition stdalign.h:28
#define rb_define_method(klass, mid, func, arity)
Defines klass#mid.
#define rb_define_singleton_method(klass, mid, func, arity)
Defines klass.mid.
#define rb_define_module_function(klass, mid, func, arity)
Defines klass#mid and makes it a module function.
int rb_postponed_job_register_one(unsigned int flags, rb_postponed_job_func_t func, void *data)
Identical to rb_postponed_job_register_one(), except it additionally checks for duplicated registrati...
Definition vm_trace.c:1703
#define RB_GNUC_EXTENSION_BLOCK(x)
This is expanded to the passed token for non-GCC compilers.
Definition defines.h:91
#define RB_GNUC_EXTENSION
This is expanded to nothing for non-GCC compilers.
Definition defines.h:89
#define RUBY_INTERNAL_EVENT_GC_EXIT
gc_exit() is called.
Definition event.h:94
#define RUBY_INTERNAL_EVENT_GC_ENTER
gc_enter() is called.
Definition event.h:93
#define RUBY_INTERNAL_EVENT_GC_END_SWEEP
GC ended sweep phase.
Definition event.h:92
#define RUBY_INTERNAL_EVENT_GC_END_MARK
GC ended mark phase.
Definition event.h:91
#define RUBY_INTERNAL_EVENT_OBJSPACE_MASK
Bitmask of GC events.
Definition event.h:95
#define RUBY_INTERNAL_EVENT_FREEOBJ
Object swept.
Definition event.h:89
#define RUBY_INTERNAL_EVENT_GC_START
GC started.
Definition event.h:90
uint32_t rb_event_flag_t
Represents event(s).
Definition event.h:103
#define RUBY_INTERNAL_EVENT_NEWOBJ
Object allocated.
Definition event.h:88
static VALUE RB_FL_TEST_RAW(VALUE obj, VALUE flags)
This is an implenentation detail of RB_FL_TEST().
Definition fl_type.h:501
static void RB_FL_SET_RAW(VALUE obj, VALUE flags)
This is an implenentation detail of RB_FL_SET().
Definition fl_type.h:638
@ RUBY_FL_WB_PROTECTED
Definition fl_type.h:207
void rb_include_module(VALUE klass, VALUE module)
Includes a module to a class.
Definition class.c:1125
VALUE rb_define_class_under(VALUE outer, const char *name, VALUE super)
Defines a class under the namespace of outer.
Definition class.c:955
VALUE rb_define_module(const char *name)
Defines a top-level module.
Definition class.c:1033
VALUE rb_define_module_under(VALUE outer, const char *name)
Defines a module under the namespace of outer.
Definition class.c:1057
int rb_scan_args(int argc, const VALUE *argv, const char *fmt,...)
Retrieves argument from argc and argv to given VALUE references according to the format string.
Definition class.c:2574
#define T_COMPLEX
Old name of RUBY_T_COMPLEX.
Definition value_type.h:59
#define TYPE(_)
Old name of rb_type.
Definition value_type.h:107
#define FL_SINGLETON
Old name of RUBY_FL_SINGLETON.
Definition fl_type.h:58
#define T_FILE
Old name of RUBY_T_FILE.
Definition value_type.h:62
#define FL_EXIVAR
Old name of RUBY_FL_EXIVAR.
Definition fl_type.h:67
#define ALLOC
Old name of RB_ALLOC.
Definition memory.h:394
#define T_STRING
Old name of RUBY_T_STRING.
Definition value_type.h:78
#define xfree
Old name of ruby_xfree.
Definition xmalloc.h:58
#define T_MASK
Old name of RUBY_T_MASK.
Definition value_type.h:68
#define Qundef
Old name of RUBY_Qundef.
#define INT2FIX
Old name of RB_INT2FIX.
Definition long.h:48
#define rb_str_cat2
Old name of rb_str_cat_cstr.
Definition string.h:1683
#define T_NIL
Old name of RUBY_T_NIL.
Definition value_type.h:72
#define UNREACHABLE
Old name of RBIMPL_UNREACHABLE.
Definition assume.h:28
#define T_FLOAT
Old name of RUBY_T_FLOAT.
Definition value_type.h:64
#define T_IMEMO
Old name of RUBY_T_IMEMO.
Definition value_type.h:67
#define ID2SYM
Old name of RB_ID2SYM.
Definition symbol.h:44
#define T_BIGNUM
Old name of RUBY_T_BIGNUM.
Definition value_type.h:57
#define SPECIAL_CONST_P
Old name of RB_SPECIAL_CONST_P.
#define T_STRUCT
Old name of RUBY_T_STRUCT.
Definition value_type.h:79
#define OBJ_FREEZE
Old name of RB_OBJ_FREEZE.
Definition fl_type.h:143
#define ULONG2NUM
Old name of RB_ULONG2NUM.
Definition long.h:60
#define T_FIXNUM
Old name of RUBY_T_FIXNUM.
Definition value_type.h:63
#define UNREACHABLE_RETURN
Old name of RBIMPL_UNREACHABLE_RETURN.
Definition assume.h:29
#define SYM2ID
Old name of RB_SYM2ID.
Definition symbol.h:45
#define T_DATA
Old name of RUBY_T_DATA.
Definition value_type.h:60
#define FL_SEEN_OBJ_ID
Old name of RUBY_FL_SEEN_OBJ_ID.
Definition fl_type.h:66
#define FL_PROMOTED0
Old name of RUBY_FL_PROMOTED0.
Definition fl_type.h:60
#define FIXNUM_FLAG
Old name of RUBY_FIXNUM_FLAG.
#define LL2NUM
Old name of RB_LL2NUM.
Definition long_long.h:30
#define CLASS_OF
Old name of rb_class_of.
Definition globals.h:203
#define T_NONE
Old name of RUBY_T_NONE.
Definition value_type.h:74
#define T_NODE
Old name of RUBY_T_NODE.
Definition value_type.h:73
#define SIZET2NUM
Old name of RB_SIZE2NUM.
Definition size_t.h:62
#define xmalloc
Old name of ruby_xmalloc.
Definition xmalloc.h:53
#define LONG2FIX
Old name of RB_INT2FIX.
Definition long.h:49
#define FIX2INT
Old name of RB_FIX2INT.
Definition int.h:41
#define FL_FINALIZE
Old name of RUBY_FL_FINALIZE.
Definition fl_type.h:62
#define T_MODULE
Old name of RUBY_T_MODULE.
Definition value_type.h:70
#define STATIC_SYM_P
Old name of RB_STATIC_SYM_P.
#define T_TRUE
Old name of RUBY_T_TRUE.
Definition value_type.h:81
#define T_RATIONAL
Old name of RUBY_T_RATIONAL.
Definition value_type.h:76
#define T_ICLASS
Old name of RUBY_T_ICLASS.
Definition value_type.h:66
#define T_HASH
Old name of RUBY_T_HASH.
Definition value_type.h:65
#define ALLOC_N
Old name of RB_ALLOC_N.
Definition memory.h:393
#define FL_ABLE
Old name of RB_FL_ABLE.
Definition fl_type.h:130
#define FL_TEST_RAW
Old name of RB_FL_TEST_RAW.
Definition fl_type.h:140
#define FL_SET
Old name of RB_FL_SET.
Definition fl_type.h:137
#define rb_ary_new3
Old name of rb_ary_new_from_args.
Definition array.h:652
#define LONG2NUM
Old name of RB_LONG2NUM.
Definition long.h:50
#define T_FALSE
Old name of RUBY_T_FALSE.
Definition value_type.h:61
#define ULL2NUM
Old name of RB_ULL2NUM.
Definition long_long.h:31
#define T_UNDEF
Old name of RUBY_T_UNDEF.
Definition value_type.h:82
#define FLONUM_P
Old name of RB_FLONUM_P.
#define Qtrue
Old name of RUBY_Qtrue.
#define DYNAMIC_SYM_P
Old name of RB_DYNAMIC_SYM_P.
Definition value_type.h:86
#define T_ZOMBIE
Old name of RUBY_T_ZOMBIE.
Definition value_type.h:83
#define Qnil
Old name of RUBY_Qnil.
#define Qfalse
Old name of RUBY_Qfalse.
#define FIX2LONG
Old name of RB_FIX2LONG.
Definition long.h:46
#define T_ARRAY
Old name of RUBY_T_ARRAY.
Definition value_type.h:56
#define T_OBJECT
Old name of RUBY_T_OBJECT.
Definition value_type.h:75
#define NIL_P
Old name of RB_NIL_P.
#define FL_WB_PROTECTED
Old name of RUBY_FL_WB_PROTECTED.
Definition fl_type.h:59
#define T_SYMBOL
Old name of RUBY_T_SYMBOL.
Definition value_type.h:80
#define DBL2NUM
Old name of rb_float_new.
Definition double.h:29
#define T_MATCH
Old name of RUBY_T_MATCH.
Definition value_type.h:69
#define T_CLASS
Old name of RUBY_T_CLASS.
Definition value_type.h:58
#define BUILTIN_TYPE
Old name of RB_BUILTIN_TYPE.
Definition value_type.h:85
#define OBJ_PROMOTED
Old name of RB_OBJ_PROMOTED.
Definition rgengc.h:237
#define T_MOVED
Old name of RUBY_T_MOVED.
Definition value_type.h:71
#define FL_TEST
Old name of RB_FL_TEST.
Definition fl_type.h:139
#define FL_PROMOTED1
Old name of RUBY_FL_PROMOTED1.
Definition fl_type.h:61
#define xcalloc
Old name of ruby_xcalloc.
Definition xmalloc.h:55
#define FL_UNSET
Old name of RB_FL_UNSET.
Definition fl_type.h:141
#define UINT2NUM
Old name of RB_UINT2NUM.
Definition int.h:46
#define FIXNUM_P
Old name of RB_FIXNUM_P.
#define FL_USHIFT
Old name of RUBY_FL_USHIFT.
Definition fl_type.h:70
#define SYMBOL_P
Old name of RB_SYMBOL_P.
Definition value_type.h:88
#define T_REGEXP
Old name of RUBY_T_REGEXP.
Definition value_type.h:77
size_t ruby_stack_length(VALUE **p)
Queries what Ruby thinks is the machine stack.
Definition gc.c:6460
int ruby_stack_check(void)
Checks for stack overflow.
Definition gc.c:6500
void rb_raise(VALUE exc, const char *fmt,...)
Exception entry point.
Definition error.c:3150
void rb_bug(const char *fmt,...)
Interpreter panic switch.
Definition error.c:794
VALUE rb_eNoMemError
NoMemoryError exception.
Definition error.c:1102
VALUE rb_eRangeError
RangeError exception.
Definition error.c:1095
#define ruby_verbose
This variable controls whether the interpreter is in debug mode.
Definition error.h:459
VALUE rb_eTypeError
TypeError exception.
Definition error.c:1091
VALUE rb_eRuntimeError
RuntimeError exception.
Definition error.c:1089
void rb_warn(const char *fmt,...)
Identical to rb_warning(), except it reports always regardless of runtime -W flag.
Definition error.c:411
VALUE rb_eArgError
ArgumentError exception.
Definition error.c:1092
VALUE rb_mKernel
Kernel module.
Definition object.c:51
VALUE rb_any_to_s(VALUE obj)
Generates a textual representation of the given object.
Definition object.c:589
VALUE rb_mEnumerable
Enumerable module.
Definition enum.c:27
VALUE rb_mGC
GC module.
Definition gc.c:1209
VALUE rb_obj_class(VALUE obj)
Queries the class of an object.
Definition object.c:190
VALUE rb_inspect(VALUE obj)
Generates a human-readable textual representation of the given object.
Definition object.c:600
VALUE rb_cBasicObject
BasicObject class.
Definition object.c:50
VALUE rb_equal(VALUE lhs, VALUE rhs)
This function is an optimised version of calling #==.
Definition object.c:122
VALUE rb_obj_is_kind_of(VALUE obj, VALUE klass)
Queries if the given object is an instance (of possibly descendants) of the given class.
Definition object.c:787
VALUE rb_stdout
STDOUT constant.
Definition io.c:194
VALUE rb_to_int(VALUE val)
Identical to rb_check_to_int(), except it raises in case of conversion mismatch.
Definition object.c:3026
#define RETURN_ENUMERATOR(obj, argc, argv)
Identical to RETURN_SIZED_ENUMERATOR(), except its size is unknown.
Definition enumerator.h:239
#define rb_check_frozen
Just another name of rb_check_frozen.
Definition error.h:264
static int rb_check_arity(int argc, int min, int max)
Ensures that the passed integer is in the passed range.
Definition error.h:280
VALUE rb_io_write(VALUE io, VALUE str)
Writes the given string to the given IO.
Definition io.c:2265
VALUE rb_block_proc(void)
Constructs a Proc object from implicitly passed components.
Definition proc.c:848
VALUE rb_obj_is_proc(VALUE recv)
Queries if the given object is a proc.
Definition proc.c:175
void rb_str_free(VALUE str)
Destroys the given string for no reason.
Definition string.c:1571
VALUE rb_str_append(VALUE dst, VALUE src)
Identical to rb_str_buf_append(), except it converts the right hand side before concatenating.
Definition string.c:3353
size_t rb_str_capacity(VALUE str)
Queries the capacity of the given string.
Definition string.c:871
VALUE rb_str_buf_append(VALUE dst, VALUE src)
Identical to rb_str_cat_cstr(), except it takes Ruby's string instead of C's.
Definition string.c:3319
VALUE rb_str_buf_new(long capa)
Allocates a "string buffer".
Definition string.c:1532
#define rb_str_new_cstr(str)
Identical to rb_str_new, except it assumes the passed pointer is a pointer to a C string.
Definition string.h:1514
VALUE rb_class_path_cached(VALUE mod)
Just another name of rb_mod_name.
Definition variable.c:197
VALUE rb_class_name(VALUE obj)
Queries the name of the given object's class.
Definition variable.c:310
void rb_free_generic_ivar(VALUE obj)
Frees the list of instance variables.
Definition variable.c:1065
void rb_undef_alloc_func(VALUE klass)
Deletes the allocator function of a class.
Definition vm_method.c:1142
VALUE rb_check_funcall(VALUE recv, ID mid, int argc, const VALUE *argv)
Identical to rb_funcallv(), except it returns RUBY_Qundef instead of raising rb_eNoMethodError.
Definition vm_eval.c:664
rb_alloc_func_t rb_get_alloc_func(VALUE klass)
Queries the allocator function of a class.
Definition vm_method.c:1148
void rb_define_alloc_func(VALUE klass, rb_alloc_func_t func)
Sets the allocator function of a class.
VALUE rb_f_notimplement(int argc, const VALUE *argv, VALUE obj, VALUE marker)
Raises rb_eNotImpError.
Definition vm_method.c:367
int rb_obj_respond_to(VALUE obj, ID mid, int private_p)
Identical to rb_respond_to(), except it additionally takes the visibility parameter.
Definition vm_method.c:2789
ID rb_intern(const char *name)
Finds or creates a symbol of the given name.
Definition symbol.c:789
VALUE rb_sym2str(VALUE id)
Identical to rb_id2str(), except it takes an instance of rb_cSymbol rather than an ID.
Definition symbol.c:942
const char * rb_id2name(ID id)
Retrieves the name mapped to the given id.
Definition symbol.c:959
void rb_define_const(VALUE klass, const char *name, VALUE val)
Defines a Ruby level constant under a namespace.
Definition variable.c:3440
static bool rb_ractor_shareable_p(VALUE obj)
Queries if multiple Ractors can share the passed object or not.
Definition ractor.h:249
void * rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
(Re-)acquires the GVL.
Definition thread.c:1761
#define strtod(s, e)
Just another name of ruby_strtod.
Definition util.h:212
void ruby_qsort(void *, const size_t, const size_t, int(*)(const void *, const void *, void *), void *)
Reentrant implementation of quick sort.
#define strdup(s)
Just another name of ruby_strdup.
Definition util.h:176
VALUE rb_sprintf(const char *fmt,...)
Ruby's extended sprintf(3).
Definition sprintf.c:1219
VALUE rb_str_catf(VALUE dst, const char *fmt,...)
Identical to rb_sprintf(), except it renders the output to the specified object rather than creating ...
Definition sprintf.c:1242
#define RB_BLOCK_CALL_FUNC_ARGLIST(yielded_arg, callback_arg)
Shim for block function parameters.
Definition iterator.h:58
VALUE rb_yield_values(int n,...)
Identical to rb_yield(), except it takes variadic number of parameters and pass them to the block.
Definition vm_eval.c:1369
VALUE rb_yield(VALUE val)
Yields the block.
Definition vm_eval.c:1357
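As a sketch of the yielding helpers above (not taken from gc.c), a C-implemented method can pass several values to the caller's block with rb_yield_values(); each_square is a hypothetical name.

#include "ruby.h"

static VALUE
each_square(VALUE self)
{
    for (int i = 0; i < 3; i++) {
        /* Equivalent to `yield i, i * i` in Ruby. */
        rb_yield_values(2, INT2FIX(i), INT2FIX(i * i));
    }
    return self;
}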
#define RBIMPL_ATTR_MAYBE_UNUSED()
Wraps (or simulates) [[maybe_unused]].
#define MEMZERO(p, type, n)
Handy macro to erase a region of memory.
Definition memory.h:354
#define MEMMOVE(p1, p2, type, n)
Handy macro to call memmove.
Definition memory.h:378
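Both macros take an element type and an element count rather than a raw byte size; a minimal sketch with hypothetical helper names:

#include "ruby.h"

static void
reset_slots(VALUE *slots, size_t len)
{
    MEMZERO(slots, VALUE, len);     /* zeroes sizeof(VALUE) * len bytes */
}

static void
shift_slots(VALUE *dst, const VALUE *src, size_t len)
{
    MEMMOVE(dst, src, VALUE, len);  /* memmove(); overlapping regions are fine */
}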
VALUE rb_proc_new(type *q, VALUE w)
Creates a rb_cProc instance.
VALUE type(ANYARGS)
ANYARGS-ed function type.
VALUE rb_ensure(type *q, VALUE w, type *e, VALUE r)
An equivalent of ensure clause.
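rb_ensure() mirrors Ruby's begin/ensure from C: the second callback runs even if the first one raises. A hedged sketch with invented names (body, cleanup, read_and_close):

#include "ruby.h"

static VALUE
body(VALUE io)
{
    return rb_funcall(io, rb_intern("read"), 0);
}

static VALUE
cleanup(VALUE io)
{
    return rb_funcall(io, rb_intern("close"), 0);  /* always runs */
}

static VALUE
read_and_close(VALUE io)
{
    return rb_ensure(body, io, cleanup, io);
}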
#define PRI_PIDT_PREFIX
A rb_sprintf() format prefix to be used for a pid_t parameter.
Definition pid_t.h:38
#define RARRAY_LEN
Just another name of rb_array_len.
Definition rarray.h:68
#define RARRAY_CONST_PTR_TRANSIENT
Just another name of rb_array_const_ptr_transient.
Definition rarray.h:70
#define RARRAY(obj)
Convenient casting macro.
Definition rarray.h:56
static bool RARRAY_TRANSIENT_P(VALUE ary)
Queries if the array is a transient array.
Definition rarray.h:364
#define RARRAY_AREF(a, i)
Definition rarray.h:583
static VALUE RBASIC_CLASS(VALUE obj)
Queries the class of an object.
Definition rbasic.h:152
#define RBASIC(obj)
Convenient casting macro.
Definition rbasic.h:40
#define RCLASS_SUPER
Just another name of rb_class_get_superclass.
Definition rclass.h:44
#define RCLASS(obj)
Convenient casting macro.
Definition rclass.h:38
#define DATA_PTR(obj)
Convenient getter macro.
Definition rdata.h:71
#define RDATA(obj)
Convenient casting macro.
Definition rdata.h:63
#define RUBY_DEFAULT_FREE
This is a value you can set to RData::dfree.
Definition rdata.h:82
void(* RUBY_DATA_FUNC)(void *)
This is the type of callbacks registered to RData.
Definition rdata.h:108
#define RFILE(obj)
Convenient casting macro.
Definition rfile.h:50
#define USE_RGENGC
Definition rgengc.h:44
#define RGENGC_WB_PROTECTED_OBJECT
This is a compile-time flag to enable/disable write barrier for struct RObject.
Definition rgengc.h:118
#define RHASH_SIZE(h)
Queries the size of the hash.
Definition rhash.h:82
#define RHASH_EMPTY_P(h)
Checks if the hash is empty.
Definition rhash.h:92
#define RMATCH(obj)
Convenient casting macro.
Definition rmatch.h:37
#define ROBJECT(obj)
Convenient casting macro.
Definition robject.h:43
static VALUE * ROBJECT_IVPTR(VALUE obj)
Queries the instance variables.
Definition robject.h:162
#define RREGEXP_PTR(obj)
Convenient accessor macro.
Definition rregexp.h:45
static long RSTRING_LEN(VALUE str)
Queries the length of the string.
Definition rstring.h:484
static char * RSTRING_PTR(VALUE str)
Queries the contents pointer of the string.
Definition rstring.h:498
static bool RTYPEDDATA_P(VALUE obj)
Checks whether the passed object is RTypedData or RData.
Definition rtypeddata.h:540
#define RTYPEDDATA_DATA(v)
Convenient getter macro.
Definition rtypeddata.h:102
#define TypedData_Get_Struct(obj, type, data_type, sval)
Obtains a C struct from inside of a wrapper Ruby object.
Definition rtypeddata.h:507
#define TypedData_Make_Struct(klass, type, data_type, sval)
Identical to TypedData_Wrap_Struct, except it allocates a new data region internally instead of taking an existing one.
Definition rtypeddata.h:489
static const struct rb_data_type_struct * RTYPEDDATA_TYPE(VALUE obj)
Queries for the type of given object.
Definition rtypeddata.h:563
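The TypedData entries above are how C structs get wrapped as GC-managed Ruby objects: the rb_data_type_t describes marking, freeing and (optionally) compaction support. The following is a minimal sketch built around a hypothetical point struct, not anything defined in gc.c; registering point_alloc with rb_define_alloc_func() (also listed above) would let the class allocate it.

#include "ruby.h"

struct point { double x, y; };

static const rb_data_type_t point_type = {
    .wrap_struct_name = "point",               /* shows up in inspection/GC tooling */
    .function = {
        .dmark = NULL,                         /* no VALUEs inside, nothing to mark */
        .dfree = RUBY_TYPED_DEFAULT_FREE,      /* just xfree() the struct           */
        .dsize = NULL,
    },
    .flags = RUBY_TYPED_FREE_IMMEDIATELY,
};

static VALUE
point_alloc(VALUE klass)
{
    struct point *p;
    VALUE obj = TypedData_Make_Struct(klass, struct point, &point_type, p);
    p->x = p->y = 0.0;                         /* Make_Struct zero-fills; be explicit anyway */
    return obj;
}

static VALUE
point_x(VALUE self)
{
    struct point *p;
    TypedData_Get_Struct(self, struct point, &point_type, p);
    return DBL2NUM(p->x);
}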
const char * rb_obj_classname(VALUE obj)
Queries the name of the class of the passed object.
Definition variable.c:325
int ruby_native_thread_p(void)
Queries if the thread calling this function is a Ruby thread.
Definition thread.c:5427
static bool RB_SPECIAL_CONST_P(VALUE obj)
Checks if the given object is of enum ruby_special_consts.
static VALUE rb_special_const_p(VALUE obj)
Identical to RB_SPECIAL_CONST_P, except it returns a VALUE.
#define RTEST
This is an old name of RB_TEST.
Defines the old _() macro.
#define _(args)
This was a transition path from K&R to ANSI.
Definition stdarg.h:35
MEMO.
Definition imemo.h:104
Ruby's array.
Definition rarray.h:176
Ruby object's base components.
Definition rbasic.h:64
const VALUE klass
Class of an object.
Definition rbasic.h:88
VALUE flags
Per-object flags.
Definition rbasic.h:77
Definition class.h:66
Internal header for Complex.
Definition complex.h:13
Definition rdata.h:124
RUBY_DATA_FUNC dmark
This function is called when the object is experiencing GC marks.
Definition rdata.h:138
Ruby's File and IO.
Definition rfile.h:35
struct rb_io_t * fptr
IO's specific fields.
Definition rfile.h:41
Definition hash.h:43
Regular expression execution context.
Definition rmatch.h:94
VALUE regexp
The expression of this match.
Definition rmatch.h:112
VALUE str
The target string that the match was made against.
Definition rmatch.h:102
Definition gc.c:573
Ruby's ordinal objects.
Definition robject.h:94
VALUE ary[ROBJECT_EMBED_LEN_MAX]
Embedded instance variables.
Definition robject.h:136
Internal header for Rational.
Definition rational.h:17
Ruby's regular expression.
Definition rregexp.h:60
const VALUE src
Source code of this expression.
Definition rregexp.h:74
Ruby's String.
Definition rstring.h:231
union RString::@50 as
String's specific fields.
struct RString::@50::@51 heap
Strings that use a separate memory region for their contents use this pattern.
union RString::@50::@51::@53 aux
Auxiliary info.
VALUE shared
Parent of the string.
Definition rstring.h:276
"Typed" user data.
Definition rtypeddata.h:340
const rb_data_type_t * type
This field stores various information about how Ruby should handle the data.
Definition rtypeddata.h:350
Definition gc.c:582
Definition gc.c:1195
Definition gc.c:667
Definition vm_core.h:247
Definition method.h:62
Definition constant.h:33
CREF (Class REFerence)
Definition method.h:44
Definition class.h:32
This is the struct that holds necessary info for a struct.
Definition rtypeddata.h:190
RUBY_DATA_FUNC dcompact
This function is called when the object is relocated.
Definition rtypeddata.h:241
struct rb_data_type_struct::@54 function
Function pointers.
const char * wrap_struct_name
Name of structs of this kind.
Definition rtypeddata.h:197
RUBY_DATA_FUNC dmark
This function is called when the object is experiencing GC marks.
Definition rtypeddata.h:211
VALUE ecopts
Flags as Ruby hash.
Definition io.h:134
Ruby's IO, metadata and buffers.
Definition io.h:138
VALUE writeconv_asciicompat
This is, when set, an instance of rb_cString which holds the "common" encoding.
Definition io.h:200
VALUE pathv
Pathname for the file.
Definition io.h:159
struct rb_io_enc_t encs
Decomposed encoding flags.
Definition io.h:180
VALUE write_lock
This is a Ruby level mutex.
Definition io.h:224
VALUE self
The IO's Ruby level counterpart.
Definition io.h:141
VALUE writeconv_pre_ecopts
Value of ::rb_io_t::rb_io_enc_t::ecopts stored right before initialising rb_io_t::writeconv.
Definition io.h:215
VALUE tied_io_for_writing
Duplex IO object, if set.
Definition io.h:178
VALUE timeout
The timeout associated with this IO when performing blocking operations.
Definition io.h:229
Definition method.h:54
rb_cref_t * cref
Class reference; should be marked.
Definition method.h:136
const rb_iseq_t * iseqptr
Iseq pointer; should be separated from iseqval.
Definition method.h:135
Internal header for Class.
Definition class.h:26
Represents the region of a capture group.
Definition rmatch.h:65
Represents a match.
Definition rmatch.h:71
int char_offset_num_allocated
Number of rmatch_offset that rmatch::char_offset holds.
Definition rmatch.h:82
struct rmatch_offset * char_offset
Capture group offsets, in C array.
Definition rmatch.h:79
struct re_registers regs
"Registers" of a match.
Definition rmatch.h:76
Definition st.h:79
IFUNC (Internal FUNCtion)
Definition imemo.h:84
SVAR (Special VARiable)
Definition imemo.h:53
THROW_DATA.
Definition imemo.h:62
intptr_t SIGNED_VALUE
A signed integer type that has the same width as VALUE.
Definition value.h:63
uintptr_t ID
Type that represents a Ruby identifier such as a variable name.
Definition value.h:52
#define SIZEOF_VALUE
Identical to sizeof(VALUE), except it is a macro that can also be used inside of preprocessor directives.
Definition value.h:69
uintptr_t VALUE
Type that represents a Ruby object.
Definition value.h:40
static enum ruby_value_type RB_BUILTIN_TYPE(VALUE obj)
Queries the type of the object.
Definition value_type.h:181
static bool RB_TYPE_P(VALUE obj, enum ruby_value_type t)
Queries if the given object is of given type.
Definition value_type.h:375
ruby_value_type
C-level type of an object.
Definition value_type.h:112
@ RUBY_T_MASK
Bitmask of ruby_value_type.
Definition value_type.h:144
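Finally, a hedged sketch of how the type predicates above combine: special constants (nil, true, false, Fixnums, static Symbols, Flonums) have no heap slot, so RB_BUILTIN_TYPE() should only be applied after RB_SPECIAL_CONST_P() has been ruled out. classify is an invented name.

#include "ruby.h"

static const char *
classify(VALUE obj)
{
    if (RB_SPECIAL_CONST_P(obj)) return "immediate/special constant";
    if (RB_TYPE_P(obj, RUBY_T_STRING)) return "string";
    switch (RB_BUILTIN_TYPE(obj)) {
      case RUBY_T_ARRAY: return "array";
      case RUBY_T_HASH:  return "hash";
      default:           return "other heap object";
    }
}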