14#define rb_data_object_alloc rb_data_object_alloc
15#define rb_data_typed_object_alloc rb_data_typed_object_alloc
17#include "ruby/internal/config.h"
24#define sighandler_t ruby_sighandler_t
31#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
32# include "wasm/setjmp.h"
33# include "wasm/machine.h"
41#ifndef HAVE_MALLOC_USABLE_SIZE
43# define HAVE_MALLOC_USABLE_SIZE
44# define malloc_usable_size(a) _msize(a)
45# elif defined HAVE_MALLOC_SIZE
46# define HAVE_MALLOC_USABLE_SIZE
47# define malloc_usable_size(a) malloc_size(a)
51#ifdef HAVE_MALLOC_USABLE_SIZE
52# ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
54# elif defined(HAVE_MALLOC_H)
56# elif defined(HAVE_MALLOC_NP_H)
57# include <malloc_np.h>
58# elif defined(HAVE_MALLOC_MALLOC_H)
59# include <malloc/malloc.h>
63#ifdef HAVE_MALLOC_TRIM
68# include <emscripten/emmalloc.h>
72#if !defined(PAGE_SIZE) && defined(HAVE_SYS_USER_H)
82#ifdef HAVE_SYS_RESOURCE_H
83# include <sys/resource.h>
86#if defined _WIN32 || defined __CYGWIN__
88#elif defined(HAVE_POSIX_MEMALIGN)
89#elif defined(HAVE_MEMALIGN)
96#include <emscripten.h>
99#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
100# include <mach/task.h>
101# include <mach/mach_init.h>
102# include <mach/mach_port.h>
108#include "debug_counter.h"
109#include "eval_intern.h"
112#include "internal/class.h"
113#include "internal/compile.h"
114#include "internal/complex.h"
115#include "internal/cont.h"
116#include "internal/error.h"
117#include "internal/eval.h"
118#include "internal/gc.h"
119#include "internal/hash.h"
120#include "internal/imemo.h"
121#include "internal/io.h"
122#include "internal/numeric.h"
123#include "internal/object.h"
124#include "internal/proc.h"
125#include "internal/rational.h"
126#include "internal/sanitizers.h"
127#include "internal/struct.h"
128#include "internal/symbol.h"
129#include "internal/thread.h"
130#include "internal/variable.h"
131#include "internal/warnings.h"
141#include "ruby_assert.h"
142#include "ruby_atomic.h"
146#include "vm_callinfo.h"
147#include "ractor_core.h"
152#define rb_setjmp(env) RUBY_SETJMP(env)
153#define rb_jmp_buf rb_jmpbuf_t
154#undef rb_data_object_wrap
156#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
157#define MAP_ANONYMOUS MAP_ANON
161static size_t malloc_offset = 0;
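/*
 * A standalone sketch of the idea behind gc_compute_malloc_offset() and
 * rb_malloc_grow_capa() below, assuming a malloc that serves power-of-two
 * buckets: grow the capacity to the next power of two, then give back the few
 * bytes of allocator bookkeeping (the probed "malloc offset") so the request
 * fills a bucket exactly.  Not part of gc.c; demo_next_pow2 and demo_grow_capa
 * are hypothetical names, and <stddef.h> is assumed.
 */
static size_t
demo_next_pow2(size_t n)
{
    size_t p = 1;
    while (p < n) p <<= 1;
    return p;
}

/* probed_offset plays the role of malloc_offset above (typically 0, 8 or 16). */
static size_t
demo_grow_capa(size_t current_elems, size_t elem_size, size_t probed_offset)
{
    size_t bytes = (current_elems < 4 ? 4 : current_elems) * elem_size;
    size_t grown = demo_next_pow2(bytes * 2);       /* at least double, rounded up */
    return (grown - probed_offset) / elem_size;     /* e.g. (10, 8, 8) -> 31 elems */
}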
162#if defined(HAVE_MALLOC_USABLE_SIZE)
gc_compute_malloc_offset(void)
    for (offset = 0; offset <= 16; offset += 8) {
        size_t allocated = (64 - offset);
        void *test_ptr = malloc(allocated);
        size_t wasted = malloc_usable_size(test_ptr) - allocated;

gc_compute_malloc_offset(void)

rb_malloc_grow_capa(size_t current, size_t type_size)
    size_t current_capacity = current;
    if (current_capacity < 4) {
        current_capacity = 4;
    current_capacity *= type_size;

    size_t new_capacity = (current_capacity * 2);

    if (rb_popcount64(new_capacity) != 1) {
        new_capacity = (size_t)(1 << (64 - nlz_int64(new_capacity)));

    new_capacity -= malloc_offset;
    new_capacity /= type_size;
    if (current > new_capacity) {
        rb_bug("rb_malloc_grow_capa: current_capacity=%"PRIuSIZE", new_capacity=%"PRIuSIZE", malloc_offset=%"PRIuSIZE"", current, new_capacity, malloc_offset);
static inline struct rbimpl_size_mul_overflow_tag
size_add_overflow(size_t x, size_t y)
#elif __has_builtin(__builtin_add_overflow)
    p = __builtin_add_overflow(x, y, &z);
#elif defined(DSIZE_T)
    return (struct rbimpl_size_mul_overflow_tag) { p, z, };

static inline struct rbimpl_size_mul_overflow_tag
size_mul_add_overflow(size_t x, size_t y, size_t z)
    struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
    struct rbimpl_size_mul_overflow_tag u = size_add_overflow(t.right, z);
    return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left, u.right };

static inline struct rbimpl_size_mul_overflow_tag
size_mul_add_mul_overflow(size_t x, size_t y, size_t z, size_t w)
    struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
    struct rbimpl_size_mul_overflow_tag u = rbimpl_size_mul_overflow(z, w);
    struct rbimpl_size_mul_overflow_tag v = size_add_overflow(t.right, u.right);
    return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left || v.left, v.right };

PRINTF_ARGS(NORETURN(static void gc_raise(VALUE, const char*, ...)), 2, 3);
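/*
 * Standalone sketch (not part of gc.c) of the (overflowed?, value) pattern the
 * size_*_overflow helpers above return.  Assumes <stdbool.h>, <stddef.h> and
 * <stdint.h>; demo_add_tag / demo_size_add are hypothetical names, and the
 * GCC/Clang builtin is only one of the branches the real helpers select from.
 */
struct demo_add_tag { bool left; size_t right; };      /* left = overflowed */

static struct demo_add_tag
demo_size_add(size_t x, size_t y)
{
#if defined(__clang__) || (defined(__GNUC__) && __GNUC__ >= 5)
    size_t z;
    bool p = __builtin_add_overflow(x, y, &z);         /* GCC 5+/Clang builtin */
#else
    size_t z = x + y;              /* unsigned addition wraps on overflow     */
    bool p = (z < x);              /* equivalently: x > SIZE_MAX - y          */
#endif
    return (struct demo_add_tag) { p, z };
}
/* demo_size_add(SIZE_MAX, 1).left is true; demo_size_add(2, 3).right is 5. */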
size_mul_or_raise(size_t x, size_t y, VALUE exc)
    struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
    if (LIKELY(!t.left)) {
    else if (rb_during_gc()) {
            "integer overflow: %"PRIuSIZE
            x, y, (size_t)SIZE_MAX);

rb_size_mul_or_raise(size_t x, size_t y, VALUE exc)
    return size_mul_or_raise(x, y, exc);

size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
    struct rbimpl_size_mul_overflow_tag t = size_mul_add_overflow(x, y, z);
    if (LIKELY(!t.left)) {
    else if (rb_during_gc()) {
            "integer overflow: %"PRIuSIZE
            x, y, z, (size_t)SIZE_MAX);

rb_size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
    return size_mul_add_or_raise(x, y, z, exc);

size_mul_add_mul_or_raise(size_t x, size_t y, size_t z, size_t w, VALUE exc)
    struct rbimpl_size_mul_overflow_tag t = size_mul_add_mul_overflow(x, y, z, w);
    if (LIKELY(!t.left)) {
    else if (rb_during_gc()) {
            "integer overflow: %"PRIdSIZE
            x, y, z, w, (size_t)SIZE_MAX);
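/*
 * Illustrative caller (not part of gc.c): computing a byte count with the
 * overflow-checked wrapper before allocating, so a huge element count raises
 * instead of silently wrapping around.  demo_alloc_array is a hypothetical
 * name and rb_eArgError is used only as an example exception class.
 */
static void *
demo_alloc_array(size_t nelems, size_t elem_size)
{
    /* raises on overflow instead of returning a wrapped-around value */
    size_t bytes = rb_size_mul_or_raise(nelems, elem_size, rb_eArgError);
    return ruby_xmalloc(bytes);
}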
341#if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL
343volatile VALUE rb_gc_guarded_val;
345rb_gc_guarded_ptr_val(
volatile VALUE *ptr,
VALUE val)
347 rb_gc_guarded_val = val;
353#ifndef GC_HEAP_INIT_SLOTS
354#define GC_HEAP_INIT_SLOTS 10000
356#ifndef GC_HEAP_FREE_SLOTS
357#define GC_HEAP_FREE_SLOTS 4096
359#ifndef GC_HEAP_GROWTH_FACTOR
360#define GC_HEAP_GROWTH_FACTOR 1.8
362#ifndef GC_HEAP_GROWTH_MAX_SLOTS
363#define GC_HEAP_GROWTH_MAX_SLOTS 0
365#ifndef GC_HEAP_REMEMBERED_WB_UNPROTECTED_OBJECTS_LIMIT_RATIO
366# define GC_HEAP_REMEMBERED_WB_UNPROTECTED_OBJECTS_LIMIT_RATIO 0.01
368#ifndef GC_HEAP_OLDOBJECT_LIMIT_FACTOR
369#define GC_HEAP_OLDOBJECT_LIMIT_FACTOR 2.0
372#ifndef GC_HEAP_FREE_SLOTS_MIN_RATIO
373#define GC_HEAP_FREE_SLOTS_MIN_RATIO 0.20
375#ifndef GC_HEAP_FREE_SLOTS_GOAL_RATIO
376#define GC_HEAP_FREE_SLOTS_GOAL_RATIO 0.40
378#ifndef GC_HEAP_FREE_SLOTS_MAX_RATIO
379#define GC_HEAP_FREE_SLOTS_MAX_RATIO 0.65
382#ifndef GC_MALLOC_LIMIT_MIN
383#define GC_MALLOC_LIMIT_MIN (16 * 1024 * 1024 )
385#ifndef GC_MALLOC_LIMIT_MAX
386#define GC_MALLOC_LIMIT_MAX (32 * 1024 * 1024 )
388#ifndef GC_MALLOC_LIMIT_GROWTH_FACTOR
389#define GC_MALLOC_LIMIT_GROWTH_FACTOR 1.4
392#ifndef GC_OLDMALLOC_LIMIT_MIN
393#define GC_OLDMALLOC_LIMIT_MIN (16 * 1024 * 1024 )
395#ifndef GC_OLDMALLOC_LIMIT_GROWTH_FACTOR
396#define GC_OLDMALLOC_LIMIT_GROWTH_FACTOR 1.2
398#ifndef GC_OLDMALLOC_LIMIT_MAX
399#define GC_OLDMALLOC_LIMIT_MAX (128 * 1024 * 1024 )
402#ifndef GC_CAN_COMPILE_COMPACTION
404# define GC_CAN_COMPILE_COMPACTION 0
406# define GC_CAN_COMPILE_COMPACTION 1
410#ifndef PRINT_MEASURE_LINE
411#define PRINT_MEASURE_LINE 0
413#ifndef PRINT_ENTER_EXIT_TICK
414#define PRINT_ENTER_EXIT_TICK 0
416#ifndef PRINT_ROOT_TICKS
417#define PRINT_ROOT_TICKS 0
420#define USE_TICK_T (PRINT_ENTER_EXIT_TICK || PRINT_MEASURE_LINE || PRINT_ROOT_TICKS)
424 size_t size_pool_init_slots[SIZE_POOL_COUNT];
425 size_t heap_free_slots;
426 double growth_factor;
427 size_t growth_max_slots;
429 double heap_free_slots_min_ratio;
430 double heap_free_slots_goal_ratio;
431 double heap_free_slots_max_ratio;
432 double uncollectible_wb_unprotected_objects_limit_ratio;
433 double oldobject_limit_factor;
435 size_t malloc_limit_min;
436 size_t malloc_limit_max;
437 double malloc_limit_growth_factor;
439 size_t oldmalloc_limit_min;
440 size_t oldmalloc_limit_max;
441 double oldmalloc_limit_growth_factor;
449 GC_HEAP_GROWTH_FACTOR,
450 GC_HEAP_GROWTH_MAX_SLOTS,
452 GC_HEAP_FREE_SLOTS_MIN_RATIO,
453 GC_HEAP_FREE_SLOTS_GOAL_RATIO,
454 GC_HEAP_FREE_SLOTS_MAX_RATIO,
455 GC_HEAP_REMEMBERED_WB_UNPROTECTED_OBJECTS_LIMIT_RATIO,
456 GC_HEAP_OLDOBJECT_LIMIT_FACTOR,
460 GC_MALLOC_LIMIT_GROWTH_FACTOR,
462 GC_OLDMALLOC_LIMIT_MIN,
463 GC_OLDMALLOC_LIMIT_MAX,
464 GC_OLDMALLOC_LIMIT_GROWTH_FACTOR,
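/*
 * These initial values are the defaults that the RUBY_GC_* environment
 * variables can override at startup (for example RUBY_GC_HEAP_GROWTH_FACTOR,
 * RUBY_GC_HEAP_FREE_SLOTS, RUBY_GC_MALLOC_LIMIT and RUBY_GC_OLDMALLOC_LIMIT_MIN
 * map onto the correspondingly named gc_params fields above); the parsing
 * itself lives in the GC parameter setup later in this file.
 */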
485#define RGENGC_DEBUG -1
487#define RGENGC_DEBUG 0
490#if RGENGC_DEBUG < 0 && !defined(_MSC_VER)
491# define RGENGC_DEBUG_ENABLED(level) (-(RGENGC_DEBUG) >= (level) && ruby_rgengc_debug >= (level))
492#elif defined(HAVE_VA_ARGS_MACRO)
493# define RGENGC_DEBUG_ENABLED(level) ((RGENGC_DEBUG) >= (level))
495# define RGENGC_DEBUG_ENABLED(level) 0
497int ruby_rgengc_debug;
507#ifndef RGENGC_CHECK_MODE
508#define RGENGC_CHECK_MODE 0
512#define GC_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(RGENGC_CHECK_MODE > 0, expr, #expr)
519#ifndef RGENGC_PROFILE
520#define RGENGC_PROFILE 0
529#ifndef RGENGC_ESTIMATE_OLDMALLOC
530#define RGENGC_ESTIMATE_OLDMALLOC 1
536#ifndef RGENGC_FORCE_MAJOR_GC
537#define RGENGC_FORCE_MAJOR_GC 0
540#ifndef GC_PROFILE_MORE_DETAIL
541#define GC_PROFILE_MORE_DETAIL 0
543#ifndef GC_PROFILE_DETAIL_MEMORY
544#define GC_PROFILE_DETAIL_MEMORY 0
546#ifndef GC_ENABLE_LAZY_SWEEP
547#define GC_ENABLE_LAZY_SWEEP 1
549#ifndef CALC_EXACT_MALLOC_SIZE
550#define CALC_EXACT_MALLOC_SIZE USE_GC_MALLOC_OBJ_INFO_DETAILS
552#if defined(HAVE_MALLOC_USABLE_SIZE) || CALC_EXACT_MALLOC_SIZE > 0
553#ifndef MALLOC_ALLOCATED_SIZE
554#define MALLOC_ALLOCATED_SIZE 0
557#define MALLOC_ALLOCATED_SIZE 0
559#ifndef MALLOC_ALLOCATED_SIZE_CHECK
560#define MALLOC_ALLOCATED_SIZE_CHECK 0
563#ifndef GC_DEBUG_STRESS_TO_CLASS
564#define GC_DEBUG_STRESS_TO_CLASS RUBY_DEBUG
567#ifndef RGENGC_OBJ_INFO
568#define RGENGC_OBJ_INFO (RGENGC_DEBUG | RGENGC_CHECK_MODE)
572 GPR_FLAG_NONE = 0x000,
574 GPR_FLAG_MAJOR_BY_NOFREE = 0x001,
575 GPR_FLAG_MAJOR_BY_OLDGEN = 0x002,
576 GPR_FLAG_MAJOR_BY_SHADY = 0x004,
577 GPR_FLAG_MAJOR_BY_FORCE = 0x008,
578#if RGENGC_ESTIMATE_OLDMALLOC
579 GPR_FLAG_MAJOR_BY_OLDMALLOC = 0x020,
581 GPR_FLAG_MAJOR_MASK = 0x0ff,
584 GPR_FLAG_NEWOBJ = 0x100,
585 GPR_FLAG_MALLOC = 0x200,
586 GPR_FLAG_METHOD = 0x400,
587 GPR_FLAG_CAPI = 0x800,
588 GPR_FLAG_STRESS = 0x1000,
591 GPR_FLAG_IMMEDIATE_SWEEP = 0x2000,
592 GPR_FLAG_HAVE_FINALIZE = 0x4000,
593 GPR_FLAG_IMMEDIATE_MARK = 0x8000,
594 GPR_FLAG_FULL_MARK = 0x10000,
595 GPR_FLAG_COMPACT = 0x20000,
598 (GPR_FLAG_FULL_MARK | GPR_FLAG_IMMEDIATE_MARK |
599 GPR_FLAG_IMMEDIATE_SWEEP | GPR_FLAG_CAPI),
600} gc_profile_record_flag;
606 double gc_invoke_time;
608 size_t heap_total_objects;
609 size_t heap_use_size;
610 size_t heap_total_size;
611 size_t moved_objects;
613#if GC_PROFILE_MORE_DETAIL
615 double gc_sweep_time;
617 size_t heap_use_pages;
618 size_t heap_live_objects;
619 size_t heap_free_objects;
621 size_t allocate_increase;
622 size_t allocate_limit;
625 size_t removing_objects;
626 size_t empty_objects;
627#if GC_PROFILE_DETAIL_MEMORY
633#if MALLOC_ALLOCATED_SIZE
634 size_t allocated_size;
637#if RGENGC_PROFILE > 0
639 size_t remembered_normal_objects;
640 size_t remembered_shady_objects;
648 shape_id_t original_shape_id;
651#define RMOVED(obj) ((struct RMoved *)(obj))
702 uint32_t _ractor_belonging_id;
711# define RVALUE_OVERHEAD (sizeof(RVALUE) - offsetof(RVALUE, _ractor_belonging_id))
713# define RVALUE_OVERHEAD (sizeof(RVALUE) - offsetof(RVALUE, file))
715# define RVALUE_OVERHEAD 0
721typedef uintptr_t bits_t;
    BITS_SIZE = sizeof(bits_t),
    BITS_BITLENGTH = ( BITS_SIZE * CHAR_BIT )
726#define popcount_bits rb_popcount_intptr
743#define STACK_CHUNK_SIZE 500
746 VALUE data[STACK_CHUNK_SIZE];
756 size_t unused_cache_size;
759#define SIZE_POOL_EDEN_HEAP(size_pool) (&(size_pool)->eden_heap)
760#define SIZE_POOL_TOMB_HEAP(size_pool) (&(size_pool)->tomb_heap)
typedef int (*gc_compact_compare_func)(const void *l, const void *r, void *d);
766 struct ccan_list_head pages;
769 uintptr_t compact_cursor_index;
778 size_t allocatable_pages;
781 size_t total_allocated_pages;
782 size_t total_freed_pages;
783 size_t force_major_gc_count;
784 size_t force_incremental_marking_finish_count;
785 size_t total_allocated_objects;
786 size_t total_freed_objects;
807#if MALLOC_ALLOCATED_SIZE
808 size_t allocated_size;
815 unsigned int mode : 2;
816 unsigned int immediate_sweep : 1;
817 unsigned int dont_gc : 1;
818 unsigned int dont_incremental : 1;
819 unsigned int during_gc : 1;
820 unsigned int during_compacting : 1;
821 unsigned int during_reference_updating : 1;
822 unsigned int gc_stressful: 1;
823 unsigned int has_newobj_hook: 1;
824 unsigned int during_minor_gc : 1;
825 unsigned int during_incremental_marking : 1;
826 unsigned int measure_gc : 1;
830 VALUE next_object_id;
843 size_t allocated_pages;
844 size_t allocatable_pages;
845 size_t sorted_length;
847 size_t freeable_pages;
851 VALUE deferred_final;
858 unsigned int latest_gc_info;
864#if GC_PROFILE_MORE_DETAIL
869 size_t minor_gc_count;
870 size_t major_gc_count;
871 size_t compact_count;
872 size_t read_barrier_faults;
873#if RGENGC_PROFILE > 0
874 size_t total_generated_normal_object_count;
875 size_t total_generated_shady_object_count;
876 size_t total_shade_operation_count;
877 size_t total_promoted_count;
878 size_t total_remembered_normal_object_count;
879 size_t total_remembered_shady_object_count;
881#if RGENGC_PROFILE >= 2
882 size_t generated_normal_object_count_types[
RUBY_T_MASK];
883 size_t generated_shady_object_count_types[
RUBY_T_MASK];
886 size_t remembered_normal_object_count_types[
RUBY_T_MASK];
887 size_t remembered_shady_object_count_types[
RUBY_T_MASK];
892 double gc_sweep_start_time;
893 size_t total_allocated_objects_at_gc_start;
894 size_t heap_used_at_gc_start;
898 uint64_t marking_time_ns;
900 uint64_t sweeping_time_ns;
901 struct timespec sweeping_start_time;
904 size_t weak_references_count;
905 size_t retained_weak_references_count;
909 VALUE gc_stress_mode;
914 size_t last_major_gc;
915 size_t uncollectible_wb_unprotected_objects;
916 size_t uncollectible_wb_unprotected_objects_limit;
918 size_t old_objects_limit;
920#if RGENGC_ESTIMATE_OLDMALLOC
921 size_t oldmalloc_increase;
922 size_t oldmalloc_increase_limit;
925#if RGENGC_CHECK_MODE >= 2
932 size_t considered_count_table[
T_MASK];
933 size_t moved_count_table[
T_MASK];
934 size_t moved_up_count_table[
T_MASK];
935 size_t moved_down_count_table[
T_MASK];
939 gc_compact_compare_func compare_func;
950#if GC_DEBUG_STRESS_TO_CLASS
951 VALUE stress_to_class;
    rb_darray(VALUE *) weak_references;
959#ifndef HEAP_PAGE_ALIGN_LOG
961#define HEAP_PAGE_ALIGN_LOG 16
964#define BASE_SLOT_SIZE sizeof(RVALUE)
966#define CEILDIV(i, mod) roomof(i, mod)
968 HEAP_PAGE_ALIGN = (1UL << HEAP_PAGE_ALIGN_LOG),
969 HEAP_PAGE_ALIGN_MASK = (~(~0UL << HEAP_PAGE_ALIGN_LOG)),
970 HEAP_PAGE_SIZE = HEAP_PAGE_ALIGN,
    HEAP_PAGE_OBJ_LIMIT = (unsigned int)((HEAP_PAGE_SIZE - sizeof(struct heap_page_header)) / BASE_SLOT_SIZE),
972 HEAP_PAGE_BITMAP_LIMIT = CEILDIV(CEILDIV(HEAP_PAGE_SIZE, BASE_SLOT_SIZE), BITS_BITLENGTH),
973 HEAP_PAGE_BITMAP_SIZE = (BITS_SIZE * HEAP_PAGE_BITMAP_LIMIT),
975#define HEAP_PAGE_ALIGN (1 << HEAP_PAGE_ALIGN_LOG)
976#define HEAP_PAGE_SIZE HEAP_PAGE_ALIGN
978#if !defined(INCREMENTAL_MARK_STEP_ALLOCATIONS)
979# define INCREMENTAL_MARK_STEP_ALLOCATIONS 500
982#undef INIT_HEAP_PAGE_ALLOC_USE_MMAP
static const bool HEAP_PAGE_ALLOC_USE_MMAP = false;
#elif defined(__wasm__)
static const bool HEAP_PAGE_ALLOC_USE_MMAP = false;
#elif HAVE_CONST_PAGE_SIZE
static const bool HEAP_PAGE_ALLOC_USE_MMAP = (PAGE_SIZE <= HEAP_PAGE_SIZE);
#elif defined(PAGE_MAX_SIZE) && (PAGE_MAX_SIZE <= HEAP_PAGE_SIZE)
static const bool HEAP_PAGE_ALLOC_USE_MMAP = true;
#elif defined(PAGE_SIZE)
# define INIT_HEAP_PAGE_ALLOC_USE_MMAP (PAGE_SIZE <= HEAP_PAGE_SIZE)
#elif defined(HAVE_SYSCONF) && defined(_SC_PAGE_SIZE)
# define INIT_HEAP_PAGE_ALLOC_USE_MMAP (sysconf(_SC_PAGE_SIZE) <= HEAP_PAGE_SIZE)
static const bool HEAP_PAGE_ALLOC_USE_MMAP = false;
1017#ifdef INIT_HEAP_PAGE_ALLOC_USE_MMAP
1019# define HEAP_PAGE_ALLOC_USE_MMAP (heap_page_alloc_use_mmap != false)
1021static bool heap_page_alloc_use_mmap;
1024#define RVALUE_AGE_BIT_COUNT 2
1025#define RVALUE_AGE_BIT_MASK (((bits_t)1 << RVALUE_AGE_BIT_COUNT) - 1)
1034 unsigned int before_sweep : 1;
1035 unsigned int has_remembered_objects : 1;
1036 unsigned int has_uncollectible_wb_unprotected_objects : 1;
1037 unsigned int in_tomb : 1;
1045 struct ccan_list_node page_node;
1047 bits_t wb_unprotected_bits[HEAP_PAGE_BITMAP_LIMIT];
1049 bits_t mark_bits[HEAP_PAGE_BITMAP_LIMIT];
1050 bits_t uncollectible_bits[HEAP_PAGE_BITMAP_LIMIT];
1051 bits_t marking_bits[HEAP_PAGE_BITMAP_LIMIT];
1053 bits_t remembered_bits[HEAP_PAGE_BITMAP_LIMIT];
1056 bits_t pinned_bits[HEAP_PAGE_BITMAP_LIMIT];
1057 bits_t age_bits[HEAP_PAGE_BITMAP_LIMIT * RVALUE_AGE_BIT_COUNT];
asan_lock_freelist(struct heap_page *page)
    asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));

asan_unlock_freelist(struct heap_page *page)
    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
1078#define GET_PAGE_BODY(x) ((struct heap_page_body *)((bits_t)(x) & ~(HEAP_PAGE_ALIGN_MASK)))
1079#define GET_PAGE_HEADER(x) (&GET_PAGE_BODY(x)->header)
1080#define GET_HEAP_PAGE(x) (GET_PAGE_HEADER(x)->page)
1082#define NUM_IN_PAGE(p) (((bits_t)(p) & HEAP_PAGE_ALIGN_MASK) / BASE_SLOT_SIZE)
1083#define BITMAP_INDEX(p) (NUM_IN_PAGE(p) / BITS_BITLENGTH )
1084#define BITMAP_OFFSET(p) (NUM_IN_PAGE(p) & (BITS_BITLENGTH-1))
1085#define BITMAP_BIT(p) ((bits_t)1 << BITMAP_OFFSET(p))
1088#define MARKED_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] & BITMAP_BIT(p))
1089#define MARK_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] | BITMAP_BIT(p))
1090#define CLEAR_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] & ~BITMAP_BIT(p))
1093#define GET_HEAP_MARK_BITS(x) (&GET_HEAP_PAGE(x)->mark_bits[0])
1094#define GET_HEAP_PINNED_BITS(x) (&GET_HEAP_PAGE(x)->pinned_bits[0])
1095#define GET_HEAP_UNCOLLECTIBLE_BITS(x) (&GET_HEAP_PAGE(x)->uncollectible_bits[0])
1096#define GET_HEAP_WB_UNPROTECTED_BITS(x) (&GET_HEAP_PAGE(x)->wb_unprotected_bits[0])
1097#define GET_HEAP_MARKING_BITS(x) (&GET_HEAP_PAGE(x)->marking_bits[0])
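/*
 * Standalone sketch (not part of gc.c) of the address arithmetic behind the
 * bitmap macros above: page bodies are HEAP_PAGE_ALIGN-aligned, so masking an
 * object address gives its slot number inside the page, which then splits
 * into a word index and a bit offset.  demo_bitmap_test is a hypothetical
 * name; the 64 KiB alignment matches this file's default HEAP_PAGE_ALIGN_LOG
 * of 16, and <stdint.h>/<limits.h> are assumed.
 */
static int
demo_bitmap_test(const uintptr_t *bits, uintptr_t obj, size_t base_slot_size)
{
    const uintptr_t align_mask = ((uintptr_t)1 << 16) - 1;        /* HEAP_PAGE_ALIGN_MASK */
    size_t num_in_page = (obj & align_mask) / base_slot_size;     /* NUM_IN_PAGE(p)       */
    size_t word = num_in_page / (sizeof(uintptr_t) * CHAR_BIT);   /* BITMAP_INDEX(p)      */
    size_t off  = num_in_page % (sizeof(uintptr_t) * CHAR_BIT);   /* BITMAP_OFFSET(p)     */
    return (bits[word] >> off) & 1;                               /* MARKED_IN_BITMAP     */
}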
1099#define GC_SWEEP_PAGES_FREEABLE_PER_STEP 3
1101#define RVALUE_AGE_BITMAP_INDEX(n) (NUM_IN_PAGE(n) / (BITS_BITLENGTH / RVALUE_AGE_BIT_COUNT))
1102#define RVALUE_AGE_BITMAP_OFFSET(n) ((NUM_IN_PAGE(n) % (BITS_BITLENGTH / RVALUE_AGE_BIT_COUNT)) * RVALUE_AGE_BIT_COUNT)
1104#define RVALUE_OLD_AGE 3
RVALUE_AGE_GET(VALUE obj)
    bits_t *age_bits = GET_HEAP_PAGE(obj)->age_bits;
    return (int)(age_bits[RVALUE_AGE_BITMAP_INDEX(obj)] >> RVALUE_AGE_BITMAP_OFFSET(obj)) & RVALUE_AGE_BIT_MASK;

RVALUE_AGE_SET(VALUE obj, int age)
    bits_t *age_bits = GET_HEAP_PAGE(obj)->age_bits;
    age_bits[RVALUE_AGE_BITMAP_INDEX(obj)] &= ~(RVALUE_AGE_BIT_MASK << (RVALUE_AGE_BITMAP_OFFSET(obj)));
    age_bits[RVALUE_AGE_BITMAP_INDEX(obj)] |= ((bits_t)age << RVALUE_AGE_BITMAP_OFFSET(obj));
    if (age == RVALUE_OLD_AGE) {
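/*
 * Standalone sketch (not part of gc.c) of the 2-bit age packing used by
 * RVALUE_AGE_GET/SET above: ages run from 0 to RVALUE_OLD_AGE (3), so each
 * slot needs two bits and BITS_BITLENGTH / 2 ages fit in one word.  The
 * demo_* names are hypothetical; <stdint.h> and <limits.h> are assumed.
 */
#define DEMO_AGE_BITS      2
#define DEMO_AGES_PER_WORD ((sizeof(uintptr_t) * CHAR_BIT) / DEMO_AGE_BITS)

static int
demo_age_get(const uintptr_t *age_bits, size_t slot)
{
    size_t word = slot / DEMO_AGES_PER_WORD;
    size_t off  = (slot % DEMO_AGES_PER_WORD) * DEMO_AGE_BITS;
    return (int)((age_bits[word] >> off) & ((1 << DEMO_AGE_BITS) - 1));
}

static void
demo_age_set(uintptr_t *age_bits, size_t slot, int age)
{
    size_t word = slot / DEMO_AGES_PER_WORD;
    size_t off  = (slot % DEMO_AGES_PER_WORD) * DEMO_AGE_BITS;
    age_bits[word] &= ~((uintptr_t)((1 << DEMO_AGE_BITS) - 1) << off);   /* clear the old 2-bit field */
    age_bits[word] |= (uintptr_t)age << off;                             /* store the new age         */
}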
1131#define rb_objspace (*rb_objspace_of(GET_VM()))
1132#define rb_objspace_of(vm) ((vm)->objspace)
1133#define unless_objspace(objspace) \
1134 rb_objspace_t *objspace; \
1135 rb_vm_t *unless_objspace_vm = GET_VM(); \
1136 if (unless_objspace_vm) objspace = unless_objspace_vm->objspace; \
1139#define ruby_initial_gc_stress gc_params.gc_stress
1141VALUE *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
1143#define malloc_limit objspace->malloc_params.limit
1144#define malloc_increase objspace->malloc_params.increase
1145#define malloc_allocated_size objspace->malloc_params.allocated_size
1146#define heap_pages_sorted objspace->heap_pages.sorted
1147#define heap_allocated_pages objspace->heap_pages.allocated_pages
1148#define heap_pages_sorted_length objspace->heap_pages.sorted_length
1149#define heap_pages_lomem objspace->heap_pages.range[0]
1150#define heap_pages_himem objspace->heap_pages.range[1]
1151#define heap_pages_freeable_pages objspace->heap_pages.freeable_pages
1152#define heap_pages_final_slots objspace->heap_pages.final_slots
1153#define heap_pages_deferred_final objspace->heap_pages.deferred_final
1154#define size_pools objspace->size_pools
1155#define during_gc objspace->flags.during_gc
1156#define finalizing objspace->atomic_flags.finalizing
1157#define finalizer_table objspace->finalizer_table
1158#define global_list objspace->global_list
1159#define ruby_gc_stressful objspace->flags.gc_stressful
1160#define ruby_gc_stress_mode objspace->gc_stress_mode
1161#if GC_DEBUG_STRESS_TO_CLASS
1162#define stress_to_class objspace->stress_to_class
1163#define set_stress_to_class(c) (stress_to_class = (c))
1165#define stress_to_class (objspace, 0)
1166#define set_stress_to_class(c) (objspace, (c))
1170#define dont_gc_on() (fprintf(stderr, "dont_gc_on@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 1)
1171#define dont_gc_off() (fprintf(stderr, "dont_gc_off@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 0)
1172#define dont_gc_set(b) (fprintf(stderr, "dont_gc_set(%d)@%s:%d\n", __FILE__, __LINE__), (int)b), objspace->flags.dont_gc = (b))
1173#define dont_gc_val() (objspace->flags.dont_gc)
1175#define dont_gc_on() (objspace->flags.dont_gc = 1)
1176#define dont_gc_off() (objspace->flags.dont_gc = 0)
1177#define dont_gc_set(b) (((int)b), objspace->flags.dont_gc = (b))
1178#define dont_gc_val() (objspace->flags.dont_gc)
static inline enum gc_mode
gc_mode_verify(enum gc_mode mode)
#if RGENGC_CHECK_MODE > 0
      case gc_mode_marking:
      case gc_mode_sweeping:
      case gc_mode_compacting:
        rb_bug("gc_mode_verify: unreachable (%d)", (int)mode);
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        if (SIZE_POOL_EDEN_HEAP(&size_pools[i])->sweeping_page) {

    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        count += SIZE_POOL_EDEN_HEAP(&size_pools[i])->total_pages;

    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        count += SIZE_POOL_EDEN_HEAP(&size_pools[i])->total_slots;

    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        count += SIZE_POOL_TOMB_HEAP(&size_pools[i])->total_pages;

    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        count += size_pools[i].allocatable_pages;

    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        int slot_size_multiple = size_pool->slot_size / BASE_SLOT_SIZE;
        count += size_pool->allocatable_pages * HEAP_PAGE_OBJ_LIMIT / slot_size_multiple;

    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        count += size_pool->total_allocated_pages;

    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        count += size_pool->total_freed_pages;

    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        count += size_pool->total_allocated_objects;

    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        count += size_pool->total_freed_objects;
1305#define gc_mode(objspace) gc_mode_verify((enum gc_mode)(objspace)->flags.mode)
1306#define gc_mode_set(objspace, m) ((objspace)->flags.mode = (unsigned int)gc_mode_verify(m))
1308#define is_marking(objspace) (gc_mode(objspace) == gc_mode_marking)
1309#define is_sweeping(objspace) (gc_mode(objspace) == gc_mode_sweeping)
1310#define is_full_marking(objspace) ((objspace)->flags.during_minor_gc == FALSE)
1311#define is_incremental_marking(objspace) ((objspace)->flags.during_incremental_marking != FALSE)
1312#define will_be_incremental_marking(objspace) ((objspace)->rgengc.need_major_gc != GPR_FLAG_NONE)
1313#define GC_INCREMENTAL_SWEEP_SLOT_COUNT 2048
1314#define GC_INCREMENTAL_SWEEP_POOL_SLOT_COUNT 1024
1315#define is_lazy_sweeping(objspace) (GC_ENABLE_LAZY_SWEEP && has_sweeping_pages(objspace))
1317#if SIZEOF_LONG == SIZEOF_VOIDP
1318# define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG)
1319#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
1320# define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \
1321 ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1))
1323# error not supported
1326#define RANY(o) ((RVALUE*)(o))
    void (*dfree)(void *);
1335#define RZOMBIE(o) ((struct RZombie *)(o))
1337#define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]
1339#if RUBY_MARK_FREE_DEBUG
1340int ruby_gc_debug_indent = 0;
1343int ruby_disable_gc = 0;
1344int ruby_enable_autocompact = 0;
1345#if RGENGC_CHECK_MODE
1346gc_compact_compare_func ruby_autocompact_compare_func;
void rb_iseq_mark_and_move(rb_iseq_t *iseq, bool reference_updating);
void rb_iseq_free(const rb_iseq_t *iseq);
size_t rb_iseq_memsize(const rb_iseq_t *iseq);
void rb_vm_update_references(void *ptr);

void rb_gcdebug_print_obj_condition(VALUE obj);

NORETURN(static void *gc_vraise(void *ptr));
NORETURN(static void gc_raise(VALUE exc, const char *fmt, ...));
NORETURN(static void negative_size_allocation_error(const char *));

static int garbage_collect(rb_objspace_t *, unsigned int reason);
static int gc_start(rb_objspace_t *objspace, unsigned int reason);
1366enum gc_enter_event {
1367 gc_enter_event_start,
1368 gc_enter_event_continue,
1369 gc_enter_event_rest,
1370 gc_enter_event_finalizer,
1371 gc_enter_event_rb_memerror,
static inline void gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev);
static inline void gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev);

static int gc_mark_stacked_objects_incremental(rb_objspace_t *, size_t count);
NO_SANITIZE("memory", static inline int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr));

static size_t obj_memsize_of(VALUE obj, int use_all_types);
static void gc_verify_internal_consistency(rb_objspace_t *objspace);

static double getrusage_time(void);
static inline void gc_prof_setup_new_record(rb_objspace_t *objspace, unsigned int reason);
static inline void gc_prof_mark_timer_start(rb_objspace_t *);
static inline void gc_prof_sweep_timer_start(rb_objspace_t *);
static inline void gc_prof_sweep_timer_stop(rb_objspace_t *);
1411#define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing) do { \
1412 if (gc_object_moved_p((_objspace), (VALUE)(_thing))) { \
1413 *(_type *)&(_thing) = (_type)RMOVED(_thing)->destination; \
1417#define UPDATE_IF_MOVED(_objspace, _thing) TYPED_UPDATE_IF_MOVED(_objspace, VALUE, _thing)
1419#define gc_prof_record(objspace) (objspace)->profile.current_record
1420#define gc_prof_enabled(objspace) ((objspace)->profile.run && (objspace)->profile.current_record)
1422#ifdef HAVE_VA_ARGS_MACRO
1423# define gc_report(level, objspace, ...) \
1424 if (!RGENGC_DEBUG_ENABLED(level)) {} else gc_report_body(level, objspace, __VA_ARGS__)
1426# define gc_report if (!RGENGC_DEBUG_ENABLED(0)) {} else gc_report_body
PRINTF_ARGS(static void gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...), 3, 4);
static const char *obj_info(VALUE obj);
static const char *obj_type_name(VALUE obj);

static void gc_finalize_deferred(void *dmy);
1452#if defined(__GNUC__) && defined(__i386__)
1453typedef unsigned long long tick_t;
1454#define PRItick "llu"
1458 unsigned long long int x;
1459 __asm__ __volatile__ (
"rdtsc" :
"=A" (x));
1463#elif defined(__GNUC__) && defined(__x86_64__)
1464typedef unsigned long long tick_t;
1465#define PRItick "llu"
1467static __inline__ tick_t
1470 unsigned long hi, lo;
1471 __asm__ __volatile__ (
"rdtsc" :
"=a"(lo),
"=d"(hi));
1472 return ((
unsigned long long)lo)|( ((
unsigned long long)hi)<<32);
1475#elif defined(__powerpc64__) && (GCC_VERSION_SINCE(4,8,0) || defined(__clang__))
1476typedef unsigned long long tick_t;
1477#define PRItick "llu"
1479static __inline__ tick_t
1482 unsigned long long val = __builtin_ppc_get_timebase();
1489#elif defined(__POWERPC__) && defined(__APPLE__)
1490typedef unsigned long long tick_t;
1491#define PRItick "llu"
1493static __inline__ tick_t
1496 unsigned long int upper, lower, tmp;
1497 # define mftbu(r) __asm__ volatile("mftbu %0" : "=r"(r))
1498 # define mftb(r) __asm__ volatile("mftb %0" : "=r"(r))
1503 }
while (tmp != upper);
1504 return ((tick_t)upper << 32) | lower;
1507#elif defined(__aarch64__) && defined(__GNUC__)
1508typedef unsigned long tick_t;
1511static __inline__ tick_t
1515 __asm__ __volatile__ (
"mrs %0, cntvct_el0" :
"=r" (val));
1520#elif defined(_WIN32) && defined(_MSC_VER)
1522typedef unsigned __int64 tick_t;
1523#define PRItick "llu"
1532typedef clock_t tick_t;
1533#define PRItick "llu"
1543typedef double tick_t;
1544#define PRItick "4.9f"
1549 return getrusage_time();
1552#error "choose tick type"
1555#define MEASURE_LINE(expr) do { \
1556 volatile tick_t start_time = tick(); \
1557 volatile tick_t end_time; \
1559 end_time = tick(); \
1560 fprintf(stderr, "0\t%"PRItick"\t%s\n", end_time - start_time, #expr); \
1564#define MEASURE_LINE(expr) expr
asan_unpoison_object_temporary(VALUE obj)
    void *ptr = asan_poisoned_object_p(obj);
    asan_unpoison_object(obj, false);

asan_poison_object_restore(VALUE obj, void *ptr)
        asan_poison_object(obj);

#define asan_unpoisoning_object(obj) \
    for (void *poisoned = asan_unpoison_object_temporary(obj), \
              *unpoisoning = &poisoned; \
         unpoisoning = asan_poison_object_restore(obj, poisoned))
1590#define FL_CHECK2(name, x, pred) \
1591 ((RGENGC_CHECK_MODE && SPECIAL_CONST_P(x)) ? \
1592 (rb_bug(name": SPECIAL_CONST (%p)", (void *)(x)), 0) : (pred))
1593#define FL_TEST2(x,f) FL_CHECK2("FL_TEST2", x, FL_TEST_RAW((x),(f)) != 0)
1594#define FL_SET2(x,f) FL_CHECK2("FL_SET2", x, RBASIC(x)->flags |= (f))
1595#define FL_UNSET2(x,f) FL_CHECK2("FL_UNSET2", x, RBASIC(x)->flags &= ~(f))
1597#define RVALUE_MARK_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), (obj))
1598#define RVALUE_PIN_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), (obj))
1599#define RVALUE_PAGE_MARKED(page, obj) MARKED_IN_BITMAP((page)->mark_bits, (obj))
1601#define RVALUE_WB_UNPROTECTED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), (obj))
1602#define RVALUE_UNCOLLECTIBLE_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), (obj))
1603#define RVALUE_MARKING_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), (obj))
1605#define RVALUE_PAGE_WB_UNPROTECTED(page, obj) MARKED_IN_BITMAP((page)->wb_unprotected_bits, (obj))
1606#define RVALUE_PAGE_UNCOLLECTIBLE(page, obj) MARKED_IN_BITMAP((page)->uncollectible_bits, (obj))
1607#define RVALUE_PAGE_MARKING(page, obj) MARKED_IN_BITMAP((page)->marking_bits, (obj))
check_rvalue_consistency_force(const VALUE obj, int terminate)
    RB_VM_LOCK_ENTER_NO_BARRIER();
        fprintf(stderr, "check_rvalue_consistency: %p is a special const.\n", (void *)obj);
    else if (!is_pointer_to_heap(objspace, (void *)obj)) {
        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
            ccan_list_for_each(&size_pool->tomb_heap.pages, page, page_node) {
                if (page->start <= (uintptr_t)obj &&
                    (uintptr_t)obj < (page->start + (page->total_slots * size_pool->slot_size))) {
                    fprintf(stderr, "check_rvalue_consistency: %p is in a tomb_heap (%p).\n",
                            (void *)obj, (void *)page);
        fprintf(stderr, "check_rvalue_consistency: %p is not a Ruby object.\n", (void *)obj);

        const int wb_unprotected_bit = RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
        const int uncollectible_bit = RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
        const int mark_bit = RVALUE_MARK_BITMAP(obj) != 0;
        const int marking_bit = RVALUE_MARKING_BITMAP(obj) != 0;
        const int remembered_bit = MARKED_IN_BITMAP(GET_HEAP_PAGE(obj)->remembered_bits, obj) != 0;
        const int age = RVALUE_AGE_GET((VALUE)obj);

        if (GET_HEAP_PAGE(obj)->flags.in_tomb) {
            fprintf(stderr, "check_rvalue_consistency: %s is in tomb page.\n", obj_info(obj));
            fprintf(stderr, "check_rvalue_consistency: %s is T_NONE.\n", obj_info(obj));
            fprintf(stderr, "check_rvalue_consistency: %s is T_ZOMBIE.\n", obj_info(obj));
        obj_memsize_of((VALUE)obj, FALSE);

        if (age > 0 && wb_unprotected_bit) {
            fprintf(stderr, "check_rvalue_consistency: %s is not WB protected, but age is %d > 0.\n", obj_info(obj), age);
        if (!is_marking(objspace) && uncollectible_bit && !mark_bit) {
            fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but is not marked while !gc.\n", obj_info(obj));
        if (!is_full_marking(objspace)) {
            if (uncollectible_bit && age != RVALUE_OLD_AGE && !wb_unprotected_bit) {
                fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but not old (age: %d) and not WB unprotected.\n",
                        obj_info(obj), age);
            if (remembered_bit && age != RVALUE_OLD_AGE) {
                fprintf(stderr, "check_rvalue_consistency: %s is remembered, but not old (age: %d).\n",
                        obj_info(obj), age);
        if (is_incremental_marking(objspace) && marking_bit) {
            if (!is_marking(objspace) && !mark_bit) {
                fprintf(stderr, "check_rvalue_consistency: %s is marking, but not marked.\n", obj_info(obj));
    RB_VM_LOCK_LEAVE_NO_BARRIER();

    if (err > 0 && terminate) {
        rb_bug("check_rvalue_consistency_force: there are %d errors.", err);
#if RGENGC_CHECK_MODE == 0
check_rvalue_consistency(const VALUE obj)

check_rvalue_consistency(const VALUE obj)
    check_rvalue_consistency_force(obj, TRUE);
1741 void *poisoned = asan_unpoison_object_temporary(obj);
1747 asan_poison_object(obj);
RVALUE_MARKED(VALUE obj)
    check_rvalue_consistency(obj);
    return RVALUE_MARK_BITMAP(obj) != 0;

RVALUE_PINNED(VALUE obj)
    check_rvalue_consistency(obj);
    return RVALUE_PIN_BITMAP(obj) != 0;

RVALUE_WB_UNPROTECTED(VALUE obj)
    check_rvalue_consistency(obj);
    return RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;

RVALUE_MARKING(VALUE obj)
    check_rvalue_consistency(obj);
    return RVALUE_MARKING_BITMAP(obj) != 0;

RVALUE_REMEMBERED(VALUE obj)
    check_rvalue_consistency(obj);
    return MARKED_IN_BITMAP(GET_HEAP_PAGE(obj)->remembered_bits, obj) != 0;

RVALUE_UNCOLLECTIBLE(VALUE obj)
    check_rvalue_consistency(obj);
    return RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;

RVALUE_OLD_P(VALUE obj)
    check_rvalue_consistency(obj);
1808 MARK_IN_BITMAP(&page->uncollectible_bits[0], obj);
1809 objspace->rgengc.old_objects++;
1811#if RGENGC_PROFILE >= 2
1812 objspace->profile.total_promoted_count++;
1820 RB_DEBUG_COUNTER_INC(obj_promote);
1821 RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, GET_HEAP_PAGE(obj), obj);
    int age = RVALUE_AGE_GET((VALUE)obj);

    if (RGENGC_CHECK_MODE && age == RVALUE_OLD_AGE) {
        rb_bug("RVALUE_AGE_INC: can not increment age of OLD object %s.", obj_info(obj));

    RVALUE_AGE_SET(obj, age);

    if (age == RVALUE_OLD_AGE) {
        RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
    check_rvalue_consistency(obj);

    check_rvalue_consistency(obj);
    GC_ASSERT(!RVALUE_OLD_P(obj));
    RVALUE_AGE_SET(obj, RVALUE_OLD_AGE - 1);
    check_rvalue_consistency(obj);

RVALUE_AGE_RESET(VALUE obj)
    RVALUE_AGE_SET(obj, 0);

    check_rvalue_consistency(obj);
    GC_ASSERT(RVALUE_OLD_P(obj));

    if (!is_incremental_marking(objspace) && RVALUE_REMEMBERED(obj)) {
        CLEAR_IN_BITMAP(GET_HEAP_PAGE(obj)->remembered_bits, obj);

    CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), obj);
    RVALUE_AGE_RESET(obj);

    if (RVALUE_MARKED(obj)) {
        objspace->rgengc.old_objects--;

    check_rvalue_consistency(obj);

RVALUE_BLACK_P(VALUE obj)
    return RVALUE_MARKED(obj) && !RVALUE_MARKING(obj);

RVALUE_GREY_P(VALUE obj)
    return RVALUE_MARKED(obj) && RVALUE_MARKING(obj);

RVALUE_WHITE_P(VALUE obj)
    return RVALUE_MARKED(obj) == FALSE;
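/*
 * The three predicates above are how the tri-color scheme of incremental
 * marking maps onto the bitmaps: "black" objects are marked and fully scanned
 * (mark bit set, marking bit clear), "grey" objects are marked but still
 * queued for scanning (both bits set), and "white" objects are simply
 * unmarked and will be reclaimed if they are still white when sweeping runs.
 */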
1906 return calloc(1, n);
1910rb_objspace_alloc(
void)
1913 objspace->flags.measure_gc = 1;
1914 malloc_limit = gc_params.malloc_limit_min;
1916 if (objspace->finalize_deferred_pjob == POSTPONED_JOB_HANDLE_INVALID) {
1917 rb_bug(
"Could not preregister postponed job for GC");
1920 for (
int i = 0; i < SIZE_POOL_COUNT; i++) {
1923 size_pool->slot_size = (1 << i) * BASE_SLOT_SIZE;
1925 ccan_list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages);
1926 ccan_list_head_init(&SIZE_POOL_TOMB_HEAP(size_pool)->pages);
1929 rb_darray_make_without_gc(&objspace->weak_references, 0);
1943 if (is_lazy_sweeping(objspace))
1944 rb_bug(
"lazy sweeping underway when freeing object space");
1946 free(objspace->profile.records);
1947 objspace->profile.records = NULL;
1951 for (list = global_list; list; list = next) {
1956 if (heap_pages_sorted) {
1958 size_t total_heap_pages = heap_allocated_pages;
1959 for (i = 0; i < total_heap_pages; ++i) {
1960 heap_page_free(objspace, heap_pages_sorted[i]);
1962 free(heap_pages_sorted);
1963 heap_allocated_pages = 0;
1964 heap_pages_sorted_length = 0;
1965 heap_pages_lomem = 0;
1966 heap_pages_himem = 0;
1968 for (
int i = 0; i < SIZE_POOL_COUNT; i++) {
1970 SIZE_POOL_EDEN_HEAP(size_pool)->total_pages = 0;
1971 SIZE_POOL_EDEN_HEAP(size_pool)->total_slots = 0;
1974 st_free_table(objspace->id_to_obj_tbl);
1975 st_free_table(objspace->obj_to_id_tbl);
1977 free_stack_chunks(&objspace->mark_stack);
1978 mark_stack_free_cache(&objspace->mark_stack);
1980 rb_darray_free_without_gc(objspace->weak_references);
1986heap_pages_expand_sorted_to(
rb_objspace_t *objspace,
size_t next_length)
1991 gc_report(3, objspace,
"heap_pages_expand_sorted: next_length: %"PRIdSIZE
", size: %"PRIdSIZE
"\n",
1994 if (heap_pages_sorted_length > 0) {
1995 sorted = (
struct heap_page **)realloc(heap_pages_sorted, size);
1996 if (sorted) heap_pages_sorted = sorted;
1999 sorted = heap_pages_sorted = (
struct heap_page **)malloc(size);
2006 heap_pages_sorted_length = next_length;
2017 size_t next_length = heap_allocatable_pages(objspace);
2018 for (
int i = 0; i < SIZE_POOL_COUNT; i++) {
2020 next_length += SIZE_POOL_EDEN_HEAP(size_pool)->total_pages;
2021 next_length += SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
2024 if (next_length > heap_pages_sorted_length) {
2025 heap_pages_expand_sorted_to(objspace, next_length);
2028 GC_ASSERT(heap_allocatable_pages(objspace) + heap_eden_total_pages(objspace) <= heap_pages_sorted_length);
2029 GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
2035 size_pool->allocatable_pages = s;
2036 heap_pages_expand_sorted(objspace);
2042 ASSERT_vm_locking();
2046 asan_unpoison_object(obj,
false);
2048 asan_unlock_freelist(page);
2050 p->as.free.flags = 0;
2051 p->as.free.next = page->freelist;
2053 asan_lock_freelist(page);
2055 RVALUE_AGE_RESET(obj);
2057 if (RGENGC_CHECK_MODE &&
2059 !(page->start <= (uintptr_t)obj &&
2060 (uintptr_t)obj < ((uintptr_t)page->start + (page->total_slots * page->slot_size)) &&
2061 obj % BASE_SLOT_SIZE == 0)) {
2062 rb_bug(
"heap_page_add_freeobj: %p is not rvalue.", (
void *)p);
2065 asan_poison_object(obj);
2066 gc_report(3, objspace,
"heap_page_add_freeobj: add %p to freelist\n", (
void *)obj);
2072 asan_unlock_freelist(page);
2073 GC_ASSERT(page->free_slots != 0);
2074 GC_ASSERT(page->freelist != NULL);
2076 page->free_next = heap->free_pages;
2077 heap->free_pages = page;
2079 RUBY_DEBUG_LOG(
"page:%p freelist:%p", (
void *)page, (
void *)page->freelist);
2081 asan_lock_freelist(page);
2087 asan_unlock_freelist(page);
2088 GC_ASSERT(page->free_slots != 0);
2089 GC_ASSERT(page->freelist != NULL);
2091 page->free_next = heap->pooled_pages;
2092 heap->pooled_pages = page;
2093 objspace->rincgc.pooled_slots += page->free_slots;
2095 asan_lock_freelist(page);
2101 ccan_list_del(&page->page_node);
2102 heap->total_pages--;
2103 heap->total_slots -= page->total_slots;
2106static void rb_aligned_free(
void *ptr,
size_t size);
2111 GC_ASSERT((uintptr_t)page_body % HEAP_PAGE_ALIGN == 0);
2113 if (HEAP_PAGE_ALLOC_USE_MMAP) {
2115 GC_ASSERT(HEAP_PAGE_SIZE % sysconf(_SC_PAGE_SIZE) == 0);
2116 if (munmap(page_body, HEAP_PAGE_SIZE)) {
2117 rb_bug(
"heap_page_body_free: munmap failed");
2122 rb_aligned_free(page_body, HEAP_PAGE_SIZE);
2129 heap_allocated_pages--;
2130 page->size_pool->total_freed_pages++;
2131 heap_page_body_free(GET_PAGE_BODY(page->start));
2140 bool has_pages_in_tomb_heap = FALSE;
2141 for (i = 0; i < SIZE_POOL_COUNT; i++) {
2142 if (!ccan_list_empty(&SIZE_POOL_TOMB_HEAP(&size_pools[i])->pages)) {
2143 has_pages_in_tomb_heap = TRUE;
2148 if (has_pages_in_tomb_heap) {
2149 for (i = j = 0; j < heap_allocated_pages; i++) {
2150 struct heap_page *page = heap_pages_sorted[i];
2152 if (page->flags.in_tomb && page->free_slots == page->total_slots) {
2153 heap_unlink_page(objspace, SIZE_POOL_TOMB_HEAP(page->size_pool), page);
2154 heap_page_free(objspace, page);
2158 heap_pages_sorted[j] = page;
2164 struct heap_page *hipage = heap_pages_sorted[heap_allocated_pages - 1];
2165 uintptr_t himem = (uintptr_t)hipage->start + (hipage->total_slots * hipage->slot_size);
2166 GC_ASSERT(himem <= heap_pages_himem);
2167 heap_pages_himem = himem;
2169 struct heap_page *lopage = heap_pages_sorted[0];
2170 uintptr_t lomem = (uintptr_t)lopage->start;
2171 GC_ASSERT(lomem >= heap_pages_lomem);
2172 heap_pages_lomem = lomem;
2174 GC_ASSERT(j == heap_allocated_pages);
heap_page_body_allocate(void)
    if (HEAP_PAGE_ALLOC_USE_MMAP) {
        GC_ASSERT(HEAP_PAGE_ALIGN % sysconf(_SC_PAGE_SIZE) == 0);

        char *ptr = mmap(NULL, HEAP_PAGE_ALIGN + HEAP_PAGE_SIZE,
                         PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (ptr == MAP_FAILED) {

        char *aligned = ptr + HEAP_PAGE_ALIGN;
        aligned -= ((VALUE)aligned & (HEAP_PAGE_ALIGN - 1));
        GC_ASSERT(aligned > ptr);
        GC_ASSERT(aligned <= ptr + HEAP_PAGE_ALIGN);

        size_t start_out_of_range_size = aligned - ptr;
        GC_ASSERT(start_out_of_range_size % sysconf(_SC_PAGE_SIZE) == 0);
        if (start_out_of_range_size > 0) {
            if (munmap(ptr, start_out_of_range_size)) {
                rb_bug("heap_page_body_allocate: munmap failed for start");

        size_t end_out_of_range_size = HEAP_PAGE_ALIGN - start_out_of_range_size;
        GC_ASSERT(end_out_of_range_size % sysconf(_SC_PAGE_SIZE) == 0);
        if (end_out_of_range_size > 0) {
            if (munmap(aligned + HEAP_PAGE_SIZE, end_out_of_range_size)) {
                rb_bug("heap_page_body_allocate: munmap failed for end");

        page_body = rb_aligned_malloc(HEAP_PAGE_ALIGN, HEAP_PAGE_SIZE);

    GC_ASSERT((uintptr_t)page_body % HEAP_PAGE_ALIGN == 0);
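/*
 * Standalone sketch (not part of gc.c) of the aligned-mmap trick used in
 * heap_page_body_allocate above: map one extra alignment unit, round the
 * start up to the alignment, then munmap the unused head and tail so only
 * the aligned region stays mapped.  POSIX-only; demo_mmap_aligned is a
 * hypothetical name, `align` must be a power of two and a multiple of the
 * system page size, and <sys/mman.h>/<stdint.h> are assumed.
 */
static void *
demo_mmap_aligned(size_t size, size_t align)
{
    char *raw = mmap(NULL, size + align, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (raw == MAP_FAILED) return NULL;

    char *aligned = raw + align;
    aligned -= ((uintptr_t)aligned & (align - 1));   /* round down to the alignment   */

    size_t head = (size_t)(aligned - raw);           /* misaligned prefix, if any      */
    size_t tail = align - head;                      /* whatever is left past the end  */
    if (head) munmap(raw, head);                     /* hand both back to the kernel   */
    if (tail) munmap(aligned + size, tail);
    return aligned;
}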
2229 uintptr_t start, end, p;
2231 uintptr_t hi, lo, mid;
2232 size_t stride = size_pool->slot_size;
2233 unsigned int limit = (
unsigned int)((HEAP_PAGE_SIZE -
sizeof(
struct heap_page_header)))/(int)stride;
2237 if (page_body == 0) {
2242 page = calloc1(
sizeof(
struct heap_page));
2244 heap_page_body_free(page_body);
2251 if (start % BASE_SLOT_SIZE != 0) {
2252 int delta = BASE_SLOT_SIZE - (start % BASE_SLOT_SIZE);
2253 start = start + delta;
2254 GC_ASSERT(NUM_IN_PAGE(start) == 0 || NUM_IN_PAGE(start) == 1);
2260 if (NUM_IN_PAGE(start) == 1) {
2261 start += stride - BASE_SLOT_SIZE;
2264 GC_ASSERT(NUM_IN_PAGE(start) * BASE_SLOT_SIZE % stride == 0);
2266 limit = (HEAP_PAGE_SIZE - (int)(start - (uintptr_t)page_body))/(
int)stride;
2268 end = start + (limit * (int)stride);
2272 hi = (uintptr_t)heap_allocated_pages;
2276 mid = (lo + hi) / 2;
2277 mid_page = heap_pages_sorted[mid];
2278 if ((uintptr_t)mid_page->start < start) {
2281 else if ((uintptr_t)mid_page->start > start) {
2285 rb_bug(
"same heap page is allocated: %p at %"PRIuVALUE, (
void *)page_body, (
VALUE)mid);
2289 if (hi < (uintptr_t)heap_allocated_pages) {
2290 MEMMOVE(&heap_pages_sorted[hi+1], &heap_pages_sorted[hi],
struct heap_page_header*, heap_allocated_pages - hi);
2293 heap_pages_sorted[hi] = page;
2295 heap_allocated_pages++;
2297 GC_ASSERT(heap_eden_total_pages(objspace) + heap_allocatable_pages(objspace) <= heap_pages_sorted_length);
2298 GC_ASSERT(heap_eden_total_pages(objspace) + heap_tomb_total_pages(objspace) == heap_allocated_pages - 1);
2299 GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
2301 size_pool->total_allocated_pages++;
2303 if (heap_allocated_pages > heap_pages_sorted_length) {
2304 rb_bug(
"heap_page_allocate: allocated(%"PRIdSIZE
") > sorted(%"PRIdSIZE
")",
2305 heap_allocated_pages, heap_pages_sorted_length);
2308 if (heap_pages_lomem == 0 || heap_pages_lomem > start) heap_pages_lomem = start;
2309 if (heap_pages_himem < end) heap_pages_himem = end;
2311 page->start = start;
2312 page->total_slots = limit;
2313 page->slot_size = size_pool->slot_size;
2314 page->size_pool = size_pool;
2315 page_body->header.page = page;
2317 for (p = start; p != end; p += stride) {
2318 gc_report(3, objspace,
"assign_heap_page: %p is added to freelist\n", (
void *)p);
2319 heap_page_add_freeobj(objspace, page, (
VALUE)p);
2321 page->free_slots = limit;
2323 asan_lock_freelist(page);
2332 ccan_list_for_each_safe(&SIZE_POOL_TOMB_HEAP(size_pool)->pages, page, next, page_node) {
2333 asan_unlock_freelist(page);
2334 if (page->freelist != NULL) {
2335 heap_unlink_page(objspace, &size_pool->tomb_heap, page);
2336 asan_lock_freelist(page);
2348 const char *method =
"recycle";
2350 size_pool->allocatable_pages--;
2352 page = heap_page_resurrect(objspace, size_pool);
2355 page = heap_page_allocate(objspace, size_pool);
2356 method =
"allocate";
2358 if (0) fprintf(stderr,
"heap_page_create: %s - %p, "
2359 "heap_allocated_pages: %"PRIdSIZE
", "
2360 "heap_allocated_pages: %"PRIdSIZE
", "
2361 "tomb->total_pages: %"PRIdSIZE
"\n",
2362 method, (
void *)page, heap_pages_sorted_length, heap_allocated_pages, SIZE_POOL_TOMB_HEAP(size_pool)->total_pages);
2370 GC_ASSERT(!(heap == SIZE_POOL_EDEN_HEAP(size_pool) && heap->sweeping_page));
2371 page->flags.in_tomb = (heap == SIZE_POOL_TOMB_HEAP(size_pool));
2372 ccan_list_add_tail(&heap->pages, &page->page_node);
2373 heap->total_pages++;
2374 heap->total_slots += page->total_slots;
2380 struct heap_page *page = heap_page_create(objspace, size_pool);
2381 heap_add_page(objspace, size_pool, heap, page);
2382 heap_add_freepage(heap, page);
2385#if GC_CAN_COMPILE_COMPACTION
2391 size_pool_allocatable_pages_set(objspace, size_pool, add);
2393 for (i = 0; i < add; i++) {
2394 heap_assign_page(objspace, size_pool, heap);
2397 GC_ASSERT(size_pool->allocatable_pages == 0);
2404 size_t multiple = size_pool->slot_size / BASE_SLOT_SIZE;
2408 size_t slots_per_page = (HEAP_PAGE_OBJ_LIMIT / multiple) - 1;
2409 return CEILDIV(slots, slots_per_page);
2415 size_t size_pool_idx = size_pool - size_pools;
2416 size_t init_slots = gc_params.size_pool_init_slots[size_pool_idx];
2417 return slots_to_pages_for_size_pool(objspace, size_pool, init_slots);
2423 double goal_ratio = gc_params.heap_free_slots_goal_ratio;
2426 if (goal_ratio == 0.0) {
2427 next_used = (size_t)(used * gc_params.growth_factor);
2429 else if (total_slots == 0) {
2430 next_used = minimum_pages_for_size_pool(objspace, size_pool);
2436 double f = (double)(total_slots - free_slots) / ((1 - goal_ratio) * total_slots);
2438 if (f > gc_params.growth_factor) f = gc_params.growth_factor;
2439 if (f < 1.0) f = 1.1;
2441 next_used = (size_t)(f * used);
2445 "free_slots(%8"PRIuSIZE
")/total_slots(%8"PRIuSIZE
")=%1.2f,"
2446 " G(%1.2f), f(%1.2f),"
2447 " used(%8"PRIuSIZE
") => next_used(%8"PRIuSIZE
")\n",
2448 free_slots, total_slots, free_slots/(
double)total_slots,
2449 goal_ratio, f, used, next_used);
2453 if (gc_params.growth_max_slots > 0) {
2454 size_t max_used = (size_t)(used + gc_params.growth_max_slots/HEAP_PAGE_OBJ_LIMIT);
2455 if (next_used > max_used) next_used = max_used;
2458 size_t extend_page_count = next_used - used;
2460 if (extend_page_count == 0) extend_page_count = 1;
2462 return extend_page_count;
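/*
 * The sizing rule used above, restated as a standalone sketch (not part of
 * gc.c).  With a goal ratio G of free slots after GC, the heap is scaled by
 * f = live_slots / ((1 - G) * total_slots), capped at growth_factor and
 * bumped to 1.1 whenever it would fall below 1.0, so a nearly full heap grows
 * quickly while an underused heap barely grows.  demo_next_pages is a
 * hypothetical name.
 */
static size_t
demo_next_pages(size_t used_pages, size_t total_slots, size_t free_slots,
                double goal_ratio, double growth_factor)
{
    double f = (double)(total_slots - free_slots) / ((1.0 - goal_ratio) * total_slots);
    if (f > growth_factor) f = growth_factor;
    if (f < 1.0) f = 1.1;
    /* e.g. 100 pages, 20% free, G = 0.40: f = 0.8 / 0.6 = 1.33 -> 133 pages */
    return (size_t)(f * used_pages);
}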
2468 if (size_pool->allocatable_pages > 0) {
2469 gc_report(1, objspace,
"heap_increment: heap_pages_sorted_length: %"PRIdSIZE
", "
2470 "heap_pages_inc: %"PRIdSIZE
", heap->total_pages: %"PRIdSIZE
"\n",
2471 heap_pages_sorted_length, size_pool->allocatable_pages, heap->total_pages);
2473 GC_ASSERT(heap_allocatable_pages(objspace) + heap_eden_total_pages(objspace) <= heap_pages_sorted_length);
2474 GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
2476 heap_assign_page(objspace, size_pool, heap);
2485 unsigned int lock_lev;
2486 gc_enter(objspace, gc_enter_event_continue, &lock_lev);
2489 if (is_incremental_marking(objspace)) {
2490 if (gc_marks_continue(objspace, size_pool, heap)) {
2497 if (heap->free_pages == NULL && is_lazy_sweeping(objspace)) {
2498 gc_sweep_continue(objspace, size_pool, heap);
2501 gc_exit(objspace, gc_enter_event_continue, &lock_lev);
2507 GC_ASSERT(heap->free_pages == NULL);
2510 gc_continue(objspace, size_pool, heap);
2514 if (heap->free_pages == NULL &&
2515 (will_be_incremental_marking(objspace) ||
2516 (heap_increment(objspace, size_pool, heap) == FALSE))) {
2517 if (gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) {
2522 gc_continue(objspace, size_pool, heap);
2527 if (heap->free_pages == NULL && !heap_increment(objspace, size_pool, heap)) {
2528 if (objspace->rgengc.need_major_gc == GPR_FLAG_NONE) {
2529 rb_bug(
"cannot create a new page after GC");
2532 if (gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) {
2537 gc_continue(objspace, size_pool, heap);
2539 if (heap->free_pages == NULL &&
2540 !heap_increment(objspace, size_pool, heap)) {
2541 rb_bug(
"cannot create a new page after major GC");
2549 GC_ASSERT(heap->free_pages != NULL);
2563 if (UNLIKELY(!ec->cfp))
return;
2564 EXEC_EVENT_HOOK(ec, event, ec->cfp->self, 0, 0, 0, data);
2567#define gc_event_newobj_hook_needed_p(objspace) ((objspace)->flags.has_newobj_hook)
2568#define gc_event_hook_needed_p(objspace, event) ((objspace)->hook_events & (event))
2570#define gc_event_hook_prep(objspace, event, data, prep) do { \
2571 if (UNLIKELY(gc_event_hook_needed_p(objspace, event))) { \
2573 gc_event_hook_body(GET_EC(), (objspace), (event), (data)); \
2577#define gc_event_hook(objspace, event, data) gc_event_hook_prep(objspace, event, data, (void)0)
2582#if !__has_feature(memory_sanitizer)
2587 p->as.basic.
flags = flags;
2592 RVALUE_AGE_SET_CANDIDATE(objspace, obj);
2595#if RACTOR_CHECK_MODE
2596 rb_ractor_setup_belonging(obj);
2599#if RGENGC_CHECK_MODE
2600 p->as.values.v1 = p->as.values.v2 = p->as.values.v3 = 0;
2602 RB_VM_LOCK_ENTER_NO_BARRIER();
2604 check_rvalue_consistency(obj);
2606 GC_ASSERT(RVALUE_MARKED(obj) == FALSE);
2607 GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
2608 GC_ASSERT(RVALUE_OLD_P(obj) == FALSE);
2609 GC_ASSERT(RVALUE_WB_UNPROTECTED(obj) == FALSE);
2611 if (RVALUE_REMEMBERED((
VALUE)obj)) rb_bug(
"newobj: %s is remembered.", obj_info(obj));
2613 RB_VM_LOCK_LEAVE_NO_BARRIER();
2616 if (UNLIKELY(wb_protected == FALSE)) {
2617 ASSERT_vm_locking();
2618 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
2623 objspace->profile.total_generated_normal_object_count++;
2624#if RGENGC_PROFILE >= 2
2625 objspace->profile.generated_normal_object_count_types[
BUILTIN_TYPE(obj)]++;
2629 objspace->profile.total_generated_shady_object_count++;
2630#if RGENGC_PROFILE >= 2
2631 objspace->profile.generated_shady_object_count_types[
BUILTIN_TYPE(obj)]++;
2637 RANY(obj)->file = rb_source_location_cstr(&RANY(obj)->line);
2641 gc_report(5, objspace,
"newobj: %s\n", obj_info(obj));
rb_gc_obj_slot_size(VALUE obj)
    return GET_HEAP_PAGE(obj)->slot_size - RVALUE_OVERHEAD;

size_pool_slot_size(unsigned char pool_id)
    GC_ASSERT(pool_id < SIZE_POOL_COUNT);

    size_t slot_size = (1 << pool_id) * BASE_SLOT_SIZE;

#if RGENGC_CHECK_MODE
    GC_ASSERT(size_pools[pool_id].slot_size == (short)slot_size);

    slot_size -= RVALUE_OVERHEAD;

rb_size_pool_slot_size(unsigned char pool_id)
    return size_pool_slot_size(pool_id);

rb_gc_size_allocatable_p(size_t size)
    return size <= size_pool_slot_size(SIZE_POOL_COUNT - 1);
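/*
 * The slot-size ladder implied by size_pool_slot_size above, as a standalone
 * sketch (not part of gc.c): pool i serves slots of (1 << i) * BASE_SLOT_SIZE
 * bytes, so on a typical 64-bit build with sizeof(RVALUE) == 40 and five
 * pools (both assumptions of this sketch) the ladder is 40, 80, 160, 320 and
 * 640 bytes, and rb_gc_size_allocatable_p() just checks against the largest
 * pool.  demo_* names are hypothetical.
 */
static size_t
demo_slot_size(unsigned char pool_id, size_t base_slot_size)
{
    return ((size_t)1 << pool_id) * base_slot_size;    /* 40, 80, 160, 320, 640, ... */
}

static int
demo_size_allocatable_p(size_t size, size_t base_slot_size, unsigned char pool_count)
{
    return size <= demo_slot_size(pool_count - 1, base_slot_size);
}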
2684 size_t size_pool_idx)
2687 RVALUE *p = size_pool_cache->freelist;
2689 if (is_incremental_marking(objspace)) {
2691 if (cache->incremental_mark_step_allocated_slots >= INCREMENTAL_MARK_STEP_ALLOCATIONS) {
2696 cache->incremental_mark_step_allocated_slots++;
2702 MAYBE_UNUSED(
const size_t) stride = size_pool_slot_size(size_pool_idx);
2703 size_pool_cache->freelist = p->as.free.next;
2704 asan_unpoison_memory_region(p, stride,
true);
2705#if RGENGC_CHECK_MODE
2706 GC_ASSERT(rb_gc_obj_slot_size(obj) == stride);
2708 MEMZERO((
char *)obj,
char, stride);
2720 ASSERT_vm_locking();
2724 if (heap->free_pages == NULL) {
2725 heap_prepare(objspace, size_pool, heap);
2728 page = heap->free_pages;
2729 heap->free_pages = page->free_next;
2731 GC_ASSERT(page->free_slots != 0);
2732 RUBY_DEBUG_LOG(
"page:%p freelist:%p cnt:%d", (
void *)page, (
void *)page->freelist, page->free_slots);
2734 asan_unlock_freelist(page);
2743 gc_report(3, &
rb_objspace,
"ractor_set_cache: Using page %p\n", (
void *)GET_PAGE_BODY(page->start));
2747 GC_ASSERT(size_pool_cache->freelist == NULL);
2748 GC_ASSERT(page->free_slots != 0);
2749 GC_ASSERT(page->freelist != NULL);
2751 size_pool_cache->using_page = page;
2752 size_pool_cache->freelist = page->freelist;
2753 page->free_slots = 0;
2754 page->freelist = NULL;
2756 asan_unpoison_object((
VALUE)size_pool_cache->freelist,
false);
2757 GC_ASSERT(RB_TYPE_P((
VALUE)size_pool_cache->freelist,
T_NONE));
2758 asan_poison_object((
VALUE)size_pool_cache->freelist);
2765 p->as.values.v1 = v1;
2766 p->as.values.v2 = v2;
2767 p->as.values.v3 = v3;
size_pool_idx_for_size(size_t size)
    size += RVALUE_OVERHEAD;

    size_t slot_count = CEILDIV(size, BASE_SLOT_SIZE);

    size_t size_pool_idx = 64 - nlz_int64(slot_count - 1);

    if (size_pool_idx >= SIZE_POOL_COUNT) {
        rb_bug("size_pool_idx_for_size: allocation size too large "
               "(size=%"PRIuSIZE"u, size_pool_idx=%"PRIuSIZE"u)", size, size_pool_idx);

#if RGENGC_CHECK_MODE
    GC_ASSERT(size <= (size_t)size_pools[size_pool_idx].slot_size);
    if (size_pool_idx > 0) GC_ASSERT(size > (size_t)size_pools[size_pool_idx - 1].slot_size);

    return size_pool_idx;
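/*
 * Standalone sketch (not part of gc.c) of the index computation above: after
 * adding RVALUE_OVERHEAD, the requested size is converted to a slot count and
 * mapped to ceil(log2(slot_count)), i.e. the smallest pool whose slots are
 * big enough.  demo_pool_idx is a hypothetical, portable rewrite of the
 * nlz_int64() expression (the overhead is left out for brevity).
 */
static size_t
demo_pool_idx(size_t size, size_t base_slot_size)
{
    size_t slot_count = (size + base_slot_size - 1) / base_slot_size;   /* CEILDIV           */
    size_t idx = 0;
    while (((size_t)1 << idx) < slot_count) idx++;                      /* ceil(log2(count)) */
    return idx;                      /* with a 40-byte base: 1..40 -> 0, 41..80 -> 1, ...    */
}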
2799 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
2802 VALUE obj = ractor_cache_allocate_slot(objspace, cache, size_pool_idx);
2804 if (UNLIKELY(obj ==
Qfalse)) {
2806 bool unlock_vm =
false;
2809 RB_VM_LOCK_ENTER_CR_LEV(cr, &lev);
2815 ASSERT_vm_locking();
2817 if (is_incremental_marking(objspace)) {
2818 gc_continue(objspace, size_pool, heap);
2819 cache->incremental_mark_step_allocated_slots = 0;
2822 obj = ractor_cache_allocate_slot(objspace, cache, size_pool_idx);
2827 struct heap_page *page = heap_next_free_page(objspace, size_pool, heap);
2828 ractor_cache_set_page(cache, size_pool_idx, page);
2831 obj = ractor_cache_allocate_slot(objspace, cache, size_pool_idx);
2833 GC_ASSERT(obj !=
Qfalse);
2838 RB_VM_LOCK_LEAVE_CR_LEV(cr, &lev);
2842 size_pool->total_allocated_objects++;
2848newobj_zero_slot(
VALUE obj)
2850 memset((
char *)obj +
sizeof(
struct RBasic), 0, rb_gc_obj_slot_size(obj) -
sizeof(
struct RBasic));
2861 RB_VM_LOCK_ENTER_CR_LEV(cr, &lev);
2863 if (UNLIKELY(during_gc || ruby_gc_stressful)) {
2867 rb_bug(
"object allocation during garbage collection phase");
2870 if (ruby_gc_stressful) {
2871 if (!garbage_collect(objspace, GPR_FLAG_NEWOBJ)) {
2877 obj = newobj_alloc(objspace, cr, size_pool_idx,
true);
2878 newobj_init(klass, flags, wb_protected, objspace, obj);
2882 RB_VM_LOCK_LEAVE_CR_LEV(cr, &lev);
2887NOINLINE(
static VALUE newobj_slowpath_wb_protected(
VALUE klass,
VALUE flags,
2889NOINLINE(
static VALUE newobj_slowpath_wb_unprotected(
VALUE klass,
VALUE flags,
2895 return newobj_slowpath(klass, flags, objspace, cr, TRUE, size_pool_idx);
2901 return newobj_slowpath(klass, flags, objspace, cr, FALSE, size_pool_idx);
2910 RB_DEBUG_COUNTER_INC(obj_newobj);
2911 (void)RB_DEBUG_COUNTER_INC_IF(obj_newobj_wb_unprotected, !wb_protected);
2913 if (UNLIKELY(stress_to_class)) {
2915 for (i = 0; i < cnt; ++i) {
2916 if (klass ==
RARRAY_AREF(stress_to_class, i)) rb_memerror();
2920 size_t size_pool_idx = size_pool_idx_for_size(alloc_size);
2923 flags |= (
VALUE)size_pool_idx << SHAPE_FLAG_SHIFT;
2926 if (!UNLIKELY(during_gc ||
2927 ruby_gc_stressful ||
2928 gc_event_newobj_hook_needed_p(objspace)) &&
2930 obj = newobj_alloc(objspace, cr, size_pool_idx,
false);
2931 newobj_init(klass, flags, wb_protected, objspace, obj);
2934 RB_DEBUG_COUNTER_INC(obj_newobj_slowpath);
2936 obj = wb_protected ?
2937 newobj_slowpath_wb_protected(klass, flags, objspace, cr, size_pool_idx) :
2938 newobj_slowpath_wb_unprotected(klass, flags, objspace, cr, size_pool_idx);
2947 VALUE obj = newobj_of0(klass, flags, wb_protected, cr, alloc_size);
2948 return newobj_fill(obj, v1, v2, v3);
2952rb_wb_unprotected_newobj_of(
VALUE klass,
VALUE flags,
size_t size)
2955 return newobj_of(GET_RACTOR(), klass, flags, 0, 0, 0, FALSE, size);
2962 return newobj_of(rb_ec_ractor_ptr(ec), klass, flags, 0, 0, 0, TRUE, size);
2970 return newobj_of(GET_RACTOR(), 0,
T_NONE, 0, 0, 0, FALSE, RVALUE_SIZE);
2974rb_class_instance_allocate_internal(VALUE klass, VALUE flags, bool wb_protected)
2977 GC_ASSERT(flags & ROBJECT_EMBED);
2980 uint32_t index_tbl_num_entries = RCLASS_EXT(klass)->max_iv_count;
2983 if (!rb_gc_size_allocatable_p(size)) {
2984 size = sizeof(struct RObject);
2987 VALUE obj = newobj_of(GET_RACTOR(), klass, flags, 0, 0, 0, wb_protected, size);
2992 ROBJECT_SET_SHAPE_ID(obj, ROBJECT_SHAPE_ID(obj) + SIZE_POOL_COUNT);
2997 for (size_t i = 0; i < ROBJECT_IV_CAPACITY(obj); i++) {
3016#define UNEXPECTED_NODE(func) \
3017 rb_bug(#func"(): GC does not handle T_NODE 0x%x(%p) 0x%"PRIxVALUE, \
3018 BUILTIN_TYPE(obj), (void*)(obj), RBASIC(obj)->flags)
3021rb_imemo_name(enum imemo_type type)
3025#define IMEMO_NAME(x) case imemo_##x: return #x;
3029 IMEMO_NAME(throw_data);
3036 IMEMO_NAME(parser_strterm);
3037 IMEMO_NAME(callinfo);
3038 IMEMO_NAME(callcache);
3039 IMEMO_NAME(constcache);
3050 size_t size = RVALUE_SIZE;
3052 return newobj_of(GET_RACTOR(), v0, flags, v1, v2, v3, TRUE, size);
3060 return newobj_of(GET_RACTOR(), v0, flags, v1, v2, v3, FALSE, size);
3064rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(void *buf, size_t cnt)
3066 return rb_imemo_tmpbuf_new((VALUE)buf, 0, (VALUE)cnt, 0);
3076imemo_memsize(VALUE obj)
3079 switch (imemo_type(obj)) {
3081 size += sizeof(RANY(obj)->as.imemo.ment.def);
3084 size += rb_iseq_memsize((rb_iseq_t *)obj);
3087 size += RANY(obj)->as.imemo.env.env_size * sizeof(VALUE);
3090 size += RANY(obj)->as.imemo.alloc.cnt * sizeof(VALUE);
3093 size += rb_ast_memsize(&RANY(obj)->as.imemo.ast);
3097 case imemo_throw_data:
3100 case imemo_parser_strterm:
3113 VALUE memo = rb_imemo_new(type, v1, v2, v3, v0);
3114 fprintf(stderr, "memo %p (type: %d) @ %s:%d\n", (void *)memo, imemo_type(memo), file, line);
3120rb_class_allocate_instance(VALUE klass)
3126rb_data_object_check(VALUE klass)
3128 if (klass != rb_cObject && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
3130 rb_warn("undefining the allocator of T_DATA class %"PRIsVALUE, klass);
3138 if (klass) rb_data_object_check(klass);
3145 VALUE obj = rb_data_object_wrap(klass, 0, dmark, dfree);
3153 RBIMPL_NONNULL_ARG(type);
3154 if (klass) rb_data_object_check(klass);
3156 return newobj_of(GET_RACTOR(), klass, T_DATA, (VALUE)type, 1 | typed_flag, (VALUE)datap, wb_protected, size);
3162 if (UNLIKELY(type->flags & RUBY_TYPED_EMBEDDABLE)) {
3163 rb_raise(rb_eTypeError, "Cannot wrap an embeddable TypedData");
3166 return typed_data_alloc(klass, 0, datap, type, sizeof(struct RTypedData));
3172 if (type->flags & RUBY_TYPED_EMBEDDABLE) {
3173 if (!(type->flags & RUBY_TYPED_FREE_IMMEDIATELY)) {
3174 rb_raise(rb_eTypeError, "Embeddable TypedData must be freed immediately");
3177 size_t embed_size = offsetof(struct RTypedData, data) + size;
3178 if (rb_gc_size_allocatable_p(embed_size)) {
3179 VALUE obj = typed_data_alloc(klass, TYPED_DATA_EMBEDDED, 0, type, embed_size);
3180 memset((char *)obj + offsetof(struct RTypedData, data), 0, size);
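/* Note: an embeddable TypedData keeps its payload inline in the GC slot, right after
 * the RTypedData header (hence offsetof(struct RTypedData, data)), instead of in a
 * separate malloc'ed buffer; that is why the payload must fit in an allocatable slot
 * size and why such types are required to use RUBY_TYPED_FREE_IMMEDIATELY. */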
3191rb_objspace_data_type_memsize(VALUE obj)
3196 const void *ptr = RTYPEDDATA_GET_DATA(obj);
3198 if (RTYPEDDATA_TYPE(obj)->flags & RUBY_TYPED_EMBEDDABLE && !RTYPEDDATA_EMBEDDED_P(obj)) {
3199#ifdef HAVE_MALLOC_USABLE_SIZE
3200 size += malloc_usable_size((void *)ptr);
3204 if (ptr && type->function.dsize) {
3205 size += type->function.dsize(ptr);
3213rb_objspace_data_type_name(VALUE obj)
3224ptr_in_page_body_p(const void *ptr, const void *memb)
3227 uintptr_t p_body = (uintptr_t)GET_PAGE_BODY(page->start);
3229 if ((uintptr_t)ptr >= p_body) {
3230 return (uintptr_t)ptr < (p_body + HEAP_PAGE_SIZE) ? 0 : 1;
3243 if (ptr < (uintptr_t)heap_pages_lomem ||
3244 ptr > (uintptr_t)heap_pages_himem) {
3248 res = bsearch((void *)ptr, heap_pages_sorted,
3249 (size_t)heap_allocated_pages, sizeof(struct heap_page *),
3250 ptr_in_page_body_p);
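/* Note: heap_pages_sorted is kept ordered by page body address, so heap_page_for_ptr()
 * can map a raw pointer back to its heap_page with a plain bsearch(); the lomem/himem
 * range check above rejects pointers outside the heap before searching. */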
3260PUREFUNC(static inline int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr);)
3264 register uintptr_t p = (uintptr_t)ptr;
3267 RB_DEBUG_COUNTER_INC(gc_isptr_trial);
3269 if (p < heap_pages_lomem || p > heap_pages_himem) return FALSE;
3270 RB_DEBUG_COUNTER_INC(gc_isptr_range);
3272 if (p % BASE_SLOT_SIZE != 0) return FALSE;
3273 RB_DEBUG_COUNTER_INC(gc_isptr_align);
3275 page = heap_page_for_ptr(objspace, (uintptr_t)ptr);
3277 RB_DEBUG_COUNTER_INC(gc_isptr_maybe);
3278 if (page->flags.in_tomb) {
3282 if (p < page->start) return FALSE;
3283 if (p >= page->start + (page->total_slots * page->slot_size)) return FALSE;
3284 if ((NUM_IN_PAGE(p) * BASE_SLOT_SIZE) % page->slot_size != 0) return FALSE;
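/* Note: is_pointer_to_heap() is the filter used by conservative scanning: a candidate
 * must lie inside the mapped heap range, be BASE_SLOT_SIZE aligned, belong to a live
 * (non-tomb) page, fall within that page's slots, and land exactly on a slot boundary
 * for that page's slot size before it is treated as a possible object reference. */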
3292static enum rb_id_table_iterator_result
3293free_const_entry_i(VALUE value, void *data)
3297 return ID_TABLE_CONTINUE;
3303 rb_id_table_foreach_values(tbl, free_const_entry_i, 0);
3304 rb_id_table_free(tbl);
3313 for (int i=0; i<ccs->len; i++) {
3316 void *ptr = asan_unpoison_object_temporary((VALUE)cc);
3318 if (is_pointer_to_heap(objspace, (void *)cc) &&
3319 IMEMO_TYPE_P(cc, imemo_callcache) &&
3320 cc->klass == klass) {
3325 asan_poison_object((VALUE)cc);
3330 asan_poison_object((VALUE)cc);
3334 VM_ASSERT(!vm_cc_super_p(cc) && !vm_cc_refinement_p(cc));
3335 vm_cc_invalidate(cc);
3337 ruby_xfree(ccs->entries);
3345 RB_DEBUG_COUNTER_INC(ccs_free);
3346 vm_ccs_free(ccs, TRUE, NULL, Qundef);
3355static enum rb_id_table_iterator_result
3356cc_table_mark_i(ID id, VALUE ccs_ptr, void *data_ptr)
3360 VM_ASSERT(vm_ccs_p(ccs));
3361 VM_ASSERT(id == ccs->cme->called_id);
3363 if (METHOD_ENTRY_INVALIDATED(ccs->cme)) {
3364 rb_vm_ccs_free(ccs);
3365 return ID_TABLE_DELETE;
3368 gc_mark(data->objspace, (VALUE)ccs->cme);
3370 for (int i=0; i<ccs->len; i++) {
3371 VM_ASSERT(data->klass == ccs->entries[i].cc->klass);
3372 VM_ASSERT(vm_cc_check_cme(ccs->entries[i].cc, ccs->cme));
3374 gc_mark(data->objspace, (VALUE)ccs->entries[i].ci);
3375 gc_mark(data->objspace, (VALUE)ccs->entries[i].cc);
3377 return ID_TABLE_CONTINUE;
3384 struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
3387 .objspace = objspace,
3390 rb_id_table_foreach(cc_tbl, cc_table_mark_i, &data);
3394static enum rb_id_table_iterator_result
3395cc_table_free_i(VALUE ccs_ptr, void *data_ptr)
3399 VM_ASSERT(vm_ccs_p(ccs));
3400 vm_ccs_free(ccs, data->alive, data->objspace, data->klass);
3401 return ID_TABLE_CONTINUE;
3407 struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
3411 .objspace = objspace,
3415 rb_id_table_foreach_values(cc_tbl, cc_table_free_i, &data);
3416 rb_id_table_free(cc_tbl);
3420static enum rb_id_table_iterator_result
3421cvar_table_free_i(VALUE value, void * ctx)
3423 xfree((void *) value);
3424 return ID_TABLE_CONTINUE;
3428rb_cc_table_free(VALUE klass)
3436 struct RZombie *zombie = RZOMBIE(obj);
3438 zombie->dfree = dfree;
3439 zombie->data = data;
3440 VALUE prev, next = heap_pages_deferred_final;
3442 zombie->next = prev = next;
3444 } while (next != prev);
3446 struct heap_page *page = GET_HEAP_PAGE(obj);
3447 page->final_slots++;
3448 heap_pages_final_slots++;
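/* Note: make_zombie() turns a swept object into a zombie placeholder that records its
 * dfree function and data and pushes it onto the heap_pages_deferred_final list; the
 * slot is only handed back to the freelist later, after finalize_list() has run the
 * deferred free/finalizer work outside the sweep step. */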
3454 rb_io_t *fptr = RANY(obj)->as.file.fptr;
3455 make_zombie(objspace, obj, rb_io_fptr_finalize_internal, fptr);
3461 ASSERT_vm_locking();
3462 st_data_t o = (st_data_t)obj, id;
3467 if (st_delete(objspace->obj_to_id_tbl, &o, &id)) {
3469 st_delete(objspace->id_to_obj_tbl, &id, NULL);
3472 rb_bug("Object ID seen, but not in mapping table: %s", obj_info(obj));
3481 int free_immediately = false;
3482 void (*dfree)(void *);
3485 free_immediately = (RANY(obj)->as.typeddata.type->flags & RUBY_TYPED_FREE_IMMEDIATELY) != 0;
3486 dfree = RANY(obj)->as.typeddata.type->function.dfree;
3489 dfree = RANY(obj)->as.data.dfree;
3494 if (!RTYPEDDATA_P(obj) || !RTYPEDDATA_EMBEDDED_P(obj)) {
3496 RB_DEBUG_COUNTER_INC(obj_data_xfree);
3499 else if (free_immediately) {
3501 if (RTYPEDDATA_TYPE(obj)->flags & RUBY_TYPED_EMBEDDABLE && !RTYPEDDATA_EMBEDDED_P(obj)) {
3505 RB_DEBUG_COUNTER_INC(obj_data_imm_free);
3508 make_zombie(objspace, obj, dfree, data);
3509 RB_DEBUG_COUNTER_INC(obj_data_zombie);
3514 RB_DEBUG_COUNTER_INC(obj_data_empty);
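/* Note: rb_data_free() either runs the dfree function right away (the plain-xfree case
 * counted as obj_data_xfree, or when the type is flagged RUBY_TYPED_FREE_IMMEDIATELY) or
 * defers it by turning the object into a zombie, so user-supplied free functions do not
 * run in the middle of the sweep phase. */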
3524 RB_DEBUG_COUNTER_INC(obj_free);
3534 rb_bug("obj_free() called for broken object");
3546 obj_free_object_id(objspace, obj);
3549 if (RVALUE_WB_UNPROTECTED(obj)) CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
3551#if RGENGC_CHECK_MODE
3552#define CHECK(x) if (x(obj) != FALSE) rb_bug("obj_free: " #x "(%s) != FALSE", obj_info(obj))
3553 CHECK(RVALUE_WB_UNPROTECTED);
3554 CHECK(RVALUE_MARKED);
3555 CHECK(RVALUE_MARKING);
3556 CHECK(RVALUE_UNCOLLECTIBLE);
3562 if (rb_shape_obj_too_complex(obj)) {
3563 RB_DEBUG_COUNTER_INC(obj_obj_too_complex);
3564 st_free_table(ROBJECT_IV_HASH(obj));
3566 else if (RANY(obj)->as.basic.flags & ROBJECT_EMBED) {
3567 RB_DEBUG_COUNTER_INC(obj_obj_embed);
3570 xfree(RANY(obj)->as.object.as.heap.ivptr);
3571 RB_DEBUG_COUNTER_INC(obj_obj_ptr);
3576 rb_id_table_free(RCLASS_M_TBL(obj));
3577 cc_table_free(objspace, obj, FALSE);
3578 if (rb_shape_obj_too_complex(obj)) {
3579 st_free_table((st_table *)RCLASS_IVPTR(obj));
3581 else if (RCLASS_IVPTR(obj)) {
3582 xfree(RCLASS_IVPTR(obj));
3585 if (RCLASS_CONST_TBL(obj)) {
3586 rb_free_const_table(RCLASS_CONST_TBL(obj));
3588 if (RCLASS_CVC_TBL(obj)) {
3589 rb_id_table_foreach_values(RCLASS_CVC_TBL(obj), cvar_table_free_i, NULL);
3590 rb_id_table_free(RCLASS_CVC_TBL(obj));
3592 rb_class_remove_subclass_head(obj);
3593 rb_class_remove_from_module_subclasses(obj);
3594 rb_class_remove_from_super_subclasses(obj);
3595 if (FL_TEST_RAW(obj, RCLASS_SUPERCLASSES_INCLUDE_SELF)) {
3596 xfree(RCLASS_SUPERCLASSES(obj));
3609#if USE_DEBUG_COUNTER
3612 RB_DEBUG_COUNTER_INC(obj_hash_empty);
3615 RB_DEBUG_COUNTER_INC(obj_hash_1);
3618 RB_DEBUG_COUNTER_INC(obj_hash_2);
3621 RB_DEBUG_COUNTER_INC(obj_hash_3);
3624 RB_DEBUG_COUNTER_INC(obj_hash_4);
3630 RB_DEBUG_COUNTER_INC(obj_hash_5_8);
3634 RB_DEBUG_COUNTER_INC(obj_hash_g8);
3637 if (RHASH_AR_TABLE_P(obj)) {
3638 if (RHASH_AR_TABLE(obj) == NULL) {
3639 RB_DEBUG_COUNTER_INC(obj_hash_null);
3642 RB_DEBUG_COUNTER_INC(obj_hash_ar);
3646 RB_DEBUG_COUNTER_INC(obj_hash_st);
3653 if (RANY(obj)->as.regexp.ptr) {
3654 onig_free(RANY(obj)->as.regexp.ptr);
3655 RB_DEBUG_COUNTER_INC(obj_regexp_ptr);
3659 if (!rb_data_free(objspace, obj)) return false;
3664#if USE_DEBUG_COUNTER
3665 if (rm->regs.num_regs >= 8) {
3666 RB_DEBUG_COUNTER_INC(obj_match_ge8);
3668 else if (rm->regs.num_regs >= 4) {
3669 RB_DEBUG_COUNTER_INC(obj_match_ge4);
3671 else if (rm->regs.num_regs >= 1) {
3672 RB_DEBUG_COUNTER_INC(obj_match_under4);
3675 onig_region_free(&rm->regs, 0);
3679 RB_DEBUG_COUNTER_INC(obj_match_ptr);
3683 if (RANY(obj)->as.file.fptr) {
3684 make_io_zombie(objspace, obj);
3685 RB_DEBUG_COUNTER_INC(obj_file_ptr);
3690 RB_DEBUG_COUNTER_INC(obj_rational);
3693 RB_DEBUG_COUNTER_INC(obj_complex);
3699 if (RICLASS_OWNS_M_TBL_P(obj)) {
3701 rb_id_table_free(RCLASS_M_TBL(obj));
3703 if (RCLASS_CALLABLE_M_TBL(obj) != NULL) {
3704 rb_id_table_free(RCLASS_CALLABLE_M_TBL(obj));
3706 rb_class_remove_subclass_head(obj);
3707 cc_table_free(objspace, obj, FALSE);
3708 rb_class_remove_from_module_subclasses(obj);
3709 rb_class_remove_from_super_subclasses(obj);
3711 RB_DEBUG_COUNTER_INC(obj_iclass_ptr);
3715 RB_DEBUG_COUNTER_INC(obj_float);
3719 if (!BIGNUM_EMBED_P(obj) && BIGNUM_DIGITS(obj)) {
3720 xfree(BIGNUM_DIGITS(obj));
3721 RB_DEBUG_COUNTER_INC(obj_bignum_ptr);
3724 RB_DEBUG_COUNTER_INC(obj_bignum_embed);
3729 UNEXPECTED_NODE(obj_free);
3733 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) ||
3734 RANY(obj)->as.rstruct.as.heap.ptr == NULL) {
3735 RB_DEBUG_COUNTER_INC(obj_struct_embed);
3738 xfree((void *)RANY(obj)->as.rstruct.as.heap.ptr);
3739 RB_DEBUG_COUNTER_INC(obj_struct_ptr);
3745 rb_gc_free_dsymbol(obj);
3746 RB_DEBUG_COUNTER_INC(obj_symbol);
3751 switch (imemo_type(obj)) {
3753 rb_free_method_entry(&RANY(obj)->as.imemo.ment);
3754 RB_DEBUG_COUNTER_INC(obj_imemo_ment);
3757 rb_iseq_free(&RANY(obj)->as.imemo.iseq);
3758 RB_DEBUG_COUNTER_INC(obj_imemo_iseq);
3761 GC_ASSERT(VM_ENV_ESCAPED_P(RANY(obj)->as.imemo.env.ep));
3763 RB_DEBUG_COUNTER_INC(obj_imemo_env);
3766 xfree(RANY(obj)->as.imemo.alloc.ptr);
3767 RB_DEBUG_COUNTER_INC(obj_imemo_tmpbuf);
3770 rb_ast_free(&RANY(obj)->as.imemo.ast);
3771 RB_DEBUG_COUNTER_INC(obj_imemo_ast);
3774 RB_DEBUG_COUNTER_INC(obj_imemo_cref);
3777 RB_DEBUG_COUNTER_INC(obj_imemo_svar);
3779 case imemo_throw_data:
3780 RB_DEBUG_COUNTER_INC(obj_imemo_throw_data);
3783 RB_DEBUG_COUNTER_INC(obj_imemo_ifunc);
3786 RB_DEBUG_COUNTER_INC(obj_imemo_memo);
3788 case imemo_parser_strterm:
3789 RB_DEBUG_COUNTER_INC(obj_imemo_parser_strterm);
3791 case imemo_callinfo:
3797 if (ci->kwarg->references == 0) xfree((void *)ci->kwarg);
3799 RB_DEBUG_COUNTER_INC(obj_imemo_callinfo);
3802 case imemo_callcache:
3803 RB_DEBUG_COUNTER_INC(obj_imemo_callcache);
3805 case imemo_constcache:
3806 RB_DEBUG_COUNTER_INC(obj_imemo_constcache);
3812 rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE,
3817 make_zombie(objspace, obj, 0, 0);
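/* Note: objects that still have finalizers registered are parked as zombies here with a
 * NULL dfree (any dfree work was handled in the type switch above); ordinary objects
 * instead fall through and have their slot reclaimed directly by the sweeper. */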
3826#define OBJ_ID_INCREMENT (sizeof(RVALUE) / 2)
3827#define OBJ_ID_INITIAL (OBJ_ID_INCREMENT * 2)
3830object_id_cmp(st_data_t x, st_data_t y)
3832 if (RB_BIGNUM_TYPE_P(x)) {
3833 return !rb_big_eql(x, y);
3841object_id_hash(st_data_t n)
3843 if (RB_BIGNUM_TYPE_P(n)) {
3847 return st_numhash(n);
3850static const struct st_hash_type object_id_hash_type = {
3860#if defined(INIT_HEAP_PAGE_ALLOC_USE_MMAP)
3862 heap_page_alloc_use_mmap = INIT_HEAP_PAGE_ALLOC_USE_MMAP;
3865 objspace->next_object_id = INT2FIX(OBJ_ID_INITIAL);
3866 objspace->id_to_obj_tbl = st_init_table(&object_id_hash_type);
3867 objspace->obj_to_id_tbl = st_init_numtable();
3869#if RGENGC_ESTIMATE_OLDMALLOC
3870 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
3874 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
3878 gc_params.size_pool_init_slots[i] = GC_HEAP_INIT_SLOTS;
3880 size_pool->allocatable_pages = minimum_pages_for_size_pool(objspace, size_pool);
3882 heap_pages_expand_sorted(objspace);
3884 init_mark_stack(&objspace->mark_stack);
3886 objspace->profile.invoke_time = getrusage_time();
3887 finalizer_table = st_init_numtable();
3895 gc_stress_set(objspace, ruby_initial_gc_stress);
3898typedef int each_obj_callback(void *, void *, size_t, void *);
3899typedef int each_page_callback(struct heap_page *, void *);
3901static void objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data, bool protected);
3902static void objspace_reachable_objects_from_root(rb_objspace_t *, void (func)(const char *, VALUE, void *), void *);
3906 bool reenable_incremental;
3908 each_obj_callback *each_obj_callback;
3909 each_page_callback *each_page_callback;
3912 struct heap_page **pages[SIZE_POOL_COUNT];
3913 size_t pages_counts[SIZE_POOL_COUNT];
3917objspace_each_objects_ensure(VALUE arg)
3923 if (data->reenable_incremental) {
3924 objspace->flags.dont_incremental = FALSE;
3927 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
3928 struct heap_page **pages = data->pages[i];
3936objspace_each_objects_try(VALUE arg)
3942 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
3944 size_t size = size_mul_or_raise(SIZE_POOL_EDEN_HEAP(size_pool)->total_pages, sizeof(struct heap_page *), rb_eRuntimeError);
3946 struct heap_page **pages = malloc(size);
3947 if (!pages) rb_memerror();
3955 size_t pages_count = 0;
3956 ccan_list_for_each(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node) {
3957 pages[pages_count] = page;
3960 data->pages[i] = pages;
3961 data->pages_counts[i] = pages_count;
3962 GC_ASSERT(pages_count == SIZE_POOL_EDEN_HEAP(size_pool)->total_pages);
3965 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
3967 size_t pages_count = data->pages_counts[i];
3968 struct heap_page **pages = data->pages[i];
3970 struct heap_page *page = ccan_list_top(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, struct heap_page, page_node);
3971 for (size_t i = 0; i < pages_count; i++) {
3974 if (page == NULL) break;
3978 if (pages[i] != page) continue;
3980 uintptr_t pstart = (uintptr_t)page->start;
3981 uintptr_t pend = pstart + (page->total_slots * size_pool->slot_size);
3983 if (!__asan_region_is_poisoned((void *)pstart, pend - pstart)) {
3984 if (data->each_obj_callback &&
3985 (*data->each_obj_callback)((void *)pstart, (void *)pend, size_pool->slot_size, data->data)) {
3988 if (data->each_page_callback &&
3989 (*data->each_page_callback)(page, data->data)) {
3994 page = ccan_list_next(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node);
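/* Note: objspace_each_objects_try() works in two passes: it first snapshots the current
 * eden page list of each size pool into malloc'ed arrays, then walks the snapshot,
 * skipping pages that have since disappeared (pages[i] != page) or that ASan reports as
 * poisoned, so the callbacks never see pages added or removed mid-iteration. */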
4040rb_objspace_each_objects(each_obj_callback *callback,
void *data)
4042 objspace_each_objects(&rb_objspace, callback, data, TRUE);
4050 bool reenable_incremental = FALSE;
4052 reenable_incremental = !objspace->flags.dont_incremental;
4055 objspace->flags.dont_incremental = TRUE;
4066objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data, bool protected)
4069 .objspace = objspace,
4070 .each_obj_callback = callback,
4071 .each_page_callback = NULL,
4078objspace_each_pages(rb_objspace_t *objspace, each_page_callback *callback, void *data, bool protected)
4081 .objspace = objspace,
4082 .each_obj_callback = NULL,
4083 .each_page_callback = callback,
4090rb_objspace_each_objects_without_setup(each_obj_callback *callback, void *data)
4092 objspace_each_objects(&rb_objspace, callback, data, FALSE);
4101internal_object_p(VALUE obj)
4104 void *ptr = asan_unpoison_object_temporary(obj);
4105 bool used_p = p->as.basic.flags;
4110 UNEXPECTED_NODE(internal_object_p);
4119 if (!p->as.basic.klass) break;
4121 return rb_singleton_class_internal_p(obj);
4125 if (!p->as.basic.klass) break;
4129 if (ptr || ! used_p) {
4130 asan_poison_object(obj);
4136rb_objspace_internal_object_p(VALUE obj)
4138 return internal_object_p(obj);
4142os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
4147 for (; v != (VALUE)vend; v += stride) {
4148 if (!internal_object_p(v)) {
4168 rb_objspace_each_objects(os_obj_of_i, &oes);
4215 return os_obj_of(of);
4229 return rb_undefine_finalizer(obj);
4236 st_data_t data = obj;
4238 st_delete(finalizer_table, &data, 0);
4244should_be_callable(VALUE block)
4247 rb_raise(rb_eArgError, "wrong type argument %"PRIsVALUE" (should be callable)",
4253should_be_finalizable(VALUE obj)
4256 rb_raise(rb_eArgError, "cannot define finalizer for %s",
4263rb_define_finalizer_no_check(VALUE obj, VALUE block)
4271 if (st_lookup(finalizer_table, obj, &data)) {
4272 table = (VALUE)data;
4279 for (i = 0; i < len; i++) {
4288 rb_ary_push(table, block);
4292 RBASIC_CLEAR_CLASS(table);
4293 st_add_direct(finalizer_table, obj, table);
4369 should_be_finalizable(obj);
4374 should_be_callable(block);
4377 if (rb_callable_receiver(block) == obj) {
4378 rb_warn("finalizer references object to be finalized");
4381 return rb_define_finalizer_no_check(obj, block);
4387 should_be_finalizable(obj);
4388 should_be_callable(block);
4389 return rb_define_finalizer_no_check(obj, block);
4400 if (st_lookup(finalizer_table, obj, &data)) {
4401 table = (VALUE)data;
4402 st_insert(finalizer_table, dest, table);
4417 VALUE errinfo = ec->errinfo;
4418 rb_warn("Exception in finalizer %+"PRIsVALUE, final);
4419 rb_ec_error_print(ec, errinfo);
4427 enum ruby_tag_type state;
4438#define RESTORE_FINALIZER() (\
4439 ec->cfp = saved.cfp, \
4440 ec->cfp->sp = saved.sp, \
4441 ec->errinfo = saved.errinfo)
4443 saved.errinfo = ec->errinfo;
4444 saved.objid = rb_obj_id(obj);
4445 saved.cfp = ec->cfp;
4446 saved.sp = ec->cfp->sp;
4451 state = EC_EXEC_TAG();
4452 if (state != TAG_NONE) {
4454 warn_exception_in_finalizer(ec, ATOMIC_VALUE_EXCHANGE(saved.final, Qundef));
4456 for (i = saved.finished;
4458 saved.finished = ++i) {
4459 run_single_final(saved.final = RARRAY_AREF(table, i), saved.objid);
4462#undef RESTORE_FINALIZER
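/* Note: run_finalizer() runs each finalizer proc under EC_EXEC_TAG() with the control
 * frame, stack pointer and errinfo saved (see RESTORE_FINALIZER above); if one finalizer
 * raises, the exception is reported via warn_exception_in_finalizer() and the loop
 * resumes with the next table entry rather than abandoning the remaining finalizers. */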
4468 st_data_t key, table;
4470 if (RZOMBIE(zombie)->dfree) {
4471 RZOMBIE(zombie)->dfree(RZOMBIE(zombie)->data);
4474 key = (st_data_t)zombie;
4475 if (st_delete(finalizer_table, &key, &table)) {
4476 run_finalizer(objspace, zombie, (VALUE)table);
4486 asan_unpoison_object(zombie, false);
4487 next_zombie = RZOMBIE(zombie)->next;
4488 page = GET_HEAP_PAGE(zombie);
4490 run_final(objspace, zombie);
4496 obj_free_object_id(objspace, zombie);
4499 GC_ASSERT(heap_pages_final_slots > 0);
4500 GC_ASSERT(page->final_slots > 0);
4502 heap_pages_final_slots--;
4503 page->final_slots--;
4505 heap_page_add_freeobj(objspace, page, zombie);
4506 page->size_pool->total_freed_objects++;
4510 zombie = next_zombie;
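/* Note: finalize_list() drains the deferred-final chain: for each zombie it runs the
 * stored dfree/finalizers, drops any object-ID mapping, decrements the per-page and
 * global final-slot counters, and returns the slot to its page's freelist via
 * heap_page_add_freeobj(). */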
4518 while ((zombie = ATOMIC_VALUE_EXCHANGE(heap_pages_deferred_final, 0)) != 0) {
4519 finalize_list(objspace, zombie);
4527 ec->interrupt_mask |= PENDING_INTERRUPT_MASK;
4528 finalize_deferred_heap_pages(objspace);
4529 ec->interrupt_mask &= ~PENDING_INTERRUPT_MASK;
4533gc_finalize_deferred(void *dmy)
4536 if (ATOMIC_EXCHANGE(finalizing, 1)) return;
4538 finalize_deferred(objspace);
4539 ATOMIC_SET(finalizing, 0);
4554 if (is_incremental_marking(objspace)) {
4557 while (pop_mark_stack(&objspace->mark_stack, &obj));
4559 objspace->flags.during_incremental_marking = FALSE;
4562 if (is_lazy_sweeping(objspace)) {
4563 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
4565 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
4567 heap->sweeping_page = NULL;
4570 ccan_list_for_each(&heap->pages, page, page_node) {
4571 page->flags.before_sweep = false;
4576 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
4578 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
4579 rgengc_mark_and_rememberset_clear(objspace, heap);
4582 gc_mode_set(objspace, gc_mode_none);
4592force_chain_object(st_data_t key, st_data_t val, st_data_t arg)
4603bool rb_obj_is_main_ractor(VALUE gv);
4608 for (size_t i = 0; i < heap_allocated_pages; i++) {
4609 struct heap_page *page = heap_pages_sorted[i];
4610 short stride = page->slot_size;
4612 uintptr_t p = (uintptr_t)page->start;
4613 uintptr_t pend = p + page->total_slots * stride;
4614 for (; p < pend; p += stride) {
4618 if (rb_obj_is_mutex(vp) || rb_obj_is_thread(vp) || rb_obj_is_main_ractor(vp)) {
4619 obj_free(objspace, vp);
4624 obj_free(objspace, vp);
4639#if RGENGC_CHECK_MODE >= 2
4640 gc_verify_internal_consistency(objspace);
4642 if (ATOMIC_EXCHANGE(finalizing, 1)) return;
4645 finalize_deferred(objspace);
4646 GC_ASSERT(heap_pages_deferred_final == 0);
4649 objspace->flags.dont_incremental = 1;
4652 while (finalizer_table->num_entries) {
4654 st_foreach(finalizer_table, force_chain_object, (st_data_t)&list);
4657 st_data_t obj = (st_data_t)curr->obj;
4658 run_finalizer(objspace, curr->obj, curr->table);
4659 st_delete(finalizer_table, &obj, 0);
4672 unsigned int lock_lev;
4673 gc_enter(objspace, gc_enter_event_finalizer, &lock_lev);
4676 for (i = 0; i < heap_allocated_pages; i++) {
4677 struct heap_page *page = heap_pages_sorted[i];
4678 short stride = page->slot_size;
4680 uintptr_t p = (uintptr_t)page->start;
4681 uintptr_t pend = p + page->total_slots * stride;
4682 for (; p < pend; p += stride) {
4684 void *poisoned = asan_unpoison_object_temporary(vp);
4687 if (!DATA_PTR(p) || !RANY(p)->as.data.dfree) break;
4688 if (rb_obj_is_thread(vp)) break;
4689 if (rb_obj_is_mutex(vp)) break;
4690 if (rb_obj_is_fiber(vp)) break;
4691 if (rb_obj_is_main_ractor(vp)) break;
4693 obj_free(objspace, vp);
4696 obj_free(objspace, vp);
4703 if (rb_free_at_exit) {
4704 obj_free(objspace, vp);
4710 asan_poison_object(vp);
4715 gc_exit(objspace, gc_enter_event_finalizer, &lock_lev);
4717 finalize_deferred_heap_pages(objspace);
4719 st_free_table(finalizer_table);
4720 finalizer_table = 0;
4721 ATOMIC_SET(finalizing, 0);
4725is_swept_object(VALUE ptr)
4727 struct heap_page *page = GET_HEAP_PAGE(ptr);
4728 return page->flags.before_sweep ? FALSE : TRUE;
4735 if (!is_lazy_sweeping(objspace) ||
4736 is_swept_object(ptr) ||
4737 MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(ptr), ptr)) {
4758 if (!is_garbage_object(objspace, ptr)) {
4767is_markable_object(VALUE obj)
4770 check_rvalue_consistency(obj);
4775rb_objspace_markable_object_p(VALUE obj)
4778 return is_markable_object(obj) && is_live_object(objspace, obj);
4782rb_objspace_garbage_object_p(VALUE obj)
4785 return is_garbage_object(objspace, obj);
4789rb_gc_is_ptr_to_obj(void *ptr)
4792 return is_pointer_to_heap(objspace, ptr);
4796rb_gc_id2ref_obj_tbl(VALUE objid)
4801 if (st_lookup(objspace->id_to_obj_tbl, objid, &orig)) {
4827#if SIZEOF_LONG == SIZEOF_VOIDP
4828#define NUM2PTR(x) NUM2ULONG(x)
4829#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
4830#define NUM2PTR(x) NUM2ULL(x)
4838 if (FIXNUM_P(objid) || rb_big_size(objid) <= SIZEOF_VOIDP) {
4839 ptr = NUM2PTR(objid);
4846 ptr = obj_id_to_ref(objid);
4847 if ((ptr % sizeof(RVALUE)) == (4 << 2)) {
4850 if (!rb_static_id_valid_p(symid))
4856 if (!UNDEF_P(orig = rb_gc_id2ref_obj_tbl(objid)) &&
4857 is_live_object(objspace, orig)) {
4863 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is id of the unshareable object on multi-ractor", rb_int2str(objid, 10));
4867 if (rb_int_ge(objid, objspace->next_object_id)) {
4868 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not id value", rb_int2str(objid, 10));
4871 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is recycled object", rb_int2str(objid, 10));
4879 return id2ref(objid);
4889#if SIZEOF_LONG == SIZEOF_VOIDP
4899 return get_heap_object_id(obj);
4903cached_object_id(VALUE obj)
4909 if (st_lookup(objspace->obj_to_id_tbl, (st_data_t)obj, &id)) {
4915 id = objspace->next_object_id;
4916 objspace->next_object_id = rb_int_plus(id, INT2FIX(OBJ_ID_INCREMENT));
4918 VALUE already_disabled = rb_gc_disable_no_rest();
4919 st_insert(objspace->obj_to_id_tbl, (st_data_t)obj, (st_data_t)id);
4920 st_insert(objspace->id_to_obj_tbl, (st_data_t)id, (st_data_t)obj);
4921 if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
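/* Note: cached_object_id() hands out monotonically increasing IDs (next_object_id is
 * advanced by OBJ_ID_INCREMENT) and records the pair in both lookup tables, obj_to_id_tbl
 * and id_to_obj_tbl, with GC disabled around the two inserts via rb_gc_disable_no_rest(). */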
4930nonspecial_obj_id(VALUE obj)
4932#if SIZEOF_LONG == SIZEOF_VOIDP
4934#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
4937# error not supported
4944 return rb_find_object_id(obj, nonspecial_obj_id);
5006 return rb_find_object_id(obj, cached_object_id);
5009static enum rb_id_table_iterator_result
5010cc_table_memsize_i(VALUE ccs_ptr, void *data_ptr)
5012 size_t *total_size = data_ptr;
5014 *total_size += sizeof(*ccs);
5015 *total_size += sizeof(ccs->entries[0]) * ccs->capa;
5016 return ID_TABLE_CONTINUE;
5022 size_t total = rb_id_table_memsize(cc_table);
5023 rb_id_table_foreach_values(cc_table, cc_table_memsize_i, &total);
5028obj_memsize_of(VALUE obj, int use_all_types)
5037 size += rb_generic_ivar_memsize(obj);
5042 if (rb_shape_obj_too_complex(obj)) {
5043 size += rb_st_memsize(ROBJECT_IV_HASH(obj));
5045 else if (!(RBASIC(obj)->flags & ROBJECT_EMBED)) {
5046 size += ROBJECT_IV_CAPACITY(obj) * sizeof(VALUE);
5051 if (RCLASS_M_TBL(obj)) {
5052 size += rb_id_table_memsize(RCLASS_M_TBL(obj));
5055 size += SIZEOF_VALUE << bit_length(RCLASS_IV_COUNT(obj));
5056 if (RCLASS_CVC_TBL(obj)) {
5057 size += rb_id_table_memsize(RCLASS_CVC_TBL(obj));
5059 if (RCLASS_EXT(obj)->const_tbl) {
5060 size += rb_id_table_memsize(RCLASS_EXT(obj)->const_tbl);
5062 if (RCLASS_CC_TBL(obj)) {
5063 size += cc_table_memsize(RCLASS_CC_TBL(obj));
5065 if (FL_TEST_RAW(obj, RCLASS_SUPERCLASSES_INCLUDE_SELF)) {
5066 size += (RCLASS_SUPERCLASS_DEPTH(obj) + 1) * sizeof(VALUE);
5070 if (RICLASS_OWNS_M_TBL_P(obj)) {
5071 if (RCLASS_M_TBL(obj)) {
5072 size += rb_id_table_memsize(RCLASS_M_TBL(obj));
5075 if (RCLASS_CC_TBL(obj)) {
5076 size += cc_table_memsize(RCLASS_CC_TBL(obj));
5080 size += rb_str_memsize(obj);
5083 size += rb_ary_memsize(obj);
5086 if (RHASH_ST_TABLE_P(obj)) {
5087 VM_ASSERT(RHASH_ST_TABLE(obj) != NULL);
5089 size += st_memsize(RHASH_ST_TABLE(obj)) - sizeof(st_table);
5098 if (use_all_types) size += rb_objspace_data_type_memsize(obj);
5103 size += onig_region_memsize(&rm->regs);
5108 if (RFILE(obj)->fptr) {
5109 size += rb_io_memsize(RFILE(obj)->fptr);
5116 size += imemo_memsize(obj);
5124 if (!(RBASIC(obj)->flags & BIGNUM_EMBED_FLAG) && BIGNUM_DIGITS(obj)) {
5125 size += BIGNUM_LEN(obj) * sizeof(BDIGIT);
5130 UNEXPECTED_NODE(obj_memsize_of);
5134 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
5135 RSTRUCT(obj)->as.heap.ptr) {
5136 size += sizeof(VALUE) * RSTRUCT_LEN(obj);
5145 rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",
5149 return size + rb_gc_obj_slot_size(obj);
5153rb_obj_memsize_of(VALUE obj)
5155 return obj_memsize_of(obj, TRUE);
5159set_zero(st_data_t key, st_data_t val, st_data_t arg)
5163 rb_hash_aset(hash, k, INT2FIX(0));
5168type_sym(size_t type)
5171#define COUNT_TYPE(t) case (t): return ID2SYM(rb_intern(#t)); break;
5240count_objects(int argc, VALUE *argv, VALUE os)
5251 if (!RB_TYPE_P(hash, T_HASH))
5255 for (i = 0; i <= T_MASK; i++) {
5259 for (i = 0; i < heap_allocated_pages; i++) {
5260 struct heap_page *page = heap_pages_sorted[i];
5261 short stride = page->slot_size;
5263 uintptr_t p = (uintptr_t)page->start;
5264 uintptr_t pend = p + page->total_slots * stride;
5265 for (;p < pend; p += stride) {
5267 GC_ASSERT((NUM_IN_PAGE(vp) * BASE_SLOT_SIZE) % page->slot_size == 0);
5269 void *poisoned = asan_unpoison_object_temporary(vp);
5270 if (RANY(p)->as.basic.flags) {
5278 asan_poison_object(vp);
5281 total += page->total_slots;
5285 hash = rb_hash_new();
5288 rb_hash_stlike_foreach(hash, set_zero, hash);
5293 for (i = 0; i <= T_MASK; i++) {
5311 size_t total_slots = 0;
5312 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5314 total_slots += SIZE_POOL_EDEN_HEAP(size_pool)->total_slots;
5315 total_slots += SIZE_POOL_TOMB_HEAP(size_pool)->total_slots;
5323 return total_allocated_objects(objspace) - total_freed_objects(objspace) - heap_pages_final_slots;
5329 return objspace_available_slots(objspace) - objspace_live_slots(objspace) - heap_pages_final_slots;
5333gc_setup_mark_bits(struct heap_page *page)
5336 memcpy(&page->mark_bits[0], &page->uncollectible_bits[0], HEAP_PAGE_BITMAP_SIZE);
5343enum {HEAP_PAGE_LOCK = PAGE_NOACCESS, HEAP_PAGE_UNLOCK = PAGE_READWRITE};
5349 return VirtualProtect(body, HEAP_PAGE_SIZE, protect, &old_protect) != 0;
5352enum {HEAP_PAGE_LOCK = PROT_NONE, HEAP_PAGE_UNLOCK = PROT_READ | PROT_WRITE};
5353#define protect_page_body(body, protect) !mprotect((body), HEAP_PAGE_SIZE, (protect))
5359 if (!protect_page_body(body, HEAP_PAGE_LOCK)) {
5360 rb_bug("Couldn't protect page %p, errno: %s", (void *)body, strerror(errno));
5363 gc_report(5, objspace, "Protecting page in move %p\n", (void *)body);
5370 if (!protect_page_body(body, HEAP_PAGE_UNLOCK)) {
5371 rb_bug("Couldn't unprotect page %p, errno: %s", (void *)body, strerror(errno));
5374 gc_report(5, objspace, "Unprotecting page in move %p\n", (void *)body);
5381 GC_ASSERT(gc_is_moveable_obj(objspace, src));
5383 struct heap_page *src_page = GET_HEAP_PAGE(src);
5391 GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(src), src));
5393 asan_unlock_freelist(free_page);
5395 asan_lock_freelist(free_page);
5396 asan_unpoison_object(dest, false);
5402 asan_unlock_freelist(free_page);
5403 free_page->freelist = RANY(dest)->as.free.next;
5404 asan_lock_freelist(free_page);
5408 if (src_page->slot_size > free_page->slot_size) {
5409 objspace->rcompactor.moved_down_count_table[BUILTIN_TYPE(src)]++;
5411 else if (free_page->slot_size > src_page->slot_size) {
5412 objspace->rcompactor.moved_up_count_table[BUILTIN_TYPE(src)]++;
5414 objspace->rcompactor.moved_count_table[BUILTIN_TYPE(src)]++;
5415 objspace->rcompactor.total_moved++;
5417 gc_move(objspace, src, dest, src_page->slot_size, free_page->slot_size);
5418 gc_pin(objspace, src);
5419 free_page->free_slots--;
5427 struct heap_page *cursor = heap->compact_cursor;
5430 unlock_page_body(objspace, GET_PAGE_BODY(cursor->start));
5431 cursor = ccan_list_next(&heap->pages, cursor, page_node);
5436#if GC_CAN_COMPILE_COMPACTION
5440#if defined(__MINGW32__) || defined(_WIN32)
5441# define GC_COMPACTION_SUPPORTED 1
5445# define GC_COMPACTION_SUPPORTED (GC_CAN_COMPILE_COMPACTION && HEAP_PAGE_ALLOC_USE_MMAP)
5448#if GC_CAN_COMPILE_COMPACTION
5450read_barrier_handler(uintptr_t original_address)
5456 uintptr_t address = original_address - (original_address % BASE_SLOT_SIZE);
5458 obj = (VALUE)address;
5464 if (page_body == NULL) {
5465 rb_bug("read_barrier_handler: segmentation fault at %p", (void *)original_address);
5470 unlock_page_body(objspace, page_body);
5472 objspace->profile.read_barrier_faults++;
5474 invalidate_moved_page(objspace, GET_HEAP_PAGE(obj));
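/* Note: the compaction read barrier works by leaving already-compacted pages
 * access-protected; a fault on such a page lands here, the faulting address is rounded
 * down to its slot, the page body is unprotected, the fault is counted in
 * profile.read_barrier_faults, and invalidate_moved_page() moves that page's forwarded
 * objects back so the interrupted read can be retried safely. */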
5480#if !GC_CAN_COMPILE_COMPACTION
5482uninstall_handlers(void)
5488install_handlers(void)
5492#elif defined(_WIN32)
5493static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;
5494typedef void (*signal_handler)(int);
5495static signal_handler old_sigsegv_handler;
5498read_barrier_signal(EXCEPTION_POINTERS * info)
5501 if (info->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) {
5506 read_barrier_handler((uintptr_t)info->ExceptionRecord->ExceptionInformation[1]);
5507 return EXCEPTION_CONTINUE_EXECUTION;
5510 return EXCEPTION_CONTINUE_SEARCH;
5515uninstall_handlers(void)
5517 signal(SIGSEGV, old_sigsegv_handler);
5518 SetUnhandledExceptionFilter(old_handler);
5522install_handlers(void)
5525 old_sigsegv_handler = signal(SIGSEGV, NULL);
5528 old_handler = SetUnhandledExceptionFilter(read_barrier_signal);
5531static struct sigaction old_sigbus_handler;
5532static struct sigaction old_sigsegv_handler;
5534#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
5535static exception_mask_t old_exception_masks[32];
5536static mach_port_t old_exception_ports[32];
5537static exception_behavior_t old_exception_behaviors[32];
5538static thread_state_flavor_t old_exception_flavors[32];
5539static mach_msg_type_number_t old_exception_count;
5542disable_mach_bad_access_exc(void)
5544 old_exception_count = sizeof(old_exception_masks) / sizeof(old_exception_masks[0]);
5545 task_swap_exception_ports(
5546 mach_task_self(), EXC_MASK_BAD_ACCESS,
5547 MACH_PORT_NULL, EXCEPTION_DEFAULT, 0,
5548 old_exception_masks, &old_exception_count,
5549 old_exception_ports, old_exception_behaviors, old_exception_flavors
5554restore_mach_bad_access_exc(void)
5556 for (mach_msg_type_number_t i = 0; i < old_exception_count; i++) {
5557 task_set_exception_ports(
5559 old_exception_masks[i], old_exception_ports[i],
5560 old_exception_behaviors[i], old_exception_flavors[i]
5567read_barrier_signal(int sig, siginfo_t * info, void * data)
5570 struct sigaction prev_sigbus, prev_sigsegv;
5571 sigaction(SIGBUS, &old_sigbus_handler, &prev_sigbus);
5572 sigaction(SIGSEGV, &old_sigsegv_handler, &prev_sigsegv);
5575 sigset_t set, prev_set;
5577 sigaddset(&set, SIGBUS);
5578 sigaddset(&set, SIGSEGV);
5579 sigprocmask(SIG_UNBLOCK, &set, &prev_set);
5580#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
5581 disable_mach_bad_access_exc();
5584 read_barrier_handler((uintptr_t)info->si_addr);
5587#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
5588 restore_mach_bad_access_exc();
5590 sigaction(SIGBUS, &prev_sigbus, NULL);
5591 sigaction(SIGSEGV, &prev_sigsegv, NULL);
5592 sigprocmask(SIG_SETMASK, &prev_set, NULL);
5596uninstall_handlers(void)
5598#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
5599 restore_mach_bad_access_exc();
5601 sigaction(SIGBUS, &old_sigbus_handler, NULL);
5602 sigaction(SIGSEGV, &old_sigsegv_handler, NULL);
5606install_handlers(void)
5608 struct sigaction action;
5609 memset(&action, 0, sizeof(struct sigaction));
5610 sigemptyset(&action.sa_mask);
5611 action.sa_sigaction = read_barrier_signal;
5612 action.sa_flags = SA_SIGINFO | SA_ONSTACK;
5614 sigaction(SIGBUS, &action, &old_sigbus_handler);
5615 sigaction(SIGSEGV, &action, &old_sigsegv_handler);
5616#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
5617 disable_mach_bad_access_exc();
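/* Note: on POSIX platforms the read barrier is driven by SIGBUS/SIGSEGV handlers
 * installed with SA_SIGINFO | SA_ONSTACK; on macOS the Mach bad-access exception ports
 * are additionally swapped out, since they would otherwise intercept the fault before
 * the signal handler gets a chance to run. */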
5625 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5627 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5628 gc_unprotect_pages(objspace, heap);
5631 uninstall_handlers();
5633 gc_update_references(objspace);
5634 objspace->profile.compact_count++;
5636 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5638 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5639 heap->compact_cursor = NULL;
5640 heap->free_pages = NULL;
5641 heap->compact_cursor_index = 0;
5644 if (gc_prof_enabled(objspace)) {
5646 record->moved_objects = objspace->rcompactor.total_moved - record->moved_objects;
5648 objspace->flags.during_compacting = FALSE;
5661 struct heap_page * sweep_page = ctx->page;
5662 short slot_size = sweep_page->slot_size;
5663 short slot_bits = slot_size / BASE_SLOT_SIZE;
5664 GC_ASSERT(slot_bits > 0);
5668 GC_ASSERT(vp % BASE_SLOT_SIZE == 0);
5670 asan_unpoison_object(vp, false);
5674 gc_report(2, objspace, "page_sweep: free %p\n", (void *)p);
5675#if RGENGC_CHECK_MODE
5676 if (!is_full_marking(objspace)) {
5677 if (RVALUE_OLD_P(vp)) rb_bug("page_sweep: %p - old while minor GC.", (void *)p);
5678 if (RVALUE_REMEMBERED(vp)) rb_bug("page_sweep: %p - remembered.", (void *)p);
5681 if (obj_free(objspace, vp)) {
5684 (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, BASE_SLOT_SIZE);
5685 heap_page_add_freeobj(objspace, sweep_page, vp);
5686 gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(vp));
5695 if (objspace->flags.during_compacting) {
5701 rb_bug("T_MOVED shouldn't be seen until compaction is finished");
5703 gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(vp));
5705 heap_page_add_freeobj(objspace, sweep_page, vp);
5716 bitset >>= slot_bits;
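/* Note: gc_sweep_plane() consumes one word of the bitmap at a time; each set bit names a
 * sweepable slot, the word is shifted right by slot_bits (the number of BASE_SLOT_SIZE
 * units per slot in this pool) after every slot, and freed objects are pushed back onto
 * the page's freelist as they are encountered. */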
5723 struct heap_page *sweep_page = ctx->page;
5724 GC_ASSERT(SIZE_POOL_EDEN_HEAP(sweep_page->size_pool) == heap);
5727 bits_t *bits, bitset;
5729 gc_report(2, objspace, "page_sweep: start.\n");
5731#if RGENGC_CHECK_MODE
5732 if (!objspace->flags.immediate_sweep) {
5733 GC_ASSERT(sweep_page->flags.before_sweep == TRUE);
5736 sweep_page->flags.before_sweep = FALSE;
5737 sweep_page->free_slots = 0;
5739 p = (uintptr_t)sweep_page->start;
5740 bits = sweep_page->mark_bits;
5742 int page_rvalue_count = sweep_page->total_slots * (sweep_page->slot_size / BASE_SLOT_SIZE);
5743 int out_of_range_bits = (NUM_IN_PAGE(p) + page_rvalue_count) % BITS_BITLENGTH;
5744 if (out_of_range_bits != 0) {
5745 bits[BITMAP_INDEX(p) + page_rvalue_count / BITS_BITLENGTH] |= ~(((bits_t)1 << out_of_range_bits) - 1);
5751 int bitmap_plane_count = CEILDIV(NUM_IN_PAGE(p) + page_rvalue_count, BITS_BITLENGTH);
5752 GC_ASSERT(bitmap_plane_count == HEAP_PAGE_BITMAP_LIMIT - 1 ||
5753 bitmap_plane_count == HEAP_PAGE_BITMAP_LIMIT);
5757 bitset >>= NUM_IN_PAGE(p);
5759 gc_sweep_plane(objspace, heap, p, bitset, ctx);
5761 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
5763 for (int i = 1; i < bitmap_plane_count; i++) {
5766 gc_sweep_plane(objspace, heap, p, bitset, ctx);
5768 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
5771 if (!heap->compact_cursor) {
5772 gc_setup_mark_bits(sweep_page);
5775#if GC_PROFILE_MORE_DETAIL
5776 if (gc_prof_enabled(objspace)) {
5778 record->removing_objects += ctx->final_slots + ctx->freed_slots;
5779 record->empty_objects += ctx->empty_slots;
5782 if (0) fprintf(stderr, "gc_sweep_page(%"PRIdSIZE"): total_slots: %d, freed_slots: %d, empty_slots: %d, final_slots: %d\n",
5784 sweep_page->total_slots,
5785 ctx->freed_slots, ctx->empty_slots, ctx->final_slots);
5787 sweep_page->free_slots += ctx->freed_slots + ctx->empty_slots;
5788 sweep_page->size_pool->total_freed_objects += ctx->freed_slots;
5790 if (heap_pages_deferred_final && !finalizing) {
5793 gc_finalize_deferred_register(objspace);
5797#if RGENGC_CHECK_MODE
5798 short freelist_len = 0;
5799 asan_unlock_freelist(sweep_page);
5800 RVALUE *ptr = sweep_page->freelist;
5803 ptr = ptr->as.free.next;
5805 asan_lock_freelist(sweep_page);
5806 if (freelist_len != sweep_page->free_slots) {
5807 rb_bug("inconsistent freelist length: expected %d but was %d", sweep_page->free_slots, freelist_len);
5811 gc_report(2, objspace, "page_sweep: end.\n");
5815gc_mode_name(enum gc_mode mode)
5818 case gc_mode_none: return "none";
5819 case gc_mode_marking: return "marking";
5820 case gc_mode_sweeping: return "sweeping";
5821 case gc_mode_compacting: return "compacting";
5822 default: rb_bug("gc_mode_name: unknown mode: %d", (int)mode);
5827gc_mode_transition(rb_objspace_t *objspace, enum gc_mode mode)
5829#if RGENGC_CHECK_MODE
5830 enum gc_mode prev_mode = gc_mode(objspace);
5831 switch (prev_mode) {
5832 case gc_mode_none: GC_ASSERT(mode == gc_mode_marking); break;
5833 case gc_mode_marking: GC_ASSERT(mode == gc_mode_sweeping); break;
5834 case gc_mode_sweeping: GC_ASSERT(mode == gc_mode_none || mode == gc_mode_compacting); break;
5835 case gc_mode_compacting: GC_ASSERT(mode == gc_mode_none); break;
5838 if (0) fprintf(stderr, "gc_mode_transition: %s->%s\n", gc_mode_name(gc_mode(objspace)), gc_mode_name(mode));
5839 gc_mode_set(objspace, mode);
5846 asan_unlock_freelist(page);
5847 if (page->freelist) {
5848 RVALUE *p = page->freelist;
5849 asan_unpoison_object((VALUE)p, false);
5850 while (p->as.free.next) {
5852 p = p->as.free.next;
5853 asan_poison_object((VALUE)prev);
5854 asan_unpoison_object((VALUE)p, false);
5856 p->as.free.next = freelist;
5857 asan_poison_object((VALUE)p);
5860 page->freelist = freelist;
5862 asan_lock_freelist(page);
5869 heap->sweeping_page = ccan_list_top(&heap->pages, struct heap_page, page_node);
5870 heap->free_pages = NULL;
5871 heap->pooled_pages = NULL;
5872 if (!objspace->flags.immediate_sweep) {
5875 ccan_list_for_each(&heap->pages, page, page_node) {
5876 page->flags.before_sweep = TRUE;
5881#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 4
5885#if GC_CAN_COMPILE_COMPACTION
5886static void gc_sort_heap_by_compare_func(rb_objspace_t *objspace, gc_compact_compare_func compare_func);
5887static int compare_pinned_slots(const void *left, const void *right, void *d);
5893 gc_mode_transition(objspace, gc_mode_sweeping);
5894 objspace->rincgc.pooled_slots = 0;
5896#if GC_CAN_COMPILE_COMPACTION
5897 if (objspace->flags.during_compacting) {
5898 gc_sort_heap_by_compare_func(
5900 objspace->rcompactor.compare_func ? objspace->rcompactor.compare_func : compare_pinned_slots
5905 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5907 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5909 gc_sweep_start_heap(objspace, heap);
5912 if (heap->sweeping_page == NULL) {
5913 GC_ASSERT(heap->total_pages == 0);
5914 GC_ASSERT(heap->total_slots == 0);
5915 gc_sweep_finish_size_pool(objspace, size_pool);
5920 ccan_list_for_each(&GET_VM()->ractor.set, r, vmlr_node) {
5921 rb_gc_ractor_newobj_cache_clear(&r->newobj_cache);
5928 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5929 size_t total_slots = heap->total_slots + SIZE_POOL_TOMB_HEAP(size_pool)->total_slots;
5930 size_t total_pages = heap->total_pages + SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
5931 size_t swept_slots = size_pool->freed_slots + size_pool->empty_slots;
5933 size_t init_slots = gc_params.size_pool_init_slots[size_pool - size_pools];
5934 size_t min_free_slots = (size_t)(MAX(total_slots, init_slots) * gc_params.heap_free_slots_min_ratio);
5941 while (swept_slots < min_free_slots &&
5942 (resurrected_page = heap_page_resurrect(objspace, size_pool))) {
5943 swept_slots += resurrected_page->free_slots;
5945 heap_add_page(objspace, size_pool, heap, resurrected_page);
5946 heap_add_freepage(heap, resurrected_page);
5949 if (swept_slots < min_free_slots) {
5950 bool grow_heap = is_full_marking(objspace);
5954 if (!is_full_marking(objspace) && size_pool->allocatable_pages == 0) {
5956 bool is_growth_heap = size_pool->empty_slots == 0 || size_pool->freed_slots > size_pool->empty_slots;
5961 if (objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE ||
5962 total_slots < init_slots) {
5965 else if (is_growth_heap) {
5966 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_NOFREE;
5967 size_pool->force_major_gc_count++;
5972 size_t extend_page_count = heap_extend_pages(objspace, size_pool, swept_slots, total_slots, total_pages);
5974 if (extend_page_count > size_pool->allocatable_pages) {
5975 size_pool_allocatable_pages_set(objspace, size_pool, extend_page_count);
5984 gc_report(1, objspace, "gc_sweep_finish\n");
5986 gc_prof_set_heap_info(objspace);
5987 heap_pages_free_unused_pages(objspace);
5989 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5993 size_t tomb_pages = SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
5994 if (size_pool->allocatable_pages < tomb_pages) {
5995 size_pool->allocatable_pages = tomb_pages;
5998 size_pool->freed_slots = 0;
5999 size_pool->empty_slots = 0;
6001 if (!will_be_incremental_marking(objspace)) {
6002 rb_heap_t *eden_heap = SIZE_POOL_EDEN_HEAP(size_pool);
6003 struct heap_page *end_page = eden_heap->free_pages;
6005 while (end_page->free_next) end_page = end_page->free_next;
6006 end_page->free_next = eden_heap->pooled_pages;
6009 eden_heap->free_pages = eden_heap->pooled_pages;
6011 eden_heap->pooled_pages = NULL;
6012 objspace->rincgc.pooled_slots = 0;
6015 heap_pages_expand_sorted(objspace);
6018 gc_mode_transition(objspace, gc_mode_none);
6020#if RGENGC_CHECK_MODE >= 2
6021 gc_verify_internal_consistency(objspace);
6028 struct heap_page *sweep_page = heap->sweeping_page;
6029 int unlink_limit = GC_SWEEP_PAGES_FREEABLE_PER_STEP;
6030 int swept_slots = 0;
6031 int pooled_slots = 0;
6033 if (sweep_page == NULL) return FALSE;
6035#if GC_ENABLE_LAZY_SWEEP
6036 gc_prof_sweep_timer_start(objspace);
6040 RUBY_DEBUG_LOG("sweep_page:%p", (void *)sweep_page);
6048 gc_sweep_page(objspace, heap, &ctx);
6049 int free_slots = ctx.freed_slots + ctx.empty_slots;
6051 heap->sweeping_page = ccan_list_next(&heap->pages, sweep_page, page_node);
6053 if (sweep_page->final_slots + free_slots == sweep_page->total_slots &&
6054 heap_pages_freeable_pages > 0 &&
6056 heap_pages_freeable_pages--;
6059 heap_unlink_page(objspace, heap, sweep_page);
6060 heap_add_page(objspace, size_pool, SIZE_POOL_TOMB_HEAP(size_pool), sweep_page);
6062 else if (free_slots > 0) {
6063 size_pool->freed_slots += ctx.freed_slots;
6064 size_pool->empty_slots += ctx.empty_slots;
6066 if (pooled_slots < GC_INCREMENTAL_SWEEP_POOL_SLOT_COUNT) {
6067 heap_add_poolpage(objspace, heap, sweep_page);
6068 pooled_slots += free_slots;
6071 heap_add_freepage(heap, sweep_page);
6072 swept_slots += free_slots;
6073 if (swept_slots > GC_INCREMENTAL_SWEEP_SLOT_COUNT) {
6079 sweep_page->free_next = NULL;
6081 } while ((sweep_page = heap->sweeping_page));
6083 if (!heap->sweeping_page) {
6084 gc_sweep_finish_size_pool(objspace, size_pool);
6086 if (!has_sweeping_pages(objspace)) {
6087 gc_sweep_finish(objspace);
6091#if GC_ENABLE_LAZY_SWEEP
6092 gc_prof_sweep_timer_stop(objspace);
6095 return heap->free_pages != NULL;
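/* Note: gc_sweep_step() sweeps pages until it has produced enough pooled or free slots
 * (the GC_INCREMENTAL_SWEEP_* thresholds); completely empty pages may be unlinked into
 * the tomb heap, and once the last sweeping page of the last pool is done the cycle is
 * closed out via gc_sweep_finish(). */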
6101 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
6104 while (SIZE_POOL_EDEN_HEAP(size_pool)->sweeping_page) {
6105 gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
6113 GC_ASSERT(dont_gc_val() == FALSE);
6114 if (!GC_ENABLE_LAZY_SWEEP) return;
6116 gc_sweeping_enter(objspace);
6118 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
6120 if (!gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool))) {
6122 if (size_pool == sweep_size_pool) {
6123 if (size_pool->allocatable_pages > 0) {
6124 heap_increment(objspace, size_pool, heap);
6128 gc_sweep_rest(objspace);
6135 gc_sweeping_exit(objspace);
6138#if GC_CAN_COMPILE_COMPACTION
6149 GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(forwarding_object), forwarding_object));
6150 GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(forwarding_object), forwarding_object));
6152 CLEAR_IN_BITMAP(GET_HEAP_PINNED_BITS(forwarding_object), forwarding_object);
6154 object = rb_gc_location(forwarding_object);
6156 shape_id_t original_shape_id = 0;
6158 original_shape_id = RMOVED(forwarding_object)->original_shape_id;
6161 gc_move(objspace, object, forwarding_object, GET_HEAP_PAGE(object)->slot_size, page->slot_size);
6165 if (original_shape_id) {
6166 ROBJECT_SET_SHAPE_ID(forwarding_object, original_shape_id);
6169 struct heap_page *orig_page = GET_HEAP_PAGE(object);
6170 orig_page->free_slots++;
6171 heap_page_add_freeobj(objspace, orig_page, object);
6173 GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(forwarding_object), forwarding_object));
6178 p += BASE_SLOT_SIZE;
6188 bits_t *mark_bits, *pin_bits;
6191 mark_bits = page->mark_bits;
6192 pin_bits = page->pinned_bits;
6194 uintptr_t p = page->start;
6197 bitset = pin_bits[0] & ~mark_bits[0];
6198 bitset >>= NUM_IN_PAGE(p);
6199 invalidate_moved_plane(objspace, page, p, bitset);
6200 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
6202 for (i=1; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
6205 bitset = pin_bits[i] & ~mark_bits[i];
6207 invalidate_moved_plane(objspace, page, p, bitset);
6208 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
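/* Note: invalidate_moved_page() scans the pinned-but-unmarked bits of a page; each such
 * slot holds a T_MOVED forwarding stub, so the object is copied back into that slot, a
 * recorded original shape id is restored, and the slot it had been moved to is returned
 * to its page's freelist. */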
6217 gc_mode_transition(objspace, gc_mode_compacting);
6219 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
6220 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(&size_pools[i]);
6221 ccan_list_for_each(&heap->pages, page, page_node) {
6222 page->flags.before_sweep = TRUE;
6225 heap->compact_cursor = ccan_list_tail(&heap->pages, struct heap_page, page_node);
6226 heap->compact_cursor_index = 0;
6229 if (gc_prof_enabled(objspace)) {
6231 record->moved_objects = objspace->rcompactor.total_moved;
6234 memset(objspace->rcompactor.considered_count_table, 0, T_MASK * sizeof(size_t));
6235 memset(objspace->rcompactor.moved_count_table, 0, T_MASK * sizeof(size_t));
6236 memset(objspace->rcompactor.moved_up_count_table, 0, T_MASK * sizeof(size_t));
6237 memset(objspace->rcompactor.moved_down_count_table, 0, T_MASK * sizeof(size_t));
6248 gc_sweeping_enter(objspace);
6250 const unsigned int immediate_sweep = objspace->flags.immediate_sweep;
6252 gc_report(1, objspace, "gc_sweep: immediate: %d\n", immediate_sweep);
6254 gc_sweep_start(objspace);
6255 if (objspace->flags.during_compacting) {
6256 gc_sweep_compact(objspace);
6259 if (immediate_sweep) {
6260#if !GC_ENABLE_LAZY_SWEEP
6261 gc_prof_sweep_timer_start(objspace);
6263 gc_sweep_rest(objspace);
6264#if !GC_ENABLE_LAZY_SWEEP
6265 gc_prof_sweep_timer_stop(objspace);
6271 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
6273 gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
6277 gc_sweeping_exit(objspace);
6283stack_chunk_alloc(void)
6297 return stack->chunk == NULL;
6303 size_t size = stack->index;
6304 stack_chunk_t *chunk = stack->chunk ? stack->chunk->next : NULL;
6307 size += stack->limit;
6308 chunk = chunk->next;
6316 chunk->next = stack->cache;
6317 stack->cache = chunk;
6318 stack->cache_size++;
6326 if (stack->unused_cache_size > (stack->cache_size/2)) {
6327 chunk = stack->cache;
6328 stack->cache = stack->cache->next;
6329 stack->cache_size--;
6332 stack->unused_cache_size = stack->cache_size;
6340 GC_ASSERT(stack->index == stack->limit);
6342 if (stack->cache_size > 0) {
6343 next = stack->cache;
6344 stack->cache = stack->cache->next;
6345 stack->cache_size--;
6346 if (stack->unused_cache_size > stack->cache_size)
6347 stack->unused_cache_size = stack->cache_size;
6350 next = stack_chunk_alloc();
6352 next->next = stack->chunk;
6353 stack->chunk = next;
6362 prev = stack->chunk->next;
6363 GC_ASSERT(stack->index == 0);
6364 add_stack_chunk_cache(stack, stack->chunk);
6365 stack->chunk = prev;
6366 stack->index = stack->limit;
6374 while (chunk != NULL) {
6384 mark_stack_chunk_list_free(stack->chunk);
6390 mark_stack_chunk_list_free(stack->cache);
6391 stack->cache_size = 0;
6392 stack->unused_cache_size = 0;
6420 if (stack->index == stack->limit) {
6421 push_mark_stack_chunk(stack);
6423 stack->chunk->data[stack->index++] = data;
6433 rb_bug("push_mark_stack() called for broken object");
6437 UNEXPECTED_NODE(push_mark_stack);
6441 rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
6443 is_pointer_to_heap(&rb_objspace, (void *)data) ? "corrupted object" : "non object");
6449 if (is_mark_stack_empty(stack)) {
6452 if (stack->index == 1) {
6453 *data = stack->chunk->data[--stack->index];
6454 pop_mark_stack_chunk(stack);
6457 *data = stack->chunk->data[--stack->index];
6468 stack->index = stack->limit = STACK_CHUNK_SIZE;
6470 for (i=0; i < 4; i++) {
6471 add_stack_chunk_cache(stack, stack_chunk_alloc());
6473 stack->unused_cache_size = stack->cache_size;
6478#define SET_STACK_END SET_MACHINE_STACK_END(&ec->machine.stack_end)
6480#define STACK_START (ec->machine.stack_start)
6481#define STACK_END (ec->machine.stack_end)
6482#define STACK_LEVEL_MAX (ec->machine.stack_maxsize/sizeof(VALUE))
6484#if STACK_GROW_DIRECTION < 0
6485# define STACK_LENGTH (size_t)(STACK_START - STACK_END)
6486#elif STACK_GROW_DIRECTION > 0
6487# define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
6489# define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
6490 : (size_t)(STACK_END - STACK_START + 1))
6492#if !STACK_GROW_DIRECTION
6493int ruby_stack_grow_direction;
6495ruby_get_stack_grow_direction(volatile VALUE *addr)
6498 SET_MACHINE_STACK_END(&end);
6500 if (end > addr) return ruby_stack_grow_direction = 1;
6501 return ruby_stack_grow_direction = -1;
6510 if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
6511 return STACK_LENGTH;
6514#define PREVENT_STACK_OVERFLOW 1
6515#ifndef PREVENT_STACK_OVERFLOW
6516#if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
6517# define PREVENT_STACK_OVERFLOW 1
6519# define PREVENT_STACK_OVERFLOW 0
6522#if PREVENT_STACK_OVERFLOW && !defined(__EMSCRIPTEN__)
6528 size_t length = STACK_LENGTH;
6529 size_t maximum_length = STACK_LEVEL_MAX - water_mark;
6531 return length > maximum_length;
6534#define stack_check(ec, water_mark) FALSE
6537#define STACKFRAME_FOR_CALL_CFUNC 2048
6542 return stack_check(ec, STACKFRAME_FOR_CALL_CFUNC);
6548 return stack_check(GET_EC(), STACKFRAME_FOR_CALL_CFUNC);
6568 if (end <= start) return;
6570 each_location(objspace, start, n, cb);
6576 gc_mark_locations(&rb_objspace, start, end, gc_mark_maybe);
6580rb_gc_mark_values(long n, const VALUE *values)
6585 for (i=0; i<n; i++) {
6586 gc_mark(objspace, values[i]);
6595 for (i=0; i<n; i++) {
6596 if (is_markable_object(values[i])) {
6597 gc_mark_and_pin(objspace, values[i]);
6603rb_gc_mark_vm_stack_values(long n, const VALUE *values)
6606 gc_mark_stack_values(objspace, n, values);
6610mark_value(st_data_t key, st_data_t value, st_data_t data)
6613 gc_mark(objspace, (VALUE)value);
6618mark_value_pin(st_data_t key, st_data_t value, st_data_t data)
6621 gc_mark_and_pin(objspace, (VALUE)value);
6628 if (!tbl || tbl->num_entries == 0) return;
6629 st_foreach(tbl, mark_value, (st_data_t)objspace);
6635 if (!tbl || tbl->num_entries == 0) return;
6636 st_foreach(tbl, mark_value_pin, (st_data_t)objspace);
6640mark_key(st_data_t key, st_data_t value, st_data_t data)
6643 gc_mark_and_pin(objspace, (VALUE)key);
6651 st_foreach(tbl, mark_key, (st_data_t)objspace);
6655pin_value(st_data_t key, st_data_t value, st_data_t data)
6658 gc_mark_and_pin(objspace, (VALUE)value);
6666 st_foreach(tbl, pin_value, (st_data_t)objspace);
6676mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
6680 gc_mark(objspace, (VALUE)key);
6681 gc_mark(objspace, (VALUE)value);
6686pin_key_pin_value(st_data_t key, st_data_t value, st_data_t data)
6690 gc_mark_and_pin(objspace, (VALUE)key);
6691 gc_mark_and_pin(objspace, (VALUE)value);
6696pin_key_mark_value(st_data_t key, st_data_t value, st_data_t data)
6700 gc_mark_and_pin(objspace, (VALUE)key);
6701 gc_mark(objspace, (VALUE)value);
6708 if (rb_hash_compare_by_id_p(hash)) {
6709 rb_hash_stlike_foreach(hash, pin_key_mark_value, (st_data_t)objspace);
6712 rb_hash_stlike_foreach(hash, mark_keyvalue, (st_data_t)objspace);
6715 gc_mark(objspace, RHASH(hash)->ifnone);
6722 st_foreach(tbl, pin_key_pin_value, (st_data_t)objspace);
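/* Note: the table-marking helpers above come in two flavours: gc_mark() leaves the
 * referenced object movable during compaction, while gc_mark_and_pin() is used where the
 * table stores addresses the GC cannot update in place (for example identity-hashed
 * st_table keys), so those objects must stay put. */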
6736 gc_mark(objspace, me->owner);
6737 gc_mark(objspace, me->defined_class);
6740 switch (def->type) {
6741 case VM_METHOD_TYPE_ISEQ:
6743 gc_mark(objspace, (VALUE)def->body.iseq.cref);
6745 if (def->iseq_overload && me->defined_class) {
6748 gc_mark_and_pin(objspace, (VALUE)me);
6751 case VM_METHOD_TYPE_ATTRSET:
6752 case VM_METHOD_TYPE_IVAR:
6753 gc_mark(objspace, def->body.attr.location);
6755 case VM_METHOD_TYPE_BMETHOD:
6756 gc_mark(objspace, def->body.bmethod.proc);
6757 if (def->body.bmethod.hooks) rb_hook_list_mark(def->body.bmethod.hooks);
6759 case VM_METHOD_TYPE_ALIAS:
6760 gc_mark(objspace, (VALUE)def->body.alias.original_me);
6762 case VM_METHOD_TYPE_REFINED:
6763 gc_mark(objspace, (VALUE)def->body.refined.orig_me);
6765 case VM_METHOD_TYPE_CFUNC:
6766 case VM_METHOD_TYPE_ZSUPER:
6767 case VM_METHOD_TYPE_MISSING:
6768 case VM_METHOD_TYPE_OPTIMIZED:
6769 case VM_METHOD_TYPE_UNDEF:
6770 case VM_METHOD_TYPE_NOTIMPLEMENTED:
6776static enum rb_id_table_iterator_result
6777mark_method_entry_i(VALUE me, void *data)
6781 gc_mark(objspace, me);
6782 return ID_TABLE_CONTINUE;
6789 rb_id_table_foreach_values(tbl, mark_method_entry_i, objspace);
6793static enum rb_id_table_iterator_result
6794mark_const_entry_i(VALUE value, void *data)
6799 gc_mark(objspace, ce->value);
6800 gc_mark(objspace, ce->file);
6801 return ID_TABLE_CONTINUE;
6808 rb_id_table_foreach_values(tbl, mark_const_entry_i, objspace);
6811#if STACK_GROW_DIRECTION < 0
6812#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
6813#elif STACK_GROW_DIRECTION > 0
6814#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
6816#define GET_STACK_BOUNDS(start, end, appendix) \
6817 ((STACK_END < STACK_START) ? \
6818 ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
6824#if defined(__wasm__)
6827static VALUE *rb_stack_range_tmp[2];
6830 rb_mark_locations(void *begin, void *end)
6832 rb_stack_range_tmp[0] = begin;
6833 rb_stack_range_tmp[1] = end;
6836# if defined(__EMSCRIPTEN__)
6841 emscripten_scan_stack(rb_mark_locations);
6842 each_stack_location(objspace, ec, rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe);
6844 emscripten_scan_registers(rb_mark_locations);
6845 each_stack_location(objspace, ec, rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe);
6852 VALUE *stack_start, *stack_end;
6854 GET_STACK_BOUNDS(stack_start, stack_end, 1);
6855 each_stack_location(objspace, ec, stack_start, stack_end, gc_mark_maybe);
6857 rb_wasm_scan_locals(rb_mark_locations);
6858 each_stack_location(objspace, ec, rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe);
6870 VALUE v[sizeof(rb_jmp_buf) / (sizeof(VALUE))];
6871 } save_regs_gc_mark;
6872 VALUE *stack_start, *stack_end;
6874 FLUSH_REGISTER_WINDOWS;
6875 memset(&save_regs_gc_mark, 0, sizeof(save_regs_gc_mark));
6877 rb_setjmp(save_regs_gc_mark.j);
6883 GET_STACK_BOUNDS(stack_start, stack_end, 1);
6885 each_location(objspace, save_regs_gc_mark.v, numberof(save_regs_gc_mark.v), gc_mark_maybe);
6887 each_stack_location(objspace, ec, stack_start, stack_end, gc_mark_maybe);
6895 VALUE *stack_start, *stack_end;
6897 GET_STACK_BOUNDS(stack_start, stack_end, 0);
6898 RUBY_DEBUG_LOG("ec->th:%u stack_start:%p stack_end:%p", rb_ec_thread_ptr(ec)->serial, stack_start, stack_end);
6899 each_stack_location(objspace, ec, stack_start, stack_end, cb);
6905 each_machine_stack_value(ec, gc_mark_maybe);
6913 gc_mark_locations(objspace, stack_start, stack_end, cb);
6915#if defined(__mc68000__)
6916 gc_mark_locations(objspace,
6917 (VALUE*)((char*)stack_start + 2),
6918 (VALUE*)((char*)stack_end - 2), cb);
6937 (void)VALGRIND_MAKE_MEM_DEFINED(&obj, sizeof(obj));
6939 if (is_pointer_to_heap(objspace, (void *)obj)) {
6940 void *ptr = asan_unpoison_object_temporary(obj);
6948 gc_mark_and_pin(objspace, obj);
6954 asan_poison_object(obj);
6968 ASSERT_vm_locking();
6969 if (RVALUE_MARKED(obj)) return 0;
6970 MARK_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
6977 struct heap_page *page = GET_HEAP_PAGE(obj);
6978 bits_t *uncollectible_bits = &page->uncollectible_bits[0];
6980 if (!MARKED_IN_BITMAP(uncollectible_bits, obj)) {
6981 page->flags.has_uncollectible_wb_unprotected_objects = TRUE;
6982 MARK_IN_BITMAP(uncollectible_bits, obj);
6983 objspace->rgengc.uncollectible_wb_unprotected_objects++;
6985#if RGENGC_PROFILE > 0
6986 objspace->profile.total_remembered_shady_object_count++;
6987#if RGENGC_PROFILE >= 2
6988 objspace->profile.remembered_shady_object_count_types[BUILTIN_TYPE(obj)]++;
7001 const VALUE old_parent = objspace->rgengc.parent_object;
7004 if (RVALUE_WB_UNPROTECTED(obj) || !RVALUE_OLD_P(obj)) {
7005 rgengc_remember(objspace, old_parent);
7009 GC_ASSERT(old_parent == objspace->rgengc.parent_object);
7015#if RGENGC_CHECK_MODE
7016 if (RVALUE_MARKED(obj) == FALSE) rb_bug("gc_grey: %s is not marked.", obj_info(obj));
7017 if (RVALUE_MARKING(obj) == TRUE) rb_bug("gc_grey: %s is marking/remembered.", obj_info(obj));
7020 if (is_incremental_marking(objspace)) {
7021 MARK_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
7024 push_mark_stack(&objspace->mark_stack, obj);
7030 struct heap_page *page = GET_HEAP_PAGE(obj);
7032 GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
7033 check_rvalue_consistency(obj);
7035 if (!RVALUE_PAGE_WB_UNPROTECTED(page, obj)) {
7036 if (!RVALUE_OLD_P(obj)) {
7037 gc_report(3, objspace, "gc_aging: YOUNG: %s\n", obj_info(obj));
7038 RVALUE_AGE_INC(objspace, obj);
7040 else if (is_full_marking(objspace)) {
7041 GC_ASSERT(RVALUE_PAGE_UNCOLLECTIBLE(page, obj) == FALSE);
7042 RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, page, obj);
7045 check_rvalue_consistency(obj);
7047 objspace->marked_slots++;
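/* gc_aging (above) advances the generational age of a write-barrier protected
 * object each time it is marked; objects that are already old and are seen
 * during a full mark get the per-page old/uncollectible bit set, so later
 * minor GCs can treat them as roots instead of re-tracing them. */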
7051 static void reachable_objects_from_callback(VALUE obj);
7056 if (LIKELY(during_gc)) {
7057 rgengc_check_relation(objspace, obj);
7058 if (!gc_mark_set(objspace, obj)) return;
7061 if (objspace->rgengc.parent_object) {
7062 RUBY_DEBUG_LOG("%p (%s) parent:%p (%s)",
7063 (void *)obj, obj_type_name(obj),
7064 (void *)objspace->rgengc.parent_object, obj_type_name(objspace->rgengc.parent_object));
7067 RUBY_DEBUG_LOG("%p (%s)", (void *)obj, obj_type_name(obj));
7071 if (UNLIKELY(RB_TYPE_P(obj, T_NONE))) {
7073 rb_bug("try to mark T_NONE object");
7075 gc_aging(objspace, obj);
7076 gc_grey(objspace, obj);
7079 reachable_objects_from_callback(obj);
7086 GC_ASSERT(is_markable_object(obj));
7087 if (UNLIKELY(objspace->flags.during_compacting)) {
7088 if (LIKELY(during_gc)) {
7089 if (!MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj)) {
7090 GC_ASSERT(GET_HEAP_PAGE(obj)->pinned_slots <= GET_HEAP_PAGE(obj)->total_slots);
7091 GET_HEAP_PAGE(obj)->pinned_slots++;
7092 MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj);
7101 if (!is_markable_object(obj)) return;
7102 gc_pin(objspace, obj);
7103 gc_mark_ptr(objspace, obj);
7109 if (!is_markable_object(obj)) return;
7110 gc_mark_ptr(objspace, obj);
7126 rb_gc_mark_and_move(VALUE *ptr)
7131 if (UNLIKELY(objspace->flags.during_reference_updating)) {
7132 GC_ASSERT(objspace->flags.during_compacting);
7133 GC_ASSERT(during_gc);
7135 *ptr = rb_gc_location(*ptr);
7138 gc_mark_ptr(objspace, *ptr);
7143 rb_gc_mark_weak(VALUE *ptr)
7147 if (UNLIKELY(!during_gc)) return;
7152 GC_ASSERT(objspace->rgengc.parent_object == 0 || FL_TEST(objspace->rgengc.parent_object, FL_WB_PROTECTED));
7154 if (UNLIKELY(RB_TYPE_P(obj, T_NONE))) {
7156 rb_bug("try to mark T_NONE object");
7162 if (!is_full_marking(objspace) && RVALUE_OLD_P(obj)) {
7163 GC_ASSERT(RVALUE_MARKED(obj));
7164 GC_ASSERT(!objspace->flags.during_compacting);
7169 rgengc_check_relation(objspace, obj);
7171 rb_darray_append_without_gc(&objspace->weak_references, ptr);
7173 objspace->profile.weak_references_count++;
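/* rb_gc_mark_weak records the address of the weak slot instead of marking its
 * referent; gc_update_weak_references (further below) later clears slots whose
 * referents were not marked by the end of the cycle. */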
7177 rb_gc_remove_weak(VALUE parent_obj, VALUE *ptr)
7183 if (!is_incremental_marking(objspace)) return;
7186 if (!RVALUE_MARKED(parent_obj)) return;
7189 rb_darray_foreach(objspace->weak_references, i, ptr_ptr) {
7190 if (*ptr_ptr == ptr) {
7202 rb_objspace_marked_object_p(VALUE obj)
7204 return RVALUE_MARKED(obj) ? TRUE : FALSE;
7210 if (RVALUE_OLD_P(obj)) {
7211 objspace->rgengc.parent_object = obj;
7214 objspace->rgengc.parent_object = Qfalse;
7221 switch (imemo_type(obj)) {
7226 if (LIKELY(env->ep)) {
7228 GC_ASSERT(env->ep[VM_ENV_DATA_INDEX_ENV] == obj);
7229 GC_ASSERT(VM_ENV_ESCAPED_P(env->ep));
7230 rb_gc_mark_values((long)env->env_size, env->env);
7231 VM_ENV_FLAGS_SET(env->ep, VM_ENV_FLAG_WB_REQUIRED);
7232 gc_mark(objspace, (VALUE)rb_vm_env_prev_env(env));
7233 gc_mark(objspace, (VALUE)env->iseq);
7238 gc_mark(objspace, RANY(obj)->as.imemo.cref.klass_or_self);
7239 gc_mark(objspace, (VALUE)RANY(obj)->as.imemo.cref.next);
7240 gc_mark(objspace, RANY(obj)->as.imemo.cref.refinements);
7243 gc_mark(objspace, RANY(obj)->as.imemo.svar.cref_or_me);
7244 gc_mark(objspace, RANY(obj)->as.imemo.svar.lastline);
7245 gc_mark(objspace, RANY(obj)->as.imemo.svar.backref);
7246 gc_mark(objspace, RANY(obj)->as.imemo.svar.others);
7248 case imemo_throw_data:
7249 gc_mark(objspace, RANY(obj)->as.imemo.throw_data.throw_obj);
7252 gc_mark_maybe(objspace, (VALUE)RANY(obj)->as.imemo.ifunc.data);
7255 gc_mark(objspace, RANY(obj)->as.imemo.memo.v1);
7256 gc_mark(objspace, RANY(obj)->as.imemo.memo.v2);
7257 gc_mark_maybe(objspace, RANY(obj)->as.imemo.memo.u3.value);
7260 mark_method_entry(objspace, &RANY(obj)->as.imemo.ment);
7263 rb_iseq_mark_and_move((rb_iseq_t *)obj, false);
7269 rb_gc_mark_locations(m->ptr, m->ptr + m->cnt);
7270 } while ((m = m->next) != NULL);
7274 rb_ast_mark(&RANY(obj)->as.imemo.ast);
7276 case imemo_parser_strterm:
7278 case imemo_callinfo:
7280 case imemo_callcache:
7302 if (vm_cc_super_p(cc) || vm_cc_refinement_p(cc)) {
7303 gc_mark(objspace, (VALUE)cc->cme_);
7307 case imemo_constcache:
7310 gc_mark(objspace, ice->value);
7313#if VM_CHECK_MODE > 0
7315 VM_UNREACHABLE(gc_mark_imemo);
7323 return (type->flags & RUBY_TYPED_DECL_MARKING) != 0;
7331 register RVALUE *any = RANY(obj);
7332 gc_mark_set_parent(objspace, obj);
7335 rb_mark_generic_ivar(obj);
7348 rb_bug("rb_gc_mark() called for broken object");
7352 UNEXPECTED_NODE(rb_gc_mark);
7356 gc_mark_imemo(objspace, obj);
7363 gc_mark(objspace, any->as.basic.klass);
7368 gc_mark(objspace, RCLASS_ATTACHED_OBJECT(obj));
7376 mark_m_tbl(objspace, RCLASS_M_TBL(obj));
7377 mark_cvc_tbl(objspace, obj);
7378 cc_table_mark(objspace, obj);
7379 if (rb_shape_obj_too_complex(obj)) {
7380 mark_tbl_no_pin(objspace, (st_table *)RCLASS_IVPTR(obj));
7383 for (attr_index_t i = 0; i < RCLASS_IV_COUNT(obj); i++) {
7384 gc_mark(objspace, RCLASS_IVPTR(obj)[i]);
7387 mark_const_tbl(objspace, RCLASS_CONST_TBL(obj));
7389 gc_mark(objspace, RCLASS_EXT(obj)->classpath);
7393 if (RICLASS_OWNS_M_TBL_P(obj)) {
7394 mark_m_tbl(objspace, RCLASS_M_TBL(obj));
7400 if (RCLASS_INCLUDER(obj)) {
7401 gc_mark(objspace, RCLASS_INCLUDER(obj));
7403 mark_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
7404 cc_table_mark(objspace, obj);
7408 if (ARY_SHARED_P(obj)) {
7409 VALUE root = ARY_SHARED_ROOT(obj);
7410 gc_mark(objspace, root);
7415 for (i=0; i < len; i++) {
7416 gc_mark(objspace, ptr[i]);
7422 mark_hash(objspace, obj);
7426 if (STR_SHARED_P(obj)) {
7445 if (RTYPEDDATA_P(obj) && gc_declarative_marking_p(any->as.typeddata.type)) {
7446 size_t *offset_list = (size_t *)RANY(obj)->as.typeddata.type->function.dmark;
7448 for (size_t offset = *offset_list; offset != RUBY_REF_END; offset = *offset_list++) {
7449 rb_gc_mark_movable(*(VALUE *)((char *)ptr + offset));
7456 if (mark_func) (*mark_func)(ptr);
7464 rb_shape_t *shape = rb_shape_get_shape_by_id(ROBJECT_SHAPE_ID(obj));
7465 if (rb_shape_obj_too_complex(obj)) {
7466 mark_tbl_no_pin(objspace, ROBJECT_IV_HASH(obj));
7471 uint32_t i, len = ROBJECT_IV_COUNT(obj);
7472 for (i = 0; i < len; i++) {
7473 gc_mark(objspace, ptr[i]);
7480 attr_index_t num_of_ivs = shape->next_iv_index;
7481 if (RCLASS_EXT(klass)->max_iv_count < num_of_ivs) {
7482 RCLASS_EXT(klass)->max_iv_count = num_of_ivs;
7489 if (any->as.file.fptr) {
7490 gc_mark(objspace, any->as.file.fptr->self);
7491 gc_mark(objspace, any->as.file.fptr->pathv);
7502 gc_mark(objspace, any->as.regexp.src);
7506 gc_mark(objspace, any->as.match.regexp);
7507 if (any->as.match.str) {
7508 gc_mark(objspace, any->as.match.str);
7513 gc_mark(objspace, any->as.rational.num);
7514 gc_mark(objspace, any->as.rational.den);
7518 gc_mark(objspace, any->as.complex.real);
7519 gc_mark(objspace, any->as.complex.imag);
7525 const long len = RSTRUCT_LEN(obj);
7526 const VALUE *const ptr = RSTRUCT_CONST_PTR(obj);
7528 for (i=0; i<len; i++) {
7529 gc_mark(objspace, ptr[i]);
7536 rb_gcdebug_print_obj_condition((VALUE)obj);
7541 rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
7543 is_pointer_to_heap(objspace, any) ? "corrupted object" : "non object");
7552 gc_mark_stacked_objects(rb_objspace_t *objspace, int incremental, size_t count)
7556 size_t marked_slots_at_the_beginning = objspace->marked_slots;
7557 size_t popped_count = 0;
7559 while (pop_mark_stack(mstack, &obj)) {
7560 if (UNDEF_P(obj)) continue;
7562 if (RGENGC_CHECK_MODE && !RVALUE_MARKED(obj)) {
7563 rb_bug("gc_mark_stacked_objects: %s is not marked.", obj_info(obj));
7565 gc_mark_children(objspace, obj);
7568 if (RGENGC_CHECK_MODE && !RVALUE_MARKING(obj)) {
7569 rb_bug("gc_mark_stacked_objects: incremental, but marking bit is 0");
7571 CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
7574 if (popped_count + (objspace->marked_slots - marked_slots_at_the_beginning) > count) {
7583 if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);
7585 if (is_mark_stack_empty(mstack)) {
7586 shrink_stack_chunk_cache(mstack);
7595 gc_mark_stacked_objects_incremental(rb_objspace_t *objspace, size_t count)
7597 return gc_mark_stacked_objects(objspace, TRUE, count);
7603 return gc_mark_stacked_objects(objspace, FALSE, 0);
7607#define MAX_TICKS 0x100
7608static tick_t mark_ticks[MAX_TICKS];
7609static const char *mark_ticks_categories[MAX_TICKS];
7612 show_mark_ticks(void)
7615 fprintf(stderr, "mark ticks result:\n");
7616 for (i=0; i<MAX_TICKS; i++) {
7617 const char *category = mark_ticks_categories[i];
7619 fprintf(stderr, "%s\t%8lu\n", category, (unsigned long)mark_ticks[i]);
7630 gc_mark_roots(rb_objspace_t *objspace, const char **categoryp)
7634 rb_vm_t *vm = rb_ec_vm_ptr(ec);
7637 tick_t start_tick = tick();
7639 const char *prev_category = 0;
7641 if (mark_ticks_categories[0] == 0) {
7642 atexit(show_mark_ticks);
7646 if (categoryp) *categoryp = "xxx";
7648 objspace->rgengc.parent_object = Qfalse;
7651#define MARK_CHECKPOINT_PRINT_TICK(category) do { \
7652 if (prev_category) { \
7653 tick_t t = tick(); \
7654 mark_ticks[tick_count] = t - start_tick; \
7655 mark_ticks_categories[tick_count] = prev_category; \
7658 prev_category = category; \
7659 start_tick = tick(); \
7662#define MARK_CHECKPOINT_PRINT_TICK(category)
7665#define MARK_CHECKPOINT(category) do { \
7666 if (categoryp) *categoryp = category; \
7667 MARK_CHECKPOINT_PRINT_TICK(category); \
7670 MARK_CHECKPOINT("vm");
7673 if (vm->self) gc_mark(objspace, vm->self);
7675 MARK_CHECKPOINT("finalizers");
7676 mark_finalizer_tbl(objspace, finalizer_table);
7678 MARK_CHECKPOINT("machine_context");
7679 mark_current_machine_context(objspace, ec);
7682 MARK_CHECKPOINT("global_list");
7683 for (list = global_list; list; list = list->next) {
7684 gc_mark_maybe(objspace, *list->varptr);
7687 MARK_CHECKPOINT("end_proc");
7690 MARK_CHECKPOINT("global_tbl");
7691 rb_gc_mark_global_tbl();
7693 MARK_CHECKPOINT("object_id");
7694 rb_gc_mark(objspace->next_object_id);
7695 mark_tbl_no_pin(objspace, objspace->obj_to_id_tbl);
7697 if (stress_to_class) rb_gc_mark(stress_to_class);
7699 MARK_CHECKPOINT("finish");
7700#undef MARK_CHECKPOINT
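/* gc_mark_roots walks the VM roots in checkpointed phases (vm, finalizers,
 * machine context, global list, end procs, global/ID tables, object_id map);
 * *categoryp lets callers and crash reports name the phase that was running. */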
7703#if RGENGC_CHECK_MODE >= 4
7705#define MAKE_ROOTSIG(obj) (((VALUE)(obj) << 1) | 0x01)
7706#define IS_ROOTSIG(obj) ((VALUE)(obj) & 0x01)
7707#define GET_ROOTSIG(obj) ((const char *)((VALUE)(obj) >> 1))
7715 static struct reflist *
7716 reflist_create(VALUE obj)
7718 struct reflist *refs = xmalloc(sizeof(struct reflist));
7721 refs->list[0] = obj;
7727 reflist_destruct(struct reflist *refs)
7734 reflist_add(struct reflist *refs, VALUE obj)
7736 if (refs->pos == refs->size) {
7738 SIZED_REALLOC_N(refs->list, VALUE, refs->size, refs->size/2);
7741 refs->list[refs->pos++] = obj;
7745 reflist_dump(struct reflist *refs)
7748 for (i=0; i<refs->pos; i++) {
7749 VALUE obj = refs->list[i];
7750 if (IS_ROOTSIG(obj)) {
7751 fprintf(stderr, "<root@%s>", GET_ROOTSIG(obj));
7754 fprintf(stderr, "<%s>", obj_info(obj));
7756 if (i+1 < refs->pos) fprintf(stderr, ", ");
7761 reflist_referred_from_machine_context(struct reflist *refs)
7764 for (i=0; i<refs->pos; i++) {
7765 VALUE obj = refs->list[i];
7766 if (IS_ROOTSIG(obj) && strcmp(GET_ROOTSIG(obj), "machine_context") == 0) return 1;
7781 const char *category;
7787 allrefs_add(struct allrefs *data, VALUE obj)
7789 struct reflist *refs;
7792 if (st_lookup(data->references, obj, &r)) {
7793 refs = (struct reflist *)r;
7794 reflist_add(refs, data->root_obj);
7798 refs = reflist_create(data->root_obj);
7799 st_insert(data->references, obj, (st_data_t)refs);
7805 allrefs_i(VALUE obj, void *ptr)
7807 struct allrefs *data = (struct allrefs *)ptr;
7809 if (allrefs_add(data, obj)) {
7810 push_mark_stack(&data->mark_stack, obj);
7815 allrefs_roots_i(VALUE obj, void *ptr)
7817 struct allrefs *data = (struct allrefs *)ptr;
7818 if (strlen(data->category) == 0) rb_bug("!!!");
7819 data->root_obj = MAKE_ROOTSIG(data->category);
7821 if (allrefs_add(data, obj)) {
7822 push_mark_stack(&data->mark_stack, obj);
7825#define PUSH_MARK_FUNC_DATA(v) do { \
7826 struct gc_mark_func_data_struct *prev_mark_func_data = GET_RACTOR()->mfd; \
7827 GET_RACTOR()->mfd = (v);
7829#define POP_MARK_FUNC_DATA() GET_RACTOR()->mfd = prev_mark_func_data;} while (0)
7834 struct allrefs data;
7835 struct gc_mark_func_data_struct mfd;
7837 int prev_dont_gc = dont_gc_val();
7840 data.objspace = objspace;
7841 data.references = st_init_numtable();
7842 init_mark_stack(&data.mark_stack);
7844 mfd.mark_func = allrefs_roots_i;
7848 PUSH_MARK_FUNC_DATA(&mfd);
7849 GET_RACTOR()->mfd = &mfd;
7850 gc_mark_roots(objspace, &data.category);
7851 POP_MARK_FUNC_DATA();
7854 while (pop_mark_stack(&data.mark_stack, &obj)) {
7855 rb_objspace_reachable_objects_from(data.root_obj = obj, allrefs_i, &data);
7857 free_stack_chunks(&data.mark_stack);
7859 dont_gc_set(prev_dont_gc);
7860 return data.references;
7864objspace_allrefs_destruct_i(st_data_t key, st_data_t value, st_data_t ptr)
7866 struct reflist *refs = (struct reflist *)value;
7867 reflist_destruct(refs);
7872 objspace_allrefs_destruct(struct st_table *refs)
7874 st_foreach(refs, objspace_allrefs_destruct_i, 0);
7875 st_free_table(refs);
7878#if RGENGC_CHECK_MODE >= 5
7880allrefs_dump_i(st_data_t k, st_data_t v, st_data_t ptr)
7883 struct reflist *refs = (struct reflist *)v;
7884 fprintf(stderr, "[allrefs_dump_i] %s <- ", obj_info(obj));
7886 fprintf(stderr, "\n");
7893 VALUE size = objspace->rgengc.allrefs_table->num_entries;
7894 fprintf(stderr, "[all refs] (size: %"PRIuVALUE")\n", size);
7895 st_foreach(objspace->rgengc.allrefs_table, allrefs_dump_i, 0);
7900gc_check_after_marks_i(st_data_t k, st_data_t v, st_data_t ptr)
7903 struct reflist *refs = (struct reflist *)v;
7907 if (!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj)) {
7908 fprintf(stderr, "gc_check_after_marks_i: %s is not marked and not oldgen.\n", obj_info(obj));
7909 fprintf(stderr, "gc_check_after_marks_i: %p is referred from ", (void *)obj);
7912 if (reflist_referred_from_machine_context(refs)) {
7913 fprintf(stderr, " (marked from machine stack).\n");
7917 objspace->rgengc.error_count++;
7918 fprintf(stderr, "\n");
7925 gc_marks_check(rb_objspace_t *objspace, st_foreach_callback_func *checker_func, const char *checker_name)
7927 size_t saved_malloc_increase = objspace->malloc_params.increase;
7928#if RGENGC_ESTIMATE_OLDMALLOC
7929 size_t saved_oldmalloc_increase = objspace->rgengc.oldmalloc_increase;
7931 VALUE already_disabled = rb_objspace_gc_disable(objspace);
7933 objspace->rgengc.allrefs_table = objspace_allrefs(objspace);
7936 st_foreach(objspace->rgengc.allrefs_table, checker_func, (st_data_t)objspace);
7939 if (objspace->rgengc.error_count > 0) {
7940#if RGENGC_CHECK_MODE >= 5
7941 allrefs_dump(objspace);
7943 if (checker_name) rb_bug("%s: GC has problem.", checker_name);
7946 objspace_allrefs_destruct(objspace->rgengc.allrefs_table);
7947 objspace->rgengc.allrefs_table = 0;
7949 if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
7950 objspace->malloc_params.increase = saved_malloc_increase;
7951#if RGENGC_ESTIMATE_OLDMALLOC
7952 objspace->rgengc.oldmalloc_increase = saved_oldmalloc_increase;
7960 size_t live_object_count;
7961 size_t zombie_object_count;
7964 size_t old_object_count;
7965 size_t remembered_shady_count;
7969 check_generation_i(const VALUE child, void *ptr)
7972 const VALUE parent = data->parent;
7974 if (RGENGC_CHECK_MODE) GC_ASSERT(RVALUE_OLD_P(parent));
7976 if (!RVALUE_OLD_P(child)) {
7977 if (!RVALUE_REMEMBERED(parent) &&
7978 !RVALUE_REMEMBERED(child) &&
7979 !RVALUE_UNCOLLECTIBLE(child)) {
7980 fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (O->Y) %s -> %s\n", obj_info(parent), obj_info(child));
7987 check_color_i(const VALUE child, void *ptr)
7990 const VALUE parent = data->parent;
7992 if (!RVALUE_WB_UNPROTECTED(parent) && RVALUE_WHITE_P(child)) {
7993 fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (B->W) - %s -> %s\n",
7994 obj_info(parent), obj_info(child));
8000 check_children_i(const VALUE child, void *ptr)
8003 if (check_rvalue_consistency_force(child, FALSE) != 0) {
8004 fprintf(stderr, "check_children_i: %s has error (referenced from %s)",
8005 obj_info(child), obj_info(data->parent));
8006 rb_print_backtrace(stderr);
8013 verify_internal_consistency_i(void *page_start, void *page_end, size_t stride,
8019 for (obj = (VALUE)page_start; obj != (VALUE)page_end; obj += stride) {
8020 void *poisoned = asan_unpoison_object_temporary(obj);
8022 if (is_live_object(objspace, obj)) {
8024 data->live_object_count++;
8029 if (!gc_object_moved_p(objspace, obj)) {
8031 rb_objspace_reachable_objects_from(obj, check_children_i, (void *)data);
8035 if (RVALUE_OLD_P(obj)) data->old_object_count++;
8036 if (RVALUE_WB_UNPROTECTED(obj) && RVALUE_UNCOLLECTIBLE(obj)) data->remembered_shady_count++;
8038 if (!is_marking(objspace) && RVALUE_OLD_P(obj)) {
8041 rb_objspace_reachable_objects_from(obj, check_generation_i, (void *)data);
8044 if (is_incremental_marking(objspace)) {
8045 if (RVALUE_BLACK_P(obj)) {
8048 rb_objspace_reachable_objects_from(obj, check_color_i, (void *)data);
8055 data->zombie_object_count++;
8060 asan_poison_object(obj);
8070 unsigned int has_remembered_shady = FALSE;
8071 unsigned int has_remembered_old = FALSE;
8072 int remembered_old_objects = 0;
8073 int free_objects = 0;
8074 int zombie_objects = 0;
8076 short slot_size = page->slot_size;
8077 uintptr_t start = (uintptr_t)page->start;
8078 uintptr_t end = start + page->total_slots * slot_size;
8080 for (uintptr_t ptr = start; ptr < end; ptr += slot_size) {
8082 void *poisoned = asan_unpoison_object_temporary(val);
8087 if (RVALUE_PAGE_UNCOLLECTIBLE(page, val) && RVALUE_PAGE_WB_UNPROTECTED(page, val)) {
8088 has_remembered_shady = TRUE;
8090 if (RVALUE_PAGE_MARKING(page, val)) {
8091 has_remembered_old = TRUE;
8092 remembered_old_objects++;
8097 asan_poison_object(val);
8101 if (!is_incremental_marking(objspace) &&
8102 page->flags.has_remembered_objects == FALSE && has_remembered_old == TRUE) {
8104 for (uintptr_t ptr = start; ptr < end; ptr += slot_size) {
8106 if (RVALUE_PAGE_MARKING(page, val)) {
8107 fprintf(stderr, "marking -> %s\n", obj_info(val));
8110 rb_bug("page %p's has_remembered_objects should be false, but there are remembered old objects (%d). %s",
8111 (void *)page, remembered_old_objects, obj ? obj_info(obj) : "");
8114 if (page->flags.has_uncollectible_wb_unprotected_objects == FALSE && has_remembered_shady == TRUE) {
8115 rb_bug("page %p's has_remembered_shady should be false, but there are remembered shady objects. %s",
8116 (void *)page, obj ? obj_info(obj) : "");
8121 if (page->free_slots != free_objects) {
8122 rb_bug("page %p's free_slots should be %d, but %d", (void *)page, page->free_slots, free_objects);
8125 if (page->final_slots != zombie_objects) {
8126 rb_bug("page %p's final_slots should be %d, but %d", (void *)page, page->final_slots, zombie_objects);
8129 return remembered_old_objects;
8133 gc_verify_heap_pages_(rb_objspace_t *objspace, struct ccan_list_head *head)
8135 int remembered_old_objects = 0;
8138 ccan_list_for_each(head, page, page_node) {
8139 asan_unlock_freelist(page);
8140 RVALUE *p = page->freelist;
8144 asan_unpoison_object(vp, false);
8146 fprintf(stderr, "freelist slot expected to be T_NONE but was: %s\n", obj_info(vp));
8148 p = p->as.free.next;
8149 asan_poison_object(prev);
8151 asan_lock_freelist(page);
8153 if (page->flags.has_remembered_objects == FALSE) {
8154 remembered_old_objects += gc_verify_heap_page(objspace, page, Qfalse);
8158 return remembered_old_objects;
8164 int remembered_old_objects = 0;
8165 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8166 remembered_old_objects += gc_verify_heap_pages_(objspace, &(SIZE_POOL_EDEN_HEAP(&size_pools[i])->pages));
8167 remembered_old_objects += gc_verify_heap_pages_(objspace, &(SIZE_POOL_TOMB_HEAP(&size_pools[i])->pages));
8169 return remembered_old_objects;
8183 gc_verify_internal_consistency_m(VALUE dummy)
8194 data.objspace = objspace;
8195 gc_report(5, objspace, "gc_verify_internal_consistency: start\n");
8198 for (size_t i = 0; i < heap_allocated_pages; i++) {
8199 struct heap_page *page = heap_pages_sorted[i];
8200 short slot_size = page->slot_size;
8202 uintptr_t start = (uintptr_t)page->start;
8203 uintptr_t end = start + page->total_slots * slot_size;
8205 verify_internal_consistency_i((void *)start, (void *)end, slot_size, &data);
8208 if (data.err_count != 0) {
8209#if RGENGC_CHECK_MODE >= 5
8210 objspace->rgengc.error_count = data.err_count;
8211 gc_marks_check(objspace, NULL, NULL);
8212 allrefs_dump(objspace);
8214 rb_bug("gc_verify_internal_consistency: found internal inconsistency.");
8218 gc_verify_heap_pages(objspace);
8222 if (!is_lazy_sweeping(objspace) &&
8224 ruby_single_main_ractor != NULL) {
8225 if (objspace_live_slots(objspace) != data.live_object_count) {
8226 fprintf(stderr, "heap_pages_final_slots: %"PRIdSIZE", total_freed_objects: %"PRIdSIZE"\n",
8227 heap_pages_final_slots, total_freed_objects(objspace));
8228 rb_bug("inconsistent live slot number: expect %"PRIuSIZE", but %"PRIuSIZE".",
8229 objspace_live_slots(objspace), data.live_object_count);
8233 if (!is_marking(objspace)) {
8234 if (objspace->rgengc.old_objects != data.old_object_count) {
8235 rb_bug("inconsistent old slot number: expect %"PRIuSIZE", but %"PRIuSIZE".",
8236 objspace->rgengc.old_objects, data.old_object_count);
8238 if (objspace->rgengc.uncollectible_wb_unprotected_objects != data.remembered_shady_count) {
8239 rb_bug("inconsistent number of wb unprotected objects: expect %"PRIuSIZE", but %"PRIuSIZE".",
8240 objspace->rgengc.uncollectible_wb_unprotected_objects, data.remembered_shady_count);
8245 size_t list_count = 0;
8248 VALUE z = heap_pages_deferred_final;
8251 z = RZOMBIE(z)->next;
8255 if (heap_pages_final_slots != data.zombie_object_count ||
8256 heap_pages_final_slots != list_count) {
8258 rb_bug("inconsistent finalizing object count:\n"
8259 " expect %"PRIuSIZE"\n"
8260 " but %"PRIuSIZE" zombies\n"
8261 " heap_pages_deferred_final list has %"PRIuSIZE" items.",
8262 heap_pages_final_slots,
8263 data.zombie_object_count,
8268 gc_report(5, objspace, "gc_verify_internal_consistency: OK\n");
8278 unsigned int prev_during_gc = during_gc;
8281 gc_verify_internal_consistency_(objspace);
8283 during_gc = prev_during_gc;
8289 rb_gc_verify_internal_consistency(void)
8295 heap_move_pooled_pages_to_free_pages(rb_heap_t *heap)
8297 if (heap->pooled_pages) {
8298 if (heap->free_pages) {
8299 struct heap_page *free_pages_tail = heap->free_pages;
8300 while (free_pages_tail->free_next) {
8301 free_pages_tail = free_pages_tail->free_next;
8303 free_pages_tail->free_next = heap->pooled_pages;
8306 heap->free_pages = heap->pooled_pages;
8309 heap->pooled_pages = NULL;
8319 gc_report(1, objspace, "gc_marks_start: (%s)\n", full_mark ? "full" : "minor");
8320 gc_mode_transition(objspace, gc_mode_marking);
8323 size_t incremental_marking_steps = (objspace->rincgc.pooled_slots / INCREMENTAL_MARK_STEP_ALLOCATIONS) + 1;
8324 objspace->rincgc.step_slots = (objspace->marked_slots * 2) / incremental_marking_steps;
8326 if (0) fprintf(stderr, "objspace->marked_slots: %"PRIdSIZE", "
8327 "objspace->rincgc.pooled_page_num: %"PRIdSIZE", "
8328 "objspace->rincgc.step_slots: %"PRIdSIZE", \n",
8329 objspace->marked_slots, objspace->rincgc.pooled_slots, objspace->rincgc.step_slots);
8330 objspace->flags.during_minor_gc = FALSE;
8331 if (ruby_enable_autocompact) {
8332 objspace->flags.during_compacting |= TRUE;
8334 objspace->profile.major_gc_count++;
8335 objspace->rgengc.uncollectible_wb_unprotected_objects = 0;
8336 objspace->rgengc.old_objects = 0;
8337 objspace->rgengc.last_major_gc = objspace->profile.count;
8338 objspace->marked_slots = 0;
8340 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8342 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
8343 rgengc_mark_and_rememberset_clear(objspace, heap);
8344 heap_move_pooled_pages_to_free_pages(heap);
8346 if (objspace->flags.during_compacting) {
8349 ccan_list_for_each(&heap->pages, page, page_node) {
8350 page->pinned_slots = 0;
8356 objspace->flags.during_minor_gc = TRUE;
8357 objspace->marked_slots =
8358 objspace->rgengc.old_objects + objspace->rgengc.uncollectible_wb_unprotected_objects;
8359 objspace->profile.minor_gc_count++;
8361 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8362 rgengc_rememberset_mark(objspace, SIZE_POOL_EDEN_HEAP(&size_pools[i]));
8366 gc_mark_roots(objspace, NULL);
8368 gc_report(1, objspace, "gc_marks_start: (%s) end, stack in %"PRIdSIZE"\n",
8369 full_mark ? "full" : "minor", mark_stack_size(&objspace->mark_stack));
8373 gc_marks_wb_unprotected_objects_plane(rb_objspace_t *objspace, uintptr_t p, bits_t bits)
8378 gc_report(2, objspace, "gc_marks_wb_unprotected_objects: marked shady: %s\n", obj_info((VALUE)p));
8379 GC_ASSERT(RVALUE_WB_UNPROTECTED((VALUE)p));
8380 GC_ASSERT(RVALUE_MARKED((VALUE)p));
8381 gc_mark_children(objspace, (VALUE)p);
8383 p += BASE_SLOT_SIZE;
8394 ccan_list_for_each(&heap->pages, page, page_node) {
8395 bits_t *mark_bits = page->mark_bits;
8396 bits_t *wbun_bits = page->wb_unprotected_bits;
8397 uintptr_t p = page->start;
8400 bits_t bits = mark_bits[0] & wbun_bits[0];
8401 bits >>= NUM_IN_PAGE(p);
8402 gc_marks_wb_unprotected_objects_plane(objspace, p, bits);
8403 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
8405 for (j=1; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
8406 bits_t bits = mark_bits[j] & wbun_bits[j];
8408 gc_marks_wb_unprotected_objects_plane(objspace, p, bits);
8409 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
8413 gc_mark_stacked_objects_all(objspace);
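/* Objects without a write barrier (shady/WB-unprotected) can gain references
 * while incremental marking is paused, so every such object that was marked is
 * re-traversed here at the end of marking, then the mark stack is drained. */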
8419 size_t retained_weak_references_count = 0;
8421 rb_darray_foreach(objspace->weak_references, i, ptr_ptr) {
8422 if (!*ptr_ptr) continue;
8424 VALUE obj = **ptr_ptr;
8428 if (!RVALUE_MARKED(obj)) {
8432 retained_weak_references_count++;
8436 objspace->profile.retained_weak_references_count = retained_weak_references_count;
8438 rb_darray_clear(objspace->weak_references);
8439 rb_darray_resize_capa_without_gc(&objspace->weak_references, retained_weak_references_count);
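/* After marking, weak slots whose referents did not survive have been cleared;
 * the weak_references darray is then shrunk to just the retained entries. */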
8446 if (is_incremental_marking(objspace)) {
8447 if (RGENGC_CHECK_MODE && is_mark_stack_empty(&objspace->mark_stack) == 0) {
8448 rb_bug("gc_marks_finish: mark stack is not empty (%"PRIdSIZE").",
8449 mark_stack_size(&objspace->mark_stack));
8452 gc_mark_roots(objspace, 0);
8453 while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == false);
8455#if RGENGC_CHECK_MODE >= 2
8456 if (gc_verify_heap_pages(objspace) != 0) {
8457 rb_bug("gc_marks_finish (incremental): there are remembered old objects.");
8461 objspace->flags.during_incremental_marking = FALSE;
8463 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8464 gc_marks_wb_unprotected_objects(objspace, SIZE_POOL_EDEN_HEAP(&size_pools[i]));
8468 gc_update_weak_references(objspace);
8470#if RGENGC_CHECK_MODE >= 2
8471 gc_verify_internal_consistency(objspace);
8474#if RGENGC_CHECK_MODE >= 4
8476 gc_marks_check(objspace, gc_check_after_marks_i, "after_marks");
8482 size_t total_slots = heap_allocatable_slots(objspace) + heap_eden_total_slots(objspace);
8483 size_t sweep_slots = total_slots - objspace->marked_slots;
8484 size_t max_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_max_ratio);
8485 size_t min_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_min_ratio);
8486 int full_marking = is_full_marking(objspace);
8487 const int r_cnt = GET_VM()->ractor.cnt;
8488 const int r_mul = r_cnt > 8 ? 8 : r_cnt;
8490 GC_ASSERT(heap_eden_total_slots(objspace) >= objspace->marked_slots);
8493 size_t total_init_slots = 0;
8494 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8495 total_init_slots += gc_params.size_pool_init_slots[i] * r_mul;
8498 if (max_free_slots < total_init_slots) {
8499 max_free_slots = total_init_slots;
8502 if (sweep_slots > max_free_slots) {
8503 heap_pages_freeable_pages = (sweep_slots - max_free_slots) / HEAP_PAGE_OBJ_LIMIT;
8506 heap_pages_freeable_pages = 0;
8510 if (min_free_slots < gc_params.heap_free_slots * r_mul) {
8511 min_free_slots = gc_params.heap_free_slots * r_mul;
8514 if (sweep_slots < min_free_slots) {
8515 if (!full_marking) {
8516 if (objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) {
8517 full_marking = TRUE;
8522 gc_report(1, objspace, "gc_marks_finish: next is full GC!!)\n");
8523 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_NOFREE;
8530 const double r = gc_params.oldobject_limit_factor;
8531 objspace->rgengc.uncollectible_wb_unprotected_objects_limit = MAX(
8532 (size_t)(objspace->rgengc.uncollectible_wb_unprotected_objects * r),
8533 (size_t)(objspace->rgengc.old_objects * gc_params.uncollectible_wb_unprotected_objects_limit_ratio)
8535 objspace->rgengc.old_objects_limit = (size_t)(objspace->rgengc.old_objects * r);
8538 if (objspace->rgengc.uncollectible_wb_unprotected_objects > objspace->rgengc.uncollectible_wb_unprotected_objects_limit) {
8539 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_SHADY;
8541 if (objspace->rgengc.old_objects > objspace->rgengc.old_objects_limit) {
8542 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_OLDGEN;
8544 if (RGENGC_FORCE_MAJOR_GC) {
8545 objspace->rgengc.need_major_gc = GPR_FLAG_MAJOR_BY_FORCE;
8548 gc_report(1, objspace, "gc_marks_finish (marks %"PRIdSIZE" objects, "
8549 "old %"PRIdSIZE" objects, total %"PRIdSIZE" slots, "
8550 "sweep %"PRIdSIZE" slots, increment: %"PRIdSIZE", next GC: %s)\n",
8551 objspace->marked_slots, objspace->rgengc.old_objects, heap_eden_total_slots(objspace), sweep_slots, heap_allocatable_pages(objspace),
8552 objspace->rgengc.need_major_gc ? "major" : "minor");
8555 rb_ractor_finish_marking();
8561 gc_compact_heap_cursors_met_p(rb_heap_t *heap)
8563 return heap->sweeping_page == heap->compact_cursor;
8574 obj_size = rb_ary_size_as_embedded(src);
8578 if (rb_shape_obj_too_complex(src)) {
8579 return &size_pools[0];
8587 obj_size = rb_str_size_as_embedded(src);
8591 obj_size = sizeof(struct RHash) + (RHASH_ST_TABLE_P(src) ? sizeof(st_table) : sizeof(ar_table));
8598 if (rb_gc_size_allocatable_p(obj_size)){
8599 idx = size_pool_idx_for_size(obj_size);
8601 return &size_pools[idx];
8608 GC_ASSERT(gc_is_moveable_obj(objspace, src));
8610 rb_size_pool_t *dest_pool = gc_compact_destination_pool(objspace, size_pool, src);
8611 rb_heap_t *dheap = SIZE_POOL_EDEN_HEAP(dest_pool);
8615 if (gc_compact_heap_cursors_met_p(dheap)) {
8616 return dheap != heap;
8620 orig_shape = rb_shape_get_shape(src);
8621 if (dheap != heap && !rb_shape_obj_too_complex(src)) {
8622 rb_shape_t *initial_shape = rb_shape_get_shape_by_id((shape_id_t)((dest_pool - size_pools) + SIZE_POOL_COUNT));
8623 new_shape = rb_shape_traverse_from_new_root(initial_shape, orig_shape);
8626 dest_pool = size_pool;
8632 while (!try_move(objspace, dheap, dheap->free_pages, src)) {
8634 .page = dheap->sweeping_page,
8643 lock_page_body(objspace, GET_PAGE_BODY(src));
8644 gc_sweep_page(objspace, dheap, &ctx);
8645 unlock_page_body(objspace, GET_PAGE_BODY(src));
8647 if (dheap->sweeping_page->free_slots > 0) {
8648 heap_add_freepage(dheap, dheap->sweeping_page);
8651 dheap->sweeping_page = ccan_list_next(&dheap->pages, dheap->sweeping_page, page_node);
8652 if (gc_compact_heap_cursors_met_p(dheap)) {
8653 return dheap != heap;
8659 VALUE dest = rb_gc_location(src);
8660 rb_shape_set_shape(dest, new_shape);
8662 RMOVED(src)->original_shape_id = rb_shape_id(orig_shape);
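/* gc_compact_move (above) tries to slide src into a free slot of the
 * destination size pool's heap; when that heap has no free slot it sweeps the
 * page under its sweeping cursor to make room, and gives up once the sweeping
 * and compact cursors meet. The T_MOVED tombstone keeps the original shape id
 * so references can be updated later. */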
8671 short slot_size = page->slot_size;
8672 short slot_bits = slot_size / BASE_SLOT_SIZE;
8673 GC_ASSERT(slot_bits > 0);
8677 GC_ASSERT(vp % sizeof(RVALUE) == 0);
8680 objspace->rcompactor.considered_count_table[BUILTIN_TYPE(vp)]++;
8682 if (gc_is_moveable_obj(objspace, vp)) {
8683 if (!gc_compact_move(objspace, heap, size_pool, vp)) {
8690 bitset >>= slot_bits;
8700 GC_ASSERT(page == heap->compact_cursor);
8702 bits_t *mark_bits, *pin_bits;
8704 uintptr_t p = page->start;
8706 mark_bits = page->mark_bits;
8707 pin_bits = page->pinned_bits;
8710 bitset = (mark_bits[0] & ~pin_bits[0]);
8711 bitset >>= NUM_IN_PAGE(p);
8713 if (!gc_compact_plane(objspace, size_pool, heap, (uintptr_t)p, bitset, page))
8716 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
8718 for (int j = 1; j < HEAP_PAGE_BITMAP_LIMIT; j++) {
8719 bitset = (mark_bits[j] & ~pin_bits[j]);
8721 if (!gc_compact_plane(objspace, size_pool, heap, (uintptr_t)p, bitset, page))
8724 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
8733 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8735 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
8737 if (heap->total_pages > 0 &&
8738 !gc_compact_heap_cursors_met_p(heap)) {
8749 gc_compact_start(objspace);
8750#if RGENGC_CHECK_MODE >= 2
8751 gc_verify_internal_consistency(objspace);
8754 while (!gc_compact_all_compacted_p(objspace)) {
8755 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8757 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
8759 if (gc_compact_heap_cursors_met_p(heap)) {
8763 struct heap_page *start_page = heap->compact_cursor;
8765 if (!gc_compact_page(objspace, size_pool, heap, start_page)) {
8766 lock_page_body(objspace, GET_PAGE_BODY(start_page->start));
8773 lock_page_body(objspace, GET_PAGE_BODY(start_page->start));
8774 heap->compact_cursor = ccan_list_prev(&heap->pages, heap->compact_cursor, page_node);
8778 gc_compact_finish(objspace);
8780#if RGENGC_CHECK_MODE >= 2
8781 gc_verify_internal_consistency(objspace);
8788 gc_report(1, objspace, "gc_marks_rest\n");
8790 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8791 SIZE_POOL_EDEN_HEAP(&size_pools[i])->pooled_pages = NULL;
8794 if (is_incremental_marking(objspace)) {
8795 while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == FALSE);
8798 gc_mark_stacked_objects_all(objspace);
8801 gc_marks_finish(objspace);
8807 bool marking_finished = false;
8809 GC_ASSERT(is_marking(objspace));
8810 if (gc_mark_stacked_objects_incremental(objspace, slots)) {
8811 gc_marks_finish(objspace);
8813 marking_finished = true;
8816 return marking_finished;
8822 GC_ASSERT(dont_gc_val() == FALSE);
8823 bool marking_finished = true;
8825 gc_marking_enter(objspace);
8827 if (heap->free_pages) {
8828 gc_report(2, objspace, "gc_marks_continue: has pooled pages");
8830 marking_finished = gc_marks_step(objspace, objspace->rincgc.step_slots);
8833 gc_report(2, objspace, "gc_marks_continue: no more pooled pages (stack depth: %"PRIdSIZE").\n",
8834 mark_stack_size(&objspace->mark_stack));
8835 size_pool->force_incremental_marking_finish_count++;
8836 gc_marks_rest(objspace);
8839 gc_marking_exit(objspace);
8841 return marking_finished;
8847 gc_prof_mark_timer_start(objspace);
8848 gc_marking_enter(objspace);
8850 bool marking_finished = false;
8854 gc_marks_start(objspace, full_mark);
8855 if (!is_incremental_marking(objspace)) {
8856 gc_marks_rest(objspace);
8857 marking_finished = true;
8860#if RGENGC_PROFILE > 0
8861 if (gc_prof_record(objspace)) {
8863 record->old_objects = objspace->rgengc.old_objects;
8867 gc_marking_exit(objspace);
8868 gc_prof_mark_timer_stop(objspace);
8870 return marking_finished;
8876 gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...)
8878 if (level <= RGENGC_DEBUG) {
8882 const char *status = " ";
8885 status = is_full_marking(objspace) ? "+" : "-";
8888 if (is_lazy_sweeping(objspace)) {
8891 if (is_incremental_marking(objspace)) {
8896 va_start(args, fmt);
8897 vsnprintf(buf, 1024, fmt, args);
8900 fprintf(out, "%s|", status);
8910 struct heap_page *page = GET_HEAP_PAGE(obj);
8911 bits_t *bits = &page->remembered_bits[0];
8913 if (MARKED_IN_BITMAP(bits, obj)) {
8917 page->flags.has_remembered_objects = TRUE;
8918 MARK_IN_BITMAP(bits, obj);
8929 gc_report(6, objspace, "rgengc_remember: %s %s\n", obj_info(obj),
8930 RVALUE_REMEMBERED(obj) ? "was already remembered" : "is remembered now");
8932 check_rvalue_consistency(obj);
8934 if (RGENGC_CHECK_MODE) {
8935 if (RVALUE_WB_UNPROTECTED(obj)) rb_bug("rgengc_remember: %s is not wb protected.", obj_info(obj));
8938#if RGENGC_PROFILE > 0
8939 if (!RVALUE_REMEMBERED(obj)) {
8940 if (RVALUE_WB_UNPROTECTED(obj) == 0) {
8941 objspace->profile.total_remembered_normal_object_count++;
8942#if RGENGC_PROFILE >= 2
8943 objspace->profile.remembered_normal_object_count_types[BUILTIN_TYPE(obj)]++;
8949 return rgengc_remembersetbits_set(objspace, obj);
8952#ifndef PROFILE_REMEMBERSET_MARK
8953#define PROFILE_REMEMBERSET_MARK 0
8957 rgengc_rememberset_mark_plane(rb_objspace_t *objspace, uintptr_t p, bits_t bitset)
8963 gc_report(2, objspace, "rgengc_rememberset_mark: mark %s\n", obj_info(obj));
8964 GC_ASSERT(RVALUE_UNCOLLECTIBLE(obj));
8965 GC_ASSERT(RVALUE_OLD_P(obj) || RVALUE_WB_UNPROTECTED(obj));
8967 gc_mark_children(objspace, obj);
8969 p += BASE_SLOT_SIZE;
8980#if PROFILE_REMEMBERSET_MARK
8981 int has_old = 0, has_shady = 0, has_both = 0, skip = 0;
8983 gc_report(1, objspace, "rgengc_rememberset_mark: start\n");
8985 ccan_list_for_each(&heap->pages, page, page_node) {
8986 if (page->flags.has_remembered_objects | page->flags.has_uncollectible_wb_unprotected_objects) {
8987 uintptr_t p = page->start;
8988 bits_t bitset, bits[HEAP_PAGE_BITMAP_LIMIT];
8989 bits_t *remembered_bits = page->remembered_bits;
8990 bits_t *uncollectible_bits = page->uncollectible_bits;
8991 bits_t *wb_unprotected_bits = page->wb_unprotected_bits;
8992#if PROFILE_REMEMBERSET_MARK
8993 if (page->flags.has_remembered_objects && page->flags.has_uncollectible_wb_unprotected_objects) has_both++;
8994 else if (page->flags.has_remembered_objects) has_old++;
8995 else if (page->flags.has_uncollectible_wb_unprotected_objects) has_shady++;
8997 for (j=0; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
8998 bits[j] = remembered_bits[j] | (uncollectible_bits[j] & wb_unprotected_bits[j]);
8999 remembered_bits[j] = 0;
9001 page->flags.has_remembered_objects = FALSE;
9004 bitset >>= NUM_IN_PAGE(p);
9005 rgengc_rememberset_mark_plane(objspace, p, bitset);
9006 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
9008 for (j=1; j < HEAP_PAGE_BITMAP_LIMIT; j++) {
9010 rgengc_rememberset_mark_plane(objspace, p, bitset);
9011 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
9014#if PROFILE_REMEMBERSET_MARK
9021#if PROFILE_REMEMBERSET_MARK
9022 fprintf(stderr, "%d\t%d\t%d\t%d\n", has_both, has_old, has_shady, skip);
9024 gc_report(1, objspace, "rgengc_rememberset_mark: finished\n");
9032 ccan_list_for_each(&heap->pages, page, page_node) {
9033 memset(&page->mark_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
9034 memset(&page->uncollectible_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
9035 memset(&page->marking_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
9036 memset(&page->remembered_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
9037 memset(&page->pinned_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
9038 page->flags.has_uncollectible_wb_unprotected_objects = FALSE;
9039 page->flags.has_remembered_objects = FALSE;
9050 if (RGENGC_CHECK_MODE) {
9051 if (!RVALUE_OLD_P(a)) rb_bug("gc_writebarrier_generational: %s is not an old object.", obj_info(a));
9052 if ( RVALUE_OLD_P(b)) rb_bug("gc_writebarrier_generational: %s is an old object.", obj_info(b));
9053 if (is_incremental_marking(objspace)) rb_bug("gc_writebarrier_generational: called while incremental marking: %s -> %s", obj_info(a), obj_info(b));
9057 if (!RVALUE_REMEMBERED(a)) {
9058 RB_VM_LOCK_ENTER_NO_BARRIER();
9060 rgengc_remember(objspace, a);
9062 RB_VM_LOCK_LEAVE_NO_BARRIER();
9063 gc_report(1, objspace, "gc_writebarrier_generational: %s (remembered) -> %s\n", obj_info(a), obj_info(b));
9066 check_rvalue_consistency(a);
9067 check_rvalue_consistency(b);
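/* Generational write barrier: storing a young object (b) into an old object
 * (a) adds a to the remember set, so the next minor GC re-scans a and keeps b
 * alive even though old objects are otherwise skipped. */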
9073 gc_mark_set_parent(objspace, parent);
9074 rgengc_check_relation(objspace, obj);
9075 if (gc_mark_set(objspace, obj) == FALSE) return;
9076 gc_aging(objspace, obj);
9077 gc_grey(objspace, obj);
9085 gc_report(2, objspace, "gc_writebarrier_incremental: [LG] %p -> %s\n", (void *)a, obj_info(b));
9087 if (RVALUE_BLACK_P(a)) {
9088 if (RVALUE_WHITE_P(b)) {
9089 if (!RVALUE_WB_UNPROTECTED(a)) {
9090 gc_report(2, objspace, "gc_writebarrier_incremental: [IN] %p -> %s\n", (void *)a, obj_info(b));
9091 gc_mark_from(objspace, b, a);
9094 else if (RVALUE_OLD_P(a) && !RVALUE_OLD_P(b)) {
9095 rgengc_remember(objspace, a);
9098 if (UNLIKELY(objspace->flags.during_compacting)) {
9099 MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(b), b);
9109 if (RGENGC_CHECK_MODE) {
9110 if (SPECIAL_CONST_P(a)) rb_bug("rb_gc_writebarrier: a is special const: %"PRIxVALUE, a);
9111 if (SPECIAL_CONST_P(b)) rb_bug("rb_gc_writebarrier: b is special const: %"PRIxVALUE, b);
9115 if (!is_incremental_marking(objspace)) {
9116 if (!RVALUE_OLD_P(a) || RVALUE_OLD_P(b)) {
9120 gc_writebarrier_generational(a, b, objspace);
9126 RB_VM_LOCK_ENTER_NO_BARRIER();
9128 if (is_incremental_marking(objspace)) {
9129 gc_writebarrier_incremental(a, b, objspace);
9135 RB_VM_LOCK_LEAVE_NO_BARRIER();
9137 if (retry) goto retry;
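/* rb_gc_writebarrier dispatches on GC phase: outside incremental marking it
 * applies the generational barrier above; during incremental marking it takes
 * the VM lock and runs the incremental barrier, retrying if the phase changed
 * while it waited for the lock. */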
9145 if (RVALUE_WB_UNPROTECTED(obj)) {
9151 gc_report(2, objspace, "rb_gc_writebarrier_unprotect: %s %s\n", obj_info(obj),
9152 RVALUE_REMEMBERED(obj) ? " (already remembered)" : "");
9154 RB_VM_LOCK_ENTER_NO_BARRIER();
9156 if (RVALUE_OLD_P(obj)) {
9157 gc_report(1, objspace, "rb_gc_writebarrier_unprotect: %s\n", obj_info(obj));
9158 RVALUE_DEMOTE(objspace, obj);
9159 gc_mark_set(objspace, obj);
9160 gc_remember_unprotected(objspace, obj);
9163 objspace->profile.total_shade_operation_count++;
9164#if RGENGC_PROFILE >= 2
9165 objspace->profile.shade_operation_count_types[BUILTIN_TYPE(obj)]++;
9170 RVALUE_AGE_RESET(obj);
9173 RB_DEBUG_COUNTER_INC(obj_wb_unprotect);
9174 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
9176 RB_VM_LOCK_LEAVE_NO_BARRIER();
9184 rb_gc_writebarrier_remember(VALUE obj)
9188 gc_report(1, objspace, "rb_gc_writebarrier_remember: %s\n", obj_info(obj));
9190 if (is_incremental_marking(objspace)) {
9191 if (RVALUE_BLACK_P(obj)) {
9192 gc_grey(objspace, obj);
9196 if (RVALUE_OLD_P(obj)) {
9197 rgengc_remember(objspace, obj);
9203 rb_copy_wb_protected_attribute(VALUE dest, VALUE obj)
9207 if (RVALUE_WB_UNPROTECTED(obj) && !RVALUE_WB_UNPROTECTED(dest)) {
9208 if (!RVALUE_OLD_P(dest)) {
9209 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(dest), dest);
9210 RVALUE_AGE_RESET(dest);
9213 RVALUE_DEMOTE(objspace, dest);
9217 check_rvalue_consistency(dest);
9223 rb_obj_rgengc_writebarrier_protected_p(VALUE obj)
9225 return RBOOL(!RVALUE_WB_UNPROTECTED(obj));
9229 rb_obj_rgengc_promoted_p(VALUE obj)
9235 rb_obj_gc_flags(VALUE obj, ID* flags, size_t max)
9238 static ID ID_marked;
9239 static ID ID_wb_protected, ID_old, ID_marking, ID_uncollectible, ID_pinned;
9242#define I(s) ID_##s = rb_intern(#s);
9252 if (RVALUE_WB_UNPROTECTED(obj) == 0 && n<max) flags[n++] = ID_wb_protected;
9253 if (RVALUE_OLD_P(obj) && n<max) flags[n++] = ID_old;
9254 if (RVALUE_UNCOLLECTIBLE(obj) && n<max) flags[n++] = ID_uncollectible;
9255 if (MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj) && n<max) flags[n++] = ID_marking;
9256 if (MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) && n<max) flags[n++] = ID_marked;
9257 if (MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj) && n<max) flags[n++] = ID_pinned;
9266 newobj_cache->incremental_mark_step_allocated_slots = 0;
9268 for (size_t size_pool_idx = 0; size_pool_idx < SIZE_POOL_COUNT; size_pool_idx++) {
9271 struct heap_page *page = cache->using_page;
9272 RVALUE *freelist = cache->freelist;
9273 RUBY_DEBUG_LOG("ractor using_page:%p freelist:%p", (void *)page, (void *)freelist);
9275 heap_page_freelist_append(page, freelist);
9277 cache->using_page = NULL;
9278 cache->freelist = NULL;
9288#ifndef MARK_OBJECT_ARY_BUCKET_SIZE
9289#define MARK_OBJECT_ARY_BUCKET_SIZE 1024
9295 if (!is_pointer_to_heap(&rb_objspace, (void *)obj))
9300 VALUE ary_ary = GET_VM()->mark_object_ary;
9301 VALUE ary = rb_ary_last(0, 0, ary_ary);
9304 ary = rb_ary_hidden_new(MARK_OBJECT_ARY_BUCKET_SIZE);
9305 rb_ary_push(ary_ary, ary);
9308 rb_ary_push(ary, obj);
9322 tmp->next = global_list;
9332 rb_warn("Object is assigned to registering address already: %"PRIsVALUE,
9334 rb_print_backtrace(stderr);
9342 struct gc_list *tmp = global_list;
9344 if (tmp->varptr == addr) {
9345 global_list = tmp->next;
9350 if (tmp->next->varptr == addr) {
9351 struct gc_list *t = tmp->next;
9353 tmp->next = tmp->next->next;
9364 rb_gc_register_address(var);
9371 gc_stress_no_immediate_sweep,
9372 gc_stress_full_mark_after_malloc,
9376#define gc_stress_full_mark_after_malloc_p() \
9377 (FIXNUM_P(ruby_gc_stress_mode) && (FIX2LONG(ruby_gc_stress_mode) & (1<<gc_stress_full_mark_after_malloc)))
9382 if (!heap->free_pages) {
9383 if (!heap_increment(objspace, size_pool, heap)) {
9384 size_pool_allocatable_pages_set(objspace, size_pool, 1);
9385 heap_increment(objspace, size_pool, heap);
9393 if (dont_gc_val() || during_gc || ruby_disable_gc) {
9394 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
9396 heap_ready_to_gc(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
9406 gc_reset_malloc_info(rb_objspace_t *objspace, bool full_mark)
9408 gc_prof_set_malloc_info(objspace);
9410 size_t inc = ATOMIC_SIZE_EXCHANGE(malloc_increase, 0);
9411 size_t old_limit = malloc_limit;
9413 if (inc > malloc_limit) {
9414 malloc_limit = (size_t)(inc * gc_params.malloc_limit_growth_factor);
9415 if (malloc_limit > gc_params.malloc_limit_max) {
9416 malloc_limit = gc_params.malloc_limit_max;
9420 malloc_limit = (size_t)(malloc_limit * 0.98);
9421 if (malloc_limit < gc_params.malloc_limit_min) {
9422 malloc_limit = gc_params.malloc_limit_min;
9427 if (old_limit != malloc_limit) {
9428 fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: %"PRIuSIZE" -> %"PRIuSIZE"\n",
9429 rb_gc_count(), old_limit, malloc_limit);
9432 fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: not changed (%"PRIuSIZE")\n",
9433 rb_gc_count(), malloc_limit);
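/* malloc_limit adapts to the allocation rate: it grows by
 * malloc_limit_growth_factor (capped at malloc_limit_max) when the bytes
 * malloc'ed since the last GC exceeded it, and otherwise decays slowly
 * (factor 0.98, floored at malloc_limit_min). */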
9439#if RGENGC_ESTIMATE_OLDMALLOC
9441 if (objspace->rgengc.oldmalloc_increase > objspace->rgengc.oldmalloc_increase_limit) {
9442 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_OLDMALLOC;
9443 objspace->rgengc.oldmalloc_increase_limit =
9444 (size_t)(objspace->rgengc.oldmalloc_increase_limit * gc_params.oldmalloc_limit_growth_factor);
9446 if (objspace->rgengc.oldmalloc_increase_limit > gc_params.oldmalloc_limit_max) {
9447 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_max;
9451 if (0) fprintf(stderr, "%"PRIdSIZE"\t%d\t%"PRIuSIZE"\t%"PRIuSIZE"\t%"PRIdSIZE"\n",
9453 objspace->rgengc.need_major_gc,
9454 objspace->rgengc.oldmalloc_increase,
9455 objspace->rgengc.oldmalloc_increase_limit,
9456 gc_params.oldmalloc_limit_max);
9460 objspace->rgengc.oldmalloc_increase = 0;
9462 if ((objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_BY_OLDMALLOC) == 0) {
9463 objspace->rgengc.oldmalloc_increase_limit =
9464 (size_t)(objspace->rgengc.oldmalloc_increase_limit / ((gc_params.oldmalloc_limit_growth_factor - 1)/10 + 1));
9465 if (objspace->rgengc.oldmalloc_increase_limit < gc_params.oldmalloc_limit_min) {
9466 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
9474 garbage_collect(rb_objspace_t *objspace, unsigned int reason)
9480#if GC_PROFILE_MORE_DETAIL
9481 objspace->profile.prepare_time = getrusage_time();
9486#if GC_PROFILE_MORE_DETAIL
9487 objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;
9490 ret = gc_start(objspace, reason);
9500 unsigned int do_full_mark = !!(reason & GPR_FLAG_FULL_MARK);
9503 objspace->flags.immediate_sweep = !!(reason & GPR_FLAG_IMMEDIATE_SWEEP);
9505 if (!heap_allocated_pages) return FALSE;
9506 if (!(reason & GPR_FLAG_METHOD) && !ready_to_gc(objspace)) return TRUE;
9508 GC_ASSERT(gc_mode(objspace) == gc_mode_none);
9509 GC_ASSERT(!is_lazy_sweeping(objspace));
9510 GC_ASSERT(!is_incremental_marking(objspace));
9512 unsigned int lock_lev;
9513 gc_enter(objspace, gc_enter_event_start, &lock_lev);
9515#if RGENGC_CHECK_MODE >= 2
9516 gc_verify_internal_consistency(objspace);
9519 if (ruby_gc_stressful) {
9520 int flag = FIXNUM_P(ruby_gc_stress_mode) ? FIX2INT(ruby_gc_stress_mode) : 0;
9522 if ((flag & (1<<gc_stress_no_major)) == 0) {
9523 do_full_mark = TRUE;
9526 objspace->flags.immediate_sweep = !(flag & (1<<gc_stress_no_immediate_sweep));
9529 if (objspace->rgengc.need_major_gc) {
9530 reason |= objspace->rgengc.need_major_gc;
9531 do_full_mark = TRUE;
9533 else if (RGENGC_FORCE_MAJOR_GC) {
9534 reason = GPR_FLAG_MAJOR_BY_FORCE;
9535 do_full_mark = TRUE;
9538 objspace->rgengc.need_major_gc = GPR_FLAG_NONE;
9540 if (do_full_mark && (reason & GPR_FLAG_MAJOR_MASK) == 0) {
9541 reason |= GPR_FLAG_MAJOR_BY_FORCE;
9544 if (objspace->flags.dont_incremental ||
9545 reason & GPR_FLAG_IMMEDIATE_MARK ||
9546 ruby_gc_stressful) {
9547 objspace->flags.during_incremental_marking = FALSE;
9550 objspace->flags.during_incremental_marking = do_full_mark;
9554 if (do_full_mark && ruby_enable_autocompact) {
9555 objspace->flags.during_compacting = TRUE;
9556#if RGENGC_CHECK_MODE
9557 objspace->rcompactor.compare_func = ruby_autocompact_compare_func;
9561 objspace->flags.during_compacting = !!(reason & GPR_FLAG_COMPACT);
9564 if (!GC_ENABLE_LAZY_SWEEP || objspace->flags.dont_incremental) {
9565 objspace->flags.immediate_sweep = TRUE;
9568 if (objspace->flags.immediate_sweep) reason |= GPR_FLAG_IMMEDIATE_SWEEP;
9570 gc_report(1, objspace, "gc_start(reason: %x) => %u, %d, %d\n",
9572 do_full_mark, !is_incremental_marking(objspace), objspace->flags.immediate_sweep);
9574#if USE_DEBUG_COUNTER
9575 RB_DEBUG_COUNTER_INC(gc_count);
9577 if (reason & GPR_FLAG_MAJOR_MASK) {
9578 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_nofree, reason & GPR_FLAG_MAJOR_BY_NOFREE);
9579 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldgen, reason & GPR_FLAG_MAJOR_BY_OLDGEN);
9580 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_shady, reason & GPR_FLAG_MAJOR_BY_SHADY);
9581 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_force, reason & GPR_FLAG_MAJOR_BY_FORCE);
9582#if RGENGC_ESTIMATE_OLDMALLOC
9583 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldmalloc, reason & GPR_FLAG_MAJOR_BY_OLDMALLOC);
9587 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_newobj, reason & GPR_FLAG_NEWOBJ);
9588 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_malloc, reason & GPR_FLAG_MALLOC);
9589 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_method, reason & GPR_FLAG_METHOD);
9590 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_capi, reason & GPR_FLAG_CAPI);
9591 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_stress, reason & GPR_FLAG_STRESS);
9595 objspace->profile.count++;
9596 objspace->profile.latest_gc_info = reason;
9597 objspace->profile.total_allocated_objects_at_gc_start = total_allocated_objects(objspace);
9598 objspace->profile.heap_used_at_gc_start = heap_allocated_pages;
9599 objspace->profile.weak_references_count = 0;
9600 objspace->profile.retained_weak_references_count = 0;
9601 gc_prof_setup_new_record(objspace, reason);
9602 gc_reset_malloc_info(objspace, do_full_mark);
9605 GC_ASSERT(during_gc);
9607 gc_prof_timer_start(objspace);
9609 if (gc_marks(objspace, do_full_mark)) {
9613 gc_prof_timer_stop(objspace);
9615 gc_exit(objspace, gc_enter_event_start, &lock_lev);
9622 int marking = is_incremental_marking(objspace);
9623 int sweeping = is_lazy_sweeping(objspace);
9625 if (marking || sweeping) {
9626 unsigned int lock_lev;
9627 gc_enter(objspace, gc_enter_event_rest, &lock_lev);
9629 if (RGENGC_CHECK_MODE >= 2) gc_verify_internal_consistency(objspace);
9631 if (is_incremental_marking(objspace)) {
9632 gc_marking_enter(objspace);
9633 gc_marks_rest(objspace);
9634 gc_marking_exit(objspace);
9639 if (is_lazy_sweeping(objspace)) {
9640 gc_sweeping_enter(objspace);
9641 gc_sweep_rest(objspace);
9642 gc_sweeping_exit(objspace);
9645 gc_exit(objspace, gc_enter_event_rest, &lock_lev);
9651 unsigned int reason;
9658 if (is_marking(objspace)) {
9660 if (is_full_marking(objspace)) buff[i++] = 'F';
9661 if (is_incremental_marking(objspace)) buff[i++] = 'I';
9663 else if (is_sweeping(objspace)) {
9665 if (is_lazy_sweeping(objspace)) buff[i++] = 'L';
9676 static char buff[0x10];
9677 gc_current_status_fill(objspace, buff);
9681#if PRINT_ENTER_EXIT_TICK
9683static tick_t last_exit_tick;
9684static tick_t enter_tick;
9685static int enter_count = 0;
9686static char last_gc_status[0x10];
9689 gc_record(rb_objspace_t *objspace, int direction, const char *event)
9691 if (direction == 0) {
9693 enter_tick = tick();
9694 gc_current_status_fill(objspace, last_gc_status);
9697 tick_t exit_tick = tick();
9698 char current_gc_status[0x10];
9699 gc_current_status_fill(objspace, current_gc_status);
9702 fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
9703 enter_tick - last_exit_tick,
9704 exit_tick - enter_tick,
9706 last_gc_status, current_gc_status,
9707 (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
9708 last_exit_tick = exit_tick;
9711 fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
9713 exit_tick - enter_tick,
9715 last_gc_status, current_gc_status,
9716 (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
9722 gc_record(rb_objspace_t *objspace, int direction, const char *event)
9729 gc_enter_event_cstr(enum gc_enter_event event)
9732 case gc_enter_event_start: return "start";
9733 case gc_enter_event_continue: return "continue";
9734 case gc_enter_event_rest: return "rest";
9735 case gc_enter_event_finalizer: return "finalizer";
9736 case gc_enter_event_rb_memerror: return "rb_memerror";
9742 gc_enter_count(enum gc_enter_event event)
9745 case gc_enter_event_start: RB_DEBUG_COUNTER_INC(gc_enter_start); break;
9746 case gc_enter_event_continue: RB_DEBUG_COUNTER_INC(gc_enter_continue); break;
9747 case gc_enter_event_rest: RB_DEBUG_COUNTER_INC(gc_enter_rest); break;
9748 case gc_enter_event_finalizer: RB_DEBUG_COUNTER_INC(gc_enter_finalizer); break;
9749 case gc_enter_event_rb_memerror: break;
static bool current_process_time(struct timespec *ts);

static void
gc_clock_start(struct timespec *ts)
{
    if (!current_process_time(ts)) {
        ts->tv_sec = 0;
        ts->tv_nsec = 0;
    }
}

static uint64_t
gc_clock_end(struct timespec *ts)
{
    struct timespec end_time;

    if ((ts->tv_sec > 0 || ts->tv_nsec > 0) &&
            current_process_time(&end_time) &&
            end_time.tv_sec >= ts->tv_sec) {
        return (uint64_t)(end_time.tv_sec - ts->tv_sec) * (1000 * 1000 * 1000) +
                (end_time.tv_nsec - ts->tv_nsec);
    }

    return 0;
}
static void
gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev)
{
    RB_VM_LOCK_ENTER_LEV(lock_lev);

    switch (event) {
      case gc_enter_event_rest:
        if (!is_marking(objspace)) break;
        /* fall through */
      case gc_enter_event_start:
      case gc_enter_event_continue:
        break;
      default:
        break;
    }

    gc_enter_count(event);
    if (UNLIKELY(during_gc != 0)) rb_bug("during_gc != 0");
    if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);

    during_gc = TRUE;
    RUBY_DEBUG_LOG("%s (%s)", gc_enter_event_cstr(event), gc_current_status(objspace));
    gc_report(1, objspace, "gc_enter: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
    gc_record(objspace, 0, gc_enter_event_cstr(event));
}

static void
gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev)
{
    GC_ASSERT(during_gc != 0);

    gc_record(objspace, 1, gc_enter_event_cstr(event));
    RUBY_DEBUG_LOG("%s (%s)", gc_enter_event_cstr(event), gc_current_status(objspace));
    gc_report(1, objspace, "gc_exit: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
    during_gc = FALSE;

    RB_VM_LOCK_LEAVE_LEV(lock_lev);
}
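/*
 * gc_enter()/gc_exit() bracket every GC phase: they take and release the VM
 * lock at a recorded lock level, bump the per-event debug counters, and emit
 * the enter/exit traces via gc_record()/gc_report().  A typical caller pairs
 * them around one phase, as the rest-GC path above does:
 *
 *     unsigned int lock_lev;
 *     gc_enter(objspace, gc_enter_event_rest, &lock_lev);
 *     ...finish marking and/or sweeping...
 *     gc_exit(objspace, gc_enter_event_rest, &lock_lev);
 */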
9823#define MEASURE_GC (objspace->flags.measure_gc)
9829 GC_ASSERT(during_gc != 0);
9832 gc_clock_start(&objspace->profile.marking_start_time);
9839 GC_ASSERT(during_gc != 0);
9842 objspace->profile.marking_time_ns += gc_clock_end(&objspace->profile.marking_start_time);
9849 GC_ASSERT(during_gc != 0);
9852 gc_clock_start(&objspace->profile.sweeping_start_time);
9859 GC_ASSERT(during_gc != 0);
9862 objspace->profile.sweeping_time_ns += gc_clock_end(&objspace->profile.sweeping_start_time);
static void *
gc_with_gvl(void *ptr)
{
    struct objspace_and_reason *oar = (struct objspace_and_reason *)ptr;
    return (void *)(VALUE)garbage_collect(oar->objspace, oar->reason);
}

static int
garbage_collect_with_gvl(rb_objspace_t *objspace, unsigned int reason)
{
    if (dont_gc_val()) return TRUE;

    if (ruby_thread_has_gvl_p()) {
        return garbage_collect(objspace, reason);
    }
    else if (ruby_native_thread_p()) {
        /* acquire the GVL first, then collect */
        struct objspace_and_reason oar;
        oar.objspace = objspace;
        oar.reason = reason;
        return (int)(VALUE)rb_thread_call_with_gvl(gc_with_gvl, (void *)&oar);
    }
    else {
        /* no ruby thread */
        fprintf(stderr, "[FATAL] failed to allocate memory\n");
        exit(EXIT_FAILURE);
    }
}
static int
gc_set_candidate_object_i(void *vstart, void *vend, size_t stride, void *data)
{
    rb_objspace_t *objspace = &rb_objspace;
    VALUE v = (VALUE)vstart;

    for (; v != (VALUE)vend; v += stride) {
        switch (BUILTIN_TYPE(v)) {
          case T_STRING:
            /* Precompute the string coderange before a potential fork. */
            rb_enc_str_coderange(v);
            /* fall through */
          default:
            if (!RVALUE_OLD_P(v) && !RVALUE_WB_UNPROTECTED(v)) {
                RVALUE_AGE_SET_CANDIDATE(objspace, v);
            }
        }
    }

    return 0;
}

static VALUE
gc_start_internal(rb_execution_context_t *ec, VALUE self, VALUE full_mark, VALUE immediate_mark, VALUE immediate_sweep, VALUE compact)
{
    rb_objspace_t *objspace = &rb_objspace;
    unsigned int reason = (GPR_FLAG_FULL_MARK |
                           GPR_FLAG_IMMEDIATE_MARK |
                           GPR_FLAG_IMMEDIATE_SWEEP |
                           GPR_FLAG_METHOD);

    /* For now, compaction implies a full mark and sweep, so ignore the other flags. */
    if (RTEST(compact)) {
        GC_ASSERT(GC_COMPACTION_SUPPORTED);

        reason |= GPR_FLAG_COMPACT;
    }
    else {
        if (!RTEST(full_mark))       reason &= ~GPR_FLAG_FULL_MARK;
        if (!RTEST(immediate_mark))  reason &= ~GPR_FLAG_IMMEDIATE_MARK;
        if (!RTEST(immediate_sweep)) reason &= ~GPR_FLAG_IMMEDIATE_SWEEP;
    }

    garbage_collect(objspace, reason);
    gc_finalize_deferred(objspace);

    return Qnil;
}
static void
free_empty_pages(void)
{
    rb_objspace_t *objspace = &rb_objspace;

    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        /* Move every completely empty page to the tomb heap so it can be freed. */
        rb_size_pool_t *size_pool = &size_pools[i];
        rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
        rb_heap_t *tomb_heap = SIZE_POOL_TOMB_HEAP(size_pool);

        size_t freed_pages = 0;

        struct heap_page **next_page_ptr = &heap->free_pages;
        struct heap_page *page = heap->free_pages;
        while (page) {
            /* All finalizable objects should already have been finalized. */
            GC_ASSERT(page->final_slots == 0);

            struct heap_page *next_page = page->free_next;

            if (page->free_slots == page->total_slots) {
                heap_unlink_page(objspace, heap, page);
                heap_add_page(objspace, size_pool, tomb_heap, page);
                freed_pages++;
            }
            else {
                *next_page_ptr = page;
                next_page_ptr = &page->free_next;
            }

            page = next_page;
        }

        *next_page_ptr = NULL;

        size_pool_allocatable_pages_set(objspace, size_pool, size_pool->allocatable_pages + freed_pages);
    }

    heap_pages_free_unused_pages(objspace);
}

void
rb_gc_prepare_heap(void)
{
    rb_objspace_each_objects(gc_set_candidate_object_i, NULL);

#if defined(HAVE_MALLOC_TRIM) && !defined(RUBY_ALTERNATIVE_MALLOC_HEADER)
    malloc_trim(0);
#endif
}
    if (DYNAMIC_SYM_P(obj) && (RSYMBOL(obj)->id & ~ID_SCOPE_MASK)) {
        return FALSE;
    }

    /* Objects used as finalizer_table keys are looked up by address and
     * therefore must not move. */
    GC_ASSERT(st_is_member(finalizer_table, obj));

    GC_ASSERT(RVALUE_MARKED(obj));
    GC_ASSERT(!RVALUE_PINNED(obj));

    rb_bug("gc_is_moveable_obj: unreachable (%d)", (int)BUILTIN_TYPE(obj));

static VALUE
gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free, size_t src_slot_size, size_t slot_size)
{
    int marked;
    int wb_unprotected;
    int uncollectible;
    int age;
    RVALUE *dest = (RVALUE *)free;
    RVALUE *src = (RVALUE *)scan;

    gc_report(4, objspace, "Moving object: %p -> %p\n", (void *)scan, (void *)free);

    GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(free), free));
    GC_ASSERT(!RVALUE_MARKING((VALUE)src));

    /* Save off bits for the current object. */
    marked = rb_objspace_marked_object_p((VALUE)src);
    wb_unprotected = RVALUE_WB_UNPROTECTED((VALUE)src);
    uncollectible = RVALUE_UNCOLLECTIBLE((VALUE)src);
    bool remembered = RVALUE_REMEMBERED((VALUE)src);
    age = RVALUE_AGE_GET((VALUE)src);

    /* Clear bits for the eventual T_MOVED stub. */
    CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)src), (VALUE)src);
    CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)src), (VALUE)src);
    CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)src), (VALUE)src);
    CLEAR_IN_BITMAP(GET_HEAP_PAGE((VALUE)src)->remembered_bits, (VALUE)src);

    if (FL_TEST((VALUE)src, FL_EXIVAR)) {
        /* Moving generic ivars can resize an st table, which could malloc. */
        DURING_GC_COULD_MALLOC_REGION_START();
        {
            rb_mv_generic_ivar((VALUE)src, (VALUE)dest);
        }
        DURING_GC_COULD_MALLOC_REGION_END();
    }

    st_data_t srcid = (st_data_t)src, id;

    /* If the source object's object_id has been seen, update the
     * object-to-id mapping to point at the new address. */
    if (st_lookup(objspace->obj_to_id_tbl, srcid, &id)) {
        gc_report(4, objspace, "Moving object with seen id: %p -> %p\n", (void *)src, (void *)dest);
        DURING_GC_COULD_MALLOC_REGION_START();
        {
            st_delete(objspace->obj_to_id_tbl, &srcid, 0);
            st_insert(objspace->obj_to_id_tbl, (st_data_t)dest, id);
        }
        DURING_GC_COULD_MALLOC_REGION_END();
    }

    /* Move the object body, including any trailing overhead area. */
    memcpy(dest, src, MIN(src_slot_size, slot_size));

    if (RVALUE_OVERHEAD > 0) {
        void *dest_overhead = (void *)(((uintptr_t)dest) + slot_size - RVALUE_OVERHEAD);
        void *src_overhead = (void *)(((uintptr_t)src) + src_slot_size - RVALUE_OVERHEAD);

        memcpy(dest_overhead, src_overhead, RVALUE_OVERHEAD);
    }

    memset(src, 0, src_slot_size);
    RVALUE_AGE_RESET((VALUE)src);

    /* Set bits for the object in its new location. */
    if (remembered) {
        MARK_IN_BITMAP(GET_HEAP_PAGE(dest)->remembered_bits, (VALUE)dest);
    }
    else {
        CLEAR_IN_BITMAP(GET_HEAP_PAGE(dest)->remembered_bits, (VALUE)dest);
    }

    if (marked) {
        MARK_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)dest), (VALUE)dest);
    }
    else {
        CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)dest), (VALUE)dest);
    }

    if (wb_unprotected) {
        MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)dest), (VALUE)dest);
    }
    else {
        CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)dest), (VALUE)dest);
    }

    if (uncollectible) {
        MARK_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)dest), (VALUE)dest);
    }
    else {
        CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)dest), (VALUE)dest);
    }

    RVALUE_AGE_SET((VALUE)dest, age);

    /* Leave a forwarding stub behind at the old address. */
    src->as.moved.flags = T_MOVED;
    src->as.moved.dummy = Qundef;
    src->as.moved.destination = (VALUE)dest;

    return (VALUE)src;
}
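/*
 * After gc_move() the vacated source slot is left as a T_MOVED stub whose
 * `destination` field holds the forwarding address.  Reference updating later
 * resolves such stubs through rb_gc_location(), which returns the destination
 * for T_MOVED slots and the value itself for everything else.
 */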
#if GC_CAN_COMPILE_COMPACTION
static int
compare_pinned_slots(const void *left, const void *right, void *dummy)
{
    struct heap_page *left_page;
    struct heap_page *right_page;

    left_page = *(struct heap_page * const *)left;
    right_page = *(struct heap_page * const *)right;

    return left_page->pinned_slots - right_page->pinned_slots;
}

static int
compare_free_slots(const void *left, const void *right, void *dummy)
{
    struct heap_page *left_page;
    struct heap_page *right_page;

    left_page = *(struct heap_page * const *)left;
    right_page = *(struct heap_page * const *)right;

    return left_page->free_slots - right_page->free_slots;
}

static void
gc_sort_heap_by_compare_func(rb_objspace_t *objspace, gc_compact_compare_func compare_func)
{
    for (int j = 0; j < SIZE_POOL_COUNT; j++) {
        rb_size_pool_t *size_pool = &size_pools[j];
        size_t total_pages = SIZE_POOL_EDEN_HEAP(size_pool)->total_pages;
        size_t size = size_mul_or_raise(total_pages, sizeof(struct heap_page *), rb_eRuntimeError);
        struct heap_page *page = 0, **page_list = malloc(size);
        size_t i = 0;

        SIZE_POOL_EDEN_HEAP(size_pool)->free_pages = NULL;
        ccan_list_for_each(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node) {
            page_list[i++] = page;
        }

        GC_ASSERT((size_t)i == total_pages);

        /* Sort the pages, then rebuild the eden page list in that order. */
        ruby_qsort(page_list, total_pages, sizeof(struct heap_page *), compare_func, NULL);

        ccan_list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages);

        for (i = 0; i < total_pages; i++) {
            ccan_list_add(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, &page_list[i]->page_node);
            if (page_list[i]->free_slots != 0) {
                heap_add_freepage(SIZE_POOL_EDEN_HEAP(size_pool), page_list[i]);
            }
        }

        free(page_list);
    }
}
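/*
 * gc_sort_heap_by_compare_func() only changes the order in which compaction
 * visits eden pages.  compare_free_slots orders pages by free-slot count (the
 * "toward_empty" verification mode and the "empty" autocompact strategy below
 * select it), while compare_pinned_slots orders them by pinned-slot count.
 */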
10231 if (ARY_SHARED_P(v)) {
10232 VALUE old_root =
RARRAY(v)->as.heap.aux.shared_root;
10234 UPDATE_IF_MOVED(objspace,
RARRAY(v)->as.heap.aux.shared_root);
10236 VALUE new_root =
RARRAY(v)->as.heap.aux.shared_root;
10238 if (ARY_EMBED_P(new_root) && new_root != old_root) {
10239 size_t offset = (size_t)(
RARRAY(v)->as.heap.ptr -
RARRAY(old_root)->as.ary);
10240 GC_ASSERT(
RARRAY(v)->as.heap.ptr >=
RARRAY(old_root)->as.ary);
10241 RARRAY(v)->as.heap.ptr =
RARRAY(new_root)->as.ary + offset;
10249 for (
long i = 0; i <
len; i++) {
10250 UPDATE_IF_MOVED(objspace, ptr[i]);
10254 if (rb_gc_obj_slot_size(v) >= rb_ary_size_as_embedded(v)) {
10255 if (rb_ary_embeddable_p(v)) {
10256 rb_ary_make_embedded(v);
10269 if (rb_shape_obj_too_complex(v)) {
10270 gc_ref_update_table_values_only(objspace, ROBJECT_IV_HASH(v));
10274 size_t slot_size = rb_gc_obj_slot_size(v);
10276 if (slot_size >= embed_size && !
RB_FL_TEST_RAW(v, ROBJECT_EMBED)) {
10278 memcpy(
ROBJECT(v)->as.ary, ptr,
sizeof(
VALUE) * ROBJECT_IV_COUNT(v));
10284 for (uint32_t i = 0; i < ROBJECT_IV_COUNT(v); i++) {
10285 UPDATE_IF_MOVED(objspace, ptr[i]);
10290hash_replace_ref(st_data_t *key, st_data_t *value, st_data_t argp,
int existing)
10294 if (gc_object_moved_p(objspace, (
VALUE)*key)) {
10295 *key = rb_gc_location((
VALUE)*key);
10298 if (gc_object_moved_p(objspace, (
VALUE)*value)) {
10299 *value = rb_gc_location((
VALUE)*value);
10302 return ST_CONTINUE;
10306hash_foreach_replace(st_data_t key, st_data_t value, st_data_t argp,
int error)
10312 if (gc_object_moved_p(objspace, (
VALUE)key)) {
10316 if (gc_object_moved_p(objspace, (
VALUE)value)) {
10319 return ST_CONTINUE;
10323hash_replace_ref_value(st_data_t *key, st_data_t *value, st_data_t argp,
int existing)
10327 if (gc_object_moved_p(objspace, (
VALUE)*value)) {
10328 *value = rb_gc_location((
VALUE)*value);
10331 return ST_CONTINUE;
10335hash_foreach_replace_value(st_data_t key, st_data_t value, st_data_t argp,
int error)
10341 if (gc_object_moved_p(objspace, (
VALUE)value)) {
10344 return ST_CONTINUE;
10350 if (!tbl || tbl->num_entries == 0)
return;
10352 if (st_foreach_with_replace(tbl, hash_foreach_replace_value, hash_replace_ref_value, (st_data_t)objspace)) {
10358rb_gc_ref_update_table_values_only(
st_table *tbl)
10360 gc_ref_update_table_values_only(&
rb_objspace, tbl);
10366 if (!tbl || tbl->num_entries == 0)
return;
10368 if (st_foreach_with_replace(tbl, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace)) {
10378 gc_update_table_refs(objspace, ptr);
10384 rb_hash_stlike_foreach_with_replace(v, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace);
10392 UPDATE_IF_MOVED(objspace, me->owner);
10393 UPDATE_IF_MOVED(objspace, me->defined_class);
10396 switch (def->type) {
10397 case VM_METHOD_TYPE_ISEQ:
10398 if (def->body.iseq.
iseqptr) {
10401 TYPED_UPDATE_IF_MOVED(objspace,
rb_cref_t *, def->body.iseq.
cref);
10403 case VM_METHOD_TYPE_ATTRSET:
10404 case VM_METHOD_TYPE_IVAR:
10405 UPDATE_IF_MOVED(objspace, def->body.attr.location);
10407 case VM_METHOD_TYPE_BMETHOD:
10408 UPDATE_IF_MOVED(objspace, def->body.bmethod.proc);
10410 case VM_METHOD_TYPE_ALIAS:
10413 case VM_METHOD_TYPE_REFINED:
10416 case VM_METHOD_TYPE_CFUNC:
10417 case VM_METHOD_TYPE_ZSUPER:
10418 case VM_METHOD_TYPE_MISSING:
10419 case VM_METHOD_TYPE_OPTIMIZED:
10420 case VM_METHOD_TYPE_UNDEF:
10421 case VM_METHOD_TYPE_NOTIMPLEMENTED:
10432 for (i=0; i<n; i++) {
10433 UPDATE_IF_MOVED(objspace, values[i]);
10438rb_gc_update_values(
long n,
VALUE *values)
10447 is_pointer_to_heap(objspace, (
void *)obj) &&
10454 switch (imemo_type(obj)) {
10458 if (LIKELY(env->ep)) {
10460 TYPED_UPDATE_IF_MOVED(objspace,
rb_iseq_t *, env->iseq);
10461 UPDATE_IF_MOVED(objspace, env->ep[VM_ENV_DATA_INDEX_ENV]);
10462 gc_update_values(objspace, (
long)env->env_size, (
VALUE *)env->env);
10467 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.cref.klass_or_self);
10468 TYPED_UPDATE_IF_MOVED(objspace,
struct rb_cref_struct *, RANY(obj)->as.imemo.cref.next);
10469 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.cref.refinements);
10472 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.cref_or_me);
10473 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.lastline);
10474 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.backref);
10475 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.others);
10477 case imemo_throw_data:
10478 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.throw_data.throw_obj);
10483 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.memo.v1);
10484 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.memo.v2);
10487 gc_ref_update_method_entry(objspace, &RANY(obj)->as.imemo.ment);
10490 rb_iseq_mark_and_move((
rb_iseq_t *)obj,
true);
10493 rb_ast_update_references((
rb_ast_t *)obj);
10495 case imemo_callcache:
10503 if (moved_or_living_object_strictly_p(objspace, cc->klass) &&
10504 moved_or_living_object_strictly_p(objspace, (
VALUE)cc->cme_)) {
10505 UPDATE_IF_MOVED(objspace, cc->klass);
10509 vm_cc_invalidate(cc);
10514 case imemo_constcache:
10517 UPDATE_IF_MOVED(objspace, ice->value);
10520 case imemo_parser_strterm:
10522 case imemo_callinfo:
10525 rb_bug(
"not reachable %d", imemo_type(obj));
10530static enum rb_id_table_iterator_result
10531check_id_table_move(
VALUE value,
void *data)
10535 if (gc_object_moved_p(objspace, (
VALUE)value)) {
10536 return ID_TABLE_REPLACE;
10539 return ID_TABLE_CONTINUE;
VALUE
rb_gc_location(VALUE value)
{
    VALUE destination;

    if (!SPECIAL_CONST_P(value)) {
        void *poisoned = asan_unpoison_object_temporary(value);

        if (BUILTIN_TYPE(value) == T_MOVED) {
            destination = (VALUE)RMOVED(value)->destination;
        }
        else {
            destination = value;
        }

        /* Re-poison the slot if it was not actually live. */
        if (poisoned) {
            asan_poison_object(value);
        }
    }
    else {
        destination = value;
    }

    return destination;
}
10574static enum rb_id_table_iterator_result
10575update_id_table(
VALUE *value,
void *data,
int existing)
10579 if (gc_object_moved_p(objspace, (
VALUE)*value)) {
10580 *value = rb_gc_location((
VALUE)*value);
10583 return ID_TABLE_CONTINUE;
10590 rb_id_table_foreach_values_with_replace(tbl, check_id_table_move, update_id_table, objspace);
10594static enum rb_id_table_iterator_result
10595update_cc_tbl_i(
VALUE ccs_ptr,
void *data)
10599 VM_ASSERT(vm_ccs_p(ccs));
10601 if (gc_object_moved_p(objspace, (
VALUE)ccs->cme)) {
10605 for (
int i=0; i<ccs->len; i++) {
10606 if (gc_object_moved_p(objspace, (
VALUE)ccs->entries[i].ci)) {
10607 ccs->entries[i].ci = (
struct rb_callinfo *)rb_gc_location((
VALUE)ccs->entries[i].ci);
10609 if (gc_object_moved_p(objspace, (
VALUE)ccs->entries[i].cc)) {
10610 ccs->entries[i].cc = (
struct rb_callcache *)rb_gc_location((
VALUE)ccs->entries[i].cc);
10615 return ID_TABLE_CONTINUE;
10623 rb_id_table_foreach_values(tbl, update_cc_tbl_i, objspace);
10627static enum rb_id_table_iterator_result
10628update_cvc_tbl_i(
VALUE cvc_entry,
void *data)
10636 TYPED_UPDATE_IF_MOVED(objspace,
rb_cref_t *, entry->cref);
10639 entry->class_value = rb_gc_location(entry->class_value);
10641 return ID_TABLE_CONTINUE;
10649 rb_id_table_foreach_values(tbl, update_cvc_tbl_i, objspace);
10653static enum rb_id_table_iterator_result
10654mark_cvc_tbl_i(
VALUE cvc_entry,
void *data)
10662 gc_mark(objspace, (
VALUE) entry->cref);
10664 return ID_TABLE_CONTINUE;
10672 rb_id_table_foreach_values(tbl, mark_cvc_tbl_i, objspace);
10676static enum rb_id_table_iterator_result
10677update_const_table(
VALUE value,
void *data)
10682 if (gc_object_moved_p(objspace, ce->value)) {
10683 ce->value = rb_gc_location(ce->value);
10686 if (gc_object_moved_p(objspace, ce->file)) {
10687 ce->file = rb_gc_location(ce->file);
10690 return ID_TABLE_CONTINUE;
10697 rb_id_table_foreach_values(tbl, update_const_table, objspace);
10704 UPDATE_IF_MOVED(objspace, entry->klass);
10705 entry = entry->next;
10712 UPDATE_IF_MOVED(objspace, ext->origin_);
10713 UPDATE_IF_MOVED(objspace, ext->includer);
10714 UPDATE_IF_MOVED(objspace, ext->refined_class);
10715 update_subclass_entries(objspace, ext->subclasses);
10721 if (
FL_TEST_RAW(obj, RCLASS_SUPERCLASSES_INCLUDE_SELF)) {
10722 for (
size_t i = 0; i < RCLASS_SUPERCLASS_DEPTH(obj) + 1; i++) {
10723 UPDATE_IF_MOVED(objspace, RCLASS_SUPERCLASSES(obj)[i]);
10731 RVALUE *any = RANY(obj);
10733 gc_report(4, objspace,
"update-refs: %p ->\n", (
void *)obj);
10736 rb_ref_update_generic_ivar(obj);
10742 UPDATE_IF_MOVED(objspace, RCLASS_ATTACHED_OBJECT(obj));
10747 UPDATE_IF_MOVED(objspace,
RCLASS(obj)->super);
10749 update_m_tbl(objspace, RCLASS_M_TBL(obj));
10750 update_cc_tbl(objspace, obj);
10751 update_cvc_tbl(objspace, obj);
10752 update_superclasses(objspace, obj);
10754 if (rb_shape_obj_too_complex(obj)) {
10755 gc_ref_update_table_values_only(objspace, RCLASS_IV_HASH(obj));
10758 for (attr_index_t i = 0; i < RCLASS_IV_COUNT(obj); i++) {
10759 UPDATE_IF_MOVED(objspace, RCLASS_IVPTR(obj)[i]);
10763 update_class_ext(objspace, RCLASS_EXT(obj));
10764 update_const_tbl(objspace, RCLASS_CONST_TBL(obj));
10766 UPDATE_IF_MOVED(objspace, RCLASS_EXT(obj)->classpath);
10770 if (RICLASS_OWNS_M_TBL_P(obj)) {
10771 update_m_tbl(objspace, RCLASS_M_TBL(obj));
10774 UPDATE_IF_MOVED(objspace,
RCLASS(obj)->super);
10776 update_class_ext(objspace, RCLASS_EXT(obj));
10777 update_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
10778 update_cc_tbl(objspace, obj);
10782 gc_ref_update_imemo(objspace, obj);
10794 gc_ref_update_array(objspace, obj);
10798 gc_ref_update_hash(objspace, obj);
10799 UPDATE_IF_MOVED(objspace, any->as.hash.ifnone);
10804 if (STR_SHARED_P(obj)) {
10810 if (rb_gc_obj_slot_size(obj) >= rb_str_size_as_embedded(obj)) {
10811 if (!STR_EMBED_P(obj) && rb_str_reembeddable_p(obj)) {
10812 rb_str_make_embedded(obj);
10823 if (
RTYPEDDATA_P(obj) && gc_declarative_marking_p(any->as.typeddata.
type)) {
10824 size_t *offset_list = (
size_t *)RANY(obj)->as.typeddata.type->function.dmark;
10826 for (
size_t offset = *offset_list; offset != RUBY_REF_END; offset = *offset_list++) {
10827 VALUE *ref = (
VALUE *)((
char *)ptr + offset);
10829 *ref = rb_gc_location(*ref);
10834 if (compact_func) (*compact_func)(ptr);
10841 gc_ref_update_object(objspace, obj);
10845 if (any->as.file.
fptr) {
10846 UPDATE_IF_MOVED(objspace, any->as.file.
fptr->
self);
10847 UPDATE_IF_MOVED(objspace, any->as.file.
fptr->
pathv);
10856 UPDATE_IF_MOVED(objspace, any->as.regexp.
src);
10861 UPDATE_IF_MOVED(objspace, RSYMBOL(any)->fstr);
10870 UPDATE_IF_MOVED(objspace, any->as.match.regexp);
10872 if (any->as.match.str) {
10873 UPDATE_IF_MOVED(objspace, any->as.match.str);
10878 UPDATE_IF_MOVED(objspace, any->as.rational.num);
10879 UPDATE_IF_MOVED(objspace, any->as.rational.den);
10883 UPDATE_IF_MOVED(objspace, any->as.complex.real);
10884 UPDATE_IF_MOVED(objspace, any->as.complex.imag);
10890 long i,
len = RSTRUCT_LEN(obj);
10891 VALUE *ptr = (
VALUE *)RSTRUCT_CONST_PTR(obj);
10893 for (i = 0; i <
len; i++) {
10894 UPDATE_IF_MOVED(objspace, ptr[i]);
10900 rb_gcdebug_print_obj_condition((
VALUE)obj);
10901 rb_obj_info_dump(obj);
10902 rb_bug(
"unreachable");
10908 UPDATE_IF_MOVED(objspace,
RBASIC(obj)->klass);
10910 gc_report(4, objspace,
"update-refs: %p <-\n", (
void *)obj);
10917 asan_unlock_freelist(page);
10918 asan_lock_freelist(page);
10919 page->flags.has_uncollectible_wb_unprotected_objects = FALSE;
10920 page->flags.has_remembered_objects = FALSE;
10923 for (; v != (
VALUE)vend; v += stride) {
10924 void *poisoned = asan_unpoison_object_temporary(v);
10932 if (RVALUE_WB_UNPROTECTED(v)) {
10933 page->flags.has_uncollectible_wb_unprotected_objects = TRUE;
10935 if (RVALUE_REMEMBERED(v)) {
10936 page->flags.has_remembered_objects = TRUE;
10938 if (page->flags.before_sweep) {
10939 if (RVALUE_MARKED(v)) {
10940 gc_update_object_references(objspace, v);
10944 gc_update_object_references(objspace, v);
10949 asan_poison_object(v);
10957#define global_symbols ruby_global_symbols
10962 objspace->flags.during_reference_updating =
true;
10965 rb_vm_t *vm = rb_ec_vm_ptr(ec);
10969 for (
int i = 0; i < SIZE_POOL_COUNT; i++) {
10970 bool should_set_mark_bits = TRUE;
10972 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
10974 ccan_list_for_each(&heap->pages, page, page_node) {
10975 uintptr_t start = (uintptr_t)page->start;
10976 uintptr_t end = start + (page->total_slots * size_pool->slot_size);
10978 gc_ref_update((
void *)start, (
void *)end, size_pool->slot_size, objspace, page);
10979 if (page == heap->sweeping_page) {
10980 should_set_mark_bits = FALSE;
10982 if (should_set_mark_bits) {
10983 gc_setup_mark_bits(page);
10987 rb_vm_update_references(vm);
10988 rb_gc_update_global_tbl();
10989 global_symbols.ids = rb_gc_location(global_symbols.ids);
10990 global_symbols.dsymbol_fstr_hash = rb_gc_location(global_symbols.dsymbol_fstr_hash);
10991 gc_ref_update_table_values_only(objspace, objspace->obj_to_id_tbl);
10992 gc_update_table_refs(objspace, objspace->id_to_obj_tbl);
10993 gc_update_table_refs(objspace, global_symbols.str_sym);
10994 gc_update_table_refs(objspace, finalizer_table);
10996 objspace->flags.during_reference_updating =
false;
10999#if GC_CAN_COMPILE_COMPACTION
11013gc_compact_stats(
VALUE self)
11017 VALUE h = rb_hash_new();
11018 VALUE considered = rb_hash_new();
11019 VALUE moved = rb_hash_new();
11020 VALUE moved_up = rb_hash_new();
11021 VALUE moved_down = rb_hash_new();
11023 for (i=0; i<
T_MASK; i++) {
11024 if (objspace->rcompactor.considered_count_table[i]) {
11025 rb_hash_aset(considered, type_sym(i),
SIZET2NUM(objspace->rcompactor.considered_count_table[i]));
11028 if (objspace->rcompactor.moved_count_table[i]) {
11029 rb_hash_aset(moved, type_sym(i),
SIZET2NUM(objspace->rcompactor.moved_count_table[i]));
11032 if (objspace->rcompactor.moved_up_count_table[i]) {
11033 rb_hash_aset(moved_up, type_sym(i),
SIZET2NUM(objspace->rcompactor.moved_up_count_table[i]));
11036 if (objspace->rcompactor.moved_down_count_table[i]) {
11037 rb_hash_aset(moved_down, type_sym(i),
SIZET2NUM(objspace->rcompactor.moved_down_count_table[i]));
11041 rb_hash_aset(h,
ID2SYM(rb_intern(
"considered")), considered);
11042 rb_hash_aset(h,
ID2SYM(rb_intern(
"moved")), moved);
11043 rb_hash_aset(h,
ID2SYM(rb_intern(
"moved_up")), moved_up);
11044 rb_hash_aset(h,
ID2SYM(rb_intern(
"moved_down")), moved_down);
11049# define gc_compact_stats rb_f_notimplement
11052#if GC_CAN_COMPILE_COMPACTION
11054root_obj_check_moved_i(
const char *category,
VALUE obj,
void *data)
11057 rb_bug(
"ROOT %s points to MOVED: %p -> %s", category, (
void *)obj, obj_info(rb_gc_location(obj)));
11062reachable_object_check_moved_i(
VALUE ref,
void *data)
11066 rb_bug(
"Object %s points to MOVED: %p -> %s", obj_info(parent), (
void *)ref, obj_info(rb_gc_location(ref)));
11071heap_check_moved_i(
void *vstart,
void *vend,
size_t stride,
void *data)
11074 for (; v != (
VALUE)vend; v += stride) {
11079 void *poisoned = asan_unpoison_object_temporary(v);
11086 if (!rb_objspace_garbage_object_p(v)) {
11087 rb_objspace_reachable_objects_from(v, reachable_object_check_moved_i, (
void *)v);
11093 asan_poison_object(v);
11119gc_compact(
VALUE self)
11124 return gc_compact_stats(self);
11127# define gc_compact rb_f_notimplement
11130#if GC_CAN_COMPILE_COMPACTION
11132struct desired_compaction_pages_i_data {
11134 size_t required_slots[SIZE_POOL_COUNT];
11138desired_compaction_pages_i(
struct heap_page *page,
void *data)
11140 struct desired_compaction_pages_i_data *tdata = data;
11143 VALUE vend = vstart + (
VALUE)(page->total_slots * page->size_pool->slot_size);
11146 for (
VALUE v = vstart; v != vend; v += page->size_pool->slot_size) {
11148 void *poisoned = asan_unpoison_object_temporary(v);
11151 asan_poison_object(v);
11156 rb_size_pool_t *dest_pool = gc_compact_destination_pool(objspace, page->size_pool, v);
11157 size_t dest_pool_idx = dest_pool - size_pools;
11158 tdata->required_slots[dest_pool_idx]++;
11172 if (
RTEST(double_heap)) {
11173 rb_warn(
"double_heap is deprecated, please use expand_heap instead");
11176 RB_VM_LOCK_ENTER();
11181 if (
RTEST(expand_heap)) {
11182 struct desired_compaction_pages_i_data desired_compaction = {
11183 .objspace = objspace,
11184 .required_slots = {0},
11187 objspace_each_pages(objspace, desired_compaction_pages_i, &desired_compaction, TRUE);
11190 size_t max_existing_pages = 0;
11191 for(
int i = 0; i < SIZE_POOL_COUNT; i++) {
11193 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
11194 max_existing_pages = MAX(max_existing_pages, heap->total_pages);
11197 for (
int i = 0; i < SIZE_POOL_COUNT; i++) {
11199 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
11201 size_t pages_to_add = 0;
11208 pages_to_add += max_existing_pages - heap->total_pages;
11213 pages_to_add += slots_to_pages_for_size_pool(objspace, size_pool, desired_compaction.required_slots[i]);
11220 heap_add_pages(objspace, size_pool, heap, pages_to_add);
11223 else if (
RTEST(double_heap)) {
11224 for (
int i = 0; i < SIZE_POOL_COUNT; i++) {
11226 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
11227 heap_add_pages(objspace, size_pool, heap, heap->total_pages);
11232 if (
RTEST(toward_empty)) {
11233 objspace->rcompactor.compare_func = compare_free_slots;
11236 RB_VM_LOCK_LEAVE();
11240 objspace_reachable_objects_from_root(objspace, root_obj_check_moved_i, NULL);
11241 objspace_each_objects(objspace, heap_check_moved_i, NULL, TRUE);
11243 objspace->rcompactor.compare_func = NULL;
11244 return gc_compact_stats(self);
11247# define gc_verify_compaction_references (rb_builtin_arity3_function_type)rb_f_notimplement
11260 unless_objspace(objspace) {
return; }
11261 unsigned int reason = GPR_DEFAULT_REASON;
11262 garbage_collect(objspace, reason);
11268 unless_objspace(objspace) {
return FALSE; }
11272#if RGENGC_PROFILE >= 2
11274static const char *type_name(
int type,
VALUE obj);
11277gc_count_add_each_types(
VALUE hash,
const char *name,
const size_t *types)
11281 for (i=0; i<
T_MASK; i++) {
11282 const char *
type = type_name(i, 0);
11285 rb_hash_aset(hash,
ID2SYM(rb_intern(name)), result);
11302gc_info_decode(
rb_objspace_t *objspace,
const VALUE hash_or_key,
const unsigned int orig_flags)
11304 static VALUE sym_major_by =
Qnil, sym_gc_by, sym_immediate_sweep, sym_have_finalizer, sym_state, sym_need_major_by;
11305 static VALUE sym_nofree, sym_oldgen, sym_shady, sym_force, sym_stress;
11306#if RGENGC_ESTIMATE_OLDMALLOC
11307 static VALUE sym_oldmalloc;
11309 static VALUE sym_newobj, sym_malloc, sym_method, sym_capi;
11310 static VALUE sym_none, sym_marking, sym_sweeping;
11311 static VALUE sym_weak_references_count, sym_retained_weak_references_count;
11313 VALUE major_by, need_major_by;
11314 unsigned int flags = orig_flags ? orig_flags : objspace->profile.latest_gc_info;
11319 else if (RB_TYPE_P(hash_or_key,
T_HASH)) {
11320 hash = hash_or_key;
11326 if (
NIL_P(sym_major_by)) {
11327#define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
11330 S(immediate_sweep);
11340#if RGENGC_ESTIMATE_OLDMALLOC
11352 S(weak_references_count);
11353 S(retained_weak_references_count);
11357#define SET(name, attr) \
11358 if (key == sym_##name) \
11360 else if (hash != Qnil) \
11361 rb_hash_aset(hash, sym_##name, (attr));
11364 (flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
11365 (flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
11366 (flags & GPR_FLAG_MAJOR_BY_SHADY) ? sym_shady :
11367 (flags & GPR_FLAG_MAJOR_BY_FORCE) ? sym_force :
11368#if RGENGC_ESTIMATE_OLDMALLOC
11369 (flags & GPR_FLAG_MAJOR_BY_OLDMALLOC) ? sym_oldmalloc :
11372 SET(major_by, major_by);
11374 if (orig_flags == 0) {
11375 unsigned int need_major_flags = objspace->rgengc.need_major_gc;
11377 (need_major_flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
11378 (need_major_flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
11379 (need_major_flags & GPR_FLAG_MAJOR_BY_SHADY) ? sym_shady :
11380 (need_major_flags & GPR_FLAG_MAJOR_BY_FORCE) ? sym_force :
11381#if RGENGC_ESTIMATE_OLDMALLOC
11382 (need_major_flags & GPR_FLAG_MAJOR_BY_OLDMALLOC) ? sym_oldmalloc :
11385 SET(need_major_by, need_major_by);
11389 (flags & GPR_FLAG_NEWOBJ) ? sym_newobj :
11390 (flags & GPR_FLAG_MALLOC) ? sym_malloc :
11391 (flags & GPR_FLAG_METHOD) ? sym_method :
11392 (flags & GPR_FLAG_CAPI) ? sym_capi :
11393 (flags & GPR_FLAG_STRESS) ? sym_stress :
11397 SET(have_finalizer, RBOOL(flags & GPR_FLAG_HAVE_FINALIZE));
11398 SET(immediate_sweep, RBOOL(flags & GPR_FLAG_IMMEDIATE_SWEEP));
11400 if (orig_flags == 0) {
11401 SET(state, gc_mode(objspace) == gc_mode_none ? sym_none :
11402 gc_mode(objspace) == gc_mode_marking ? sym_marking : sym_sweeping);
11405 SET(weak_references_count,
LONG2FIX(objspace->profile.weak_references_count));
11406 SET(retained_weak_references_count,
LONG2FIX(objspace->profile.retained_weak_references_count));
11410 rb_raise(rb_eArgError,
"unknown key: %"PRIsVALUE,
rb_sym2str(key));
11420 return gc_info_decode(objspace, key, 0);
11429 arg = rb_hash_new();
11435 return gc_info_decode(objspace, arg, 0);
11441 gc_stat_sym_marking_time,
11442 gc_stat_sym_sweeping_time,
11443 gc_stat_sym_heap_allocated_pages,
11444 gc_stat_sym_heap_sorted_length,
11445 gc_stat_sym_heap_allocatable_pages,
11446 gc_stat_sym_heap_available_slots,
11447 gc_stat_sym_heap_live_slots,
11448 gc_stat_sym_heap_free_slots,
11449 gc_stat_sym_heap_final_slots,
11450 gc_stat_sym_heap_marked_slots,
11451 gc_stat_sym_heap_eden_pages,
11452 gc_stat_sym_heap_tomb_pages,
11453 gc_stat_sym_total_allocated_pages,
11454 gc_stat_sym_total_freed_pages,
11455 gc_stat_sym_total_allocated_objects,
11456 gc_stat_sym_total_freed_objects,
11457 gc_stat_sym_malloc_increase_bytes,
11458 gc_stat_sym_malloc_increase_bytes_limit,
11459 gc_stat_sym_minor_gc_count,
11460 gc_stat_sym_major_gc_count,
11461 gc_stat_sym_compact_count,
11462 gc_stat_sym_read_barrier_faults,
11463 gc_stat_sym_total_moved_objects,
11464 gc_stat_sym_remembered_wb_unprotected_objects,
11465 gc_stat_sym_remembered_wb_unprotected_objects_limit,
11466 gc_stat_sym_old_objects,
11467 gc_stat_sym_old_objects_limit,
11468#if RGENGC_ESTIMATE_OLDMALLOC
11469 gc_stat_sym_oldmalloc_increase_bytes,
11470 gc_stat_sym_oldmalloc_increase_bytes_limit,
11472 gc_stat_sym_weak_references_count,
11474 gc_stat_sym_total_generated_normal_object_count,
11475 gc_stat_sym_total_generated_shady_object_count,
11476 gc_stat_sym_total_shade_operation_count,
11477 gc_stat_sym_total_promoted_count,
11478 gc_stat_sym_total_remembered_normal_object_count,
11479 gc_stat_sym_total_remembered_shady_object_count,
11484static VALUE gc_stat_symbols[gc_stat_sym_last];
11487setup_gc_stat_symbols(
void)
11489 if (gc_stat_symbols[0] == 0) {
11490#define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s))
11495 S(heap_allocated_pages);
11496 S(heap_sorted_length);
11497 S(heap_allocatable_pages);
11498 S(heap_available_slots);
11499 S(heap_live_slots);
11500 S(heap_free_slots);
11501 S(heap_final_slots);
11502 S(heap_marked_slots);
11503 S(heap_eden_pages);
11504 S(heap_tomb_pages);
11505 S(total_allocated_pages);
11506 S(total_freed_pages);
11507 S(total_allocated_objects);
11508 S(total_freed_objects);
11509 S(malloc_increase_bytes);
11510 S(malloc_increase_bytes_limit);
11514 S(read_barrier_faults);
11515 S(total_moved_objects);
11516 S(remembered_wb_unprotected_objects);
11517 S(remembered_wb_unprotected_objects_limit);
11519 S(old_objects_limit);
11520#if RGENGC_ESTIMATE_OLDMALLOC
11521 S(oldmalloc_increase_bytes);
11522 S(oldmalloc_increase_bytes_limit);
11524 S(weak_references_count);
11526 S(total_generated_normal_object_count);
11527 S(total_generated_shady_object_count);
11528 S(total_shade_operation_count);
11529 S(total_promoted_count);
11530 S(total_remembered_normal_object_count);
11531 S(total_remembered_shady_object_count);
11538ns_to_ms(uint64_t ns)
11540 return ns / (1000 * 1000);
11544gc_stat_internal(
VALUE hash_or_sym)
11549 setup_gc_stat_symbols();
11551 if (RB_TYPE_P(hash_or_sym,
T_HASH)) {
11552 hash = hash_or_sym;
11561#define SET(name, attr) \
11562 if (key == gc_stat_symbols[gc_stat_sym_##name]) \
11564 else if (hash != Qnil) \
11565 rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr));
11567 SET(count, objspace->profile.count);
11568 SET(time, (
size_t)ns_to_ms(objspace->profile.marking_time_ns + objspace->profile.sweeping_time_ns));
11569 SET(marking_time, (
size_t)ns_to_ms(objspace->profile.marking_time_ns));
11570 SET(sweeping_time, (
size_t)ns_to_ms(objspace->profile.sweeping_time_ns));
11573 SET(heap_allocated_pages, heap_allocated_pages);
11574 SET(heap_sorted_length, heap_pages_sorted_length);
11575 SET(heap_allocatable_pages, heap_allocatable_pages(objspace));
11576 SET(heap_available_slots, objspace_available_slots(objspace));
11577 SET(heap_live_slots, objspace_live_slots(objspace));
11578 SET(heap_free_slots, objspace_free_slots(objspace));
11579 SET(heap_final_slots, heap_pages_final_slots);
11580 SET(heap_marked_slots, objspace->marked_slots);
11581 SET(heap_eden_pages, heap_eden_total_pages(objspace));
11582 SET(heap_tomb_pages, heap_tomb_total_pages(objspace));
11583 SET(total_allocated_pages, total_allocated_pages(objspace));
11584 SET(total_freed_pages, total_freed_pages(objspace));
11585 SET(total_allocated_objects, total_allocated_objects(objspace));
11586 SET(total_freed_objects, total_freed_objects(objspace));
11587 SET(malloc_increase_bytes, malloc_increase);
11588 SET(malloc_increase_bytes_limit, malloc_limit);
11589 SET(minor_gc_count, objspace->profile.minor_gc_count);
11590 SET(major_gc_count, objspace->profile.major_gc_count);
11591 SET(compact_count, objspace->profile.compact_count);
11592 SET(read_barrier_faults, objspace->profile.read_barrier_faults);
11593 SET(total_moved_objects, objspace->rcompactor.total_moved);
11594 SET(remembered_wb_unprotected_objects, objspace->rgengc.uncollectible_wb_unprotected_objects);
11595 SET(remembered_wb_unprotected_objects_limit, objspace->rgengc.uncollectible_wb_unprotected_objects_limit);
11596 SET(old_objects, objspace->rgengc.old_objects);
11597 SET(old_objects_limit, objspace->rgengc.old_objects_limit);
11598#if RGENGC_ESTIMATE_OLDMALLOC
11599 SET(oldmalloc_increase_bytes, objspace->rgengc.oldmalloc_increase);
11600 SET(oldmalloc_increase_bytes_limit, objspace->rgengc.oldmalloc_increase_limit);
11604 SET(total_generated_normal_object_count, objspace->profile.total_generated_normal_object_count);
11605 SET(total_generated_shady_object_count, objspace->profile.total_generated_shady_object_count);
11606 SET(total_shade_operation_count, objspace->profile.total_shade_operation_count);
11607 SET(total_promoted_count, objspace->profile.total_promoted_count);
11608 SET(total_remembered_normal_object_count, objspace->profile.total_remembered_normal_object_count);
11609 SET(total_remembered_shady_object_count, objspace->profile.total_remembered_shady_object_count);
11614 rb_raise(rb_eArgError,
"unknown key: %"PRIsVALUE,
rb_sym2str(key));
11617#if defined(RGENGC_PROFILE) && RGENGC_PROFILE >= 2
11618 if (hash !=
Qnil) {
11619 gc_count_add_each_types(hash,
"generated_normal_object_count_types", objspace->profile.generated_normal_object_count_types);
11620 gc_count_add_each_types(hash,
"generated_shady_object_count_types", objspace->profile.generated_shady_object_count_types);
11621 gc_count_add_each_types(hash,
"shade_operation_count_types", objspace->profile.shade_operation_count_types);
11622 gc_count_add_each_types(hash,
"promoted_types", objspace->profile.promoted_types);
11623 gc_count_add_each_types(hash,
"remembered_normal_object_count_types", objspace->profile.remembered_normal_object_count_types);
11624 gc_count_add_each_types(hash,
"remembered_shady_object_count_types", objspace->profile.remembered_shady_object_count_types);
11635 arg = rb_hash_new();
11638 size_t value = gc_stat_internal(arg);
11641 else if (RB_TYPE_P(arg,
T_HASH)) {
11648 gc_stat_internal(arg);
11656 size_t value = gc_stat_internal(key);
11660 gc_stat_internal(key);
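/*
 * gc_stat_internal() serves both call forms of GC.stat used above: a Symbol
 * key returns that single counter as a size_t, while a Hash argument is
 * filled with every known key.  Illustrative Ruby-level usage (not part of
 * this file):
 *
 *     GC.stat                    #=> {count: ..., heap_live_slots: ..., ...}
 *     GC.stat(:heap_live_slots)  #=> Integer
 */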
11666enum gc_stat_heap_sym {
11667 gc_stat_heap_sym_slot_size,
11668 gc_stat_heap_sym_heap_allocatable_pages,
11669 gc_stat_heap_sym_heap_eden_pages,
11670 gc_stat_heap_sym_heap_eden_slots,
11671 gc_stat_heap_sym_heap_tomb_pages,
11672 gc_stat_heap_sym_heap_tomb_slots,
11673 gc_stat_heap_sym_total_allocated_pages,
11674 gc_stat_heap_sym_total_freed_pages,
11675 gc_stat_heap_sym_force_major_gc_count,
11676 gc_stat_heap_sym_force_incremental_marking_finish_count,
11677 gc_stat_heap_sym_total_allocated_objects,
11678 gc_stat_heap_sym_total_freed_objects,
11679 gc_stat_heap_sym_last
11682static VALUE gc_stat_heap_symbols[gc_stat_heap_sym_last];
11685setup_gc_stat_heap_symbols(
void)
11687 if (gc_stat_heap_symbols[0] == 0) {
11688#define S(s) gc_stat_heap_symbols[gc_stat_heap_sym_##s] = ID2SYM(rb_intern_const(#s))
11690 S(heap_allocatable_pages);
11691 S(heap_eden_pages);
11692 S(heap_eden_slots);
11693 S(heap_tomb_pages);
11694 S(heap_tomb_slots);
11695 S(total_allocated_pages);
11696 S(total_freed_pages);
11697 S(force_major_gc_count);
11698 S(force_incremental_marking_finish_count);
11699 S(total_allocated_objects);
11700 S(total_freed_objects);
11706gc_stat_heap_internal(
int size_pool_idx,
VALUE hash_or_sym)
11711 setup_gc_stat_heap_symbols();
11713 if (RB_TYPE_P(hash_or_sym,
T_HASH)) {
11714 hash = hash_or_sym;
11723 if (size_pool_idx < 0 || size_pool_idx >= SIZE_POOL_COUNT) {
11724 rb_raise(rb_eArgError,
"size pool index out of range");
11729#define SET(name, attr) \
11730 if (key == gc_stat_heap_symbols[gc_stat_heap_sym_##name]) \
11732 else if (hash != Qnil) \
11733 rb_hash_aset(hash, gc_stat_heap_symbols[gc_stat_heap_sym_##name], SIZET2NUM(attr));
11735 SET(slot_size, size_pool->slot_size);
11736 SET(heap_allocatable_pages, size_pool->allocatable_pages);
11737 SET(heap_eden_pages, SIZE_POOL_EDEN_HEAP(size_pool)->total_pages);
11738 SET(heap_eden_slots, SIZE_POOL_EDEN_HEAP(size_pool)->total_slots);
11739 SET(heap_tomb_pages, SIZE_POOL_TOMB_HEAP(size_pool)->total_pages);
11740 SET(heap_tomb_slots, SIZE_POOL_TOMB_HEAP(size_pool)->total_slots);
11741 SET(total_allocated_pages, size_pool->total_allocated_pages);
11742 SET(total_freed_pages, size_pool->total_freed_pages);
11743 SET(force_major_gc_count, size_pool->force_major_gc_count);
11744 SET(force_incremental_marking_finish_count, size_pool->force_incremental_marking_finish_count);
11745 SET(total_allocated_objects, size_pool->total_allocated_objects);
11746 SET(total_freed_objects, size_pool->total_freed_objects);
11750 rb_raise(rb_eArgError,
"unknown key: %"PRIsVALUE,
rb_sym2str(key));
11759 if (
NIL_P(heap_name)) {
11761 arg = rb_hash_new();
11763 else if (RB_TYPE_P(arg,
T_HASH)) {
11770 for (
int i = 0; i < SIZE_POOL_COUNT; i++) {
11773 hash = rb_hash_new();
11774 rb_hash_aset(arg,
INT2FIX(i), hash);
11776 gc_stat_heap_internal(i, hash);
11780 int size_pool_idx =
FIX2INT(heap_name);
11783 arg = rb_hash_new();
11786 size_t value = gc_stat_heap_internal(size_pool_idx, arg);
11789 else if (RB_TYPE_P(arg,
T_HASH)) {
11796 gc_stat_heap_internal(size_pool_idx, arg);
11799 rb_raise(
rb_eTypeError,
"heap_name must be nil or an Integer");
11809 return ruby_gc_stress_mode;
11815 objspace->flags.gc_stressful =
RTEST(flag);
11816 objspace->gc_stress_mode = flag;
11823 gc_stress_set(objspace, flag);
11831 return rb_objspace_gc_enable(objspace);
11837 int old = dont_gc_val();
11846 return rb_gc_enable();
11850rb_gc_disable_no_rest(
void)
11853 return gc_disable_no_rest(objspace);
11859 int old = dont_gc_val();
11868 return rb_objspace_gc_disable(objspace);
11875 return gc_disable_no_rest(objspace);
11881 return rb_gc_disable();
11884#if GC_CAN_COMPILE_COMPACTION
11898 GC_ASSERT(GC_COMPACTION_SUPPORTED);
11900 ruby_enable_autocompact =
RTEST(v);
11902#if RGENGC_CHECK_MODE
11903 ruby_autocompact_compare_func = NULL;
11907 if (
id == rb_intern(
"empty")) {
11908 ruby_autocompact_compare_func = compare_free_slots;
11916# define gc_set_auto_compact rb_f_notimplement
11919#if GC_CAN_COMPILE_COMPACTION
11927gc_get_auto_compact(
VALUE _)
11929 return RBOOL(ruby_enable_autocompact);
11932# define gc_get_auto_compact rb_f_notimplement
static int
get_envparam_size(const char *name, size_t *default_value, size_t lower_bound)
{
    const char *ptr = getenv(name);
    ssize_t val;

    if (ptr != NULL && *ptr) {
        size_t unit = 0;
        char *end;
#if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
        val = strtoll(ptr, &end, 0);
#else
        val = strtol(ptr, &end, 0);
#endif
        switch (*end) {
          case 'k': case 'K':
            unit = 1024;
            ++end;
            break;
          case 'm': case 'M':
            unit = 1024*1024;
            ++end;
            break;
          case 'g': case 'G':
            unit = 1024*1024*1024;
            ++end;
            break;
        }
        while (*end && isspace((unsigned char)*end)) end++;
        if (*end) {
            if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
            return 0;
        }
        if (unit > 0) {
            if (val < -(ssize_t)(SIZE_MAX / 2 / unit) || (ssize_t)(SIZE_MAX / 2 / unit) < val) {
                if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%s is ignored because it overflows\n", name, ptr);
                return 0;
            }
            val *= unit;
        }
        if (val > 0 && (size_t)val > lower_bound) {
            if (RTEST(ruby_verbose)) {
                fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE")\n", name, val, *default_value);
            }
            *default_value = (size_t)val;
            return 1;
        }
        else {
            if (RTEST(ruby_verbose)) {
                fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE") is ignored because it must be greater than %"PRIuSIZE".\n",
                        name, val, *default_value, lower_bound);
            }
            return 0;
        }
    }
    return 0;
}
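/*
 * get_envparam_size() accepts an optional k/m/g unit suffix, so for example
 * RUBY_GC_MALLOC_LIMIT=16m parses as 16 * 1024 * 1024 bytes.  Values that are
 * not greater than lower_bound, or that would overflow SIZE_MAX/2 after the
 * unit is applied, are reported under $VERBOSE and ignored.
 */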
static int
get_envparam_double(const char *name, double *default_value, double lower_bound, double upper_bound, int accept_zero)
{
    const char *ptr = getenv(name);
    double val;

    if (ptr != NULL && *ptr) {
        char *end;
        val = strtod(ptr, &end);
        if (!*ptr || *end) {
            if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
            return 0;
        }

        if (accept_zero && val == 0.0) {
            goto accept;
        }
        else if (val <= lower_bound) {
            if (RTEST(ruby_verbose)) {
                fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be greater than %f.\n",
                        name, val, *default_value, lower_bound);
            }
        }
        else if (upper_bound != 0.0 && /* an upper_bound of 0.0 means "no upper bound" */
                 val > upper_bound) {
            if (RTEST(ruby_verbose)) {
                fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be lower than %f.\n",
                        name, val, *default_value, upper_bound);
            }
        }
        else {
            goto accept;
        }
    }
    return 0;

  accept:
    if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%f (default value: %f)\n", name, val, *default_value);
    *default_value = val;
    return 1;
}
12040 for (
int i = 0; i < SIZE_POOL_COUNT; i++) {
12042 char env_key[
sizeof(
"RUBY_GC_HEAP_" "_INIT_SLOTS") +
DECIMAL_SIZE_OF_BITS(
sizeof(
int) * CHAR_BIT)];
12043 snprintf(env_key,
sizeof(env_key),
"RUBY_GC_HEAP_%d_INIT_SLOTS", i);
12045 size_t size_pool_init_slots = gc_params.size_pool_init_slots[i];
12046 if (get_envparam_size(env_key, &size_pool_init_slots, 0)) {
12047 gc_params.size_pool_init_slots[i] = size_pool_init_slots;
12050 if (size_pool_init_slots > size_pool->eden_heap.total_slots) {
12051 size_t slots = size_pool_init_slots - size_pool->eden_heap.total_slots;
12052 size_pool->allocatable_pages = slots_to_pages_for_size_pool(objspace, size_pool, slots);
12057 size_pool->allocatable_pages = 0;
12060 heap_pages_expand_sorted(objspace);
12104ruby_gc_set_params(
void)
12108 if (get_envparam_size(
"RUBY_GC_HEAP_FREE_SLOTS", &gc_params.heap_free_slots, 0)) {
12112 gc_set_initial_pages(objspace);
12114 get_envparam_double(
"RUBY_GC_HEAP_GROWTH_FACTOR", &gc_params.growth_factor, 1.0, 0.0, FALSE);
12115 get_envparam_size (
"RUBY_GC_HEAP_GROWTH_MAX_SLOTS", &gc_params.growth_max_slots, 0);
12116 get_envparam_double(
"RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO", &gc_params.heap_free_slots_min_ratio,
12118 get_envparam_double(
"RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO", &gc_params.heap_free_slots_max_ratio,
12119 gc_params.heap_free_slots_min_ratio, 1.0, FALSE);
12120 get_envparam_double(
"RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO", &gc_params.heap_free_slots_goal_ratio,
12121 gc_params.heap_free_slots_min_ratio, gc_params.heap_free_slots_max_ratio, TRUE);
12122 get_envparam_double(
"RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR", &gc_params.oldobject_limit_factor, 0.0, 0.0, TRUE);
12123 get_envparam_double(
"RUBY_GC_HEAP_REMEMBERED_WB_UNPROTECTED_OBJECTS_LIMIT_RATIO", &gc_params.uncollectible_wb_unprotected_objects_limit_ratio, 0.0, 0.0, TRUE);
12125 if (get_envparam_size(
"RUBY_GC_MALLOC_LIMIT", &gc_params.malloc_limit_min, 0)) {
12126 malloc_limit = gc_params.malloc_limit_min;
12128 get_envparam_size (
"RUBY_GC_MALLOC_LIMIT_MAX", &gc_params.malloc_limit_max, 0);
12129 if (!gc_params.malloc_limit_max) {
12130 gc_params.malloc_limit_max = SIZE_MAX;
12132 get_envparam_double(
"RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR", &gc_params.malloc_limit_growth_factor, 1.0, 0.0, FALSE);
12134#if RGENGC_ESTIMATE_OLDMALLOC
12135 if (get_envparam_size(
"RUBY_GC_OLDMALLOC_LIMIT", &gc_params.oldmalloc_limit_min, 0)) {
12136 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
12138 get_envparam_size (
"RUBY_GC_OLDMALLOC_LIMIT_MAX", &gc_params.oldmalloc_limit_max, 0);
12139 get_envparam_double(
"RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR", &gc_params.oldmalloc_limit_growth_factor, 1.0, 0.0, FALSE);
12144reachable_objects_from_callback(
VALUE obj)
12147 cr->mfd->mark_func(obj, cr->mfd->data);
12151rb_objspace_reachable_objects_from(
VALUE obj,
void (func)(
VALUE,
void *),
void *data)
12155 RB_VM_LOCK_ENTER();
12157 if (during_gc) rb_bug(
"rb_objspace_reachable_objects_from() is not supported while during_gc == true");
12159 if (is_markable_object(obj)) {
12161 struct gc_mark_func_data_struct mfd = {
12164 }, *prev_mfd = cr->mfd;
12167 gc_mark_children(objspace, obj);
12168 cr->mfd = prev_mfd;
12171 RB_VM_LOCK_LEAVE();
12175 const char *category;
12176 void (*func)(
const char *category,
VALUE,
void *);
12181root_objects_from(
VALUE obj,
void *ptr)
12184 (*data->func)(data->category, obj, data->data);
12188rb_objspace_reachable_objects_from_root(
void (func)(
const char *category,
VALUE,
void *),
void *passing_data)
12191 objspace_reachable_objects_from_root(objspace, func, passing_data);
12195objspace_reachable_objects_from_root(
rb_objspace_t *objspace,
void (func)(
const char *category,
VALUE,
void *),
void *passing_data)
12197 if (during_gc) rb_bug(
"objspace_reachable_objects_from_root() is not supported while during_gc == true");
12202 .data = passing_data,
12204 struct gc_mark_func_data_struct mfd = {
12205 .mark_func = root_objects_from,
12207 }, *prev_mfd = cr->mfd;
12210 gc_mark_roots(objspace, &data.category);
12211 cr->mfd = prev_mfd;
12225gc_vraise(
void *ptr)
12228 rb_vraise(argv->exc, argv->fmt, *argv->ap);
12233gc_raise(
VALUE exc,
const char *fmt, ...)
12241 if (ruby_thread_has_gvl_p()) {
12251 fprintf(stderr,
"%s",
"[FATAL] ");
12252 vfprintf(stderr, fmt, ap);
12259static void objspace_xfree(
rb_objspace_t *objspace,
void *ptr,
size_t size);
12262negative_size_allocation_error(
const char *msg)
12268ruby_memerror_body(
void *dummy)
12274NORETURN(
static void ruby_memerror(
void));
12279 if (ruby_thread_has_gvl_p()) {
12288 fprintf(stderr,
"[FATAL] failed to allocate memory\n");
12291 exit(EXIT_FAILURE);
12298 rb_objspace_t *objspace = rb_objspace_of(rb_ec_vm_ptr(ec));
12309 gc_exit(objspace, gc_enter_event_rb_memerror, NULL);
12314 rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
12315 fprintf(stderr,
"[FATAL] failed to allocate memory\n");
12316 exit(EXIT_FAILURE);
12318 if (rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
12319 rb_ec_raised_clear(ec);
12322 rb_ec_raised_set(ec, RAISED_NOMEMORY);
12323 exc = ruby_vm_special_exception_copy(exc);
12326 EC_JUMP_TAG(ec, TAG_RAISE);
static void *
rb_aligned_malloc(size_t alignment, size_t size)
{
    /* alignment must be a power of 2 and a multiple of sizeof(void*) */
    GC_ASSERT(((alignment - 1) & alignment) == 0);
    GC_ASSERT(alignment % sizeof(void*) == 0);

    void *res;

#if defined __MINGW32__
    res = __mingw_aligned_malloc(size, alignment);
#elif defined _WIN32
    void *_aligned_malloc(size_t, size_t);
    res = _aligned_malloc(size, alignment);
#elif defined(HAVE_POSIX_MEMALIGN)
    if (posix_memalign(&res, alignment, size) != 0) {
        return NULL;
    }
#elif defined(HAVE_MEMALIGN)
    res = memalign(alignment, size);
#else
    char *aligned;
    res = malloc(alignment + size + sizeof(void*));
    aligned = (char*)res + alignment + sizeof(void*);
    aligned -= ((VALUE)aligned & (alignment - 1));
    ((void**)aligned)[-1] = res;
    res = (void*)aligned;
#endif

    GC_ASSERT((uintptr_t)res % alignment == 0);

    return res;
}

static void
rb_aligned_free(void *ptr, size_t size)
{
#if defined __MINGW32__
    __mingw_aligned_free(ptr);
#elif defined _WIN32
    _aligned_free(ptr);
#elif defined(HAVE_POSIX_MEMALIGN) || defined(HAVE_MEMALIGN)
    free(ptr);
#else
    free(((void**)ptr)[-1]);
#endif
}
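/*
 * In the portable fallback above, rb_aligned_malloc() over-allocates by
 * alignment + sizeof(void*), rounds the payload pointer down to an alignment
 * boundary, and stashes the raw malloc() pointer in the word just before the
 * payload; rb_aligned_free() retrieves it via ((void**)ptr)[-1].  For example
 * (illustrative addresses only), with alignment 0x4000 a raw pointer of
 * 0x1234 gives 0x1234 + 0x4008 = 0x523c, which rounds down to a payload at
 * 0x4000.
 */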
12377static inline size_t
12378objspace_malloc_size(
rb_objspace_t *objspace,
void *ptr,
size_t hint)
12380#ifdef HAVE_MALLOC_USABLE_SIZE
12381 return malloc_usable_size(ptr);
12388 MEMOP_TYPE_MALLOC = 0,
static void
atomic_sub_nounderflow(size_t *var, size_t sub)
{
    if (sub == 0) return;

    while (1) {
        size_t val = *var;
        if (val < sub) sub = val;
        if (ATOMIC_SIZE_CAS(*var, val, val-sub) == val) break;
    }
}
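/*
 * atomic_sub_nounderflow() is a saturating decrement: it re-reads *var on
 * every iteration, clamps the amount to subtract so the counter cannot wrap
 * below zero, and retries the compare-and-swap until no other thread has
 * raced the update.
 */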
12409 unsigned int reason = (GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP |
12410 GPR_FLAG_STRESS | GPR_FLAG_MALLOC);
12412 if (gc_stress_full_mark_after_malloc_p()) {
12413 reason |= GPR_FLAG_FULL_MARK;
12415 garbage_collect_with_gvl(objspace, reason);
12420objspace_malloc_increase_report(
rb_objspace_t *objspace,
void *mem,
size_t new_size,
size_t old_size,
enum memop_type
type)
12422 if (0) fprintf(stderr,
"increase - ptr: %p, type: %s, new_size: %"PRIdSIZE
", old_size: %"PRIdSIZE
"\n",
12424 type == MEMOP_TYPE_MALLOC ?
"malloc" :
12425 type == MEMOP_TYPE_FREE ?
"free " :
12426 type == MEMOP_TYPE_REALLOC ?
"realloc":
"error",
12427 new_size, old_size);
12432objspace_malloc_increase_body(
rb_objspace_t *objspace,
void *mem,
size_t new_size,
size_t old_size,
enum memop_type
type)
12434 if (new_size > old_size) {
12435 ATOMIC_SIZE_ADD(malloc_increase, new_size - old_size);
12436#if RGENGC_ESTIMATE_OLDMALLOC
12437 ATOMIC_SIZE_ADD(objspace->rgengc.oldmalloc_increase, new_size - old_size);
12441 atomic_sub_nounderflow(&malloc_increase, old_size - new_size);
12442#if RGENGC_ESTIMATE_OLDMALLOC
12443 atomic_sub_nounderflow(&objspace->rgengc.oldmalloc_increase, old_size - new_size);
12447 if (
type == MEMOP_TYPE_MALLOC) {
12450 if (ruby_thread_has_gvl_p() && is_lazy_sweeping(objspace)) {
12454 garbage_collect_with_gvl(objspace, GPR_FLAG_MALLOC);
12458#if MALLOC_ALLOCATED_SIZE
12459 if (new_size >= old_size) {
12460 ATOMIC_SIZE_ADD(objspace->malloc_params.allocated_size, new_size - old_size);
12463 size_t dec_size = old_size - new_size;
12464 size_t allocated_size = objspace->malloc_params.allocated_size;
12466#if MALLOC_ALLOCATED_SIZE_CHECK
12467 if (allocated_size < dec_size) {
12468 rb_bug(
"objspace_malloc_increase: underflow malloc_params.allocated_size.");
12471 atomic_sub_nounderflow(&objspace->malloc_params.allocated_size, dec_size);
12475 case MEMOP_TYPE_MALLOC:
12476 ATOMIC_SIZE_INC(objspace->malloc_params.allocations);
12478 case MEMOP_TYPE_FREE:
12480 size_t allocations = objspace->malloc_params.allocations;
12481 if (allocations > 0) {
12482 atomic_sub_nounderflow(&objspace->malloc_params.allocations, 1);
12484#if MALLOC_ALLOCATED_SIZE_CHECK
12486 GC_ASSERT(objspace->malloc_params.allocations > 0);
12491 case MEMOP_TYPE_REALLOC:
break;
12497#define objspace_malloc_increase(...) \
12498 for (bool malloc_increase_done = objspace_malloc_increase_report(__VA_ARGS__); \
12499 !malloc_increase_done; \
12500 malloc_increase_done = objspace_malloc_increase_body(__VA_ARGS__))
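/*
 * objspace_malloc_increase() is a degenerate for-loop so that a statement
 * block can be attached to it: objspace_malloc_increase_report() runs first
 * (and evaluates to false), the attached block then runs exactly once, and
 * objspace_malloc_increase_body() runs as the loop "increment", doing the
 * real bookkeeping and ending the loop.  Used below as, e.g.:
 *
 *     objspace_malloc_increase(objspace, ptr, 0, old_size, MEMOP_TYPE_FREE) {
 *         free(ptr);
 *     }
 */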
12504#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12511#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12512const char *ruby_malloc_info_file;
12513int ruby_malloc_info_line;
12516static inline size_t
12517objspace_malloc_prepare(
rb_objspace_t *objspace,
size_t size)
12519 if (size == 0) size = 1;
12521#if CALC_EXACT_MALLOC_SIZE
12535 return during_gc && !dont_gc_val() && !rb_multi_ractor_p() && ruby_thread_has_gvl_p();
12538static inline void *
12539objspace_malloc_fixup(
rb_objspace_t *objspace,
void *mem,
size_t size)
12541 size = objspace_malloc_size(objspace, mem, size);
12542 objspace_malloc_increase(objspace, mem, size, 0, MEMOP_TYPE_MALLOC);
12544#if CALC_EXACT_MALLOC_SIZE
12548#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12549 info->gen = objspace->profile.count;
12550 info->file = ruby_malloc_info_file;
12551 info->line = info->file ? ruby_malloc_info_line : 0;
12560#if defined(__GNUC__) && RUBY_DEBUG
12561#define RB_BUG_INSTEAD_OF_RB_MEMERROR 1
12564#ifndef RB_BUG_INSTEAD_OF_RB_MEMERROR
12565# define RB_BUG_INSTEAD_OF_RB_MEMERROR 0
12568#define GC_MEMERROR(...) \
12569 ((RB_BUG_INSTEAD_OF_RB_MEMERROR+0) ? rb_bug("" __VA_ARGS__) : rb_memerror())
12571#define TRY_WITH_GC(siz, expr) do { \
12572 const gc_profile_record_flag gpr = \
12573 GPR_FLAG_FULL_MARK | \
12574 GPR_FLAG_IMMEDIATE_MARK | \
12575 GPR_FLAG_IMMEDIATE_SWEEP | \
12577 objspace_malloc_gc_stress(objspace); \
12579 if (LIKELY((expr))) { \
12582 else if (!garbage_collect_with_gvl(objspace, gpr)) { \
12584 GC_MEMERROR("TRY_WITH_GC: could not GC"); \
12586 else if ((expr)) { \
12590 GC_MEMERROR("TRY_WITH_GC: could not allocate:" \
12591 "%"PRIdSIZE" bytes for %s", \
12597check_malloc_not_in_gc(
rb_objspace_t *objspace,
const char *msg)
12599 if (UNLIKELY(malloc_during_gc_p(objspace))) {
12602 rb_bug(
"Cannot %s during GC", msg);
12612 check_malloc_not_in_gc(objspace,
"malloc");
12616 size = objspace_malloc_prepare(objspace, size);
12617 TRY_WITH_GC(size, mem = malloc(size));
12618 RB_DEBUG_COUNTER_INC(heap_xmalloc);
12619 return objspace_malloc_fixup(objspace, mem, size);
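/*
 * TRY_WITH_GC() (defined above) is the allocation retry protocol used by
 * objspace_xmalloc0() and the other allocators: evaluate the allocation
 * expression once and, if it fails, run a full immediate GC and evaluate it a
 * second time.  Only when the retry also fails (or the GC itself cannot run)
 * does it bail out through GC_MEMERROR(), which becomes rb_bug() when
 * RB_BUG_INSTEAD_OF_RB_MEMERROR is set and rb_memerror() otherwise.
 */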
12622static inline size_t
12623xmalloc2_size(
const size_t count,
const size_t elsize)
12625 return size_mul_or_raise(count, elsize, rb_eArgError);
12629objspace_xrealloc(
rb_objspace_t *objspace,
void *ptr,
size_t new_size,
size_t old_size)
12631 check_malloc_not_in_gc(objspace,
"realloc");
12635 if (!ptr)
return objspace_xmalloc0(objspace, new_size);
12642 if (new_size == 0) {
12643 if ((mem = objspace_xmalloc0(objspace, 0)) != NULL) {
12666 objspace_xfree(objspace, ptr, old_size);
12680#if CALC_EXACT_MALLOC_SIZE
12685 old_size = info->size;
12689 old_size = objspace_malloc_size(objspace, ptr, old_size);
12691 new_size = objspace_malloc_size(objspace, mem, new_size);
12693#if CALC_EXACT_MALLOC_SIZE
12696 info->size = new_size;
12701 objspace_malloc_increase(objspace, mem, new_size, old_size, MEMOP_TYPE_REALLOC);
12703 RB_DEBUG_COUNTER_INC(heap_xrealloc);
12707#if CALC_EXACT_MALLOC_SIZE && USE_GC_MALLOC_OBJ_INFO_DETAILS
12709#define MALLOC_INFO_GEN_SIZE 100
12710#define MALLOC_INFO_SIZE_SIZE 10
12711static size_t malloc_info_gen_cnt[MALLOC_INFO_GEN_SIZE];
12712static size_t malloc_info_gen_size[MALLOC_INFO_GEN_SIZE];
12713static size_t malloc_info_size[MALLOC_INFO_SIZE_SIZE+1];
12714static st_table *malloc_info_file_table;
12717mmalloc_info_file_i(st_data_t key, st_data_t val, st_data_t dmy)
12719 const char *file = (
void *)key;
12720 const size_t *data = (
void *)val;
12722 fprintf(stderr,
"%s\t%"PRIdSIZE
"\t%"PRIdSIZE
"\n", file, data[0], data[1]);
12724 return ST_CONTINUE;
12729rb_malloc_info_show_results(
void)
12733 fprintf(stderr,
"* malloc_info gen statistics\n");
12734 for (i=0; i<MALLOC_INFO_GEN_SIZE; i++) {
12735 if (i == MALLOC_INFO_GEN_SIZE-1) {
12736 fprintf(stderr,
"more\t%"PRIdSIZE
"\t%"PRIdSIZE
"\n", malloc_info_gen_cnt[i], malloc_info_gen_size[i]);
12739 fprintf(stderr,
"%d\t%"PRIdSIZE
"\t%"PRIdSIZE
"\n", i, malloc_info_gen_cnt[i], malloc_info_gen_size[i]);
12743 fprintf(stderr,
"* malloc_info size statistics\n");
12744 for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
12746 fprintf(stderr,
"%d\t%"PRIdSIZE
"\n", s, malloc_info_size[i]);
12748 fprintf(stderr,
"more\t%"PRIdSIZE
"\n", malloc_info_size[i]);
12750 if (malloc_info_file_table) {
12751 fprintf(stderr,
"* malloc_info file statistics\n");
12752 st_foreach(malloc_info_file_table, mmalloc_info_file_i, 0);
12757rb_malloc_info_show_results(
void)
12763objspace_xfree(
rb_objspace_t *objspace,
void *ptr,
size_t old_size)
12772#if CALC_EXACT_MALLOC_SIZE
12775 old_size = info->size;
12777#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12779 int gen = (int)(objspace->profile.count - info->gen);
12780 int gen_index = gen >= MALLOC_INFO_GEN_SIZE ? MALLOC_INFO_GEN_SIZE-1 : gen;
12783 malloc_info_gen_cnt[gen_index]++;
12784 malloc_info_gen_size[gen_index] += info->size;
12786 for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
12787 size_t s = 16 << i;
12788 if (info->size <= s) {
12789 malloc_info_size[i]++;
12793 malloc_info_size[i]++;
12797 st_data_t key = (st_data_t)info->file, d;
12800 if (malloc_info_file_table == NULL) {
12801 malloc_info_file_table = st_init_numtable_with_size(1024);
12803 if (st_lookup(malloc_info_file_table, key, &d)) {
12805 data = (
size_t *)d;
12808 data = malloc(xmalloc2_size(2,
sizeof(
size_t)));
12809 if (data == NULL) rb_bug(
"objspace_xfree: can not allocate memory");
12810 data[0] = data[1] = 0;
12811 st_insert(malloc_info_file_table, key, (st_data_t)data);
12814 data[1] += info->size;
12816 if (0 && gen >= 2) {
12818 fprintf(stderr,
"free - size:%"PRIdSIZE
", gen:%d, pos: %s:%"PRIdSIZE
"\n",
12819 info->size, gen, info->file, info->line);
12822 fprintf(stderr,
"free - size:%"PRIdSIZE
", gen:%d\n",
12829 old_size = objspace_malloc_size(objspace, ptr, old_size);
12831 objspace_malloc_increase(objspace, ptr, 0, old_size, MEMOP_TYPE_FREE) {
12834 RB_DEBUG_COUNTER_INC(heap_xfree);
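/*
 * Illustrative sketch, not part of gc.c: the per-file statistics above use a
 * common st_table pattern, keyed numerically by a string pointer and storing
 * a malloc'ed {count, total_size} pair per key.  The names below are
 * hypothetical.
 */
#include <stdlib.h>
#include <ruby/st.h>

static st_table *site_stats;

static void
record_site(const char *file, size_t size)
{
    st_data_t d;
    size_t *data;

    if (site_stats == NULL) site_stats = st_init_numtable();

    if (st_lookup(site_stats, (st_data_t)file, &d)) {
        data = (size_t *)d;
    }
    else {
        data = malloc(2 * sizeof(size_t));
        if (data == NULL) return;  /* illustration only: drop the sample */
        data[0] = data[1] = 0;
        st_insert(site_stats, (st_data_t)file, (st_data_t)data);
    }
    data[0]++;          /* number of events seen at this site */
    data[1] += size;    /* total bytes seen at this site */
}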
static void *
ruby_xmalloc0(size_t size)
{
    return objspace_xmalloc0(&rb_objspace, size);
}

void *
ruby_xmalloc_body(size_t size)
{
    if ((ssize_t)size < 0) {
        negative_size_allocation_error("too large allocation size");
    }
    return ruby_xmalloc0(size);
}

void
ruby_malloc_size_overflow(size_t count, size_t elsize)
{
    rb_raise(rb_eArgError,
             "malloc: possible integer overflow (%"PRIuSIZE"*%"PRIuSIZE")",
             count, elsize);
}

void *
ruby_xmalloc2_body(size_t n, size_t size)
{
    return objspace_xmalloc0(&rb_objspace, xmalloc2_size(n, size));
}
static void *
objspace_xcalloc(rb_objspace_t *objspace, size_t size)
{
    void *mem;

    if (UNLIKELY(malloc_during_gc_p(objspace))) {
        rb_warn("calloc during GC detected, this could cause crashes if it triggers another GC");
#if RGENGC_CHECK_MODE || RUBY_DEBUG
        rb_bug("Cannot calloc during GC");
#endif
    }

    size = objspace_malloc_prepare(objspace, size);
    TRY_WITH_GC(size, mem = calloc1(size));
    return objspace_malloc_fixup(objspace, mem, size);
}

void *
ruby_xcalloc_body(size_t n, size_t size)
{
    return objspace_xcalloc(&rb_objspace, xmalloc2_size(n, size));
}
#ifdef ruby_sized_xrealloc
#undef ruby_sized_xrealloc
#endif
void *
ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size)
{
    if ((ssize_t)new_size < 0) {
        negative_size_allocation_error("too large allocation size");
    }

    return objspace_xrealloc(&rb_objspace, ptr, new_size, old_size);
}

void *
ruby_xrealloc_body(void *ptr, size_t new_size)
{
    return ruby_sized_xrealloc(ptr, new_size, 0);
}
#ifdef ruby_sized_xrealloc2
#undef ruby_sized_xrealloc2
#endif
void *
ruby_sized_xrealloc2(void *ptr, size_t n, size_t size, size_t old_n)
{
    size_t len = xmalloc2_size(n, size);
    return objspace_xrealloc(&rb_objspace, ptr, len, old_n * size);
}

void *
ruby_xrealloc2_body(void *ptr, size_t n, size_t size)
{
    return ruby_sized_xrealloc2(ptr, n, size, 0);
}

#ifdef ruby_sized_xfree
#undef ruby_sized_xfree
#endif
void
ruby_sized_xfree(void *x, size_t size)
{
    if (LIKELY(x)) {
        /* A C extension's thread-local destructor may run after the VM has
         * been destructed; fall back to a plain free in that case. */
        if (LIKELY(GET_VM())) {
            objspace_xfree(&rb_objspace, x, size);
        }
        else {
            ruby_mimfree(x);
        }
    }
}

void
ruby_xfree(void *x)
{
    ruby_sized_xfree(x, 0);
}
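/*
 * Illustrative sketch, not part of gc.c: code that tracks its buffer sizes
 * can pass them to the sized variants defined above, so the GC's malloc
 * accounting sees the exact number of bytes released instead of falling
 * back to malloc_usable_size() or 0.  The buffer type and helpers below are
 * hypothetical, and whether the sized variants are visible to extensions
 * depends on the public headers of the Ruby version in use.
 */
#include <ruby.h>

struct byte_buf { char *ptr; size_t capa; };

static void
byte_buf_resize(struct byte_buf *b, size_t new_capa)
{
    b->ptr = ruby_sized_xrealloc(b->ptr, new_capa, b->capa);
    b->capa = new_capa;
}

static void
byte_buf_free(struct byte_buf *b)
{
    ruby_sized_xfree(b->ptr, b->capa);
    b->ptr = NULL;
    b->capa = 0;
}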
void *
rb_xmalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
{
    size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
    return ruby_xmalloc(w);
}

void *
rb_xcalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
{
    size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
    return ruby_xcalloc(w, 1);
}

void *
rb_xrealloc_mul_add(const void *p, size_t x, size_t y, size_t z) /* x * y + z */
{
    size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
    return ruby_xrealloc((void *)p, w);
}

void *
rb_xmalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
{
    size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
    return ruby_xmalloc(u);
}

void *
rb_xcalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
{
    size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
    return ruby_xcalloc(u, 1);
}
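/*
 * Illustrative sketch, not part of gc.c: the *_mul_add helpers above compute
 * x * y + z with overflow checking, which is the natural shape for
 * "header plus n elements" allocations.  These helpers are internal to the
 * interpreter; the struct and function below are hypothetical.
 */
#include <stddef.h>
#include <ruby.h>

struct dbl_vec {
    size_t len;
    double items[1];  /* allocated with room for `len` entries */
};

static struct dbl_vec *
dbl_vec_new(size_t n)
{
    /* size = n * sizeof(double) + offsetof(struct dbl_vec, items), checked */
    struct dbl_vec *v = rb_xmalloc_mul_add(n, sizeof(double), offsetof(struct dbl_vec, items));
    v->len = n;
    return v;
}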
12989ruby_mimmalloc(
size_t size)
12992#if CALC_EXACT_MALLOC_SIZE
12995 mem = malloc(size);
12996#if CALC_EXACT_MALLOC_SIZE
13005#if USE_GC_MALLOC_OBJ_INFO_DETAILS
13017ruby_mimfree(
void *ptr)
13019#if CALC_EXACT_MALLOC_SIZE
13027rb_alloc_tmp_buffer_with_count(
volatile VALUE *store,
size_t size,
size_t cnt)
13035 imemo = rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(NULL, 0);
13037 ptr = ruby_xmalloc0(size);
13045rb_alloc_tmp_buffer(
volatile VALUE *store,
long len)
13049 if (
len < 0 || (cnt = (
long)roomof(
len,
sizeof(
VALUE))) < 0) {
13050 rb_raise(rb_eArgError,
"negative buffer size (or size too big)");
13053 return rb_alloc_tmp_buffer_with_count(store,
len, cnt);
13057rb_free_tmp_buffer(
volatile VALUE *store)
13061 void *ptr = ATOMIC_PTR_EXCHANGE(s->ptr, 0);
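/*
 * Illustrative sketch, not part of gc.c: the public ALLOCV_N()/ALLOCV_END()
 * macros are built on rb_alloc_tmp_buffer()/rb_free_tmp_buffer().  The
 * temporary buffer is owned by a hidden imemo object stored in `holder`, so
 * it stays reachable while in use and is reclaimed by the GC even if an
 * exception escapes before ALLOCV_END().  The function below is
 * hypothetical.
 */
#include <ruby.h>

static VALUE
sum_array_of_longs(VALUE ary)
{
    long i, n = RARRAY_LEN(ary);
    VALUE holder;
    long *tmp = ALLOCV_N(long, holder, n);
    long sum = 0;

    for (i = 0; i < n; i++) {
        tmp[i] = NUM2LONG(RARRAY_AREF(ary, i));
    }
    for (i = 0; i < n; i++) {
        sum += tmp[i];
    }

    ALLOCV_END(holder);
    return LONG2NUM(sum);
}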
13067#if MALLOC_ALLOCATED_SIZE
13078gc_malloc_allocated_size(
VALUE self)
13093gc_malloc_allocations(
VALUE self)
void
rb_gc_adjust_memory_usage(ssize_t diff)
{
    unless_objspace(objspace) { return; }

    if (diff > 0) {
        objspace_malloc_increase(objspace, 0, diff, 0, MEMOP_TYPE_REALLOC);
    }
    else if (diff < 0) {
        objspace_malloc_increase(objspace, 0, 0, -diff, MEMOP_TYPE_REALLOC);
    }
}
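/*
 * Illustrative sketch, not part of gc.c: rb_gc_adjust_memory_usage() lets a
 * C extension report memory that Ruby's allocators never saw (for example,
 * memory allocated by a third-party library), so the malloc-triggered GC
 * heuristics account for it.  The wrapper type below is hypothetical and
 * error handling is elided.
 */
#include <ruby.h>
#include <stdlib.h>

struct image { void *pixels; size_t bytes; };

static void
image_set_pixels(struct image *img, size_t new_bytes)
{
    free(img->pixels);
    img->pixels = malloc(new_bytes);  /* allocated outside ruby_xmalloc() */
    rb_gc_adjust_memory_usage((ssize_t)new_bytes - (ssize_t)img->bytes);
    img->bytes = new_bytes;
}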
13116#define GC_PROFILE_RECORD_DEFAULT_SIZE 100
13119current_process_time(
struct timespec *ts)
13121#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_PROCESS_CPUTIME_ID)
13123 static int try_clock_gettime = 1;
13124 if (try_clock_gettime && clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ts) == 0) {
13128 try_clock_gettime = 0;
13135 struct rusage usage;
13137 if (getrusage(RUSAGE_SELF, &usage) == 0) {
13138 time = usage.ru_utime;
13139 ts->tv_sec = time.tv_sec;
13140 ts->tv_nsec = (int32_t)time.tv_usec * 1000;
13148 FILETIME creation_time, exit_time, kernel_time, user_time;
13151 if (GetProcessTimes(GetCurrentProcess(),
13152 &creation_time, &exit_time, &kernel_time, &user_time) != 0) {
13153 memcpy(&ui, &user_time,
sizeof(FILETIME));
13154#define PER100NSEC (uint64_t)(1000 * 1000 * 10)
13155 ts->tv_nsec = (long)(ui.QuadPart % PER100NSEC);
13156 ts->tv_sec = (time_t)(ui.QuadPart / PER100NSEC);
static double
getrusage_time(void)
{
    struct timespec ts;
    if (current_process_time(&ts)) {
        return ts.tv_sec + ts.tv_nsec * 1e-9;
    }
    else {
        return 0.0;
    }
}
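/*
 * Illustrative sketch, not part of gc.c: the same "prefer the per-process
 * CPU clock, fall back to getrusage()" idea as current_process_time(),
 * reduced to a single POSIX-only helper (the Windows branch and the
 * try-once caching are omitted).
 */
#include <time.h>
#include <sys/time.h>
#include <sys/resource.h>

static double
cpu_seconds(void)
{
#if defined(CLOCK_PROCESS_CPUTIME_ID)
    struct timespec ts;
    if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts) == 0) {
        return ts.tv_sec + ts.tv_nsec * 1e-9;
    }
#endif
    {
        struct rusage usage;
        if (getrusage(RUSAGE_SELF, &usage) == 0) {
            return usage.ru_utime.tv_sec + usage.ru_utime.tv_usec * 1e-6;
        }
    }
    return 0.0;
}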
13179gc_prof_setup_new_record(
rb_objspace_t *objspace,
unsigned int reason)
13181 if (objspace->profile.run) {
13182 size_t index = objspace->profile.next_index;
13186 objspace->profile.next_index++;
13188 if (!objspace->profile.records) {
13189 objspace->profile.size = GC_PROFILE_RECORD_DEFAULT_SIZE;
13190 objspace->profile.records = malloc(xmalloc2_size(
sizeof(
gc_profile_record), objspace->profile.size));
13192 if (index >= objspace->profile.size) {
13194 objspace->profile.size += 1000;
13195 ptr = realloc(objspace->profile.records, xmalloc2_size(
sizeof(
gc_profile_record), objspace->profile.size));
13196 if (!ptr) rb_memerror();
13197 objspace->profile.records = ptr;
13199 if (!objspace->profile.records) {
13200 rb_bug(
"gc_profile malloc or realloc miss");
13202 record = objspace->profile.current_record = &objspace->profile.records[objspace->profile.next_index - 1];
13206 record->flags = reason | (ruby_gc_stressful ? GPR_FLAG_STRESS : 0);
13207#if MALLOC_ALLOCATED_SIZE
13208 record->allocated_size = malloc_allocated_size;
13210#if GC_PROFILE_MORE_DETAIL && GC_PROFILE_DETAIL_MEMORY
13213 struct rusage usage;
13214 if (getrusage(RUSAGE_SELF, &usage) == 0) {
13215 record->maxrss = usage.ru_maxrss;
13216 record->minflt = usage.ru_minflt;
13217 record->majflt = usage.ru_majflt;
13228 if (gc_prof_enabled(objspace)) {
13230#if GC_PROFILE_MORE_DETAIL
13231 record->prepare_time = objspace->profile.prepare_time;
13233 record->gc_time = 0;
13234 record->gc_invoke_time = getrusage_time();
13239elapsed_time_from(
double time)
13241 double now = getrusage_time();
13253 if (gc_prof_enabled(objspace)) {
13255 record->gc_time = elapsed_time_from(record->gc_invoke_time);
13256 record->gc_invoke_time -= objspace->profile.invoke_time;
13260#define RUBY_DTRACE_GC_HOOK(name) \
13261 do {if (RUBY_DTRACE_GC_##name##_ENABLED()) RUBY_DTRACE_GC_##name();} while (0)
13265 RUBY_DTRACE_GC_HOOK(MARK_BEGIN);
13266#if GC_PROFILE_MORE_DETAIL
13267 if (gc_prof_enabled(objspace)) {
13268 gc_prof_record(objspace)->gc_mark_time = getrusage_time();
13276 RUBY_DTRACE_GC_HOOK(MARK_END);
13277#if GC_PROFILE_MORE_DETAIL
13278 if (gc_prof_enabled(objspace)) {
13280 record->gc_mark_time = elapsed_time_from(record->gc_mark_time);
13288 RUBY_DTRACE_GC_HOOK(SWEEP_BEGIN);
13289 if (gc_prof_enabled(objspace)) {
13292 if (record->gc_time > 0 || GC_PROFILE_MORE_DETAIL) {
13293 objspace->profile.gc_sweep_start_time = getrusage_time();
13301 RUBY_DTRACE_GC_HOOK(SWEEP_END);
13303 if (gc_prof_enabled(objspace)) {
13307 if (record->gc_time > 0) {
13308 sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
13310 record->gc_time += sweep_time;
13312 else if (GC_PROFILE_MORE_DETAIL) {
13313 sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
13316#if GC_PROFILE_MORE_DETAIL
13317 record->gc_sweep_time += sweep_time;
13318 if (heap_pages_deferred_final) record->flags |= GPR_FLAG_HAVE_FINALIZE;
13320 if (heap_pages_deferred_final) objspace->profile.latest_gc_info |= GPR_FLAG_HAVE_FINALIZE;
13327#if GC_PROFILE_MORE_DETAIL
13328 if (gc_prof_enabled(objspace)) {
13330 record->allocate_increase = malloc_increase;
13331 record->allocate_limit = malloc_limit;
13339 if (gc_prof_enabled(objspace)) {
13341 size_t live = objspace->profile.total_allocated_objects_at_gc_start - total_freed_objects(objspace);
13342 size_t total = objspace->profile.heap_used_at_gc_start * HEAP_PAGE_OBJ_LIMIT;
13344#if GC_PROFILE_MORE_DETAIL
13345 record->heap_use_pages = objspace->profile.heap_used_at_gc_start;
13346 record->heap_live_objects = live;
13347 record->heap_free_objects = total - live;
13350 record->heap_total_objects = total;
13351 record->heap_use_size = live *
sizeof(
RVALUE);
13352 record->heap_total_size = total *
sizeof(
RVALUE);
13365gc_profile_clear(
VALUE _)
13368 void *p = objspace->profile.records;
13369 objspace->profile.records = NULL;
13370 objspace->profile.size = 0;
13371 objspace->profile.next_index = 0;
13372 objspace->profile.current_record = 0;
13428gc_profile_record_get(
VALUE _)
13431 VALUE gc_profile = rb_ary_new();
13435 if (!objspace->profile.run) {
13439 for (i =0; i < objspace->profile.next_index; i++) {
13442 prof = rb_hash_new();
13443 rb_hash_aset(prof,
ID2SYM(rb_intern(
"GC_FLAGS")), gc_info_decode(objspace, rb_hash_new(), record->flags));
13444 rb_hash_aset(prof,
ID2SYM(rb_intern(
"GC_TIME")),
DBL2NUM(record->gc_time));
13445 rb_hash_aset(prof,
ID2SYM(rb_intern(
"GC_INVOKE_TIME")),
DBL2NUM(record->gc_invoke_time));
13446 rb_hash_aset(prof,
ID2SYM(rb_intern(
"HEAP_USE_SIZE")),
SIZET2NUM(record->heap_use_size));
13447 rb_hash_aset(prof,
ID2SYM(rb_intern(
"HEAP_TOTAL_SIZE")),
SIZET2NUM(record->heap_total_size));
13448 rb_hash_aset(prof,
ID2SYM(rb_intern(
"HEAP_TOTAL_OBJECTS")),
SIZET2NUM(record->heap_total_objects));
13449 rb_hash_aset(prof,
ID2SYM(rb_intern(
"MOVED_OBJECTS")),
SIZET2NUM(record->moved_objects));
13450 rb_hash_aset(prof,
ID2SYM(rb_intern(
"GC_IS_MARKED")),
Qtrue);
13451#if GC_PROFILE_MORE_DETAIL
13452 rb_hash_aset(prof,
ID2SYM(rb_intern(
"GC_MARK_TIME")),
DBL2NUM(record->gc_mark_time));
13453 rb_hash_aset(prof,
ID2SYM(rb_intern(
"GC_SWEEP_TIME")),
DBL2NUM(record->gc_sweep_time));
13454 rb_hash_aset(prof,
ID2SYM(rb_intern(
"ALLOCATE_INCREASE")),
SIZET2NUM(record->allocate_increase));
13455 rb_hash_aset(prof,
ID2SYM(rb_intern(
"ALLOCATE_LIMIT")),
SIZET2NUM(record->allocate_limit));
13456 rb_hash_aset(prof,
ID2SYM(rb_intern(
"HEAP_USE_PAGES")),
SIZET2NUM(record->heap_use_pages));
13457 rb_hash_aset(prof,
ID2SYM(rb_intern(
"HEAP_LIVE_OBJECTS")),
SIZET2NUM(record->heap_live_objects));
13458 rb_hash_aset(prof,
ID2SYM(rb_intern(
"HEAP_FREE_OBJECTS")),
SIZET2NUM(record->heap_free_objects));
13460 rb_hash_aset(prof,
ID2SYM(rb_intern(
"REMOVING_OBJECTS")),
SIZET2NUM(record->removing_objects));
13461 rb_hash_aset(prof,
ID2SYM(rb_intern(
"EMPTY_OBJECTS")),
SIZET2NUM(record->empty_objects));
13463 rb_hash_aset(prof,
ID2SYM(rb_intern(
"HAVE_FINALIZE")), RBOOL(record->flags & GPR_FLAG_HAVE_FINALIZE));
13466#if RGENGC_PROFILE > 0
13467 rb_hash_aset(prof,
ID2SYM(rb_intern(
"OLD_OBJECTS")),
SIZET2NUM(record->old_objects));
13468 rb_hash_aset(prof,
ID2SYM(rb_intern(
"REMEMBERED_NORMAL_OBJECTS")),
SIZET2NUM(record->remembered_normal_objects));
13469 rb_hash_aset(prof,
ID2SYM(rb_intern(
"REMEMBERED_SHADY_OBJECTS")),
SIZET2NUM(record->remembered_shady_objects));
13471 rb_ary_push(gc_profile, prof);
13477#if GC_PROFILE_MORE_DETAIL
13478#define MAJOR_REASON_MAX 0x10
13481gc_profile_dump_major_reason(
unsigned int flags,
char *buff)
13483 unsigned int reason = flags & GPR_FLAG_MAJOR_MASK;
13486 if (reason == GPR_FLAG_NONE) {
13492 if (reason & GPR_FLAG_MAJOR_BY_##x) { \
13493 buff[i++] = #x[0]; \
13494 if (i >= MAJOR_REASON_MAX) rb_bug("gc_profile_dump_major_reason: overflow"); \
13500#if RGENGC_ESTIMATE_OLDMALLOC
13513 size_t count = objspace->profile.next_index;
13514#ifdef MAJOR_REASON_MAX
13515 char reason_str[MAJOR_REASON_MAX];
13518 if (objspace->profile.run && count ) {
13522 append(out, rb_sprintf(
"GC %"PRIuSIZE
" invokes.\n", objspace->profile.count));
13523 append(out,
rb_str_new_cstr(
"Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC Time(ms)\n"));
13525 for (i = 0; i < count; i++) {
13526 record = &objspace->profile.records[i];
13527 append(out, rb_sprintf(
"%5"PRIuSIZE
" %19.3f %20"PRIuSIZE
" %20"PRIuSIZE
" %20"PRIuSIZE
" %30.20f\n",
13528 i+1, record->gc_invoke_time, record->heap_use_size,
13529 record->heap_total_size, record->heap_total_objects, record->gc_time*1000));
13532#if GC_PROFILE_MORE_DETAIL
13533 const char *str =
"\n\n" \
13535 "Prepare Time = Previously GC's rest sweep time\n"
13536 "Index Flags Allocate Inc. Allocate Limit"
13537#if CALC_EXACT_MALLOC_SIZE
13540 " Use Page Mark Time(ms) Sweep Time(ms) Prepare Time(ms) LivingObj FreeObj RemovedObj EmptyObj"
13542 " OldgenObj RemNormObj RemShadObj"
13544#if GC_PROFILE_DETAIL_MEMORY
13545 " MaxRSS(KB) MinorFLT MajorFLT"
13550 for (i = 0; i < count; i++) {
13551 record = &objspace->profile.records[i];
13552 append(out, rb_sprintf(
"%5"PRIuSIZE
" %4s/%c/%6s%c %13"PRIuSIZE
" %15"PRIuSIZE
13553#
if CALC_EXACT_MALLOC_SIZE
13556 " %9"PRIuSIZE
" %17.12f %17.12f %17.12f %10"PRIuSIZE
" %10"PRIuSIZE
" %10"PRIuSIZE
" %10"PRIuSIZE
13558 "%10"PRIuSIZE
" %10"PRIuSIZE
" %10"PRIuSIZE
13560#
if GC_PROFILE_DETAIL_MEMORY
13566 gc_profile_dump_major_reason(record->flags, reason_str),
13567 (record->flags & GPR_FLAG_HAVE_FINALIZE) ?
'F' :
'.',
13568 (record->flags & GPR_FLAG_NEWOBJ) ?
"NEWOBJ" :
13569 (record->flags & GPR_FLAG_MALLOC) ?
"MALLOC" :
13570 (record->flags & GPR_FLAG_METHOD) ?
"METHOD" :
13571 (record->flags & GPR_FLAG_CAPI) ?
"CAPI__" :
"??????",
13572 (record->flags & GPR_FLAG_STRESS) ?
'!' :
' ',
13573 record->allocate_increase, record->allocate_limit,
13574#
if CALC_EXACT_MALLOC_SIZE
13575 record->allocated_size,
13577 record->heap_use_pages,
13578 record->gc_mark_time*1000,
13579 record->gc_sweep_time*1000,
13580 record->prepare_time*1000,
13582 record->heap_live_objects,
13583 record->heap_free_objects,
13584 record->removing_objects,
13585 record->empty_objects
13588 record->old_objects,
13589 record->remembered_normal_objects,
13590 record->remembered_shady_objects
13592#
if GC_PROFILE_DETAIL_MEMORY
13594 record->maxrss / 1024,
13617gc_profile_result(
VALUE _)
13619 VALUE str = rb_str_buf_new(0);
13620 gc_profile_dump_on(str, rb_str_buf_append);
13634gc_profile_report(
int argc,
VALUE *argv,
VALUE self)
13639 gc_profile_dump_on(out, rb_io_write);
13652gc_profile_total_time(
VALUE self)
13657 if (objspace->profile.run && objspace->profile.next_index > 0) {
13659 size_t count = objspace->profile.next_index;
13661 for (i = 0; i < count; i++) {
13662 time += objspace->profile.records[i].gc_time;
static VALUE
gc_profile_enable_get(VALUE self)
{
    rb_objspace_t *objspace = &rb_objspace;
    return RBOOL(objspace->profile.run);
}

static VALUE
gc_profile_enable(VALUE _)
{
    rb_objspace_t *objspace = &rb_objspace;
    objspace->profile.run = TRUE;
    objspace->profile.current_record = 0;
    return Qnil;
}

static VALUE
gc_profile_disable(VALUE _)
{
    rb_objspace_t *objspace = &rb_objspace;
    objspace->profile.run = FALSE;
    objspace->profile.current_record = 0;
    return Qnil;
}
13725#define TYPE_NAME(t) case (t): return #t;
13752 if (obj && rb_objspace_data_type_name(obj)) {
13753 return rb_objspace_data_type_name(obj);
static const char *
obj_type_name(VALUE obj)
{
    return type_name(TYPE(obj), obj);
}

static const char *
rb_method_type_name(rb_method_type_t type)
{
    switch (type) {
      case VM_METHOD_TYPE_ISEQ:           return "iseq";
      case VM_METHOD_TYPE_ATTRSET:        return "attrset";
      case VM_METHOD_TYPE_IVAR:           return "ivar";
      case VM_METHOD_TYPE_BMETHOD:        return "bmethod";
      case VM_METHOD_TYPE_ALIAS:          return "alias";
      case VM_METHOD_TYPE_REFINED:        return "refined";
      case VM_METHOD_TYPE_CFUNC:          return "cfunc";
      case VM_METHOD_TYPE_ZSUPER:         return "zsuper";
      case VM_METHOD_TYPE_MISSING:        return "missing";
      case VM_METHOD_TYPE_OPTIMIZED:      return "optimized";
      case VM_METHOD_TYPE_UNDEF:          return "undef";
      case VM_METHOD_TYPE_NOTIMPLEMENTED: return "notimplemented";
    }
    rb_bug("rb_method_type_name: unreachable (type: %d)", type);
}
13788rb_raw_iseq_info(
char *
const buff,
const size_t buff_size,
const rb_iseq_t *iseq)
13790 if (buff_size > 0 && ISEQ_BODY(iseq) && ISEQ_BODY(iseq)->location.label && !RB_TYPE_P(ISEQ_BODY(iseq)->location.pathobj,
T_MOVED)) {
13791 VALUE path = rb_iseq_path(iseq);
13792 int n = ISEQ_BODY(iseq)->location.first_lineno;
13793 snprintf(buff, buff_size,
" %s@%s:%d",
13794 RSTRING_PTR(ISEQ_BODY(iseq)->location.label),
13795 RSTRING_PTR(path), n);
13800str_len_no_raise(
VALUE str)
13802 long len = RSTRING_LEN(str);
13803 if (
len < 0)
return 0;
13804 if (
len > INT_MAX)
return INT_MAX;
13808#define BUFF_ARGS buff + pos, buff_size - pos
13809#define APPEND_F(...) if ((pos += snprintf(BUFF_ARGS, "" __VA_ARGS__)) >= buff_size) goto end
13810#define APPEND_S(s) do { \
13811 if ((pos + (int)rb_strlen_lit(s)) >= buff_size) { \
13815 memcpy(buff + pos, (s), rb_strlen_lit(s) + 1); \
13818#define TF(c) ((c) != 0 ? "true" : "false")
13819#define C(c, s) ((c) != 0 ? (s) : " ")
13822rb_raw_obj_info_common(
char *
const buff,
const size_t buff_size,
const VALUE obj)
13827 APPEND_F(
"%s", obj_type_name(obj));
13833 APPEND_F(
" %s", rb_id2name(
SYM2ID(obj)));
13837 const int age = RVALUE_AGE_GET(obj);
13839 if (is_pointer_to_heap(&
rb_objspace, (
void *)obj)) {
13840 APPEND_F(
"%p [%d%s%s%s%s%s%s] %s ",
13842 C(RVALUE_UNCOLLECTIBLE_BITMAP(obj),
"L"),
13843 C(RVALUE_MARK_BITMAP(obj),
"M"),
13844 C(RVALUE_PIN_BITMAP(obj),
"P"),
13845 C(RVALUE_MARKING_BITMAP(obj),
"R"),
13846 C(RVALUE_WB_UNPROTECTED_BITMAP(obj),
"U"),
13847 C(rb_objspace_garbage_object_p(obj),
"G"),
13848 obj_type_name(obj));
13852 APPEND_F(
"%p [%dXXXX] %s",
13854 obj_type_name(obj));
13857 if (internal_object_p(obj)) {
13860 else if (
RBASIC(obj)->klass == 0) {
13861 APPEND_S(
"(temporary internal)");
13865 if (!
NIL_P(class_path)) {
13866 APPEND_F(
"(%s)", RSTRING_PTR(class_path));
13871 APPEND_F(
"@%s:%d", RANY(obj)->file, RANY(obj)->line);
13880rb_raw_obj_info_buitin_type(
char *
const buff,
const size_t buff_size,
const VALUE obj,
size_t pos)
13887 UNEXPECTED_NODE(rb_raw_obj_info);
13890 if (ARY_SHARED_P(obj)) {
13891 APPEND_S(
"shared -> ");
13892 rb_raw_obj_info(BUFF_ARGS, ARY_SHARED_ROOT(obj));
13894 else if (ARY_EMBED_P(obj)) {
13895 APPEND_F(
"[%s%s] len: %ld (embed)",
13896 C(ARY_EMBED_P(obj),
"E"),
13897 C(ARY_SHARED_P(obj),
"S"),
13901 APPEND_F(
"[%s%s] len: %ld, capa:%ld ptr:%p",
13902 C(ARY_EMBED_P(obj),
"E"),
13903 C(ARY_SHARED_P(obj),
"S"),
13905 ARY_EMBED_P(obj) ? -1L :
RARRAY(obj)->as.heap.aux.capa,
13910 if (STR_SHARED_P(obj)) {
13911 APPEND_F(
" [shared] len: %ld", RSTRING_LEN(obj));
13914 if (STR_EMBED_P(obj)) APPEND_S(
" [embed]");
13916 APPEND_F(
" len: %ld, capa: %" PRIdSIZE, RSTRING_LEN(obj),
rb_str_capacity(obj));
13918 APPEND_F(
" \"%.*s\"", str_len_no_raise(obj), RSTRING_PTR(obj));
13922 VALUE fstr = RSYMBOL(obj)->fstr;
13923 ID id = RSYMBOL(obj)->id;
13925 APPEND_F(
":%s id:%d", RSTRING_PTR(fstr), (
unsigned int)
id);
13928 APPEND_F(
"(%p) id:%d", (
void *)fstr, (
unsigned int)
id);
13933 APPEND_F(
"-> %p", (
void*)rb_gc_location(obj));
13937 APPEND_F(
"[%c] %"PRIdSIZE,
13938 RHASH_AR_TABLE_P(obj) ?
'A' :
'S',
13946 if (!
NIL_P(class_path)) {
13947 APPEND_F(
"%s", RSTRING_PTR(class_path));
13950 APPEND_S(
"(anon)");
13957 if (!
NIL_P(class_path)) {
13958 APPEND_F(
"src:%s", RSTRING_PTR(class_path));
13964 if (rb_shape_obj_too_complex(obj)) {
13965 size_t hash_len = rb_st_table_size(ROBJECT_IV_HASH(obj));
13966 APPEND_F(
"(too_complex) len:%"PRIuSIZE
"", hash_len);
13969 uint32_t
len = ROBJECT_IV_CAPACITY(obj);
13971 if (RANY(obj)->as.basic.flags & ROBJECT_EMBED) {
13972 APPEND_F(
"(embed) len:%d",
len);
13976 APPEND_F(
"len:%d ptr:%p",
len, (
void *)ptr);
13985 (block = vm_proc_block(obj)) != NULL &&
13986 (vm_block_type(block) == block_type_iseq) &&
13987 (iseq = vm_block_iseq(block)) != NULL) {
13988 rb_raw_iseq_info(BUFF_ARGS, iseq);
13990 else if (rb_ractor_p(obj)) {
13993 APPEND_F(
"r:%d", r->pub.id);
13997 const char *
const type_name = rb_objspace_data_type_name(obj);
13999 APPEND_F(
"%s", type_name);
14005 APPEND_F(
"<%s> ", rb_imemo_name(imemo_type(obj)));
14007 switch (imemo_type(obj)) {
14012 APPEND_F(
":%s (%s%s%s%s) type:%s aliased:%d owner:%p defined_class:%p",
14013 rb_id2name(me->called_id),
14014 METHOD_ENTRY_VISI(me) == METHOD_VISI_PUBLIC ?
"pub" :
14015 METHOD_ENTRY_VISI(me) == METHOD_VISI_PRIVATE ?
"pri" :
"pro",
14016 METHOD_ENTRY_COMPLEMENTED(me) ?
",cmp" :
"",
14017 METHOD_ENTRY_CACHED(me) ?
",cc" :
"",
14018 METHOD_ENTRY_INVALIDATED(me) ?
",inv" :
"",
14019 me->def ? rb_method_type_name(me->def->type) :
"NULL",
14020 me->def ? me->def->aliased : -1,
14022 (
void *)me->defined_class);
14025 switch (me->def->type) {
14026 case VM_METHOD_TYPE_ISEQ:
14027 APPEND_S(
" (iseq:");
14028 rb_raw_obj_info(BUFF_ARGS, (
VALUE)me->def->body.iseq.
iseqptr);
14040 rb_raw_iseq_info(BUFF_ARGS, iseq);
14043 case imemo_callinfo:
14046 APPEND_F(
"(mid:%s, flag:%x argc:%d, kwarg:%s)",
14047 rb_id2name(vm_ci_mid(ci)),
14050 vm_ci_kwarg(ci) ?
"available" :
"NULL");
14053 case imemo_callcache:
14059 APPEND_F(
"(klass:%s cme:%s%s (%p) call:%p",
14060 NIL_P(class_path) ? (cc->klass ?
"??" :
"<NULL>") : RSTRING_PTR(class_path),
14061 cme ? rb_id2name(cme->called_id) :
"<NULL>",
14062 cme ? (METHOD_ENTRY_INVALIDATED(cme) ?
" [inv]" :
"") :
"",
14064 (
void *)vm_cc_call(cc));
14084rb_raw_obj_info(
char *
const buff,
const size_t buff_size,
VALUE obj)
14086 asan_unpoisoning_object(obj) {
14087 size_t pos = rb_raw_obj_info_common(buff, buff_size, obj);
14088 pos = rb_raw_obj_info_buitin_type(buff, buff_size, obj, pos);
14089 if (pos >= buff_size) {}
14100#define OBJ_INFO_BUFFERS_NUM 10
14101#define OBJ_INFO_BUFFERS_SIZE 0x100
14103static char obj_info_buffers[OBJ_INFO_BUFFERS_NUM][OBJ_INFO_BUFFERS_SIZE];
14111 if (UNLIKELY(oldval >= maxval - 1)) {
14122 rb_atomic_t index = atomic_inc_wraparound(&obj_info_buffers_index, OBJ_INFO_BUFFERS_NUM);
14123 char *
const buff = obj_info_buffers[index];
14124 return rb_raw_obj_info(buff, OBJ_INFO_BUFFERS_SIZE, obj);
14130 return obj_type_name(obj);
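/*
 * Illustrative sketch, not part of gc.c: obj_info() hands out slots from a
 * small ring of static buffers so several calls can appear in one printf()
 * without clobbering each other.  A simplified slot picker, without the
 * compare-and-swap wrap-around the real helper uses, could look like this;
 * the names are hypothetical.
 */
#include <ruby/atomic.h>

#define RING_SLOTS 10
static rb_atomic_t ring_cursor;

static unsigned int
next_ring_slot(void)
{
    /* monotonically increasing counter, reduced onto the ring */
    return (unsigned int)(RUBY_ATOMIC_FETCH_ADD(ring_cursor, 1) % RING_SLOTS);
}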
const char *
rb_obj_info(VALUE obj)
{
    return obj_info(obj);
}

void
rb_obj_info_dump(VALUE obj)
{
    char buff[0x100];
    fprintf(stderr, "rb_obj_info_dump: %s\n", rb_raw_obj_info(buff, 0x100, obj));
}

void
rb_obj_info_dump_loc(VALUE obj, const char *file, int line, const char *func)
{
    char buff[0x100];
    fprintf(stderr, "<OBJ_INFO:%s@%s:%d> %s\n", func, file, line, rb_raw_obj_info(buff, 0x100, obj));
}
14157rb_gcdebug_print_obj_condition(
VALUE obj)
14161 fprintf(stderr,
"created at: %s:%d\n", RANY(obj)->file, RANY(obj)->line);
14164 fprintf(stderr,
"moved?: true\n");
14167 fprintf(stderr,
"moved?: false\n");
14169 if (is_pointer_to_heap(objspace, (
void *)obj)) {
14170 fprintf(stderr,
"pointer to heap?: true\n");
14173 fprintf(stderr,
"pointer to heap?: false\n");
14177 fprintf(stderr,
"marked? : %s\n", MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) ?
"true" :
"false");
14178 fprintf(stderr,
"pinned? : %s\n", MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj) ?
"true" :
"false");
14179 fprintf(stderr,
"age? : %d\n", RVALUE_AGE_GET(obj));
14180 fprintf(stderr,
"old? : %s\n", RVALUE_OLD_P(obj) ?
"true" :
"false");
14181 fprintf(stderr,
"WB-protected?: %s\n", RVALUE_WB_UNPROTECTED(obj) ?
"false" :
"true");
14182 fprintf(stderr,
"remembered? : %s\n", RVALUE_REMEMBERED(obj) ?
"true" :
"false");
14184 if (is_lazy_sweeping(objspace)) {
14185 fprintf(stderr,
"lazy sweeping?: true\n");
14186 fprintf(stderr,
"swept?: %s\n", is_swept_object(obj) ?
"done" :
"not yet");
14189 fprintf(stderr,
"lazy sweeping?: false\n");
14196 fprintf(stderr,
"WARNING: object %s(%p) is inadvertently collected\n", (
char *)name, (
void *)obj);
14201rb_gcdebug_sentinel(
VALUE obj,
const char *name)
14216rb_gcdebug_add_stress_to_class(
int argc,
VALUE *argv,
VALUE self)
14220 if (!stress_to_class) {
14221 set_stress_to_class(rb_ary_hidden_new(argc));
14223 rb_ary_cat(stress_to_class, argv, argc);
14236rb_gcdebug_remove_stress_to_class(
int argc,
VALUE *argv,
VALUE self)
14241 if (stress_to_class) {
14242 for (i = 0; i < argc; ++i) {
14243 rb_ary_delete_same(stress_to_class, argv[i]);
14246 set_stress_to_class(0);
14304 malloc_offset = gc_compute_malloc_offset();
14306 VALUE rb_mObjSpace;
14307 VALUE rb_mProfiler;
14308 VALUE gc_constants;
14312 gc_constants = rb_hash_new();
14313 rb_hash_aset(gc_constants,
ID2SYM(rb_intern(
"DEBUG")), RBOOL(GC_DEBUG));
14314 rb_hash_aset(gc_constants,
ID2SYM(rb_intern(
"BASE_SLOT_SIZE")),
SIZET2NUM(BASE_SLOT_SIZE - RVALUE_OVERHEAD));
14315 rb_hash_aset(gc_constants,
ID2SYM(rb_intern(
"RVALUE_OVERHEAD")),
SIZET2NUM(RVALUE_OVERHEAD));
14317 rb_hash_aset(gc_constants,
ID2SYM(rb_intern(
"HEAP_PAGE_OBJ_LIMIT")),
SIZET2NUM(HEAP_PAGE_OBJ_LIMIT));
14318 rb_hash_aset(gc_constants,
ID2SYM(rb_intern(
"HEAP_PAGE_BITMAP_SIZE")),
SIZET2NUM(HEAP_PAGE_BITMAP_SIZE));
14319 rb_hash_aset(gc_constants,
ID2SYM(rb_intern(
"HEAP_PAGE_SIZE")),
SIZET2NUM(HEAP_PAGE_SIZE));
14320 rb_hash_aset(gc_constants,
ID2SYM(rb_intern(
"SIZE_POOL_COUNT")),
LONG2FIX(SIZE_POOL_COUNT));
14321 rb_hash_aset(gc_constants,
ID2SYM(rb_intern(
"RVARGC_MAX_ALLOCATE_SIZE")),
LONG2FIX(size_pool_slot_size(SIZE_POOL_COUNT - 1)));
14322 rb_hash_aset(gc_constants,
ID2SYM(rb_intern(
"RVALUE_OLD_AGE")),
LONG2FIX(RVALUE_OLD_AGE));
14323 if (RB_BUG_INSTEAD_OF_RB_MEMERROR+0) {
14324 rb_hash_aset(gc_constants,
ID2SYM(rb_intern(
"RB_BUG_INSTEAD_OF_RB_MEMERROR")),
Qtrue);
14349 rb_vm_register_special_exception(ruby_error_nomemory,
rb_eNoMemError,
"failed to allocate memory");
14358#if MALLOC_ALLOCATED_SIZE
14363 if (GC_COMPACTION_SUPPORTED) {
14378 if (GC_DEBUG_STRESS_TO_CLASS) {
14387#define OPT(o) if (o) rb_ary_push(opts, rb_fstring_lit(#o))
14391 OPT(RGENGC_CHECK_MODE);
14392 OPT(RGENGC_PROFILE);
14393 OPT(RGENGC_ESTIMATE_OLDMALLOC);
14394 OPT(GC_PROFILE_MORE_DETAIL);
14395 OPT(GC_ENABLE_LAZY_SWEEP);
14396 OPT(CALC_EXACT_MALLOC_SIZE);
14397 OPT(MALLOC_ALLOCATED_SIZE);
14398 OPT(MALLOC_ALLOCATED_SIZE_CHECK);
14399 OPT(GC_PROFILE_DETAIL_MEMORY);
14400 OPT(GC_COMPACTION_SUPPORTED);
#ifdef ruby_xmalloc2
#undef ruby_xmalloc2
#endif
#ifdef ruby_xrealloc
#undef ruby_xrealloc
#endif
#ifdef ruby_xrealloc2
#undef ruby_xrealloc2
#endif

void *
ruby_xmalloc(size_t size)
{
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
#endif
    return ruby_xmalloc_body(size);
}

void *
ruby_xmalloc2(size_t n, size_t size)
{
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
#endif
    return ruby_xmalloc2_body(n, size);
}

void *
ruby_xcalloc(size_t n, size_t size)
{
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
#endif
    return ruby_xcalloc_body(n, size);
}

void *
ruby_xrealloc(void *ptr, size_t new_size)
{
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
#endif
    return ruby_xrealloc_body(ptr, new_size);
}

void *
ruby_xrealloc2(void *ptr, size_t n, size_t new_size)
{
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
#endif
    return ruby_xrealloc2_body(ptr, n, new_size);
}
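/*
 * Illustrative sketch, not part of gc.c: typical extension-side use of the
 * x-family wrappers defined above.  Unlike plain malloc()/free(), these
 * raise NoMemoryError (after attempting a GC) instead of returning NULL,
 * and the allocated sizes feed the malloc_increase-based GC trigger.  The
 * vector helpers are hypothetical.
 */
#include <ruby.h>

static double *
dvec_new(size_t n)
{
    /* zero-filled; n * sizeof(double) is overflow-checked */
    return ruby_xcalloc(n, sizeof(double));
}

static double *
dvec_grow(double *v, size_t n)
{
    return ruby_xrealloc2(v, n, sizeof(double));
}

static void
dvec_free(double *v)
{
    ruby_xfree(v);
}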
Bitmask of ruby_value_type.