Ruby 3.3.2p78 (2024-05-30 revision e5a195edf62fe1bf7146a191da13fa1c4fecbd71)
gc.c
1/**********************************************************************
2
3 gc.c -
4
5 $Author$
6 created at: Tue Oct 5 09:44:46 JST 1993
7
8 Copyright (C) 1993-2007 Yukihiro Matsumoto
9 Copyright (C) 2000 Network Applied Communication Laboratory, Inc.
10 Copyright (C) 2000 Information-technology Promotion Agency, Japan
11
12**********************************************************************/
13
14#define rb_data_object_alloc rb_data_object_alloc
15#define rb_data_typed_object_alloc rb_data_typed_object_alloc
16
17#include "ruby/internal/config.h"
18#ifdef _WIN32
19# include "ruby/ruby.h"
20#endif
21
22#include <signal.h>
23
24#define sighandler_t ruby_sighandler_t
25
26#ifndef _WIN32
27#include <unistd.h>
28#include <sys/mman.h>
29#endif
30
31#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
32# include "wasm/setjmp.h"
33# include "wasm/machine.h"
34#else
35# include <setjmp.h>
36#endif
37#include <stdarg.h>
38#include <stdio.h>
39
40/* MALLOC_HEADERS_BEGIN */
41#ifndef HAVE_MALLOC_USABLE_SIZE
42# ifdef _WIN32
43# define HAVE_MALLOC_USABLE_SIZE
44# define malloc_usable_size(a) _msize(a)
45# elif defined HAVE_MALLOC_SIZE
46# define HAVE_MALLOC_USABLE_SIZE
47# define malloc_usable_size(a) malloc_size(a)
48# endif
49#endif
50
51#ifdef HAVE_MALLOC_USABLE_SIZE
52# ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
53/* Alternative malloc header is included in ruby/missing.h */
54# elif defined(HAVE_MALLOC_H)
55# include <malloc.h>
56# elif defined(HAVE_MALLOC_NP_H)
57# include <malloc_np.h>
58# elif defined(HAVE_MALLOC_MALLOC_H)
59# include <malloc/malloc.h>
60# endif
61#endif
62
63#ifdef HAVE_MALLOC_TRIM
64# include <malloc.h>
65
66# ifdef __EMSCRIPTEN__
67/* malloc_trim is defined in emscripten/emmalloc.h on emscripten. */
68# include <emscripten/emmalloc.h>
69# endif
70#endif
71
72#if !defined(PAGE_SIZE) && defined(HAVE_SYS_USER_H)
73/* LIST_HEAD conflicts with sys/queue.h on macOS */
74# include <sys/user.h>
75#endif
76/* MALLOC_HEADERS_END */
77
78#ifdef HAVE_SYS_TIME_H
79# include <sys/time.h>
80#endif
81
82#ifdef HAVE_SYS_RESOURCE_H
83# include <sys/resource.h>
84#endif
85
86#if defined _WIN32 || defined __CYGWIN__
87# include <windows.h>
88#elif defined(HAVE_POSIX_MEMALIGN)
89#elif defined(HAVE_MEMALIGN)
90# include <malloc.h>
91#endif
92
93#include <sys/types.h>
94
95#ifdef __EMSCRIPTEN__
96#include <emscripten.h>
97#endif
98
99#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
100# include <mach/task.h>
101# include <mach/mach_init.h>
102# include <mach/mach_port.h>
103#endif
104#undef LIST_HEAD /* ccan/list conflicts with BSD-origin sys/queue.h. */
105
106#include "constant.h"
107#include "darray.h"
108#include "debug_counter.h"
109#include "eval_intern.h"
110#include "id_table.h"
111#include "internal.h"
112#include "internal/class.h"
113#include "internal/compile.h"
114#include "internal/complex.h"
115#include "internal/cont.h"
116#include "internal/error.h"
117#include "internal/eval.h"
118#include "internal/gc.h"
119#include "internal/hash.h"
120#include "internal/imemo.h"
121#include "internal/io.h"
122#include "internal/numeric.h"
123#include "internal/object.h"
124#include "internal/proc.h"
125#include "internal/rational.h"
126#include "internal/sanitizers.h"
127#include "internal/struct.h"
128#include "internal/symbol.h"
129#include "internal/thread.h"
130#include "internal/variable.h"
131#include "internal/warnings.h"
132#include "rjit.h"
133#include "probes.h"
134#include "regint.h"
135#include "ruby/debug.h"
136#include "ruby/io.h"
137#include "ruby/re.h"
138#include "ruby/st.h"
139#include "ruby/thread.h"
140#include "ruby/util.h"
141#include "ruby_assert.h"
142#include "ruby_atomic.h"
143#include "symbol.h"
144#include "vm_core.h"
145#include "vm_sync.h"
146#include "vm_callinfo.h"
147#include "ractor_core.h"
148
149#include "builtin.h"
150#include "shape.h"
151
152#define rb_setjmp(env) RUBY_SETJMP(env)
153#define rb_jmp_buf rb_jmpbuf_t
154#undef rb_data_object_wrap
155
156#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
157#define MAP_ANONYMOUS MAP_ANON
158#endif
159
160
161static size_t malloc_offset = 0;
162#if defined(HAVE_MALLOC_USABLE_SIZE)
163static size_t
164gc_compute_malloc_offset(void)
165{
166 // Different allocators use different metadata storage strategies which result in different
167 // ideal sizes.
168 // For instance malloc(64) will waste 8B with glibc, but waste 0B with jemalloc.
169 // But malloc(56) will waste 0B with glibc, but waste 8B with jemalloc.
170 // So we try allocating 64, 56 and 48 bytes and select the first offset that doesn't
171 // waste memory.
172 // This was tested on Linux with glibc 2.35 and jemalloc 5, and for both it results in
173 // no wasted memory.
174 size_t offset = 0;
175 for (offset = 0; offset <= 16; offset += 8) {
176 size_t allocated = (64 - offset);
177 void *test_ptr = malloc(allocated);
178 size_t wasted = malloc_usable_size(test_ptr) - allocated;
179 free(test_ptr);
180
181 if (wasted == 0) {
182 return offset;
183 }
184 }
185 return 0;
186}
187#else
188static size_t
189gc_compute_malloc_offset(void)
190{
191 // If we don't have malloc_usable_size, we use powers of 2.
192 return 0;
193}
194#endif
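/* [Editorial illustration, not part of upstream gc.c] A worked example of the
 * probe above, assuming glibc malloc, whose chunks carry an 8-byte header and
 * are rounded up to 16 bytes:
 *
 *   offset 0: malloc(64) -> malloc_usable_size() == 72  -> 8 bytes wasted
 *   offset 8: malloc(56) -> malloc_usable_size() == 56  -> 0 bytes wasted
 *
 * so the loop returns 8. Under jemalloc, malloc(64) already falls exactly into
 * a 64-byte size class, so the very first iteration returns 0. The exact
 * numbers depend on the allocator build and version; they are illustrative. */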
195
196size_t
197rb_malloc_grow_capa(size_t current, size_t type_size)
198{
199 size_t current_capacity = current;
200 if (current_capacity < 4) {
201 current_capacity = 4;
202 }
203 current_capacity *= type_size;
204
205 // We double the current capacity.
206 size_t new_capacity = (current_capacity * 2);
207
208 // And round up to the next power of 2 if it's not already one.
209 if (rb_popcount64(new_capacity) != 1) {
210 new_capacity = (size_t)(1 << (64 - nlz_int64(new_capacity)));
211 }
212
213 new_capacity -= malloc_offset;
214 new_capacity /= type_size;
215 if (current > new_capacity) {
216 rb_bug("rb_malloc_grow_capa: current_capacity=%zu, new_capacity=%zu, malloc_offset=%zu", current, new_capacity, malloc_offset);
217 }
218 RUBY_ASSERT(new_capacity > current);
219 return new_capacity;
220}
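/* [Editorial illustration, not part of upstream gc.c] Example of the growth
 * computation above, assuming type_size == 8 and malloc_offset == 8 (the
 * glibc case measured at startup):
 *
 *   current = 10  -> current_capacity = 80 bytes
 *   doubled       -> new_capacity     = 160 bytes
 *   next pow of 2 -> new_capacity     = 256 bytes
 *   minus offset  -> 248 bytes / 8    = 31 elements
 *
 * The returned capacity of 31 elements (248 bytes) plus the allocator's own
 * 8-byte overhead exactly fills a 256-byte allocation, wasting nothing. */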
221
222static inline struct rbimpl_size_mul_overflow_tag
223size_add_overflow(size_t x, size_t y)
224{
225 size_t z;
226 bool p;
227#if 0
228
229#elif __has_builtin(__builtin_add_overflow)
230 p = __builtin_add_overflow(x, y, &z);
231
232#elif defined(DSIZE_T)
233 RB_GNUC_EXTENSION DSIZE_T dx = x;
234 RB_GNUC_EXTENSION DSIZE_T dy = y;
235 RB_GNUC_EXTENSION DSIZE_T dz = dx + dy;
236 p = dz > SIZE_MAX;
237 z = (size_t)dz;
238
239#else
240 z = x + y;
241 p = z < y;
242
243#endif
244 return (struct rbimpl_size_mul_overflow_tag) { p, z, };
245}
246
247static inline struct rbimpl_size_mul_overflow_tag
248size_mul_add_overflow(size_t x, size_t y, size_t z) /* x * y + z */
249{
250 struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
251 struct rbimpl_size_mul_overflow_tag u = size_add_overflow(t.right, z);
252 return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left, u.right };
253}
254
255static inline struct rbimpl_size_mul_overflow_tag
256size_mul_add_mul_overflow(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
257{
258 struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
259 struct rbimpl_size_mul_overflow_tag u = rbimpl_size_mul_overflow(z, w);
260 struct rbimpl_size_mul_overflow_tag v = size_add_overflow(t.right, u.right);
261 return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left || v.left, v.right };
262}
263
264PRINTF_ARGS(NORETURN(static void gc_raise(VALUE, const char*, ...)), 2, 3);
265
266static inline size_t
267size_mul_or_raise(size_t x, size_t y, VALUE exc)
268{
269 struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
270 if (LIKELY(!t.left)) {
271 return t.right;
272 }
273 else if (rb_during_gc()) {
274 rb_memerror(); /* or...? */
275 }
276 else {
277 gc_raise(
278 exc,
279 "integer overflow: %"PRIuSIZE
280 " * %"PRIuSIZE
281 " > %"PRIuSIZE,
282 x, y, (size_t)SIZE_MAX);
283 }
284}
285
286size_t
287rb_size_mul_or_raise(size_t x, size_t y, VALUE exc)
288{
289 return size_mul_or_raise(x, y, exc);
290}
291
292static inline size_t
293size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
294{
295 struct rbimpl_size_mul_overflow_tag t = size_mul_add_overflow(x, y, z);
296 if (LIKELY(!t.left)) {
297 return t.right;
298 }
299 else if (rb_during_gc()) {
300 rb_memerror(); /* or...? */
301 }
302 else {
303 gc_raise(
304 exc,
305 "integer overflow: %"PRIuSIZE
306 " * %"PRIuSIZE
307 " + %"PRIuSIZE
308 " > %"PRIuSIZE,
309 x, y, z, (size_t)SIZE_MAX);
310 }
311}
312
313size_t
314rb_size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
315{
316 return size_mul_add_or_raise(x, y, z, exc);
317}
318
319static inline size_t
320size_mul_add_mul_or_raise(size_t x, size_t y, size_t z, size_t w, VALUE exc)
321{
322 struct rbimpl_size_mul_overflow_tag t = size_mul_add_mul_overflow(x, y, z, w);
323 if (LIKELY(!t.left)) {
324 return t.right;
325 }
326 else if (rb_during_gc()) {
327 rb_memerror(); /* or...? */
328 }
329 else {
330 gc_raise(
331 exc,
332 "integer overflow: %"PRIdSIZE
333 " * %"PRIdSIZE
334 " + %"PRIdSIZE
335 " * %"PRIdSIZE
336 " > %"PRIdSIZE,
337 x, y, z, w, (size_t)SIZE_MAX);
338 }
339}
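/* [Editorial illustration, not part of upstream gc.c] These _or_raise helpers
 * are the overflow-checked building blocks used when computing allocation
 * sizes. A hypothetical caller computing "header + n elements" could look like
 * the sketch below (example_alloc_n is not a real gc.c function):
 */
#if 0
static void *
example_alloc_n(size_t n, size_t elem_size, size_t header_size)
{
    /* raises rb_eArgError on overflow, or calls rb_memerror() during GC */
    size_t total = size_mul_add_or_raise(n, elem_size, header_size, rb_eArgError);
    return ruby_xmalloc(total);
}
#endif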
340
341#if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL
342/* trick the compiler into thinking an external signal handler uses this */
343volatile VALUE rb_gc_guarded_val;
344volatile VALUE *
345rb_gc_guarded_ptr_val(volatile VALUE *ptr, VALUE val)
346{
347 rb_gc_guarded_val = val;
348
349 return ptr;
350}
351#endif
352
353#ifndef GC_HEAP_INIT_SLOTS
354#define GC_HEAP_INIT_SLOTS 10000
355#endif
356#ifndef GC_HEAP_FREE_SLOTS
357#define GC_HEAP_FREE_SLOTS 4096
358#endif
359#ifndef GC_HEAP_GROWTH_FACTOR
360#define GC_HEAP_GROWTH_FACTOR 1.8
361#endif
362#ifndef GC_HEAP_GROWTH_MAX_SLOTS
363#define GC_HEAP_GROWTH_MAX_SLOTS 0 /* 0 is disable */
364#endif
365#ifndef GC_HEAP_REMEMBERED_WB_UNPROTECTED_OBJECTS_LIMIT_RATIO
366# define GC_HEAP_REMEMBERED_WB_UNPROTECTED_OBJECTS_LIMIT_RATIO 0.01
367#endif
368#ifndef GC_HEAP_OLDOBJECT_LIMIT_FACTOR
369#define GC_HEAP_OLDOBJECT_LIMIT_FACTOR 2.0
370#endif
371
372#ifndef GC_HEAP_FREE_SLOTS_MIN_RATIO
373#define GC_HEAP_FREE_SLOTS_MIN_RATIO 0.20
374#endif
375#ifndef GC_HEAP_FREE_SLOTS_GOAL_RATIO
376#define GC_HEAP_FREE_SLOTS_GOAL_RATIO 0.40
377#endif
378#ifndef GC_HEAP_FREE_SLOTS_MAX_RATIO
379#define GC_HEAP_FREE_SLOTS_MAX_RATIO 0.65
380#endif
381
382#ifndef GC_MALLOC_LIMIT_MIN
383#define GC_MALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
384#endif
385#ifndef GC_MALLOC_LIMIT_MAX
386#define GC_MALLOC_LIMIT_MAX (32 * 1024 * 1024 /* 32MB */)
387#endif
388#ifndef GC_MALLOC_LIMIT_GROWTH_FACTOR
389#define GC_MALLOC_LIMIT_GROWTH_FACTOR 1.4
390#endif
391
392#ifndef GC_OLDMALLOC_LIMIT_MIN
393#define GC_OLDMALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
394#endif
395#ifndef GC_OLDMALLOC_LIMIT_GROWTH_FACTOR
396#define GC_OLDMALLOC_LIMIT_GROWTH_FACTOR 1.2
397#endif
398#ifndef GC_OLDMALLOC_LIMIT_MAX
399#define GC_OLDMALLOC_LIMIT_MAX (128 * 1024 * 1024 /* 128MB */)
400#endif
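/* [Editorial note, not part of upstream gc.c] The macros above are only
 * compile-time defaults. At boot they can generally be overridden through
 * environment variables of the same spelling (for example
 * RUBY_GC_HEAP_GROWTH_FACTOR, RUBY_GC_HEAP_FREE_SLOTS, RUBY_GC_MALLOC_LIMIT,
 * RUBY_GC_OLDMALLOC_LIMIT_MAX), which populate gc_params below; see the
 * ruby_gc_set_params() implementation later in this file for the exact set
 * recognized by this Ruby version. */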
401
402#ifndef GC_CAN_COMPILE_COMPACTION
403#if defined(__wasi__) /* WebAssembly doesn't support signals */
404# define GC_CAN_COMPILE_COMPACTION 0
405#else
406# define GC_CAN_COMPILE_COMPACTION 1
407#endif
408#endif
409
410#ifndef PRINT_MEASURE_LINE
411#define PRINT_MEASURE_LINE 0
412#endif
413#ifndef PRINT_ENTER_EXIT_TICK
414#define PRINT_ENTER_EXIT_TICK 0
415#endif
416#ifndef PRINT_ROOT_TICKS
417#define PRINT_ROOT_TICKS 0
418#endif
419
420#define USE_TICK_T (PRINT_ENTER_EXIT_TICK || PRINT_MEASURE_LINE || PRINT_ROOT_TICKS)
421#define TICK_TYPE 1
422
423typedef struct {
424 size_t size_pool_init_slots[SIZE_POOL_COUNT];
425 size_t heap_free_slots;
426 double growth_factor;
427 size_t growth_max_slots;
428
429 double heap_free_slots_min_ratio;
430 double heap_free_slots_goal_ratio;
431 double heap_free_slots_max_ratio;
432 double uncollectible_wb_unprotected_objects_limit_ratio;
433 double oldobject_limit_factor;
434
435 size_t malloc_limit_min;
436 size_t malloc_limit_max;
437 double malloc_limit_growth_factor;
438
439 size_t oldmalloc_limit_min;
440 size_t oldmalloc_limit_max;
441 double oldmalloc_limit_growth_factor;
442
443 VALUE gc_stress;
444} ruby_gc_params_t;
445
446static ruby_gc_params_t gc_params = {
447 { 0 },
448 GC_HEAP_FREE_SLOTS,
449 GC_HEAP_GROWTH_FACTOR,
450 GC_HEAP_GROWTH_MAX_SLOTS,
451
452 GC_HEAP_FREE_SLOTS_MIN_RATIO,
453 GC_HEAP_FREE_SLOTS_GOAL_RATIO,
454 GC_HEAP_FREE_SLOTS_MAX_RATIO,
455 GC_HEAP_REMEMBERED_WB_UNPROTECTED_OBJECTS_LIMIT_RATIO,
456 GC_HEAP_OLDOBJECT_LIMIT_FACTOR,
457
458 GC_MALLOC_LIMIT_MIN,
459 GC_MALLOC_LIMIT_MAX,
460 GC_MALLOC_LIMIT_GROWTH_FACTOR,
461
462 GC_OLDMALLOC_LIMIT_MIN,
463 GC_OLDMALLOC_LIMIT_MAX,
464 GC_OLDMALLOC_LIMIT_GROWTH_FACTOR,
465
466 FALSE,
467};
468
469/* GC_DEBUG:
470 * enable to embed GC debugging information.
471 */
472#ifndef GC_DEBUG
473#define GC_DEBUG 0
474#endif
475
476/* RGENGC_DEBUG:
477 * 1: basic information
478 * 2: remember set operation
479 * 3: mark
480 * 4:
481 * 5: sweep
482 */
483#ifndef RGENGC_DEBUG
484#ifdef RUBY_DEVEL
485#define RGENGC_DEBUG -1
486#else
487#define RGENGC_DEBUG 0
488#endif
489#endif
490#if RGENGC_DEBUG < 0 && !defined(_MSC_VER)
491# define RGENGC_DEBUG_ENABLED(level) (-(RGENGC_DEBUG) >= (level) && ruby_rgengc_debug >= (level))
492#elif defined(HAVE_VA_ARGS_MACRO)
493# define RGENGC_DEBUG_ENABLED(level) ((RGENGC_DEBUG) >= (level))
494#else
495# define RGENGC_DEBUG_ENABLED(level) 0
496#endif
497int ruby_rgengc_debug;
498
499/* RGENGC_CHECK_MODE
500 * 0: disable all assertions
501 * 1: enable assertions (to debug RGenGC)
502 * 2: enable internal consistency check at each GC (for debugging)
503 * 3: enable internal consistency check at each GC steps (for debugging)
504 * 4: enable liveness check
505 * 5: show all references
506 */
507#ifndef RGENGC_CHECK_MODE
508#define RGENGC_CHECK_MODE 0
509#endif
510
511// Note: using RUBY_ASSERT_WHEN() expands a macro in expr (info by nobu).
512#define GC_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(RGENGC_CHECK_MODE > 0, expr, #expr)
513
514/* RGENGC_PROFILE
515 * 0: disable RGenGC profiling
516 * 1: enable profiling for basic information
517 * 2: enable profiling for each type
518 */
519#ifndef RGENGC_PROFILE
520#define RGENGC_PROFILE 0
521#endif
522
523/* RGENGC_ESTIMATE_OLDMALLOC
524 * Enable/disable estimation of the increase in malloc'ed memory caused by old objects.
525 * If the estimate exceeds the threshold, a full GC is invoked.
526 * 0: disable estimation.
527 * 1: enable estimation.
528 */
529#ifndef RGENGC_ESTIMATE_OLDMALLOC
530#define RGENGC_ESTIMATE_OLDMALLOC 1
531#endif
532
533/* RGENGC_FORCE_MAJOR_GC
534 * Force major/full GC if this macro is not 0.
535 */
536#ifndef RGENGC_FORCE_MAJOR_GC
537#define RGENGC_FORCE_MAJOR_GC 0
538#endif
539
540#ifndef GC_PROFILE_MORE_DETAIL
541#define GC_PROFILE_MORE_DETAIL 0
542#endif
543#ifndef GC_PROFILE_DETAIL_MEMORY
544#define GC_PROFILE_DETAIL_MEMORY 0
545#endif
546#ifndef GC_ENABLE_LAZY_SWEEP
547#define GC_ENABLE_LAZY_SWEEP 1
548#endif
549#ifndef CALC_EXACT_MALLOC_SIZE
550#define CALC_EXACT_MALLOC_SIZE USE_GC_MALLOC_OBJ_INFO_DETAILS
551#endif
552#if defined(HAVE_MALLOC_USABLE_SIZE) || CALC_EXACT_MALLOC_SIZE > 0
553#ifndef MALLOC_ALLOCATED_SIZE
554#define MALLOC_ALLOCATED_SIZE 0
555#endif
556#else
557#define MALLOC_ALLOCATED_SIZE 0
558#endif
559#ifndef MALLOC_ALLOCATED_SIZE_CHECK
560#define MALLOC_ALLOCATED_SIZE_CHECK 0
561#endif
562
563#ifndef GC_DEBUG_STRESS_TO_CLASS
564#define GC_DEBUG_STRESS_TO_CLASS RUBY_DEBUG
565#endif
566
567#ifndef RGENGC_OBJ_INFO
568#define RGENGC_OBJ_INFO (RGENGC_DEBUG | RGENGC_CHECK_MODE)
569#endif
570
571typedef enum {
572 GPR_FLAG_NONE = 0x000,
573 /* major reason */
574 GPR_FLAG_MAJOR_BY_NOFREE = 0x001,
575 GPR_FLAG_MAJOR_BY_OLDGEN = 0x002,
576 GPR_FLAG_MAJOR_BY_SHADY = 0x004,
577 GPR_FLAG_MAJOR_BY_FORCE = 0x008,
578#if RGENGC_ESTIMATE_OLDMALLOC
579 GPR_FLAG_MAJOR_BY_OLDMALLOC = 0x020,
580#endif
581 GPR_FLAG_MAJOR_MASK = 0x0ff,
582
583 /* gc reason */
584 GPR_FLAG_NEWOBJ = 0x100,
585 GPR_FLAG_MALLOC = 0x200,
586 GPR_FLAG_METHOD = 0x400,
587 GPR_FLAG_CAPI = 0x800,
588 GPR_FLAG_STRESS = 0x1000,
589
590 /* others */
591 GPR_FLAG_IMMEDIATE_SWEEP = 0x2000,
592 GPR_FLAG_HAVE_FINALIZE = 0x4000,
593 GPR_FLAG_IMMEDIATE_MARK = 0x8000,
594 GPR_FLAG_FULL_MARK = 0x10000,
595 GPR_FLAG_COMPACT = 0x20000,
596
597 GPR_DEFAULT_REASON =
598 (GPR_FLAG_FULL_MARK | GPR_FLAG_IMMEDIATE_MARK |
599 GPR_FLAG_IMMEDIATE_SWEEP | GPR_FLAG_CAPI),
600} gc_profile_record_flag;
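/* [Editorial illustration, not part of upstream gc.c] GC reasons are a plain
 * bitmask, so a recorded reason decomposes back into its flags. For example:
 *
 *   GPR_DEFAULT_REASON = GPR_FLAG_FULL_MARK | GPR_FLAG_IMMEDIATE_MARK
 *                      | GPR_FLAG_IMMEDIATE_SWEEP | GPR_FLAG_CAPI
 *                      = 0x10000 | 0x8000 | 0x2000 | 0x800 = 0x1a800
 *
 * i.e. a full, immediately-marked, immediately-swept collection attributed to
 * the C API. */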
601
602typedef struct gc_profile_record {
603 unsigned int flags;
604
605 double gc_time;
606 double gc_invoke_time;
607
608 size_t heap_total_objects;
609 size_t heap_use_size;
610 size_t heap_total_size;
611 size_t moved_objects;
612
613#if GC_PROFILE_MORE_DETAIL
614 double gc_mark_time;
615 double gc_sweep_time;
616
617 size_t heap_use_pages;
618 size_t heap_live_objects;
619 size_t heap_free_objects;
620
621 size_t allocate_increase;
622 size_t allocate_limit;
623
624 double prepare_time;
625 size_t removing_objects;
626 size_t empty_objects;
627#if GC_PROFILE_DETAIL_MEMORY
628 long maxrss;
629 long minflt;
630 long majflt;
631#endif
632#endif
633#if MALLOC_ALLOCATED_SIZE
634 size_t allocated_size;
635#endif
636
637#if RGENGC_PROFILE > 0
638 size_t old_objects;
639 size_t remembered_normal_objects;
640 size_t remembered_shady_objects;
641#endif
642} gc_profile_record;
643
644struct RMoved {
645 VALUE flags;
646 VALUE dummy;
647 VALUE destination;
648 shape_id_t original_shape_id;
649};
650
651#define RMOVED(obj) ((struct RMoved *)(obj))
652
653typedef struct RVALUE {
654 union {
655 struct {
656 VALUE flags; /* always 0 for freed obj */
657 struct RVALUE *next;
658 } free;
659 struct RMoved moved;
660 struct RBasic basic;
661 struct RObject object;
662 struct RClass klass;
663 struct RFloat flonum;
664 struct RString string;
665 struct RArray array;
666 struct RRegexp regexp;
667 struct RHash hash;
668 struct RData data;
669 struct RTypedData typeddata;
670 struct RStruct rstruct;
671 struct RBignum bignum;
672 struct RFile file;
673 struct RMatch match;
674 struct RRational rational;
675 struct RComplex complex;
676 struct RSymbol symbol;
677 union {
678 rb_cref_t cref;
679 struct vm_svar svar;
680 struct vm_throw_data throw_data;
681 struct vm_ifunc ifunc;
682 struct MEMO memo;
683 struct rb_method_entry_struct ment;
684 const rb_iseq_t iseq;
685 rb_env_t env;
686 struct rb_imemo_tmpbuf_struct alloc;
687 rb_ast_t ast;
688 } imemo;
689 struct {
690 struct RBasic basic;
691 VALUE v1;
692 VALUE v2;
693 VALUE v3;
694 } values;
695 } as;
696
697 /* Start of RVALUE_OVERHEAD.
698 * Do not directly read these members from the RVALUE as they're located
699 * at the end of the slot (which may differ in size depending on the size
700 * pool). */
701#if RACTOR_CHECK_MODE
702 uint32_t _ractor_belonging_id;
703#endif
704#if GC_DEBUG
705 const char *file;
706 int line;
707#endif
708} RVALUE;
709
710#if RACTOR_CHECK_MODE
711# define RVALUE_OVERHEAD (sizeof(RVALUE) - offsetof(RVALUE, _ractor_belonging_id))
712#elif GC_DEBUG
713# define RVALUE_OVERHEAD (sizeof(RVALUE) - offsetof(RVALUE, file))
714#else
715# define RVALUE_OVERHEAD 0
716#endif
717
718STATIC_ASSERT(sizeof_rvalue, sizeof(RVALUE) == (SIZEOF_VALUE * 5) + RVALUE_OVERHEAD);
719STATIC_ASSERT(alignof_rvalue, RUBY_ALIGNOF(RVALUE) == SIZEOF_VALUE);
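/* [Editorial illustration, not part of upstream gc.c] On a typical 64-bit
 * build (SIZEOF_VALUE == 8) with RACTOR_CHECK_MODE and GC_DEBUG both off,
 * RVALUE_OVERHEAD is 0 and the assertion above pins the base slot size at
 * 5 * 8 = 40 bytes: the RBasic header (flags and klass) plus the three
 * v1/v2/v3 payload words of the "values" layout. */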
720
721typedef uintptr_t bits_t;
722enum {
723 BITS_SIZE = sizeof(bits_t),
724 BITS_BITLENGTH = ( BITS_SIZE * CHAR_BIT )
725};
726#define popcount_bits rb_popcount_intptr
727
727
728struct heap_page_header {
729 struct heap_page *page;
730};
731
732struct heap_page_body {
733 struct heap_page_header header;
734 /* char gap[]; */
735 /* RVALUE values[]; */
736};
737
738struct gc_list {
739 VALUE *varptr;
740 struct gc_list *next;
741};
742
743#define STACK_CHUNK_SIZE 500
744
745typedef struct stack_chunk {
746 VALUE data[STACK_CHUNK_SIZE];
747 struct stack_chunk *next;
748} stack_chunk_t;
749
750typedef struct mark_stack {
751 stack_chunk_t *chunk;
752 stack_chunk_t *cache;
753 int index;
754 int limit;
755 size_t cache_size;
756 size_t unused_cache_size;
757} mark_stack_t;
758
759#define SIZE_POOL_EDEN_HEAP(size_pool) (&(size_pool)->eden_heap)
760#define SIZE_POOL_TOMB_HEAP(size_pool) (&(size_pool)->tomb_heap)
761
762typedef int (*gc_compact_compare_func)(const void *l, const void *r, void *d);
763
764typedef struct rb_heap_struct {
765 struct heap_page *free_pages;
766 struct ccan_list_head pages;
767 struct heap_page *sweeping_page; /* iterator for .pages */
768 struct heap_page *compact_cursor;
769 uintptr_t compact_cursor_index;
770 struct heap_page *pooled_pages;
771 size_t total_pages; /* total page count in a heap */
772 size_t total_slots; /* total slot count (about total_pages * HEAP_PAGE_OBJ_LIMIT) */
773} rb_heap_t;
774
775typedef struct rb_size_pool_struct {
776 short slot_size;
777
778 size_t allocatable_pages;
779
780 /* Basic statistics */
781 size_t total_allocated_pages;
782 size_t total_freed_pages;
783 size_t force_major_gc_count;
784 size_t force_incremental_marking_finish_count;
785 size_t total_allocated_objects;
786 size_t total_freed_objects;
787
788 /* Sweeping statistics */
789 size_t freed_slots;
790 size_t empty_slots;
791
792 rb_heap_t eden_heap;
793 rb_heap_t tomb_heap;
794} rb_size_pool_t;
795
796enum gc_mode {
797 gc_mode_none,
798 gc_mode_marking,
799 gc_mode_sweeping,
800 gc_mode_compacting,
801};
802
803typedef struct rb_objspace {
804 struct {
805 size_t limit;
806 size_t increase;
807#if MALLOC_ALLOCATED_SIZE
808 size_t allocated_size;
809 size_t allocations;
810#endif
811
812 } malloc_params;
813
814 struct {
815 unsigned int mode : 2;
816 unsigned int immediate_sweep : 1;
817 unsigned int dont_gc : 1;
818 unsigned int dont_incremental : 1;
819 unsigned int during_gc : 1;
820 unsigned int during_compacting : 1;
821 unsigned int during_reference_updating : 1;
822 unsigned int gc_stressful: 1;
823 unsigned int has_newobj_hook: 1;
824 unsigned int during_minor_gc : 1;
825 unsigned int during_incremental_marking : 1;
826 unsigned int measure_gc : 1;
827 } flags;
828
829 rb_event_flag_t hook_events;
830 VALUE next_object_id;
831
832 rb_size_pool_t size_pools[SIZE_POOL_COUNT];
833
834 struct {
835 rb_atomic_t finalizing;
836 } atomic_flags;
837
838 mark_stack_t mark_stack;
839 size_t marked_slots;
840
841 struct {
842 struct heap_page **sorted;
843 size_t allocated_pages;
844 size_t allocatable_pages;
845 size_t sorted_length;
846 uintptr_t range[2];
847 size_t freeable_pages;
848
849 /* final */
850 size_t final_slots;
851 VALUE deferred_final;
852 } heap_pages;
853
854 st_table *finalizer_table;
855
856 struct {
857 int run;
858 unsigned int latest_gc_info;
859 gc_profile_record *records;
860 gc_profile_record *current_record;
861 size_t next_index;
862 size_t size;
863
864#if GC_PROFILE_MORE_DETAIL
865 double prepare_time;
866#endif
867 double invoke_time;
868
869 size_t minor_gc_count;
870 size_t major_gc_count;
871 size_t compact_count;
872 size_t read_barrier_faults;
873#if RGENGC_PROFILE > 0
874 size_t total_generated_normal_object_count;
875 size_t total_generated_shady_object_count;
876 size_t total_shade_operation_count;
877 size_t total_promoted_count;
878 size_t total_remembered_normal_object_count;
879 size_t total_remembered_shady_object_count;
880
881#if RGENGC_PROFILE >= 2
882 size_t generated_normal_object_count_types[RUBY_T_MASK];
883 size_t generated_shady_object_count_types[RUBY_T_MASK];
884 size_t shade_operation_count_types[RUBY_T_MASK];
885 size_t promoted_types[RUBY_T_MASK];
886 size_t remembered_normal_object_count_types[RUBY_T_MASK];
887 size_t remembered_shady_object_count_types[RUBY_T_MASK];
888#endif
889#endif /* RGENGC_PROFILE */
890
891 /* temporary profiling space */
892 double gc_sweep_start_time;
893 size_t total_allocated_objects_at_gc_start;
894 size_t heap_used_at_gc_start;
895
896 /* basic statistics */
897 size_t count;
898 uint64_t marking_time_ns;
899 struct timespec marking_start_time;
900 uint64_t sweeping_time_ns;
901 struct timespec sweeping_start_time;
902
903 /* Weak references */
904 size_t weak_references_count;
905 size_t retained_weak_references_count;
906 } profile;
907 struct gc_list *global_list;
908
909 VALUE gc_stress_mode;
910
911 struct {
912 VALUE parent_object;
913 int need_major_gc;
914 size_t last_major_gc;
915 size_t uncollectible_wb_unprotected_objects;
916 size_t uncollectible_wb_unprotected_objects_limit;
917 size_t old_objects;
918 size_t old_objects_limit;
919
920#if RGENGC_ESTIMATE_OLDMALLOC
921 size_t oldmalloc_increase;
922 size_t oldmalloc_increase_limit;
923#endif
924
925#if RGENGC_CHECK_MODE >= 2
926 struct st_table *allrefs_table;
927 size_t error_count;
928#endif
929 } rgengc;
930
931 struct {
932 size_t considered_count_table[T_MASK];
933 size_t moved_count_table[T_MASK];
934 size_t moved_up_count_table[T_MASK];
935 size_t moved_down_count_table[T_MASK];
936 size_t total_moved;
937
938 /* This function will be used, if set, to sort the heap prior to compaction */
939 gc_compact_compare_func compare_func;
940 } rcompactor;
941
942 struct {
943 size_t pooled_slots;
944 size_t step_slots;
945 } rincgc;
946
947 st_table *id_to_obj_tbl;
948 st_table *obj_to_id_tbl;
949
950#if GC_DEBUG_STRESS_TO_CLASS
951 VALUE stress_to_class;
952#endif
953
954 rb_darray(VALUE *) weak_references;
955 rb_postponed_job_handle_t finalize_deferred_pjob;
956} rb_objspace_t;
957
958
959#ifndef HEAP_PAGE_ALIGN_LOG
960/* default tiny heap size: 64KiB */
961#define HEAP_PAGE_ALIGN_LOG 16
962#endif
963
964#define BASE_SLOT_SIZE sizeof(RVALUE)
965
966#define CEILDIV(i, mod) roomof(i, mod)
967enum {
968 HEAP_PAGE_ALIGN = (1UL << HEAP_PAGE_ALIGN_LOG),
969 HEAP_PAGE_ALIGN_MASK = (~(~0UL << HEAP_PAGE_ALIGN_LOG)),
970 HEAP_PAGE_SIZE = HEAP_PAGE_ALIGN,
971 HEAP_PAGE_OBJ_LIMIT = (unsigned int)((HEAP_PAGE_SIZE - sizeof(struct heap_page_header)) / BASE_SLOT_SIZE),
972 HEAP_PAGE_BITMAP_LIMIT = CEILDIV(CEILDIV(HEAP_PAGE_SIZE, BASE_SLOT_SIZE), BITS_BITLENGTH),
973 HEAP_PAGE_BITMAP_SIZE = (BITS_SIZE * HEAP_PAGE_BITMAP_LIMIT),
974};
975#define HEAP_PAGE_ALIGN (1 << HEAP_PAGE_ALIGN_LOG)
976#define HEAP_PAGE_SIZE HEAP_PAGE_ALIGN
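/* [Editorial illustration, not part of upstream gc.c] With the default
 * HEAP_PAGE_ALIGN_LOG of 16 and a 40-byte BASE_SLOT_SIZE (64-bit build, no
 * RVALUE_OVERHEAD), the constants above evaluate roughly to:
 *
 *   HEAP_PAGE_SIZE         = 65536 bytes
 *   HEAP_PAGE_OBJ_LIMIT    = (65536 - sizeof(struct heap_page_header)) / 40
 *                            ~ 1638 base-size slots per page
 *   HEAP_PAGE_BITMAP_LIMIT = CEILDIV(CEILDIV(65536, 40), 64) = 26 words
 *   HEAP_PAGE_BITMAP_SIZE  = 8 * 26 = 208 bytes per bitmap
 */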
977
978#if !defined(INCREMENTAL_MARK_STEP_ALLOCATIONS)
979# define INCREMENTAL_MARK_STEP_ALLOCATIONS 500
980#endif
981
982#undef INIT_HEAP_PAGE_ALLOC_USE_MMAP
983/* Must define either HEAP_PAGE_ALLOC_USE_MMAP or
984 * INIT_HEAP_PAGE_ALLOC_USE_MMAP. */
985
986#ifndef HAVE_MMAP
987/* We can't use mmap of course, if it is not available. */
988static const bool HEAP_PAGE_ALLOC_USE_MMAP = false;
989
990#elif defined(__wasm__)
991/* wasmtime does not have proper support for mmap.
992 * See https://github.com/bytecodealliance/wasmtime/blob/main/docs/WASI-rationale.md#why-no-mmap-and-friends
993 */
994static const bool HEAP_PAGE_ALLOC_USE_MMAP = false;
995
996#elif HAVE_CONST_PAGE_SIZE
997/* If we have the PAGE_SIZE and it is a constant, then we can directly use it. */
998static const bool HEAP_PAGE_ALLOC_USE_MMAP = (PAGE_SIZE <= HEAP_PAGE_SIZE);
999
1000#elif defined(PAGE_MAX_SIZE) && (PAGE_MAX_SIZE <= HEAP_PAGE_SIZE)
1001/* If we can use the maximum page size. */
1002static const bool HEAP_PAGE_ALLOC_USE_MMAP = true;
1003
1004#elif defined(PAGE_SIZE)
1005/* If the PAGE_SIZE macro can be used dynamically. */
1006# define INIT_HEAP_PAGE_ALLOC_USE_MMAP (PAGE_SIZE <= HEAP_PAGE_SIZE)
1007
1008#elif defined(HAVE_SYSCONF) && defined(_SC_PAGE_SIZE)
1009/* If we can use sysconf to determine the page size. */
1010# define INIT_HEAP_PAGE_ALLOC_USE_MMAP (sysconf(_SC_PAGE_SIZE) <= HEAP_PAGE_SIZE)
1011
1012#else
1013/* Otherwise we can't determine the system page size, so don't use mmap. */
1014static const bool HEAP_PAGE_ALLOC_USE_MMAP = false;
1015#endif
1016
1017#ifdef INIT_HEAP_PAGE_ALLOC_USE_MMAP
1018/* We can determine the system page size at runtime. */
1019# define HEAP_PAGE_ALLOC_USE_MMAP (heap_page_alloc_use_mmap != false)
1020
1021static bool heap_page_alloc_use_mmap;
1022#endif
1023
1024#define RVALUE_AGE_BIT_COUNT 2
1025#define RVALUE_AGE_BIT_MASK (((bits_t)1 << RVALUE_AGE_BIT_COUNT) - 1)
1026
1027struct heap_page {
1028 short slot_size;
1029 short total_slots;
1030 short free_slots;
1031 short final_slots;
1032 short pinned_slots;
1033 struct {
1034 unsigned int before_sweep : 1;
1035 unsigned int has_remembered_objects : 1;
1036 unsigned int has_uncollectible_wb_unprotected_objects : 1;
1037 unsigned int in_tomb : 1;
1038 } flags;
1039
1040 rb_size_pool_t *size_pool;
1041
1042 struct heap_page *free_next;
1043 uintptr_t start;
1044 RVALUE *freelist;
1045 struct ccan_list_node page_node;
1046
1047 bits_t wb_unprotected_bits[HEAP_PAGE_BITMAP_LIMIT];
1048 /* the following three bitmaps are cleared at the beginning of full GC */
1049 bits_t mark_bits[HEAP_PAGE_BITMAP_LIMIT];
1050 bits_t uncollectible_bits[HEAP_PAGE_BITMAP_LIMIT];
1051 bits_t marking_bits[HEAP_PAGE_BITMAP_LIMIT];
1052
1053 bits_t remembered_bits[HEAP_PAGE_BITMAP_LIMIT];
1054
1055 /* If set, the object is not movable */
1056 bits_t pinned_bits[HEAP_PAGE_BITMAP_LIMIT];
1057 bits_t age_bits[HEAP_PAGE_BITMAP_LIMIT * RVALUE_AGE_BIT_COUNT];
1058};
1059
1060/*
1061 * When asan is enabled, this will prohibit writing to the freelist until it is unlocked
1062 */
1063static void
1064asan_lock_freelist(struct heap_page *page)
1065{
1066 asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
1067}
1068
1069/*
1070 * When asan is enabled, this will enable the ability to write to the freelist
1071 */
1072static void
1073asan_unlock_freelist(struct heap_page *page)
1074{
1075 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
1076}
1077
1078#define GET_PAGE_BODY(x) ((struct heap_page_body *)((bits_t)(x) & ~(HEAP_PAGE_ALIGN_MASK)))
1079#define GET_PAGE_HEADER(x) (&GET_PAGE_BODY(x)->header)
1080#define GET_HEAP_PAGE(x) (GET_PAGE_HEADER(x)->page)
1081
1082#define NUM_IN_PAGE(p) (((bits_t)(p) & HEAP_PAGE_ALIGN_MASK) / BASE_SLOT_SIZE)
1083#define BITMAP_INDEX(p) (NUM_IN_PAGE(p) / BITS_BITLENGTH )
1084#define BITMAP_OFFSET(p) (NUM_IN_PAGE(p) & (BITS_BITLENGTH-1))
1085#define BITMAP_BIT(p) ((bits_t)1 << BITMAP_OFFSET(p))
1086
1087/* Bitmap Operations */
1088#define MARKED_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] & BITMAP_BIT(p))
1089#define MARK_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] | BITMAP_BIT(p))
1090#define CLEAR_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] & ~BITMAP_BIT(p))
1091
1092/* getting bitmap */
1093#define GET_HEAP_MARK_BITS(x) (&GET_HEAP_PAGE(x)->mark_bits[0])
1094#define GET_HEAP_PINNED_BITS(x) (&GET_HEAP_PAGE(x)->pinned_bits[0])
1095#define GET_HEAP_UNCOLLECTIBLE_BITS(x) (&GET_HEAP_PAGE(x)->uncollectible_bits[0])
1096#define GET_HEAP_WB_UNPROTECTED_BITS(x) (&GET_HEAP_PAGE(x)->wb_unprotected_bits[0])
1097#define GET_HEAP_MARKING_BITS(x) (&GET_HEAP_PAGE(x)->marking_bits[0])
1098
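/* [Editorial illustration, not part of upstream gc.c] How an object pointer
 * maps onto these per-page bitmaps on a 64-bit build (addresses made up):
 *
 *   obj                = 0x7f21b4e42a80              some slot in a page
 *   GET_PAGE_BODY(obj) = 0x7f21b4e40000              low 16 bits masked off
 *   NUM_IN_PAGE(obj)   = 0x2a80 / 40     = 272       slot index within the page
 *   BITMAP_INDEX(obj)  = 272 / 64        = 4         which bits_t word
 *   BITMAP_BIT(obj)    = 1 << (272 % 64) = 1 << 16   which bit in that word
 *
 * MARKED_IN_BITMAP(), MARK_IN_BITMAP() and CLEAR_IN_BITMAP() then test, set
 * or clear that single bit in whichever bitmap is passed to them. */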
1099#define GC_SWEEP_PAGES_FREEABLE_PER_STEP 3
1100
1101#define RVALUE_AGE_BITMAP_INDEX(n) (NUM_IN_PAGE(n) / (BITS_BITLENGTH / RVALUE_AGE_BIT_COUNT))
1102#define RVALUE_AGE_BITMAP_OFFSET(n) ((NUM_IN_PAGE(n) % (BITS_BITLENGTH / RVALUE_AGE_BIT_COUNT)) * RVALUE_AGE_BIT_COUNT)
1103
1104#define RVALUE_OLD_AGE 3
1105
1106static int
1107RVALUE_AGE_GET(VALUE obj)
1108{
1109 bits_t *age_bits = GET_HEAP_PAGE(obj)->age_bits;
1110 return (int)(age_bits[RVALUE_AGE_BITMAP_INDEX(obj)] >> RVALUE_AGE_BITMAP_OFFSET(obj)) & RVALUE_AGE_BIT_MASK;
1111}
1112
1113static void
1114RVALUE_AGE_SET(VALUE obj, int age)
1115{
1116 RUBY_ASSERT(age <= RVALUE_OLD_AGE);
1117 bits_t *age_bits = GET_HEAP_PAGE(obj)->age_bits;
1118 // clear the bits
1119 age_bits[RVALUE_AGE_BITMAP_INDEX(obj)] &= ~(RVALUE_AGE_BIT_MASK << (RVALUE_AGE_BITMAP_OFFSET(obj)));
1120 // shift the correct value in
1121 age_bits[RVALUE_AGE_BITMAP_INDEX(obj)] |= ((bits_t)age << RVALUE_AGE_BITMAP_OFFSET(obj));
1122 if (age == RVALUE_OLD_AGE) {
1123 RB_FL_SET_RAW(obj, RUBY_FL_PROMOTED);
1124 }
1125 else {
1126 RB_FL_UNSET_RAW(obj, RUBY_FL_PROMOTED);
1127 }
1128}
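/* [Editorial illustration, not part of upstream gc.c] Ages are 2-bit fields
 * packed into page->age_bits, so one 64-bit bits_t word holds the ages of 32
 * consecutive slots. For a slot with NUM_IN_PAGE(obj) == 100:
 *
 *   RVALUE_AGE_BITMAP_INDEX(obj)  = 100 / 32        = 3
 *   RVALUE_AGE_BITMAP_OFFSET(obj) = (100 % 32) * 2  = 8
 *
 * so RVALUE_AGE_SET() clears bits 8..9 of age_bits[3] and ORs the new age
 * (0..RVALUE_OLD_AGE) into that position. */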
1129
1130/* Aliases */
1131#define rb_objspace (*rb_objspace_of(GET_VM()))
1132#define rb_objspace_of(vm) ((vm)->objspace)
1133#define unless_objspace(objspace) \
1134 rb_objspace_t *objspace; \
1135 rb_vm_t *unless_objspace_vm = GET_VM(); \
1136 if (unless_objspace_vm) objspace = unless_objspace_vm->objspace; \
1137 else /* return; or objspace will be warned uninitialized */
1138
1139#define ruby_initial_gc_stress gc_params.gc_stress
1140
1141VALUE *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
1142
1143#define malloc_limit objspace->malloc_params.limit
1144#define malloc_increase objspace->malloc_params.increase
1145#define malloc_allocated_size objspace->malloc_params.allocated_size
1146#define heap_pages_sorted objspace->heap_pages.sorted
1147#define heap_allocated_pages objspace->heap_pages.allocated_pages
1148#define heap_pages_sorted_length objspace->heap_pages.sorted_length
1149#define heap_pages_lomem objspace->heap_pages.range[0]
1150#define heap_pages_himem objspace->heap_pages.range[1]
1151#define heap_pages_freeable_pages objspace->heap_pages.freeable_pages
1152#define heap_pages_final_slots objspace->heap_pages.final_slots
1153#define heap_pages_deferred_final objspace->heap_pages.deferred_final
1154#define size_pools objspace->size_pools
1155#define during_gc objspace->flags.during_gc
1156#define finalizing objspace->atomic_flags.finalizing
1157#define finalizer_table objspace->finalizer_table
1158#define global_list objspace->global_list
1159#define ruby_gc_stressful objspace->flags.gc_stressful
1160#define ruby_gc_stress_mode objspace->gc_stress_mode
1161#if GC_DEBUG_STRESS_TO_CLASS
1162#define stress_to_class objspace->stress_to_class
1163#define set_stress_to_class(c) (stress_to_class = (c))
1164#else
1165#define stress_to_class (objspace, 0)
1166#define set_stress_to_class(c) (objspace, (c))
1167#endif
1168
1169#if 0
1170#define dont_gc_on() (fprintf(stderr, "dont_gc_on@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 1)
1171#define dont_gc_off() (fprintf(stderr, "dont_gc_off@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 0)
1172#define dont_gc_set(b) (fprintf(stderr, "dont_gc_set(%d)@%s:%d\n", (int)(b), __FILE__, __LINE__), objspace->flags.dont_gc = (b))
1173#define dont_gc_val() (objspace->flags.dont_gc)
1174#else
1175#define dont_gc_on() (objspace->flags.dont_gc = 1)
1176#define dont_gc_off() (objspace->flags.dont_gc = 0)
1177#define dont_gc_set(b) (((int)b), objspace->flags.dont_gc = (b))
1178#define dont_gc_val() (objspace->flags.dont_gc)
1179#endif
1180
1181static inline enum gc_mode
1182gc_mode_verify(enum gc_mode mode)
1183{
1184#if RGENGC_CHECK_MODE > 0
1185 switch (mode) {
1186 case gc_mode_none:
1187 case gc_mode_marking:
1188 case gc_mode_sweeping:
1189 case gc_mode_compacting:
1190 break;
1191 default:
1192 rb_bug("gc_mode_verify: unreachable (%d)", (int)mode);
1193 }
1194#endif
1195 return mode;
1196}
1197
1198static inline bool
1199has_sweeping_pages(rb_objspace_t *objspace)
1200{
1201 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1202 if (SIZE_POOL_EDEN_HEAP(&size_pools[i])->sweeping_page) {
1203 return TRUE;
1204 }
1205 }
1206 return FALSE;
1207}
1208
1209static inline size_t
1210heap_eden_total_pages(rb_objspace_t *objspace)
1211{
1212 size_t count = 0;
1213 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1214 count += SIZE_POOL_EDEN_HEAP(&size_pools[i])->total_pages;
1215 }
1216 return count;
1217}
1218
1219static inline size_t
1220heap_eden_total_slots(rb_objspace_t *objspace)
1221{
1222 size_t count = 0;
1223 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1224 count += SIZE_POOL_EDEN_HEAP(&size_pools[i])->total_slots;
1225 }
1226 return count;
1227}
1228
1229static inline size_t
1230heap_tomb_total_pages(rb_objspace_t *objspace)
1231{
1232 size_t count = 0;
1233 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1234 count += SIZE_POOL_TOMB_HEAP(&size_pools[i])->total_pages;
1235 }
1236 return count;
1237}
1238
1239static inline size_t
1240heap_allocatable_pages(rb_objspace_t *objspace)
1241{
1242 size_t count = 0;
1243 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1244 count += size_pools[i].allocatable_pages;
1245 }
1246 return count;
1247}
1248
1249static inline size_t
1250heap_allocatable_slots(rb_objspace_t *objspace)
1251{
1252 size_t count = 0;
1253 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1254 rb_size_pool_t *size_pool = &size_pools[i];
1255 int slot_size_multiple = size_pool->slot_size / BASE_SLOT_SIZE;
1256 count += size_pool->allocatable_pages * HEAP_PAGE_OBJ_LIMIT / slot_size_multiple;
1257 }
1258 return count;
1259}
1260
1261static inline size_t
1262total_allocated_pages(rb_objspace_t *objspace)
1263{
1264 size_t count = 0;
1265 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1266 rb_size_pool_t *size_pool = &size_pools[i];
1267 count += size_pool->total_allocated_pages;
1268 }
1269 return count;
1270}
1271
1272static inline size_t
1273total_freed_pages(rb_objspace_t *objspace)
1274{
1275 size_t count = 0;
1276 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1277 rb_size_pool_t *size_pool = &size_pools[i];
1278 count += size_pool->total_freed_pages;
1279 }
1280 return count;
1281}
1282
1283static inline size_t
1284total_allocated_objects(rb_objspace_t *objspace)
1285{
1286 size_t count = 0;
1287 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1288 rb_size_pool_t *size_pool = &size_pools[i];
1289 count += size_pool->total_allocated_objects;
1290 }
1291 return count;
1292}
1293
1294static inline size_t
1295total_freed_objects(rb_objspace_t *objspace)
1296{
1297 size_t count = 0;
1298 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1299 rb_size_pool_t *size_pool = &size_pools[i];
1300 count += size_pool->total_freed_objects;
1301 }
1302 return count;
1303}
1304
1305#define gc_mode(objspace) gc_mode_verify((enum gc_mode)(objspace)->flags.mode)
1306#define gc_mode_set(objspace, m) ((objspace)->flags.mode = (unsigned int)gc_mode_verify(m))
1307
1308#define is_marking(objspace) (gc_mode(objspace) == gc_mode_marking)
1309#define is_sweeping(objspace) (gc_mode(objspace) == gc_mode_sweeping)
1310#define is_full_marking(objspace) ((objspace)->flags.during_minor_gc == FALSE)
1311#define is_incremental_marking(objspace) ((objspace)->flags.during_incremental_marking != FALSE)
1312#define will_be_incremental_marking(objspace) ((objspace)->rgengc.need_major_gc != GPR_FLAG_NONE)
1313#define GC_INCREMENTAL_SWEEP_SLOT_COUNT 2048
1314#define GC_INCREMENTAL_SWEEP_POOL_SLOT_COUNT 1024
1315#define is_lazy_sweeping(objspace) (GC_ENABLE_LAZY_SWEEP && has_sweeping_pages(objspace))
1316
1317#if SIZEOF_LONG == SIZEOF_VOIDP
1318# define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG) /* unset FIXNUM_FLAG */
1319#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
1320# define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \
1321 ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1))
1322#else
1323# error not supported
1324#endif
1325
1326#define RANY(o) ((RVALUE*)(o))
1327
1328struct RZombie {
1329 struct RBasic basic;
1330 VALUE next;
1331 void (*dfree)(void *);
1332 void *data;
1333};
1334
1335#define RZOMBIE(o) ((struct RZombie *)(o))
1336
1337#define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]
1338
1339#if RUBY_MARK_FREE_DEBUG
1340int ruby_gc_debug_indent = 0;
1341#endif
1342
1343int ruby_disable_gc = 0;
1344int ruby_enable_autocompact = 0;
1345#if RGENGC_CHECK_MODE
1346gc_compact_compare_func ruby_autocompact_compare_func;
1347#endif
1348
1349void rb_iseq_mark_and_move(rb_iseq_t *iseq, bool reference_updating);
1350void rb_iseq_free(const rb_iseq_t *iseq);
1351size_t rb_iseq_memsize(const rb_iseq_t *iseq);
1352void rb_vm_update_references(void *ptr);
1353
1354void rb_gcdebug_print_obj_condition(VALUE obj);
1355
1356NORETURN(static void *gc_vraise(void *ptr));
1357NORETURN(static void gc_raise(VALUE exc, const char *fmt, ...));
1358NORETURN(static void negative_size_allocation_error(const char *));
1359
1360static void init_mark_stack(mark_stack_t *stack);
1361static int garbage_collect(rb_objspace_t *, unsigned int reason);
1362
1363static int gc_start(rb_objspace_t *objspace, unsigned int reason);
1364static void gc_rest(rb_objspace_t *objspace);
1365
1366enum gc_enter_event {
1367 gc_enter_event_start,
1368 gc_enter_event_continue,
1369 gc_enter_event_rest,
1370 gc_enter_event_finalizer,
1371 gc_enter_event_rb_memerror,
1372};
1373
1374static inline void gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev);
1375static inline void gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev);
1376static void gc_marking_enter(rb_objspace_t *objspace);
1377static void gc_marking_exit(rb_objspace_t *objspace);
1378static void gc_sweeping_enter(rb_objspace_t *objspace);
1379static void gc_sweeping_exit(rb_objspace_t *objspace);
1380static bool gc_marks_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap);
1381
1382static void gc_sweep(rb_objspace_t *objspace);
1383static void gc_sweep_finish_size_pool(rb_objspace_t *objspace, rb_size_pool_t *size_pool);
1384static void gc_sweep_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap);
1385
1386static inline void gc_mark(rb_objspace_t *objspace, VALUE ptr);
1387static inline void gc_pin(rb_objspace_t *objspace, VALUE ptr);
1388static inline void gc_mark_and_pin(rb_objspace_t *objspace, VALUE ptr);
1389NO_SANITIZE("memory", static void gc_mark_maybe(rb_objspace_t *objspace, VALUE ptr));
1390
1391static int gc_mark_stacked_objects_incremental(rb_objspace_t *, size_t count);
1392NO_SANITIZE("memory", static inline int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr));
1393
1394static size_t obj_memsize_of(VALUE obj, int use_all_types);
1395static void gc_verify_internal_consistency(rb_objspace_t *objspace);
1396
1397static void gc_stress_set(rb_objspace_t *objspace, VALUE flag);
1398static VALUE gc_disable_no_rest(rb_objspace_t *);
1399
1400static double getrusage_time(void);
1401static inline void gc_prof_setup_new_record(rb_objspace_t *objspace, unsigned int reason);
1402static inline void gc_prof_timer_start(rb_objspace_t *);
1403static inline void gc_prof_timer_stop(rb_objspace_t *);
1404static inline void gc_prof_mark_timer_start(rb_objspace_t *);
1405static inline void gc_prof_mark_timer_stop(rb_objspace_t *);
1406static inline void gc_prof_sweep_timer_start(rb_objspace_t *);
1407static inline void gc_prof_sweep_timer_stop(rb_objspace_t *);
1408static inline void gc_prof_set_malloc_info(rb_objspace_t *);
1409static inline void gc_prof_set_heap_info(rb_objspace_t *);
1410
1411#define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing) do { \
1412 if (gc_object_moved_p((_objspace), (VALUE)(_thing))) { \
1413 *(_type *)&(_thing) = (_type)RMOVED(_thing)->destination; \
1414 } \
1415} while (0)
1416
1417#define UPDATE_IF_MOVED(_objspace, _thing) TYPED_UPDATE_IF_MOVED(_objspace, VALUE, _thing)
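/* [Editorial illustration, not part of upstream gc.c] During the
 * reference-updating phase of compaction, per-type update code walks the
 * slots an object owns and rewrites any reference that now points at a
 * T_MOVED tombstone. A hypothetical updater (example_update_refs and
 * struct example_obj are not real gc.c names) might look like:
 */
#if 0
static void
example_update_refs(rb_objspace_t *objspace, struct example_obj *p)
{
    UPDATE_IF_MOVED(objspace, p->name);                    /* VALUE field */
    TYPED_UPDATE_IF_MOVED(objspace, rb_cref_t *, p->cref); /* non-VALUE pointer */
}
#endif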
1418
1419#define gc_prof_record(objspace) (objspace)->profile.current_record
1420#define gc_prof_enabled(objspace) ((objspace)->profile.run && (objspace)->profile.current_record)
1421
1422#ifdef HAVE_VA_ARGS_MACRO
1423# define gc_report(level, objspace, ...) \
1424 if (!RGENGC_DEBUG_ENABLED(level)) {} else gc_report_body(level, objspace, __VA_ARGS__)
1425#else
1426# define gc_report if (!RGENGC_DEBUG_ENABLED(0)) {} else gc_report_body
1427#endif
1428PRINTF_ARGS(static void gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...), 3, 4);
1429static const char *obj_info(VALUE obj);
1430static const char *obj_type_name(VALUE obj);
1431
1432static void gc_finalize_deferred(void *dmy);
1433
1434/*
1435 * 1 - TSC (H/W Time Stamp Counter)
1436 * 2 - getrusage
1437 */
1438#ifndef TICK_TYPE
1439#define TICK_TYPE 1
1440#endif
1441
1442#if USE_TICK_T
1443
1444#if TICK_TYPE == 1
1445/* the following code is only for internal tuning. */
1446
1447/* Source code to use RDTSC is quoted and modified from
1448 * https://www.mcs.anl.gov/~kazutomo/rdtsc.html
1449 * written by Kazutomo Yoshii <kazutomo@mcs.anl.gov>
1450 */
1451
1452#if defined(__GNUC__) && defined(__i386__)
1453typedef unsigned long long tick_t;
1454#define PRItick "llu"
1455static inline tick_t
1456tick(void)
1457{
1458 unsigned long long int x;
1459 __asm__ __volatile__ ("rdtsc" : "=A" (x));
1460 return x;
1461}
1462
1463#elif defined(__GNUC__) && defined(__x86_64__)
1464typedef unsigned long long tick_t;
1465#define PRItick "llu"
1466
1467static __inline__ tick_t
1468tick(void)
1469{
1470 unsigned long hi, lo;
1471 __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
1472 return ((unsigned long long)lo)|( ((unsigned long long)hi)<<32);
1473}
1474
1475#elif defined(__powerpc64__) && (GCC_VERSION_SINCE(4,8,0) || defined(__clang__))
1476typedef unsigned long long tick_t;
1477#define PRItick "llu"
1478
1479static __inline__ tick_t
1480tick(void)
1481{
1482 unsigned long long val = __builtin_ppc_get_timebase();
1483 return val;
1484}
1485
1486/* Implementation for macOS PPC by @nobu
1487 * See: https://github.com/ruby/ruby/pull/5975#discussion_r890045558
1488 */
1489#elif defined(__POWERPC__) && defined(__APPLE__)
1490typedef unsigned long long tick_t;
1491#define PRItick "llu"
1492
1493static __inline__ tick_t
1494tick(void)
1495{
1496 unsigned long int upper, lower, tmp;
1497 # define mftbu(r) __asm__ volatile("mftbu %0" : "=r"(r))
1498 # define mftb(r) __asm__ volatile("mftb %0" : "=r"(r))
1499 do {
1500 mftbu(upper);
1501 mftb(lower);
1502 mftbu(tmp);
1503 } while (tmp != upper);
1504 return ((tick_t)upper << 32) | lower;
1505}
1506
1507#elif defined(__aarch64__) && defined(__GNUC__)
1508typedef unsigned long tick_t;
1509#define PRItick "lu"
1510
1511static __inline__ tick_t
1512tick(void)
1513{
1514 unsigned long val;
1515 __asm__ __volatile__ ("mrs %0, cntvct_el0" : "=r" (val));
1516 return val;
1517}
1518
1519
1520#elif defined(_WIN32) && defined(_MSC_VER)
1521#include <intrin.h>
1522typedef unsigned __int64 tick_t;
1523#define PRItick "llu"
1524
1525static inline tick_t
1526tick(void)
1527{
1528 return __rdtsc();
1529}
1530
1531#else /* use clock */
1532typedef clock_t tick_t;
1533#define PRItick "llu"
1534
1535static inline tick_t
1536tick(void)
1537{
1538 return clock();
1539}
1540#endif /* TSC */
1541
1542#elif TICK_TYPE == 2
1543typedef double tick_t;
1544#define PRItick "4.9f"
1545
1546static inline tick_t
1547tick(void)
1548{
1549 return getrusage_time();
1550}
1551#else /* TICK_TYPE */
1552#error "choose tick type"
1553#endif /* TICK_TYPE */
1554
1555#define MEASURE_LINE(expr) do { \
1556 volatile tick_t start_time = tick(); \
1557 volatile tick_t end_time; \
1558 expr; \
1559 end_time = tick(); \
1560 fprintf(stderr, "0\t%"PRItick"\t%s\n", end_time - start_time, #expr); \
1561} while (0)
1562
1563#else /* USE_TICK_T */
1564#define MEASURE_LINE(expr) expr
1565#endif /* USE_TICK_T */
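/* [Editorial illustration, not part of upstream gc.c] MEASURE_LINE is a
 * tuning aid: wrapping a statement prints how many ticks it took, e.g.
 *
 *   MEASURE_LINE(gc_sweep(objspace));
 *
 * which samples tick() before and after the expression and writes the
 * difference to stderr, but only when one of the PRINT_* knobs above turns
 * USE_TICK_T on; otherwise it expands to the bare expression. */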
1566
1567static inline void *
1568asan_unpoison_object_temporary(VALUE obj)
1569{
1570 void *ptr = asan_poisoned_object_p(obj);
1571 asan_unpoison_object(obj, false);
1572 return ptr;
1573}
1574
1575static inline void *
1576asan_poison_object_restore(VALUE obj, void *ptr)
1577{
1578 if (ptr) {
1579 asan_poison_object(obj);
1580 }
1581 return NULL;
1582}
1583
1584#define asan_unpoisoning_object(obj) \
1585 for (void *poisoned = asan_unpoison_object_temporary(obj), \
1586 *unpoisoning = &poisoned; /* flag to loop just once */ \
1587 unpoisoning; \
1588 unpoisoning = asan_poison_object_restore(obj, poisoned))
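/* [Editorial illustration, not part of upstream gc.c] Typical use of the
 * macro above: unpoison a slot for the duration of one block and have it
 * re-poisoned automatically when the block exits (example_is_free_slot is
 * not a real gc.c function):
 */
#if 0
static int
example_is_free_slot(VALUE obj)
{
    int ret = 0;
    asan_unpoisoning_object(obj) {
        /* safe to read the slot here; it is re-poisoned after the block */
        ret = (BUILTIN_TYPE(obj) == T_NONE);
    }
    return ret;
}
#endif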
1589
1590#define FL_CHECK2(name, x, pred) \
1591 ((RGENGC_CHECK_MODE && SPECIAL_CONST_P(x)) ? \
1592 (rb_bug(name": SPECIAL_CONST (%p)", (void *)(x)), 0) : (pred))
1593#define FL_TEST2(x,f) FL_CHECK2("FL_TEST2", x, FL_TEST_RAW((x),(f)) != 0)
1594#define FL_SET2(x,f) FL_CHECK2("FL_SET2", x, RBASIC(x)->flags |= (f))
1595#define FL_UNSET2(x,f) FL_CHECK2("FL_UNSET2", x, RBASIC(x)->flags &= ~(f))
1596
1597#define RVALUE_MARK_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), (obj))
1598#define RVALUE_PIN_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), (obj))
1599#define RVALUE_PAGE_MARKED(page, obj) MARKED_IN_BITMAP((page)->mark_bits, (obj))
1600
1601#define RVALUE_WB_UNPROTECTED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), (obj))
1602#define RVALUE_UNCOLLECTIBLE_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), (obj))
1603#define RVALUE_MARKING_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), (obj))
1604
1605#define RVALUE_PAGE_WB_UNPROTECTED(page, obj) MARKED_IN_BITMAP((page)->wb_unprotected_bits, (obj))
1606#define RVALUE_PAGE_UNCOLLECTIBLE(page, obj) MARKED_IN_BITMAP((page)->uncollectible_bits, (obj))
1607#define RVALUE_PAGE_MARKING(page, obj) MARKED_IN_BITMAP((page)->marking_bits, (obj))
1608
1609static int rgengc_remember(rb_objspace_t *objspace, VALUE obj);
1610static void rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap);
1611static void rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap);
1612
1613static int
1614check_rvalue_consistency_force(const VALUE obj, int terminate)
1615{
1616 int err = 0;
1617 rb_objspace_t *objspace = &rb_objspace;
1618
1619 RB_VM_LOCK_ENTER_NO_BARRIER();
1620 {
1621 if (SPECIAL_CONST_P(obj)) {
1622 fprintf(stderr, "check_rvalue_consistency: %p is a special const.\n", (void *)obj);
1623 err++;
1624 }
1625 else if (!is_pointer_to_heap(objspace, (void *)obj)) {
1626 /* check if it is in tomb_pages */
1627 struct heap_page *page = NULL;
1628 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1629 rb_size_pool_t *size_pool = &size_pools[i];
1630 ccan_list_for_each(&size_pool->tomb_heap.pages, page, page_node) {
1631 if (page->start <= (uintptr_t)obj &&
1632 (uintptr_t)obj < (page->start + (page->total_slots * size_pool->slot_size))) {
1633 fprintf(stderr, "check_rvalue_consistency: %p is in a tomb_heap (%p).\n",
1634 (void *)obj, (void *)page);
1635 err++;
1636 goto skip;
1637 }
1638 }
1639 }
1640 bp();
1641 fprintf(stderr, "check_rvalue_consistency: %p is not a Ruby object.\n", (void *)obj);
1642 err++;
1643 skip:
1644 ;
1645 }
1646 else {
1647 const int wb_unprotected_bit = RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
1648 const int uncollectible_bit = RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
1649 const int mark_bit = RVALUE_MARK_BITMAP(obj) != 0;
1650 const int marking_bit = RVALUE_MARKING_BITMAP(obj) != 0;
1651 const int remembered_bit = MARKED_IN_BITMAP(GET_HEAP_PAGE(obj)->remembered_bits, obj) != 0;
1652 const int age = RVALUE_AGE_GET((VALUE)obj);
1653
1654 if (GET_HEAP_PAGE(obj)->flags.in_tomb) {
1655 fprintf(stderr, "check_rvalue_consistency: %s is in tomb page.\n", obj_info(obj));
1656 err++;
1657 }
1658 if (BUILTIN_TYPE(obj) == T_NONE) {
1659 fprintf(stderr, "check_rvalue_consistency: %s is T_NONE.\n", obj_info(obj));
1660 err++;
1661 }
1662 if (BUILTIN_TYPE(obj) == T_ZOMBIE) {
1663 fprintf(stderr, "check_rvalue_consistency: %s is T_ZOMBIE.\n", obj_info(obj));
1664 err++;
1665 }
1666
1667 obj_memsize_of((VALUE)obj, FALSE);
1668
1669 /* check generation
1670 *
1671 * OLD == age == 3 && old-bitmap && mark-bit (except incremental marking)
1672 */
1673 if (age > 0 && wb_unprotected_bit) {
1674 fprintf(stderr, "check_rvalue_consistency: %s is not WB protected, but age is %d > 0.\n", obj_info(obj), age);
1675 err++;
1676 }
1677
1678 if (!is_marking(objspace) && uncollectible_bit && !mark_bit) {
1679 fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but is not marked while !gc.\n", obj_info(obj));
1680 err++;
1681 }
1682
1683 if (!is_full_marking(objspace)) {
1684 if (uncollectible_bit && age != RVALUE_OLD_AGE && !wb_unprotected_bit) {
1685 fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but not old (age: %d) and not WB unprotected.\n",
1686 obj_info(obj), age);
1687 err++;
1688 }
1689 if (remembered_bit && age != RVALUE_OLD_AGE) {
1690 fprintf(stderr, "check_rvalue_consistency: %s is remembered, but not old (age: %d).\n",
1691 obj_info(obj), age);
1692 err++;
1693 }
1694 }
1695
1696 /*
1697 * check coloring
1698 *
1699 * marking:false marking:true
1700 * marked:false white *invalid*
1701 * marked:true black grey
1702 */
1703 if (is_incremental_marking(objspace) && marking_bit) {
1704 if (!is_marking(objspace) && !mark_bit) {
1705 fprintf(stderr, "check_rvalue_consistency: %s is marking, but not marked.\n", obj_info(obj));
1706 err++;
1707 }
1708 }
1709 }
1710 }
1711 RB_VM_LOCK_LEAVE_NO_BARRIER();
1712
1713 if (err > 0 && terminate) {
1714 rb_bug("check_rvalue_consistency_force: there are %d errors.", err);
1715 }
1716 return err;
1717}
1718
1719#if RGENGC_CHECK_MODE == 0
1720static inline VALUE
1721check_rvalue_consistency(const VALUE obj)
1722{
1723 return obj;
1724}
1725#else
1726static VALUE
1727check_rvalue_consistency(const VALUE obj)
1728{
1729 check_rvalue_consistency_force(obj, TRUE);
1730 return obj;
1731}
1732#endif
1733
1734static inline int
1735gc_object_moved_p(rb_objspace_t * objspace, VALUE obj)
1736{
1737 if (RB_SPECIAL_CONST_P(obj)) {
1738 return FALSE;
1739 }
1740 else {
1741 void *poisoned = asan_unpoison_object_temporary(obj);
1742
1743 int ret = BUILTIN_TYPE(obj) == T_MOVED;
1744 /* Re-poison slot if it's not the one we want */
1745 if (poisoned) {
1746 GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
1747 asan_poison_object(obj);
1748 }
1749 return ret;
1750 }
1751}
1752
1753static inline int
1754RVALUE_MARKED(VALUE obj)
1755{
1756 check_rvalue_consistency(obj);
1757 return RVALUE_MARK_BITMAP(obj) != 0;
1758}
1759
1760static inline int
1761RVALUE_PINNED(VALUE obj)
1762{
1763 check_rvalue_consistency(obj);
1764 return RVALUE_PIN_BITMAP(obj) != 0;
1765}
1766
1767static inline int
1768RVALUE_WB_UNPROTECTED(VALUE obj)
1769{
1770 check_rvalue_consistency(obj);
1771 return RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
1772}
1773
1774static inline int
1775RVALUE_MARKING(VALUE obj)
1776{
1777 check_rvalue_consistency(obj);
1778 return RVALUE_MARKING_BITMAP(obj) != 0;
1779}
1780
1781static inline int
1782RVALUE_REMEMBERED(VALUE obj)
1783{
1784 check_rvalue_consistency(obj);
1785 return MARKED_IN_BITMAP(GET_HEAP_PAGE(obj)->remembered_bits, obj) != 0;
1786}
1787
1788static inline int
1789RVALUE_UNCOLLECTIBLE(VALUE obj)
1790{
1791 check_rvalue_consistency(obj);
1792 return RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
1793}
1794
1795static inline int
1796RVALUE_OLD_P(VALUE obj)
1797{
1798 GC_ASSERT(!RB_SPECIAL_CONST_P(obj));
1799 check_rvalue_consistency(obj);
1800 // Because this will only ever be called on GC controlled objects,
1801 // we can use the faster _RAW function here
1802 return RB_OBJ_PROMOTED_RAW(obj);
1803}
1804
1805static inline void
1806RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
1807{
1808 MARK_IN_BITMAP(&page->uncollectible_bits[0], obj);
1809 objspace->rgengc.old_objects++;
1810
1811#if RGENGC_PROFILE >= 2
1812 objspace->profile.total_promoted_count++;
1813 objspace->profile.promoted_types[BUILTIN_TYPE(obj)]++;
1814#endif
1815}
1816
1817static inline void
1818RVALUE_OLD_UNCOLLECTIBLE_SET(rb_objspace_t *objspace, VALUE obj)
1819{
1820 RB_DEBUG_COUNTER_INC(obj_promote);
1821 RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, GET_HEAP_PAGE(obj), obj);
1822}
1823
1824/* set age to age+1 */
1825static inline void
1826RVALUE_AGE_INC(rb_objspace_t *objspace, VALUE obj)
1827{
1828 int age = RVALUE_AGE_GET((VALUE)obj);
1829
1830 if (RGENGC_CHECK_MODE && age == RVALUE_OLD_AGE) {
1831 rb_bug("RVALUE_AGE_INC: can not increment age of OLD object %s.", obj_info(obj));
1832 }
1833
1834 age++;
1835 RVALUE_AGE_SET(obj, age);
1836
1837 if (age == RVALUE_OLD_AGE) {
1838 RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
1839 }
1840
1841 check_rvalue_consistency(obj);
1842}
1843
1844static inline void
1845RVALUE_AGE_SET_CANDIDATE(rb_objspace_t *objspace, VALUE obj)
1846{
1847 check_rvalue_consistency(obj);
1848 GC_ASSERT(!RVALUE_OLD_P(obj));
1849 RVALUE_AGE_SET(obj, RVALUE_OLD_AGE - 1);
1850 check_rvalue_consistency(obj);
1851}
1852
1853static inline void
1854RVALUE_AGE_RESET(VALUE obj)
1855{
1856 RVALUE_AGE_SET(obj, 0);
1857}
1858
1859static inline void
1860RVALUE_DEMOTE(rb_objspace_t *objspace, VALUE obj)
1861{
1862 check_rvalue_consistency(obj);
1863 GC_ASSERT(RVALUE_OLD_P(obj));
1864
1865 if (!is_incremental_marking(objspace) && RVALUE_REMEMBERED(obj)) {
1866 CLEAR_IN_BITMAP(GET_HEAP_PAGE(obj)->remembered_bits, obj);
1867 }
1868
1869 CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), obj);
1870 RVALUE_AGE_RESET(obj);
1871
1872 if (RVALUE_MARKED(obj)) {
1873 objspace->rgengc.old_objects--;
1874 }
1875
1876 check_rvalue_consistency(obj);
1877}
1878
1879static inline int
1880RVALUE_BLACK_P(VALUE obj)
1881{
1882 return RVALUE_MARKED(obj) && !RVALUE_MARKING(obj);
1883}
1884
1885#if 0
1886static inline int
1887RVALUE_GREY_P(VALUE obj)
1888{
1889 return RVALUE_MARKED(obj) && RVALUE_MARKING(obj);
1890}
1891#endif
1892
1893static inline int
1894RVALUE_WHITE_P(VALUE obj)
1895{
1896 return RVALUE_MARKED(obj) == FALSE;
1897}
1898
1899/*
1900 --------------------------- ObjectSpace -----------------------------
1901*/
1902
1903static inline void *
1904calloc1(size_t n)
1905{
1906 return calloc(1, n);
1907}
1908
1909rb_objspace_t *
1910rb_objspace_alloc(void)
1911{
1912 rb_objspace_t *objspace = calloc1(sizeof(rb_objspace_t));
1913 objspace->flags.measure_gc = 1;
1914 malloc_limit = gc_params.malloc_limit_min;
1915 objspace->finalize_deferred_pjob = rb_postponed_job_preregister(0, gc_finalize_deferred, objspace);
1916 if (objspace->finalize_deferred_pjob == POSTPONED_JOB_HANDLE_INVALID) {
1917 rb_bug("Could not preregister postponed job for GC");
1918 }
1919
1920 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1921 rb_size_pool_t *size_pool = &size_pools[i];
1922
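        /* Each size pool serves slots of (1 << i) * BASE_SLOT_SIZE bytes, i.e. 1x, 2x,
         * 4x, ... multiples of the base slot size. */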
1923 size_pool->slot_size = (1 << i) * BASE_SLOT_SIZE;
1924
1925 ccan_list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages);
1926 ccan_list_head_init(&SIZE_POOL_TOMB_HEAP(size_pool)->pages);
1927 }
1928
1929 rb_darray_make_without_gc(&objspace->weak_references, 0);
1930
1931 dont_gc_on();
1932
1933 return objspace;
1934}
1935
1936static void free_stack_chunks(mark_stack_t *);
1937static void mark_stack_free_cache(mark_stack_t *);
1938static void heap_page_free(rb_objspace_t *objspace, struct heap_page *page);
1939
1940void
1941rb_objspace_free(rb_objspace_t *objspace)
1942{
1943 if (is_lazy_sweeping(objspace))
1944 rb_bug("lazy sweeping underway when freeing object space");
1945
1946 free(objspace->profile.records);
1947 objspace->profile.records = NULL;
1948
1949 if (global_list) {
1950 struct gc_list *list, *next;
1951 for (list = global_list; list; list = next) {
1952 next = list->next;
1953 xfree(list);
1954 }
1955 }
1956 if (heap_pages_sorted) {
1957 size_t i;
1958 size_t total_heap_pages = heap_allocated_pages;
1959 for (i = 0; i < total_heap_pages; ++i) {
1960 heap_page_free(objspace, heap_pages_sorted[i]);
1961 }
1962 free(heap_pages_sorted);
1963 heap_allocated_pages = 0;
1964 heap_pages_sorted_length = 0;
1965 heap_pages_lomem = 0;
1966 heap_pages_himem = 0;
1967
1968 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1969 rb_size_pool_t *size_pool = &size_pools[i];
1970 SIZE_POOL_EDEN_HEAP(size_pool)->total_pages = 0;
1971 SIZE_POOL_EDEN_HEAP(size_pool)->total_slots = 0;
1972 }
1973 }
1974 st_free_table(objspace->id_to_obj_tbl);
1975 st_free_table(objspace->obj_to_id_tbl);
1976
1977 free_stack_chunks(&objspace->mark_stack);
1978 mark_stack_free_cache(&objspace->mark_stack);
1979
1980 rb_darray_free_without_gc(objspace->weak_references);
1981
1982 free(objspace);
1983}
1984
1985static void
1986heap_pages_expand_sorted_to(rb_objspace_t *objspace, size_t next_length)
1987{
1988 struct heap_page **sorted;
1989 size_t size = size_mul_or_raise(next_length, sizeof(struct heap_page *), rb_eRuntimeError);
1990
1991 gc_report(3, objspace, "heap_pages_expand_sorted: next_length: %"PRIdSIZE", size: %"PRIdSIZE"\n",
1992 next_length, size);
1993
1994 if (heap_pages_sorted_length > 0) {
1995 sorted = (struct heap_page **)realloc(heap_pages_sorted, size);
1996 if (sorted) heap_pages_sorted = sorted;
1997 }
1998 else {
1999 sorted = heap_pages_sorted = (struct heap_page **)malloc(size);
2000 }
2001
2002 if (sorted == 0) {
2003 rb_memerror();
2004 }
2005
2006 heap_pages_sorted_length = next_length;
2007}
2008
2009static void
2010heap_pages_expand_sorted(rb_objspace_t *objspace)
2011{
2012    /* Usually heap_allocatable_pages + heap_eden->total_pages == heap_pages_sorted_length,
2013     * because heap_allocatable_pages already includes heap_tomb->total_pages (tomb pages are recycled).
2014     * However, if there are pages that do not have empty slots, new pages may be created,
2015     * so the additional allocatable page counts (heap_tomb->total_pages) are added as well.
2016 */
2017 size_t next_length = heap_allocatable_pages(objspace);
2018 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
2019 rb_size_pool_t *size_pool = &size_pools[i];
2020 next_length += SIZE_POOL_EDEN_HEAP(size_pool)->total_pages;
2021 next_length += SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
2022 }
2023
2024 if (next_length > heap_pages_sorted_length) {
2025 heap_pages_expand_sorted_to(objspace, next_length);
2026 }
2027
2028 GC_ASSERT(heap_allocatable_pages(objspace) + heap_eden_total_pages(objspace) <= heap_pages_sorted_length);
2029 GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
2030}
2031
2032static void
2033size_pool_allocatable_pages_set(rb_objspace_t *objspace, rb_size_pool_t *size_pool, size_t s)
2034{
2035 size_pool->allocatable_pages = s;
2036 heap_pages_expand_sorted(objspace);
2037}
2038
2039static inline void
2040heap_page_add_freeobj(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
2041{
2042 ASSERT_vm_locking();
2043
2044 RVALUE *p = (RVALUE *)obj;
2045
2046 asan_unpoison_object(obj, false);
2047
2048 asan_unlock_freelist(page);
2049
2050 p->as.free.flags = 0;
2051 p->as.free.next = page->freelist;
2052 page->freelist = p;
2053 asan_lock_freelist(page);
2054
2055 RVALUE_AGE_RESET(obj);
2056
2057 if (RGENGC_CHECK_MODE &&
2058 /* obj should belong to page */
2059 !(page->start <= (uintptr_t)obj &&
2060 (uintptr_t)obj < ((uintptr_t)page->start + (page->total_slots * page->slot_size)) &&
2061 obj % BASE_SLOT_SIZE == 0)) {
2062 rb_bug("heap_page_add_freeobj: %p is not rvalue.", (void *)p);
2063 }
2064
2065 asan_poison_object(obj);
2066 gc_report(3, objspace, "heap_page_add_freeobj: add %p to freelist\n", (void *)obj);
2067}
2068
2069static inline void
2070heap_add_freepage(rb_heap_t *heap, struct heap_page *page)
2071{
2072 asan_unlock_freelist(page);
2073 GC_ASSERT(page->free_slots != 0);
2074 GC_ASSERT(page->freelist != NULL);
2075
2076 page->free_next = heap->free_pages;
2077 heap->free_pages = page;
2078
2079 RUBY_DEBUG_LOG("page:%p freelist:%p", (void *)page, (void *)page->freelist);
2080
2081 asan_lock_freelist(page);
2082}
2083
2084static inline void
2085heap_add_poolpage(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
2086{
2087 asan_unlock_freelist(page);
2088 GC_ASSERT(page->free_slots != 0);
2089 GC_ASSERT(page->freelist != NULL);
2090
2091 page->free_next = heap->pooled_pages;
2092 heap->pooled_pages = page;
2093 objspace->rincgc.pooled_slots += page->free_slots;
2094
2095 asan_lock_freelist(page);
2096}
2097
2098static void
2099heap_unlink_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
2100{
2101 ccan_list_del(&page->page_node);
2102 heap->total_pages--;
2103 heap->total_slots -= page->total_slots;
2104}
2105
2106static void rb_aligned_free(void *ptr, size_t size);
2107
2108static void
2109heap_page_body_free(struct heap_page_body *page_body)
2110{
2111 GC_ASSERT((uintptr_t)page_body % HEAP_PAGE_ALIGN == 0);
2112
2113 if (HEAP_PAGE_ALLOC_USE_MMAP) {
2114#ifdef HAVE_MMAP
2115 GC_ASSERT(HEAP_PAGE_SIZE % sysconf(_SC_PAGE_SIZE) == 0);
2116 if (munmap(page_body, HEAP_PAGE_SIZE)) {
2117 rb_bug("heap_page_body_free: munmap failed");
2118 }
2119#endif
2120 }
2121 else {
2122 rb_aligned_free(page_body, HEAP_PAGE_SIZE);
2123 }
2124}
2125
2126static void
2127heap_page_free(rb_objspace_t *objspace, struct heap_page *page)
2128{
2129 heap_allocated_pages--;
2130 page->size_pool->total_freed_pages++;
2131 heap_page_body_free(GET_PAGE_BODY(page->start));
2132 free(page);
2133}
2134
2135static void
2136heap_pages_free_unused_pages(rb_objspace_t *objspace)
2137{
2138 size_t i, j;
2139
2140 bool has_pages_in_tomb_heap = FALSE;
2141 for (i = 0; i < SIZE_POOL_COUNT; i++) {
2142 if (!ccan_list_empty(&SIZE_POOL_TOMB_HEAP(&size_pools[i])->pages)) {
2143 has_pages_in_tomb_heap = TRUE;
2144 break;
2145 }
2146 }
2147
2148 if (has_pages_in_tomb_heap) {
2149 for (i = j = 0; j < heap_allocated_pages; i++) {
2150 struct heap_page *page = heap_pages_sorted[i];
2151
2152 if (page->flags.in_tomb && page->free_slots == page->total_slots) {
2153 heap_unlink_page(objspace, SIZE_POOL_TOMB_HEAP(page->size_pool), page);
2154 heap_page_free(objspace, page);
2155 }
2156 else {
2157 if (i != j) {
2158 heap_pages_sorted[j] = page;
2159 }
2160 j++;
2161 }
2162 }
2163
2164 struct heap_page *hipage = heap_pages_sorted[heap_allocated_pages - 1];
2165 uintptr_t himem = (uintptr_t)hipage->start + (hipage->total_slots * hipage->slot_size);
2166 GC_ASSERT(himem <= heap_pages_himem);
2167 heap_pages_himem = himem;
2168
2169 struct heap_page *lopage = heap_pages_sorted[0];
2170 uintptr_t lomem = (uintptr_t)lopage->start;
2171 GC_ASSERT(lomem >= heap_pages_lomem);
2172 heap_pages_lomem = lomem;
2173
2174 GC_ASSERT(j == heap_allocated_pages);
2175 }
2176}
2177
2178static struct heap_page_body *
2179heap_page_body_allocate(void)
2180{
2181 struct heap_page_body *page_body;
2182
2183 if (HEAP_PAGE_ALLOC_USE_MMAP) {
2184#ifdef HAVE_MMAP
2185 GC_ASSERT(HEAP_PAGE_ALIGN % sysconf(_SC_PAGE_SIZE) == 0);
2186
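        /* Over-allocate by HEAP_PAGE_ALIGN, then munmap the misaligned head and tail
         * below so that the surviving HEAP_PAGE_SIZE region is HEAP_PAGE_ALIGN-aligned. */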
2187 char *ptr = mmap(NULL, HEAP_PAGE_ALIGN + HEAP_PAGE_SIZE,
2188 PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
2189 if (ptr == MAP_FAILED) {
2190 return NULL;
2191 }
2192
2193 char *aligned = ptr + HEAP_PAGE_ALIGN;
2194 aligned -= ((VALUE)aligned & (HEAP_PAGE_ALIGN - 1));
2195 GC_ASSERT(aligned > ptr);
2196 GC_ASSERT(aligned <= ptr + HEAP_PAGE_ALIGN);
2197
2198 size_t start_out_of_range_size = aligned - ptr;
2199 GC_ASSERT(start_out_of_range_size % sysconf(_SC_PAGE_SIZE) == 0);
2200 if (start_out_of_range_size > 0) {
2201 if (munmap(ptr, start_out_of_range_size)) {
2202 rb_bug("heap_page_body_allocate: munmap failed for start");
2203 }
2204 }
2205
2206 size_t end_out_of_range_size = HEAP_PAGE_ALIGN - start_out_of_range_size;
2207 GC_ASSERT(end_out_of_range_size % sysconf(_SC_PAGE_SIZE) == 0);
2208 if (end_out_of_range_size > 0) {
2209 if (munmap(aligned + HEAP_PAGE_SIZE, end_out_of_range_size)) {
2210 rb_bug("heap_page_body_allocate: munmap failed for end");
2211 }
2212 }
2213
2214 page_body = (struct heap_page_body *)aligned;
2215#endif
2216 }
2217 else {
2218 page_body = rb_aligned_malloc(HEAP_PAGE_ALIGN, HEAP_PAGE_SIZE);
2219 }
2220
2221 GC_ASSERT((uintptr_t)page_body % HEAP_PAGE_ALIGN == 0);
2222
2223 return page_body;
2224}
2225
2226static struct heap_page *
2227heap_page_allocate(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
2228{
2229 uintptr_t start, end, p;
2230 struct heap_page *page;
2231 uintptr_t hi, lo, mid;
2232 size_t stride = size_pool->slot_size;
2233 unsigned int limit = (unsigned int)((HEAP_PAGE_SIZE - sizeof(struct heap_page_header)))/(int)stride;
2234
2235 /* assign heap_page body (contains heap_page_header and RVALUEs) */
2236 struct heap_page_body *page_body = heap_page_body_allocate();
2237 if (page_body == 0) {
2238 rb_memerror();
2239 }
2240
2241 /* assign heap_page entry */
2242 page = calloc1(sizeof(struct heap_page));
2243 if (page == 0) {
2244 heap_page_body_free(page_body);
2245 rb_memerror();
2246 }
2247
2248    /* adjust obj_limit (the number of objects that fit in this page) */
2249 start = (uintptr_t)((VALUE)page_body + sizeof(struct heap_page_header));
2250
2251 if (start % BASE_SLOT_SIZE != 0) {
2252 int delta = BASE_SLOT_SIZE - (start % BASE_SLOT_SIZE);
2253 start = start + delta;
2254 GC_ASSERT(NUM_IN_PAGE(start) == 0 || NUM_IN_PAGE(start) == 1);
2255
2256 /* Find a num in page that is evenly divisible by `stride`.
2257 * This is to ensure that objects are aligned with bit planes.
2258 * In other words, ensure there are an even number of objects
2259 * per bit plane. */
2260 if (NUM_IN_PAGE(start) == 1) {
2261 start += stride - BASE_SLOT_SIZE;
2262 }
2263
2264 GC_ASSERT(NUM_IN_PAGE(start) * BASE_SLOT_SIZE % stride == 0);
2265
2266 limit = (HEAP_PAGE_SIZE - (int)(start - (uintptr_t)page_body))/(int)stride;
2267 }
2268 end = start + (limit * (int)stride);
2269
2270 /* setup heap_pages_sorted */
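    /* Binary search for the insertion index that keeps heap_pages_sorted ordered by page
     * start address; finding an equal start address would mean a duplicate page. */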
2271 lo = 0;
2272 hi = (uintptr_t)heap_allocated_pages;
2273 while (lo < hi) {
2274 struct heap_page *mid_page;
2275
2276 mid = (lo + hi) / 2;
2277 mid_page = heap_pages_sorted[mid];
2278 if ((uintptr_t)mid_page->start < start) {
2279 lo = mid + 1;
2280 }
2281 else if ((uintptr_t)mid_page->start > start) {
2282 hi = mid;
2283 }
2284 else {
2285 rb_bug("same heap page is allocated: %p at %"PRIuVALUE, (void *)page_body, (VALUE)mid);
2286 }
2287 }
2288
2289 if (hi < (uintptr_t)heap_allocated_pages) {
2290 MEMMOVE(&heap_pages_sorted[hi+1], &heap_pages_sorted[hi], struct heap_page_header*, heap_allocated_pages - hi);
2291 }
2292
2293 heap_pages_sorted[hi] = page;
2294
2295 heap_allocated_pages++;
2296
2297 GC_ASSERT(heap_eden_total_pages(objspace) + heap_allocatable_pages(objspace) <= heap_pages_sorted_length);
2298 GC_ASSERT(heap_eden_total_pages(objspace) + heap_tomb_total_pages(objspace) == heap_allocated_pages - 1);
2299 GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
2300
2301 size_pool->total_allocated_pages++;
2302
2303 if (heap_allocated_pages > heap_pages_sorted_length) {
2304 rb_bug("heap_page_allocate: allocated(%"PRIdSIZE") > sorted(%"PRIdSIZE")",
2305 heap_allocated_pages, heap_pages_sorted_length);
2306 }
2307
2308 if (heap_pages_lomem == 0 || heap_pages_lomem > start) heap_pages_lomem = start;
2309 if (heap_pages_himem < end) heap_pages_himem = end;
2310
2311 page->start = start;
2312 page->total_slots = limit;
2313 page->slot_size = size_pool->slot_size;
2314 page->size_pool = size_pool;
2315 page_body->header.page = page;
2316
2317 for (p = start; p != end; p += stride) {
2318 gc_report(3, objspace, "assign_heap_page: %p is added to freelist\n", (void *)p);
2319 heap_page_add_freeobj(objspace, page, (VALUE)p);
2320 }
2321 page->free_slots = limit;
2322
2323 asan_lock_freelist(page);
2324 return page;
2325}
2326
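/* Reuse a tomb page that still has free slots, if any, before allocating a brand-new page body. */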
2327static struct heap_page *
2328heap_page_resurrect(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
2329{
2330 struct heap_page *page = 0, *next;
2331
2332 ccan_list_for_each_safe(&SIZE_POOL_TOMB_HEAP(size_pool)->pages, page, next, page_node) {
2333 asan_unlock_freelist(page);
2334 if (page->freelist != NULL) {
2335 heap_unlink_page(objspace, &size_pool->tomb_heap, page);
2336 asan_lock_freelist(page);
2337 return page;
2338 }
2339 }
2340
2341 return NULL;
2342}
2343
2344static struct heap_page *
2345heap_page_create(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
2346{
2347 struct heap_page *page;
2348 const char *method = "recycle";
2349
2350 size_pool->allocatable_pages--;
2351
2352 page = heap_page_resurrect(objspace, size_pool);
2353
2354 if (page == NULL) {
2355 page = heap_page_allocate(objspace, size_pool);
2356 method = "allocate";
2357 }
2358 if (0) fprintf(stderr, "heap_page_create: %s - %p, "
2359                   "heap_pages_sorted_length: %"PRIdSIZE", "
2360 "heap_allocated_pages: %"PRIdSIZE", "
2361 "tomb->total_pages: %"PRIdSIZE"\n",
2362 method, (void *)page, heap_pages_sorted_length, heap_allocated_pages, SIZE_POOL_TOMB_HEAP(size_pool)->total_pages);
2363 return page;
2364}
2365
2366static void
2367heap_add_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap, struct heap_page *page)
2368{
2369 /* Adding to eden heap during incremental sweeping is forbidden */
2370 GC_ASSERT(!(heap == SIZE_POOL_EDEN_HEAP(size_pool) && heap->sweeping_page));
2371 page->flags.in_tomb = (heap == SIZE_POOL_TOMB_HEAP(size_pool));
2372 ccan_list_add_tail(&heap->pages, &page->page_node);
2373 heap->total_pages++;
2374 heap->total_slots += page->total_slots;
2375}
2376
2377static void
2378heap_assign_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
2379{
2380 struct heap_page *page = heap_page_create(objspace, size_pool);
2381 heap_add_page(objspace, size_pool, heap, page);
2382 heap_add_freepage(heap, page);
2383}
2384
2385#if GC_CAN_COMPILE_COMPACTION
2386static void
2387heap_add_pages(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap, size_t add)
2388{
2389 size_t i;
2390
2391 size_pool_allocatable_pages_set(objspace, size_pool, add);
2392
2393 for (i = 0; i < add; i++) {
2394 heap_assign_page(objspace, size_pool, heap);
2395 }
2396
2397 GC_ASSERT(size_pool->allocatable_pages == 0);
2398}
2399#endif
2400
2401static size_t
2402slots_to_pages_for_size_pool(rb_objspace_t *objspace, rb_size_pool_t *size_pool, size_t slots)
2403{
2404 size_t multiple = size_pool->slot_size / BASE_SLOT_SIZE;
2405 /* Due to alignment, heap pages may have one less slot. We should
2406     * ensure there are enough pages to guarantee that we will have at
2407 * least the required number of slots after allocating all the pages. */
2408 size_t slots_per_page = (HEAP_PAGE_OBJ_LIMIT / multiple) - 1;
2409 return CEILDIV(slots, slots_per_page);
2410}
2411
2412static size_t
2413minimum_pages_for_size_pool(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
2414{
2415 size_t size_pool_idx = size_pool - size_pools;
2416 size_t init_slots = gc_params.size_pool_init_slots[size_pool_idx];
2417 return slots_to_pages_for_size_pool(objspace, size_pool, init_slots);
2418}
2419
2420static size_t
2421heap_extend_pages(rb_objspace_t *objspace, rb_size_pool_t *size_pool, size_t free_slots, size_t total_slots, size_t used)
2422{
2423 double goal_ratio = gc_params.heap_free_slots_goal_ratio;
2424 size_t next_used;
2425
2426 if (goal_ratio == 0.0) {
2427 next_used = (size_t)(used * gc_params.growth_factor);
2428 }
2429 else if (total_slots == 0) {
2430 next_used = minimum_pages_for_size_pool(objspace, size_pool);
2431 }
2432 else {
2433 /* Find `f' where free_slots = f * total_slots * goal_ratio
2434 * => f = (total_slots - free_slots) / ((1 - goal_ratio) * total_slots)
2435 */
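        /* Illustrative numbers: with goal_ratio 0.40, total_slots 10000 and free_slots 2000,
         * f = 8000 / (0.6 * 10000) = 1.33, so next_used is about 1.33 * used (subject to the
         * growth_factor cap and the minimum-growth floor below). */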
2436 double f = (double)(total_slots - free_slots) / ((1 - goal_ratio) * total_slots);
2437
2438 if (f > gc_params.growth_factor) f = gc_params.growth_factor;
2439 if (f < 1.0) f = 1.1;
2440
2441 next_used = (size_t)(f * used);
2442
2443 if (0) {
2444 fprintf(stderr,
2445 "free_slots(%8"PRIuSIZE")/total_slots(%8"PRIuSIZE")=%1.2f,"
2446 " G(%1.2f), f(%1.2f),"
2447 " used(%8"PRIuSIZE") => next_used(%8"PRIuSIZE")\n",
2448 free_slots, total_slots, free_slots/(double)total_slots,
2449 goal_ratio, f, used, next_used);
2450 }
2451 }
2452
2453 if (gc_params.growth_max_slots > 0) {
2454 size_t max_used = (size_t)(used + gc_params.growth_max_slots/HEAP_PAGE_OBJ_LIMIT);
2455 if (next_used > max_used) next_used = max_used;
2456 }
2457
2458 size_t extend_page_count = next_used - used;
2459 /* Extend by at least 1 page. */
2460 if (extend_page_count == 0) extend_page_count = 1;
2461
2462 return extend_page_count;
2463}
2464
2465static int
2466heap_increment(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
2467{
2468 if (size_pool->allocatable_pages > 0) {
2469 gc_report(1, objspace, "heap_increment: heap_pages_sorted_length: %"PRIdSIZE", "
2470 "heap_pages_inc: %"PRIdSIZE", heap->total_pages: %"PRIdSIZE"\n",
2471 heap_pages_sorted_length, size_pool->allocatable_pages, heap->total_pages);
2472
2473 GC_ASSERT(heap_allocatable_pages(objspace) + heap_eden_total_pages(objspace) <= heap_pages_sorted_length);
2474 GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
2475
2476 heap_assign_page(objspace, size_pool, heap);
2477 return TRUE;
2478 }
2479 return FALSE;
2480}
2481
2482static void
2483gc_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
2484{
2485 unsigned int lock_lev;
2486 gc_enter(objspace, gc_enter_event_continue, &lock_lev);
2487
2488 /* Continue marking if in incremental marking. */
2489 if (is_incremental_marking(objspace)) {
2490 if (gc_marks_continue(objspace, size_pool, heap)) {
2491 gc_sweep(objspace);
2492 }
2493 }
2494
2495 /* Continue sweeping if in lazy sweeping or the previous incremental
2496 * marking finished and did not yield a free page. */
2497 if (heap->free_pages == NULL && is_lazy_sweeping(objspace)) {
2498 gc_sweep_continue(objspace, size_pool, heap);
2499 }
2500
2501 gc_exit(objspace, gc_enter_event_continue, &lock_lev);
2502}
2503
2504static void
2505heap_prepare(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
2506{
2507 GC_ASSERT(heap->free_pages == NULL);
2508
2509 /* Continue incremental marking or lazy sweeping, if in any of those steps. */
2510 gc_continue(objspace, size_pool, heap);
2511
2512    /* If we still don't have a free page and are not allowed to create a new page,
2513 * we should start a new GC cycle. */
2514 if (heap->free_pages == NULL &&
2515 (will_be_incremental_marking(objspace) ||
2516 (heap_increment(objspace, size_pool, heap) == FALSE))) {
2517 if (gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) {
2518 rb_memerror();
2519 }
2520 else {
2521 /* Do steps of incremental marking or lazy sweeping if the GC run permits. */
2522 gc_continue(objspace, size_pool, heap);
2523
2524            /* If we're not doing incremental marking (e.g. a minor GC) or have finished
2525 * sweeping and still don't have a free page, then
2526 * gc_sweep_finish_size_pool should allow us to create a new page. */
2527 if (heap->free_pages == NULL && !heap_increment(objspace, size_pool, heap)) {
2528 if (objspace->rgengc.need_major_gc == GPR_FLAG_NONE) {
2529 rb_bug("cannot create a new page after GC");
2530 }
2531                else { // Major GC is required, which will allow us to create a new page
2532 if (gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) {
2533 rb_memerror();
2534 }
2535 else {
2536 /* Do steps of incremental marking or lazy sweeping. */
2537 gc_continue(objspace, size_pool, heap);
2538
2539 if (heap->free_pages == NULL &&
2540 !heap_increment(objspace, size_pool, heap)) {
2541 rb_bug("cannot create a new page after major GC");
2542 }
2543 }
2544 }
2545 }
2546 }
2547 }
2548
2549 GC_ASSERT(heap->free_pages != NULL);
2550}
2551
2552void
2553rb_objspace_set_event_hook(const rb_event_flag_t event)
2554{
2555 rb_objspace_t *objspace = &rb_objspace;
2556 objspace->hook_events = event & RUBY_INTERNAL_EVENT_OBJSPACE_MASK;
2557 objspace->flags.has_newobj_hook = !!(objspace->hook_events & RUBY_INTERNAL_EVENT_NEWOBJ);
2558}
2559
2560static void
2561gc_event_hook_body(rb_execution_context_t *ec, rb_objspace_t *objspace, const rb_event_flag_t event, VALUE data)
2562{
2563 if (UNLIKELY(!ec->cfp)) return;
2564 EXEC_EVENT_HOOK(ec, event, ec->cfp->self, 0, 0, 0, data);
2565}
2566
2567#define gc_event_newobj_hook_needed_p(objspace) ((objspace)->flags.has_newobj_hook)
2568#define gc_event_hook_needed_p(objspace, event) ((objspace)->hook_events & (event))
2569
2570#define gc_event_hook_prep(objspace, event, data, prep) do { \
2571 if (UNLIKELY(gc_event_hook_needed_p(objspace, event))) { \
2572 prep; \
2573 gc_event_hook_body(GET_EC(), (objspace), (event), (data)); \
2574 } \
2575} while (0)
2576
2577#define gc_event_hook(objspace, event, data) gc_event_hook_prep(objspace, event, data, (void)0)
2578
2579static inline VALUE
2580newobj_init(VALUE klass, VALUE flags, int wb_protected, rb_objspace_t *objspace, VALUE obj)
2581{
2582#if !__has_feature(memory_sanitizer)
2583 GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
2584 GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
2585#endif
2586 RVALUE *p = RANY(obj);
2587 p->as.basic.flags = flags;
2588 *((VALUE *)&p->as.basic.klass) = klass;
2589
2590 int t = flags & RUBY_T_MASK;
2591 if (t == T_CLASS || t == T_MODULE || t == T_ICLASS) {
2592 RVALUE_AGE_SET_CANDIDATE(objspace, obj);
2593 }
2594
2595#if RACTOR_CHECK_MODE
2596 rb_ractor_setup_belonging(obj);
2597#endif
2598
2599#if RGENGC_CHECK_MODE
2600 p->as.values.v1 = p->as.values.v2 = p->as.values.v3 = 0;
2601
2602 RB_VM_LOCK_ENTER_NO_BARRIER();
2603 {
2604 check_rvalue_consistency(obj);
2605
2606 GC_ASSERT(RVALUE_MARKED(obj) == FALSE);
2607 GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
2608 GC_ASSERT(RVALUE_OLD_P(obj) == FALSE);
2609 GC_ASSERT(RVALUE_WB_UNPROTECTED(obj) == FALSE);
2610
2611 if (RVALUE_REMEMBERED((VALUE)obj)) rb_bug("newobj: %s is remembered.", obj_info(obj));
2612 }
2613 RB_VM_LOCK_LEAVE_NO_BARRIER();
2614#endif
2615
2616 if (UNLIKELY(wb_protected == FALSE)) {
2617 ASSERT_vm_locking();
2618 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
2619 }
2620
2621#if RGENGC_PROFILE
2622 if (wb_protected) {
2623 objspace->profile.total_generated_normal_object_count++;
2624#if RGENGC_PROFILE >= 2
2625 objspace->profile.generated_normal_object_count_types[BUILTIN_TYPE(obj)]++;
2626#endif
2627 }
2628 else {
2629 objspace->profile.total_generated_shady_object_count++;
2630#if RGENGC_PROFILE >= 2
2631 objspace->profile.generated_shady_object_count_types[BUILTIN_TYPE(obj)]++;
2632#endif
2633 }
2634#endif
2635
2636#if GC_DEBUG
2637 RANY(obj)->file = rb_source_location_cstr(&RANY(obj)->line);
2638 GC_ASSERT(!SPECIAL_CONST_P(obj)); /* check alignment */
2639#endif
2640
2641 gc_report(5, objspace, "newobj: %s\n", obj_info(obj));
2642
2643 // RUBY_DEBUG_LOG("obj:%p (%s)", (void *)obj, obj_type_name(obj));
2644 return obj;
2645}
2646
2647size_t
2648rb_gc_obj_slot_size(VALUE obj)
2649{
2650 return GET_HEAP_PAGE(obj)->slot_size - RVALUE_OVERHEAD;
2651}
2652
2653static inline size_t
2654size_pool_slot_size(unsigned char pool_id)
2655{
2656 GC_ASSERT(pool_id < SIZE_POOL_COUNT);
2657
2658 size_t slot_size = (1 << pool_id) * BASE_SLOT_SIZE;
2659
2660#if RGENGC_CHECK_MODE
2661 rb_objspace_t *objspace = &rb_objspace;
2662 GC_ASSERT(size_pools[pool_id].slot_size == (short)slot_size);
2663#endif
2664
2665 slot_size -= RVALUE_OVERHEAD;
2666
2667 return slot_size;
2668}
2669
2670size_t
2671rb_size_pool_slot_size(unsigned char pool_id)
2672{
2673 return size_pool_slot_size(pool_id);
2674}
2675
2676bool
2677rb_gc_size_allocatable_p(size_t size)
2678{
2679 return size <= size_pool_slot_size(SIZE_POOL_COUNT - 1);
2680}
2681
2682static inline VALUE
2683ractor_cache_allocate_slot(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache,
2684 size_t size_pool_idx)
2685{
2686 rb_ractor_newobj_size_pool_cache_t *size_pool_cache = &cache->size_pool_caches[size_pool_idx];
2687 RVALUE *p = size_pool_cache->freelist;
2688
2689 if (is_incremental_marking(objspace)) {
2690 // Not allowed to allocate without running an incremental marking step
2691 if (cache->incremental_mark_step_allocated_slots >= INCREMENTAL_MARK_STEP_ALLOCATIONS) {
2692 return Qfalse;
2693 }
2694
2695 if (p) {
2696 cache->incremental_mark_step_allocated_slots++;
2697 }
2698 }
2699
2700 if (p) {
2701 VALUE obj = (VALUE)p;
2702 MAYBE_UNUSED(const size_t) stride = size_pool_slot_size(size_pool_idx);
2703 size_pool_cache->freelist = p->as.free.next;
2704 asan_unpoison_memory_region(p, stride, true);
2705#if RGENGC_CHECK_MODE
2706 GC_ASSERT(rb_gc_obj_slot_size(obj) == stride);
2707 // zero clear
2708 MEMZERO((char *)obj, char, stride);
2709#endif
2710 return obj;
2711 }
2712 else {
2713 return Qfalse;
2714 }
2715}
2716
2717static struct heap_page *
2718heap_next_free_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
2719{
2720 ASSERT_vm_locking();
2721
2722 struct heap_page *page;
2723
2724 if (heap->free_pages == NULL) {
2725 heap_prepare(objspace, size_pool, heap);
2726 }
2727
2728 page = heap->free_pages;
2729 heap->free_pages = page->free_next;
2730
2731 GC_ASSERT(page->free_slots != 0);
2732 RUBY_DEBUG_LOG("page:%p freelist:%p cnt:%d", (void *)page, (void *)page->freelist, page->free_slots);
2733
2734 asan_unlock_freelist(page);
2735
2736 return page;
2737}
2738
2739static inline void
2740ractor_cache_set_page(rb_ractor_newobj_cache_t *cache, size_t size_pool_idx,
2741 struct heap_page *page)
2742{
2743 gc_report(3, &rb_objspace, "ractor_set_cache: Using page %p\n", (void *)GET_PAGE_BODY(page->start));
2744
2745 rb_ractor_newobj_size_pool_cache_t *size_pool_cache = &cache->size_pool_caches[size_pool_idx];
2746
2747 GC_ASSERT(size_pool_cache->freelist == NULL);
2748 GC_ASSERT(page->free_slots != 0);
2749 GC_ASSERT(page->freelist != NULL);
2750
2751 size_pool_cache->using_page = page;
2752 size_pool_cache->freelist = page->freelist;
2753 page->free_slots = 0;
2754 page->freelist = NULL;
2755
2756 asan_unpoison_object((VALUE)size_pool_cache->freelist, false);
2757 GC_ASSERT(RB_TYPE_P((VALUE)size_pool_cache->freelist, T_NONE));
2758 asan_poison_object((VALUE)size_pool_cache->freelist);
2759}
2760
2761static inline VALUE
2762newobj_fill(VALUE obj, VALUE v1, VALUE v2, VALUE v3)
2763{
2764 RVALUE *p = (RVALUE *)obj;
2765 p->as.values.v1 = v1;
2766 p->as.values.v2 = v2;
2767 p->as.values.v3 = v3;
2768 return obj;
2769}
2770
2771static inline size_t
2772size_pool_idx_for_size(size_t size)
2773{
2774 size += RVALUE_OVERHEAD;
2775
2776 size_t slot_count = CEILDIV(size, BASE_SLOT_SIZE);
2777
2778 /* size_pool_idx is ceil(log2(slot_count)) */
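    /* Example (assuming BASE_SLOT_SIZE is 40 and RVALUE_OVERHEAD is 0, as on typical 64-bit
     * builds): a 100-byte request needs CEILDIV(100, 40) == 3 base slots, and
     * ceil(log2(3)) == 2, so it is served from the (1 << 2) * 40 == 160-byte size pool. */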
2779 size_t size_pool_idx = 64 - nlz_int64(slot_count - 1);
2780
2781 if (size_pool_idx >= SIZE_POOL_COUNT) {
2782 rb_bug("size_pool_idx_for_size: allocation size too large "
2783 "(size=%"PRIuSIZE"u, size_pool_idx=%"PRIuSIZE"u)", size, size_pool_idx);
2784 }
2785
2786#if RGENGC_CHECK_MODE
2787 rb_objspace_t *objspace = &rb_objspace;
2788 GC_ASSERT(size <= (size_t)size_pools[size_pool_idx].slot_size);
2789 if (size_pool_idx > 0) GC_ASSERT(size > (size_t)size_pools[size_pool_idx - 1].slot_size);
2790#endif
2791
2792 return size_pool_idx;
2793}
2794
2795static VALUE
2796newobj_alloc(rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx, bool vm_locked)
2797{
2798 rb_size_pool_t *size_pool = &size_pools[size_pool_idx];
2799 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
2800 rb_ractor_newobj_cache_t *cache = &cr->newobj_cache;
2801
2802 VALUE obj = ractor_cache_allocate_slot(objspace, cache, size_pool_idx);
2803
2804 if (UNLIKELY(obj == Qfalse)) {
2805 unsigned int lev;
2806 bool unlock_vm = false;
2807
2808 if (!vm_locked) {
2809 RB_VM_LOCK_ENTER_CR_LEV(cr, &lev);
2810 vm_locked = true;
2811 unlock_vm = true;
2812 }
2813
2814 {
2815 ASSERT_vm_locking();
2816
2817 if (is_incremental_marking(objspace)) {
2818 gc_continue(objspace, size_pool, heap);
2819 cache->incremental_mark_step_allocated_slots = 0;
2820
2821 // Retry allocation after resetting incremental_mark_step_allocated_slots
2822 obj = ractor_cache_allocate_slot(objspace, cache, size_pool_idx);
2823 }
2824
2825 if (obj == Qfalse) {
2826 // Get next free page (possibly running GC)
2827 struct heap_page *page = heap_next_free_page(objspace, size_pool, heap);
2828 ractor_cache_set_page(cache, size_pool_idx, page);
2829
2830 // Retry allocation after moving to new page
2831 obj = ractor_cache_allocate_slot(objspace, cache, size_pool_idx);
2832
2833 GC_ASSERT(obj != Qfalse);
2834 }
2835 }
2836
2837 if (unlock_vm) {
2838 RB_VM_LOCK_LEAVE_CR_LEV(cr, &lev);
2839 }
2840 }
2841
2842 size_pool->total_allocated_objects++;
2843
2844 return obj;
2845}
2846
2847static void
2848newobj_zero_slot(VALUE obj)
2849{
2850 memset((char *)obj + sizeof(struct RBasic), 0, rb_gc_obj_slot_size(obj) - sizeof(struct RBasic));
2851}
2852
2853ALWAYS_INLINE(static VALUE newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *cr, int wb_protected, size_t size_pool_idx));
2854
2855static inline VALUE
2856newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *cr, int wb_protected, size_t size_pool_idx)
2857{
2858 VALUE obj;
2859 unsigned int lev;
2860
2861 RB_VM_LOCK_ENTER_CR_LEV(cr, &lev);
2862 {
2863 if (UNLIKELY(during_gc || ruby_gc_stressful)) {
2864 if (during_gc) {
2865 dont_gc_on();
2866 during_gc = 0;
2867 rb_bug("object allocation during garbage collection phase");
2868 }
2869
2870 if (ruby_gc_stressful) {
2871 if (!garbage_collect(objspace, GPR_FLAG_NEWOBJ)) {
2872 rb_memerror();
2873 }
2874 }
2875 }
2876
2877 obj = newobj_alloc(objspace, cr, size_pool_idx, true);
2878 newobj_init(klass, flags, wb_protected, objspace, obj);
2879
2880 gc_event_hook_prep(objspace, RUBY_INTERNAL_EVENT_NEWOBJ, obj, newobj_zero_slot(obj));
2881 }
2882 RB_VM_LOCK_LEAVE_CR_LEV(cr, &lev);
2883
2884 return obj;
2885}
2886
2887NOINLINE(static VALUE newobj_slowpath_wb_protected(VALUE klass, VALUE flags,
2888 rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx));
2889NOINLINE(static VALUE newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags,
2890 rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx));
2891
2892static VALUE
2893newobj_slowpath_wb_protected(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx)
2894{
2895 return newobj_slowpath(klass, flags, objspace, cr, TRUE, size_pool_idx);
2896}
2897
2898static VALUE
2899newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx)
2900{
2901 return newobj_slowpath(klass, flags, objspace, cr, FALSE, size_pool_idx);
2902}
2903
2904static inline VALUE
2905newobj_of0(VALUE klass, VALUE flags, int wb_protected, rb_ractor_t *cr, size_t alloc_size)
2906{
2907 VALUE obj;
2908 rb_objspace_t *objspace = &rb_objspace;
2909
2910 RB_DEBUG_COUNTER_INC(obj_newobj);
2911 (void)RB_DEBUG_COUNTER_INC_IF(obj_newobj_wb_unprotected, !wb_protected);
2912
2913 if (UNLIKELY(stress_to_class)) {
2914 long i, cnt = RARRAY_LEN(stress_to_class);
2915 for (i = 0; i < cnt; ++i) {
2916 if (klass == RARRAY_AREF(stress_to_class, i)) rb_memerror();
2917 }
2918 }
2919
2920 size_t size_pool_idx = size_pool_idx_for_size(alloc_size);
2921
2922 if (SHAPE_IN_BASIC_FLAGS || (flags & RUBY_T_MASK) == T_OBJECT) {
2923 flags |= (VALUE)size_pool_idx << SHAPE_FLAG_SHIFT;
2924 }
2925
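    /* Fast path: when no GC is in progress, GC stress is off, no NEWOBJ hook is installed
     * and the object is WB-protected, allocation can usually be served straight from the
     * per-ractor cache without the slow path's VM lock and event hook. */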
2926 if (!UNLIKELY(during_gc ||
2927 ruby_gc_stressful ||
2928 gc_event_newobj_hook_needed_p(objspace)) &&
2929 wb_protected) {
2930 obj = newobj_alloc(objspace, cr, size_pool_idx, false);
2931 newobj_init(klass, flags, wb_protected, objspace, obj);
2932 }
2933 else {
2934 RB_DEBUG_COUNTER_INC(obj_newobj_slowpath);
2935
2936 obj = wb_protected ?
2937 newobj_slowpath_wb_protected(klass, flags, objspace, cr, size_pool_idx) :
2938 newobj_slowpath_wb_unprotected(klass, flags, objspace, cr, size_pool_idx);
2939 }
2940
2941 return obj;
2942}
2943
2944static inline VALUE
2945newobj_of(rb_ractor_t *cr, VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protected, size_t alloc_size)
2946{
2947 VALUE obj = newobj_of0(klass, flags, wb_protected, cr, alloc_size);
2948 return newobj_fill(obj, v1, v2, v3);
2949}
2950
2951VALUE
2952rb_wb_unprotected_newobj_of(VALUE klass, VALUE flags, size_t size)
2953{
2954 GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
2955 return newobj_of(GET_RACTOR(), klass, flags, 0, 0, 0, FALSE, size);
2956}
2957
2958VALUE
2959rb_wb_protected_newobj_of(rb_execution_context_t *ec, VALUE klass, VALUE flags, size_t size)
2960{
2961 GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
2962 return newobj_of(rb_ec_ractor_ptr(ec), klass, flags, 0, 0, 0, TRUE, size);
2963}
2964
2965/* for compatibility */
2966
2967VALUE
2968rb_newobj(void)
2969{
2970 return newobj_of(GET_RACTOR(), 0, T_NONE, 0, 0, 0, FALSE, RVALUE_SIZE);
2971}
2972
2973static VALUE
2974rb_class_instance_allocate_internal(VALUE klass, VALUE flags, bool wb_protected)
2975{
2976 GC_ASSERT((flags & RUBY_T_MASK) == T_OBJECT);
2977 GC_ASSERT(flags & ROBJECT_EMBED);
2978
2979 size_t size;
2980 uint32_t index_tbl_num_entries = RCLASS_EXT(klass)->max_iv_count;
2981
2982 size = rb_obj_embedded_size(index_tbl_num_entries);
2983 if (!rb_gc_size_allocatable_p(size)) {
2984 size = sizeof(struct RObject);
2985 }
2986
2987 VALUE obj = newobj_of(GET_RACTOR(), klass, flags, 0, 0, 0, wb_protected, size);
2988 RUBY_ASSERT(rb_shape_get_shape(obj)->type == SHAPE_ROOT);
2989
2990 // Set the shape to the specific T_OBJECT shape which is always
2991 // SIZE_POOL_COUNT away from the root shape.
2992 ROBJECT_SET_SHAPE_ID(obj, ROBJECT_SHAPE_ID(obj) + SIZE_POOL_COUNT);
2993
2994#if RUBY_DEBUG
2995 RUBY_ASSERT(!rb_shape_obj_too_complex(obj));
2996 VALUE *ptr = ROBJECT_IVPTR(obj);
2997 for (size_t i = 0; i < ROBJECT_IV_CAPACITY(obj); i++) {
2998 ptr[i] = Qundef;
2999 }
3000#endif
3001
3002 return obj;
3003}
3004
3005VALUE
3006rb_newobj_of(VALUE klass, VALUE flags)
3007{
3008 if ((flags & RUBY_T_MASK) == T_OBJECT) {
3009 return rb_class_instance_allocate_internal(klass, (flags | ROBJECT_EMBED) & ~FL_WB_PROTECTED, flags & FL_WB_PROTECTED);
3010 }
3011 else {
3012 return newobj_of(GET_RACTOR(), klass, flags & ~FL_WB_PROTECTED, 0, 0, 0, flags & FL_WB_PROTECTED, RVALUE_SIZE);
3013 }
3014}
3015
3016#define UNEXPECTED_NODE(func) \
3017 rb_bug(#func"(): GC does not handle T_NODE 0x%x(%p) 0x%"PRIxVALUE, \
3018 BUILTIN_TYPE(obj), (void*)(obj), RBASIC(obj)->flags)
3019
3020const char *
3021rb_imemo_name(enum imemo_type type)
3022{
3023 // put no default case to get a warning if an imemo type is missing
3024 switch (type) {
3025#define IMEMO_NAME(x) case imemo_##x: return #x;
3026 IMEMO_NAME(env);
3027 IMEMO_NAME(cref);
3028 IMEMO_NAME(svar);
3029 IMEMO_NAME(throw_data);
3030 IMEMO_NAME(ifunc);
3031 IMEMO_NAME(memo);
3032 IMEMO_NAME(ment);
3033 IMEMO_NAME(iseq);
3034 IMEMO_NAME(tmpbuf);
3035 IMEMO_NAME(ast);
3036 IMEMO_NAME(parser_strterm);
3037 IMEMO_NAME(callinfo);
3038 IMEMO_NAME(callcache);
3039 IMEMO_NAME(constcache);
3040#undef IMEMO_NAME
3041 }
3042 return "unknown";
3043}
3044
3045#undef rb_imemo_new
3046
3047VALUE
3048rb_imemo_new(enum imemo_type type, VALUE v1, VALUE v2, VALUE v3, VALUE v0)
3049{
3050 size_t size = RVALUE_SIZE;
3051 VALUE flags = T_IMEMO | (type << FL_USHIFT);
3052 return newobj_of(GET_RACTOR(), v0, flags, v1, v2, v3, TRUE, size);
3053}
3054
3055static VALUE
3056rb_imemo_tmpbuf_new(VALUE v1, VALUE v2, VALUE v3, VALUE v0)
3057{
3058 size_t size = sizeof(struct rb_imemo_tmpbuf_struct);
3059 VALUE flags = T_IMEMO | (imemo_tmpbuf << FL_USHIFT);
3060 return newobj_of(GET_RACTOR(), v0, flags, v1, v2, v3, FALSE, size);
3061}
3062
3063static VALUE
3064rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(void *buf, size_t cnt)
3065{
3066 return rb_imemo_tmpbuf_new((VALUE)buf, 0, (VALUE)cnt, 0);
3067}
3068
3069rb_imemo_tmpbuf_t *
3070rb_imemo_tmpbuf_parser_heap(void *buf, rb_imemo_tmpbuf_t *old_heap, size_t cnt)
3071{
3072 return (rb_imemo_tmpbuf_t *)rb_imemo_tmpbuf_new((VALUE)buf, (VALUE)old_heap, (VALUE)cnt, 0);
3073}
3074
3075static size_t
3076imemo_memsize(VALUE obj)
3077{
3078 size_t size = 0;
3079 switch (imemo_type(obj)) {
3080 case imemo_ment:
3081 size += sizeof(RANY(obj)->as.imemo.ment.def);
3082 break;
3083 case imemo_iseq:
3084 size += rb_iseq_memsize((rb_iseq_t *)obj);
3085 break;
3086 case imemo_env:
3087 size += RANY(obj)->as.imemo.env.env_size * sizeof(VALUE);
3088 break;
3089 case imemo_tmpbuf:
3090 size += RANY(obj)->as.imemo.alloc.cnt * sizeof(VALUE);
3091 break;
3092 case imemo_ast:
3093 size += rb_ast_memsize(&RANY(obj)->as.imemo.ast);
3094 break;
3095 case imemo_cref:
3096 case imemo_svar:
3097 case imemo_throw_data:
3098 case imemo_ifunc:
3099 case imemo_memo:
3100 case imemo_parser_strterm:
3101 break;
3102 default:
3103 /* unreachable */
3104 break;
3105 }
3106 return size;
3107}
3108
3109#if IMEMO_DEBUG
3110VALUE
3111rb_imemo_new_debug(enum imemo_type type, VALUE v1, VALUE v2, VALUE v3, VALUE v0, const char *file, int line)
3112{
3113 VALUE memo = rb_imemo_new(type, v1, v2, v3, v0);
3114 fprintf(stderr, "memo %p (type: %d) @ %s:%d\n", (void *)memo, imemo_type(memo), file, line);
3115 return memo;
3116}
3117#endif
3118
3119VALUE
3120rb_class_allocate_instance(VALUE klass)
3121{
3122 return rb_class_instance_allocate_internal(klass, T_OBJECT | ROBJECT_EMBED, RGENGC_WB_PROTECTED_OBJECT);
3123}
3124
3125static inline void
3126rb_data_object_check(VALUE klass)
3127{
3128 if (klass != rb_cObject && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
3129 rb_undef_alloc_func(klass);
3130 rb_warn("undefining the allocator of T_DATA class %"PRIsVALUE, klass);
3131 }
3132}
3133
3134VALUE
3135rb_data_object_wrap(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
3136{
3137    RUBY_ASSERT_ALWAYS(dfree != (RUBY_DATA_FUNC)1);
3138 if (klass) rb_data_object_check(klass);
3139 return newobj_of(GET_RACTOR(), klass, T_DATA, (VALUE)dmark, (VALUE)dfree, (VALUE)datap, !dmark, sizeof(struct RTypedData));
3140}
3141
3142VALUE
3143rb_data_object_zalloc(VALUE klass, size_t size, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
3144{
3145 VALUE obj = rb_data_object_wrap(klass, 0, dmark, dfree);
3146 DATA_PTR(obj) = xcalloc(1, size);
3147 return obj;
3148}
3149
3150static VALUE
3151typed_data_alloc(VALUE klass, VALUE typed_flag, void *datap, const rb_data_type_t *type, size_t size)
3152{
3153 RBIMPL_NONNULL_ARG(type);
3154 if (klass) rb_data_object_check(klass);
3155 bool wb_protected = (type->flags & RUBY_FL_WB_PROTECTED) || !type->function.dmark;
3156 return newobj_of(GET_RACTOR(), klass, T_DATA, (VALUE)type, 1 | typed_flag, (VALUE)datap, wb_protected, size);
3157}
3158
3159VALUE
3160rb_data_typed_object_wrap(VALUE klass, void *datap, const rb_data_type_t *type)
3161{
3162 if (UNLIKELY(type->flags & RUBY_TYPED_EMBEDDABLE)) {
3163 rb_raise(rb_eTypeError, "Cannot wrap an embeddable TypedData");
3164 }
3165
3166 return typed_data_alloc(klass, 0, datap, type, sizeof(struct RTypedData));
3167}
3168
3169VALUE
3170rb_data_typed_object_zalloc(VALUE klass, size_t size, const rb_data_type_t *type)
3171{
3172 if (type->flags & RUBY_TYPED_EMBEDDABLE) {
3173 if (!(type->flags & RUBY_TYPED_FREE_IMMEDIATELY)) {
3174 rb_raise(rb_eTypeError, "Embeddable TypedData must be freed immediately");
3175 }
3176
3177 size_t embed_size = offsetof(struct RTypedData, data) + size;
3178 if (rb_gc_size_allocatable_p(embed_size)) {
3179 VALUE obj = typed_data_alloc(klass, TYPED_DATA_EMBEDDED, 0, type, embed_size);
3180 memset((char *)obj + offsetof(struct RTypedData, data), 0, size);
3181 return obj;
3182 }
3183 }
3184
3185 VALUE obj = typed_data_alloc(klass, 0, NULL, type, sizeof(struct RTypedData));
3186 DATA_PTR(obj) = xcalloc(1, size);
3187 return obj;
3188}
3189
3190size_t
3191rb_objspace_data_type_memsize(VALUE obj)
3192{
3193 size_t size = 0;
3194 if (RTYPEDDATA_P(obj)) {
3195 const rb_data_type_t *type = RTYPEDDATA_TYPE(obj);
3196 const void *ptr = RTYPEDDATA_GET_DATA(obj);
3197
3198 if (RTYPEDDATA_TYPE(obj)->flags & RUBY_TYPED_EMBEDDABLE && !RTYPEDDATA_EMBEDDED_P(obj)) {
3199#ifdef HAVE_MALLOC_USABLE_SIZE
3200 size += malloc_usable_size((void *)ptr);
3201#endif
3202 }
3203
3204 if (ptr && type->function.dsize) {
3205 size += type->function.dsize(ptr);
3206 }
3207 }
3208
3209 return size;
3210}
3211
3212const char *
3213rb_objspace_data_type_name(VALUE obj)
3214{
3215 if (RTYPEDDATA_P(obj)) {
3216 return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
3217 }
3218 else {
3219 return 0;
3220 }
3221}
3222
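/* bsearch() comparator: returns 0 when ptr falls inside the page body that *memb points to,
 * and -1 / 1 to steer the search toward lower / higher addresses. */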
3223static int
3224ptr_in_page_body_p(const void *ptr, const void *memb)
3225{
3226 struct heap_page *page = *(struct heap_page **)memb;
3227 uintptr_t p_body = (uintptr_t)GET_PAGE_BODY(page->start);
3228
3229 if ((uintptr_t)ptr >= p_body) {
3230 return (uintptr_t)ptr < (p_body + HEAP_PAGE_SIZE) ? 0 : 1;
3231 }
3232 else {
3233 return -1;
3234 }
3235}
3236
3237PUREFUNC(static inline struct heap_page * heap_page_for_ptr(rb_objspace_t *objspace, uintptr_t ptr);)
3238static inline struct heap_page *
3239heap_page_for_ptr(rb_objspace_t *objspace, uintptr_t ptr)
3240{
3241 struct heap_page **res;
3242
3243 if (ptr < (uintptr_t)heap_pages_lomem ||
3244 ptr > (uintptr_t)heap_pages_himem) {
3245 return NULL;
3246 }
3247
3248 res = bsearch((void *)ptr, heap_pages_sorted,
3249 (size_t)heap_allocated_pages, sizeof(struct heap_page *),
3250 ptr_in_page_body_p);
3251
3252 if (res) {
3253 return *res;
3254 }
3255 else {
3256 return NULL;
3257 }
3258}
3259
3260PUREFUNC(static inline int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr);)
3261static inline int
3262is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
3263{
3264 register uintptr_t p = (uintptr_t)ptr;
3265 register struct heap_page *page;
3266
3267 RB_DEBUG_COUNTER_INC(gc_isptr_trial);
3268
3269 if (p < heap_pages_lomem || p > heap_pages_himem) return FALSE;
3270 RB_DEBUG_COUNTER_INC(gc_isptr_range);
3271
3272 if (p % BASE_SLOT_SIZE != 0) return FALSE;
3273 RB_DEBUG_COUNTER_INC(gc_isptr_align);
3274
3275 page = heap_page_for_ptr(objspace, (uintptr_t)ptr);
3276 if (page) {
3277 RB_DEBUG_COUNTER_INC(gc_isptr_maybe);
3278 if (page->flags.in_tomb) {
3279 return FALSE;
3280 }
3281 else {
3282 if (p < page->start) return FALSE;
3283 if (p >= page->start + (page->total_slots * page->slot_size)) return FALSE;
3284 if ((NUM_IN_PAGE(p) * BASE_SLOT_SIZE) % page->slot_size != 0) return FALSE;
3285
3286 return TRUE;
3287 }
3288 }
3289 return FALSE;
3290}
3291
3292static enum rb_id_table_iterator_result
3293free_const_entry_i(VALUE value, void *data)
3294{
3295 rb_const_entry_t *ce = (rb_const_entry_t *)value;
3296 xfree(ce);
3297 return ID_TABLE_CONTINUE;
3298}
3299
3300void
3301rb_free_const_table(struct rb_id_table *tbl)
3302{
3303 rb_id_table_foreach_values(tbl, free_const_entry_i, 0);
3304 rb_id_table_free(tbl);
3305}
3306
3307// alive: if false, the target pointers may already have been freed.
3308// To check that, we need the objspace parameter.
3309static void
3310vm_ccs_free(struct rb_class_cc_entries *ccs, int alive, rb_objspace_t *objspace, VALUE klass)
3311{
3312 if (ccs->entries) {
3313 for (int i=0; i<ccs->len; i++) {
3314 const struct rb_callcache *cc = ccs->entries[i].cc;
3315 if (!alive) {
3316 void *ptr = asan_unpoison_object_temporary((VALUE)cc);
3317                // The cc entries may already have been freed.
3318 if (is_pointer_to_heap(objspace, (void *)cc) &&
3319 IMEMO_TYPE_P(cc, imemo_callcache) &&
3320 cc->klass == klass) {
3321 // OK. maybe target cc.
3322 }
3323 else {
3324 if (ptr) {
3325 asan_poison_object((VALUE)cc);
3326 }
3327 continue;
3328 }
3329 if (ptr) {
3330 asan_poison_object((VALUE)cc);
3331 }
3332 }
3333
3334 VM_ASSERT(!vm_cc_super_p(cc) && !vm_cc_refinement_p(cc));
3335 vm_cc_invalidate(cc);
3336 }
3337 ruby_xfree(ccs->entries);
3338 }
3339 ruby_xfree(ccs);
3340}
3341
3342void
3343rb_vm_ccs_free(struct rb_class_cc_entries *ccs)
3344{
3345 RB_DEBUG_COUNTER_INC(ccs_free);
3346 vm_ccs_free(ccs, TRUE, NULL, Qundef);
3347}
3348
3349struct cc_tbl_i_data {
3350 rb_objspace_t *objspace;
3351 VALUE klass;
3352 bool alive;
3353};
3354
3355static enum rb_id_table_iterator_result
3356cc_table_mark_i(ID id, VALUE ccs_ptr, void *data_ptr)
3357{
3358 struct cc_tbl_i_data *data = data_ptr;
3359 struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
3360 VM_ASSERT(vm_ccs_p(ccs));
3361 VM_ASSERT(id == ccs->cme->called_id);
3362
3363 if (METHOD_ENTRY_INVALIDATED(ccs->cme)) {
3364 rb_vm_ccs_free(ccs);
3365 return ID_TABLE_DELETE;
3366 }
3367 else {
3368 gc_mark(data->objspace, (VALUE)ccs->cme);
3369
3370 for (int i=0; i<ccs->len; i++) {
3371 VM_ASSERT(data->klass == ccs->entries[i].cc->klass);
3372 VM_ASSERT(vm_cc_check_cme(ccs->entries[i].cc, ccs->cme));
3373
3374 gc_mark(data->objspace, (VALUE)ccs->entries[i].ci);
3375 gc_mark(data->objspace, (VALUE)ccs->entries[i].cc);
3376 }
3377 return ID_TABLE_CONTINUE;
3378 }
3379}
3380
3381static void
3382cc_table_mark(rb_objspace_t *objspace, VALUE klass)
3383{
3384 struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
3385 if (cc_tbl) {
3386 struct cc_tbl_i_data data = {
3387 .objspace = objspace,
3388 .klass = klass,
3389 };
3390 rb_id_table_foreach(cc_tbl, cc_table_mark_i, &data);
3391 }
3392}
3393
3394static enum rb_id_table_iterator_result
3395cc_table_free_i(VALUE ccs_ptr, void *data_ptr)
3396{
3397 struct cc_tbl_i_data *data = data_ptr;
3398 struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
3399 VM_ASSERT(vm_ccs_p(ccs));
3400 vm_ccs_free(ccs, data->alive, data->objspace, data->klass);
3401 return ID_TABLE_CONTINUE;
3402}
3403
3404static void
3405cc_table_free(rb_objspace_t *objspace, VALUE klass, bool alive)
3406{
3407 struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
3408
3409 if (cc_tbl) {
3410 struct cc_tbl_i_data data = {
3411 .objspace = objspace,
3412 .klass = klass,
3413 .alive = alive,
3414 };
3415 rb_id_table_foreach_values(cc_tbl, cc_table_free_i, &data);
3416 rb_id_table_free(cc_tbl);
3417 }
3418}
3419
3420static enum rb_id_table_iterator_result
3421cvar_table_free_i(VALUE value, void * ctx)
3422{
3423 xfree((void *) value);
3424 return ID_TABLE_CONTINUE;
3425}
3426
3427void
3428rb_cc_table_free(VALUE klass)
3429{
3430 cc_table_free(&rb_objspace, klass, TRUE);
3431}
3432
3433static inline void
3434make_zombie(rb_objspace_t *objspace, VALUE obj, void (*dfree)(void *), void *data)
3435{
3436 struct RZombie *zombie = RZOMBIE(obj);
3437 zombie->basic.flags = T_ZOMBIE | (zombie->basic.flags & FL_SEEN_OBJ_ID);
3438 zombie->dfree = dfree;
3439 zombie->data = data;
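    /* Push the zombie onto the global heap_pages_deferred_final list with an atomic
     * compare-and-swap loop, so deferred finalization does not need an extra lock. */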
3440 VALUE prev, next = heap_pages_deferred_final;
3441 do {
3442 zombie->next = prev = next;
3443 next = RUBY_ATOMIC_VALUE_CAS(heap_pages_deferred_final, prev, obj);
3444 } while (next != prev);
3445
3446 struct heap_page *page = GET_HEAP_PAGE(obj);
3447 page->final_slots++;
3448 heap_pages_final_slots++;
3449}
3450
3451static inline void
3452make_io_zombie(rb_objspace_t *objspace, VALUE obj)
3453{
3454 rb_io_t *fptr = RANY(obj)->as.file.fptr;
3455 make_zombie(objspace, obj, rb_io_fptr_finalize_internal, fptr);
3456}
3457
3458static void
3459obj_free_object_id(rb_objspace_t *objspace, VALUE obj)
3460{
3461 ASSERT_vm_locking();
3462 st_data_t o = (st_data_t)obj, id;
3463
3464 GC_ASSERT(FL_TEST(obj, FL_SEEN_OBJ_ID));
3465    FL_UNSET(obj, FL_SEEN_OBJ_ID);
3466
3467 if (st_delete(objspace->obj_to_id_tbl, &o, &id)) {
3468 GC_ASSERT(id);
3469 st_delete(objspace->id_to_obj_tbl, &id, NULL);
3470 }
3471 else {
3472 rb_bug("Object ID seen, but not in mapping table: %s", obj_info(obj));
3473 }
3474}
3475
3476static bool
3477rb_data_free(rb_objspace_t *objspace, VALUE obj)
3478{
3479 void *data = RTYPEDDATA_P(obj) ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
3480 if (data) {
3481 int free_immediately = false;
3482 void (*dfree)(void *);
3483
3484 if (RTYPEDDATA_P(obj)) {
3485 free_immediately = (RANY(obj)->as.typeddata.type->flags & RUBY_TYPED_FREE_IMMEDIATELY) != 0;
3486 dfree = RANY(obj)->as.typeddata.type->function.dfree;
3487 }
3488 else {
3489 dfree = RANY(obj)->as.data.dfree;
3490 }
3491
3492 if (dfree) {
3493 if (dfree == RUBY_DEFAULT_FREE) {
3494 if (!RTYPEDDATA_EMBEDDED_P(obj)) {
3495 xfree(data);
3496 RB_DEBUG_COUNTER_INC(obj_data_xfree);
3497 }
3498 }
3499 else if (free_immediately) {
3500 (*dfree)(data);
3501 if (RTYPEDDATA_TYPE(obj)->flags & RUBY_TYPED_EMBEDDABLE && !RTYPEDDATA_EMBEDDED_P(obj)) {
3502 xfree(data);
3503 }
3504
3505 RB_DEBUG_COUNTER_INC(obj_data_imm_free);
3506 }
3507 else {
3508 make_zombie(objspace, obj, dfree, data);
3509 RB_DEBUG_COUNTER_INC(obj_data_zombie);
3510 return FALSE;
3511 }
3512 }
3513 else {
3514 RB_DEBUG_COUNTER_INC(obj_data_empty);
3515 }
3516 }
3517
3518 return true;
3519}
3520
3521static int
3522obj_free(rb_objspace_t *objspace, VALUE obj)
3523{
3524 RB_DEBUG_COUNTER_INC(obj_free);
3525 // RUBY_DEBUG_LOG("obj:%p (%s)", (void *)obj, obj_type_name(obj));
3526
3527 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_FREEOBJ, obj);
3528
3529 switch (BUILTIN_TYPE(obj)) {
3530 case T_NIL:
3531 case T_FIXNUM:
3532 case T_TRUE:
3533 case T_FALSE:
3534 rb_bug("obj_free() called for broken object");
3535 break;
3536 default:
3537 break;
3538 }
3539
3540    if (FL_TEST(obj, FL_EXIVAR)) {
3541        rb_free_generic_ivar((VALUE)obj);
3542 FL_UNSET(obj, FL_EXIVAR);
3543 }
3544
3545 if (FL_TEST(obj, FL_SEEN_OBJ_ID) && !FL_TEST(obj, FL_FINALIZE)) {
3546 obj_free_object_id(objspace, obj);
3547 }
3548
3549 if (RVALUE_WB_UNPROTECTED(obj)) CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
3550
3551#if RGENGC_CHECK_MODE
3552#define CHECK(x) if (x(obj) != FALSE) rb_bug("obj_free: " #x "(%s) != FALSE", obj_info(obj))
3553 CHECK(RVALUE_WB_UNPROTECTED);
3554 CHECK(RVALUE_MARKED);
3555 CHECK(RVALUE_MARKING);
3556 CHECK(RVALUE_UNCOLLECTIBLE);
3557#undef CHECK
3558#endif
3559
3560 switch (BUILTIN_TYPE(obj)) {
3561 case T_OBJECT:
3562 if (rb_shape_obj_too_complex(obj)) {
3563 RB_DEBUG_COUNTER_INC(obj_obj_too_complex);
3564 st_free_table(ROBJECT_IV_HASH(obj));
3565 }
3566 else if (RANY(obj)->as.basic.flags & ROBJECT_EMBED) {
3567 RB_DEBUG_COUNTER_INC(obj_obj_embed);
3568 }
3569 else {
3570 xfree(RANY(obj)->as.object.as.heap.ivptr);
3571 RB_DEBUG_COUNTER_INC(obj_obj_ptr);
3572 }
3573 break;
3574 case T_MODULE:
3575 case T_CLASS:
3576 rb_id_table_free(RCLASS_M_TBL(obj));
3577 cc_table_free(objspace, obj, FALSE);
3578 if (rb_shape_obj_too_complex(obj)) {
3579 st_free_table((st_table *)RCLASS_IVPTR(obj));
3580 }
3581 else if (RCLASS_IVPTR(obj)) {
3582 xfree(RCLASS_IVPTR(obj));
3583 }
3584
3585 if (RCLASS_CONST_TBL(obj)) {
3586 rb_free_const_table(RCLASS_CONST_TBL(obj));
3587 }
3588 if (RCLASS_CVC_TBL(obj)) {
3589 rb_id_table_foreach_values(RCLASS_CVC_TBL(obj), cvar_table_free_i, NULL);
3590 rb_id_table_free(RCLASS_CVC_TBL(obj));
3591 }
3592 rb_class_remove_subclass_head(obj);
3593 rb_class_remove_from_module_subclasses(obj);
3594 rb_class_remove_from_super_subclasses(obj);
3595 if (FL_TEST_RAW(obj, RCLASS_SUPERCLASSES_INCLUDE_SELF)) {
3596 xfree(RCLASS_SUPERCLASSES(obj));
3597 }
3598
3599 (void)RB_DEBUG_COUNTER_INC_IF(obj_module_ptr, BUILTIN_TYPE(obj) == T_MODULE);
3600 (void)RB_DEBUG_COUNTER_INC_IF(obj_class_ptr, BUILTIN_TYPE(obj) == T_CLASS);
3601 break;
3602 case T_STRING:
3603 rb_str_free(obj);
3604 break;
3605 case T_ARRAY:
3606 rb_ary_free(obj);
3607 break;
3608 case T_HASH:
3609#if USE_DEBUG_COUNTER
3610 switch (RHASH_SIZE(obj)) {
3611 case 0:
3612 RB_DEBUG_COUNTER_INC(obj_hash_empty);
3613 break;
3614 case 1:
3615 RB_DEBUG_COUNTER_INC(obj_hash_1);
3616 break;
3617 case 2:
3618 RB_DEBUG_COUNTER_INC(obj_hash_2);
3619 break;
3620 case 3:
3621 RB_DEBUG_COUNTER_INC(obj_hash_3);
3622 break;
3623 case 4:
3624 RB_DEBUG_COUNTER_INC(obj_hash_4);
3625 break;
3626 case 5:
3627 case 6:
3628 case 7:
3629 case 8:
3630 RB_DEBUG_COUNTER_INC(obj_hash_5_8);
3631 break;
3632 default:
3633 GC_ASSERT(RHASH_SIZE(obj) > 8);
3634 RB_DEBUG_COUNTER_INC(obj_hash_g8);
3635 }
3636
3637 if (RHASH_AR_TABLE_P(obj)) {
3638 if (RHASH_AR_TABLE(obj) == NULL) {
3639 RB_DEBUG_COUNTER_INC(obj_hash_null);
3640 }
3641 else {
3642 RB_DEBUG_COUNTER_INC(obj_hash_ar);
3643 }
3644 }
3645 else {
3646 RB_DEBUG_COUNTER_INC(obj_hash_st);
3647 }
3648#endif
3649
3650 rb_hash_free(obj);
3651 break;
3652 case T_REGEXP:
3653 if (RANY(obj)->as.regexp.ptr) {
3654 onig_free(RANY(obj)->as.regexp.ptr);
3655 RB_DEBUG_COUNTER_INC(obj_regexp_ptr);
3656 }
3657 break;
3658 case T_DATA:
3659 if (!rb_data_free(objspace, obj)) return false;
3660 break;
3661 case T_MATCH:
3662 {
3663 rb_matchext_t *rm = RMATCH_EXT(obj);
3664#if USE_DEBUG_COUNTER
3665 if (rm->regs.num_regs >= 8) {
3666 RB_DEBUG_COUNTER_INC(obj_match_ge8);
3667 }
3668 else if (rm->regs.num_regs >= 4) {
3669 RB_DEBUG_COUNTER_INC(obj_match_ge4);
3670 }
3671 else if (rm->regs.num_regs >= 1) {
3672 RB_DEBUG_COUNTER_INC(obj_match_under4);
3673 }
3674#endif
3675 onig_region_free(&rm->regs, 0);
3676 if (rm->char_offset)
3677 xfree(rm->char_offset);
3678
3679 RB_DEBUG_COUNTER_INC(obj_match_ptr);
3680 }
3681 break;
3682 case T_FILE:
3683 if (RANY(obj)->as.file.fptr) {
3684 make_io_zombie(objspace, obj);
3685 RB_DEBUG_COUNTER_INC(obj_file_ptr);
3686 return FALSE;
3687 }
3688 break;
3689 case T_RATIONAL:
3690 RB_DEBUG_COUNTER_INC(obj_rational);
3691 break;
3692 case T_COMPLEX:
3693 RB_DEBUG_COUNTER_INC(obj_complex);
3694 break;
3695 case T_MOVED:
3696 break;
3697 case T_ICLASS:
3698        /* Basically, T_ICLASS shares its method table with the module */
3699 if (RICLASS_OWNS_M_TBL_P(obj)) {
3700 /* Method table is not shared for origin iclasses of classes */
3701 rb_id_table_free(RCLASS_M_TBL(obj));
3702 }
3703 if (RCLASS_CALLABLE_M_TBL(obj) != NULL) {
3704 rb_id_table_free(RCLASS_CALLABLE_M_TBL(obj));
3705 }
3706 rb_class_remove_subclass_head(obj);
3707 cc_table_free(objspace, obj, FALSE);
3708 rb_class_remove_from_module_subclasses(obj);
3709 rb_class_remove_from_super_subclasses(obj);
3710
3711 RB_DEBUG_COUNTER_INC(obj_iclass_ptr);
3712 break;
3713
3714 case T_FLOAT:
3715 RB_DEBUG_COUNTER_INC(obj_float);
3716 break;
3717
3718 case T_BIGNUM:
3719 if (!BIGNUM_EMBED_P(obj) && BIGNUM_DIGITS(obj)) {
3720 xfree(BIGNUM_DIGITS(obj));
3721 RB_DEBUG_COUNTER_INC(obj_bignum_ptr);
3722 }
3723 else {
3724 RB_DEBUG_COUNTER_INC(obj_bignum_embed);
3725 }
3726 break;
3727
3728 case T_NODE:
3729 UNEXPECTED_NODE(obj_free);
3730 break;
3731
3732 case T_STRUCT:
3733 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) ||
3734 RANY(obj)->as.rstruct.as.heap.ptr == NULL) {
3735 RB_DEBUG_COUNTER_INC(obj_struct_embed);
3736 }
3737 else {
3738 xfree((void *)RANY(obj)->as.rstruct.as.heap.ptr);
3739 RB_DEBUG_COUNTER_INC(obj_struct_ptr);
3740 }
3741 break;
3742
3743 case T_SYMBOL:
3744 {
3745 rb_gc_free_dsymbol(obj);
3746 RB_DEBUG_COUNTER_INC(obj_symbol);
3747 }
3748 break;
3749
3750 case T_IMEMO:
3751 switch (imemo_type(obj)) {
3752 case imemo_ment:
3753 rb_free_method_entry(&RANY(obj)->as.imemo.ment);
3754 RB_DEBUG_COUNTER_INC(obj_imemo_ment);
3755 break;
3756 case imemo_iseq:
3757 rb_iseq_free(&RANY(obj)->as.imemo.iseq);
3758 RB_DEBUG_COUNTER_INC(obj_imemo_iseq);
3759 break;
3760 case imemo_env:
3761 GC_ASSERT(VM_ENV_ESCAPED_P(RANY(obj)->as.imemo.env.ep));
3762 xfree((VALUE *)RANY(obj)->as.imemo.env.env);
3763 RB_DEBUG_COUNTER_INC(obj_imemo_env);
3764 break;
3765 case imemo_tmpbuf:
3766 xfree(RANY(obj)->as.imemo.alloc.ptr);
3767 RB_DEBUG_COUNTER_INC(obj_imemo_tmpbuf);
3768 break;
3769 case imemo_ast:
3770 rb_ast_free(&RANY(obj)->as.imemo.ast);
3771 RB_DEBUG_COUNTER_INC(obj_imemo_ast);
3772 break;
3773 case imemo_cref:
3774 RB_DEBUG_COUNTER_INC(obj_imemo_cref);
3775 break;
3776 case imemo_svar:
3777 RB_DEBUG_COUNTER_INC(obj_imemo_svar);
3778 break;
3779 case imemo_throw_data:
3780 RB_DEBUG_COUNTER_INC(obj_imemo_throw_data);
3781 break;
3782 case imemo_ifunc:
3783 RB_DEBUG_COUNTER_INC(obj_imemo_ifunc);
3784 break;
3785 case imemo_memo:
3786 RB_DEBUG_COUNTER_INC(obj_imemo_memo);
3787 break;
3788 case imemo_parser_strterm:
3789 RB_DEBUG_COUNTER_INC(obj_imemo_parser_strterm);
3790 break;
3791 case imemo_callinfo:
3792 {
3793 const struct rb_callinfo * ci = ((const struct rb_callinfo *)obj);
3794 rb_vm_ci_free(ci);
3795 if (ci->kwarg) {
3796 ((struct rb_callinfo_kwarg *)ci->kwarg)->references--;
3797 if (ci->kwarg->references == 0) xfree((void *)ci->kwarg);
3798 }
3799 RB_DEBUG_COUNTER_INC(obj_imemo_callinfo);
3800 break;
3801 }
3802 case imemo_callcache:
3803 RB_DEBUG_COUNTER_INC(obj_imemo_callcache);
3804 break;
3805 case imemo_constcache:
3806 RB_DEBUG_COUNTER_INC(obj_imemo_constcache);
3807 break;
3808 }
3809 return TRUE;
3810
3811 default:
3812 rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE,
3813 BUILTIN_TYPE(obj), (void*)obj, RBASIC(obj)->flags);
3814 }
3815
3816 if (FL_TEST(obj, FL_FINALIZE)) {
3817 make_zombie(objspace, obj, 0, 0);
3818 return FALSE;
3819 }
3820 else {
3821 return TRUE;
3822 }
3823}
3824
3825
3826#define OBJ_ID_INCREMENT (sizeof(RVALUE) / 2)
3827#define OBJ_ID_INITIAL (OBJ_ID_INCREMENT * 2)
3828
3829static int
3830object_id_cmp(st_data_t x, st_data_t y)
3831{
3832 if (RB_BIGNUM_TYPE_P(x)) {
3833 return !rb_big_eql(x, y);
3834 }
3835 else {
3836 return x != y;
3837 }
3838}
3839
3840static st_index_t
3841object_id_hash(st_data_t n)
3842{
3843 if (RB_BIGNUM_TYPE_P(n)) {
3844 return FIX2LONG(rb_big_hash(n));
3845 }
3846 else {
3847 return st_numhash(n);
3848 }
3849}
3850static const struct st_hash_type object_id_hash_type = {
3851 object_id_cmp,
3852 object_id_hash,
3853};
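
/*
 * An illustrative sketch of how these ids behave (not normative; it assumes
 * a 64-bit build where sizeof(RVALUE) is 40, per the object_id space comment
 * in rb_obj_id() below). Heap objects receive ids from next_object_id, which
 * starts at OBJ_ID_INITIAL and advances by OBJ_ID_INCREMENT in
 * cached_object_id():
 *
 *     first id assigned  = OBJ_ID_INITIAL        = 40
 *     second id assigned = 40 + OBJ_ID_INCREMENT = 60
 *     third id assigned  = 60 + OBJ_ID_INCREMENT = 80
 *
 * Because the counter only grows, a long-lived process can eventually exceed
 * the Fixnum range; rb_int_plus() then produces Bignum ids, which is why
 * object_id_cmp() and object_id_hash() above handle both Fixnum and Bignum
 * keys.
 */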
3854
3855void
3856Init_heap(void)
3857{
3858 rb_objspace_t *objspace = &rb_objspace;
3859
3860#if defined(INIT_HEAP_PAGE_ALLOC_USE_MMAP)
3861 /* Need to determine if we can use mmap at runtime. */
3862 heap_page_alloc_use_mmap = INIT_HEAP_PAGE_ALLOC_USE_MMAP;
3863#endif
3864
3865 objspace->next_object_id = INT2FIX(OBJ_ID_INITIAL);
3866 objspace->id_to_obj_tbl = st_init_table(&object_id_hash_type);
3867 objspace->obj_to_id_tbl = st_init_numtable();
3868
3869#if RGENGC_ESTIMATE_OLDMALLOC
3870 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
3871#endif
3872
3873    /* Set the number of allocatable pages for each size pool. */
3874 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
3875 rb_size_pool_t *size_pool = &size_pools[i];
3876
3877 /* Set the default value of size_pool_init_slots. */
3878 gc_params.size_pool_init_slots[i] = GC_HEAP_INIT_SLOTS;
3879
3880 size_pool->allocatable_pages = minimum_pages_for_size_pool(objspace, size_pool);
3881 }
3882 heap_pages_expand_sorted(objspace);
3883
3884 init_mark_stack(&objspace->mark_stack);
3885
3886 objspace->profile.invoke_time = getrusage_time();
3887 finalizer_table = st_init_numtable();
3888}
3889
3890void
3891Init_gc_stress(void)
3892{
3893 rb_objspace_t *objspace = &rb_objspace;
3894
3895 gc_stress_set(objspace, ruby_initial_gc_stress);
3896}
3897
3898typedef int each_obj_callback(void *, void *, size_t, void *);
3899typedef int each_page_callback(struct heap_page *, void *);
3900
3901static void objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data, bool protected);
3902static void objspace_reachable_objects_from_root(rb_objspace_t *, void (func)(const char *, VALUE, void *), void *);
3903
3905 rb_objspace_t *objspace;
3906 bool reenable_incremental;
3907
3908 each_obj_callback *each_obj_callback;
3909 each_page_callback *each_page_callback;
3910 void *data;
3911
3912 struct heap_page **pages[SIZE_POOL_COUNT];
3913 size_t pages_counts[SIZE_POOL_COUNT];
3914};
3915
3916static VALUE
3917objspace_each_objects_ensure(VALUE arg)
3918{
3919 struct each_obj_data *data = (struct each_obj_data *)arg;
3920 rb_objspace_t *objspace = data->objspace;
3921
3922 /* Reenable incremental GC */
3923 if (data->reenable_incremental) {
3924 objspace->flags.dont_incremental = FALSE;
3925 }
3926
3927 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
3928 struct heap_page **pages = data->pages[i];
3929 free(pages);
3930 }
3931
3932 return Qnil;
3933}
3934
3935static VALUE
3936objspace_each_objects_try(VALUE arg)
3937{
3938 struct each_obj_data *data = (struct each_obj_data *)arg;
3939 rb_objspace_t *objspace = data->objspace;
3940
3941 /* Copy pages from all size_pools to their respective buffers. */
3942 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
3943 rb_size_pool_t *size_pool = &size_pools[i];
3944 size_t size = size_mul_or_raise(SIZE_POOL_EDEN_HEAP(size_pool)->total_pages, sizeof(struct heap_page *), rb_eRuntimeError);
3945
3946 struct heap_page **pages = malloc(size);
3947 if (!pages) rb_memerror();
3948
3949 /* Set up pages buffer by iterating over all pages in the current eden
3950 * heap. This will be a snapshot of the state of the heap before we
3951 * call the callback over each page that exists in this buffer. Thus it
3952 * is safe for the callback to allocate objects without possibly entering
3953 * an infinite loop. */
3954 struct heap_page *page = 0;
3955 size_t pages_count = 0;
3956 ccan_list_for_each(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node) {
3957 pages[pages_count] = page;
3958 pages_count++;
3959 }
3960 data->pages[i] = pages;
3961 data->pages_counts[i] = pages_count;
3962 GC_ASSERT(pages_count == SIZE_POOL_EDEN_HEAP(size_pool)->total_pages);
3963 }
3964
3965 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
3966 rb_size_pool_t *size_pool = &size_pools[i];
3967 size_t pages_count = data->pages_counts[i];
3968 struct heap_page **pages = data->pages[i];
3969
3970 struct heap_page *page = ccan_list_top(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, struct heap_page, page_node);
3971 for (size_t i = 0; i < pages_count; i++) {
3972 /* If we have reached the end of the linked list then there are no
3973 * more pages, so break. */
3974 if (page == NULL) break;
3975
3976 /* If this page does not match the one in the buffer, then move to
3977 * the next page in the buffer. */
3978 if (pages[i] != page) continue;
3979
3980 uintptr_t pstart = (uintptr_t)page->start;
3981 uintptr_t pend = pstart + (page->total_slots * size_pool->slot_size);
3982
3983 if (!__asan_region_is_poisoned((void *)pstart, pend - pstart)) {
3984 if (data->each_obj_callback &&
3985 (*data->each_obj_callback)((void *)pstart, (void *)pend, size_pool->slot_size, data->data)) {
3986 break;
3987 }
3988 if (data->each_page_callback &&
3989 (*data->each_page_callback)(page, data->data)) {
3990 break;
3991 }
3992 }
3993
3994 page = ccan_list_next(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node);
3995 }
3996 }
3997
3998 return Qnil;
3999}
4000
4001/*
4002 * rb_objspace_each_objects() is a special C API to walk through the
4003 * Ruby object space. This C API is difficult to use correctly.
4004 * Frankly, you should not use it unless you have read the source
4005 * code of this function and understand what it does.
4006 *
4007 * 'callback' will be called several times (once per heap page in the
4008 * current implementation) with:
4009 *   vstart: a pointer to the first living object of the heap_page.
4010 *   vend: a pointer just past the valid heap_page area.
4011 *   stride: the distance to the next VALUE.
4012 *
4013 * If callback() returns non-zero, the iteration will be stopped.
4014 *
4015 * This is a sample callback that iterates over live objects:
4016 *
4017 * static int
4018 * sample_callback(void *vstart, void *vend, int stride, void *data)
4019 * {
4020 * VALUE v = (VALUE)vstart;
4021 * for (; v != (VALUE)vend; v += stride) {
4022 * if (!rb_objspace_internal_object_p(v)) { // liveness check
4023 * // do something with live object 'v'
4024 * }
4025 * }
4026 * return 0; // continue to iteration
4027 * }
4028 *
4029 * Note: 'vstart' is not the top of the heap_page. It points to the
4030 * first living object so that at least one object is kept and GC
4031 * issues are avoided. This means that you cannot walk through every
4032 * slot of a Ruby object page, including freed slots.
4033 *
4034 * Note: 'stride' is the slot size of the size pool the page belongs
4035 * to, so it may differ from page to page. You must use 'stride'
4036 * instead of a constant value when advancing through the objects
4037 * in the iteration.
4038 */
4039void
4040rb_objspace_each_objects(each_obj_callback *callback, void *data)
4041{
4042 objspace_each_objects(&rb_objspace, callback, data, TRUE);
4043}
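
/*
 * A minimal usage sketch for the API documented above. The names
 * `count_live_i` and `struct counter` are hypothetical, not part of this
 * file; only rb_objspace_each_objects() and rb_objspace_internal_object_p()
 * are real entry points here.
 *
 *     struct counter { size_t n; };
 *
 *     static int
 *     count_live_i(void *vstart, void *vend, size_t stride, void *data)
 *     {
 *         struct counter *c = data;
 *         for (VALUE v = (VALUE)vstart; v != (VALUE)vend; v += stride) {
 *             if (!rb_objspace_internal_object_p(v)) c->n++;
 *         }
 *         return 0; // 0 keeps the iteration going
 *     }
 *
 *     struct counter c = { 0 };
 *     rb_objspace_each_objects(count_live_i, &c);
 */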
4044
4045static void
4046objspace_each_exec(bool protected, struct each_obj_data *each_obj_data)
4047{
4048 /* Disable incremental GC */
4049 rb_objspace_t *objspace = each_obj_data->objspace;
4050 bool reenable_incremental = FALSE;
4051 if (protected) {
4052 reenable_incremental = !objspace->flags.dont_incremental;
4053
4054 gc_rest(objspace);
4055 objspace->flags.dont_incremental = TRUE;
4056 }
4057
4058 each_obj_data->reenable_incremental = reenable_incremental;
4059 memset(&each_obj_data->pages, 0, sizeof(each_obj_data->pages));
4060 memset(&each_obj_data->pages_counts, 0, sizeof(each_obj_data->pages_counts));
4061 rb_ensure(objspace_each_objects_try, (VALUE)each_obj_data,
4062 objspace_each_objects_ensure, (VALUE)each_obj_data);
4063}
4064
4065static void
4066objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data, bool protected)
4067{
4068 struct each_obj_data each_obj_data = {
4069 .objspace = objspace,
4070 .each_obj_callback = callback,
4071 .each_page_callback = NULL,
4072 .data = data,
4073 };
4074 objspace_each_exec(protected, &each_obj_data);
4075}
4076
4077static void
4078objspace_each_pages(rb_objspace_t *objspace, each_page_callback *callback, void *data, bool protected)
4079{
4080 struct each_obj_data each_obj_data = {
4081 .objspace = objspace,
4082 .each_obj_callback = NULL,
4083 .each_page_callback = callback,
4084 .data = data,
4085 };
4086 objspace_each_exec(protected, &each_obj_data);
4087}
4088
4089void
4090rb_objspace_each_objects_without_setup(each_obj_callback *callback, void *data)
4091{
4092 objspace_each_objects(&rb_objspace, callback, data, FALSE);
4093}
4094
4095struct os_each_struct {
4096 size_t num;
4097 VALUE of;
4098};
4099
4100static int
4101internal_object_p(VALUE obj)
4102{
4103 RVALUE *p = (RVALUE *)obj;
4104 void *ptr = asan_unpoison_object_temporary(obj);
4105 bool used_p = p->as.basic.flags;
4106
4107 if (used_p) {
4108 switch (BUILTIN_TYPE(obj)) {
4109 case T_NODE:
4110 UNEXPECTED_NODE(internal_object_p);
4111 break;
4112 case T_NONE:
4113 case T_MOVED:
4114 case T_IMEMO:
4115 case T_ICLASS:
4116 case T_ZOMBIE:
4117 break;
4118 case T_CLASS:
4119 if (!p->as.basic.klass) break;
4120 if (FL_TEST(obj, FL_SINGLETON)) {
4121 return rb_singleton_class_internal_p(obj);
4122 }
4123 return 0;
4124 default:
4125 if (!p->as.basic.klass) break;
4126 return 0;
4127 }
4128 }
4129 if (ptr || ! used_p) {
4130 asan_poison_object(obj);
4131 }
4132 return 1;
4133}
4134
4135int
4136rb_objspace_internal_object_p(VALUE obj)
4137{
4138 return internal_object_p(obj);
4139}
4140
4141static int
4142os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
4143{
4144 struct os_each_struct *oes = (struct os_each_struct *)data;
4145
4146 VALUE v = (VALUE)vstart;
4147 for (; v != (VALUE)vend; v += stride) {
4148 if (!internal_object_p(v)) {
4149 if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {
4150 if (!rb_multi_ractor_p() || rb_ractor_shareable_p(v)) {
4151 rb_yield(v);
4152 oes->num++;
4153 }
4154 }
4155 }
4156 }
4157
4158 return 0;
4159}
4160
4161static VALUE
4162os_obj_of(VALUE of)
4163{
4164 struct os_each_struct oes;
4165
4166 oes.num = 0;
4167 oes.of = of;
4168 rb_objspace_each_objects(os_obj_of_i, &oes);
4169 return SIZET2NUM(oes.num);
4170}
4171
4172/*
4173 * call-seq:
4174 * ObjectSpace.each_object([module]) {|obj| ... } -> integer
4175 * ObjectSpace.each_object([module]) -> an_enumerator
4176 *
4177 * Calls the block once for each living, nonimmediate object in this
4178 * Ruby process. If <i>module</i> is specified, calls the block
4179 * for only those classes or modules that match (or are a subclass of)
4180 * <i>module</i>. Returns the number of objects found. Immediate
4181 * objects (<code>Fixnum</code>s, <code>Symbol</code>s,
4182 * <code>true</code>, <code>false</code>, and <code>nil</code>) are
4183 * never returned. In the example below, #each_object returns both
4184 * the numbers we defined and several constants defined in the Math
4185 * module.
4186 *
4187 * If no block is given, an enumerator is returned instead.
4188 *
4189 * a = 102.7
4190 * b = 95 # Won't be returned
4191 * c = 12345678987654321
4192 * count = ObjectSpace.each_object(Numeric) {|x| p x }
4193 * puts "Total count: #{count}"
4194 *
4195 * <em>produces:</em>
4196 *
4197 * 12345678987654321
4198 * 102.7
4199 * 2.71828182845905
4200 * 3.14159265358979
4201 * 2.22044604925031e-16
4202 * 1.7976931348623157e+308
4203 * 2.2250738585072e-308
4204 * Total count: 7
4205 *
4206 */
4207
4208static VALUE
4209os_each_obj(int argc, VALUE *argv, VALUE os)
4210{
4211 VALUE of;
4212
4213 of = (!rb_check_arity(argc, 0, 1) ? 0 : argv[0]);
4214 RETURN_ENUMERATOR(os, 1, &of);
4215 return os_obj_of(of);
4216}
4217
4218/*
4219 * call-seq:
4220 * ObjectSpace.undefine_finalizer(obj)
4221 *
4222 * Removes all finalizers for <i>obj</i>.
4223 *
4224 */
4225
4226static VALUE
4227undefine_final(VALUE os, VALUE obj)
4228{
4229 return rb_undefine_finalizer(obj);
4230}
4231
4232VALUE
4233rb_undefine_finalizer(VALUE obj)
4234{
4235 rb_objspace_t *objspace = &rb_objspace;
4236 st_data_t data = obj;
4237 rb_check_frozen(obj);
4238 st_delete(finalizer_table, &data, 0);
4239 FL_UNSET(obj, FL_FINALIZE);
4240 return obj;
4241}
4242
4243static void
4244should_be_callable(VALUE block)
4245{
4246 if (!rb_obj_respond_to(block, idCall, TRUE)) {
4247 rb_raise(rb_eArgError, "wrong type argument %"PRIsVALUE" (should be callable)",
4248 rb_obj_class(block));
4249 }
4250}
4251
4252static void
4253should_be_finalizable(VALUE obj)
4254{
4255 if (!FL_ABLE(obj)) {
4256 rb_raise(rb_eArgError, "cannot define finalizer for %s",
4257 rb_obj_classname(obj));
4258 }
4259 rb_check_frozen(obj);
4260}
4261
4262VALUE
4263rb_define_finalizer_no_check(VALUE obj, VALUE block)
4264{
4265 rb_objspace_t *objspace = &rb_objspace;
4266 VALUE table;
4267 st_data_t data;
4268
4269 RBASIC(obj)->flags |= FL_FINALIZE;
4270
4271 if (st_lookup(finalizer_table, obj, &data)) {
4272 table = (VALUE)data;
4273
4274 /* avoid duplicate block, table is usually small */
4275 {
4276 long len = RARRAY_LEN(table);
4277 long i;
4278
4279 for (i = 0; i < len; i++) {
4280 VALUE recv = RARRAY_AREF(table, i);
4281 if (rb_equal(recv, block)) {
4282 block = recv;
4283 goto end;
4284 }
4285 }
4286 }
4287
4288 rb_ary_push(table, block);
4289 }
4290 else {
4291 table = rb_ary_new3(1, block);
4292 RBASIC_CLEAR_CLASS(table);
4293 st_add_direct(finalizer_table, obj, table);
4294 }
4295 end:
4296 block = rb_ary_new3(2, INT2FIX(0), block);
4297 OBJ_FREEZE(block);
4298 return block;
4299}
4300
4301/*
4302 * call-seq:
4303 * ObjectSpace.define_finalizer(obj, aProc=proc())
4304 *
4305 * Adds <i>aProc</i> as a finalizer, to be called after <i>obj</i>
4306 * has been destroyed. The object ID of <i>obj</i> will be passed
4307 * as an argument to <i>aProc</i>. If <i>aProc</i> is a lambda or
4308 * method, make sure it can be called with a single argument.
4309 *
4310 * The return value is an array <code>[0, aProc]</code>.
4311 *
4312 * The two recommended patterns are to either create the finalizer proc
4313 * in a non-instance method where it can safely capture the needed state,
4314 * or to use a custom callable object that stores the needed state
4315 * explicitly as instance variables.
4316 *
4317 * class Foo
4318 * def initialize(data_needed_for_finalization)
4319 * ObjectSpace.define_finalizer(self, self.class.create_finalizer(data_needed_for_finalization))
4320 * end
4321 *
4322 * def self.create_finalizer(data_needed_for_finalization)
4323 * proc {
4324 * puts "finalizing #{data_needed_for_finalization}"
4325 * }
4326 * end
4327 * end
4328 *
4329 * class Bar
4330 * class Remover
4331 * def initialize(data_needed_for_finalization)
4332 * @data_needed_for_finalization = data_needed_for_finalization
4333 * end
4334 *
4335 * def call(id)
4336 * puts "finalizing #{@data_needed_for_finalization}"
4337 * end
4338 * end
4339 *
4340 * def initialize(data_needed_for_finalization)
4341 * ObjectSpace.define_finalizer(self, Remover.new(data_needed_for_finalization))
4342 * end
4343 * end
4344 *
4345 * Note that if your finalizer references the object to be
4346 * finalized, it will never be run by the GC, although it will still be
4347 * run at exit. You will get a warning if you capture the object
4348 * to be finalized as the receiver of the finalizer.
4349 *
4350 * class CapturesSelf
4351 * def initialize(name)
4352 * ObjectSpace.define_finalizer(self, proc {
4353 * # this finalizer will only be run on exit
4354 * puts "finalizing #{name}"
4355 * })
4356 * end
4357 * end
4358 *
4359 * Also note that finalization can be unpredictable and is never guaranteed
4360 * to be run except on exit.
4361 */
4362
4363static VALUE
4364define_final(int argc, VALUE *argv, VALUE os)
4365{
4366 VALUE obj, block;
4367
4368 rb_scan_args(argc, argv, "11", &obj, &block);
4369 should_be_finalizable(obj);
4370 if (argc == 1) {
4371 block = rb_block_proc();
4372 }
4373 else {
4374 should_be_callable(block);
4375 }
4376
4377 if (rb_callable_receiver(block) == obj) {
4378 rb_warn("finalizer references object to be finalized");
4379 }
4380
4381 return rb_define_finalizer_no_check(obj, block);
4382}
4383
4384VALUE
4385rb_define_finalizer(VALUE obj, VALUE block)
4386{
4387 should_be_finalizable(obj);
4388 should_be_callable(block);
4389 return rb_define_finalizer_no_check(obj, block);
4390}
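
/*
 * A minimal C-level sketch of the pair above, assuming `obj` is a
 * finalizable (non-frozen, non-special) object and `finalizer` is some
 * callable VALUE (e.g. a Proc) obtained elsewhere:
 *
 *     rb_define_finalizer(obj, finalizer);
 *     // ... later, if the finalizer is no longer wanted:
 *     rb_undefine_finalizer(obj);   // removes all finalizers for obj
 */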
4391
4392void
4393rb_gc_copy_finalizer(VALUE dest, VALUE obj)
4394{
4395 rb_objspace_t *objspace = &rb_objspace;
4396 VALUE table;
4397 st_data_t data;
4398
4399 if (!FL_TEST(obj, FL_FINALIZE)) return;
4400 if (st_lookup(finalizer_table, obj, &data)) {
4401 table = (VALUE)data;
4402 st_insert(finalizer_table, dest, table);
4403 }
4404 FL_SET(dest, FL_FINALIZE);
4405}
4406
4407static VALUE
4408run_single_final(VALUE cmd, VALUE objid)
4409{
4410 return rb_check_funcall(cmd, idCall, 1, &objid);
4411}
4412
4413static void
4414warn_exception_in_finalizer(rb_execution_context_t *ec, VALUE final)
4415{
4416 if (!UNDEF_P(final) && !NIL_P(ruby_verbose)) {
4417 VALUE errinfo = ec->errinfo;
4418 rb_warn("Exception in finalizer %+"PRIsVALUE, final);
4419 rb_ec_error_print(ec, errinfo);
4420 }
4421}
4422
4423static void
4424run_finalizer(rb_objspace_t *objspace, VALUE obj, VALUE table)
4425{
4426 long i;
4427 enum ruby_tag_type state;
4428 volatile struct {
4429 VALUE errinfo;
4430 VALUE objid;
4431 VALUE final;
4432 rb_control_frame_t *cfp;
4433 VALUE *sp;
4434 long finished;
4435 } saved;
4436
4437 rb_execution_context_t * volatile ec = GET_EC();
4438#define RESTORE_FINALIZER() (\
4439 ec->cfp = saved.cfp, \
4440 ec->cfp->sp = saved.sp, \
4441 ec->errinfo = saved.errinfo)
4442
4443 saved.errinfo = ec->errinfo;
4444 saved.objid = rb_obj_id(obj);
4445 saved.cfp = ec->cfp;
4446 saved.sp = ec->cfp->sp;
4447 saved.finished = 0;
4448 saved.final = Qundef;
4449
4450 EC_PUSH_TAG(ec);
4451 state = EC_EXEC_TAG();
4452 if (state != TAG_NONE) {
4453 ++saved.finished; /* skip failed finalizer */
4454 warn_exception_in_finalizer(ec, ATOMIC_VALUE_EXCHANGE(saved.final, Qundef));
4455 }
4456 for (i = saved.finished;
4457 RESTORE_FINALIZER(), i<RARRAY_LEN(table);
4458 saved.finished = ++i) {
4459 run_single_final(saved.final = RARRAY_AREF(table, i), saved.objid);
4460 }
4461 EC_POP_TAG();
4462#undef RESTORE_FINALIZER
4463}
4464
4465static void
4466run_final(rb_objspace_t *objspace, VALUE zombie)
4467{
4468 st_data_t key, table;
4469
4470 if (RZOMBIE(zombie)->dfree) {
4471 RZOMBIE(zombie)->dfree(RZOMBIE(zombie)->data);
4472 }
4473
4474 key = (st_data_t)zombie;
4475 if (st_delete(finalizer_table, &key, &table)) {
4476 run_finalizer(objspace, zombie, (VALUE)table);
4477 }
4478}
4479
4480static void
4481finalize_list(rb_objspace_t *objspace, VALUE zombie)
4482{
4483 while (zombie) {
4484 VALUE next_zombie;
4485 struct heap_page *page;
4486 asan_unpoison_object(zombie, false);
4487 next_zombie = RZOMBIE(zombie)->next;
4488 page = GET_HEAP_PAGE(zombie);
4489
4490 run_final(objspace, zombie);
4491
4492 RB_VM_LOCK_ENTER();
4493 {
4494 GC_ASSERT(BUILTIN_TYPE(zombie) == T_ZOMBIE);
4495 if (FL_TEST(zombie, FL_SEEN_OBJ_ID)) {
4496 obj_free_object_id(objspace, zombie);
4497 }
4498
4499 GC_ASSERT(heap_pages_final_slots > 0);
4500 GC_ASSERT(page->final_slots > 0);
4501
4502 heap_pages_final_slots--;
4503 page->final_slots--;
4504 page->free_slots++;
4505 heap_page_add_freeobj(objspace, page, zombie);
4506 page->size_pool->total_freed_objects++;
4507 }
4508 RB_VM_LOCK_LEAVE();
4509
4510 zombie = next_zombie;
4511 }
4512}
4513
4514static void
4515finalize_deferred_heap_pages(rb_objspace_t *objspace)
4516{
4517 VALUE zombie;
4518 while ((zombie = ATOMIC_VALUE_EXCHANGE(heap_pages_deferred_final, 0)) != 0) {
4519 finalize_list(objspace, zombie);
4520 }
4521}
4522
4523static void
4524finalize_deferred(rb_objspace_t *objspace)
4525{
4526 rb_execution_context_t *ec = GET_EC();
4527 ec->interrupt_mask |= PENDING_INTERRUPT_MASK;
4528 finalize_deferred_heap_pages(objspace);
4529 ec->interrupt_mask &= ~PENDING_INTERRUPT_MASK;
4530}
4531
4532static void
4533gc_finalize_deferred(void *dmy)
4534{
4535 rb_objspace_t *objspace = dmy;
4536 if (ATOMIC_EXCHANGE(finalizing, 1)) return;
4537
4538 finalize_deferred(objspace);
4539 ATOMIC_SET(finalizing, 0);
4540}
4541
4542static void
4543gc_finalize_deferred_register(rb_objspace_t *objspace)
4544{
4545 /* will enqueue a call to gc_finalize_deferred */
4546 rb_postponed_job_trigger(objspace->finalize_deferred_pjob);
4547}
4548
4549static int pop_mark_stack(mark_stack_t *stack, VALUE *data);
4550
4551static void
4552gc_abort(rb_objspace_t *objspace)
4553{
4554 if (is_incremental_marking(objspace)) {
4555 /* Remove all objects from the mark stack. */
4556 VALUE obj;
4557 while (pop_mark_stack(&objspace->mark_stack, &obj));
4558
4559 objspace->flags.during_incremental_marking = FALSE;
4560 }
4561
4562 if (is_lazy_sweeping(objspace)) {
4563 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
4564 rb_size_pool_t *size_pool = &size_pools[i];
4565 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
4566
4567 heap->sweeping_page = NULL;
4568 struct heap_page *page = NULL;
4569
4570 ccan_list_for_each(&heap->pages, page, page_node) {
4571 page->flags.before_sweep = false;
4572 }
4573 }
4574 }
4575
4576 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
4577 rb_size_pool_t *size_pool = &size_pools[i];
4578 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
4579 rgengc_mark_and_rememberset_clear(objspace, heap);
4580 }
4581
4582 gc_mode_set(objspace, gc_mode_none);
4583}
4584
4585struct force_finalize_list {
4586 VALUE obj;
4587 VALUE table;
4588 struct force_finalize_list *next;
4589};
4590
4591static int
4592force_chain_object(st_data_t key, st_data_t val, st_data_t arg)
4593{
4594 struct force_finalize_list **prev = (struct force_finalize_list **)arg;
4595 struct force_finalize_list *curr = ALLOC(struct force_finalize_list);
4596 curr->obj = key;
4597 curr->table = val;
4598 curr->next = *prev;
4599 *prev = curr;
4600 return ST_CONTINUE;
4601}
4602
4603bool rb_obj_is_main_ractor(VALUE gv);
4604
4605void
4606rb_objspace_free_objects(rb_objspace_t *objspace)
4607{
4608 for (size_t i = 0; i < heap_allocated_pages; i++) {
4609 struct heap_page *page = heap_pages_sorted[i];
4610 short stride = page->slot_size;
4611
4612 uintptr_t p = (uintptr_t)page->start;
4613 uintptr_t pend = p + page->total_slots * stride;
4614 for (; p < pend; p += stride) {
4615 VALUE vp = (VALUE)p;
4616 switch (BUILTIN_TYPE(vp)) {
4617 case T_DATA: {
4618 if (rb_obj_is_mutex(vp) || rb_obj_is_thread(vp) || rb_obj_is_main_ractor(vp)) {
4619 obj_free(objspace, vp);
4620 }
4621 break;
4622 }
4623 case T_ARRAY:
4624 obj_free(objspace, vp);
4625 break;
4626 default:
4627 break;
4628 }
4629 }
4630 }
4631}
4632
4633
4634void
4635rb_objspace_call_finalizer(rb_objspace_t *objspace)
4636{
4637 size_t i;
4638
4639#if RGENGC_CHECK_MODE >= 2
4640 gc_verify_internal_consistency(objspace);
4641#endif
4642 if (ATOMIC_EXCHANGE(finalizing, 1)) return;
4643
4644 /* run finalizers */
4645 finalize_deferred(objspace);
4646 GC_ASSERT(heap_pages_deferred_final == 0);
4647
4648 /* prohibit incremental GC */
4649 objspace->flags.dont_incremental = 1;
4650
4651    /* force finalizers to run */
4652 while (finalizer_table->num_entries) {
4653 struct force_finalize_list *list = 0;
4654 st_foreach(finalizer_table, force_chain_object, (st_data_t)&list);
4655 while (list) {
4656 struct force_finalize_list *curr = list;
4657 st_data_t obj = (st_data_t)curr->obj;
4658 run_finalizer(objspace, curr->obj, curr->table);
4659 st_delete(finalizer_table, &obj, 0);
4660 list = curr->next;
4661 xfree(curr);
4662 }
4663 }
4664
4665 /* Abort incremental marking and lazy sweeping to speed up shutdown. */
4666 gc_abort(objspace);
4667
4668    /* prohibit GC because forced T_DATA finalizers can break object graph consistency */
4669 dont_gc_on();
4670
4671 /* running data/file finalizers are part of garbage collection */
4672 unsigned int lock_lev;
4673 gc_enter(objspace, gc_enter_event_finalizer, &lock_lev);
4674
4675 /* run data/file object's finalizers */
4676 for (i = 0; i < heap_allocated_pages; i++) {
4677 struct heap_page *page = heap_pages_sorted[i];
4678 short stride = page->slot_size;
4679
4680 uintptr_t p = (uintptr_t)page->start;
4681 uintptr_t pend = p + page->total_slots * stride;
4682 for (; p < pend; p += stride) {
4683 VALUE vp = (VALUE)p;
4684 void *poisoned = asan_unpoison_object_temporary(vp);
4685 switch (BUILTIN_TYPE(vp)) {
4686 case T_DATA:
4687 if (!DATA_PTR(p) || !RANY(p)->as.data.dfree) break;
4688 if (rb_obj_is_thread(vp)) break;
4689 if (rb_obj_is_mutex(vp)) break;
4690 if (rb_obj_is_fiber(vp)) break;
4691 if (rb_obj_is_main_ractor(vp)) break;
4692
4693 obj_free(objspace, vp);
4694 break;
4695 case T_FILE:
4696 obj_free(objspace, vp);
4697 break;
4698 case T_SYMBOL:
4699 case T_ARRAY:
4700 case T_NONE:
4701 break;
4702 default:
4703 if (rb_free_at_exit) {
4704 obj_free(objspace, vp);
4705 }
4706 break;
4707 }
4708 if (poisoned) {
4709 GC_ASSERT(BUILTIN_TYPE(vp) == T_NONE);
4710 asan_poison_object(vp);
4711 }
4712 }
4713 }
4714
4715 gc_exit(objspace, gc_enter_event_finalizer, &lock_lev);
4716
4717 finalize_deferred_heap_pages(objspace);
4718
4719 st_free_table(finalizer_table);
4720 finalizer_table = 0;
4721 ATOMIC_SET(finalizing, 0);
4722}
4723
4724static inline int
4725is_swept_object(VALUE ptr)
4726{
4727 struct heap_page *page = GET_HEAP_PAGE(ptr);
4728 return page->flags.before_sweep ? FALSE : TRUE;
4729}
4730
4731/* garbage objects will be collected soon. */
4732static inline int
4733is_garbage_object(rb_objspace_t *objspace, VALUE ptr)
4734{
4735 if (!is_lazy_sweeping(objspace) ||
4736 is_swept_object(ptr) ||
4737 MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(ptr), ptr)) {
4738
4739 return FALSE;
4740 }
4741 else {
4742 return TRUE;
4743 }
4744}
4745
4746static inline int
4747is_live_object(rb_objspace_t *objspace, VALUE ptr)
4748{
4749 switch (BUILTIN_TYPE(ptr)) {
4750 case T_NONE:
4751 case T_MOVED:
4752 case T_ZOMBIE:
4753 return FALSE;
4754 default:
4755 break;
4756 }
4757
4758 if (!is_garbage_object(objspace, ptr)) {
4759 return TRUE;
4760 }
4761 else {
4762 return FALSE;
4763 }
4764}
4765
4766static inline int
4767is_markable_object(VALUE obj)
4768{
4769 if (rb_special_const_p(obj)) return FALSE; /* special const is not markable */
4770 check_rvalue_consistency(obj);
4771 return TRUE;
4772}
4773
4774int
4775rb_objspace_markable_object_p(VALUE obj)
4776{
4777 rb_objspace_t *objspace = &rb_objspace;
4778 return is_markable_object(obj) && is_live_object(objspace, obj);
4779}
4780
4781int
4782rb_objspace_garbage_object_p(VALUE obj)
4783{
4784 rb_objspace_t *objspace = &rb_objspace;
4785 return is_garbage_object(objspace, obj);
4786}
4787
4788bool
4789rb_gc_is_ptr_to_obj(void *ptr)
4790{
4791 rb_objspace_t *objspace = &rb_objspace;
4792 return is_pointer_to_heap(objspace, ptr);
4793}
4794
4795VALUE
4796rb_gc_id2ref_obj_tbl(VALUE objid)
4797{
4798 rb_objspace_t *objspace = &rb_objspace;
4799
4800 VALUE orig;
4801 if (st_lookup(objspace->id_to_obj_tbl, objid, &orig)) {
4802 return orig;
4803 }
4804 else {
4805 return Qundef;
4806 }
4807}
4808
4809/*
4810 * call-seq:
4811 * ObjectSpace._id2ref(object_id) -> an_object
4812 *
4813 * Converts an object id to a reference to the object. May not be
4814 * called on an object id passed as a parameter to a finalizer.
4815 *
4816 * s = "I am a string" #=> "I am a string"
4817 * r = ObjectSpace._id2ref(s.object_id) #=> "I am a string"
4818 * r == s #=> true
4819 *
4820 * In multi-ractor mode, if the object is not shareable, it raises
4821 * RangeError.
4822 */
4823
4824static VALUE
4825id2ref(VALUE objid)
4826{
4827#if SIZEOF_LONG == SIZEOF_VOIDP
4828#define NUM2PTR(x) NUM2ULONG(x)
4829#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
4830#define NUM2PTR(x) NUM2ULL(x)
4831#endif
4832 rb_objspace_t *objspace = &rb_objspace;
4833 VALUE ptr;
4834 VALUE orig;
4835 void *p0;
4836
4837 objid = rb_to_int(objid);
4838 if (FIXNUM_P(objid) || rb_big_size(objid) <= SIZEOF_VOIDP) {
4839 ptr = NUM2PTR(objid);
4840 if (ptr == Qtrue) return Qtrue;
4841 if (ptr == Qfalse) return Qfalse;
4842 if (NIL_P(ptr)) return Qnil;
4843 if (FIXNUM_P(ptr)) return (VALUE)ptr;
4844 if (FLONUM_P(ptr)) return (VALUE)ptr;
4845
4846 ptr = obj_id_to_ref(objid);
4847 if ((ptr % sizeof(RVALUE)) == (4 << 2)) {
4848 ID symid = ptr / sizeof(RVALUE);
4849 p0 = (void *)ptr;
4850 if (!rb_static_id_valid_p(symid))
4851 rb_raise(rb_eRangeError, "%p is not symbol id value", p0);
4852 return ID2SYM(symid);
4853 }
4854 }
4855
4856 if (!UNDEF_P(orig = rb_gc_id2ref_obj_tbl(objid)) &&
4857 is_live_object(objspace, orig)) {
4858
4859 if (!rb_multi_ractor_p() || rb_ractor_shareable_p(orig)) {
4860 return orig;
4861 }
4862 else {
4863 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is id of the unshareable object on multi-ractor", rb_int2str(objid, 10));
4864 }
4865 }
4866
4867 if (rb_int_ge(objid, objspace->next_object_id)) {
4868 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not id value", rb_int2str(objid, 10));
4869 }
4870 else {
4871 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is recycled object", rb_int2str(objid, 10));
4872 }
4873}
4874
4875/* :nodoc: */
4876static VALUE
4877os_id2ref(VALUE os, VALUE objid)
4878{
4879 return id2ref(objid);
4880}
4881
4882static VALUE
4883rb_find_object_id(VALUE obj, VALUE (*get_heap_object_id)(VALUE))
4884{
4885 if (STATIC_SYM_P(obj)) {
4886 return (SYM2ID(obj) * sizeof(RVALUE) + (4 << 2)) | FIXNUM_FLAG;
4887 }
4888 else if (FLONUM_P(obj)) {
4889#if SIZEOF_LONG == SIZEOF_VOIDP
4890 return LONG2NUM((SIGNED_VALUE)obj);
4891#else
4892 return LL2NUM((SIGNED_VALUE)obj);
4893#endif
4894 }
4895 else if (SPECIAL_CONST_P(obj)) {
4896 return LONG2NUM((SIGNED_VALUE)obj);
4897 }
4898
4899 return get_heap_object_id(obj);
4900}
4901
4902static VALUE
4903cached_object_id(VALUE obj)
4904{
4905 VALUE id;
4906 rb_objspace_t *objspace = &rb_objspace;
4907
4908 RB_VM_LOCK_ENTER();
4909 if (st_lookup(objspace->obj_to_id_tbl, (st_data_t)obj, &id)) {
4910 GC_ASSERT(FL_TEST(obj, FL_SEEN_OBJ_ID));
4911 }
4912 else {
4913 GC_ASSERT(!FL_TEST(obj, FL_SEEN_OBJ_ID));
4914
4915 id = objspace->next_object_id;
4916 objspace->next_object_id = rb_int_plus(id, INT2FIX(OBJ_ID_INCREMENT));
4917
4918 VALUE already_disabled = rb_gc_disable_no_rest();
4919 st_insert(objspace->obj_to_id_tbl, (st_data_t)obj, (st_data_t)id);
4920 st_insert(objspace->id_to_obj_tbl, (st_data_t)id, (st_data_t)obj);
4921 if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
4922 FL_SET(obj, FL_SEEN_OBJ_ID);
4923 }
4924 RB_VM_LOCK_LEAVE();
4925
4926 return id;
4927}
4928
4929static VALUE
4930nonspecial_obj_id(VALUE obj)
4931{
4932#if SIZEOF_LONG == SIZEOF_VOIDP
4933 return (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG);
4934#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
4935 return LL2NUM((SIGNED_VALUE)(obj) / 2);
4936#else
4937# error not supported
4938#endif
4939}
4940
4941VALUE
4942rb_memory_id(VALUE obj)
4943{
4944 return rb_find_object_id(obj, nonspecial_obj_id);
4945}
4946
4947/*
4948 * Document-method: __id__
4949 * Document-method: object_id
4950 *
4951 * call-seq:
4952 * obj.__id__ -> integer
4953 * obj.object_id -> integer
4954 *
4955 * Returns an integer identifier for +obj+.
4956 *
4957 * The same number will be returned on all calls to +object_id+ for a given
4958 * object, and no two active objects will share an id.
4959 *
4960 * Note that some objects of builtin classes are reused for optimization.
4961 * This is the case for immediate values and frozen string literals.
4962 *
4963 * BasicObject implements +__id__+, Kernel implements +object_id+.
4964 *
4965 * Immediate values are not passed by reference but are passed by value:
4966 * +nil+, +true+, +false+, Fixnums, Symbols, and some Floats.
4967 *
4968 * Object.new.object_id == Object.new.object_id # => false
4969 * (21 * 2).object_id == (21 * 2).object_id # => true
4970 * "hello".object_id == "hello".object_id # => false
4971 * "hi".freeze.object_id == "hi".freeze.object_id # => true
4972 */
4973
4974VALUE
4975rb_obj_id(VALUE obj)
4976{
4977 /*
4978 * 32-bit VALUE space
4979 * MSB ------------------------ LSB
4980 * false 00000000000000000000000000000000
4981 * true 00000000000000000000000000000010
4982 * nil 00000000000000000000000000000100
4983 * undef 00000000000000000000000000000110
4984 * symbol ssssssssssssssssssssssss00001110
4985 * object oooooooooooooooooooooooooooooo00 = 0 (mod sizeof(RVALUE))
4986 * fixnum fffffffffffffffffffffffffffffff1
4987 *
4988 * object_id space
4989 * LSB
4990 * false 00000000000000000000000000000000
4991 * true 00000000000000000000000000000010
4992 * nil 00000000000000000000000000000100
4993 * undef 00000000000000000000000000000110
4994 * symbol 000SSSSSSSSSSSSSSSSSSSSSSSSSSS0 S...S % A = 4 (S...S = s...s * A + 4)
4995 * object oooooooooooooooooooooooooooooo0 o...o % A = 0
4996 * fixnum fffffffffffffffffffffffffffffff1 bignum if required
4997 *
4998 * where A = sizeof(RVALUE)/4
4999 *
5000 * sizeof(RVALUE) is
5001 * 20 if 32-bit, double is 4-byte aligned
5002 * 24 if 32-bit, double is 8-byte aligned
5003 * 40 if 64-bit
5004 */
5005
5006 return rb_find_object_id(obj, cached_object_id);
5007}
5008
5009static enum rb_id_table_iterator_result
5010cc_table_memsize_i(VALUE ccs_ptr, void *data_ptr)
5011{
5012 size_t *total_size = data_ptr;
5013 struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
5014 *total_size += sizeof(*ccs);
5015 *total_size += sizeof(ccs->entries[0]) * ccs->capa;
5016 return ID_TABLE_CONTINUE;
5017}
5018
5019static size_t
5020cc_table_memsize(struct rb_id_table *cc_table)
5021{
5022 size_t total = rb_id_table_memsize(cc_table);
5023 rb_id_table_foreach_values(cc_table, cc_table_memsize_i, &total);
5024 return total;
5025}
5026
5027static size_t
5028obj_memsize_of(VALUE obj, int use_all_types)
5029{
5030 size_t size = 0;
5031
5032 if (SPECIAL_CONST_P(obj)) {
5033 return 0;
5034 }
5035
5036 if (FL_TEST(obj, FL_EXIVAR)) {
5037 size += rb_generic_ivar_memsize(obj);
5038 }
5039
5040 switch (BUILTIN_TYPE(obj)) {
5041 case T_OBJECT:
5042 if (rb_shape_obj_too_complex(obj)) {
5043 size += rb_st_memsize(ROBJECT_IV_HASH(obj));
5044 }
5045 else if (!(RBASIC(obj)->flags & ROBJECT_EMBED)) {
5046 size += ROBJECT_IV_CAPACITY(obj) * sizeof(VALUE);
5047 }
5048 break;
5049 case T_MODULE:
5050 case T_CLASS:
5051 if (RCLASS_M_TBL(obj)) {
5052 size += rb_id_table_memsize(RCLASS_M_TBL(obj));
5053 }
5054 // class IV sizes are allocated as powers of two
5055 size += SIZEOF_VALUE << bit_length(RCLASS_IV_COUNT(obj));
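        /* e.g. (illustrative): a class with 5 ivars reserves
         * SIZEOF_VALUE << bit_length(5), i.e. 8 * SIZEOF_VALUE bytes */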
5056 if (RCLASS_CVC_TBL(obj)) {
5057 size += rb_id_table_memsize(RCLASS_CVC_TBL(obj));
5058 }
5059 if (RCLASS_EXT(obj)->const_tbl) {
5060 size += rb_id_table_memsize(RCLASS_EXT(obj)->const_tbl);
5061 }
5062 if (RCLASS_CC_TBL(obj)) {
5063 size += cc_table_memsize(RCLASS_CC_TBL(obj));
5064 }
5065 if (FL_TEST_RAW(obj, RCLASS_SUPERCLASSES_INCLUDE_SELF)) {
5066 size += (RCLASS_SUPERCLASS_DEPTH(obj) + 1) * sizeof(VALUE);
5067 }
5068 break;
5069 case T_ICLASS:
5070 if (RICLASS_OWNS_M_TBL_P(obj)) {
5071 if (RCLASS_M_TBL(obj)) {
5072 size += rb_id_table_memsize(RCLASS_M_TBL(obj));
5073 }
5074 }
5075 if (RCLASS_CC_TBL(obj)) {
5076 size += cc_table_memsize(RCLASS_CC_TBL(obj));
5077 }
5078 break;
5079 case T_STRING:
5080 size += rb_str_memsize(obj);
5081 break;
5082 case T_ARRAY:
5083 size += rb_ary_memsize(obj);
5084 break;
5085 case T_HASH:
5086 if (RHASH_ST_TABLE_P(obj)) {
5087 VM_ASSERT(RHASH_ST_TABLE(obj) != NULL);
5088 /* st_table is in the slot */
5089 size += st_memsize(RHASH_ST_TABLE(obj)) - sizeof(st_table);
5090 }
5091 break;
5092 case T_REGEXP:
5093 if (RREGEXP_PTR(obj)) {
5094 size += onig_memsize(RREGEXP_PTR(obj));
5095 }
5096 break;
5097 case T_DATA:
5098 if (use_all_types) size += rb_objspace_data_type_memsize(obj);
5099 break;
5100 case T_MATCH:
5101 {
5102 rb_matchext_t *rm = RMATCH_EXT(obj);
5103 size += onig_region_memsize(&rm->regs);
5104 size += sizeof(struct rmatch_offset) * rm->char_offset_num_allocated;
5105 }
5106 break;
5107 case T_FILE:
5108 if (RFILE(obj)->fptr) {
5109 size += rb_io_memsize(RFILE(obj)->fptr);
5110 }
5111 break;
5112 case T_RATIONAL:
5113 case T_COMPLEX:
5114 break;
5115 case T_IMEMO:
5116 size += imemo_memsize(obj);
5117 break;
5118
5119 case T_FLOAT:
5120 case T_SYMBOL:
5121 break;
5122
5123 case T_BIGNUM:
5124 if (!(RBASIC(obj)->flags & BIGNUM_EMBED_FLAG) && BIGNUM_DIGITS(obj)) {
5125 size += BIGNUM_LEN(obj) * sizeof(BDIGIT);
5126 }
5127 break;
5128
5129 case T_NODE:
5130 UNEXPECTED_NODE(obj_memsize_of);
5131 break;
5132
5133 case T_STRUCT:
5134 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
5135 RSTRUCT(obj)->as.heap.ptr) {
5136 size += sizeof(VALUE) * RSTRUCT_LEN(obj);
5137 }
5138 break;
5139
5140 case T_ZOMBIE:
5141 case T_MOVED:
5142 break;
5143
5144 default:
5145 rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",
5146 BUILTIN_TYPE(obj), (void*)obj);
5147 }
5148
5149 return size + rb_gc_obj_slot_size(obj);
5150}
5151
5152size_t
5153rb_obj_memsize_of(VALUE obj)
5154{
5155 return obj_memsize_of(obj, TRUE);
5156}
5157
5158static int
5159set_zero(st_data_t key, st_data_t val, st_data_t arg)
5160{
5161 VALUE k = (VALUE)key;
5162 VALUE hash = (VALUE)arg;
5163 rb_hash_aset(hash, k, INT2FIX(0));
5164 return ST_CONTINUE;
5165}
5166
5167static VALUE
5168type_sym(size_t type)
5169{
5170 switch (type) {
5171#define COUNT_TYPE(t) case (t): return ID2SYM(rb_intern(#t)); break;
5172 COUNT_TYPE(T_NONE);
5173 COUNT_TYPE(T_OBJECT);
5174 COUNT_TYPE(T_CLASS);
5175 COUNT_TYPE(T_MODULE);
5176 COUNT_TYPE(T_FLOAT);
5177 COUNT_TYPE(T_STRING);
5178 COUNT_TYPE(T_REGEXP);
5179 COUNT_TYPE(T_ARRAY);
5180 COUNT_TYPE(T_HASH);
5181 COUNT_TYPE(T_STRUCT);
5182 COUNT_TYPE(T_BIGNUM);
5183 COUNT_TYPE(T_FILE);
5184 COUNT_TYPE(T_DATA);
5185 COUNT_TYPE(T_MATCH);
5186 COUNT_TYPE(T_COMPLEX);
5187 COUNT_TYPE(T_RATIONAL);
5188 COUNT_TYPE(T_NIL);
5189 COUNT_TYPE(T_TRUE);
5190 COUNT_TYPE(T_FALSE);
5191 COUNT_TYPE(T_SYMBOL);
5192 COUNT_TYPE(T_FIXNUM);
5193 COUNT_TYPE(T_IMEMO);
5194 COUNT_TYPE(T_UNDEF);
5195 COUNT_TYPE(T_NODE);
5196 COUNT_TYPE(T_ICLASS);
5197 COUNT_TYPE(T_ZOMBIE);
5198 COUNT_TYPE(T_MOVED);
5199#undef COUNT_TYPE
5200 default: return SIZET2NUM(type); break;
5201 }
5202}
5203
5204/*
5205 * call-seq:
5206 * ObjectSpace.count_objects([result_hash]) -> hash
5207 *
5208 * Counts all objects grouped by type.
5209 *
5210 * It returns a hash, such as:
5211 * {
5212 * :TOTAL=>10000,
5213 * :FREE=>3011,
5214 * :T_OBJECT=>6,
5215 * :T_CLASS=>404,
5216 * # ...
5217 * }
5218 *
5219 * The contents of the returned hash are implementation specific.
5220 * It may be changed in the future.
5221 *
5222 * The keys starting with +:T_+ mean live objects.
5223 * For example, +:T_ARRAY+ is the number of arrays.
5224 * +:FREE+ means object slots which are not currently used.
5225 * +:TOTAL+ means the sum of the above.
5226 *
5227 * If the optional argument +result_hash+ is given,
5228 * it is overwritten and returned. This is intended to avoid the probe effect.
5229 *
5230 * h = {}
5231 * ObjectSpace.count_objects(h)
5232 * puts h
5233 * # => { :TOTAL=>10000, :T_CLASS=>158280, :T_MODULE=>20672, :T_STRING=>527249 }
5234 *
5235 * This method is only expected to work on C Ruby.
5236 *
5237 */
5238
5239static VALUE
5240count_objects(int argc, VALUE *argv, VALUE os)
5241{
5242 rb_objspace_t *objspace = &rb_objspace;
5243 size_t counts[T_MASK+1];
5244 size_t freed = 0;
5245 size_t total = 0;
5246 size_t i;
5247 VALUE hash = Qnil;
5248
5249 if (rb_check_arity(argc, 0, 1) == 1) {
5250 hash = argv[0];
5251 if (!RB_TYPE_P(hash, T_HASH))
5252 rb_raise(rb_eTypeError, "non-hash given");
5253 }
5254
5255 for (i = 0; i <= T_MASK; i++) {
5256 counts[i] = 0;
5257 }
5258
5259 for (i = 0; i < heap_allocated_pages; i++) {
5260 struct heap_page *page = heap_pages_sorted[i];
5261 short stride = page->slot_size;
5262
5263 uintptr_t p = (uintptr_t)page->start;
5264 uintptr_t pend = p + page->total_slots * stride;
5265 for (;p < pend; p += stride) {
5266 VALUE vp = (VALUE)p;
5267 GC_ASSERT((NUM_IN_PAGE(vp) * BASE_SLOT_SIZE) % page->slot_size == 0);
5268
5269 void *poisoned = asan_unpoison_object_temporary(vp);
5270 if (RANY(p)->as.basic.flags) {
5271 counts[BUILTIN_TYPE(vp)]++;
5272 }
5273 else {
5274 freed++;
5275 }
5276 if (poisoned) {
5277 GC_ASSERT(BUILTIN_TYPE(vp) == T_NONE);
5278 asan_poison_object(vp);
5279 }
5280 }
5281 total += page->total_slots;
5282 }
5283
5284 if (NIL_P(hash)) {
5285 hash = rb_hash_new();
5286 }
5287 else if (!RHASH_EMPTY_P(hash)) {
5288 rb_hash_stlike_foreach(hash, set_zero, hash);
5289 }
5290 rb_hash_aset(hash, ID2SYM(rb_intern("TOTAL")), SIZET2NUM(total));
5291 rb_hash_aset(hash, ID2SYM(rb_intern("FREE")), SIZET2NUM(freed));
5292
5293 for (i = 0; i <= T_MASK; i++) {
5294 VALUE type = type_sym(i);
5295 if (counts[i])
5296 rb_hash_aset(hash, type, SIZET2NUM(counts[i]));
5297 }
5298
5299 return hash;
5300}
5301
5302/*
5303 ------------------------ Garbage Collection ------------------------
5304*/
5305
5306/* Sweeping */
5307
5308static size_t
5309objspace_available_slots(rb_objspace_t *objspace)
5310{
5311 size_t total_slots = 0;
5312 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5313 rb_size_pool_t *size_pool = &size_pools[i];
5314 total_slots += SIZE_POOL_EDEN_HEAP(size_pool)->total_slots;
5315 total_slots += SIZE_POOL_TOMB_HEAP(size_pool)->total_slots;
5316 }
5317 return total_slots;
5318}
5319
5320static size_t
5321objspace_live_slots(rb_objspace_t *objspace)
5322{
5323 return total_allocated_objects(objspace) - total_freed_objects(objspace) - heap_pages_final_slots;
5324}
5325
5326static size_t
5327objspace_free_slots(rb_objspace_t *objspace)
5328{
5329 return objspace_available_slots(objspace) - objspace_live_slots(objspace) - heap_pages_final_slots;
5330}
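
/* An illustrative invariant implied by the helpers above (final slots are
 * zombie objects still waiting for their finalizers to run):
 *
 *     objspace_available_slots == objspace_live_slots
 *                               + objspace_free_slots
 *                               + heap_pages_final_slots
 */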
5331
5332static void
5333gc_setup_mark_bits(struct heap_page *page)
5334{
5335 /* copy oldgen bitmap to mark bitmap */
5336 memcpy(&page->mark_bits[0], &page->uncollectible_bits[0], HEAP_PAGE_BITMAP_SIZE);
5337}
5338
5339static int gc_is_moveable_obj(rb_objspace_t *objspace, VALUE obj);
5340static VALUE gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free, size_t src_slot_size, size_t slot_size);
5341
5342#if defined(_WIN32)
5343enum {HEAP_PAGE_LOCK = PAGE_NOACCESS, HEAP_PAGE_UNLOCK = PAGE_READWRITE};
5344
5345static BOOL
5346protect_page_body(struct heap_page_body *body, DWORD protect)
5347{
5348 DWORD old_protect;
5349 return VirtualProtect(body, HEAP_PAGE_SIZE, protect, &old_protect) != 0;
5350}
5351#else
5352enum {HEAP_PAGE_LOCK = PROT_NONE, HEAP_PAGE_UNLOCK = PROT_READ | PROT_WRITE};
5353#define protect_page_body(body, protect) !mprotect((body), HEAP_PAGE_SIZE, (protect))
5354#endif
5355
5356static void
5357lock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
5358{
5359 if (!protect_page_body(body, HEAP_PAGE_LOCK)) {
5360 rb_bug("Couldn't protect page %p, errno: %s", (void *)body, strerror(errno));
5361 }
5362 else {
5363 gc_report(5, objspace, "Protecting page in move %p\n", (void *)body);
5364 }
5365}
5366
5367static void
5368unlock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
5369{
5370 if (!protect_page_body(body, HEAP_PAGE_UNLOCK)) {
5371 rb_bug("Couldn't unprotect page %p, errno: %s", (void *)body, strerror(errno));
5372 }
5373 else {
5374 gc_report(5, objspace, "Unprotecting page in move %p\n", (void *)body);
5375 }
5376}
5377
5378static bool
5379try_move(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *free_page, VALUE src)
5380{
5381 GC_ASSERT(gc_is_moveable_obj(objspace, src));
5382
5383 struct heap_page *src_page = GET_HEAP_PAGE(src);
5384 if (!free_page) {
5385 return false;
5386 }
5387
5388 /* We should return true if either src is successfully moved, or src is
5389 * unmoveable. A false return will cause the sweeping cursor to be
5390 * incremented to the next page, and src will attempt to move again */
5391 GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(src), src));
5392
5393 asan_unlock_freelist(free_page);
5394 VALUE dest = (VALUE)free_page->freelist;
5395 asan_lock_freelist(free_page);
5396 asan_unpoison_object(dest, false);
5397 if (!dest) {
5398 /* if we can't get something from the freelist then the page must be
5399 * full */
5400 return false;
5401 }
5402 asan_unlock_freelist(free_page);
5403 free_page->freelist = RANY(dest)->as.free.next;
5404 asan_lock_freelist(free_page);
5405
5406 GC_ASSERT(RB_BUILTIN_TYPE(dest) == T_NONE);
5407
5408 if (src_page->slot_size > free_page->slot_size) {
5409 objspace->rcompactor.moved_down_count_table[BUILTIN_TYPE(src)]++;
5410 }
5411 else if (free_page->slot_size > src_page->slot_size) {
5412 objspace->rcompactor.moved_up_count_table[BUILTIN_TYPE(src)]++;
5413 }
5414 objspace->rcompactor.moved_count_table[BUILTIN_TYPE(src)]++;
5415 objspace->rcompactor.total_moved++;
5416
5417 gc_move(objspace, src, dest, src_page->slot_size, free_page->slot_size);
5418 gc_pin(objspace, src);
5419 free_page->free_slots--;
5420
5421 return true;
5422}
5423
5424static void
5425gc_unprotect_pages(rb_objspace_t *objspace, rb_heap_t *heap)
5426{
5427 struct heap_page *cursor = heap->compact_cursor;
5428
5429 while (cursor) {
5430 unlock_page_body(objspace, GET_PAGE_BODY(cursor->start));
5431 cursor = ccan_list_next(&heap->pages, cursor, page_node);
5432 }
5433}
5434
5435static void gc_update_references(rb_objspace_t * objspace);
5436#if GC_CAN_COMPILE_COMPACTION
5437static void invalidate_moved_page(rb_objspace_t *objspace, struct heap_page *page);
5438#endif
5439
5440#if defined(__MINGW32__) || defined(_WIN32)
5441# define GC_COMPACTION_SUPPORTED 1
5442#else
5443/* If we are not on MinGW or Windows and do not have mmap, we cannot use
5444 * mprotect for the read barrier, so we must disable compaction. */
5445# define GC_COMPACTION_SUPPORTED (GC_CAN_COMPILE_COMPACTION && HEAP_PAGE_ALLOC_USE_MMAP)
5446#endif
5447
5448#if GC_CAN_COMPILE_COMPACTION
5449static void
5450read_barrier_handler(uintptr_t original_address)
5451{
5452 VALUE obj;
5453 rb_objspace_t * objspace = &rb_objspace;
5454
5455 /* Calculate address aligned to slots. */
5456 uintptr_t address = original_address - (original_address % BASE_SLOT_SIZE);
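    /* e.g. (illustrative, assuming BASE_SLOT_SIZE == 40 on a 64-bit build):
     * a fault at address 4136 is rounded down to 4120, the start of the
     * slot containing it (4136 % 40 == 16, and 4136 - 16 == 4120). */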
5457
5458 obj = (VALUE)address;
5459
5460 struct heap_page_body *page_body = GET_PAGE_BODY(obj);
5461
5462 /* If the page_body is NULL, then mprotect cannot handle it and will crash
5463 * with "Cannot allocate memory". */
5464 if (page_body == NULL) {
5465 rb_bug("read_barrier_handler: segmentation fault at %p", (void *)original_address);
5466 }
5467
5468 RB_VM_LOCK_ENTER();
5469 {
5470 unlock_page_body(objspace, page_body);
5471
5472 objspace->profile.read_barrier_faults++;
5473
5474 invalidate_moved_page(objspace, GET_HEAP_PAGE(obj));
5475 }
5476 RB_VM_LOCK_LEAVE();
5477}
5478#endif
5479
5480#if !GC_CAN_COMPILE_COMPACTION
5481static void
5482uninstall_handlers(void)
5483{
5484 /* no-op */
5485}
5486
5487static void
5488install_handlers(void)
5489{
5490 /* no-op */
5491}
5492#elif defined(_WIN32)
5493static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;
5494typedef void (*signal_handler)(int);
5495static signal_handler old_sigsegv_handler;
5496
5497static LONG WINAPI
5498read_barrier_signal(EXCEPTION_POINTERS * info)
5499{
5500 /* EXCEPTION_ACCESS_VIOLATION is what's raised by access to protected pages */
5501 if (info->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) {
5502 /* > The second array element specifies the virtual address of the inaccessible data.
5503 * https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-exception_record
5504 *
5505 * Use this address to invalidate the page */
5506 read_barrier_handler((uintptr_t)info->ExceptionRecord->ExceptionInformation[1]);
5507 return EXCEPTION_CONTINUE_EXECUTION;
5508 }
5509 else {
5510 return EXCEPTION_CONTINUE_SEARCH;
5511 }
5512}
5513
5514static void
5515uninstall_handlers(void)
5516{
5517 signal(SIGSEGV, old_sigsegv_handler);
5518 SetUnhandledExceptionFilter(old_handler);
5519}
5520
5521static void
5522install_handlers(void)
5523{
5524 /* Remove SEGV handler so that the Unhandled Exception Filter handles it */
5525 old_sigsegv_handler = signal(SIGSEGV, NULL);
5526 /* Unhandled Exception Filter has access to the violation address similar
5527 * to si_addr from sigaction */
5528 old_handler = SetUnhandledExceptionFilter(read_barrier_signal);
5529}
5530#else
5531static struct sigaction old_sigbus_handler;
5532static struct sigaction old_sigsegv_handler;
5533
5534#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
5535static exception_mask_t old_exception_masks[32];
5536static mach_port_t old_exception_ports[32];
5537static exception_behavior_t old_exception_behaviors[32];
5538static thread_state_flavor_t old_exception_flavors[32];
5539static mach_msg_type_number_t old_exception_count;
5540
5541static void
5542disable_mach_bad_access_exc(void)
5543{
5544 old_exception_count = sizeof(old_exception_masks) / sizeof(old_exception_masks[0]);
5545 task_swap_exception_ports(
5546 mach_task_self(), EXC_MASK_BAD_ACCESS,
5547 MACH_PORT_NULL, EXCEPTION_DEFAULT, 0,
5548 old_exception_masks, &old_exception_count,
5549 old_exception_ports, old_exception_behaviors, old_exception_flavors
5550 );
5551}
5552
5553static void
5554restore_mach_bad_access_exc(void)
5555{
5556 for (mach_msg_type_number_t i = 0; i < old_exception_count; i++) {
5557 task_set_exception_ports(
5558 mach_task_self(),
5559 old_exception_masks[i], old_exception_ports[i],
5560 old_exception_behaviors[i], old_exception_flavors[i]
5561 );
5562 }
5563}
5564#endif
5565
5566static void
5567read_barrier_signal(int sig, siginfo_t * info, void * data)
5568{
5569 // setup SEGV/BUS handlers for errors
5570 struct sigaction prev_sigbus, prev_sigsegv;
5571 sigaction(SIGBUS, &old_sigbus_handler, &prev_sigbus);
5572 sigaction(SIGSEGV, &old_sigsegv_handler, &prev_sigsegv);
5573
5574 // enable SIGBUS/SEGV
5575 sigset_t set, prev_set;
5576 sigemptyset(&set);
5577 sigaddset(&set, SIGBUS);
5578 sigaddset(&set, SIGSEGV);
5579 sigprocmask(SIG_UNBLOCK, &set, &prev_set);
5580#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
5581 disable_mach_bad_access_exc();
5582#endif
5583 // run handler
5584 read_barrier_handler((uintptr_t)info->si_addr);
5585
5586 // reset SEGV/BUS handlers
5587#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
5588 restore_mach_bad_access_exc();
5589#endif
5590 sigaction(SIGBUS, &prev_sigbus, NULL);
5591 sigaction(SIGSEGV, &prev_sigsegv, NULL);
5592 sigprocmask(SIG_SETMASK, &prev_set, NULL);
5593}
5594
5595static void
5596uninstall_handlers(void)
5597{
5598#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
5599 restore_mach_bad_access_exc();
5600#endif
5601 sigaction(SIGBUS, &old_sigbus_handler, NULL);
5602 sigaction(SIGSEGV, &old_sigsegv_handler, NULL);
5603}
5604
5605static void
5606install_handlers(void)
5607{
5608 struct sigaction action;
5609 memset(&action, 0, sizeof(struct sigaction));
5610 sigemptyset(&action.sa_mask);
5611 action.sa_sigaction = read_barrier_signal;
5612 action.sa_flags = SA_SIGINFO | SA_ONSTACK;
5613
5614 sigaction(SIGBUS, &action, &old_sigbus_handler);
5615 sigaction(SIGSEGV, &action, &old_sigsegv_handler);
5616#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
5617 disable_mach_bad_access_exc();
5618#endif
5619}
5620#endif
5621
5622static void
5623gc_compact_finish(rb_objspace_t *objspace)
5624{
5625 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5626 rb_size_pool_t *size_pool = &size_pools[i];
5627 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5628 gc_unprotect_pages(objspace, heap);
5629 }
5630
5631 uninstall_handlers();
5632
5633 gc_update_references(objspace);
5634 objspace->profile.compact_count++;
5635
5636 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5637 rb_size_pool_t *size_pool = &size_pools[i];
5638 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5639 heap->compact_cursor = NULL;
5640 heap->free_pages = NULL;
5641 heap->compact_cursor_index = 0;
5642 }
5643
5644 if (gc_prof_enabled(objspace)) {
5645 gc_profile_record *record = gc_prof_record(objspace);
5646 record->moved_objects = objspace->rcompactor.total_moved - record->moved_objects;
5647 }
5648 objspace->flags.during_compacting = FALSE;
5649}
5650
5652 struct heap_page *page;
5653 int final_slots;
5654 int freed_slots;
5655 int empty_slots;
5656};
5657
5658static inline void
5659gc_sweep_plane(rb_objspace_t *objspace, rb_heap_t *heap, uintptr_t p, bits_t bitset, struct gc_sweep_context *ctx)
5660{
5661 struct heap_page * sweep_page = ctx->page;
5662 short slot_size = sweep_page->slot_size;
5663 short slot_bits = slot_size / BASE_SLOT_SIZE;
5664 GC_ASSERT(slot_bits > 0);
5665
5666 do {
5667 VALUE vp = (VALUE)p;
5668 GC_ASSERT(vp % BASE_SLOT_SIZE == 0);
5669
5670 asan_unpoison_object(vp, false);
5671 if (bitset & 1) {
5672 switch (BUILTIN_TYPE(vp)) {
5673 default: /* majority case */
5674 gc_report(2, objspace, "page_sweep: free %p\n", (void *)p);
5675#if RGENGC_CHECK_MODE
5676 if (!is_full_marking(objspace)) {
5677 if (RVALUE_OLD_P(vp)) rb_bug("page_sweep: %p - old while minor GC.", (void *)p);
5678 if (RVALUE_REMEMBERED(vp)) rb_bug("page_sweep: %p - remembered.", (void *)p);
5679 }
5680#endif
5681 if (obj_free(objspace, vp)) {
5682                    // always add free slots back to the swept page's freelist,
5683                    // so that if we're compacting, we can re-use the slots
5684 (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, BASE_SLOT_SIZE);
5685 heap_page_add_freeobj(objspace, sweep_page, vp);
5686 gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(vp));
5687 ctx->freed_slots++;
5688 }
5689 else {
5690 ctx->final_slots++;
5691 }
5692 break;
5693
5694 case T_MOVED:
5695 if (objspace->flags.during_compacting) {
5696 /* The sweep cursor shouldn't have made it to any
5697 * T_MOVED slots while the compact flag is enabled.
5698 * The sweep cursor and compact cursor move in
5699 * opposite directions, and when they meet references will
5700 * get updated and "during_compacting" should get disabled */
5701 rb_bug("T_MOVED shouldn't be seen until compaction is finished");
5702 }
5703 gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(vp));
5704 ctx->empty_slots++;
5705 heap_page_add_freeobj(objspace, sweep_page, vp);
5706 break;
5707 case T_ZOMBIE:
5708 /* already counted */
5709 break;
5710 case T_NONE:
5711 ctx->empty_slots++; /* already freed */
5712 break;
5713 }
5714 }
5715 p += slot_size;
5716 bitset >>= slot_bits;
5717 } while (bitset);
5718}
5719
5720static inline void
5721gc_sweep_page(rb_objspace_t *objspace, rb_heap_t *heap, struct gc_sweep_context *ctx)
5722{
5723 struct heap_page *sweep_page = ctx->page;
5724 GC_ASSERT(SIZE_POOL_EDEN_HEAP(sweep_page->size_pool) == heap);
5725
5726 uintptr_t p;
5727 bits_t *bits, bitset;
5728
5729 gc_report(2, objspace, "page_sweep: start.\n");
5730
5731#if RGENGC_CHECK_MODE
5732 if (!objspace->flags.immediate_sweep) {
5733 GC_ASSERT(sweep_page->flags.before_sweep == TRUE);
5734 }
5735#endif
5736 sweep_page->flags.before_sweep = FALSE;
5737 sweep_page->free_slots = 0;
5738
5739 p = (uintptr_t)sweep_page->start;
5740 bits = sweep_page->mark_bits;
5741
5742 int page_rvalue_count = sweep_page->total_slots * (sweep_page->slot_size / BASE_SLOT_SIZE);
5743 int out_of_range_bits = (NUM_IN_PAGE(p) + page_rvalue_count) % BITS_BITLENGTH;
5744 if (out_of_range_bits != 0) { // sizeof(RVALUE) == 64
5745 bits[BITMAP_INDEX(p) + page_rvalue_count / BITS_BITLENGTH] |= ~(((bits_t)1 << out_of_range_bits) - 1);
5746 }
5747
5748 /* The last bitmap plane may not be used if the last plane does not
5749 * have enough space for the slot_size. In that case, the last plane must
5750 * be skipped since none of the bits will be set. */
5751 int bitmap_plane_count = CEILDIV(NUM_IN_PAGE(p) + page_rvalue_count, BITS_BITLENGTH);
5752 GC_ASSERT(bitmap_plane_count == HEAP_PAGE_BITMAP_LIMIT - 1 ||
5753 bitmap_plane_count == HEAP_PAGE_BITMAP_LIMIT);
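
    /* An illustrative example, assuming BITS_BITLENGTH == 64: with
     * NUM_IN_PAGE(p) == 2 and page_rvalue_count == 100 (hypothetical
     * values), the page occupies bit positions 2..101, so its last plane
     * holds 102 % 64 == 38 in-range bits. The mask applied above forces
     * bits 38..63 of that plane on, so the sweep below treats the
     * out-of-range tail as marked and never frees past the end of the
     * page; bitmap_plane_count is CEILDIV(102, 64) == 2. */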
5754
5755 // Skip out of range slots at the head of the page
5756 bitset = ~bits[0];
5757 bitset >>= NUM_IN_PAGE(p);
5758 if (bitset) {
5759 gc_sweep_plane(objspace, heap, p, bitset, ctx);
5760 }
5761 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
5762
5763 for (int i = 1; i < bitmap_plane_count; i++) {
5764 bitset = ~bits[i];
5765 if (bitset) {
5766 gc_sweep_plane(objspace, heap, p, bitset, ctx);
5767 }
5768 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
5769 }
5770
5771 if (!heap->compact_cursor) {
5772 gc_setup_mark_bits(sweep_page);
5773 }
5774
5775#if GC_PROFILE_MORE_DETAIL
5776 if (gc_prof_enabled(objspace)) {
5777 gc_profile_record *record = gc_prof_record(objspace);
5778 record->removing_objects += ctx->final_slots + ctx->freed_slots;
5779 record->empty_objects += ctx->empty_slots;
5780 }
5781#endif
5782 if (0) fprintf(stderr, "gc_sweep_page(%"PRIdSIZE"): total_slots: %d, freed_slots: %d, empty_slots: %d, final_slots: %d\n",
5783 rb_gc_count(),
5784 sweep_page->total_slots,
5785 ctx->freed_slots, ctx->empty_slots, ctx->final_slots);
5786
5787 sweep_page->free_slots += ctx->freed_slots + ctx->empty_slots;
5788 sweep_page->size_pool->total_freed_objects += ctx->freed_slots;
5789
5790 if (heap_pages_deferred_final && !finalizing) {
5791 rb_thread_t *th = GET_THREAD();
5792 if (th) {
5793 gc_finalize_deferred_register(objspace);
5794 }
5795 }
5796
5797#if RGENGC_CHECK_MODE
5798 short freelist_len = 0;
5799 asan_unlock_freelist(sweep_page);
5800 RVALUE *ptr = sweep_page->freelist;
5801 while (ptr) {
5802 freelist_len++;
5803 ptr = ptr->as.free.next;
5804 }
5805 asan_lock_freelist(sweep_page);
5806 if (freelist_len != sweep_page->free_slots) {
5807 rb_bug("inconsistent freelist length: expected %d but was %d", sweep_page->free_slots, freelist_len);
5808 }
5809#endif
5810
5811 gc_report(2, objspace, "page_sweep: end.\n");
5812}
5813
5814static const char *
5815gc_mode_name(enum gc_mode mode)
5816{
5817 switch (mode) {
5818 case gc_mode_none: return "none";
5819 case gc_mode_marking: return "marking";
5820 case gc_mode_sweeping: return "sweeping";
5821 case gc_mode_compacting: return "compacting";
5822 default: rb_bug("gc_mode_name: unknown mode: %d", (int)mode);
5823 }
5824}
5825
5826static void
5827gc_mode_transition(rb_objspace_t *objspace, enum gc_mode mode)
5828{
5829#if RGENGC_CHECK_MODE
5830 enum gc_mode prev_mode = gc_mode(objspace);
5831 switch (prev_mode) {
5832 case gc_mode_none: GC_ASSERT(mode == gc_mode_marking); break;
5833 case gc_mode_marking: GC_ASSERT(mode == gc_mode_sweeping); break;
5834 case gc_mode_sweeping: GC_ASSERT(mode == gc_mode_none || mode == gc_mode_compacting); break;
5835 case gc_mode_compacting: GC_ASSERT(mode == gc_mode_none); break;
5836 }
5837#endif
5838 if (0) fprintf(stderr, "gc_mode_transition: %s->%s\n", gc_mode_name(gc_mode(objspace)), gc_mode_name(mode));
5839 gc_mode_set(objspace, mode);
5840}
5841
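/* Append a detached freelist to a page's freelist, walking the existing
 * list to its tail under ASAN unpoison/poison so the free-slot links stay
 * poisoned outside this function. */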
5842static void
5843heap_page_freelist_append(struct heap_page *page, RVALUE *freelist)
5844{
5845 if (freelist) {
5846 asan_unlock_freelist(page);
5847 if (page->freelist) {
5848 RVALUE *p = page->freelist;
5849 asan_unpoison_object((VALUE)p, false);
5850 while (p->as.free.next) {
5851 RVALUE *prev = p;
5852 p = p->as.free.next;
5853 asan_poison_object((VALUE)prev);
5854 asan_unpoison_object((VALUE)p, false);
5855 }
5856 p->as.free.next = freelist;
5857 asan_poison_object((VALUE)p);
5858 }
5859 else {
5860 page->freelist = freelist;
5861 }
5862 asan_lock_freelist(page);
5863 }
5864}
5865
5866static void
5867gc_sweep_start_heap(rb_objspace_t *objspace, rb_heap_t *heap)
5868{
5869 heap->sweeping_page = ccan_list_top(&heap->pages, struct heap_page, page_node);
5870 heap->free_pages = NULL;
5871 heap->pooled_pages = NULL;
5872 if (!objspace->flags.immediate_sweep) {
5873 struct heap_page *page = NULL;
5874
5875 ccan_list_for_each(&heap->pages, page, page_node) {
5876 page->flags.before_sweep = TRUE;
5877 }
5878 }
5879}
5880
5881#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 4
5882__attribute__((noinline))
5883#endif
5884
5885#if GC_CAN_COMPILE_COMPACTION
5886static void gc_sort_heap_by_compare_func(rb_objspace_t *objspace, gc_compact_compare_func compare_func);
5887static int compare_pinned_slots(const void *left, const void *right, void *d);
5888#endif
5889
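/* Enter the sweeping phase: reset each heap's sweep cursor and free/pooled
 * page lists, sort the pages when compacting, finish size pools that have
 * no pages at all, and clear every ractor's newobj cache so no stale
 * freelist entries survive into the sweep. */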
5890static void
5891gc_sweep_start(rb_objspace_t *objspace)
5892{
5893 gc_mode_transition(objspace, gc_mode_sweeping);
5894 objspace->rincgc.pooled_slots = 0;
5895
5896#if GC_CAN_COMPILE_COMPACTION
5897 if (objspace->flags.during_compacting) {
5898 gc_sort_heap_by_compare_func(
5899 objspace,
5900 objspace->rcompactor.compare_func ? objspace->rcompactor.compare_func : compare_pinned_slots
5901 );
5902 }
5903#endif
5904
5905 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5906 rb_size_pool_t *size_pool = &size_pools[i];
5907 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5908
5909 gc_sweep_start_heap(objspace, heap);
5910
5911 /* We should call gc_sweep_finish_size_pool for size pools with no pages. */
5912 if (heap->sweeping_page == NULL) {
5913 GC_ASSERT(heap->total_pages == 0);
5914 GC_ASSERT(heap->total_slots == 0);
5915 gc_sweep_finish_size_pool(objspace, size_pool);
5916 }
5917 }
5918
5919 rb_ractor_t *r = NULL;
5920 ccan_list_for_each(&GET_VM()->ractor.set, r, vmlr_node) {
5921 rb_gc_ractor_newobj_cache_clear(&r->newobj_cache);
5922 }
5923}
5924
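/* After a size pool finishes sweeping, decide whether enough free slots were
 * recovered; if not, resurrect pages from the tomb heap, grow the pool, or
 * request a major GC (GPR_FLAG_MAJOR_BY_NOFREE). */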
5925static void
5926gc_sweep_finish_size_pool(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
5927{
5928 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5929 size_t total_slots = heap->total_slots + SIZE_POOL_TOMB_HEAP(size_pool)->total_slots;
5930 size_t total_pages = heap->total_pages + SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
5931 size_t swept_slots = size_pool->freed_slots + size_pool->empty_slots;
5932
5933 size_t init_slots = gc_params.size_pool_init_slots[size_pool - size_pools];
5934 size_t min_free_slots = (size_t)(MAX(total_slots, init_slots) * gc_params.heap_free_slots_min_ratio);
5935
5936 /* If we don't have enough slots and we have pages on the tomb heap, move
5937 * pages from the tomb heap to the eden heap. This may prevent page
5938 * creation thrashing (frequently allocating and deallocating pages) and
5939 * GC thrashing (running GC more frequently than required). */
5940 struct heap_page *resurrected_page;
5941 while (swept_slots < min_free_slots &&
5942 (resurrected_page = heap_page_resurrect(objspace, size_pool))) {
5943 swept_slots += resurrected_page->free_slots;
5944
5945 heap_add_page(objspace, size_pool, heap, resurrected_page);
5946 heap_add_freepage(heap, resurrected_page);
5947 }
5948
5949 if (swept_slots < min_free_slots) {
5950 bool grow_heap = is_full_marking(objspace);
5951
5952 /* Consider growing or starting a major GC if we are not currently in a
5953 * major GC and we can't allocate any more pages. */
5954 if (!is_full_marking(objspace) && size_pool->allocatable_pages == 0) {
5955 /* The heap is a growth heap if it freed more slots than it had empty slots. */
5956 bool is_growth_heap = size_pool->empty_slots == 0 || size_pool->freed_slots > size_pool->empty_slots;
5957
5958 /* Grow this heap if we haven't run at least RVALUE_OLD_AGE minor
5959 * GC since the last major GC or if this heap is smaller than
5960 * the configured initial size. */
5961 if (objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE ||
5962 total_slots < init_slots) {
5963 grow_heap = TRUE;
5964 }
5965 else if (is_growth_heap) { /* Only growth heaps are allowed to start a major GC. */
5966 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_NOFREE;
5967 size_pool->force_major_gc_count++;
5968 }
5969 }
5970
5971 if (grow_heap) {
5972 size_t extend_page_count = heap_extend_pages(objspace, size_pool, swept_slots, total_slots, total_pages);
5973
5974 if (extend_page_count > size_pool->allocatable_pages) {
5975 size_pool_allocatable_pages_set(objspace, size_pool, extend_page_count);
5976 }
5977 }
5978 }
5979}
5980
5981static void
5982gc_sweep_finish(rb_objspace_t *objspace)
5983{
5984 gc_report(1, objspace, "gc_sweep_finish\n");
5985
5986 gc_prof_set_heap_info(objspace);
5987 heap_pages_free_unused_pages(objspace);
5988
5989 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5990 rb_size_pool_t *size_pool = &size_pools[i];
5991
5992 /* if heap_pages has unused pages, then assign them to increment */
5993 size_t tomb_pages = SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
5994 if (size_pool->allocatable_pages < tomb_pages) {
5995 size_pool->allocatable_pages = tomb_pages;
5996 }
5997
5998 size_pool->freed_slots = 0;
5999 size_pool->empty_slots = 0;
6000
6001 if (!will_be_incremental_marking(objspace)) {
6002 rb_heap_t *eden_heap = SIZE_POOL_EDEN_HEAP(size_pool);
6003 struct heap_page *end_page = eden_heap->free_pages;
6004 if (end_page) {
6005 while (end_page->free_next) end_page = end_page->free_next;
6006 end_page->free_next = eden_heap->pooled_pages;
6007 }
6008 else {
6009 eden_heap->free_pages = eden_heap->pooled_pages;
6010 }
6011 eden_heap->pooled_pages = NULL;
6012 objspace->rincgc.pooled_slots = 0;
6013 }
6014 }
6015 heap_pages_expand_sorted(objspace);
6016
6017 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_END_SWEEP, 0);
6018 gc_mode_transition(objspace, gc_mode_none);
6019
6020#if RGENGC_CHECK_MODE >= 2
6021 gc_verify_internal_consistency(objspace);
6022#endif
6023}
6024
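/* Perform one lazy-sweep step: sweep pages from heap->sweeping_page until a
 * page with free slots is produced or the incremental slot budget
 * (GC_INCREMENTAL_SWEEP_SLOT_COUNT) is exceeded. Completely empty pages are
 * unlinked and moved to the tomb heap, limited by
 * GC_SWEEP_PAGES_FREEABLE_PER_STEP. Returns whether the heap now has free
 * pages. */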
6025static int
6026gc_sweep_step(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
6027{
6028 struct heap_page *sweep_page = heap->sweeping_page;
6029 int unlink_limit = GC_SWEEP_PAGES_FREEABLE_PER_STEP;
6030 int swept_slots = 0;
6031 int pooled_slots = 0;
6032
6033 if (sweep_page == NULL) return FALSE;
6034
6035#if GC_ENABLE_LAZY_SWEEP
6036 gc_prof_sweep_timer_start(objspace);
6037#endif
6038
6039 do {
6040 RUBY_DEBUG_LOG("sweep_page:%p", (void *)sweep_page);
6041
6042 struct gc_sweep_context ctx = {
6043 .page = sweep_page,
6044 .final_slots = 0,
6045 .freed_slots = 0,
6046 .empty_slots = 0,
6047 };
6048 gc_sweep_page(objspace, heap, &ctx);
6049 int free_slots = ctx.freed_slots + ctx.empty_slots;
6050
6051 heap->sweeping_page = ccan_list_next(&heap->pages, sweep_page, page_node);
6052
6053 if (sweep_page->final_slots + free_slots == sweep_page->total_slots &&
6054 heap_pages_freeable_pages > 0 &&
6055 unlink_limit > 0) {
6056 heap_pages_freeable_pages--;
6057 unlink_limit--;
6058 /* there are no living objects -> move this page to tomb heap */
6059 heap_unlink_page(objspace, heap, sweep_page);
6060 heap_add_page(objspace, size_pool, SIZE_POOL_TOMB_HEAP(size_pool), sweep_page);
6061 }
6062 else if (free_slots > 0) {
6063 size_pool->freed_slots += ctx.freed_slots;
6064 size_pool->empty_slots += ctx.empty_slots;
6065
6066 if (pooled_slots < GC_INCREMENTAL_SWEEP_POOL_SLOT_COUNT) {
6067 heap_add_poolpage(objspace, heap, sweep_page);
6068 pooled_slots += free_slots;
6069 }
6070 else {
6071 heap_add_freepage(heap, sweep_page);
6072 swept_slots += free_slots;
6073 if (swept_slots > GC_INCREMENTAL_SWEEP_SLOT_COUNT) {
6074 break;
6075 }
6076 }
6077 }
6078 else {
6079 sweep_page->free_next = NULL;
6080 }
6081 } while ((sweep_page = heap->sweeping_page));
6082
6083 if (!heap->sweeping_page) {
6084 gc_sweep_finish_size_pool(objspace, size_pool);
6085
6086 if (!has_sweeping_pages(objspace)) {
6087 gc_sweep_finish(objspace);
6088 }
6089 }
6090
6091#if GC_ENABLE_LAZY_SWEEP
6092 gc_prof_sweep_timer_stop(objspace);
6093#endif
6094
6095 return heap->free_pages != NULL;
6096}
6097
6098static void
6099gc_sweep_rest(rb_objspace_t *objspace)
6100{
6101 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
6102 rb_size_pool_t *size_pool = &size_pools[i];
6103
6104 while (SIZE_POOL_EDEN_HEAP(size_pool)->sweeping_page) {
6105 gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
6106 }
6107 }
6108}
6109
6110static void
6111gc_sweep_continue(rb_objspace_t *objspace, rb_size_pool_t *sweep_size_pool, rb_heap_t *heap)
6112{
6113 GC_ASSERT(dont_gc_val() == FALSE);
6114 if (!GC_ENABLE_LAZY_SWEEP) return;
6115
6116 gc_sweeping_enter(objspace);
6117
6118 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
6119 rb_size_pool_t *size_pool = &size_pools[i];
6120 if (!gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool))) {
6121 /* sweep_size_pool requires a free slot but sweeping did not yield any. */
6122 if (size_pool == sweep_size_pool) {
6123 if (size_pool->allocatable_pages > 0) {
6124 heap_increment(objspace, size_pool, heap);
6125 }
6126 else {
6127 /* Not allowed to create a new page so finish sweeping. */
6128 gc_sweep_rest(objspace);
6129 break;
6130 }
6131 }
6132 }
6133 }
6134
6135 gc_sweeping_exit(objspace);
6136}
6137
6138#if GC_CAN_COMPILE_COMPACTION
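/* Invalidate the moved objects on one bitmap plane: each T_MOVED forwarding
 * slot is turned back into the original object by copying it back from its
 * new location, and the vacated destination slot is returned to its page's
 * freelist. */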
6139static void
6140invalidate_moved_plane(rb_objspace_t *objspace, struct heap_page *page, uintptr_t p, bits_t bitset)
6141{
6142 if (bitset) {
6143 do {
6144 if (bitset & 1) {
6145 VALUE forwarding_object = (VALUE)p;
6146 VALUE object;
6147
6148 if (BUILTIN_TYPE(forwarding_object) == T_MOVED) {
6149 GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(forwarding_object), forwarding_object));
6150 GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(forwarding_object), forwarding_object));
6151
6152 CLEAR_IN_BITMAP(GET_HEAP_PINNED_BITS(forwarding_object), forwarding_object);
6153
6154 object = rb_gc_location(forwarding_object);
6155
6156 shape_id_t original_shape_id = 0;
6157 if (RB_TYPE_P(object, T_OBJECT)) {
6158 original_shape_id = RMOVED(forwarding_object)->original_shape_id;
6159 }
6160
6161 gc_move(objspace, object, forwarding_object, GET_HEAP_PAGE(object)->slot_size, page->slot_size);
6162 /* forwarding_object is now our actual object, and "object"
6163 * is the free slot for the original page */
6164
6165 if (original_shape_id) {
6166 ROBJECT_SET_SHAPE_ID(forwarding_object, original_shape_id);
6167 }
6168
6169 struct heap_page *orig_page = GET_HEAP_PAGE(object);
6170 orig_page->free_slots++;
6171 heap_page_add_freeobj(objspace, orig_page, object);
6172
6173 GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(forwarding_object), forwarding_object));
6174 GC_ASSERT(BUILTIN_TYPE(forwarding_object) != T_MOVED);
6175 GC_ASSERT(BUILTIN_TYPE(forwarding_object) != T_NONE);
6176 }
6177 }
6178 p += BASE_SLOT_SIZE;
6179 bitset >>= 1;
6180 } while (bitset);
6181 }
6182}
6183
6184static void
6185invalidate_moved_page(rb_objspace_t *objspace, struct heap_page *page)
6186{
6187 int i;
6188 bits_t *mark_bits, *pin_bits;
6189 bits_t bitset;
6190
6191 mark_bits = page->mark_bits;
6192 pin_bits = page->pinned_bits;
6193
6194 uintptr_t p = page->start;
6195
6196 // Skip out of range slots at the head of the page
6197 bitset = pin_bits[0] & ~mark_bits[0];
6198 bitset >>= NUM_IN_PAGE(p);
6199 invalidate_moved_plane(objspace, page, p, bitset);
6200 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
6201
6202 for (i=1; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
6203 /* Moved objects are pinned but never marked. We reuse the pin bits
6204 * to indicate there is a moved object in this slot. */
6205 bitset = pin_bits[i] & ~mark_bits[i];
6206
6207 invalidate_moved_plane(objspace, page, p, bitset);
6208 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
6209 }
6210}
6211#endif
6212
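/* Prepare for compaction: point each eden heap's compact cursor at its last
 * page, reset the per-type move statistics, and install the read barrier
 * handlers that trap accesses to T_MOVED objects. */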
6213static void
6214gc_compact_start(rb_objspace_t *objspace)
6215{
6216 struct heap_page *page = NULL;
6217 gc_mode_transition(objspace, gc_mode_compacting);
6218
6219 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
6220 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(&size_pools[i]);
6221 ccan_list_for_each(&heap->pages, page, page_node) {
6222 page->flags.before_sweep = TRUE;
6223 }
6224
6225 heap->compact_cursor = ccan_list_tail(&heap->pages, struct heap_page, page_node);
6226 heap->compact_cursor_index = 0;
6227 }
6228
6229 if (gc_prof_enabled(objspace)) {
6230 gc_profile_record *record = gc_prof_record(objspace);
6231 record->moved_objects = objspace->rcompactor.total_moved;
6232 }
6233
6234 memset(objspace->rcompactor.considered_count_table, 0, T_MASK * sizeof(size_t));
6235 memset(objspace->rcompactor.moved_count_table, 0, T_MASK * sizeof(size_t));
6236 memset(objspace->rcompactor.moved_up_count_table, 0, T_MASK * sizeof(size_t));
6237 memset(objspace->rcompactor.moved_down_count_table, 0, T_MASK * sizeof(size_t));
6238
6239 /* Set up read barrier for pages containing MOVED objects */
6240 install_handlers();
6241}
6242
6243static void gc_sweep_compact(rb_objspace_t *objspace);
6244
6245static void
6246gc_sweep(rb_objspace_t *objspace)
6247{
6248 gc_sweeping_enter(objspace);
6249
6250 const unsigned int immediate_sweep = objspace->flags.immediate_sweep;
6251
6252 gc_report(1, objspace, "gc_sweep: immediate: %d\n", immediate_sweep);
6253
6254 gc_sweep_start(objspace);
6255 if (objspace->flags.during_compacting) {
6256 gc_sweep_compact(objspace);
6257 }
6258
6259 if (immediate_sweep) {
6260#if !GC_ENABLE_LAZY_SWEEP
6261 gc_prof_sweep_timer_start(objspace);
6262#endif
6263 gc_sweep_rest(objspace);
6264#if !GC_ENABLE_LAZY_SWEEP
6265 gc_prof_sweep_timer_stop(objspace);
6266#endif
6267 }
6268 else {
6269
6270 /* Sweep every size pool. */
6271 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
6272 rb_size_pool_t *size_pool = &size_pools[i];
6273 gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
6274 }
6275 }
6276
6277 gc_sweeping_exit(objspace);
6278}
6279
6280/* Marking - Marking stack */
6281
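/* The mark stack is a singly linked list of fixed-size chunks of
 * STACK_CHUNK_SIZE slots. Chunks emptied by popping are kept in a small
 * cache for reuse, and the cache is trimmed by shrink_stack_chunk_cache()
 * once marking drains the stack. */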
6282static stack_chunk_t *
6283stack_chunk_alloc(void)
6284{
6285 stack_chunk_t *res;
6286
6287 res = malloc(sizeof(stack_chunk_t));
6288 if (!res)
6289 rb_memerror();
6290
6291 return res;
6292}
6293
6294static inline int
6295is_mark_stack_empty(mark_stack_t *stack)
6296{
6297 return stack->chunk == NULL;
6298}
6299
6300static size_t
6301mark_stack_size(mark_stack_t *stack)
6302{
6303 size_t size = stack->index;
6304 stack_chunk_t *chunk = stack->chunk ? stack->chunk->next : NULL;
6305
6306 while (chunk) {
6307 size += stack->limit;
6308 chunk = chunk->next;
6309 }
6310 return size;
6311}
6312
6313static void
6314add_stack_chunk_cache(mark_stack_t *stack, stack_chunk_t *chunk)
6315{
6316 chunk->next = stack->cache;
6317 stack->cache = chunk;
6318 stack->cache_size++;
6319}
6320
6321static void
6322shrink_stack_chunk_cache(mark_stack_t *stack)
6323{
6324 stack_chunk_t *chunk;
6325
6326 if (stack->unused_cache_size > (stack->cache_size/2)) {
6327 chunk = stack->cache;
6328 stack->cache = stack->cache->next;
6329 stack->cache_size--;
6330 free(chunk);
6331 }
6332 stack->unused_cache_size = stack->cache_size;
6333}
6334
6335static void
6336push_mark_stack_chunk(mark_stack_t *stack)
6337{
6338 stack_chunk_t *next;
6339
6340 GC_ASSERT(stack->index == stack->limit);
6341
6342 if (stack->cache_size > 0) {
6343 next = stack->cache;
6344 stack->cache = stack->cache->next;
6345 stack->cache_size--;
6346 if (stack->unused_cache_size > stack->cache_size)
6347 stack->unused_cache_size = stack->cache_size;
6348 }
6349 else {
6350 next = stack_chunk_alloc();
6351 }
6352 next->next = stack->chunk;
6353 stack->chunk = next;
6354 stack->index = 0;
6355}
6356
6357static void
6358pop_mark_stack_chunk(mark_stack_t *stack)
6359{
6360 stack_chunk_t *prev;
6361
6362 prev = stack->chunk->next;
6363 GC_ASSERT(stack->index == 0);
6364 add_stack_chunk_cache(stack, stack->chunk);
6365 stack->chunk = prev;
6366 stack->index = stack->limit;
6367}
6368
6369static void
6370mark_stack_chunk_list_free(stack_chunk_t *chunk)
6371{
6372 stack_chunk_t *next = NULL;
6373
6374 while (chunk != NULL) {
6375 next = chunk->next;
6376 free(chunk);
6377 chunk = next;
6378 }
6379}
6380
6381static void
6382free_stack_chunks(mark_stack_t *stack)
6383{
6384 mark_stack_chunk_list_free(stack->chunk);
6385}
6386
6387static void
6388mark_stack_free_cache(mark_stack_t *stack)
6389{
6390 mark_stack_chunk_list_free(stack->cache);
6391 stack->cache_size = 0;
6392 stack->unused_cache_size = 0;
6393}
6394
6395static void
6396push_mark_stack(mark_stack_t *stack, VALUE data)
6397{
6398 VALUE obj = data;
6399 switch (BUILTIN_TYPE(obj)) {
6400 case T_OBJECT:
6401 case T_CLASS:
6402 case T_MODULE:
6403 case T_FLOAT:
6404 case T_STRING:
6405 case T_REGEXP:
6406 case T_ARRAY:
6407 case T_HASH:
6408 case T_STRUCT:
6409 case T_BIGNUM:
6410 case T_FILE:
6411 case T_DATA:
6412 case T_MATCH:
6413 case T_COMPLEX:
6414 case T_RATIONAL:
6415 case T_TRUE:
6416 case T_FALSE:
6417 case T_SYMBOL:
6418 case T_IMEMO:
6419 case T_ICLASS:
6420 if (stack->index == stack->limit) {
6421 push_mark_stack_chunk(stack);
6422 }
6423 stack->chunk->data[stack->index++] = data;
6424 return;
6425
6426 case T_NONE:
6427 case T_NIL:
6428 case T_FIXNUM:
6429 case T_MOVED:
6430 case T_ZOMBIE:
6431 case T_UNDEF:
6432 case T_MASK:
6433 rb_bug("push_mark_stack() called for broken object");
6434 break;
6435
6436 case T_NODE:
6437 UNEXPECTED_NODE(push_mark_stack);
6438 break;
6439 }
6440
6441 rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
6442 BUILTIN_TYPE(obj), (void *)data,
6443 is_pointer_to_heap(&rb_objspace, (void *)data) ? "corrupted object" : "non object");
6444}
6445
6446static int
6447pop_mark_stack(mark_stack_t *stack, VALUE *data)
6448{
6449 if (is_mark_stack_empty(stack)) {
6450 return FALSE;
6451 }
6452 if (stack->index == 1) {
6453 *data = stack->chunk->data[--stack->index];
6454 pop_mark_stack_chunk(stack);
6455 }
6456 else {
6457 *data = stack->chunk->data[--stack->index];
6458 }
6459 return TRUE;
6460}
6461
6462static void
6463init_mark_stack(mark_stack_t *stack)
6464{
6465 int i;
6466
6467 MEMZERO(stack, mark_stack_t, 1);
6468 stack->index = stack->limit = STACK_CHUNK_SIZE;
6469
6470 for (i=0; i < 4; i++) {
6471 add_stack_chunk_cache(stack, stack_chunk_alloc());
6472 }
6473 stack->unused_cache_size = stack->cache_size;
6474}
6475
6476/* Marking */
6477
6478#define SET_STACK_END SET_MACHINE_STACK_END(&ec->machine.stack_end)
6479
6480#define STACK_START (ec->machine.stack_start)
6481#define STACK_END (ec->machine.stack_end)
6482#define STACK_LEVEL_MAX (ec->machine.stack_maxsize/sizeof(VALUE))
6483
6484#if STACK_GROW_DIRECTION < 0
6485# define STACK_LENGTH (size_t)(STACK_START - STACK_END)
6486#elif STACK_GROW_DIRECTION > 0
6487# define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
6488#else
6489# define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
6490 : (size_t)(STACK_END - STACK_START + 1))
6491#endif
6492#if !STACK_GROW_DIRECTION
6493int ruby_stack_grow_direction;
6494int
6495ruby_get_stack_grow_direction(volatile VALUE *addr)
6496{
6497 VALUE *end;
6498 SET_MACHINE_STACK_END(&end);
6499
6500 if (end > addr) return ruby_stack_grow_direction = 1;
6501 return ruby_stack_grow_direction = -1;
6502}
6503#endif
6504
6505size_t
6506ruby_stack_length(VALUE **p)
6507{
6508 rb_execution_context_t *ec = GET_EC();
6509 SET_STACK_END;
6510 if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
6511 return STACK_LENGTH;
6512}
6513
6514#define PREVENT_STACK_OVERFLOW 1
6515#ifndef PREVENT_STACK_OVERFLOW
6516#if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
6517# define PREVENT_STACK_OVERFLOW 1
6518#else
6519# define PREVENT_STACK_OVERFLOW 0
6520#endif
6521#endif
6522#if PREVENT_STACK_OVERFLOW && !defined(__EMSCRIPTEN__)
6523static int
6524stack_check(rb_execution_context_t *ec, int water_mark)
6525{
6526 SET_STACK_END;
6527
6528 size_t length = STACK_LENGTH;
6529 size_t maximum_length = STACK_LEVEL_MAX - water_mark;
6530
6531 return length > maximum_length;
6532}
6533#else
6534#define stack_check(ec, water_mark) FALSE
6535#endif
6536
6537#define STACKFRAME_FOR_CALL_CFUNC 2048
6538
6539int
6540rb_ec_stack_check(rb_execution_context_t *ec)
6541{
6542 return stack_check(ec, STACKFRAME_FOR_CALL_CFUNC);
6543}
6544
6545int
6546ruby_stack_check(void)
6547{
6548 return stack_check(GET_EC(), STACKFRAME_FOR_CALL_CFUNC);
6549}
6550
6551ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS(static void each_location(rb_objspace_t *objspace, register const VALUE *x, register long n, void (*cb)(rb_objspace_t *, VALUE)));
6552static void
6553each_location(rb_objspace_t *objspace, register const VALUE *x, register long n, void (*cb)(rb_objspace_t *, VALUE))
6554{
6555 VALUE v;
6556 while (n--) {
6557 v = *x;
6558 cb(objspace, v);
6559 x++;
6560 }
6561}
6562
6563static void
6564gc_mark_locations(rb_objspace_t *objspace, const VALUE *start, const VALUE *end, void (*cb)(rb_objspace_t *, VALUE))
6565{
6566 long n;
6567
6568 if (end <= start) return;
6569 n = end - start;
6570 each_location(objspace, start, n, cb);
6571}
6572
6573void
6574rb_gc_mark_locations(const VALUE *start, const VALUE *end)
6575{
6576 gc_mark_locations(&rb_objspace, start, end, gc_mark_maybe);
6577}
6578
6579void
6580rb_gc_mark_values(long n, const VALUE *values)
6581{
6582 long i;
6583 rb_objspace_t *objspace = &rb_objspace;
6584
6585 for (i=0; i<n; i++) {
6586 gc_mark(objspace, values[i]);
6587 }
6588}
6589
6590static void
6591gc_mark_stack_values(rb_objspace_t *objspace, long n, const VALUE *values)
6592{
6593 long i;
6594
6595 for (i=0; i<n; i++) {
6596 if (is_markable_object(values[i])) {
6597 gc_mark_and_pin(objspace, values[i]);
6598 }
6599 }
6600}
6601
6602void
6603rb_gc_mark_vm_stack_values(long n, const VALUE *values)
6604{
6605 rb_objspace_t *objspace = &rb_objspace;
6606 gc_mark_stack_values(objspace, n, values);
6607}
6608
6609static int
6610mark_value(st_data_t key, st_data_t value, st_data_t data)
6611{
6612 rb_objspace_t *objspace = (rb_objspace_t *)data;
6613 gc_mark(objspace, (VALUE)value);
6614 return ST_CONTINUE;
6615}
6616
6617static int
6618mark_value_pin(st_data_t key, st_data_t value, st_data_t data)
6619{
6620 rb_objspace_t *objspace = (rb_objspace_t *)data;
6621 gc_mark_and_pin(objspace, (VALUE)value);
6622 return ST_CONTINUE;
6623}
6624
6625static void
6626mark_tbl_no_pin(rb_objspace_t *objspace, st_table *tbl)
6627{
6628 if (!tbl || tbl->num_entries == 0) return;
6629 st_foreach(tbl, mark_value, (st_data_t)objspace);
6630}
6631
6632static void
6633mark_tbl(rb_objspace_t *objspace, st_table *tbl)
6634{
6635 if (!tbl || tbl->num_entries == 0) return;
6636 st_foreach(tbl, mark_value_pin, (st_data_t)objspace);
6637}
6638
6639static int
6640mark_key(st_data_t key, st_data_t value, st_data_t data)
6641{
6642 rb_objspace_t *objspace = (rb_objspace_t *)data;
6643 gc_mark_and_pin(objspace, (VALUE)key);
6644 return ST_CONTINUE;
6645}
6646
6647static void
6648mark_set(rb_objspace_t *objspace, st_table *tbl)
6649{
6650 if (!tbl) return;
6651 st_foreach(tbl, mark_key, (st_data_t)objspace);
6652}
6653
6654static int
6655pin_value(st_data_t key, st_data_t value, st_data_t data)
6656{
6657 rb_objspace_t *objspace = (rb_objspace_t *)data;
6658 gc_mark_and_pin(objspace, (VALUE)value);
6659 return ST_CONTINUE;
6660}
6661
6662static void
6663mark_finalizer_tbl(rb_objspace_t *objspace, st_table *tbl)
6664{
6665 if (!tbl) return;
6666 st_foreach(tbl, pin_value, (st_data_t)objspace);
6667}
6668
6669void
6670rb_mark_set(st_table *tbl)
6671{
6672 mark_set(&rb_objspace, tbl);
6673}
6674
6675static int
6676mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
6677{
6678 rb_objspace_t *objspace = (rb_objspace_t *)data;
6679
6680 gc_mark(objspace, (VALUE)key);
6681 gc_mark(objspace, (VALUE)value);
6682 return ST_CONTINUE;
6683}
6684
6685static int
6686pin_key_pin_value(st_data_t key, st_data_t value, st_data_t data)
6687{
6688 rb_objspace_t *objspace = (rb_objspace_t *)data;
6689
6690 gc_mark_and_pin(objspace, (VALUE)key);
6691 gc_mark_and_pin(objspace, (VALUE)value);
6692 return ST_CONTINUE;
6693}
6694
6695static int
6696pin_key_mark_value(st_data_t key, st_data_t value, st_data_t data)
6697{
6698 rb_objspace_t *objspace = (rb_objspace_t *)data;
6699
6700 gc_mark_and_pin(objspace, (VALUE)key);
6701 gc_mark(objspace, (VALUE)value);
6702 return ST_CONTINUE;
6703}
6704
6705static void
6706mark_hash(rb_objspace_t *objspace, VALUE hash)
6707{
6708 if (rb_hash_compare_by_id_p(hash)) {
6709 rb_hash_stlike_foreach(hash, pin_key_mark_value, (st_data_t)objspace);
6710 }
6711 else {
6712 rb_hash_stlike_foreach(hash, mark_keyvalue, (st_data_t)objspace);
6713 }
6714
6715 gc_mark(objspace, RHASH(hash)->ifnone);
6716}
6717
6718static void
6719mark_st(rb_objspace_t *objspace, st_table *tbl)
6720{
6721 if (!tbl) return;
6722 st_foreach(tbl, pin_key_pin_value, (st_data_t)objspace);
6723}
6724
6725void
6726rb_mark_hash(st_table *tbl)
6727{
6728 mark_st(&rb_objspace, tbl);
6729}
6730
6731static void
6732mark_method_entry(rb_objspace_t *objspace, const rb_method_entry_t *me)
6733{
6734 const rb_method_definition_t *def = me->def;
6735
6736 gc_mark(objspace, me->owner);
6737 gc_mark(objspace, me->defined_class);
6738
6739 if (def) {
6740 switch (def->type) {
6741 case VM_METHOD_TYPE_ISEQ:
6742 if (def->body.iseq.iseqptr) gc_mark(objspace, (VALUE)def->body.iseq.iseqptr);
6743 gc_mark(objspace, (VALUE)def->body.iseq.cref);
6744
6745 if (def->iseq_overload && me->defined_class) {
6746 // it can be a key of "overloaded_cme" table
6747 // so it should be pinned.
6748 gc_mark_and_pin(objspace, (VALUE)me);
6749 }
6750 break;
6751 case VM_METHOD_TYPE_ATTRSET:
6752 case VM_METHOD_TYPE_IVAR:
6753 gc_mark(objspace, def->body.attr.location);
6754 break;
6755 case VM_METHOD_TYPE_BMETHOD:
6756 gc_mark(objspace, def->body.bmethod.proc);
6757 if (def->body.bmethod.hooks) rb_hook_list_mark(def->body.bmethod.hooks);
6758 break;
6759 case VM_METHOD_TYPE_ALIAS:
6760 gc_mark(objspace, (VALUE)def->body.alias.original_me);
6761 return;
6762 case VM_METHOD_TYPE_REFINED:
6763 gc_mark(objspace, (VALUE)def->body.refined.orig_me);
6764 break;
6765 case VM_METHOD_TYPE_CFUNC:
6766 case VM_METHOD_TYPE_ZSUPER:
6767 case VM_METHOD_TYPE_MISSING:
6768 case VM_METHOD_TYPE_OPTIMIZED:
6769 case VM_METHOD_TYPE_UNDEF:
6770 case VM_METHOD_TYPE_NOTIMPLEMENTED:
6771 break;
6772 }
6773 }
6774}
6775
6776static enum rb_id_table_iterator_result
6777mark_method_entry_i(VALUE me, void *data)
6778{
6779 rb_objspace_t *objspace = (rb_objspace_t *)data;
6780
6781 gc_mark(objspace, me);
6782 return ID_TABLE_CONTINUE;
6783}
6784
6785static void
6786mark_m_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
6787{
6788 if (tbl) {
6789 rb_id_table_foreach_values(tbl, mark_method_entry_i, objspace);
6790 }
6791}
6792
6793static enum rb_id_table_iterator_result
6794mark_const_entry_i(VALUE value, void *data)
6795{
6796 const rb_const_entry_t *ce = (const rb_const_entry_t *)value;
6797 rb_objspace_t *objspace = data;
6798
6799 gc_mark(objspace, ce->value);
6800 gc_mark(objspace, ce->file);
6801 return ID_TABLE_CONTINUE;
6802}
6803
6804static void
6805mark_const_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
6806{
6807 if (!tbl) return;
6808 rb_id_table_foreach_values(tbl, mark_const_entry_i, objspace);
6809}
6810
6811#if STACK_GROW_DIRECTION < 0
6812#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
6813#elif STACK_GROW_DIRECTION > 0
6814#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
6815#else
6816#define GET_STACK_BOUNDS(start, end, appendix) \
6817 ((STACK_END < STACK_START) ? \
6818 ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
6819#endif
6820
6821static void each_stack_location(rb_objspace_t *objspace, const rb_execution_context_t *ec,
6822 const VALUE *stack_start, const VALUE *stack_end, void (*cb)(rb_objspace_t *, VALUE));
6823
6824#if defined(__wasm__)
6825
6826
6827static VALUE *rb_stack_range_tmp[2];
6828
6829static void
6830rb_mark_locations(void *begin, void *end)
6831{
6832 rb_stack_range_tmp[0] = begin;
6833 rb_stack_range_tmp[1] = end;
6834}
6835
6836# if defined(__EMSCRIPTEN__)
6837
6838static void
6839mark_current_machine_context(rb_objspace_t *objspace, rb_execution_context_t *ec)
6840{
6841 emscripten_scan_stack(rb_mark_locations);
6842 each_stack_location(objspace, ec, rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe);
6843
6844 emscripten_scan_registers(rb_mark_locations);
6845 each_stack_location(objspace, ec, rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe);
6846}
6847# else // use Asyncify version
6848
6849static void
6850mark_current_machine_context(rb_objspace_t *objspace, rb_execution_context_t *ec)
6851{
6852 VALUE *stack_start, *stack_end;
6853 SET_STACK_END;
6854 GET_STACK_BOUNDS(stack_start, stack_end, 1);
6855 each_stack_location(objspace, ec, stack_start, stack_end, gc_mark_maybe);
6856
6857 rb_wasm_scan_locals(rb_mark_locations);
6858 each_stack_location(objspace, ec, rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe);
6859}
6860
6861# endif
6862
6863#else // !defined(__wasm__)
6864
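/* Conservatively mark the current machine context: spill the CPU registers
 * into a local jmp_buf via setjmp, then scan both that register save area
 * and the machine stack with gc_mark_maybe. */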
6865static void
6866mark_current_machine_context(rb_objspace_t *objspace, rb_execution_context_t *ec)
6867{
6868 union {
6869 rb_jmp_buf j;
6870 VALUE v[sizeof(rb_jmp_buf) / (sizeof(VALUE))];
6871 } save_regs_gc_mark;
6872 VALUE *stack_start, *stack_end;
6873
6874 FLUSH_REGISTER_WINDOWS;
6875 memset(&save_regs_gc_mark, 0, sizeof(save_regs_gc_mark));
6876 /* This assumes that all registers are saved into the jmp_buf (and stack) */
6877 rb_setjmp(save_regs_gc_mark.j);
6878
6879 /* SET_STACK_END must be called in this function because
6880 * the stack frame of this function may contain
6881 * callee save registers and they should be marked. */
6882 SET_STACK_END;
6883 GET_STACK_BOUNDS(stack_start, stack_end, 1);
6884
6885 each_location(objspace, save_regs_gc_mark.v, numberof(save_regs_gc_mark.v), gc_mark_maybe);
6886
6887 each_stack_location(objspace, ec, stack_start, stack_end, gc_mark_maybe);
6888}
6889#endif
6890
6891static void
6892each_machine_stack_value(const rb_execution_context_t *ec, void (*cb)(rb_objspace_t *, VALUE))
6893{
6894 rb_objspace_t *objspace = &rb_objspace;
6895 VALUE *stack_start, *stack_end;
6896
6897 GET_STACK_BOUNDS(stack_start, stack_end, 0);
6898 RUBY_DEBUG_LOG("ec->th:%u stack_start:%p stack_end:%p", rb_ec_thread_ptr(ec)->serial, stack_start, stack_end);
6899 each_stack_location(objspace, ec, stack_start, stack_end, cb);
6900}
6901
6902void
6903rb_gc_mark_machine_stack(const rb_execution_context_t *ec)
6904{
6905 each_machine_stack_value(ec, gc_mark_maybe);
6906}
6907
6908static void
6909each_stack_location(rb_objspace_t *objspace, const rb_execution_context_t *ec,
6910 const VALUE *stack_start, const VALUE *stack_end, void (*cb)(rb_objspace_t *, VALUE))
6911{
6912
6913 gc_mark_locations(objspace, stack_start, stack_end, cb);
6914
6915#if defined(__mc68000__)
6916 gc_mark_locations(objspace,
6917 (VALUE*)((char*)stack_start + 2),
6918 (VALUE*)((char*)stack_end - 2), cb);
6919#endif
6920}
6921
6922void
6923rb_mark_tbl(st_table *tbl)
6924{
6925 mark_tbl(&rb_objspace, tbl);
6926}
6927
6928void
6929rb_mark_tbl_no_pin(st_table *tbl)
6930{
6931 mark_tbl_no_pin(&rb_objspace, tbl);
6932}
6933
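/* Conservative marking of a word that may or may not be an object
 * reference: only pointers into the heap are considered, T_NONE/T_ZOMBIE
 * slots are skipped, and anything found is marked and pinned so the
 * compactor will not move it. */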
6934static void
6935gc_mark_maybe(rb_objspace_t *objspace, VALUE obj)
6936{
6937 (void)VALGRIND_MAKE_MEM_DEFINED(&obj, sizeof(obj));
6938
6939 if (is_pointer_to_heap(objspace, (void *)obj)) {
6940 void *ptr = asan_unpoison_object_temporary(obj);
6941
6942 /* Garbage can live on the stack, so do not mark or pin */
6943 switch (BUILTIN_TYPE(obj)) {
6944 case T_ZOMBIE:
6945 case T_NONE:
6946 break;
6947 default:
6948 gc_mark_and_pin(objspace, obj);
6949 break;
6950 }
6951
6952 if (ptr) {
6953 GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
6954 asan_poison_object(obj);
6955 }
6956 }
6957}
6958
6959void
6960rb_gc_mark_maybe(VALUE obj)
6961{
6962 gc_mark_maybe(&rb_objspace, obj);
6963}
6964
6965static inline int
6966gc_mark_set(rb_objspace_t *objspace, VALUE obj)
6967{
6968 ASSERT_vm_locking();
6969 if (RVALUE_MARKED(obj)) return 0;
6970 MARK_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
6971 return 1;
6972}
6973
6974static int
6975gc_remember_unprotected(rb_objspace_t *objspace, VALUE obj)
6976{
6977 struct heap_page *page = GET_HEAP_PAGE(obj);
6978 bits_t *uncollectible_bits = &page->uncollectible_bits[0];
6979
6980 if (!MARKED_IN_BITMAP(uncollectible_bits, obj)) {
6981 page->flags.has_uncollectible_wb_unprotected_objects = TRUE;
6982 MARK_IN_BITMAP(uncollectible_bits, obj);
6983 objspace->rgengc.uncollectible_wb_unprotected_objects++;
6984
6985#if RGENGC_PROFILE > 0
6986 objspace->profile.total_remembered_shady_object_count++;
6987#if RGENGC_PROFILE >= 2
6988 objspace->profile.remembered_shady_object_count_types[BUILTIN_TYPE(obj)]++;
6989#endif
6990#endif
6991 return TRUE;
6992 }
6993 else {
6994 return FALSE;
6995 }
6996}
6997
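/* Generational bookkeeping at mark time: if the current parent object is
 * old and the child is young or WB-unprotected, remember the parent so a
 * later minor GC re-scans it. */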
6998static void
6999rgengc_check_relation(rb_objspace_t *objspace, VALUE obj)
7000{
7001 const VALUE old_parent = objspace->rgengc.parent_object;
7002
7003 if (old_parent) { /* parent object is old */
7004 if (RVALUE_WB_UNPROTECTED(obj) || !RVALUE_OLD_P(obj)) {
7005 rgengc_remember(objspace, old_parent);
7006 }
7007 }
7008
7009 GC_ASSERT(old_parent == objspace->rgengc.parent_object);
7010}
7011
7012static void
7013gc_grey(rb_objspace_t *objspace, VALUE obj)
7014{
7015#if RGENGC_CHECK_MODE
7016 if (RVALUE_MARKED(obj) == FALSE) rb_bug("gc_grey: %s is not marked.", obj_info(obj));
7017 if (RVALUE_MARKING(obj) == TRUE) rb_bug("gc_grey: %s is marking/remembered.", obj_info(obj));
7018#endif
7019
7020 if (is_incremental_marking(objspace)) {
7021 MARK_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
7022 }
7023
7024 push_mark_stack(&objspace->mark_stack, obj);
7025}
7026
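/* Age a freshly marked object: WB-protected objects that are not yet old
 * gain one age step, already-old objects are recorded as uncollectible on
 * their page during a full mark, and the marked-slot counter is bumped. */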
7027static void
7028gc_aging(rb_objspace_t *objspace, VALUE obj)
7029{
7030 struct heap_page *page = GET_HEAP_PAGE(obj);
7031
7032 GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
7033 check_rvalue_consistency(obj);
7034
7035 if (!RVALUE_PAGE_WB_UNPROTECTED(page, obj)) {
7036 if (!RVALUE_OLD_P(obj)) {
7037 gc_report(3, objspace, "gc_aging: YOUNG: %s\n", obj_info(obj));
7038 RVALUE_AGE_INC(objspace, obj);
7039 }
7040 else if (is_full_marking(objspace)) {
7041 GC_ASSERT(RVALUE_PAGE_UNCOLLECTIBLE(page, obj) == FALSE);
7042 RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, page, obj);
7043 }
7044 }
7045 check_rvalue_consistency(obj);
7046
7047 objspace->marked_slots++;
7048}
7049
7050NOINLINE(static void gc_mark_ptr(rb_objspace_t *objspace, VALUE obj));
7051static void reachable_objects_from_callback(VALUE obj);
7052
7053static void
7054gc_mark_ptr(rb_objspace_t *objspace, VALUE obj)
7055{
7056 if (LIKELY(during_gc)) {
7057 rgengc_check_relation(objspace, obj);
7058 if (!gc_mark_set(objspace, obj)) return; /* already marked */
7059
7060 if (0) { // for debug GC marking miss
7061 if (objspace->rgengc.parent_object) {
7062 RUBY_DEBUG_LOG("%p (%s) parent:%p (%s)",
7063 (void *)obj, obj_type_name(obj),
7064 (void *)objspace->rgengc.parent_object, obj_type_name(objspace->rgengc.parent_object));
7065 }
7066 else {
7067 RUBY_DEBUG_LOG("%p (%s)", (void *)obj, obj_type_name(obj));
7068 }
7069 }
7070
7071 if (UNLIKELY(RB_TYPE_P(obj, T_NONE))) {
7072 rp(obj);
7073 rb_bug("try to mark T_NONE object"); /* check here will help debugging */
7074 }
7075 gc_aging(objspace, obj);
7076 gc_grey(objspace, obj);
7077 }
7078 else {
7079 reachable_objects_from_callback(obj);
7080 }
7081}
7082
7083static inline void
7084gc_pin(rb_objspace_t *objspace, VALUE obj)
7085{
7086 GC_ASSERT(is_markable_object(obj));
7087 if (UNLIKELY(objspace->flags.during_compacting)) {
7088 if (LIKELY(during_gc)) {
7089 if (!MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj)) {
7090 GC_ASSERT(GET_HEAP_PAGE(obj)->pinned_slots <= GET_HEAP_PAGE(obj)->total_slots);
7091 GET_HEAP_PAGE(obj)->pinned_slots++;
7092 MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj);
7093 }
7094 }
7095 }
7096}
7097
7098static inline void
7099gc_mark_and_pin(rb_objspace_t *objspace, VALUE obj)
7100{
7101 if (!is_markable_object(obj)) return;
7102 gc_pin(objspace, obj);
7103 gc_mark_ptr(objspace, obj);
7104}
7105
7106static inline void
7107gc_mark(rb_objspace_t *objspace, VALUE obj)
7108{
7109 if (!is_markable_object(obj)) return;
7110 gc_mark_ptr(objspace, obj);
7111}
7112
7113void
7114rb_gc_mark_movable(VALUE ptr)
7115{
7116 gc_mark(&rb_objspace, ptr);
7117}
7118
7119void
7120rb_gc_mark(VALUE ptr)
7121{
7122 gc_mark_and_pin(&rb_objspace, ptr);
7123}
7124
7125void
7126rb_gc_mark_and_move(VALUE *ptr)
7127{
7128 rb_objspace_t *objspace = &rb_objspace;
7129 if (RB_SPECIAL_CONST_P(*ptr)) return;
7130
7131 if (UNLIKELY(objspace->flags.during_reference_updating)) {
7132 GC_ASSERT(objspace->flags.during_compacting);
7133 GC_ASSERT(during_gc);
7134
7135 *ptr = rb_gc_location(*ptr);
7136 }
7137 else {
7138 gc_mark_ptr(objspace, *ptr);
7139 }
7140}
7141
7142void
7143rb_gc_mark_weak(VALUE *ptr)
7144{
7145 rb_objspace_t *objspace = &rb_objspace;
7146
7147 if (UNLIKELY(!during_gc)) return;
7148
7149 VALUE obj = *ptr;
7150 if (RB_SPECIAL_CONST_P(obj)) return;
7151
7152 GC_ASSERT(objspace->rgengc.parent_object == 0 || FL_TEST(objspace->rgengc.parent_object, FL_WB_PROTECTED));
7153
7154 if (UNLIKELY(RB_TYPE_P(obj, T_NONE))) {
7155 rp(obj);
7156 rb_bug("try to mark T_NONE object");
7157 }
7158
7159 /* If we are in a minor GC and the other object is old, then obj should
7160 * already be marked and cannot be reclaimed in this GC cycle so we don't
7161 * need to add it to the weak references list. */
7162 if (!is_full_marking(objspace) && RVALUE_OLD_P(obj)) {
7163 GC_ASSERT(RVALUE_MARKED(obj));
7164 GC_ASSERT(!objspace->flags.during_compacting);
7165
7166 return;
7167 }
7168
7169 rgengc_check_relation(objspace, obj);
7170
7171 rb_darray_append_without_gc(&objspace->weak_references, ptr);
7172
7173 objspace->profile.weak_references_count++;
7174}
7175
7176void
7177rb_gc_remove_weak(VALUE parent_obj, VALUE *ptr)
7178{
7179 rb_objspace_t *objspace = &rb_objspace;
7180
7181 /* If we're not doing incremental marking, then the state of the objects can't
7182 * change so we don't need to do anything. */
7183 if (!is_incremental_marking(objspace)) return;
7184 /* If parent_obj has not been marked, then ptr has not yet been marked
7185 * weak, so we don't need to do anything. */
7186 if (!RVALUE_MARKED(parent_obj)) return;
7187
7188 VALUE **ptr_ptr;
7189 rb_darray_foreach(objspace->weak_references, i, ptr_ptr) {
7190 if (*ptr_ptr == ptr) {
7191 *ptr_ptr = NULL;
7192 break;
7193 }
7194 }
7195}
7196
7197/* CAUTION: THIS FUNCTION IS VALID *ONLY BEFORE* SWEEPING.
7198 * This function is only for GC_END_MARK timing.
7199 */
7200
7201int
7202rb_objspace_marked_object_p(VALUE obj)
7203{
7204 return RVALUE_MARKED(obj) ? TRUE : FALSE;
7205}
7206
7207static inline void
7208gc_mark_set_parent(rb_objspace_t *objspace, VALUE obj)
7209{
7210 if (RVALUE_OLD_P(obj)) {
7211 objspace->rgengc.parent_object = obj;
7212 }
7213 else {
7214 objspace->rgengc.parent_object = Qfalse;
7215 }
7216}
7217
7218static void
7219gc_mark_imemo(rb_objspace_t *objspace, VALUE obj)
7220{
7221 switch (imemo_type(obj)) {
7222 case imemo_env:
7223 {
7224 const rb_env_t *env = (const rb_env_t *)obj;
7225
7226 if (LIKELY(env->ep)) {
7227 // env->ep can be NULL just after newobj().
7228 GC_ASSERT(env->ep[VM_ENV_DATA_INDEX_ENV] == obj);
7229 GC_ASSERT(VM_ENV_ESCAPED_P(env->ep));
7230 rb_gc_mark_values((long)env->env_size, env->env);
7231 VM_ENV_FLAGS_SET(env->ep, VM_ENV_FLAG_WB_REQUIRED);
7232 gc_mark(objspace, (VALUE)rb_vm_env_prev_env(env));
7233 gc_mark(objspace, (VALUE)env->iseq);
7234 }
7235 }
7236 return;
7237 case imemo_cref:
7238 gc_mark(objspace, RANY(obj)->as.imemo.cref.klass_or_self);
7239 gc_mark(objspace, (VALUE)RANY(obj)->as.imemo.cref.next);
7240 gc_mark(objspace, RANY(obj)->as.imemo.cref.refinements);
7241 return;
7242 case imemo_svar:
7243 gc_mark(objspace, RANY(obj)->as.imemo.svar.cref_or_me);
7244 gc_mark(objspace, RANY(obj)->as.imemo.svar.lastline);
7245 gc_mark(objspace, RANY(obj)->as.imemo.svar.backref);
7246 gc_mark(objspace, RANY(obj)->as.imemo.svar.others);
7247 return;
7248 case imemo_throw_data:
7249 gc_mark(objspace, RANY(obj)->as.imemo.throw_data.throw_obj);
7250 return;
7251 case imemo_ifunc:
7252 gc_mark_maybe(objspace, (VALUE)RANY(obj)->as.imemo.ifunc.data);
7253 return;
7254 case imemo_memo:
7255 gc_mark(objspace, RANY(obj)->as.imemo.memo.v1);
7256 gc_mark(objspace, RANY(obj)->as.imemo.memo.v2);
7257 gc_mark_maybe(objspace, RANY(obj)->as.imemo.memo.u3.value);
7258 return;
7259 case imemo_ment:
7260 mark_method_entry(objspace, &RANY(obj)->as.imemo.ment);
7261 return;
7262 case imemo_iseq:
7263 rb_iseq_mark_and_move((rb_iseq_t *)obj, false);
7264 return;
7265 case imemo_tmpbuf:
7266 {
7267 const rb_imemo_tmpbuf_t *m = &RANY(obj)->as.imemo.alloc;
7268 do {
7269 rb_gc_mark_locations(m->ptr, m->ptr + m->cnt);
7270 } while ((m = m->next) != NULL);
7271 }
7272 return;
7273 case imemo_ast:
7274 rb_ast_mark(&RANY(obj)->as.imemo.ast);
7275 return;
7276 case imemo_parser_strterm:
7277 return;
7278 case imemo_callinfo:
7279 return;
7280 case imemo_callcache:
7281 /* cc is callcache.
7282 *
7283 * cc->klass (klass) should not be marked because if the klass is
7284 * free'ed, the cc->klass will be cleared by `vm_cc_invalidate()`.
7285 *
7286 * cc->cme (cme) should not be marked because cc is invalidated
7287 * when cme is free'ed.
7288 * - klass marks cme if klass uses cme.
7289 * - the caller class's ccs->cme marks cc->cme.
7290 * - if the klass no longer refers to the cc, the cc is invalidated
7291 * by `vm_cc_invalidate()` and cc->cme will not be accessed
7292 * afterwards.
7293 * - With multiple Ractors, cme will be collected by global GC,
7294 * so it is safe as long as GC does not interleave while accessing
7295 * cc and cme.
7296 * - However, cc_type_super and cc_type_refinement are not chained
7297 * from ccs so cc->cme should be marked; the cme might be
7298 * reachable only through cc in these cases.
7299 */
7300 {
7301 const struct rb_callcache *cc = (const struct rb_callcache *)obj;
7302 if (vm_cc_super_p(cc) || vm_cc_refinement_p(cc)) {
7303 gc_mark(objspace, (VALUE)cc->cme_);
7304 }
7305 }
7306 return;
7307 case imemo_constcache:
7308 {
7309 const struct iseq_inline_constant_cache_entry *ice = (struct iseq_inline_constant_cache_entry *)obj;
7310 gc_mark(objspace, ice->value);
7311 }
7312 return;
7313#if VM_CHECK_MODE > 0
7314 default:
7315 VM_UNREACHABLE(gc_mark_imemo);
7316#endif
7317 }
7318}
7319
7320static bool
7321gc_declarative_marking_p(const rb_data_type_t *type)
7322{
7323 return (type->flags & RUBY_TYPED_DECL_MARKING) != 0;
7324}
7325
7326static void mark_cvc_tbl(rb_objspace_t *objspace, VALUE klass);
7327
7328static void
7329gc_mark_children(rb_objspace_t *objspace, VALUE obj)
7330{
7331 register RVALUE *any = RANY(obj);
7332 gc_mark_set_parent(objspace, obj);
7333
7334 if (FL_TEST(obj, FL_EXIVAR)) {
7335 rb_mark_generic_ivar(obj);
7336 }
7337
7338 switch (BUILTIN_TYPE(obj)) {
7339 case T_FLOAT:
7340 case T_BIGNUM:
7341 case T_SYMBOL:
7342 /* Not immediates, but they have no references and no singleton
7343 * class */
7344 return;
7345
7346 case T_NIL:
7347 case T_FIXNUM:
7348 rb_bug("rb_gc_mark() called for broken object");
7349 break;
7350
7351 case T_NODE:
7352 UNEXPECTED_NODE(rb_gc_mark);
7353 break;
7354
7355 case T_IMEMO:
7356 gc_mark_imemo(objspace, obj);
7357 return;
7358
7359 default:
7360 break;
7361 }
7362
7363 gc_mark(objspace, any->as.basic.klass);
7364
7365 switch (BUILTIN_TYPE(obj)) {
7366 case T_CLASS:
7367 if (FL_TEST(obj, FL_SINGLETON)) {
7368 gc_mark(objspace, RCLASS_ATTACHED_OBJECT(obj));
7369 }
7370 // Continue to the shared T_CLASS/T_MODULE
7371 case T_MODULE:
7372 if (RCLASS_SUPER(obj)) {
7373 gc_mark(objspace, RCLASS_SUPER(obj));
7374 }
7375
7376 mark_m_tbl(objspace, RCLASS_M_TBL(obj));
7377 mark_cvc_tbl(objspace, obj);
7378 cc_table_mark(objspace, obj);
7379 if (rb_shape_obj_too_complex(obj)) {
7380 mark_tbl_no_pin(objspace, (st_table *)RCLASS_IVPTR(obj));
7381 }
7382 else {
7383 for (attr_index_t i = 0; i < RCLASS_IV_COUNT(obj); i++) {
7384 gc_mark(objspace, RCLASS_IVPTR(obj)[i]);
7385 }
7386 }
7387 mark_const_tbl(objspace, RCLASS_CONST_TBL(obj));
7388
7389 gc_mark(objspace, RCLASS_EXT(obj)->classpath);
7390 break;
7391
7392 case T_ICLASS:
7393 if (RICLASS_OWNS_M_TBL_P(obj)) {
7394 mark_m_tbl(objspace, RCLASS_M_TBL(obj));
7395 }
7396 if (RCLASS_SUPER(obj)) {
7397 gc_mark(objspace, RCLASS_SUPER(obj));
7398 }
7399
7400 if (RCLASS_INCLUDER(obj)) {
7401 gc_mark(objspace, RCLASS_INCLUDER(obj));
7402 }
7403 mark_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
7404 cc_table_mark(objspace, obj);
7405 break;
7406
7407 case T_ARRAY:
7408 if (ARY_SHARED_P(obj)) {
7409 VALUE root = ARY_SHARED_ROOT(obj);
7410 gc_mark(objspace, root);
7411 }
7412 else {
7413 long i, len = RARRAY_LEN(obj);
7414 const VALUE *ptr = RARRAY_CONST_PTR(obj);
7415 for (i=0; i < len; i++) {
7416 gc_mark(objspace, ptr[i]);
7417 }
7418 }
7419 break;
7420
7421 case T_HASH:
7422 mark_hash(objspace, obj);
7423 break;
7424
7425 case T_STRING:
7426 if (STR_SHARED_P(obj)) {
7427 if (STR_EMBED_P(any->as.string.as.heap.aux.shared)) {
7428 /* Embedded shared strings cannot be moved because this string
7429 * points into the slot of the shared string. There may be code
7430 * using the RSTRING_PTR on the stack, which would pin this
7431 * string but not pin the shared string, causing it to move. */
7432 gc_mark_and_pin(objspace, any->as.string.as.heap.aux.shared);
7433 }
7434 else {
7435 gc_mark(objspace, any->as.string.as.heap.aux.shared);
7436 }
7437 }
7438 break;
7439
7440 case T_DATA:
7441 {
7442 void *const ptr = RTYPEDDATA_P(obj) ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
7443
7444 if (ptr) {
7445 if (RTYPEDDATA_P(obj) && gc_declarative_marking_p(any->as.typeddata.type)) {
7446 size_t *offset_list = (size_t *)RANY(obj)->as.typeddata.type->function.dmark;
7447
7448 for (size_t offset = *offset_list; offset != RUBY_REF_END; offset = *offset_list++) {
7449 rb_gc_mark_movable(*(VALUE *)((char *)ptr + offset));
7450 }
7451 }
7452 else {
7453 RUBY_DATA_FUNC mark_func = RTYPEDDATA_P(obj) ?
7454 any->as.typeddata.type->function.dmark :
7455 any->as.data.dmark;
7456 if (mark_func) (*mark_func)(ptr);
7457 }
7458 }
7459 }
7460 break;
7461
7462 case T_OBJECT:
7463 {
7464 rb_shape_t *shape = rb_shape_get_shape_by_id(ROBJECT_SHAPE_ID(obj));
7465 if (rb_shape_obj_too_complex(obj)) {
7466 mark_tbl_no_pin(objspace, ROBJECT_IV_HASH(obj));
7467 }
7468 else {
7469 const VALUE * const ptr = ROBJECT_IVPTR(obj);
7470
7471 uint32_t i, len = ROBJECT_IV_COUNT(obj);
7472 for (i = 0; i < len; i++) {
7473 gc_mark(objspace, ptr[i]);
7474 }
7475 }
7476 if (shape) {
7477 VALUE klass = RBASIC_CLASS(obj);
7478
7479 // Increment max_iv_count if applicable, used to determine size pool allocation
7480 attr_index_t num_of_ivs = shape->next_iv_index;
7481 if (RCLASS_EXT(klass)->max_iv_count < num_of_ivs) {
7482 RCLASS_EXT(klass)->max_iv_count = num_of_ivs;
7483 }
7484 }
7485 }
7486 break;
7487
7488 case T_FILE:
7489 if (any->as.file.fptr) {
7490 gc_mark(objspace, any->as.file.fptr->self);
7491 gc_mark(objspace, any->as.file.fptr->pathv);
7492 gc_mark(objspace, any->as.file.fptr->tied_io_for_writing);
7493 gc_mark(objspace, any->as.file.fptr->writeconv_asciicompat);
7494 gc_mark(objspace, any->as.file.fptr->writeconv_pre_ecopts);
7495 gc_mark(objspace, any->as.file.fptr->encs.ecopts);
7496 gc_mark(objspace, any->as.file.fptr->write_lock);
7497 gc_mark(objspace, any->as.file.fptr->timeout);
7498 }
7499 break;
7500
7501 case T_REGEXP:
7502 gc_mark(objspace, any->as.regexp.src);
7503 break;
7504
7505 case T_MATCH:
7506 gc_mark(objspace, any->as.match.regexp);
7507 if (any->as.match.str) {
7508 gc_mark(objspace, any->as.match.str);
7509 }
7510 break;
7511
7512 case T_RATIONAL:
7513 gc_mark(objspace, any->as.rational.num);
7514 gc_mark(objspace, any->as.rational.den);
7515 break;
7516
7517 case T_COMPLEX:
7518 gc_mark(objspace, any->as.complex.real);
7519 gc_mark(objspace, any->as.complex.imag);
7520 break;
7521
7522 case T_STRUCT:
7523 {
7524 long i;
7525 const long len = RSTRUCT_LEN(obj);
7526 const VALUE * const ptr = RSTRUCT_CONST_PTR(obj);
7527
7528 for (i=0; i<len; i++) {
7529 gc_mark(objspace, ptr[i]);
7530 }
7531 }
7532 break;
7533
7534 default:
7535#if GC_DEBUG
7536 rb_gcdebug_print_obj_condition((VALUE)obj);
7537#endif
7538 if (BUILTIN_TYPE(obj) == T_MOVED) rb_bug("rb_gc_mark(): %p is T_MOVED", (void *)obj);
7539 if (BUILTIN_TYPE(obj) == T_NONE) rb_bug("rb_gc_mark(): %p is T_NONE", (void *)obj);
7540 if (BUILTIN_TYPE(obj) == T_ZOMBIE) rb_bug("rb_gc_mark(): %p is T_ZOMBIE", (void *)obj);
7541 rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
7542 BUILTIN_TYPE(obj), (void *)any,
7543 is_pointer_to_heap(objspace, any) ? "corrupted object" : "non object");
7544 }
7545}
7546
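/* Drain the mark stack, marking the children of each popped object. In
 * incremental mode the marking bits are cleared as objects are processed
 * and the loop stops once roughly `count` objects have been handled.
 * Returns TRUE when the stack has been emptied. */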
7551static inline int
7552gc_mark_stacked_objects(rb_objspace_t *objspace, int incremental, size_t count)
7553{
7554 mark_stack_t *mstack = &objspace->mark_stack;
7555 VALUE obj;
7556 size_t marked_slots_at_the_beginning = objspace->marked_slots;
7557 size_t popped_count = 0;
7558
7559 while (pop_mark_stack(mstack, &obj)) {
7560 if (UNDEF_P(obj)) continue; /* skip */
7561
7562 if (RGENGC_CHECK_MODE && !RVALUE_MARKED(obj)) {
7563 rb_bug("gc_mark_stacked_objects: %s is not marked.", obj_info(obj));
7564 }
7565 gc_mark_children(objspace, obj);
7566
7567 if (incremental) {
7568 if (RGENGC_CHECK_MODE && !RVALUE_MARKING(obj)) {
7569 rb_bug("gc_mark_stacked_objects: incremental, but marking bit is 0");
7570 }
7571 CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
7572 popped_count++;
7573
7574 if (popped_count + (objspace->marked_slots - marked_slots_at_the_beginning) > count) {
7575 break;
7576 }
7577 }
7578 else {
7579 /* just ignore marking bits */
7580 }
7581 }
7582
7583 if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);
7584
7585 if (is_mark_stack_empty(mstack)) {
7586 shrink_stack_chunk_cache(mstack);
7587 return TRUE;
7588 }
7589 else {
7590 return FALSE;
7591 }
7592}
7593
7594static int
7595gc_mark_stacked_objects_incremental(rb_objspace_t *objspace, size_t count)
7596{
7597 return gc_mark_stacked_objects(objspace, TRUE, count);
7598}
7599
7600static int
7601gc_mark_stacked_objects_all(rb_objspace_t *objspace)
7602{
7603 return gc_mark_stacked_objects(objspace, FALSE, 0);
7604}
7605
7606#if PRINT_ROOT_TICKS
7607#define MAX_TICKS 0x100
7608static tick_t mark_ticks[MAX_TICKS];
7609static const char *mark_ticks_categories[MAX_TICKS];
7610
7611static void
7612show_mark_ticks(void)
7613{
7614 int i;
7615 fprintf(stderr, "mark ticks result:\n");
7616 for (i=0; i<MAX_TICKS; i++) {
7617 const char *category = mark_ticks_categories[i];
7618 if (category) {
7619 fprintf(stderr, "%s\t%8lu\n", category, (unsigned long)mark_ticks[i]);
7620 }
7621 else {
7622 break;
7623 }
7624 }
7625}
7626
7627#endif /* PRINT_ROOT_TICKS */
7628
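/* Mark the GC roots: VM structures, the finalizer table, the current
 * machine context, protected global variables, end procs, the global
 * variable table and the object-id tables. Each MARK_CHECKPOINT updates
 * *categoryp so consistency checks can report which root set a reference
 * came from. */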
7629static void
7630gc_mark_roots(rb_objspace_t *objspace, const char **categoryp)
7631{
7632 struct gc_list *list;
7633 rb_execution_context_t *ec = GET_EC();
7634 rb_vm_t *vm = rb_ec_vm_ptr(ec);
7635
7636#if PRINT_ROOT_TICKS
7637 tick_t start_tick = tick();
7638 int tick_count = 0;
7639 const char *prev_category = 0;
7640
7641 if (mark_ticks_categories[0] == 0) {
7642 atexit(show_mark_ticks);
7643 }
7644#endif
7645
7646 if (categoryp) *categoryp = "xxx";
7647
7648 objspace->rgengc.parent_object = Qfalse;
7649
7650#if PRINT_ROOT_TICKS
7651#define MARK_CHECKPOINT_PRINT_TICK(category) do { \
7652 if (prev_category) { \
7653 tick_t t = tick(); \
7654 mark_ticks[tick_count] = t - start_tick; \
7655 mark_ticks_categories[tick_count] = prev_category; \
7656 tick_count++; \
7657 } \
7658 prev_category = category; \
7659 start_tick = tick(); \
7660} while (0)
7661#else /* PRINT_ROOT_TICKS */
7662#define MARK_CHECKPOINT_PRINT_TICK(category)
7663#endif
7664
7665#define MARK_CHECKPOINT(category) do { \
7666 if (categoryp) *categoryp = category; \
7667 MARK_CHECKPOINT_PRINT_TICK(category); \
7668} while (0)
7669
7670 MARK_CHECKPOINT("vm");
7671 SET_STACK_END;
7672 rb_vm_mark(vm);
7673 if (vm->self) gc_mark(objspace, vm->self);
7674
7675 MARK_CHECKPOINT("finalizers");
7676 mark_finalizer_tbl(objspace, finalizer_table);
7677
7678 MARK_CHECKPOINT("machine_context");
7679 mark_current_machine_context(objspace, ec);
7680
7681 /* mark protected global variables */
7682 MARK_CHECKPOINT("global_list");
7683 for (list = global_list; list; list = list->next) {
7684 gc_mark_maybe(objspace, *list->varptr);
7685 }
7686
7687 MARK_CHECKPOINT("end_proc");
7688 rb_mark_end_proc();
7689
7690 MARK_CHECKPOINT("global_tbl");
7691 rb_gc_mark_global_tbl();
7692
7693 MARK_CHECKPOINT("object_id");
7694 rb_gc_mark(objspace->next_object_id);
7695 mark_tbl_no_pin(objspace, objspace->obj_to_id_tbl); /* Only mark ids */
7696
7697 if (stress_to_class) rb_gc_mark(stress_to_class);
7698
7699 MARK_CHECKPOINT("finish");
7700#undef MARK_CHECKPOINT
7701}
7702
7703#if RGENGC_CHECK_MODE >= 4
7704
7705#define MAKE_ROOTSIG(obj) (((VALUE)(obj) << 1) | 0x01)
7706#define IS_ROOTSIG(obj) ((VALUE)(obj) & 0x01)
7707#define GET_ROOTSIG(obj) ((const char *)((VALUE)(obj) >> 1))
7708
7709struct reflist {
7710 VALUE *list;
7711 int pos;
7712 int size;
7713};
7714
7715static struct reflist *
7716reflist_create(VALUE obj)
7717{
7718 struct reflist *refs = xmalloc(sizeof(struct reflist));
7719 refs->size = 1;
7720 refs->list = ALLOC_N(VALUE, refs->size);
7721 refs->list[0] = obj;
7722 refs->pos = 1;
7723 return refs;
7724}
7725
7726static void
7727reflist_destruct(struct reflist *refs)
7728{
7729 xfree(refs->list);
7730 xfree(refs);
7731}
7732
7733static void
7734reflist_add(struct reflist *refs, VALUE obj)
7735{
7736 if (refs->pos == refs->size) {
7737 refs->size *= 2;
7738 SIZED_REALLOC_N(refs->list, VALUE, refs->size, refs->size/2);
7739 }
7740
7741 refs->list[refs->pos++] = obj;
7742}
7743
7744static void
7745reflist_dump(struct reflist *refs)
7746{
7747 int i;
7748 for (i=0; i<refs->pos; i++) {
7749 VALUE obj = refs->list[i];
7750 if (IS_ROOTSIG(obj)) { /* root */
7751 fprintf(stderr, "<root@%s>", GET_ROOTSIG(obj));
7752 }
7753 else {
7754 fprintf(stderr, "<%s>", obj_info(obj));
7755 }
7756 if (i+1 < refs->pos) fprintf(stderr, ", ");
7757 }
7758}
7759
7760static int
7761reflist_referred_from_machine_context(struct reflist *refs)
7762{
7763 int i;
7764 for (i=0; i<refs->pos; i++) {
7765 VALUE obj = refs->list[i];
7766 if (IS_ROOTSIG(obj) && strcmp(GET_ROOTSIG(obj), "machine_context") == 0) return 1;
7767 }
7768 return 0;
7769}
7770
7771struct allrefs {
7772 rb_objspace_t *objspace;
7773 /* a -> obj1
7774 * b -> obj1
7775 * c -> obj1
7776 * c -> obj2
7777 * d -> obj3
7778 * #=> {obj1 => [a, b, c], obj2 => [c, d]}
7779 */
7780 struct st_table *references;
7781 const char *category;
7782 VALUE root_obj;
7783 mark_stack_t mark_stack;
7784};
7785
7786static int
7787allrefs_add(struct allrefs *data, VALUE obj)
7788{
7789 struct reflist *refs;
7790 st_data_t r;
7791
7792 if (st_lookup(data->references, obj, &r)) {
7793 refs = (struct reflist *)r;
7794 reflist_add(refs, data->root_obj);
7795 return 0;
7796 }
7797 else {
7798 refs = reflist_create(data->root_obj);
7799 st_insert(data->references, obj, (st_data_t)refs);
7800 return 1;
7801 }
7802}
7803
7804static void
7805allrefs_i(VALUE obj, void *ptr)
7806{
7807 struct allrefs *data = (struct allrefs *)ptr;
7808
7809 if (allrefs_add(data, obj)) {
7810 push_mark_stack(&data->mark_stack, obj);
7811 }
7812}
7813
7814static void
7815allrefs_roots_i(VALUE obj, void *ptr)
7816{
7817 struct allrefs *data = (struct allrefs *)ptr;
7818 if (strlen(data->category) == 0) rb_bug("!!!");
7819 data->root_obj = MAKE_ROOTSIG(data->category);
7820
7821 if (allrefs_add(data, obj)) {
7822 push_mark_stack(&data->mark_stack, obj);
7823 }
7824}
7825#define PUSH_MARK_FUNC_DATA(v) do { \
7826 struct gc_mark_func_data_struct *prev_mark_func_data = GET_RACTOR()->mfd; \
7827 GET_RACTOR()->mfd = (v);
7828
7829#define POP_MARK_FUNC_DATA() GET_RACTOR()->mfd = prev_mark_func_data;} while (0)
7830
7831static st_table *
7832objspace_allrefs(rb_objspace_t *objspace)
7833{
7834 struct allrefs data;
7835 struct gc_mark_func_data_struct mfd;
7836 VALUE obj;
7837 int prev_dont_gc = dont_gc_val();
7838 dont_gc_on();
7839
7840 data.objspace = objspace;
7841 data.references = st_init_numtable();
7842 init_mark_stack(&data.mark_stack);
7843
7844 mfd.mark_func = allrefs_roots_i;
7845 mfd.data = &data;
7846
7847 /* traverse root objects */
7848 PUSH_MARK_FUNC_DATA(&mfd);
7849 GET_RACTOR()->mfd = &mfd;
7850 gc_mark_roots(objspace, &data.category);
7851 POP_MARK_FUNC_DATA();
7852
7853 /* traverse rest objects reachable from root objects */
7854 while (pop_mark_stack(&data.mark_stack, &obj)) {
7855 rb_objspace_reachable_objects_from(data.root_obj = obj, allrefs_i, &data);
7856 }
7857 free_stack_chunks(&data.mark_stack);
7858
7859 dont_gc_set(prev_dont_gc);
7860 return data.references;
7861}
7862
7863static int
7864objspace_allrefs_destruct_i(st_data_t key, st_data_t value, st_data_t ptr)
7865{
7866 struct reflist *refs = (struct reflist *)value;
7867 reflist_destruct(refs);
7868 return ST_CONTINUE;
7869}
7870
7871static void
7872objspace_allrefs_destruct(struct st_table *refs)
7873{
7874 st_foreach(refs, objspace_allrefs_destruct_i, 0);
7875 st_free_table(refs);
7876}
7877
7878#if RGENGC_CHECK_MODE >= 5
7879static int
7880allrefs_dump_i(st_data_t k, st_data_t v, st_data_t ptr)
7881{
7882 VALUE obj = (VALUE)k;
7883 struct reflist *refs = (struct reflist *)v;
7884 fprintf(stderr, "[allrefs_dump_i] %s <- ", obj_info(obj));
7885 reflist_dump(refs);
7886 fprintf(stderr, "\n");
7887 return ST_CONTINUE;
7888}
7889
7890static void
7891allrefs_dump(rb_objspace_t *objspace)
7892{
7893 VALUE size = objspace->rgengc.allrefs_table->num_entries;
7894 fprintf(stderr, "[all refs] (size: %"PRIuVALUE")\n", size);
7895 st_foreach(objspace->rgengc.allrefs_table, allrefs_dump_i, 0);
7896}
7897#endif
7898
7899static int
7900gc_check_after_marks_i(st_data_t k, st_data_t v, st_data_t ptr)
7901{
7902 VALUE obj = k;
7903 struct reflist *refs = (struct reflist *)v;
7904 rb_objspace_t *objspace = (rb_objspace_t *)ptr;
7905
7906 /* object should be marked or oldgen */
7907 if (!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj)) {
7908 fprintf(stderr, "gc_check_after_marks_i: %s is not marked and not oldgen.\n", obj_info(obj));
7909 fprintf(stderr, "gc_check_after_marks_i: %p is referred from ", (void *)obj);
7910 reflist_dump(refs);
7911
7912 if (reflist_referred_from_machine_context(refs)) {
7913 fprintf(stderr, " (marked from machine stack).\n");
7914 /* marked from machine context can be false positive */
7915 }
7916 else {
7917 objspace->rgengc.error_count++;
7918 fprintf(stderr, "\n");
7919 }
7920 }
7921 return ST_CONTINUE;
7922}
7923
7924static void
7925gc_marks_check(rb_objspace_t *objspace, st_foreach_callback_func *checker_func, const char *checker_name)
7926{
7927 size_t saved_malloc_increase = objspace->malloc_params.increase;
7928#if RGENGC_ESTIMATE_OLDMALLOC
7929 size_t saved_oldmalloc_increase = objspace->rgengc.oldmalloc_increase;
7930#endif
7931 VALUE already_disabled = rb_objspace_gc_disable(objspace);
7932
7933 objspace->rgengc.allrefs_table = objspace_allrefs(objspace);
7934
7935 if (checker_func) {
7936 st_foreach(objspace->rgengc.allrefs_table, checker_func, (st_data_t)objspace);
7937 }
7938
7939 if (objspace->rgengc.error_count > 0) {
7940#if RGENGC_CHECK_MODE >= 5
7941 allrefs_dump(objspace);
7942#endif
7943 if (checker_name) rb_bug("%s: GC has problem.", checker_name);
7944 }
7945
7946 objspace_allrefs_destruct(objspace->rgengc.allrefs_table);
7947 objspace->rgengc.allrefs_table = 0;
7948
7949 if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
7950 objspace->malloc_params.increase = saved_malloc_increase;
7951#if RGENGC_ESTIMATE_OLDMALLOC
7952 objspace->rgengc.oldmalloc_increase = saved_oldmalloc_increase;
7953#endif
7954}
7955#endif /* RGENGC_CHECK_MODE >= 4 */
7956
7957struct verify_internal_consistency_struct {
7958 rb_objspace_t *objspace;
7959 int err_count;
7960 size_t live_object_count;
7961 size_t zombie_object_count;
7962
7963 VALUE parent;
7964 size_t old_object_count;
7965 size_t remembered_shady_count;
7966};
7967
7968static void
7969check_generation_i(const VALUE child, void *ptr)
7970{
7971 struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
7972 const VALUE parent = data->parent;
7973
7974 if (RGENGC_CHECK_MODE) GC_ASSERT(RVALUE_OLD_P(parent));
7975
7976 if (!RVALUE_OLD_P(child)) {
7977 if (!RVALUE_REMEMBERED(parent) &&
7978 !RVALUE_REMEMBERED(child) &&
7979 !RVALUE_UNCOLLECTIBLE(child)) {
7980 fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (O->Y) %s -> %s\n", obj_info(parent), obj_info(child));
7981 data->err_count++;
7982 }
7983 }
7984}
7985
7986static void
7987check_color_i(const VALUE child, void *ptr)
7988{
7989 struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
7990 const VALUE parent = data->parent;
7991
7992 if (!RVALUE_WB_UNPROTECTED(parent) && RVALUE_WHITE_P(child)) {
7993 fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (B->W) - %s -> %s\n",
7994 obj_info(parent), obj_info(child));
7995 data->err_count++;
7996 }
7997}
7998
7999static void
8000check_children_i(const VALUE child, void *ptr)
8001{
8002 struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
8003 if (check_rvalue_consistency_force(child, FALSE) != 0) {
8004 fprintf(stderr, "check_children_i: %s has error (referenced from %s)",
8005 obj_info(child), obj_info(data->parent));
8006 rb_print_backtrace(stderr); /* C backtrace will help to debug */
8007
8008 data->err_count++;
8009 }
8010}
8011
8012static int
8013verify_internal_consistency_i(void *page_start, void *page_end, size_t stride,
8014 struct verify_internal_consistency_struct *data)
8015{
8016 VALUE obj;
8017 rb_objspace_t *objspace = data->objspace;
8018
8019 for (obj = (VALUE)page_start; obj != (VALUE)page_end; obj += stride) {
8020 void *poisoned = asan_unpoison_object_temporary(obj);
8021
8022 if (is_live_object(objspace, obj)) {
8023 /* count objects */
8024 data->live_object_count++;
8025 data->parent = obj;
8026
8027 /* Normally, we don't expect T_MOVED objects to be in the heap.
8028 * But they can stay alive on the stack. */
8029 if (!gc_object_moved_p(objspace, obj)) {
8030 /* moved slots don't have children */
8031 rb_objspace_reachable_objects_from(obj, check_children_i, (void *)data);
8032 }
8033
8034 /* check health of children */
8035 if (RVALUE_OLD_P(obj)) data->old_object_count++;
8036 if (RVALUE_WB_UNPROTECTED(obj) && RVALUE_UNCOLLECTIBLE(obj)) data->remembered_shady_count++;
8037
8038 if (!is_marking(objspace) && RVALUE_OLD_P(obj)) {
8039 /* reachable objects from an oldgen object should be old or (young with remember) */
8040 data->parent = obj;
8041 rb_objspace_reachable_objects_from(obj, check_generation_i, (void *)data);
8042 }
8043
8044 if (is_incremental_marking(objspace)) {
8045 if (RVALUE_BLACK_P(obj)) {
8046 /* reachable objects from black objects should be black or grey objects */
8047 data->parent = obj;
8048 rb_objspace_reachable_objects_from(obj, check_color_i, (void *)data);
8049 }
8050 }
8051 }
8052 else {
8053 if (BUILTIN_TYPE(obj) == T_ZOMBIE) {
8054 GC_ASSERT((RBASIC(obj)->flags & ~FL_SEEN_OBJ_ID) == T_ZOMBIE);
8055 data->zombie_object_count++;
8056 }
8057 }
8058 if (poisoned) {
8059 GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
8060 asan_poison_object(obj);
8061 }
8062 }
8063
8064 return 0;
8065}
8066
8067static int
8068gc_verify_heap_page(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
8069{
8070 unsigned int has_remembered_shady = FALSE;
8071 unsigned int has_remembered_old = FALSE;
8072 int remembered_old_objects = 0;
8073 int free_objects = 0;
8074 int zombie_objects = 0;
8075
8076 short slot_size = page->slot_size;
8077 uintptr_t start = (uintptr_t)page->start;
8078 uintptr_t end = start + page->total_slots * slot_size;
8079
8080 for (uintptr_t ptr = start; ptr < end; ptr += slot_size) {
8081 VALUE val = (VALUE)ptr;
8082 void *poisoned = asan_unpoison_object_temporary(val);
8083 enum ruby_value_type type = BUILTIN_TYPE(val);
8084
8085 if (type == T_NONE) free_objects++;
8086 if (type == T_ZOMBIE) zombie_objects++;
8087 if (RVALUE_PAGE_UNCOLLECTIBLE(page, val) && RVALUE_PAGE_WB_UNPROTECTED(page, val)) {
8088 has_remembered_shady = TRUE;
8089 }
8090 if (RVALUE_PAGE_MARKING(page, val)) {
8091 has_remembered_old = TRUE;
8092 remembered_old_objects++;
8093 }
8094
8095 if (poisoned) {
8096 GC_ASSERT(BUILTIN_TYPE(val) == T_NONE);
8097 asan_poison_object(val);
8098 }
8099 }
8100
8101 if (!is_incremental_marking(objspace) &&
8102 page->flags.has_remembered_objects == FALSE && has_remembered_old == TRUE) {
8103
8104 for (uintptr_t ptr = start; ptr < end; ptr += slot_size) {
8105 VALUE val = (VALUE)ptr;
8106 if (RVALUE_PAGE_MARKING(page, val)) {
8107 fprintf(stderr, "marking -> %s\n", obj_info(val));
8108 }
8109 }
8110 rb_bug("page %p's has_remembered_objects should be false, but there are remembered old objects (%d). %s",
8111 (void *)page, remembered_old_objects, obj ? obj_info(obj) : "");
8112 }
8113
8114 if (page->flags.has_uncollectible_wb_unprotected_objects == FALSE && has_remembered_shady == TRUE) {
8115 rb_bug("page %p's has_remembered_shady should be false, but there are remembered shady objects. %s",
8116 (void *)page, obj ? obj_info(obj) : "");
8117 }
8118
8119 if (0) {
8120 /* free_slots may not be equal to free_objects */
8121 if (page->free_slots != free_objects) {
8122 rb_bug("page %p's free_slots should be %d, but %d", (void *)page, page->free_slots, free_objects);
8123 }
8124 }
8125 if (page->final_slots != zombie_objects) {
8126 rb_bug("page %p's final_slots should be %d, but %d", (void *)page, page->final_slots, zombie_objects);
8127 }
8128
8129 return remembered_old_objects;
8130}
8131
8132static int
8133gc_verify_heap_pages_(rb_objspace_t *objspace, struct ccan_list_head *head)
8134{
8135 int remembered_old_objects = 0;
8136 struct heap_page *page = 0;
8137
8138 ccan_list_for_each(head, page, page_node) {
8139 asan_unlock_freelist(page);
8140 RVALUE *p = page->freelist;
8141 while (p) {
8142 VALUE vp = (VALUE)p;
8143 VALUE prev = vp;
8144 asan_unpoison_object(vp, false);
8145 if (BUILTIN_TYPE(vp) != T_NONE) {
8146 fprintf(stderr, "freelist slot expected to be T_NONE but was: %s\n", obj_info(vp));
8147 }
8148 p = p->as.free.next;
8149 asan_poison_object(prev);
8150 }
8151 asan_lock_freelist(page);
8152
8153 if (page->flags.has_remembered_objects == FALSE) {
8154 remembered_old_objects += gc_verify_heap_page(objspace, page, Qfalse);
8155 }
8156 }
8157
8158 return remembered_old_objects;
8159}
8160
8161static int
8162gc_verify_heap_pages(rb_objspace_t *objspace)
8163{
8164 int remembered_old_objects = 0;
8165 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8166 remembered_old_objects += gc_verify_heap_pages_(objspace, &(SIZE_POOL_EDEN_HEAP(&size_pools[i])->pages));
8167 remembered_old_objects += gc_verify_heap_pages_(objspace, &(SIZE_POOL_TOMB_HEAP(&size_pools[i])->pages));
8168 }
8169 return remembered_old_objects;
8170}
8171
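/* The Ruby-level method GC.verify_internal_consistency (registered on the
 * GC module in this file's init code) reaches the function below.
 * Illustrative usage:
 *
 *   GC.verify_internal_consistency   #=> nil, or the process aborts via
 *                                    #   rb_bug() if an inconsistency is found
 */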
8172/*
8173 * call-seq:
8174 * GC.verify_internal_consistency -> nil
8175 *
8176 * Verify internal consistency.
8177 *
8178 * This method is implementation specific.
8179 * Currently, this method checks generational consistency
8180 * if RGenGC is supported.
8181 */
8182static VALUE
8183gc_verify_internal_consistency_m(VALUE dummy)
8184{
8185 gc_verify_internal_consistency(&rb_objspace);
8186 return Qnil;
8187}
8188
8189static void
8190gc_verify_internal_consistency_(rb_objspace_t *objspace)
8191{
8192 struct verify_internal_consistency_struct data = {0};
8193
8194 data.objspace = objspace;
8195 gc_report(5, objspace, "gc_verify_internal_consistency: start\n");
8196
8197 /* check relations */
8198 for (size_t i = 0; i < heap_allocated_pages; i++) {
8199 struct heap_page *page = heap_pages_sorted[i];
8200 short slot_size = page->slot_size;
8201
8202 uintptr_t start = (uintptr_t)page->start;
8203 uintptr_t end = start + page->total_slots * slot_size;
8204
8205 verify_internal_consistency_i((void *)start, (void *)end, slot_size, &data);
8206 }
8207
8208 if (data.err_count != 0) {
8209#if RGENGC_CHECK_MODE >= 5
8210 objspace->rgengc.error_count = data.err_count;
8211 gc_marks_check(objspace, NULL, NULL);
8212 allrefs_dump(objspace);
8213#endif
8214 rb_bug("gc_verify_internal_consistency: found internal inconsistency.");
8215 }
8216
8217 /* check heap_page status */
8218 gc_verify_heap_pages(objspace);
8219
8220 /* check counters */
8221
8222 if (!is_lazy_sweeping(objspace) &&
8223 !finalizing &&
8224 ruby_single_main_ractor != NULL) {
8225 if (objspace_live_slots(objspace) != data.live_object_count) {
8226 fprintf(stderr, "heap_pages_final_slots: %"PRIdSIZE", total_freed_objects: %"PRIdSIZE"\n",
8227 heap_pages_final_slots, total_freed_objects(objspace));
8228 rb_bug("inconsistent live slot number: expect %"PRIuSIZE", but %"PRIuSIZE".",
8229 objspace_live_slots(objspace), data.live_object_count);
8230 }
8231 }
8232
8233 if (!is_marking(objspace)) {
8234 if (objspace->rgengc.old_objects != data.old_object_count) {
8235 rb_bug("inconsistent old slot number: expect %"PRIuSIZE", but %"PRIuSIZE".",
8236 objspace->rgengc.old_objects, data.old_object_count);
8237 }
8238 if (objspace->rgengc.uncollectible_wb_unprotected_objects != data.remembered_shady_count) {
8239 rb_bug("inconsistent number of wb unprotected objects: expect %"PRIuSIZE", but %"PRIuSIZE".",
8240 objspace->rgengc.uncollectible_wb_unprotected_objects, data.remembered_shady_count);
8241 }
8242 }
8243
8244 if (!finalizing) {
8245 size_t list_count = 0;
8246
8247 {
8248 VALUE z = heap_pages_deferred_final;
8249 while (z) {
8250 list_count++;
8251 z = RZOMBIE(z)->next;
8252 }
8253 }
8254
8255 if (heap_pages_final_slots != data.zombie_object_count ||
8256 heap_pages_final_slots != list_count) {
8257
8258 rb_bug("inconsistent finalizing object count:\n"
8259 " expect %"PRIuSIZE"\n"
8260 " but %"PRIuSIZE" zombies\n"
8261 " heap_pages_deferred_final list has %"PRIuSIZE" items.",
8262 heap_pages_final_slots,
8263 data.zombie_object_count,
8264 list_count);
8265 }
8266 }
8267
8268 gc_report(5, objspace, "gc_verify_internal_consistency: OK\n");
8269}
8270
8271static void
8272gc_verify_internal_consistency(rb_objspace_t *objspace)
8273{
8274 RB_VM_LOCK_ENTER();
8275 {
8276 rb_vm_barrier(); // stop other ractors
8277
8278 unsigned int prev_during_gc = during_gc;
8279 during_gc = FALSE; // stop gc here
8280 {
8281 gc_verify_internal_consistency_(objspace);
8282 }
8283 during_gc = prev_during_gc;
8284 }
8285 RB_VM_LOCK_LEAVE();
8286}
8287
8288void
8289rb_gc_verify_internal_consistency(void)
8290{
8291 gc_verify_internal_consistency(&rb_objspace);
8292}
8293
8294static void
8295heap_move_pooled_pages_to_free_pages(rb_heap_t *heap)
8296{
8297 if (heap->pooled_pages) {
8298 if (heap->free_pages) {
8299 struct heap_page *free_pages_tail = heap->free_pages;
8300 while (free_pages_tail->free_next) {
8301 free_pages_tail = free_pages_tail->free_next;
8302 }
8303 free_pages_tail->free_next = heap->pooled_pages;
8304 }
8305 else {
8306 heap->free_pages = heap->pooled_pages;
8307 }
8308
8309 heap->pooled_pages = NULL;
8310 }
8311}
8312
8313/* marks */
8314
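/* gc_marks_start: set up a marking phase.  A full (major) mark clears every
 * mark/uncollectible/marking/remembered/pinned bitmap and resets the
 * old-object counters, so all live objects must be re-marked from the roots;
 * a minor mark keeps old objects marked and only re-scans the remembered set
 * (see rgengc_rememberset_mark below).  step_slots controls how much marking
 * work each incremental step performs. */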
8315static void
8316gc_marks_start(rb_objspace_t *objspace, int full_mark)
8317{
8318 /* start marking */
8319 gc_report(1, objspace, "gc_marks_start: (%s)\n", full_mark ? "full" : "minor");
8320 gc_mode_transition(objspace, gc_mode_marking);
8321
8322 if (full_mark) {
8323 size_t incremental_marking_steps = (objspace->rincgc.pooled_slots / INCREMENTAL_MARK_STEP_ALLOCATIONS) + 1;
8324 objspace->rincgc.step_slots = (objspace->marked_slots * 2) / incremental_marking_steps;
8325
8326 if (0) fprintf(stderr, "objspace->marked_slots: %"PRIdSIZE", "
8327 "objspace->rincgc.pooled_page_num: %"PRIdSIZE", "
8328 "objspace->rincgc.step_slots: %"PRIdSIZE", \n",
8329 objspace->marked_slots, objspace->rincgc.pooled_slots, objspace->rincgc.step_slots);
8330 objspace->flags.during_minor_gc = FALSE;
8331 if (ruby_enable_autocompact) {
8332 objspace->flags.during_compacting |= TRUE;
8333 }
8334 objspace->profile.major_gc_count++;
8335 objspace->rgengc.uncollectible_wb_unprotected_objects = 0;
8336 objspace->rgengc.old_objects = 0;
8337 objspace->rgengc.last_major_gc = objspace->profile.count;
8338 objspace->marked_slots = 0;
8339
8340 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8341 rb_size_pool_t *size_pool = &size_pools[i];
8342 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
8343 rgengc_mark_and_rememberset_clear(objspace, heap);
8344 heap_move_pooled_pages_to_free_pages(heap);
8345
8346 if (objspace->flags.during_compacting) {
8347 struct heap_page *page = NULL;
8348
8349 ccan_list_for_each(&heap->pages, page, page_node) {
8350 page->pinned_slots = 0;
8351 }
8352 }
8353 }
8354 }
8355 else {
8356 objspace->flags.during_minor_gc = TRUE;
8357 objspace->marked_slots =
8358 objspace->rgengc.old_objects + objspace->rgengc.uncollectible_wb_unprotected_objects; /* uncollectible objects are marked already */
8359 objspace->profile.minor_gc_count++;
8360
8361 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8362 rgengc_rememberset_mark(objspace, SIZE_POOL_EDEN_HEAP(&size_pools[i]));
8363 }
8364 }
8365
8366 gc_mark_roots(objspace, NULL);
8367
8368 gc_report(1, objspace, "gc_marks_start: (%s) end, stack in %"PRIdSIZE"\n",
8369 full_mark ? "full" : "minor", mark_stack_size(&objspace->mark_stack));
8370}
8371
8372static inline void
8373gc_marks_wb_unprotected_objects_plane(rb_objspace_t *objspace, uintptr_t p, bits_t bits)
8374{
8375 if (bits) {
8376 do {
8377 if (bits & 1) {
8378 gc_report(2, objspace, "gc_marks_wb_unprotected_objects: marked shady: %s\n", obj_info((VALUE)p));
8379 GC_ASSERT(RVALUE_WB_UNPROTECTED((VALUE)p));
8380 GC_ASSERT(RVALUE_MARKED((VALUE)p));
8381 gc_mark_children(objspace, (VALUE)p);
8382 }
8383 p += BASE_SLOT_SIZE;
8384 bits >>= 1;
8385 } while (bits);
8386 }
8387}
8388
8389static void
8390gc_marks_wb_unprotected_objects(rb_objspace_t *objspace, rb_heap_t *heap)
8391{
8392 struct heap_page *page = 0;
8393
8394 ccan_list_for_each(&heap->pages, page, page_node) {
8395 bits_t *mark_bits = page->mark_bits;
8396 bits_t *wbun_bits = page->wb_unprotected_bits;
8397 uintptr_t p = page->start;
8398 size_t j;
8399
8400 bits_t bits = mark_bits[0] & wbun_bits[0];
8401 bits >>= NUM_IN_PAGE(p);
8402 gc_marks_wb_unprotected_objects_plane(objspace, p, bits);
8403 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
8404
8405 for (j=1; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
8406 bits_t bits = mark_bits[j] & wbun_bits[j];
8407
8408 gc_marks_wb_unprotected_objects_plane(objspace, p, bits);
8409 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
8410 }
8411 }
8412
8413 gc_mark_stacked_objects_all(objspace);
8414}
8415
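/* gc_update_weak_references: after marking, each registered weak reference
 * whose target did not survive is overwritten with Qundef; surviving targets
 * are counted and the darray is shrunk back to that capacity for the next
 * cycle. */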
8416static void
8417gc_update_weak_references(rb_objspace_t *objspace)
8418{
8419 size_t retained_weak_references_count = 0;
8420 VALUE **ptr_ptr;
8421 rb_darray_foreach(objspace->weak_references, i, ptr_ptr) {
8422 if (!*ptr_ptr) continue;
8423
8424 VALUE obj = **ptr_ptr;
8425
8426 if (RB_SPECIAL_CONST_P(obj)) continue;
8427
8428 if (!RVALUE_MARKED(obj)) {
8429 **ptr_ptr = Qundef;
8430 }
8431 else {
8432 retained_weak_references_count++;
8433 }
8434 }
8435
8436 objspace->profile.retained_weak_references_count = retained_weak_references_count;
8437
8438 rb_darray_clear(objspace->weak_references);
8439 rb_darray_resize_capa_without_gc(&objspace->weak_references, retained_weak_references_count);
8440}
8441
8442static void
8443gc_marks_finish(rb_objspace_t *objspace)
8444{
8445 /* finish incremental GC */
8446 if (is_incremental_marking(objspace)) {
8447 if (RGENGC_CHECK_MODE && is_mark_stack_empty(&objspace->mark_stack) == 0) {
8448 rb_bug("gc_marks_finish: mark stack is not empty (%"PRIdSIZE").",
8449 mark_stack_size(&objspace->mark_stack));
8450 }
8451
8452 gc_mark_roots(objspace, 0);
8453 while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == false);
8454
8455#if RGENGC_CHECK_MODE >= 2
8456 if (gc_verify_heap_pages(objspace) != 0) {
8457 rb_bug("gc_marks_finish (incremental): there are remembered old objects.");
8458 }
8459#endif
8460
8461 objspace->flags.during_incremental_marking = FALSE;
8462 /* check children of all marked wb-unprotected objects */
8463 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8464 gc_marks_wb_unprotected_objects(objspace, SIZE_POOL_EDEN_HEAP(&size_pools[i]));
8465 }
8466 }
8467
8468 gc_update_weak_references(objspace);
8469
8470#if RGENGC_CHECK_MODE >= 2
8471 gc_verify_internal_consistency(objspace);
8472#endif
8473
8474#if RGENGC_CHECK_MODE >= 4
8475 during_gc = FALSE;
8476 gc_marks_check(objspace, gc_check_after_marks_i, "after_marks");
8477 during_gc = TRUE;
8478#endif
8479
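    /* Heap sizing heuristic: sweep_slots is how many slots sweeping is
     * expected to reclaim (total - marked).  When it drops below
     * min_free_slots after a minor GC, a major GC is requested via
     * GPR_FLAG_MAJOR_BY_NOFREE (or, if a major GC ran recently, the
     * full-mark limits are recomputed as if this mark had been major).
     * Illustrative numbers only: with total_slots = 10000 and
     * marked_slots = 9600, sweep_slots = 400, so a min_free_slots of,
     * say, 2000 would trigger this path. */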
8480 {
8481 /* decide full GC is needed or not */
8482 size_t total_slots = heap_allocatable_slots(objspace) + heap_eden_total_slots(objspace);
8483 size_t sweep_slots = total_slots - objspace->marked_slots; /* will be swept slots */
8484 size_t max_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_max_ratio);
8485 size_t min_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_min_ratio);
8486 int full_marking = is_full_marking(objspace);
8487 const int r_cnt = GET_VM()->ractor.cnt;
8488 const int r_mul = r_cnt > 8 ? 8 : r_cnt; // upto 8
8489
8490 GC_ASSERT(heap_eden_total_slots(objspace) >= objspace->marked_slots);
8491
8492 /* Setup freeable slots. */
8493 size_t total_init_slots = 0;
8494 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8495 total_init_slots += gc_params.size_pool_init_slots[i] * r_mul;
8496 }
8497
8498 if (max_free_slots < total_init_slots) {
8499 max_free_slots = total_init_slots;
8500 }
8501
8502 if (sweep_slots > max_free_slots) {
8503 heap_pages_freeable_pages = (sweep_slots - max_free_slots) / HEAP_PAGE_OBJ_LIMIT;
8504 }
8505 else {
8506 heap_pages_freeable_pages = 0;
8507 }
8508
8509 /* check free_min */
8510 if (min_free_slots < gc_params.heap_free_slots * r_mul) {
8511 min_free_slots = gc_params.heap_free_slots * r_mul;
8512 }
8513
8514 if (sweep_slots < min_free_slots) {
8515 if (!full_marking) {
8516 if (objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) {
8517 full_marking = TRUE;
8518 /* do not update last_major_gc, because full marking is not done. */
8519 /* goto increment; */
8520 }
8521 else {
8522 gc_report(1, objspace, "gc_marks_finish: next is full GC!!\n");
8523 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_NOFREE;
8524 }
8525 }
8526 }
8527
8528 if (full_marking) {
8529 /* See the comment about RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR */
8530 const double r = gc_params.oldobject_limit_factor;
8531 objspace->rgengc.uncollectible_wb_unprotected_objects_limit = MAX(
8532 (size_t)(objspace->rgengc.uncollectible_wb_unprotected_objects * r),
8533 (size_t)(objspace->rgengc.old_objects * gc_params.uncollectible_wb_unprotected_objects_limit_ratio)
8534 );
8535 objspace->rgengc.old_objects_limit = (size_t)(objspace->rgengc.old_objects * r);
8536 }
8537
8538 if (objspace->rgengc.uncollectible_wb_unprotected_objects > objspace->rgengc.uncollectible_wb_unprotected_objects_limit) {
8539 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_SHADY;
8540 }
8541 if (objspace->rgengc.old_objects > objspace->rgengc.old_objects_limit) {
8542 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_OLDGEN;
8543 }
8544 if (RGENGC_FORCE_MAJOR_GC) {
8545 objspace->rgengc.need_major_gc = GPR_FLAG_MAJOR_BY_FORCE;
8546 }
8547
8548 gc_report(1, objspace, "gc_marks_finish (marks %"PRIdSIZE" objects, "
8549 "old %"PRIdSIZE" objects, total %"PRIdSIZE" slots, "
8550 "sweep %"PRIdSIZE" slots, increment: %"PRIdSIZE", next GC: %s)\n",
8551 objspace->marked_slots, objspace->rgengc.old_objects, heap_eden_total_slots(objspace), sweep_slots, heap_allocatable_pages(objspace),
8552 objspace->rgengc.need_major_gc ? "major" : "minor");
8553 }
8554
8555 rb_ractor_finish_marking();
8556
8557 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_END_MARK, 0);
8558}
8559
8560static bool
8561gc_compact_heap_cursors_met_p(rb_heap_t *heap)
8562{
8563 return heap->sweeping_page == heap->compact_cursor;
8564}
8565
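/* gc_compact_destination_pool: when an object is moved during compaction it
 * may change size pools, e.g. a T_STRING or T_ARRAY whose embedded size now
 * fits a smaller slot (or needs a larger one).  Objects whose size cannot be
 * re-derived here stay in their source pool. */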
8566static rb_size_pool_t *
8567gc_compact_destination_pool(rb_objspace_t *objspace, rb_size_pool_t *src_pool, VALUE src)
8568{
8569 size_t obj_size;
8570 size_t idx = 0;
8571
8572 switch (BUILTIN_TYPE(src)) {
8573 case T_ARRAY:
8574 obj_size = rb_ary_size_as_embedded(src);
8575 break;
8576
8577 case T_OBJECT:
8578 if (rb_shape_obj_too_complex(src)) {
8579 return &size_pools[0];
8580 }
8581 else {
8582 obj_size = rb_obj_embedded_size(ROBJECT_IV_CAPACITY(src));
8583 }
8584 break;
8585
8586 case T_STRING:
8587 obj_size = rb_str_size_as_embedded(src);
8588 break;
8589
8590 case T_HASH:
8591 obj_size = sizeof(struct RHash) + (RHASH_ST_TABLE_P(src) ? sizeof(st_table) : sizeof(ar_table));
8592 break;
8593
8594 default:
8595 return src_pool;
8596 }
8597
8598 if (rb_gc_size_allocatable_p(obj_size)){
8599 idx = size_pool_idx_for_size(obj_size);
8600 }
8601 return &size_pools[idx];
8602}
8603
8604static bool
8605gc_compact_move(rb_objspace_t *objspace, rb_heap_t *heap, rb_size_pool_t *size_pool, VALUE src)
8606{
8607 GC_ASSERT(BUILTIN_TYPE(src) != T_MOVED);
8608 GC_ASSERT(gc_is_moveable_obj(objspace, src));
8609
8610 rb_size_pool_t *dest_pool = gc_compact_destination_pool(objspace, size_pool, src);
8611 rb_heap_t *dheap = SIZE_POOL_EDEN_HEAP(dest_pool);
8612 rb_shape_t *new_shape = NULL;
8613 rb_shape_t *orig_shape = NULL;
8614
8615 if (gc_compact_heap_cursors_met_p(dheap)) {
8616 return dheap != heap;
8617 }
8618
8619 if (RB_TYPE_P(src, T_OBJECT)) {
8620 orig_shape = rb_shape_get_shape(src);
8621 if (dheap != heap && !rb_shape_obj_too_complex(src)) {
8622 rb_shape_t *initial_shape = rb_shape_get_shape_by_id((shape_id_t)((dest_pool - size_pools) + SIZE_POOL_COUNT));
8623 new_shape = rb_shape_traverse_from_new_root(initial_shape, orig_shape);
8624
8625 if (!new_shape) {
8626 dest_pool = size_pool;
8627 dheap = heap;
8628 }
8629 }
8630 }
8631
8632 while (!try_move(objspace, dheap, dheap->free_pages, src)) {
8633 struct gc_sweep_context ctx = {
8634 .page = dheap->sweeping_page,
8635 .final_slots = 0,
8636 .freed_slots = 0,
8637 .empty_slots = 0,
8638 };
8639
8640 /* The page of src could be partially compacted, so it may contain
8641 * T_MOVED. Sweeping a page may read objects on this page, so we
8642 * need to lock the page. */
8643 lock_page_body(objspace, GET_PAGE_BODY(src));
8644 gc_sweep_page(objspace, dheap, &ctx);
8645 unlock_page_body(objspace, GET_PAGE_BODY(src));
8646
8647 if (dheap->sweeping_page->free_slots > 0) {
8648 heap_add_freepage(dheap, dheap->sweeping_page);
8649 }
8650
8651 dheap->sweeping_page = ccan_list_next(&dheap->pages, dheap->sweeping_page, page_node);
8652 if (gc_compact_heap_cursors_met_p(dheap)) {
8653 return dheap != heap;
8654 }
8655 }
8656
8657 if (orig_shape) {
8658 if (new_shape) {
8659 VALUE dest = rb_gc_location(src);
8660 rb_shape_set_shape(dest, new_shape);
8661 }
8662 RMOVED(src)->original_shape_id = rb_shape_id(orig_shape);
8663 }
8664
8665 return true;
8666}
8667
8668static bool
8669gc_compact_plane(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap, uintptr_t p, bits_t bitset, struct heap_page *page)
8670{
8671 short slot_size = page->slot_size;
8672 short slot_bits = slot_size / BASE_SLOT_SIZE;
8673 GC_ASSERT(slot_bits > 0);
8674
8675 do {
8676 VALUE vp = (VALUE)p;
8677 GC_ASSERT(vp % sizeof(RVALUE) == 0);
8678
8679 if (bitset & 1) {
8680 objspace->rcompactor.considered_count_table[BUILTIN_TYPE(vp)]++;
8681
8682 if (gc_is_moveable_obj(objspace, vp)) {
8683 if (!gc_compact_move(objspace, heap, size_pool, vp)) {
8684 // the cursors met, bubble up
8685 return false;
8686 }
8687 }
8688 }
8689 p += slot_size;
8690 bitset >>= slot_bits;
8691 } while (bitset);
8692
8693 return true;
8694}
8695
8696// Iterate over all the objects on the page, moving them to where they want to go
8697static bool
8698gc_compact_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap, struct heap_page *page)
8699{
8700 GC_ASSERT(page == heap->compact_cursor);
8701
8702 bits_t *mark_bits, *pin_bits;
8703 bits_t bitset;
8704 uintptr_t p = page->start;
8705
8706 mark_bits = page->mark_bits;
8707 pin_bits = page->pinned_bits;
8708
8709 // objects that can be moved are marked and not pinned
8710 bitset = (mark_bits[0] & ~pin_bits[0]);
8711 bitset >>= NUM_IN_PAGE(p);
8712 if (bitset) {
8713 if (!gc_compact_plane(objspace, size_pool, heap, (uintptr_t)p, bitset, page))
8714 return false;
8715 }
8716 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
8717
8718 for (int j = 1; j < HEAP_PAGE_BITMAP_LIMIT; j++) {
8719 bitset = (mark_bits[j] & ~pin_bits[j]);
8720 if (bitset) {
8721 if (!gc_compact_plane(objspace, size_pool, heap, (uintptr_t)p, bitset, page))
8722 return false;
8723 }
8724 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
8725 }
8726
8727 return true;
8728}
8729
8730static bool
8731gc_compact_all_compacted_p(rb_objspace_t *objspace)
8732{
8733 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8734 rb_size_pool_t *size_pool = &size_pools[i];
8735 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
8736
8737 if (heap->total_pages > 0 &&
8738 !gc_compact_heap_cursors_met_p(heap)) {
8739 return false;
8740 }
8741 }
8742
8743 return true;
8744}
8745
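/* gc_sweep_compact: two-finger compaction per size pool.  The sweep cursor
 * (sweeping_page) walks pages from the front of the heap while compact_cursor
 * walks from the back; objects on the compact-cursor page are moved into free
 * slots produced by sweeping, and the pass for a pool ends when the two
 * cursors meet (gc_compact_heap_cursors_met_p). */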
8746static void
8747gc_sweep_compact(rb_objspace_t *objspace)
8748{
8749 gc_compact_start(objspace);
8750#if RGENGC_CHECK_MODE >= 2
8751 gc_verify_internal_consistency(objspace);
8752#endif
8753
8754 while (!gc_compact_all_compacted_p(objspace)) {
8755 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8756 rb_size_pool_t *size_pool = &size_pools[i];
8757 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
8758
8759 if (gc_compact_heap_cursors_met_p(heap)) {
8760 continue;
8761 }
8762
8763 struct heap_page *start_page = heap->compact_cursor;
8764
8765 if (!gc_compact_page(objspace, size_pool, heap, start_page)) {
8766 lock_page_body(objspace, GET_PAGE_BODY(start_page->start));
8767
8768 continue;
8769 }
8770
8771 // If we get here, we've finished moving all objects on the compact_cursor page
8772 // So we can lock it and move the cursor on to the next one.
8773 lock_page_body(objspace, GET_PAGE_BODY(start_page->start));
8774 heap->compact_cursor = ccan_list_prev(&heap->pages, heap->compact_cursor, page_node);
8775 }
8776 }
8777
8778 gc_compact_finish(objspace);
8779
8780#if RGENGC_CHECK_MODE >= 2
8781 gc_verify_internal_consistency(objspace);
8782#endif
8783}
8784
8785static void
8786gc_marks_rest(rb_objspace_t *objspace)
8787{
8788 gc_report(1, objspace, "gc_marks_rest\n");
8789
8790 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8791 SIZE_POOL_EDEN_HEAP(&size_pools[i])->pooled_pages = NULL;
8792 }
8793
8794 if (is_incremental_marking(objspace)) {
8795 while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == FALSE);
8796 }
8797 else {
8798 gc_mark_stacked_objects_all(objspace);
8799 }
8800
8801 gc_marks_finish(objspace);
8802}
8803
8804static bool
8805gc_marks_step(rb_objspace_t *objspace, size_t slots)
8806{
8807 bool marking_finished = false;
8808
8809 GC_ASSERT(is_marking(objspace));
8810 if (gc_mark_stacked_objects_incremental(objspace, slots)) {
8811 gc_marks_finish(objspace);
8812
8813 marking_finished = true;
8814 }
8815
8816 return marking_finished;
8817}
8818
8819static bool
8820gc_marks_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
8821{
8822 GC_ASSERT(dont_gc_val() == FALSE);
8823 bool marking_finished = true;
8824
8825 gc_marking_enter(objspace);
8826
8827 if (heap->free_pages) {
8828 gc_report(2, objspace, "gc_marks_continue: has pooled pages");
8829
8830 marking_finished = gc_marks_step(objspace, objspace->rincgc.step_slots);
8831 }
8832 else {
8833 gc_report(2, objspace, "gc_marks_continue: no more pooled pages (stack depth: %"PRIdSIZE").\n",
8834 mark_stack_size(&objspace->mark_stack));
8835 size_pool->force_incremental_marking_finish_count++;
8836 gc_marks_rest(objspace);
8837 }
8838
8839 gc_marking_exit(objspace);
8840
8841 return marking_finished;
8842}
8843
8844static bool
8845gc_marks(rb_objspace_t *objspace, int full_mark)
8846{
8847 gc_prof_mark_timer_start(objspace);
8848 gc_marking_enter(objspace);
8849
8850 bool marking_finished = false;
8851
8852 /* setup marking */
8853
8854 gc_marks_start(objspace, full_mark);
8855 if (!is_incremental_marking(objspace)) {
8856 gc_marks_rest(objspace);
8857 marking_finished = true;
8858 }
8859
8860#if RGENGC_PROFILE > 0
8861 if (gc_prof_record(objspace)) {
8862 gc_profile_record *record = gc_prof_record(objspace);
8863 record->old_objects = objspace->rgengc.old_objects;
8864 }
8865#endif
8866
8867 gc_marking_exit(objspace);
8868 gc_prof_mark_timer_stop(objspace);
8869
8870 return marking_finished;
8871}
8872
8873/* RGENGC */
8874
8875static void
8876gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...)
8877{
8878 if (level <= RGENGC_DEBUG) {
8879 char buf[1024];
8880 FILE *out = stderr;
8881 va_list args;
8882 const char *status = " ";
8883
8884 if (during_gc) {
8885 status = is_full_marking(objspace) ? "+" : "-";
8886 }
8887 else {
8888 if (is_lazy_sweeping(objspace)) {
8889 status = "S";
8890 }
8891 if (is_incremental_marking(objspace)) {
8892 status = "M";
8893 }
8894 }
8895
8896 va_start(args, fmt);
8897 vsnprintf(buf, 1024, fmt, args);
8898 va_end(args);
8899
8900 fprintf(out, "%s|", status);
8901 fputs(buf, out);
8902 }
8903}
8904
8905/* bit operations */
8906
8907static int
8908rgengc_remembersetbits_set(rb_objspace_t *objspace, VALUE obj)
8909{
8910 struct heap_page *page = GET_HEAP_PAGE(obj);
8911 bits_t *bits = &page->remembered_bits[0];
8912
8913 if (MARKED_IN_BITMAP(bits, obj)) {
8914 return FALSE;
8915 }
8916 else {
8917 page->flags.has_remembered_objects = TRUE;
8918 MARK_IN_BITMAP(bits, obj);
8919 return TRUE;
8920 }
8921}
8922
8923/* wb, etc */
8924
8925/* return FALSE if already remembered */
8926static int
8927rgengc_remember(rb_objspace_t *objspace, VALUE obj)
8928{
8929 gc_report(6, objspace, "rgengc_remember: %s %s\n", obj_info(obj),
8930 RVALUE_REMEMBERED(obj) ? "was already remembered" : "is remembered now");
8931
8932 check_rvalue_consistency(obj);
8933
8934 if (RGENGC_CHECK_MODE) {
8935 if (RVALUE_WB_UNPROTECTED(obj)) rb_bug("rgengc_remember: %s is not wb protected.", obj_info(obj));
8936 }
8937
8938#if RGENGC_PROFILE > 0
8939 if (!RVALUE_REMEMBERED(obj)) {
8940 if (RVALUE_WB_UNPROTECTED(obj) == 0) {
8941 objspace->profile.total_remembered_normal_object_count++;
8942#if RGENGC_PROFILE >= 2
8943 objspace->profile.remembered_normal_object_count_types[BUILTIN_TYPE(obj)]++;
8944#endif
8945 }
8946 }
8947#endif /* RGENGC_PROFILE > 0 */
8948
8949 return rgengc_remembersetbits_set(objspace, obj);
8950}
8951
8952#ifndef PROFILE_REMEMBERSET_MARK
8953#define PROFILE_REMEMBERSET_MARK 0
8954#endif
8955
8956static inline void
8957rgengc_rememberset_mark_plane(rb_objspace_t *objspace, uintptr_t p, bits_t bitset)
8958{
8959 if (bitset) {
8960 do {
8961 if (bitset & 1) {
8962 VALUE obj = (VALUE)p;
8963 gc_report(2, objspace, "rgengc_rememberset_mark: mark %s\n", obj_info(obj));
8964 GC_ASSERT(RVALUE_UNCOLLECTIBLE(obj));
8965 GC_ASSERT(RVALUE_OLD_P(obj) || RVALUE_WB_UNPROTECTED(obj));
8966
8967 gc_mark_children(objspace, obj);
8968 }
8969 p += BASE_SLOT_SIZE;
8970 bitset >>= 1;
8971 } while (bitset);
8972 }
8973}
8974
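/* rgengc_rememberset_mark: at the start of a minor GC, re-mark the children
 * of every remembered object and of every uncollectible WB-unprotected
 * object.  The per-page bitmaps are combined as
 * remembered | (uncollectible & wb_unprotected), the remembered bits are
 * cleared, and each set bit is visited via rgengc_rememberset_mark_plane(). */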
8975static void
8976rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap)
8977{
8978 size_t j;
8979 struct heap_page *page = 0;
8980#if PROFILE_REMEMBERSET_MARK
8981 int has_old = 0, has_shady = 0, has_both = 0, skip = 0;
8982#endif
8983 gc_report(1, objspace, "rgengc_rememberset_mark: start\n");
8984
8985 ccan_list_for_each(&heap->pages, page, page_node) {
8986 if (page->flags.has_remembered_objects | page->flags.has_uncollectible_wb_unprotected_objects) {
8987 uintptr_t p = page->start;
8988 bits_t bitset, bits[HEAP_PAGE_BITMAP_LIMIT];
8989 bits_t *remembered_bits = page->remembered_bits;
8990 bits_t *uncollectible_bits = page->uncollectible_bits;
8991 bits_t *wb_unprotected_bits = page->wb_unprotected_bits;
8992#if PROFILE_REMEMBERSET_MARK
8993 if (page->flags.has_remembered_objects && page->flags.has_uncollectible_wb_unprotected_objects) has_both++;
8994 else if (page->flags.has_remembered_objects) has_old++;
8995 else if (page->flags.has_uncollectible_wb_unprotected_objects) has_shady++;
8996#endif
8997 for (j=0; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
8998 bits[j] = remembered_bits[j] | (uncollectible_bits[j] & wb_unprotected_bits[j]);
8999 remembered_bits[j] = 0;
9000 }
9001 page->flags.has_remembered_objects = FALSE;
9002
9003 bitset = bits[0];
9004 bitset >>= NUM_IN_PAGE(p);
9005 rgengc_rememberset_mark_plane(objspace, p, bitset);
9006 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
9007
9008 for (j=1; j < HEAP_PAGE_BITMAP_LIMIT; j++) {
9009 bitset = bits[j];
9010 rgengc_rememberset_mark_plane(objspace, p, bitset);
9011 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
9012 }
9013 }
9014#if PROFILE_REMEMBERSET_MARK
9015 else {
9016 skip++;
9017 }
9018#endif
9019 }
9020
9021#if PROFILE_REMEMBERSET_MARK
9022 fprintf(stderr, "%d\t%d\t%d\t%d\n", has_both, has_old, has_shady, skip);
9023#endif
9024 gc_report(1, objspace, "rgengc_rememberset_mark: finished\n");
9025}
9026
9027static void
9028rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap)
9029{
9030 struct heap_page *page = 0;
9031
9032 ccan_list_for_each(&heap->pages, page, page_node) {
9033 memset(&page->mark_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
9034 memset(&page->uncollectible_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
9035 memset(&page->marking_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
9036 memset(&page->remembered_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
9037 memset(&page->pinned_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
9038 page->flags.has_uncollectible_wb_unprotected_objects = FALSE;
9039 page->flags.has_remembered_objects = FALSE;
9040 }
9041}
9042
9043/* RGENGC: APIs */
9044
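/* Generational write barrier: when a reference from an old object `a' to a
 * young object `b' is created outside of marking, `a' is added to the
 * remembered set so the next minor GC re-scans it.  Without this, `b' could
 * be collected while still reachable from `a'. */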
9045NOINLINE(static void gc_writebarrier_generational(VALUE a, VALUE b, rb_objspace_t *objspace));
9046
9047static void
9048gc_writebarrier_generational(VALUE a, VALUE b, rb_objspace_t *objspace)
9049{
9050 if (RGENGC_CHECK_MODE) {
9051 if (!RVALUE_OLD_P(a)) rb_bug("gc_writebarrier_generational: %s is not an old object.", obj_info(a));
9052 if ( RVALUE_OLD_P(b)) rb_bug("gc_writebarrier_generational: %s is an old object.", obj_info(b));
9053 if (is_incremental_marking(objspace)) rb_bug("gc_writebarrier_generational: called while incremental marking: %s -> %s", obj_info(a), obj_info(b));
9054 }
9055
9056 /* mark `a' and remember (default behavior) */
9057 if (!RVALUE_REMEMBERED(a)) {
9058 RB_VM_LOCK_ENTER_NO_BARRIER();
9059 {
9060 rgengc_remember(objspace, a);
9061 }
9062 RB_VM_LOCK_LEAVE_NO_BARRIER();
9063 gc_report(1, objspace, "gc_writebarrier_generational: %s (remembered) -> %s\n", obj_info(a), obj_info(b));
9064 }
9065
9066 check_rvalue_consistency(a);
9067 check_rvalue_consistency(b);
9068}
9069
9070static void
9071gc_mark_from(rb_objspace_t *objspace, VALUE obj, VALUE parent)
9072{
9073 gc_mark_set_parent(objspace, parent);
9074 rgengc_check_relation(objspace, obj);
9075 if (gc_mark_set(objspace, obj) == FALSE) return;
9076 gc_aging(objspace, obj);
9077 gc_grey(objspace, obj);
9078}
9079
9080NOINLINE(static void gc_writebarrier_incremental(VALUE a, VALUE b, rb_objspace_t *objspace));
9081
9082static void
9083gc_writebarrier_incremental(VALUE a, VALUE b, rb_objspace_t *objspace)
9084{
9085 gc_report(2, objspace, "gc_writebarrier_incremental: [LG] %p -> %s\n", (void *)a, obj_info(b));
9086
9087 if (RVALUE_BLACK_P(a)) {
9088 if (RVALUE_WHITE_P(b)) {
9089 if (!RVALUE_WB_UNPROTECTED(a)) {
9090 gc_report(2, objspace, "gc_writebarrier_incremental: [IN] %p -> %s\n", (void *)a, obj_info(b));
9091 gc_mark_from(objspace, b, a);
9092 }
9093 }
9094 else if (RVALUE_OLD_P(a) && !RVALUE_OLD_P(b)) {
9095 rgengc_remember(objspace, a);
9096 }
9097
9098 if (UNLIKELY(objspace->flags.during_compacting)) {
9099 MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(b), b);
9100 }
9101 }
9102}
9103
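/* rb_gc_writebarrier: public write-barrier entry point, normally reached
 * through the RB_OBJ_WRITE()/RB_OBJ_WRITTEN() macros rather than called
 * directly.  Outside of incremental marking the generational barrier above
 * applies; during incremental marking the incremental barrier runs under the
 * VM lock, retrying if marking finished in the meantime. */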
9104void
9105rb_gc_writebarrier(VALUE a, VALUE b)
9106{
9107 rb_objspace_t *objspace = &rb_objspace;
9108
9109 if (RGENGC_CHECK_MODE) {
9110 if (SPECIAL_CONST_P(a)) rb_bug("rb_gc_writebarrier: a is special const: %"PRIxVALUE, a);
9111 if (SPECIAL_CONST_P(b)) rb_bug("rb_gc_writebarrier: b is special const: %"PRIxVALUE, b);
9112 }
9113
9114 retry:
9115 if (!is_incremental_marking(objspace)) {
9116 if (!RVALUE_OLD_P(a) || RVALUE_OLD_P(b)) {
9117 // do nothing
9118 }
9119 else {
9120 gc_writebarrier_generational(a, b, objspace);
9121 }
9122 }
9123 else {
9124 bool retry = false;
9125 /* slow path */
9126 RB_VM_LOCK_ENTER_NO_BARRIER();
9127 {
9128 if (is_incremental_marking(objspace)) {
9129 gc_writebarrier_incremental(a, b, objspace);
9130 }
9131 else {
9132 retry = true;
9133 }
9134 }
9135 RB_VM_LOCK_LEAVE_NO_BARRIER();
9136
9137 if (retry) goto retry;
9138 }
9139 return;
9140}
9141
9142void
9143rb_gc_writebarrier_unprotect(VALUE obj)
9144{
9145 if (RVALUE_WB_UNPROTECTED(obj)) {
9146 return;
9147 }
9148 else {
9149 rb_objspace_t *objspace = &rb_objspace;
9150
9151 gc_report(2, objspace, "rb_gc_writebarrier_unprotect: %s %s\n", obj_info(obj),
9152 RVALUE_REMEMBERED(obj) ? " (already remembered)" : "");
9153
9154 RB_VM_LOCK_ENTER_NO_BARRIER();
9155 {
9156 if (RVALUE_OLD_P(obj)) {
9157 gc_report(1, objspace, "rb_gc_writebarrier_unprotect: %s\n", obj_info(obj));
9158 RVALUE_DEMOTE(objspace, obj);
9159 gc_mark_set(objspace, obj);
9160 gc_remember_unprotected(objspace, obj);
9161
9162#if RGENGC_PROFILE
9163 objspace->profile.total_shade_operation_count++;
9164#if RGENGC_PROFILE >= 2
9165 objspace->profile.shade_operation_count_types[BUILTIN_TYPE(obj)]++;
9166#endif /* RGENGC_PROFILE >= 2 */
9167#endif /* RGENGC_PROFILE */
9168 }
9169 else {
9170 RVALUE_AGE_RESET(obj);
9171 }
9172
9173 RB_DEBUG_COUNTER_INC(obj_wb_unprotect);
9174 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
9175 }
9176 RB_VM_LOCK_LEAVE_NO_BARRIER();
9177 }
9178}
9179
9180/*
9181 * remember `obj' if needed.
9182 */
9183void
9184rb_gc_writebarrier_remember(VALUE obj)
9185{
9186 rb_objspace_t *objspace = &rb_objspace;
9187
9188 gc_report(1, objspace, "rb_gc_writebarrier_remember: %s\n", obj_info(obj));
9189
9190 if (is_incremental_marking(objspace)) {
9191 if (RVALUE_BLACK_P(obj)) {
9192 gc_grey(objspace, obj);
9193 }
9194 }
9195 else {
9196 if (RVALUE_OLD_P(obj)) {
9197 rgengc_remember(objspace, obj);
9198 }
9199 }
9200}
9201
9202void
9203rb_copy_wb_protected_attribute(VALUE dest, VALUE obj)
9204{
9205 rb_objspace_t *objspace = &rb_objspace;
9206
9207 if (RVALUE_WB_UNPROTECTED(obj) && !RVALUE_WB_UNPROTECTED(dest)) {
9208 if (!RVALUE_OLD_P(dest)) {
9209 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(dest), dest);
9210 RVALUE_AGE_RESET(dest);
9211 }
9212 else {
9213 RVALUE_DEMOTE(objspace, dest);
9214 }
9215 }
9216
9217 check_rvalue_consistency(dest);
9218}
9219
9220/* RGENGC analysis information */
9221
9222VALUE
9223rb_obj_rgengc_writebarrier_protected_p(VALUE obj)
9224{
9225 return RBOOL(!RVALUE_WB_UNPROTECTED(obj));
9226}
9227
9228VALUE
9229rb_obj_rgengc_promoted_p(VALUE obj)
9230{
9231 return RBOOL(OBJ_PROMOTED(obj));
9232}
9233
9234size_t
9235rb_obj_gc_flags(VALUE obj, ID* flags, size_t max)
9236{
9237 size_t n = 0;
9238 static ID ID_marked;
9239 static ID ID_wb_protected, ID_old, ID_marking, ID_uncollectible, ID_pinned;
9240
9241 if (!ID_marked) {
9242#define I(s) ID_##s = rb_intern(#s);
9243 I(marked);
9244 I(wb_protected);
9245 I(old);
9246 I(marking);
9247 I(uncollectible);
9248 I(pinned);
9249#undef I
9250 }
9251
9252 if (RVALUE_WB_UNPROTECTED(obj) == 0 && n<max) flags[n++] = ID_wb_protected;
9253 if (RVALUE_OLD_P(obj) && n<max) flags[n++] = ID_old;
9254 if (RVALUE_UNCOLLECTIBLE(obj) && n<max) flags[n++] = ID_uncollectible;
9255 if (MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj) && n<max) flags[n++] = ID_marking;
9256 if (MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) && n<max) flags[n++] = ID_marked;
9257 if (MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj) && n<max) flags[n++] = ID_pinned;
9258 return n;
9259}
9260
9261/* GC */
9262
9263void
9264rb_gc_ractor_newobj_cache_clear(rb_ractor_newobj_cache_t *newobj_cache)
9265{
9266 newobj_cache->incremental_mark_step_allocated_slots = 0;
9267
9268 for (size_t size_pool_idx = 0; size_pool_idx < SIZE_POOL_COUNT; size_pool_idx++) {
9269 rb_ractor_newobj_size_pool_cache_t *cache = &newobj_cache->size_pool_caches[size_pool_idx];
9270
9271 struct heap_page *page = cache->using_page;
9272 RVALUE *freelist = cache->freelist;
9273 RUBY_DEBUG_LOG("ractor using_page:%p freelist:%p", (void *)page, (void *)freelist);
9274
9275 heap_page_freelist_append(page, freelist);
9276
9277 cache->using_page = NULL;
9278 cache->freelist = NULL;
9279 }
9280}
9281
9282void
9283rb_gc_force_recycle(VALUE obj)
9284{
9285 /* no-op */
9286}
9287
9288#ifndef MARK_OBJECT_ARY_BUCKET_SIZE
9289#define MARK_OBJECT_ARY_BUCKET_SIZE 1024
9290#endif
9291
9292void
9293rb_gc_register_mark_object(VALUE obj)
9294{
9295 if (!is_pointer_to_heap(&rb_objspace, (void *)obj))
9296 return;
9297
9298 RB_VM_LOCK_ENTER();
9299 {
9300 VALUE ary_ary = GET_VM()->mark_object_ary;
9301 VALUE ary = rb_ary_last(0, 0, ary_ary);
9302
9303 if (NIL_P(ary) || RARRAY_LEN(ary) >= MARK_OBJECT_ARY_BUCKET_SIZE) {
9304 ary = rb_ary_hidden_new(MARK_OBJECT_ARY_BUCKET_SIZE);
9305 rb_ary_push(ary_ary, ary);
9306 }
9307
9308 rb_ary_push(ary, obj);
9309 }
9310 RB_VM_LOCK_LEAVE();
9311}
9312
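/* rb_gc_register_address / rb_gc_unregister_address: C extensions use these
 * to pin a global C variable holding a VALUE.  Registered addresses are kept
 * in the singly linked global_list and re-read on every GC by the
 * "global_list" root category in gc_mark_roots().  Illustrative usage from an
 * extension (my_cache is a hypothetical name):
 *
 *   static VALUE my_cache;
 *   rb_gc_register_address(&my_cache);
 *   my_cache = rb_ary_new();
 */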
9313void
9314rb_gc_register_address(VALUE *addr)
9315{
9316 rb_objspace_t *objspace = &rb_objspace;
9317 struct gc_list *tmp;
9318
9319 VALUE obj = *addr;
9320
9321 tmp = ALLOC(struct gc_list);
9322 tmp->next = global_list;
9323 tmp->varptr = addr;
9324 global_list = tmp;
9325
9326 /*
9327 * Because some C extensions have assignment-then-register bugs,
9328 * we defensively guard `obj` here so that it does not get swept.
9329 */
9330 RB_GC_GUARD(obj);
9331 if (0 && !SPECIAL_CONST_P(obj)) {
9332 rb_warn("Object is assigned to registering address already: %"PRIsVALUE,
9333 rb_obj_class(obj));
9334 rb_print_backtrace(stderr);
9335 }
9336}
9337
9338void
9339rb_gc_unregister_address(VALUE *addr)
9340{
9341 rb_objspace_t *objspace = &rb_objspace;
9342 struct gc_list *tmp = global_list;
9343
9344 if (tmp->varptr == addr) {
9345 global_list = tmp->next;
9346 xfree(tmp);
9347 return;
9348 }
9349 while (tmp->next) {
9350 if (tmp->next->varptr == addr) {
9351 struct gc_list *t = tmp->next;
9352
9353 tmp->next = tmp->next->next;
9354 xfree(t);
9355 break;
9356 }
9357 tmp = tmp->next;
9358 }
9359}
9360
9361void
9362rb_global_variable(VALUE *var)
9363{
9364 rb_gc_register_address(var);
9365}
9366
9367#define GC_NOTIFY 0
9368
9369enum {
9370 gc_stress_no_major,
9371 gc_stress_no_immediate_sweep,
9372 gc_stress_full_mark_after_malloc,
9373 gc_stress_max
9374};
9375
9376#define gc_stress_full_mark_after_malloc_p() \
9377 (FIXNUM_P(ruby_gc_stress_mode) && (FIX2LONG(ruby_gc_stress_mode) & (1<<gc_stress_full_mark_after_malloc)))
9378
9379static void
9380heap_ready_to_gc(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
9381{
9382 if (!heap->free_pages) {
9383 if (!heap_increment(objspace, size_pool, heap)) {
9384 size_pool_allocatable_pages_set(objspace, size_pool, 1);
9385 heap_increment(objspace, size_pool, heap);
9386 }
9387 }
9388}
9389
9390static int
9391ready_to_gc(rb_objspace_t *objspace)
9392{
9393 if (dont_gc_val() || during_gc || ruby_disable_gc) {
9394 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
9395 rb_size_pool_t *size_pool = &size_pools[i];
9396 heap_ready_to_gc(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
9397 }
9398 return FALSE;
9399 }
9400 else {
9401 return TRUE;
9402 }
9403}
9404
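/* gc_reset_malloc_info: adapt malloc_limit after each GC.  If the bytes
 * malloc'ed since the last GC (malloc_increase) exceeded the limit, the new
 * limit becomes increase * malloc_limit_growth_factor, capped at
 * malloc_limit_max; otherwise the limit decays by 2% toward
 * malloc_limit_min.  Illustrative numbers only: with a growth factor of 1.4,
 * a 20 MB increase over the limit yields a new limit of 28 MB.  The
 * RGENGC_ESTIMATE_OLDMALLOC branch applies the same idea to malloc'ed memory
 * retained by old objects in order to trigger major GCs. */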
9405static void
9406gc_reset_malloc_info(rb_objspace_t *objspace, bool full_mark)
9407{
9408 gc_prof_set_malloc_info(objspace);
9409 {
9410 size_t inc = ATOMIC_SIZE_EXCHANGE(malloc_increase, 0);
9411 size_t old_limit = malloc_limit;
9412
9413 if (inc > malloc_limit) {
9414 malloc_limit = (size_t)(inc * gc_params.malloc_limit_growth_factor);
9415 if (malloc_limit > gc_params.malloc_limit_max) {
9416 malloc_limit = gc_params.malloc_limit_max;
9417 }
9418 }
9419 else {
9420 malloc_limit = (size_t)(malloc_limit * 0.98); /* magic number */
9421 if (malloc_limit < gc_params.malloc_limit_min) {
9422 malloc_limit = gc_params.malloc_limit_min;
9423 }
9424 }
9425
9426 if (0) {
9427 if (old_limit != malloc_limit) {
9428 fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: %"PRIuSIZE" -> %"PRIuSIZE"\n",
9429 rb_gc_count(), old_limit, malloc_limit);
9430 }
9431 else {
9432 fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: not changed (%"PRIuSIZE")\n",
9433 rb_gc_count(), malloc_limit);
9434 }
9435 }
9436 }
9437
9438 /* reset oldmalloc info */
9439#if RGENGC_ESTIMATE_OLDMALLOC
9440 if (!full_mark) {
9441 if (objspace->rgengc.oldmalloc_increase > objspace->rgengc.oldmalloc_increase_limit) {
9442 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_OLDMALLOC;
9443 objspace->rgengc.oldmalloc_increase_limit =
9444 (size_t)(objspace->rgengc.oldmalloc_increase_limit * gc_params.oldmalloc_limit_growth_factor);
9445
9446 if (objspace->rgengc.oldmalloc_increase_limit > gc_params.oldmalloc_limit_max) {
9447 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_max;
9448 }
9449 }
9450
9451 if (0) fprintf(stderr, "%"PRIdSIZE"\t%d\t%"PRIuSIZE"\t%"PRIuSIZE"\t%"PRIdSIZE"\n",
9452 rb_gc_count(),
9453 objspace->rgengc.need_major_gc,
9454 objspace->rgengc.oldmalloc_increase,
9455 objspace->rgengc.oldmalloc_increase_limit,
9456 gc_params.oldmalloc_limit_max);
9457 }
9458 else {
9459 /* major GC */
9460 objspace->rgengc.oldmalloc_increase = 0;
9461
9462 if ((objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_BY_OLDMALLOC) == 0) {
9463 objspace->rgengc.oldmalloc_increase_limit =
9464 (size_t)(objspace->rgengc.oldmalloc_increase_limit / ((gc_params.oldmalloc_limit_growth_factor - 1)/10 + 1));
9465 if (objspace->rgengc.oldmalloc_increase_limit < gc_params.oldmalloc_limit_min) {
9466 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
9467 }
9468 }
9469 }
9470#endif
9471}
9472
9473static int
9474garbage_collect(rb_objspace_t *objspace, unsigned int reason)
9475{
9476 int ret;
9477
9478 RB_VM_LOCK_ENTER();
9479 {
9480#if GC_PROFILE_MORE_DETAIL
9481 objspace->profile.prepare_time = getrusage_time();
9482#endif
9483
9484 gc_rest(objspace);
9485
9486#if GC_PROFILE_MORE_DETAIL
9487 objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;
9488#endif
9489
9490 ret = gc_start(objspace, reason);
9491 }
9492 RB_VM_LOCK_LEAVE();
9493
9494 return ret;
9495}
9496
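/* gc_start: decide what kind of GC to run from the `reason' flags and the
 * current heuristics: full (major) vs. minor mark, incremental vs.
 * non-incremental marking, whether compaction is enabled, and immediate vs.
 * lazy sweeping.  GC stress mode and a pending need_major_gc can both
 * upgrade the request to a major GC. */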
9497static int
9498gc_start(rb_objspace_t *objspace, unsigned int reason)
9499{
9500 unsigned int do_full_mark = !!(reason & GPR_FLAG_FULL_MARK);
9501
9502 /* reason may be clobbered, later, so keep set immediate_sweep here */
9503 objspace->flags.immediate_sweep = !!(reason & GPR_FLAG_IMMEDIATE_SWEEP);
9504
9505 if (!heap_allocated_pages) return FALSE; /* heap is not ready */
9506 if (!(reason & GPR_FLAG_METHOD) && !ready_to_gc(objspace)) return TRUE; /* GC is not allowed */
9507
9508 GC_ASSERT(gc_mode(objspace) == gc_mode_none);
9509 GC_ASSERT(!is_lazy_sweeping(objspace));
9510 GC_ASSERT(!is_incremental_marking(objspace));
9511
9512 unsigned int lock_lev;
9513 gc_enter(objspace, gc_enter_event_start, &lock_lev);
9514
9515#if RGENGC_CHECK_MODE >= 2
9516 gc_verify_internal_consistency(objspace);
9517#endif
9518
9519 if (ruby_gc_stressful) {
9520 int flag = FIXNUM_P(ruby_gc_stress_mode) ? FIX2INT(ruby_gc_stress_mode) : 0;
9521
9522 if ((flag & (1<<gc_stress_no_major)) == 0) {
9523 do_full_mark = TRUE;
9524 }
9525
9526 objspace->flags.immediate_sweep = !(flag & (1<<gc_stress_no_immediate_sweep));
9527 }
9528
9529 if (objspace->rgengc.need_major_gc) {
9530 reason |= objspace->rgengc.need_major_gc;
9531 do_full_mark = TRUE;
9532 }
9533 else if (RGENGC_FORCE_MAJOR_GC) {
9534 reason = GPR_FLAG_MAJOR_BY_FORCE;
9535 do_full_mark = TRUE;
9536 }
9537
9538 objspace->rgengc.need_major_gc = GPR_FLAG_NONE;
9539
9540 if (do_full_mark && (reason & GPR_FLAG_MAJOR_MASK) == 0) {
9541 reason |= GPR_FLAG_MAJOR_BY_FORCE; /* GC by CAPI, METHOD, and so on. */
9542 }
9543
9544 if (objspace->flags.dont_incremental ||
9545 reason & GPR_FLAG_IMMEDIATE_MARK ||
9546 ruby_gc_stressful) {
9547 objspace->flags.during_incremental_marking = FALSE;
9548 }
9549 else {
9550 objspace->flags.during_incremental_marking = do_full_mark;
9551 }
9552
9553 /* Explicitly enable compaction (GC.compact) */
9554 if (do_full_mark && ruby_enable_autocompact) {
9555 objspace->flags.during_compacting = TRUE;
9556#if RGENGC_CHECK_MODE
9557 objspace->rcompactor.compare_func = ruby_autocompact_compare_func;
9558#endif
9559 }
9560 else {
9561 objspace->flags.during_compacting = !!(reason & GPR_FLAG_COMPACT);
9562 }
9563
9564 if (!GC_ENABLE_LAZY_SWEEP || objspace->flags.dont_incremental) {
9565 objspace->flags.immediate_sweep = TRUE;
9566 }
9567
9568 if (objspace->flags.immediate_sweep) reason |= GPR_FLAG_IMMEDIATE_SWEEP;
9569
9570 gc_report(1, objspace, "gc_start(reason: %x) => %u, %d, %d\n",
9571 reason,
9572 do_full_mark, !is_incremental_marking(objspace), objspace->flags.immediate_sweep);
9573
9574#if USE_DEBUG_COUNTER
9575 RB_DEBUG_COUNTER_INC(gc_count);
9576
9577 if (reason & GPR_FLAG_MAJOR_MASK) {
9578 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_nofree, reason & GPR_FLAG_MAJOR_BY_NOFREE);
9579 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldgen, reason & GPR_FLAG_MAJOR_BY_OLDGEN);
9580 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_shady, reason & GPR_FLAG_MAJOR_BY_SHADY);
9581 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_force, reason & GPR_FLAG_MAJOR_BY_FORCE);
9582#if RGENGC_ESTIMATE_OLDMALLOC
9583 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldmalloc, reason & GPR_FLAG_MAJOR_BY_OLDMALLOC);
9584#endif
9585 }
9586 else {
9587 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_newobj, reason & GPR_FLAG_NEWOBJ);
9588 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_malloc, reason & GPR_FLAG_MALLOC);
9589 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_method, reason & GPR_FLAG_METHOD);
9590 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_capi, reason & GPR_FLAG_CAPI);
9591 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_stress, reason & GPR_FLAG_STRESS);
9592 }
9593#endif
9594
9595 objspace->profile.count++;
9596 objspace->profile.latest_gc_info = reason;
9597 objspace->profile.total_allocated_objects_at_gc_start = total_allocated_objects(objspace);
9598 objspace->profile.heap_used_at_gc_start = heap_allocated_pages;
9599 objspace->profile.weak_references_count = 0;
9600 objspace->profile.retained_weak_references_count = 0;
9601 gc_prof_setup_new_record(objspace, reason);
9602 gc_reset_malloc_info(objspace, do_full_mark);
9603
9604 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_START, 0 /* TODO: pass minor/immediate flag? */);
9605 GC_ASSERT(during_gc);
9606
9607 gc_prof_timer_start(objspace);
9608 {
9609 if (gc_marks(objspace, do_full_mark)) {
9610 gc_sweep(objspace);
9611 }
9612 }
9613 gc_prof_timer_stop(objspace);
9614
9615 gc_exit(objspace, gc_enter_event_start, &lock_lev);
9616 return TRUE;
9617}
9618
9619static void
9620gc_rest(rb_objspace_t *objspace)
9621{
9622 int marking = is_incremental_marking(objspace);
9623 int sweeping = is_lazy_sweeping(objspace);
9624
9625 if (marking || sweeping) {
9626 unsigned int lock_lev;
9627 gc_enter(objspace, gc_enter_event_rest, &lock_lev);
9628
9629 if (RGENGC_CHECK_MODE >= 2) gc_verify_internal_consistency(objspace);
9630
9631 if (is_incremental_marking(objspace)) {
9632 gc_marking_enter(objspace);
9633 gc_marks_rest(objspace);
9634 gc_marking_exit(objspace);
9635
9636 gc_sweep(objspace);
9637 }
9638
9639 if (is_lazy_sweeping(objspace)) {
9640 gc_sweeping_enter(objspace);
9641 gc_sweep_rest(objspace);
9642 gc_sweeping_exit(objspace);
9643 }
9644
9645 gc_exit(objspace, gc_enter_event_rest, &lock_lev);
9646 }
9647}
9648
9649struct objspace_and_reason {
9650    rb_objspace_t *objspace;
9651 unsigned int reason;
9652};
9653
9654static void
9655gc_current_status_fill(rb_objspace_t *objspace, char *buff)
9656{
9657 int i = 0;
9658 if (is_marking(objspace)) {
9659 buff[i++] = 'M';
9660 if (is_full_marking(objspace)) buff[i++] = 'F';
9661 if (is_incremental_marking(objspace)) buff[i++] = 'I';
9662 }
9663 else if (is_sweeping(objspace)) {
9664 buff[i++] = 'S';
9665 if (is_lazy_sweeping(objspace)) buff[i++] = 'L';
9666 }
9667 else {
9668 buff[i++] = 'N';
9669 }
9670 buff[i] = '\0';
9671}
9672
9673static const char *
9674gc_current_status(rb_objspace_t *objspace)
9675{
9676 static char buff[0x10];
9677 gc_current_status_fill(objspace, buff);
9678 return buff;
9679}
9680
9681#if PRINT_ENTER_EXIT_TICK
9682
9683static tick_t last_exit_tick;
9684static tick_t enter_tick;
9685static int enter_count = 0;
9686static char last_gc_status[0x10];
9687
9688static inline void
9689gc_record(rb_objspace_t *objspace, int direction, const char *event)
9690{
9691 if (direction == 0) { /* enter */
9692 enter_count++;
9693 enter_tick = tick();
9694 gc_current_status_fill(objspace, last_gc_status);
9695 }
9696 else { /* exit */
9697 tick_t exit_tick = tick();
9698 char current_gc_status[0x10];
9699 gc_current_status_fill(objspace, current_gc_status);
9700#if 1
9701 /* [last mutator time] [gc time] [event] */
9702 fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
9703 enter_tick - last_exit_tick,
9704 exit_tick - enter_tick,
9705 event,
9706 last_gc_status, current_gc_status,
9707 (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
9708 last_exit_tick = exit_tick;
9709#else
9710 /* [enter_tick] [gc time] [event] */
9711 fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
9712 enter_tick,
9713 exit_tick - enter_tick,
9714 event,
9715 last_gc_status, current_gc_status,
9716 (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
9717#endif
9718 }
9719}
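/* Illustrative output (values invented): with PRINT_ENTER_EXIT_TICK enabled,
 * each GC exit prints one tab-separated record of the form
 *
 *     1203450	80321	start	[N->SL|-]
 *
 * i.e. ticks spent in the mutator since the previous GC exit, ticks spent in
 * this GC section, the enter event name, the status transition using the
 * letter codes from gc_current_status_fill, and '+' for a major or '-' for a
 * minor collection. Actual magnitudes depend on the platform tick source. */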
9720#else /* PRINT_ENTER_EXIT_TICK */
9721static inline void
9722gc_record(rb_objspace_t *objspace, int direction, const char *event)
9723{
9724 /* null */
9725}
9726#endif /* PRINT_ENTER_EXIT_TICK */
9727
9728static const char *
9729gc_enter_event_cstr(enum gc_enter_event event)
9730{
9731 switch (event) {
9732 case gc_enter_event_start: return "start";
9733 case gc_enter_event_continue: return "continue";
9734 case gc_enter_event_rest: return "rest";
9735 case gc_enter_event_finalizer: return "finalizer";
9736 case gc_enter_event_rb_memerror: return "rb_memerror";
9737 }
9738 return NULL;
9739}
9740
9741static void
9742gc_enter_count(enum gc_enter_event event)
9743{
9744 switch (event) {
9745 case gc_enter_event_start: RB_DEBUG_COUNTER_INC(gc_enter_start); break;
9746 case gc_enter_event_continue: RB_DEBUG_COUNTER_INC(gc_enter_continue); break;
9747 case gc_enter_event_rest: RB_DEBUG_COUNTER_INC(gc_enter_rest); break;
9748 case gc_enter_event_finalizer: RB_DEBUG_COUNTER_INC(gc_enter_finalizer); break;
9749 case gc_enter_event_rb_memerror: /* nothing */ break;
9750 }
9751}
9752
9753static bool current_process_time(struct timespec *ts);
9754
9755static void
9756gc_clock_start(struct timespec *ts)
9757{
9758 if (!current_process_time(ts)) {
9759 ts->tv_sec = 0;
9760 ts->tv_nsec = 0;
9761 }
9762}
9763
9764static uint64_t
9765gc_clock_end(struct timespec *ts)
9766{
9767 struct timespec end_time;
9768
9769 if ((ts->tv_sec > 0 || ts->tv_nsec > 0) &&
9770 current_process_time(&end_time) &&
9771 end_time.tv_sec >= ts->tv_sec) {
9772 return (uint64_t)(end_time.tv_sec - ts->tv_sec) * (1000 * 1000 * 1000) +
9773 (end_time.tv_nsec - ts->tv_nsec);
9774 }
9775
9776 return 0;
9777}
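/* Worked example (numbers invented): if gc_clock_start stored
 * ts = { .tv_sec = 2, .tv_nsec = 900000000 } and current_process_time() now
 * reports { .tv_sec = 4, .tv_nsec = 100000000 }, the expression above yields
 * (4 - 2) * 1000000000 + (100000000 - 900000000) = 1200000000 ns (1.2 s).
 * The nanosecond difference may be negative; adding it to the unsigned
 * product wraps modulo 2^64 and still produces the correct non-negative
 * total. */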
9778
9779static inline void
9780gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev)
9781{
9782 RB_VM_LOCK_ENTER_LEV(lock_lev);
9783
9784 switch (event) {
9785 case gc_enter_event_rest:
9786 if (!is_marking(objspace)) break;
9787 // fall through
9788 case gc_enter_event_start:
9789 case gc_enter_event_continue:
9790 // stop other ractors
9791 rb_vm_barrier();
9792 break;
9793 default:
9794 break;
9795 }
9796
9797 gc_enter_count(event);
9798 if (UNLIKELY(during_gc != 0)) rb_bug("during_gc != 0");
9799 if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);
9800
9801 during_gc = TRUE;
9802 RUBY_DEBUG_LOG("%s (%s)",gc_enter_event_cstr(event), gc_current_status(objspace));
9803 gc_report(1, objspace, "gc_enter: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
9804 gc_record(objspace, 0, gc_enter_event_cstr(event));
9805 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_ENTER, 0); /* TODO: which parameter should be passed? */
9806}
9807
9808static inline void
9809gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev)
9810{
9811 GC_ASSERT(during_gc != 0);
9812
9813 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_EXIT, 0); /* TODO: which parameter should be passed? */
9814 gc_record(objspace, 1, gc_enter_event_cstr(event));
9815 RUBY_DEBUG_LOG("%s (%s)", gc_enter_event_cstr(event), gc_current_status(objspace));
9816 gc_report(1, objspace, "gc_exit: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
9817 during_gc = FALSE;
9818
9819 RB_VM_LOCK_LEAVE_LEV(lock_lev);
9820}
9821
9822#ifndef MEASURE_GC
9823#define MEASURE_GC (objspace->flags.measure_gc)
9824#endif
9825
9826static void
9827gc_marking_enter(rb_objspace_t *objspace)
9828{
9829 GC_ASSERT(during_gc != 0);
9830
9831 if (MEASURE_GC) {
9832 gc_clock_start(&objspace->profile.marking_start_time);
9833 }
9834}
9835
9836static void
9837gc_marking_exit(rb_objspace_t *objspace)
9838{
9839 GC_ASSERT(during_gc != 0);
9840
9841 if (MEASURE_GC) {
9842 objspace->profile.marking_time_ns += gc_clock_end(&objspace->profile.marking_start_time);
9843 }
9844}
9845
9846static void
9847gc_sweeping_enter(rb_objspace_t *objspace)
9848{
9849 GC_ASSERT(during_gc != 0);
9850
9851 if (MEASURE_GC) {
9852 gc_clock_start(&objspace->profile.sweeping_start_time);
9853 }
9854}
9855
9856static void
9857gc_sweeping_exit(rb_objspace_t *objspace)
9858{
9859 GC_ASSERT(during_gc != 0);
9860
9861 if (MEASURE_GC) {
9862 objspace->profile.sweeping_time_ns += gc_clock_end(&objspace->profile.sweeping_start_time);
9863 }
9864}
9865
9866static void *
9867gc_with_gvl(void *ptr)
9868{
9869 struct objspace_and_reason *oar = (struct objspace_and_reason *)ptr;
9870 return (void *)(VALUE)garbage_collect(oar->objspace, oar->reason);
9871}
9872
9873static int
9874garbage_collect_with_gvl(rb_objspace_t *objspace, unsigned int reason)
9875{
9876 if (dont_gc_val()) return TRUE;
9877 if (ruby_thread_has_gvl_p()) {
9878 return garbage_collect(objspace, reason);
9879 }
9880 else {
9881 if (ruby_native_thread_p()) {
9882 struct objspace_and_reason oar;
9883 oar.objspace = objspace;
9884 oar.reason = reason;
9885 return (int)(VALUE)rb_thread_call_with_gvl(gc_with_gvl, (void *)&oar);
9886 }
9887 else {
9888 /* no ruby thread */
9889 fprintf(stderr, "[FATAL] failed to allocate memory\n");
9890 exit(EXIT_FAILURE);
9891 }
9892 }
9893}
9894
9895static int
9896gc_set_candidate_object_i(void *vstart, void *vend, size_t stride, void *data)
9897{
9898 rb_objspace_t *objspace = &rb_objspace;
9899 VALUE v = (VALUE)vstart;
9900 for (; v != (VALUE)vend; v += stride) {
9901 switch (BUILTIN_TYPE(v)) {
9902 case T_NONE:
9903 case T_ZOMBIE:
9904 break;
9905 case T_STRING:
9906            // precompute the string coderange. This both saves time for when it will be
9907            // eventually needed, and avoids mutating heap pages after a potential fork.
9908            rb_enc_str_coderange(v);
9909            // fall through
9910 default:
9911 if (!RVALUE_OLD_P(v) && !RVALUE_WB_UNPROTECTED(v)) {
9912 RVALUE_AGE_SET_CANDIDATE(objspace, v);
9913 }
9914 }
9915 }
9916
9917 return 0;
9918}
9919
9920static VALUE
9921gc_start_internal(rb_execution_context_t *ec, VALUE self, VALUE full_mark, VALUE immediate_mark, VALUE immediate_sweep, VALUE compact)
9922{
9923 rb_objspace_t *objspace = &rb_objspace;
9924 unsigned int reason = (GPR_FLAG_FULL_MARK |
9925 GPR_FLAG_IMMEDIATE_MARK |
9926 GPR_FLAG_IMMEDIATE_SWEEP |
9927 GPR_FLAG_METHOD);
9928
9929 /* For now, compact implies full mark / sweep, so ignore other flags */
9930 if (RTEST(compact)) {
9931 GC_ASSERT(GC_COMPACTION_SUPPORTED);
9932
9933 reason |= GPR_FLAG_COMPACT;
9934 }
9935 else {
9936 if (!RTEST(full_mark)) reason &= ~GPR_FLAG_FULL_MARK;
9937 if (!RTEST(immediate_mark)) reason &= ~GPR_FLAG_IMMEDIATE_MARK;
9938 if (!RTEST(immediate_sweep)) reason &= ~GPR_FLAG_IMMEDIATE_SWEEP;
9939 }
9940
9941 garbage_collect(objspace, reason);
9942 gc_finalize_deferred(objspace);
9943
9944 return Qnil;
9945}
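/* Usage sketch (an assumption from the surrounding code, not a documented
 * contract): gc_start_internal is the primitive that backs GC.start /
 * GC.garbage_collect in gc.rb, so the flag clearing above corresponds to the
 * Ruby-level keyword arguments, e.g.
 *
 *     GC.start(full_mark: false, immediate_sweep: false)  # minor GC, lazy sweep
 *     GC.start                                            # full mark, immediate sweep
 *
 * The compact argument is only ever true for internal callers such as
 * GC.compact and GC.verify_compaction_references below. */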
9946
9947static void
9948free_empty_pages(void)
9949{
9950 rb_objspace_t *objspace = &rb_objspace;
9951
9952 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
9953 /* Move all empty pages to the tomb heap for freeing. */
9954 rb_size_pool_t *size_pool = &size_pools[i];
9955 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
9956 rb_heap_t *tomb_heap = SIZE_POOL_TOMB_HEAP(size_pool);
9957
9958 size_t freed_pages = 0;
9959
9960 struct heap_page **next_page_ptr = &heap->free_pages;
9961 struct heap_page *page = heap->free_pages;
9962 while (page) {
9963            /* All finalizers should have been run in gc_start_internal, so there
9964 * should be no objects that require finalization. */
9965 GC_ASSERT(page->final_slots == 0);
9966
9967 struct heap_page *next_page = page->free_next;
9968
9969 if (page->free_slots == page->total_slots) {
9970 heap_unlink_page(objspace, heap, page);
9971 heap_add_page(objspace, size_pool, tomb_heap, page);
9972 freed_pages++;
9973 }
9974 else {
9975 *next_page_ptr = page;
9976 next_page_ptr = &page->free_next;
9977 }
9978
9979 page = next_page;
9980 }
9981
9982 *next_page_ptr = NULL;
9983
9984 size_pool_allocatable_pages_set(objspace, size_pool, size_pool->allocatable_pages + freed_pages);
9985 }
9986
9987 heap_pages_free_unused_pages(objspace);
9988}
9989
9990void
9991rb_gc_prepare_heap(void)
9992{
9993 rb_objspace_each_objects(gc_set_candidate_object_i, NULL);
9994 gc_start_internal(NULL, Qtrue, Qtrue, Qtrue, Qtrue, Qtrue);
9995 free_empty_pages();
9996
9997#if defined(HAVE_MALLOC_TRIM) && !defined(RUBY_ALTERNATIVE_MALLOC_HEADER)
9998 malloc_trim(0);
9999#endif
10000}
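/* Hedged note: rb_gc_prepare_heap is exported for the VM rather than used in
 * this file; aging surviving objects, forcing a compacting major GC, freeing
 * empty pages, and trimming malloc arenas is the kind of pre-fork warmup that
 * Process.warmup performs, which appears to be its intended caller (an
 * assumption based on the behavior above, not stated in this file). */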
10001
10002static int
10003gc_is_moveable_obj(rb_objspace_t *objspace, VALUE obj)
10004{
10005 GC_ASSERT(!SPECIAL_CONST_P(obj));
10006
10007 switch (BUILTIN_TYPE(obj)) {
10008 case T_NONE:
10009 case T_NIL:
10010 case T_MOVED:
10011 case T_ZOMBIE:
10012 return FALSE;
10013 case T_SYMBOL:
10014 if (DYNAMIC_SYM_P(obj) && (RSYMBOL(obj)->id & ~ID_SCOPE_MASK)) {
10015 return FALSE;
10016 }
10017 /* fall through */
10018 case T_STRING:
10019 case T_OBJECT:
10020 case T_FLOAT:
10021 case T_IMEMO:
10022 case T_ARRAY:
10023 case T_BIGNUM:
10024 case T_ICLASS:
10025 case T_MODULE:
10026 case T_REGEXP:
10027 case T_DATA:
10028 case T_MATCH:
10029 case T_STRUCT:
10030 case T_HASH:
10031 case T_FILE:
10032 case T_COMPLEX:
10033 case T_RATIONAL:
10034 case T_NODE:
10035 case T_CLASS:
10036 if (FL_TEST(obj, FL_FINALIZE)) {
10037 /* The finalizer table is a numtable. It looks up objects by address.
10038 * We can't mark the keys in the finalizer table because that would
10039 * prevent the objects from being collected. This check prevents
10040 * objects that are keys in the finalizer table from being moved
10041 * without directly pinning them. */
10042 GC_ASSERT(st_is_member(finalizer_table, obj));
10043
10044 return FALSE;
10045 }
10046 GC_ASSERT(RVALUE_MARKED(obj));
10047 GC_ASSERT(!RVALUE_PINNED(obj));
10048
10049 return TRUE;
10050
10051 default:
10052 rb_bug("gc_is_moveable_obj: unreachable (%d)", (int)BUILTIN_TYPE(obj));
10053 break;
10054 }
10055
10056 return FALSE;
10057}
10058
10059static VALUE
10060gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free, size_t src_slot_size, size_t slot_size)
10061{
10062 int marked;
10063 int wb_unprotected;
10064 int uncollectible;
10065 int age;
10066 RVALUE *dest = (RVALUE *)free;
10067 RVALUE *src = (RVALUE *)scan;
10068
10069 gc_report(4, objspace, "Moving object: %p -> %p\n", (void*)scan, (void *)free);
10070
10071 GC_ASSERT(BUILTIN_TYPE(scan) != T_NONE);
10072 GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(free), free));
10073
10074 GC_ASSERT(!RVALUE_MARKING((VALUE)src));
10075
10076 /* Save off bits for current object. */
10077 marked = rb_objspace_marked_object_p((VALUE)src);
10078 wb_unprotected = RVALUE_WB_UNPROTECTED((VALUE)src);
10079 uncollectible = RVALUE_UNCOLLECTIBLE((VALUE)src);
10080 bool remembered = RVALUE_REMEMBERED((VALUE)src);
10081 age = RVALUE_AGE_GET((VALUE)src);
10082
10083 /* Clear bits for eventual T_MOVED */
10084 CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)src), (VALUE)src);
10085 CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)src), (VALUE)src);
10086 CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)src), (VALUE)src);
10087 CLEAR_IN_BITMAP(GET_HEAP_PAGE((VALUE)src)->remembered_bits, (VALUE)src);
10088
10089 if (FL_TEST((VALUE)src, FL_EXIVAR)) {
10090 /* Resizing the st table could cause a malloc */
10091 DURING_GC_COULD_MALLOC_REGION_START();
10092 {
10093 rb_mv_generic_ivar((VALUE)src, (VALUE)dest);
10094 }
10095 DURING_GC_COULD_MALLOC_REGION_END();
10096 }
10097
10098 st_data_t srcid = (st_data_t)src, id;
10099
10100 /* If the source object's object_id has been seen, we need to update
10101     * the object-to-object_id mapping. */
10102 if (st_lookup(objspace->obj_to_id_tbl, srcid, &id)) {
10103 gc_report(4, objspace, "Moving object with seen id: %p -> %p\n", (void *)src, (void *)dest);
10104 /* Resizing the st table could cause a malloc */
10105 DURING_GC_COULD_MALLOC_REGION_START();
10106 {
10107 st_delete(objspace->obj_to_id_tbl, &srcid, 0);
10108 st_insert(objspace->obj_to_id_tbl, (st_data_t)dest, id);
10109 }
10110 DURING_GC_COULD_MALLOC_REGION_END();
10111 }
10112
10113 /* Move the object */
10114 memcpy(dest, src, MIN(src_slot_size, slot_size));
10115
10116 if (RVALUE_OVERHEAD > 0) {
10117 void *dest_overhead = (void *)(((uintptr_t)dest) + slot_size - RVALUE_OVERHEAD);
10118 void *src_overhead = (void *)(((uintptr_t)src) + src_slot_size - RVALUE_OVERHEAD);
10119
10120 memcpy(dest_overhead, src_overhead, RVALUE_OVERHEAD);
10121 }
10122
10123 memset(src, 0, src_slot_size);
10124 RVALUE_AGE_RESET((VALUE)src);
10125
10126 /* Set bits for object in new location */
10127 if (remembered) {
10128 MARK_IN_BITMAP(GET_HEAP_PAGE(dest)->remembered_bits, (VALUE)dest);
10129 }
10130 else {
10131 CLEAR_IN_BITMAP(GET_HEAP_PAGE(dest)->remembered_bits, (VALUE)dest);
10132 }
10133
10134 if (marked) {
10135 MARK_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)dest), (VALUE)dest);
10136 }
10137 else {
10138 CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)dest), (VALUE)dest);
10139 }
10140
10141 if (wb_unprotected) {
10142 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)dest), (VALUE)dest);
10143 }
10144 else {
10145 CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)dest), (VALUE)dest);
10146 }
10147
10148 if (uncollectible) {
10149 MARK_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)dest), (VALUE)dest);
10150 }
10151 else {
10152 CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)dest), (VALUE)dest);
10153 }
10154
10155 RVALUE_AGE_SET((VALUE)dest, age);
10156 /* Assign forwarding address */
10157 src->as.moved.flags = T_MOVED;
10158 src->as.moved.dummy = Qundef;
10159 src->as.moved.destination = (VALUE)dest;
10160 GC_ASSERT(BUILTIN_TYPE((VALUE)dest) != T_NONE);
10161
10162 return (VALUE)src;
10163}
10164
10165#if GC_CAN_COMPILE_COMPACTION
10166static int
10167compare_pinned_slots(const void *left, const void *right, void *dummy)
10168{
10169 struct heap_page *left_page;
10170 struct heap_page *right_page;
10171
10172 left_page = *(struct heap_page * const *)left;
10173 right_page = *(struct heap_page * const *)right;
10174
10175 return left_page->pinned_slots - right_page->pinned_slots;
10176}
10177
10178static int
10179compare_free_slots(const void *left, const void *right, void *dummy)
10180{
10181 struct heap_page *left_page;
10182 struct heap_page *right_page;
10183
10184 left_page = *(struct heap_page * const *)left;
10185 right_page = *(struct heap_page * const *)right;
10186
10187 return left_page->free_slots - right_page->free_slots;
10188}
10189
10190static void
10191gc_sort_heap_by_compare_func(rb_objspace_t *objspace, gc_compact_compare_func compare_func)
10192{
10193 for (int j = 0; j < SIZE_POOL_COUNT; j++) {
10194 rb_size_pool_t *size_pool = &size_pools[j];
10195
10196 size_t total_pages = SIZE_POOL_EDEN_HEAP(size_pool)->total_pages;
10197 size_t size = size_mul_or_raise(total_pages, sizeof(struct heap_page *), rb_eRuntimeError);
10198 struct heap_page *page = 0, **page_list = malloc(size);
10199 size_t i = 0;
10200
10201 SIZE_POOL_EDEN_HEAP(size_pool)->free_pages = NULL;
10202 ccan_list_for_each(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node) {
10203 page_list[i++] = page;
10204 GC_ASSERT(page);
10205 }
10206
10207 GC_ASSERT((size_t)i == total_pages);
10208
10209 /* Sort the heap so "filled pages" are first. `heap_add_page` adds to the
10210 * head of the list, so empty pages will end up at the start of the heap */
10211 ruby_qsort(page_list, total_pages, sizeof(struct heap_page *), compare_func, NULL);
10212
10213 /* Reset the eden heap */
10214 ccan_list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages);
10215
10216 for (i = 0; i < total_pages; i++) {
10217 ccan_list_add(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, &page_list[i]->page_node);
10218 if (page_list[i]->free_slots != 0) {
10219 heap_add_freepage(SIZE_POOL_EDEN_HEAP(size_pool), page_list[i]);
10220 }
10221 }
10222
10223 free(page_list);
10224 }
10225}
10226#endif
10227
10228static void
10229gc_ref_update_array(rb_objspace_t * objspace, VALUE v)
10230{
10231 if (ARY_SHARED_P(v)) {
10232 VALUE old_root = RARRAY(v)->as.heap.aux.shared_root;
10233
10234 UPDATE_IF_MOVED(objspace, RARRAY(v)->as.heap.aux.shared_root);
10235
10236 VALUE new_root = RARRAY(v)->as.heap.aux.shared_root;
10237 // If the root is embedded and its location has changed
10238 if (ARY_EMBED_P(new_root) && new_root != old_root) {
10239 size_t offset = (size_t)(RARRAY(v)->as.heap.ptr - RARRAY(old_root)->as.ary);
10240 GC_ASSERT(RARRAY(v)->as.heap.ptr >= RARRAY(old_root)->as.ary);
10241 RARRAY(v)->as.heap.ptr = RARRAY(new_root)->as.ary + offset;
10242 }
10243 }
10244 else {
10245 long len = RARRAY_LEN(v);
10246
10247 if (len > 0) {
10248 VALUE *ptr = (VALUE *)RARRAY_CONST_PTR(v);
10249 for (long i = 0; i < len; i++) {
10250 UPDATE_IF_MOVED(objspace, ptr[i]);
10251 }
10252 }
10253
10254 if (rb_gc_obj_slot_size(v) >= rb_ary_size_as_embedded(v)) {
10255 if (rb_ary_embeddable_p(v)) {
10256 rb_ary_make_embedded(v);
10257 }
10258 }
10259 }
10260}
10261
10262static void gc_ref_update_table_values_only(rb_objspace_t *objspace, st_table *tbl);
10263
10264static void
10265gc_ref_update_object(rb_objspace_t *objspace, VALUE v)
10266{
10267 VALUE *ptr = ROBJECT_IVPTR(v);
10268
10269 if (rb_shape_obj_too_complex(v)) {
10270 gc_ref_update_table_values_only(objspace, ROBJECT_IV_HASH(v));
10271 return;
10272 }
10273
10274 size_t slot_size = rb_gc_obj_slot_size(v);
10275 size_t embed_size = rb_obj_embedded_size(ROBJECT_IV_CAPACITY(v));
10276 if (slot_size >= embed_size && !RB_FL_TEST_RAW(v, ROBJECT_EMBED)) {
10277 // Object can be re-embedded
10278 memcpy(ROBJECT(v)->as.ary, ptr, sizeof(VALUE) * ROBJECT_IV_COUNT(v));
10279 RB_FL_SET_RAW(v, ROBJECT_EMBED);
10280 xfree(ptr);
10281 ptr = ROBJECT(v)->as.ary;
10282 }
10283
10284 for (uint32_t i = 0; i < ROBJECT_IV_COUNT(v); i++) {
10285 UPDATE_IF_MOVED(objspace, ptr[i]);
10286 }
10287}
10288
10289static int
10290hash_replace_ref(st_data_t *key, st_data_t *value, st_data_t argp, int existing)
10291{
10292 rb_objspace_t *objspace = (rb_objspace_t *)argp;
10293
10294 if (gc_object_moved_p(objspace, (VALUE)*key)) {
10295 *key = rb_gc_location((VALUE)*key);
10296 }
10297
10298 if (gc_object_moved_p(objspace, (VALUE)*value)) {
10299 *value = rb_gc_location((VALUE)*value);
10300 }
10301
10302 return ST_CONTINUE;
10303}
10304
10305static int
10306hash_foreach_replace(st_data_t key, st_data_t value, st_data_t argp, int error)
10307{
10308 rb_objspace_t *objspace;
10309
10310 objspace = (rb_objspace_t *)argp;
10311
10312 if (gc_object_moved_p(objspace, (VALUE)key)) {
10313 return ST_REPLACE;
10314 }
10315
10316 if (gc_object_moved_p(objspace, (VALUE)value)) {
10317 return ST_REPLACE;
10318 }
10319 return ST_CONTINUE;
10320}
10321
10322static int
10323hash_replace_ref_value(st_data_t *key, st_data_t *value, st_data_t argp, int existing)
10324{
10325 rb_objspace_t *objspace = (rb_objspace_t *)argp;
10326
10327 if (gc_object_moved_p(objspace, (VALUE)*value)) {
10328 *value = rb_gc_location((VALUE)*value);
10329 }
10330
10331 return ST_CONTINUE;
10332}
10333
10334static int
10335hash_foreach_replace_value(st_data_t key, st_data_t value, st_data_t argp, int error)
10336{
10337 rb_objspace_t *objspace;
10338
10339 objspace = (rb_objspace_t *)argp;
10340
10341 if (gc_object_moved_p(objspace, (VALUE)value)) {
10342 return ST_REPLACE;
10343 }
10344 return ST_CONTINUE;
10345}
10346
10347static void
10348gc_ref_update_table_values_only(rb_objspace_t *objspace, st_table *tbl)
10349{
10350 if (!tbl || tbl->num_entries == 0) return;
10351
10352 if (st_foreach_with_replace(tbl, hash_foreach_replace_value, hash_replace_ref_value, (st_data_t)objspace)) {
10353 rb_raise(rb_eRuntimeError, "hash modified during iteration");
10354 }
10355}
10356
10357void
10358rb_gc_ref_update_table_values_only(st_table *tbl)
10359{
10360 gc_ref_update_table_values_only(&rb_objspace, tbl);
10361}
10362
10363static void
10364gc_update_table_refs(rb_objspace_t * objspace, st_table *tbl)
10365{
10366 if (!tbl || tbl->num_entries == 0) return;
10367
10368 if (st_foreach_with_replace(tbl, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace)) {
10369 rb_raise(rb_eRuntimeError, "hash modified during iteration");
10370 }
10371}
10372
10373/* Update MOVED references in a VALUE=>VALUE st_table */
10374void
10375rb_gc_update_tbl_refs(st_table *ptr)
10376{
10377 rb_objspace_t *objspace = &rb_objspace;
10378 gc_update_table_refs(objspace, ptr);
10379}
10380
10381static void
10382gc_ref_update_hash(rb_objspace_t * objspace, VALUE v)
10383{
10384 rb_hash_stlike_foreach_with_replace(v, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace);
10385}
10386
10387static void
10388gc_ref_update_method_entry(rb_objspace_t *objspace, rb_method_entry_t *me)
10389{
10390 rb_method_definition_t *def = me->def;
10391
10392 UPDATE_IF_MOVED(objspace, me->owner);
10393 UPDATE_IF_MOVED(objspace, me->defined_class);
10394
10395 if (def) {
10396 switch (def->type) {
10397 case VM_METHOD_TYPE_ISEQ:
10398 if (def->body.iseq.iseqptr) {
10399 TYPED_UPDATE_IF_MOVED(objspace, rb_iseq_t *, def->body.iseq.iseqptr);
10400 }
10401 TYPED_UPDATE_IF_MOVED(objspace, rb_cref_t *, def->body.iseq.cref);
10402 break;
10403 case VM_METHOD_TYPE_ATTRSET:
10404 case VM_METHOD_TYPE_IVAR:
10405 UPDATE_IF_MOVED(objspace, def->body.attr.location);
10406 break;
10407 case VM_METHOD_TYPE_BMETHOD:
10408 UPDATE_IF_MOVED(objspace, def->body.bmethod.proc);
10409 break;
10410 case VM_METHOD_TYPE_ALIAS:
10411 TYPED_UPDATE_IF_MOVED(objspace, struct rb_method_entry_struct *, def->body.alias.original_me);
10412 return;
10413 case VM_METHOD_TYPE_REFINED:
10414 TYPED_UPDATE_IF_MOVED(objspace, struct rb_method_entry_struct *, def->body.refined.orig_me);
10415 break;
10416 case VM_METHOD_TYPE_CFUNC:
10417 case VM_METHOD_TYPE_ZSUPER:
10418 case VM_METHOD_TYPE_MISSING:
10419 case VM_METHOD_TYPE_OPTIMIZED:
10420 case VM_METHOD_TYPE_UNDEF:
10421 case VM_METHOD_TYPE_NOTIMPLEMENTED:
10422 break;
10423 }
10424 }
10425}
10426
10427static void
10428gc_update_values(rb_objspace_t *objspace, long n, VALUE *values)
10429{
10430 long i;
10431
10432 for (i=0; i<n; i++) {
10433 UPDATE_IF_MOVED(objspace, values[i]);
10434 }
10435}
10436
10437void
10438rb_gc_update_values(long n, VALUE *values)
10439{
10440 gc_update_values(&rb_objspace, n, values);
10441}
10442
10443static bool
10444moved_or_living_object_strictly_p(rb_objspace_t *objspace, VALUE obj)
10445{
10446 return obj &&
10447 is_pointer_to_heap(objspace, (void *)obj) &&
10448 (is_live_object(objspace, obj) || BUILTIN_TYPE(obj) == T_MOVED);
10449}
10450
10451static void
10452gc_ref_update_imemo(rb_objspace_t *objspace, VALUE obj)
10453{
10454 switch (imemo_type(obj)) {
10455 case imemo_env:
10456 {
10457 rb_env_t *env = (rb_env_t *)obj;
10458 if (LIKELY(env->ep)) {
10459            // env->ep can be NULL here just after newobj().
10460 TYPED_UPDATE_IF_MOVED(objspace, rb_iseq_t *, env->iseq);
10461 UPDATE_IF_MOVED(objspace, env->ep[VM_ENV_DATA_INDEX_ENV]);
10462 gc_update_values(objspace, (long)env->env_size, (VALUE *)env->env);
10463 }
10464 }
10465 break;
10466 case imemo_cref:
10467 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.cref.klass_or_self);
10468 TYPED_UPDATE_IF_MOVED(objspace, struct rb_cref_struct *, RANY(obj)->as.imemo.cref.next);
10469 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.cref.refinements);
10470 break;
10471 case imemo_svar:
10472 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.cref_or_me);
10473 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.lastline);
10474 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.backref);
10475 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.others);
10476 break;
10477 case imemo_throw_data:
10478 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.throw_data.throw_obj);
10479 break;
10480 case imemo_ifunc:
10481 break;
10482 case imemo_memo:
10483 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.memo.v1);
10484 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.memo.v2);
10485 break;
10486 case imemo_ment:
10487 gc_ref_update_method_entry(objspace, &RANY(obj)->as.imemo.ment);
10488 break;
10489 case imemo_iseq:
10490 rb_iseq_mark_and_move((rb_iseq_t *)obj, true);
10491 break;
10492 case imemo_ast:
10493 rb_ast_update_references((rb_ast_t *)obj);
10494 break;
10495 case imemo_callcache:
10496 {
10497 const struct rb_callcache *cc = (const struct rb_callcache *)obj;
10498
10499 if (!cc->klass) {
10500 // already invalidated
10501 }
10502 else {
10503 if (moved_or_living_object_strictly_p(objspace, cc->klass) &&
10504 moved_or_living_object_strictly_p(objspace, (VALUE)cc->cme_)) {
10505 UPDATE_IF_MOVED(objspace, cc->klass);
10506 TYPED_UPDATE_IF_MOVED(objspace, struct rb_callable_method_entry_struct *, cc->cme_);
10507 }
10508 else {
10509 vm_cc_invalidate(cc);
10510 }
10511 }
10512 }
10513 break;
10514 case imemo_constcache:
10515        {
10516            const struct iseq_inline_constant_cache_entry *ice = (struct iseq_inline_constant_cache_entry *)obj;
10517            UPDATE_IF_MOVED(objspace, ice->value);
10518 }
10519 break;
10520 case imemo_parser_strterm:
10521 case imemo_tmpbuf:
10522 case imemo_callinfo:
10523 break;
10524 default:
10525 rb_bug("not reachable %d", imemo_type(obj));
10526 break;
10527 }
10528}
10529
10530static enum rb_id_table_iterator_result
10531check_id_table_move(VALUE value, void *data)
10532{
10533 rb_objspace_t *objspace = (rb_objspace_t *)data;
10534
10535 if (gc_object_moved_p(objspace, (VALUE)value)) {
10536 return ID_TABLE_REPLACE;
10537 }
10538
10539 return ID_TABLE_CONTINUE;
10540}
10541
10542/* Returns the new location of an object, if it moved. Otherwise returns
10543 * the existing location. */
10544VALUE
10545rb_gc_location(VALUE value)
10546{
10547
10548 VALUE destination;
10549
10550 if (!SPECIAL_CONST_P(value)) {
10551 void *poisoned = asan_unpoison_object_temporary(value);
10552
10553 if (BUILTIN_TYPE(value) == T_MOVED) {
10554 destination = (VALUE)RMOVED(value)->destination;
10555 GC_ASSERT(BUILTIN_TYPE(destination) != T_NONE);
10556 }
10557 else {
10558 destination = value;
10559 }
10560
10561 /* Re-poison slot if it's not the one we want */
10562 if (poisoned) {
10563 GC_ASSERT(BUILTIN_TYPE(value) == T_NONE);
10564 asan_poison_object(value);
10565 }
10566 }
10567 else {
10568 destination = value;
10569 }
10570
10571 return destination;
10572}
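/* Minimal sketch of how an extension consumes rb_gc_location (hypothetical
 * code, not part of gc.c): a T_DATA dcompact callback updates the VALUEs it
 * owns, mirroring UPDATE_IF_MOVED in gc_update_object_references below.
 *
 *     struct my_ext_data { VALUE cache; VALUE owner; };
 *
 *     static void
 *     my_ext_compact(void *ptr)
 *     {
 *         struct my_ext_data *d = ptr;
 *         d->cache = rb_gc_location(d->cache);
 *         d->owner = rb_gc_location(d->owner);
 *     }
 *
 * The struct and function names are invented for illustration. */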
10573
10574static enum rb_id_table_iterator_result
10575update_id_table(VALUE *value, void *data, int existing)
10576{
10577 rb_objspace_t *objspace = (rb_objspace_t *)data;
10578
10579 if (gc_object_moved_p(objspace, (VALUE)*value)) {
10580 *value = rb_gc_location((VALUE)*value);
10581 }
10582
10583 return ID_TABLE_CONTINUE;
10584}
10585
10586static void
10587update_m_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
10588{
10589 if (tbl) {
10590 rb_id_table_foreach_values_with_replace(tbl, check_id_table_move, update_id_table, objspace);
10591 }
10592}
10593
10594static enum rb_id_table_iterator_result
10595update_cc_tbl_i(VALUE ccs_ptr, void *data)
10596{
10597 rb_objspace_t *objspace = (rb_objspace_t *)data;
10598 struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
10599 VM_ASSERT(vm_ccs_p(ccs));
10600
10601 if (gc_object_moved_p(objspace, (VALUE)ccs->cme)) {
10602 ccs->cme = (const rb_callable_method_entry_t *)rb_gc_location((VALUE)ccs->cme);
10603 }
10604
10605 for (int i=0; i<ccs->len; i++) {
10606 if (gc_object_moved_p(objspace, (VALUE)ccs->entries[i].ci)) {
10607 ccs->entries[i].ci = (struct rb_callinfo *)rb_gc_location((VALUE)ccs->entries[i].ci);
10608 }
10609 if (gc_object_moved_p(objspace, (VALUE)ccs->entries[i].cc)) {
10610 ccs->entries[i].cc = (struct rb_callcache *)rb_gc_location((VALUE)ccs->entries[i].cc);
10611 }
10612 }
10613
10614 // do not replace
10615 return ID_TABLE_CONTINUE;
10616}
10617
10618static void
10619update_cc_tbl(rb_objspace_t *objspace, VALUE klass)
10620{
10621 struct rb_id_table *tbl = RCLASS_CC_TBL(klass);
10622 if (tbl) {
10623 rb_id_table_foreach_values(tbl, update_cc_tbl_i, objspace);
10624 }
10625}
10626
10627static enum rb_id_table_iterator_result
10628update_cvc_tbl_i(VALUE cvc_entry, void *data)
10629{
10630 struct rb_cvar_class_tbl_entry *entry;
10631 rb_objspace_t * objspace = (rb_objspace_t *)data;
10632
10633 entry = (struct rb_cvar_class_tbl_entry *)cvc_entry;
10634
10635 if (entry->cref) {
10636 TYPED_UPDATE_IF_MOVED(objspace, rb_cref_t *, entry->cref);
10637 }
10638
10639 entry->class_value = rb_gc_location(entry->class_value);
10640
10641 return ID_TABLE_CONTINUE;
10642}
10643
10644static void
10645update_cvc_tbl(rb_objspace_t *objspace, VALUE klass)
10646{
10647 struct rb_id_table *tbl = RCLASS_CVC_TBL(klass);
10648 if (tbl) {
10649 rb_id_table_foreach_values(tbl, update_cvc_tbl_i, objspace);
10650 }
10651}
10652
10653static enum rb_id_table_iterator_result
10654mark_cvc_tbl_i(VALUE cvc_entry, void *data)
10655{
10656 rb_objspace_t *objspace = (rb_objspace_t *)data;
10657 struct rb_cvar_class_tbl_entry *entry;
10658
10659 entry = (struct rb_cvar_class_tbl_entry *)cvc_entry;
10660
10661 RUBY_ASSERT(entry->cref == 0 || (BUILTIN_TYPE((VALUE)entry->cref) == T_IMEMO && IMEMO_TYPE_P(entry->cref, imemo_cref)));
10662 gc_mark(objspace, (VALUE) entry->cref);
10663
10664 return ID_TABLE_CONTINUE;
10665}
10666
10667static void
10668mark_cvc_tbl(rb_objspace_t *objspace, VALUE klass)
10669{
10670 struct rb_id_table *tbl = RCLASS_CVC_TBL(klass);
10671 if (tbl) {
10672 rb_id_table_foreach_values(tbl, mark_cvc_tbl_i, objspace);
10673 }
10674}
10675
10676static enum rb_id_table_iterator_result
10677update_const_table(VALUE value, void *data)
10678{
10679 rb_const_entry_t *ce = (rb_const_entry_t *)value;
10680 rb_objspace_t * objspace = (rb_objspace_t *)data;
10681
10682 if (gc_object_moved_p(objspace, ce->value)) {
10683 ce->value = rb_gc_location(ce->value);
10684 }
10685
10686 if (gc_object_moved_p(objspace, ce->file)) {
10687 ce->file = rb_gc_location(ce->file);
10688 }
10689
10690 return ID_TABLE_CONTINUE;
10691}
10692
10693static void
10694update_const_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
10695{
10696 if (!tbl) return;
10697 rb_id_table_foreach_values(tbl, update_const_table, objspace);
10698}
10699
10700static void
10701update_subclass_entries(rb_objspace_t *objspace, rb_subclass_entry_t *entry)
10702{
10703 while (entry) {
10704 UPDATE_IF_MOVED(objspace, entry->klass);
10705 entry = entry->next;
10706 }
10707}
10708
10709static void
10710update_class_ext(rb_objspace_t *objspace, rb_classext_t *ext)
10711{
10712 UPDATE_IF_MOVED(objspace, ext->origin_);
10713 UPDATE_IF_MOVED(objspace, ext->includer);
10714 UPDATE_IF_MOVED(objspace, ext->refined_class);
10715 update_subclass_entries(objspace, ext->subclasses);
10716}
10717
10718static void
10719update_superclasses(rb_objspace_t *objspace, VALUE obj)
10720{
10721 if (FL_TEST_RAW(obj, RCLASS_SUPERCLASSES_INCLUDE_SELF)) {
10722 for (size_t i = 0; i < RCLASS_SUPERCLASS_DEPTH(obj) + 1; i++) {
10723 UPDATE_IF_MOVED(objspace, RCLASS_SUPERCLASSES(obj)[i]);
10724 }
10725 }
10726}
10727
10728static void
10729gc_update_object_references(rb_objspace_t *objspace, VALUE obj)
10730{
10731 RVALUE *any = RANY(obj);
10732
10733 gc_report(4, objspace, "update-refs: %p ->\n", (void *)obj);
10734
10735 if (FL_TEST(obj, FL_EXIVAR)) {
10736 rb_ref_update_generic_ivar(obj);
10737 }
10738
10739 switch (BUILTIN_TYPE(obj)) {
10740 case T_CLASS:
10741 if (FL_TEST(obj, FL_SINGLETON)) {
10742 UPDATE_IF_MOVED(objspace, RCLASS_ATTACHED_OBJECT(obj));
10743 }
10744 // Continue to the shared T_CLASS/T_MODULE
10745 case T_MODULE:
10746 if (RCLASS_SUPER((VALUE)obj)) {
10747 UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
10748 }
10749 update_m_tbl(objspace, RCLASS_M_TBL(obj));
10750 update_cc_tbl(objspace, obj);
10751 update_cvc_tbl(objspace, obj);
10752 update_superclasses(objspace, obj);
10753
10754 if (rb_shape_obj_too_complex(obj)) {
10755 gc_ref_update_table_values_only(objspace, RCLASS_IV_HASH(obj));
10756 }
10757 else {
10758 for (attr_index_t i = 0; i < RCLASS_IV_COUNT(obj); i++) {
10759 UPDATE_IF_MOVED(objspace, RCLASS_IVPTR(obj)[i]);
10760 }
10761 }
10762
10763 update_class_ext(objspace, RCLASS_EXT(obj));
10764 update_const_tbl(objspace, RCLASS_CONST_TBL(obj));
10765
10766 UPDATE_IF_MOVED(objspace, RCLASS_EXT(obj)->classpath);
10767 break;
10768
10769 case T_ICLASS:
10770 if (RICLASS_OWNS_M_TBL_P(obj)) {
10771 update_m_tbl(objspace, RCLASS_M_TBL(obj));
10772 }
10773 if (RCLASS_SUPER((VALUE)obj)) {
10774 UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
10775 }
10776 update_class_ext(objspace, RCLASS_EXT(obj));
10777 update_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
10778 update_cc_tbl(objspace, obj);
10779 break;
10780
10781 case T_IMEMO:
10782 gc_ref_update_imemo(objspace, obj);
10783 return;
10784
10785 case T_NIL:
10786 case T_FIXNUM:
10787 case T_NODE:
10788 case T_MOVED:
10789 case T_NONE:
10790 /* These can't move */
10791 return;
10792
10793 case T_ARRAY:
10794 gc_ref_update_array(objspace, obj);
10795 break;
10796
10797 case T_HASH:
10798 gc_ref_update_hash(objspace, obj);
10799 UPDATE_IF_MOVED(objspace, any->as.hash.ifnone);
10800 break;
10801
10802 case T_STRING:
10803 {
10804 if (STR_SHARED_P(obj)) {
10805 UPDATE_IF_MOVED(objspace, any->as.string.as.heap.aux.shared);
10806 }
10807
10808            /* If, after the move, the string is not embedded and can fit in the
10809             * slot it's been placed in, then re-embed it. */
10810 if (rb_gc_obj_slot_size(obj) >= rb_str_size_as_embedded(obj)) {
10811 if (!STR_EMBED_P(obj) && rb_str_reembeddable_p(obj)) {
10812 rb_str_make_embedded(obj);
10813 }
10814 }
10815
10816 break;
10817 }
10818 case T_DATA:
10819 /* Call the compaction callback, if it exists */
10820 {
10821 void *const ptr = RTYPEDDATA_P(obj) ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
10822 if (ptr) {
10823 if (RTYPEDDATA_P(obj) && gc_declarative_marking_p(any->as.typeddata.type)) {
10824 size_t *offset_list = (size_t *)RANY(obj)->as.typeddata.type->function.dmark;
10825
10826 for (size_t offset = *offset_list; offset != RUBY_REF_END; offset = *offset_list++) {
10827 VALUE *ref = (VALUE *)((char *)ptr + offset);
10828 if (SPECIAL_CONST_P(*ref)) continue;
10829 *ref = rb_gc_location(*ref);
10830 }
10831 }
10832 else if (RTYPEDDATA_P(obj)) {
10833 RUBY_DATA_FUNC compact_func = any->as.typeddata.type->function.dcompact;
10834 if (compact_func) (*compact_func)(ptr);
10835 }
10836 }
10837 }
10838 break;
10839
10840 case T_OBJECT:
10841 gc_ref_update_object(objspace, obj);
10842 break;
10843
10844 case T_FILE:
10845 if (any->as.file.fptr) {
10846 UPDATE_IF_MOVED(objspace, any->as.file.fptr->self);
10847 UPDATE_IF_MOVED(objspace, any->as.file.fptr->pathv);
10848 UPDATE_IF_MOVED(objspace, any->as.file.fptr->tied_io_for_writing);
10849 UPDATE_IF_MOVED(objspace, any->as.file.fptr->writeconv_asciicompat);
10850 UPDATE_IF_MOVED(objspace, any->as.file.fptr->writeconv_pre_ecopts);
10851 UPDATE_IF_MOVED(objspace, any->as.file.fptr->encs.ecopts);
10852 UPDATE_IF_MOVED(objspace, any->as.file.fptr->write_lock);
10853 }
10854 break;
10855 case T_REGEXP:
10856 UPDATE_IF_MOVED(objspace, any->as.regexp.src);
10857 break;
10858
10859 case T_SYMBOL:
10860 if (DYNAMIC_SYM_P((VALUE)any)) {
10861 UPDATE_IF_MOVED(objspace, RSYMBOL(any)->fstr);
10862 }
10863 break;
10864
10865 case T_FLOAT:
10866 case T_BIGNUM:
10867 break;
10868
10869 case T_MATCH:
10870 UPDATE_IF_MOVED(objspace, any->as.match.regexp);
10871
10872 if (any->as.match.str) {
10873 UPDATE_IF_MOVED(objspace, any->as.match.str);
10874 }
10875 break;
10876
10877 case T_RATIONAL:
10878 UPDATE_IF_MOVED(objspace, any->as.rational.num);
10879 UPDATE_IF_MOVED(objspace, any->as.rational.den);
10880 break;
10881
10882 case T_COMPLEX:
10883 UPDATE_IF_MOVED(objspace, any->as.complex.real);
10884 UPDATE_IF_MOVED(objspace, any->as.complex.imag);
10885
10886 break;
10887
10888 case T_STRUCT:
10889 {
10890 long i, len = RSTRUCT_LEN(obj);
10891 VALUE *ptr = (VALUE *)RSTRUCT_CONST_PTR(obj);
10892
10893 for (i = 0; i < len; i++) {
10894 UPDATE_IF_MOVED(objspace, ptr[i]);
10895 }
10896 }
10897 break;
10898 default:
10899#if GC_DEBUG
10900 rb_gcdebug_print_obj_condition((VALUE)obj);
10901 rb_obj_info_dump(obj);
10902 rb_bug("unreachable");
10903#endif
10904 break;
10905
10906 }
10907
10908 UPDATE_IF_MOVED(objspace, RBASIC(obj)->klass);
10909
10910 gc_report(4, objspace, "update-refs: %p <-\n", (void *)obj);
10911}
10912
10913static int
10914gc_ref_update(void *vstart, void *vend, size_t stride, rb_objspace_t * objspace, struct heap_page *page)
10915{
10916 VALUE v = (VALUE)vstart;
10917 asan_unlock_freelist(page);
10918 asan_lock_freelist(page);
10919 page->flags.has_uncollectible_wb_unprotected_objects = FALSE;
10920 page->flags.has_remembered_objects = FALSE;
10921
10922 /* For each object on the page */
10923 for (; v != (VALUE)vend; v += stride) {
10924 void *poisoned = asan_unpoison_object_temporary(v);
10925
10926 switch (BUILTIN_TYPE(v)) {
10927 case T_NONE:
10928 case T_MOVED:
10929 case T_ZOMBIE:
10930 break;
10931 default:
10932 if (RVALUE_WB_UNPROTECTED(v)) {
10933 page->flags.has_uncollectible_wb_unprotected_objects = TRUE;
10934 }
10935 if (RVALUE_REMEMBERED(v)) {
10936 page->flags.has_remembered_objects = TRUE;
10937 }
10938 if (page->flags.before_sweep) {
10939 if (RVALUE_MARKED(v)) {
10940 gc_update_object_references(objspace, v);
10941 }
10942 }
10943 else {
10944 gc_update_object_references(objspace, v);
10945 }
10946 }
10947
10948 if (poisoned) {
10949 asan_poison_object(v);
10950 }
10951 }
10952
10953 return 0;
10954}
10955
10956extern rb_symbols_t ruby_global_symbols;
10957#define global_symbols ruby_global_symbols
10958
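/* gc_update_references walks every eden page in every size pool, rewriting any
 * reference that still points at a T_MOVED slot to the object's new address,
 * then applies the same fix-up to the VM roots and global tables (symbol
 * tables, obj-to-id / id-to-obj tables, the finalizer table).
 * flags.during_reference_updating is set for the duration so other code can
 * detect that this phase is in progress. */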
10959static void
10960gc_update_references(rb_objspace_t *objspace)
10961{
10962 objspace->flags.during_reference_updating = true;
10963
10964 rb_execution_context_t *ec = GET_EC();
10965 rb_vm_t *vm = rb_ec_vm_ptr(ec);
10966
10967 struct heap_page *page = NULL;
10968
10969 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
10970 bool should_set_mark_bits = TRUE;
10971 rb_size_pool_t *size_pool = &size_pools[i];
10972 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
10973
10974 ccan_list_for_each(&heap->pages, page, page_node) {
10975 uintptr_t start = (uintptr_t)page->start;
10976 uintptr_t end = start + (page->total_slots * size_pool->slot_size);
10977
10978 gc_ref_update((void *)start, (void *)end, size_pool->slot_size, objspace, page);
10979 if (page == heap->sweeping_page) {
10980 should_set_mark_bits = FALSE;
10981 }
10982 if (should_set_mark_bits) {
10983 gc_setup_mark_bits(page);
10984 }
10985 }
10986 }
10987 rb_vm_update_references(vm);
10988 rb_gc_update_global_tbl();
10989 global_symbols.ids = rb_gc_location(global_symbols.ids);
10990 global_symbols.dsymbol_fstr_hash = rb_gc_location(global_symbols.dsymbol_fstr_hash);
10991 gc_ref_update_table_values_only(objspace, objspace->obj_to_id_tbl);
10992 gc_update_table_refs(objspace, objspace->id_to_obj_tbl);
10993 gc_update_table_refs(objspace, global_symbols.str_sym);
10994 gc_update_table_refs(objspace, finalizer_table);
10995
10996 objspace->flags.during_reference_updating = false;
10997}
10998
10999#if GC_CAN_COMPILE_COMPACTION
11000/*
11001 * call-seq:
11002 * GC.latest_compact_info -> hash
11003 *
11004 * Returns information about objects moved in the most recent \GC compaction.
11005 *
11006 * The returned hash has two keys :considered and :moved. The hash for
11007 * :considered lists the number of objects that were considered for movement
11008 * by the compactor, and the :moved hash lists the number of objects that
11009 * were actually moved. Some objects can't be moved (maybe they were pinned)
11010 * so these numbers can be used to calculate compaction efficiency.
11011 */
11012static VALUE
11013gc_compact_stats(VALUE self)
11014{
11015 size_t i;
11016 rb_objspace_t *objspace = &rb_objspace;
11017 VALUE h = rb_hash_new();
11018 VALUE considered = rb_hash_new();
11019 VALUE moved = rb_hash_new();
11020 VALUE moved_up = rb_hash_new();
11021 VALUE moved_down = rb_hash_new();
11022
11023 for (i=0; i<T_MASK; i++) {
11024 if (objspace->rcompactor.considered_count_table[i]) {
11025 rb_hash_aset(considered, type_sym(i), SIZET2NUM(objspace->rcompactor.considered_count_table[i]));
11026 }
11027
11028 if (objspace->rcompactor.moved_count_table[i]) {
11029 rb_hash_aset(moved, type_sym(i), SIZET2NUM(objspace->rcompactor.moved_count_table[i]));
11030 }
11031
11032 if (objspace->rcompactor.moved_up_count_table[i]) {
11033 rb_hash_aset(moved_up, type_sym(i), SIZET2NUM(objspace->rcompactor.moved_up_count_table[i]));
11034 }
11035
11036 if (objspace->rcompactor.moved_down_count_table[i]) {
11037 rb_hash_aset(moved_down, type_sym(i), SIZET2NUM(objspace->rcompactor.moved_down_count_table[i]));
11038 }
11039 }
11040
11041 rb_hash_aset(h, ID2SYM(rb_intern("considered")), considered);
11042 rb_hash_aset(h, ID2SYM(rb_intern("moved")), moved);
11043 rb_hash_aset(h, ID2SYM(rb_intern("moved_up")), moved_up);
11044 rb_hash_aset(h, ID2SYM(rb_intern("moved_down")), moved_down);
11045
11046 return h;
11047}
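/* Illustrative return shape (counts invented): the resulting hash groups
 * per-type tallies under the four keys assembled above, e.g.
 *
 *     GC.latest_compact_info
 *     # => { considered: { T_STRING: 120, T_ARRAY: 45, ... },
 *     #      moved:      { T_STRING: 118, T_ARRAY: 40, ... },
 *     #      moved_up:   { ... },
 *     #      moved_down: { ... } }
 *
 * The inner keys come from type_sym(i), so they are type symbols such as
 * :T_STRING; the numbers shown are placeholders only. */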
11048#else
11049# define gc_compact_stats rb_f_notimplement
11050#endif
11051
11052#if GC_CAN_COMPILE_COMPACTION
11053static void
11054root_obj_check_moved_i(const char *category, VALUE obj, void *data)
11055{
11056 if (gc_object_moved_p(&rb_objspace, obj)) {
11057 rb_bug("ROOT %s points to MOVED: %p -> %s", category, (void *)obj, obj_info(rb_gc_location(obj)));
11058 }
11059}
11060
11061static void
11062reachable_object_check_moved_i(VALUE ref, void *data)
11063{
11064 VALUE parent = (VALUE)data;
11065 if (gc_object_moved_p(&rb_objspace, ref)) {
11066 rb_bug("Object %s points to MOVED: %p -> %s", obj_info(parent), (void *)ref, obj_info(rb_gc_location(ref)));
11067 }
11068}
11069
11070static int
11071heap_check_moved_i(void *vstart, void *vend, size_t stride, void *data)
11072{
11073 VALUE v = (VALUE)vstart;
11074 for (; v != (VALUE)vend; v += stride) {
11075 if (gc_object_moved_p(&rb_objspace, v)) {
11076 /* Moved object still on the heap, something may have a reference. */
11077 }
11078 else {
11079 void *poisoned = asan_unpoison_object_temporary(v);
11080
11081 switch (BUILTIN_TYPE(v)) {
11082 case T_NONE:
11083 case T_ZOMBIE:
11084 break;
11085 default:
11086 if (!rb_objspace_garbage_object_p(v)) {
11087 rb_objspace_reachable_objects_from(v, reachable_object_check_moved_i, (void *)v);
11088 }
11089 }
11090
11091 if (poisoned) {
11092 GC_ASSERT(BUILTIN_TYPE(v) == T_NONE);
11093 asan_poison_object(v);
11094 }
11095 }
11096 }
11097
11098 return 0;
11099}
11100
11101/*
11102 * call-seq:
11103 * GC.compact
11104 *
11105 * This function compacts objects together in Ruby's heap. It eliminates
11106 * unused space (or fragmentation) in the heap by moving objects into that
11107 * unused space. This function returns a hash which contains statistics about
11108 * which objects were moved. See <tt>GC.latest_gc_info</tt> for details about
11109 * compaction statistics.
11110 *
11111 * This method is implementation specific and not expected to be implemented
11112 * in any implementation besides MRI.
11113 *
11114 * To test whether \GC compaction is supported, use the idiom:
11115 *
11116 * GC.respond_to?(:compact)
11117 */
11118static VALUE
11119gc_compact(VALUE self)
11120{
11121 /* Run GC with compaction enabled */
11122 gc_start_internal(NULL, self, Qtrue, Qtrue, Qtrue, Qtrue);
11123
11124 return gc_compact_stats(self);
11125}
11126#else
11127# define gc_compact rb_f_notimplement
11128#endif
11129
11130#if GC_CAN_COMPILE_COMPACTION
11131
11132struct desired_compaction_pages_i_data {
11133 rb_objspace_t *objspace;
11134 size_t required_slots[SIZE_POOL_COUNT];
11135};
11136
11137static int
11138desired_compaction_pages_i(struct heap_page *page, void *data)
11139{
11140 struct desired_compaction_pages_i_data *tdata = data;
11141 rb_objspace_t *objspace = tdata->objspace;
11142 VALUE vstart = (VALUE)page->start;
11143 VALUE vend = vstart + (VALUE)(page->total_slots * page->size_pool->slot_size);
11144
11145
11146 for (VALUE v = vstart; v != vend; v += page->size_pool->slot_size) {
11147 /* skip T_NONEs; they won't be moved */
11148 void *poisoned = asan_unpoison_object_temporary(v);
11149 if (BUILTIN_TYPE(v) == T_NONE) {
11150 if (poisoned) {
11151 asan_poison_object(v);
11152 }
11153 continue;
11154 }
11155
11156 rb_size_pool_t *dest_pool = gc_compact_destination_pool(objspace, page->size_pool, v);
11157 size_t dest_pool_idx = dest_pool - size_pools;
11158 tdata->required_slots[dest_pool_idx]++;
11159 }
11160
11161 return 0;
11162}
11163
11164static VALUE
11165gc_verify_compaction_references(rb_execution_context_t *ec, VALUE self, VALUE double_heap, VALUE expand_heap, VALUE toward_empty)
11166{
11167 rb_objspace_t *objspace = &rb_objspace;
11168
11169 /* Clear the heap. */
11170 gc_start_internal(NULL, self, Qtrue, Qtrue, Qtrue, Qfalse);
11171
11172 if (RTEST(double_heap)) {
11173 rb_warn("double_heap is deprecated, please use expand_heap instead");
11174 }
11175
11176 RB_VM_LOCK_ENTER();
11177 {
11178 gc_rest(objspace);
11179
11180 /* if both double_heap and expand_heap are set, expand_heap takes precedence */
11181 if (RTEST(expand_heap)) {
11182 struct desired_compaction_pages_i_data desired_compaction = {
11183 .objspace = objspace,
11184 .required_slots = {0},
11185 };
11186 /* Work out how many objects want to be in each size pool, taking account of moves */
11187 objspace_each_pages(objspace, desired_compaction_pages_i, &desired_compaction, TRUE);
11188
11189 /* Find out which pool has the most pages */
11190 size_t max_existing_pages = 0;
11191            for (int i = 0; i < SIZE_POOL_COUNT; i++) {
11192 rb_size_pool_t *size_pool = &size_pools[i];
11193 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
11194 max_existing_pages = MAX(max_existing_pages, heap->total_pages);
11195 }
11196 /* Add pages to each size pool so that compaction is guaranteed to move every object */
11197 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
11198 rb_size_pool_t *size_pool = &size_pools[i];
11199 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
11200
11201 size_t pages_to_add = 0;
11202 /*
11203 * Step 1: Make sure every pool has the same number of pages, by adding empty pages
11204 * to smaller pools. This is required to make sure the compact cursor can advance
11205 * through all of the pools in `gc_sweep_compact` without hitting the "sweep &
11206 * compact cursors met" condition on some pools before fully compacting others
11207 */
11208 pages_to_add += max_existing_pages - heap->total_pages;
11209 /*
11210 * Step 2: Now add additional free pages to each size pool sufficient to hold all objects
11211 * that want to be in that size pool, whether moved into it or moved within it
11212 */
11213 pages_to_add += slots_to_pages_for_size_pool(objspace, size_pool, desired_compaction.required_slots[i]);
11214 /*
11215 * Step 3: Add two more pages so that the compact & sweep cursors will meet _after_ all objects
11216 * have been moved, and not on the last iteration of the `gc_sweep_compact` loop
11217 */
11218 pages_to_add += 2;
11219
11220 heap_add_pages(objspace, size_pool, heap, pages_to_add);
11221 }
11222 }
11223 else if (RTEST(double_heap)) {
11224 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
11225 rb_size_pool_t *size_pool = &size_pools[i];
11226 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
11227 heap_add_pages(objspace, size_pool, heap, heap->total_pages);
11228 }
11229
11230 }
11231
11232 if (RTEST(toward_empty)) {
11233 objspace->rcompactor.compare_func = compare_free_slots;
11234 }
11235 }
11236 RB_VM_LOCK_LEAVE();
11237
11238 gc_start_internal(NULL, self, Qtrue, Qtrue, Qtrue, Qtrue);
11239
11240 objspace_reachable_objects_from_root(objspace, root_obj_check_moved_i, NULL);
11241 objspace_each_objects(objspace, heap_check_moved_i, NULL, TRUE);
11242
11243 objspace->rcompactor.compare_func = NULL;
11244 return gc_compact_stats(self);
11245}
11246#else
11247# define gc_verify_compaction_references (rb_builtin_arity3_function_type)rb_f_notimplement
11248#endif
11249
11250VALUE
11251rb_gc_start(void)
11252{
11253 rb_gc();
11254 return Qnil;
11255}
11256
11257void
11258rb_gc(void)
11259{
11260 unless_objspace(objspace) { return; }
11261 unsigned int reason = GPR_DEFAULT_REASON;
11262 garbage_collect(objspace, reason);
11263}
11264
11265int
11266rb_during_gc(void)
11267{
11268 unless_objspace(objspace) { return FALSE; }
11269 return during_gc;
11270}
11271
11272#if RGENGC_PROFILE >= 2
11273
11274static const char *type_name(int type, VALUE obj);
11275
11276static void
11277gc_count_add_each_types(VALUE hash, const char *name, const size_t *types)
11278{
11279 VALUE result = rb_hash_new_with_size(T_MASK);
11280 int i;
11281 for (i=0; i<T_MASK; i++) {
11282 const char *type = type_name(i, 0);
11283 rb_hash_aset(result, ID2SYM(rb_intern(type)), SIZET2NUM(types[i]));
11284 }
11285 rb_hash_aset(hash, ID2SYM(rb_intern(name)), result);
11286}
11287#endif
11288
11289size_t
11290rb_gc_count(void)
11291{
11292 return rb_objspace.profile.count;
11293}
11294
11295static VALUE
11296gc_count(rb_execution_context_t *ec, VALUE self)
11297{
11298 return SIZET2NUM(rb_gc_count());
11299}
11300
11301static VALUE
11302gc_info_decode(rb_objspace_t *objspace, const VALUE hash_or_key, const unsigned int orig_flags)
11303{
11304 static VALUE sym_major_by = Qnil, sym_gc_by, sym_immediate_sweep, sym_have_finalizer, sym_state, sym_need_major_by;
11305 static VALUE sym_nofree, sym_oldgen, sym_shady, sym_force, sym_stress;
11306#if RGENGC_ESTIMATE_OLDMALLOC
11307 static VALUE sym_oldmalloc;
11308#endif
11309 static VALUE sym_newobj, sym_malloc, sym_method, sym_capi;
11310 static VALUE sym_none, sym_marking, sym_sweeping;
11311 static VALUE sym_weak_references_count, sym_retained_weak_references_count;
11312 VALUE hash = Qnil, key = Qnil;
11313 VALUE major_by, need_major_by;
11314 unsigned int flags = orig_flags ? orig_flags : objspace->profile.latest_gc_info;
11315
11316 if (SYMBOL_P(hash_or_key)) {
11317 key = hash_or_key;
11318 }
11319 else if (RB_TYPE_P(hash_or_key, T_HASH)) {
11320 hash = hash_or_key;
11321 }
11322 else {
11323 rb_raise(rb_eTypeError, "non-hash or symbol given");
11324 }
11325
11326 if (NIL_P(sym_major_by)) {
11327#define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
11328 S(major_by);
11329 S(gc_by);
11330 S(immediate_sweep);
11331 S(have_finalizer);
11332 S(state);
11333 S(need_major_by);
11334
11335 S(stress);
11336 S(nofree);
11337 S(oldgen);
11338 S(shady);
11339 S(force);
11340#if RGENGC_ESTIMATE_OLDMALLOC
11341 S(oldmalloc);
11342#endif
11343 S(newobj);
11344 S(malloc);
11345 S(method);
11346 S(capi);
11347
11348 S(none);
11349 S(marking);
11350 S(sweeping);
11351
11352 S(weak_references_count);
11353 S(retained_weak_references_count);
11354#undef S
11355 }
11356
11357#define SET(name, attr) \
11358 if (key == sym_##name) \
11359 return (attr); \
11360 else if (hash != Qnil) \
11361 rb_hash_aset(hash, sym_##name, (attr));
11362
11363 major_by =
11364 (flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
11365 (flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
11366 (flags & GPR_FLAG_MAJOR_BY_SHADY) ? sym_shady :
11367 (flags & GPR_FLAG_MAJOR_BY_FORCE) ? sym_force :
11368#if RGENGC_ESTIMATE_OLDMALLOC
11369 (flags & GPR_FLAG_MAJOR_BY_OLDMALLOC) ? sym_oldmalloc :
11370#endif
11371 Qnil;
11372 SET(major_by, major_by);
11373
11374 if (orig_flags == 0) { /* set need_major_by only if flags not set explicitly */
11375 unsigned int need_major_flags = objspace->rgengc.need_major_gc;
11376 need_major_by =
11377 (need_major_flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
11378 (need_major_flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
11379 (need_major_flags & GPR_FLAG_MAJOR_BY_SHADY) ? sym_shady :
11380 (need_major_flags & GPR_FLAG_MAJOR_BY_FORCE) ? sym_force :
11381#if RGENGC_ESTIMATE_OLDMALLOC
11382 (need_major_flags & GPR_FLAG_MAJOR_BY_OLDMALLOC) ? sym_oldmalloc :
11383#endif
11384 Qnil;
11385 SET(need_major_by, need_major_by);
11386 }
11387
11388 SET(gc_by,
11389 (flags & GPR_FLAG_NEWOBJ) ? sym_newobj :
11390 (flags & GPR_FLAG_MALLOC) ? sym_malloc :
11391 (flags & GPR_FLAG_METHOD) ? sym_method :
11392 (flags & GPR_FLAG_CAPI) ? sym_capi :
11393 (flags & GPR_FLAG_STRESS) ? sym_stress :
11394 Qnil
11395 );
11396
11397 SET(have_finalizer, RBOOL(flags & GPR_FLAG_HAVE_FINALIZE));
11398 SET(immediate_sweep, RBOOL(flags & GPR_FLAG_IMMEDIATE_SWEEP));
11399
11400 if (orig_flags == 0) {
11401 SET(state, gc_mode(objspace) == gc_mode_none ? sym_none :
11402 gc_mode(objspace) == gc_mode_marking ? sym_marking : sym_sweeping);
11403 }
11404
11405 SET(weak_references_count, LONG2FIX(objspace->profile.weak_references_count));
11406 SET(retained_weak_references_count, LONG2FIX(objspace->profile.retained_weak_references_count));
11407#undef SET
11408
11409 if (!NIL_P(key)) {/* matched key should return above */
11410 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
11411 }
11412
11413 return hash;
11414}
11415
11416VALUE
11417rb_gc_latest_gc_info(VALUE key)
11418{
11419 rb_objspace_t *objspace = &rb_objspace;
11420 return gc_info_decode(objspace, key, 0);
11421}
11422
11423static VALUE
11424gc_latest_gc_info(rb_execution_context_t *ec, VALUE self, VALUE arg)
11425{
11426 rb_objspace_t *objspace = &rb_objspace;
11427
11428 if (NIL_P(arg)) {
11429 arg = rb_hash_new();
11430 }
11431 else if (!SYMBOL_P(arg) && !RB_TYPE_P(arg, T_HASH)) {
11432 rb_raise(rb_eTypeError, "non-hash or symbol given");
11433 }
11434
11435 return gc_info_decode(objspace, arg, 0);
11436}
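/* Usage sketch (return values depend on the most recent collection): the
 * Ruby-level GC.latest_gc_info accepts no argument, a Hash to fill, or a
 * single Symbol key, mirroring gc_info_decode above, e.g.
 *
 *     GC.latest_gc_info           # => Hash with :major_by, :gc_by, :state, ...
 *     GC.latest_gc_info(:gc_by)   # => one of :newobj, :malloc, :method, :capi, :stress
 *
 * An unknown Symbol key raises ArgumentError, as implemented at the end of
 * gc_info_decode. */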
11437
11438enum gc_stat_sym {
11439 gc_stat_sym_count,
11440 gc_stat_sym_time,
11441 gc_stat_sym_marking_time,
11442 gc_stat_sym_sweeping_time,
11443 gc_stat_sym_heap_allocated_pages,
11444 gc_stat_sym_heap_sorted_length,
11445 gc_stat_sym_heap_allocatable_pages,
11446 gc_stat_sym_heap_available_slots,
11447 gc_stat_sym_heap_live_slots,
11448 gc_stat_sym_heap_free_slots,
11449 gc_stat_sym_heap_final_slots,
11450 gc_stat_sym_heap_marked_slots,
11451 gc_stat_sym_heap_eden_pages,
11452 gc_stat_sym_heap_tomb_pages,
11453 gc_stat_sym_total_allocated_pages,
11454 gc_stat_sym_total_freed_pages,
11455 gc_stat_sym_total_allocated_objects,
11456 gc_stat_sym_total_freed_objects,
11457 gc_stat_sym_malloc_increase_bytes,
11458 gc_stat_sym_malloc_increase_bytes_limit,
11459 gc_stat_sym_minor_gc_count,
11460 gc_stat_sym_major_gc_count,
11461 gc_stat_sym_compact_count,
11462 gc_stat_sym_read_barrier_faults,
11463 gc_stat_sym_total_moved_objects,
11464 gc_stat_sym_remembered_wb_unprotected_objects,
11465 gc_stat_sym_remembered_wb_unprotected_objects_limit,
11466 gc_stat_sym_old_objects,
11467 gc_stat_sym_old_objects_limit,
11468#if RGENGC_ESTIMATE_OLDMALLOC
11469 gc_stat_sym_oldmalloc_increase_bytes,
11470 gc_stat_sym_oldmalloc_increase_bytes_limit,
11471#endif
11472 gc_stat_sym_weak_references_count,
11473#if RGENGC_PROFILE
11474 gc_stat_sym_total_generated_normal_object_count,
11475 gc_stat_sym_total_generated_shady_object_count,
11476 gc_stat_sym_total_shade_operation_count,
11477 gc_stat_sym_total_promoted_count,
11478 gc_stat_sym_total_remembered_normal_object_count,
11479 gc_stat_sym_total_remembered_shady_object_count,
11480#endif
11481 gc_stat_sym_last
11482};
11483
11484static VALUE gc_stat_symbols[gc_stat_sym_last];
11485
11486static void
11487setup_gc_stat_symbols(void)
11488{
11489 if (gc_stat_symbols[0] == 0) {
11490#define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s))
11491 S(count);
11492 S(time);
11493        S(marking_time);
11494        S(sweeping_time);
11495 S(heap_allocated_pages);
11496 S(heap_sorted_length);
11497 S(heap_allocatable_pages);
11498 S(heap_available_slots);
11499 S(heap_live_slots);
11500 S(heap_free_slots);
11501 S(heap_final_slots);
11502 S(heap_marked_slots);
11503 S(heap_eden_pages);
11504 S(heap_tomb_pages);
11505 S(total_allocated_pages);
11506 S(total_freed_pages);
11507 S(total_allocated_objects);
11508 S(total_freed_objects);
11509 S(malloc_increase_bytes);
11510 S(malloc_increase_bytes_limit);
11511 S(minor_gc_count);
11512 S(major_gc_count);
11513 S(compact_count);
11514 S(read_barrier_faults);
11515 S(total_moved_objects);
11516 S(remembered_wb_unprotected_objects);
11517 S(remembered_wb_unprotected_objects_limit);
11518 S(old_objects);
11519 S(old_objects_limit);
11520#if RGENGC_ESTIMATE_OLDMALLOC
11521 S(oldmalloc_increase_bytes);
11522 S(oldmalloc_increase_bytes_limit);
11523#endif
11524 S(weak_references_count);
11525#if RGENGC_PROFILE
11526 S(total_generated_normal_object_count);
11527 S(total_generated_shady_object_count);
11528 S(total_shade_operation_count);
11529 S(total_promoted_count);
11530 S(total_remembered_normal_object_count);
11531 S(total_remembered_shady_object_count);
11532#endif /* RGENGC_PROFILE */
11533#undef S
11534 }
11535}
11536
11537static uint64_t
11538ns_to_ms(uint64_t ns)
11539{
11540 return ns / (1000 * 1000);
11541}
11542
11543static size_t
11544gc_stat_internal(VALUE hash_or_sym)
11545{
11546 rb_objspace_t *objspace = &rb_objspace;
11547 VALUE hash = Qnil, key = Qnil;
11548
11549 setup_gc_stat_symbols();
11550
11551 if (RB_TYPE_P(hash_or_sym, T_HASH)) {
11552 hash = hash_or_sym;
11553 }
11554 else if (SYMBOL_P(hash_or_sym)) {
11555 key = hash_or_sym;
11556 }
11557 else {
11558 rb_raise(rb_eTypeError, "non-hash or symbol argument");
11559 }
11560
11561#define SET(name, attr) \
11562 if (key == gc_stat_symbols[gc_stat_sym_##name]) \
11563 return attr; \
11564 else if (hash != Qnil) \
11565 rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr));
11566
11567 SET(count, objspace->profile.count);
11568 SET(time, (size_t)ns_to_ms(objspace->profile.marking_time_ns + objspace->profile.sweeping_time_ns)); // TODO: UINT64T2NUM
11569 SET(marking_time, (size_t)ns_to_ms(objspace->profile.marking_time_ns));
11570 SET(sweeping_time, (size_t)ns_to_ms(objspace->profile.sweeping_time_ns));
11571
11572 /* implementation dependent counters */
11573 SET(heap_allocated_pages, heap_allocated_pages);
11574 SET(heap_sorted_length, heap_pages_sorted_length);
11575 SET(heap_allocatable_pages, heap_allocatable_pages(objspace));
11576 SET(heap_available_slots, objspace_available_slots(objspace));
11577 SET(heap_live_slots, objspace_live_slots(objspace));
11578 SET(heap_free_slots, objspace_free_slots(objspace));
11579 SET(heap_final_slots, heap_pages_final_slots);
11580 SET(heap_marked_slots, objspace->marked_slots);
11581 SET(heap_eden_pages, heap_eden_total_pages(objspace));
11582 SET(heap_tomb_pages, heap_tomb_total_pages(objspace));
11583 SET(total_allocated_pages, total_allocated_pages(objspace));
11584 SET(total_freed_pages, total_freed_pages(objspace));
11585 SET(total_allocated_objects, total_allocated_objects(objspace));
11586 SET(total_freed_objects, total_freed_objects(objspace));
11587 SET(malloc_increase_bytes, malloc_increase);
11588 SET(malloc_increase_bytes_limit, malloc_limit);
11589 SET(minor_gc_count, objspace->profile.minor_gc_count);
11590 SET(major_gc_count, objspace->profile.major_gc_count);
11591 SET(compact_count, objspace->profile.compact_count);
11592 SET(read_barrier_faults, objspace->profile.read_barrier_faults);
11593 SET(total_moved_objects, objspace->rcompactor.total_moved);
11594 SET(remembered_wb_unprotected_objects, objspace->rgengc.uncollectible_wb_unprotected_objects);
11595 SET(remembered_wb_unprotected_objects_limit, objspace->rgengc.uncollectible_wb_unprotected_objects_limit);
11596 SET(old_objects, objspace->rgengc.old_objects);
11597 SET(old_objects_limit, objspace->rgengc.old_objects_limit);
11598#if RGENGC_ESTIMATE_OLDMALLOC
11599 SET(oldmalloc_increase_bytes, objspace->rgengc.oldmalloc_increase);
11600 SET(oldmalloc_increase_bytes_limit, objspace->rgengc.oldmalloc_increase_limit);
11601#endif
11602
11603#if RGENGC_PROFILE
11604 SET(total_generated_normal_object_count, objspace->profile.total_generated_normal_object_count);
11605 SET(total_generated_shady_object_count, objspace->profile.total_generated_shady_object_count);
11606 SET(total_shade_operation_count, objspace->profile.total_shade_operation_count);
11607 SET(total_promoted_count, objspace->profile.total_promoted_count);
11608 SET(total_remembered_normal_object_count, objspace->profile.total_remembered_normal_object_count);
11609 SET(total_remembered_shady_object_count, objspace->profile.total_remembered_shady_object_count);
11610#endif /* RGENGC_PROFILE */
11611#undef SET
11612
11613 if (!NIL_P(key)) { /* matched key should return above */
11614 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
11615 }
11616
11617#if defined(RGENGC_PROFILE) && RGENGC_PROFILE >= 2
11618 if (hash != Qnil) {
11619 gc_count_add_each_types(hash, "generated_normal_object_count_types", objspace->profile.generated_normal_object_count_types);
11620 gc_count_add_each_types(hash, "generated_shady_object_count_types", objspace->profile.generated_shady_object_count_types);
11621 gc_count_add_each_types(hash, "shade_operation_count_types", objspace->profile.shade_operation_count_types);
11622 gc_count_add_each_types(hash, "promoted_types", objspace->profile.promoted_types);
11623 gc_count_add_each_types(hash, "remembered_normal_object_count_types", objspace->profile.remembered_normal_object_count_types);
11624 gc_count_add_each_types(hash, "remembered_shady_object_count_types", objspace->profile.remembered_shady_object_count_types);
11625 }
11626#endif
11627
11628 return 0;
11629}
11630
11631static VALUE
11632gc_stat(rb_execution_context_t *ec, VALUE self, VALUE arg) // arg is (nil || hash || symbol)
11633{
11634 if (NIL_P(arg)) {
11635 arg = rb_hash_new();
11636 }
11637 else if (SYMBOL_P(arg)) {
11638 size_t value = gc_stat_internal(arg);
11639 return SIZET2NUM(value);
11640 }
11641 else if (RB_TYPE_P(arg, T_HASH)) {
11642 // ok
11643 }
11644 else {
11645 rb_raise(rb_eTypeError, "non-hash or symbol given");
11646 }
11647
11648 gc_stat_internal(arg);
11649 return arg;
11650}
11651
11652size_t
11653rb_gc_stat(VALUE key)
11654{
11655 if (SYMBOL_P(key)) {
11656 size_t value = gc_stat_internal(key);
11657 return value;
11658 }
11659 else {
11660 gc_stat_internal(key);
11661 return 0;
11662 }
11663}
11664
11665
11666enum gc_stat_heap_sym {
11667 gc_stat_heap_sym_slot_size,
11668 gc_stat_heap_sym_heap_allocatable_pages,
11669 gc_stat_heap_sym_heap_eden_pages,
11670 gc_stat_heap_sym_heap_eden_slots,
11671 gc_stat_heap_sym_heap_tomb_pages,
11672 gc_stat_heap_sym_heap_tomb_slots,
11673 gc_stat_heap_sym_total_allocated_pages,
11674 gc_stat_heap_sym_total_freed_pages,
11675 gc_stat_heap_sym_force_major_gc_count,
11676 gc_stat_heap_sym_force_incremental_marking_finish_count,
11677 gc_stat_heap_sym_total_allocated_objects,
11678 gc_stat_heap_sym_total_freed_objects,
11679 gc_stat_heap_sym_last
11680};
11681
11682static VALUE gc_stat_heap_symbols[gc_stat_heap_sym_last];
11683
11684static void
11685setup_gc_stat_heap_symbols(void)
11686{
11687 if (gc_stat_heap_symbols[0] == 0) {
11688#define S(s) gc_stat_heap_symbols[gc_stat_heap_sym_##s] = ID2SYM(rb_intern_const(#s))
11689 S(slot_size);
11690 S(heap_allocatable_pages);
11691 S(heap_eden_pages);
11692 S(heap_eden_slots);
11693 S(heap_tomb_pages);
11694 S(heap_tomb_slots);
11695 S(total_allocated_pages);
11696 S(total_freed_pages);
11697 S(force_major_gc_count);
11698 S(force_incremental_marking_finish_count);
11699 S(total_allocated_objects);
11700 S(total_freed_objects);
11701#undef S
11702 }
11703}
11704
11705static size_t
11706gc_stat_heap_internal(int size_pool_idx, VALUE hash_or_sym)
11707{
11708 rb_objspace_t *objspace = &rb_objspace;
11709 VALUE hash = Qnil, key = Qnil;
11710
11711 setup_gc_stat_heap_symbols();
11712
11713 if (RB_TYPE_P(hash_or_sym, T_HASH)) {
11714 hash = hash_or_sym;
11715 }
11716 else if (SYMBOL_P(hash_or_sym)) {
11717 key = hash_or_sym;
11718 }
11719 else {
11720 rb_raise(rb_eTypeError, "non-hash or symbol argument");
11721 }
11722
11723 if (size_pool_idx < 0 || size_pool_idx >= SIZE_POOL_COUNT) {
11724 rb_raise(rb_eArgError, "size pool index out of range");
11725 }
11726
11727 rb_size_pool_t *size_pool = &size_pools[size_pool_idx];
11728
11729#define SET(name, attr) \
11730 if (key == gc_stat_heap_symbols[gc_stat_heap_sym_##name]) \
11731 return attr; \
11732 else if (hash != Qnil) \
11733 rb_hash_aset(hash, gc_stat_heap_symbols[gc_stat_heap_sym_##name], SIZET2NUM(attr));
11734
11735 SET(slot_size, size_pool->slot_size);
11736 SET(heap_allocatable_pages, size_pool->allocatable_pages);
11737 SET(heap_eden_pages, SIZE_POOL_EDEN_HEAP(size_pool)->total_pages);
11738 SET(heap_eden_slots, SIZE_POOL_EDEN_HEAP(size_pool)->total_slots);
11739 SET(heap_tomb_pages, SIZE_POOL_TOMB_HEAP(size_pool)->total_pages);
11740 SET(heap_tomb_slots, SIZE_POOL_TOMB_HEAP(size_pool)->total_slots);
11741 SET(total_allocated_pages, size_pool->total_allocated_pages);
11742 SET(total_freed_pages, size_pool->total_freed_pages);
11743 SET(force_major_gc_count, size_pool->force_major_gc_count);
11744 SET(force_incremental_marking_finish_count, size_pool->force_incremental_marking_finish_count);
11745 SET(total_allocated_objects, size_pool->total_allocated_objects);
11746 SET(total_freed_objects, size_pool->total_freed_objects);
11747#undef SET
11748
11749 if (!NIL_P(key)) { /* matched key should return above */
11750 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
11751 }
11752
11753 return 0;
11754}
11755
11756static VALUE
11757gc_stat_heap(rb_execution_context_t *ec, VALUE self, VALUE heap_name, VALUE arg)
11758{
11759 if (NIL_P(heap_name)) {
11760 if (NIL_P(arg)) {
11761 arg = rb_hash_new();
11762 }
11763 else if (RB_TYPE_P(arg, T_HASH)) {
11764 // ok
11765 }
11766 else {
11767 rb_raise(rb_eTypeError, "non-hash given");
11768 }
11769
11770 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
11771 VALUE hash = rb_hash_aref(arg, INT2FIX(i));
11772 if (NIL_P(hash)) {
11773 hash = rb_hash_new();
11774 rb_hash_aset(arg, INT2FIX(i), hash);
11775 }
11776 gc_stat_heap_internal(i, hash);
11777 }
11778 }
11779 else if (FIXNUM_P(heap_name)) {
11780 int size_pool_idx = FIX2INT(heap_name);
11781
11782 if (NIL_P(arg)) {
11783 arg = rb_hash_new();
11784 }
11785 else if (SYMBOL_P(arg)) {
11786 size_t value = gc_stat_heap_internal(size_pool_idx, arg);
11787 return SIZET2NUM(value);
11788 }
11789 else if (RB_TYPE_P(arg, T_HASH)) {
11790 // ok
11791 }
11792 else {
11793 rb_raise(rb_eTypeError, "non-hash or symbol given");
11794 }
11795
11796 gc_stat_heap_internal(size_pool_idx, arg);
11797 }
11798 else {
11799 rb_raise(rb_eTypeError, "heap_name must be nil or an Integer");
11800 }
11801
11802 return arg;
11803}
11804
11805static VALUE
11806gc_stress_get(rb_execution_context_t *ec, VALUE self)
11807{
11808 rb_objspace_t *objspace = &rb_objspace;
11809 return ruby_gc_stress_mode;
11810}
11811
11812static void
11813gc_stress_set(rb_objspace_t *objspace, VALUE flag)
11814{
11815 objspace->flags.gc_stressful = RTEST(flag);
11816 objspace->gc_stress_mode = flag;
11817}
11818
11819static VALUE
11820gc_stress_set_m(rb_execution_context_t *ec, VALUE self, VALUE flag)
11821{
11822 rb_objspace_t *objspace = &rb_objspace;
11823 gc_stress_set(objspace, flag);
11824 return flag;
11825}
11826
11827VALUE
11828rb_gc_enable(void)
11829{
11830 rb_objspace_t *objspace = &rb_objspace;
11831 return rb_objspace_gc_enable(objspace);
11832}
11833
11834VALUE
11835rb_objspace_gc_enable(rb_objspace_t *objspace)
11836{
11837 int old = dont_gc_val();
11838
11839 dont_gc_off();
11840 return RBOOL(old);
11841}
11842
11843static VALUE
11844gc_enable(rb_execution_context_t *ec, VALUE _)
11845{
11846 return rb_gc_enable();
11847}
11848
11849VALUE
11850rb_gc_disable_no_rest(void)
11851{
11852 rb_objspace_t *objspace = &rb_objspace;
11853 return gc_disable_no_rest(objspace);
11854}
11855
11856static VALUE
11857gc_disable_no_rest(rb_objspace_t *objspace)
11858{
11859 int old = dont_gc_val();
11860 dont_gc_on();
11861 return RBOOL(old);
11862}
11863
11864VALUE
11865rb_gc_disable(void)
11866{
11867 rb_objspace_t *objspace = &rb_objspace;
11868 return rb_objspace_gc_disable(objspace);
11869}
11870
11871VALUE
11872rb_objspace_gc_disable(rb_objspace_t *objspace)
11873{
11874 gc_rest(objspace);
11875 return gc_disable_no_rest(objspace);
11876}
11877
11878static VALUE
11879gc_disable(rb_execution_context_t *ec, VALUE _)
11880{
11881 return rb_gc_disable();
11882}
11883
11884#if GC_CAN_COMPILE_COMPACTION
11885/*
11886 * call-seq:
11887 * GC.auto_compact = flag
11888 *
11889 * Updates automatic compaction mode.
11890 *
11891 * When enabled, the compactor will execute on every major collection.
11892 *
11893 * Enabling compaction will degrade performance on major collections.
11894 */
11895static VALUE
11896gc_set_auto_compact(VALUE _, VALUE v)
11897{
11898 GC_ASSERT(GC_COMPACTION_SUPPORTED);
11899
11900 ruby_enable_autocompact = RTEST(v);
11901
11902#if RGENGC_CHECK_MODE
11903 ruby_autocompact_compare_func = NULL;
11904
11905 if (SYMBOL_P(v)) {
11906 ID id = RB_SYM2ID(v);
11907 if (id == rb_intern("empty")) {
11908 ruby_autocompact_compare_func = compare_free_slots;
11909 }
11910 }
11911#endif
11912
11913 return v;
11914}
11915#else
11916# define gc_set_auto_compact rb_f_notimplement
11917#endif
11918
11919#if GC_CAN_COMPILE_COMPACTION
11920/*
11921 * call-seq:
11922 * GC.auto_compact -> true or false
11923 *
11924 * Returns whether or not automatic compaction has been enabled.
11925 */
11926static VALUE
11927gc_get_auto_compact(VALUE _)
11928{
11929 return RBOOL(ruby_enable_autocompact);
11930}
11931#else
11932# define gc_get_auto_compact rb_f_notimplement
11933#endif
11934
11935static int
11936get_envparam_size(const char *name, size_t *default_value, size_t lower_bound)
11937{
11938 const char *ptr = getenv(name);
11939 ssize_t val;
11940
11941 if (ptr != NULL && *ptr) {
11942 size_t unit = 0;
11943 char *end;
11944#if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
11945 val = strtoll(ptr, &end, 0);
11946#else
11947 val = strtol(ptr, &end, 0);
11948#endif
11949 switch (*end) {
11950 case 'k': case 'K':
11951 unit = 1024;
11952 ++end;
11953 break;
11954 case 'm': case 'M':
11955 unit = 1024*1024;
11956 ++end;
11957 break;
11958 case 'g': case 'G':
11959 unit = 1024*1024*1024;
11960 ++end;
11961 break;
11962 }
11963 while (*end && isspace((unsigned char)*end)) end++;
11964 if (*end) {
11965 if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
11966 return 0;
11967 }
11968 if (unit > 0) {
11969 if (val < -(ssize_t)(SIZE_MAX / 2 / unit) || (ssize_t)(SIZE_MAX / 2 / unit) < val) {
11970 if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%s is ignored because it overflows\n", name, ptr);
11971 return 0;
11972 }
11973 val *= unit;
11974 }
11975 if (val > 0 && (size_t)val > lower_bound) {
11976 if (RTEST(ruby_verbose)) {
11977 fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE")\n", name, val, *default_value);
11978 }
11979 *default_value = (size_t)val;
11980 return 1;
11981 }
11982 else {
11983 if (RTEST(ruby_verbose)) {
11984 fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE") is ignored because it must be greater than %"PRIuSIZE".\n",
11985 name, val, *default_value, lower_bound);
11986 }
11987 return 0;
11988 }
11989 }
11990 return 0;
11991}
11992
11993static int
11994get_envparam_double(const char *name, double *default_value, double lower_bound, double upper_bound, int accept_zero)
11995{
11996 const char *ptr = getenv(name);
11997 double val;
11998
11999 if (ptr != NULL && *ptr) {
12000 char *end;
12001 val = strtod(ptr, &end);
12002 if (!*ptr || *end) {
12003 if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
12004 return 0;
12005 }
12006
12007 if (accept_zero && val == 0.0) {
12008 goto accept;
12009 }
12010 else if (val <= lower_bound) {
12011 if (RTEST(ruby_verbose)) {
12012 fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be greater than %f.\n",
12013 name, val, *default_value, lower_bound);
12014 }
12015 }
12016 else if (upper_bound != 0.0 && /* ignore upper_bound if it is 0.0 */
12017 val > upper_bound) {
12018 if (RTEST(ruby_verbose)) {
12019 fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be lower than %f.\n",
12020 name, val, *default_value, upper_bound);
12021 }
12022 }
12023 else {
12024 goto accept;
12025 }
12026 }
12027 return 0;
12028
12029 accept:
12030 if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%f (default value: %f)\n", name, val, *default_value);
12031 *default_value = val;
12032 return 1;
12033}
12034
12035static void
12036gc_set_initial_pages(rb_objspace_t *objspace)
12037{
12038 gc_rest(objspace);
12039
12040 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
12041 rb_size_pool_t *size_pool = &size_pools[i];
12042 char env_key[sizeof("RUBY_GC_HEAP_" "_INIT_SLOTS") + DECIMAL_SIZE_OF_BITS(sizeof(int) * CHAR_BIT)];
12043 snprintf(env_key, sizeof(env_key), "RUBY_GC_HEAP_%d_INIT_SLOTS", i);
12044
12045 size_t size_pool_init_slots = gc_params.size_pool_init_slots[i];
12046 if (get_envparam_size(env_key, &size_pool_init_slots, 0)) {
12047 gc_params.size_pool_init_slots[i] = size_pool_init_slots;
12048 }
12049
12050 if (size_pool_init_slots > size_pool->eden_heap.total_slots) {
12051 size_t slots = size_pool_init_slots - size_pool->eden_heap.total_slots;
12052 size_pool->allocatable_pages = slots_to_pages_for_size_pool(objspace, size_pool, slots);
12053 }
12054 else {
12055 /* We already have more slots than size_pool_init_slots allows, so
12056 * prevent creating more pages. */
12057 size_pool->allocatable_pages = 0;
12058 }
12059 }
12060 heap_pages_expand_sorted(objspace);
12061}
12062
12063/*
12064 * GC tuning environment variables
12065 *
12066 * * RUBY_GC_HEAP_FREE_SLOTS
12067 * - Prepare at least this amount of slots after GC.
12068 * - Allocate slots if there are not enough slots.
12069 * * RUBY_GC_HEAP_GROWTH_FACTOR (new from 2.1)
12070 * - Allocate slots by this factor.
12071 * - (next slots number) = (current slots number) * (this factor)
12072 * * RUBY_GC_HEAP_GROWTH_MAX_SLOTS (new from 2.1)
12073 * - Allocation rate is limited to this number of slots.
12074 * * RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO (new from 2.4)
12075 * - Allocate additional pages when the number of free slots is
12076 * lower than the value (total_slots * (this ratio)).
12077 * * RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO (new from 2.4)
12078 * - Allocate slots to satisfy this formula:
12079 * free_slots = total_slots * goal_ratio
12080 * - In other words, prepare (total_slots * goal_ratio) free slots.
12081 * - if this value is 0.0, then use RUBY_GC_HEAP_GROWTH_FACTOR directly.
12082 * * RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO (new from 2.4)
12083 *   - Allow pages to be freed when the number of free slots is
12084 *     greater than the value (total_slots * (this ratio)).
12085 * * RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR (new from 2.1.1)
12086 * - Do full GC when the number of old objects is more than R * N
12087 * where R is this factor and
12088 * N is the number of old objects just after last full GC.
12089 *
12090 * * obsolete
12091 * * RUBY_FREE_MIN -> RUBY_GC_HEAP_FREE_SLOTS (from 2.1)
12092 * * RUBY_HEAP_MIN_SLOTS -> RUBY_GC_HEAP_INIT_SLOTS (from 2.1)
12093 *
12094 * * RUBY_GC_MALLOC_LIMIT
12095 * * RUBY_GC_MALLOC_LIMIT_MAX (new from 2.1)
12096 * * RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR (new from 2.1)
12097 *
12098 * * RUBY_GC_OLDMALLOC_LIMIT (new from 2.1)
12099 * * RUBY_GC_OLDMALLOC_LIMIT_MAX (new from 2.1)
12100 * * RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR (new from 2.1)
12101 */
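/* Illustrative invocation (the numeric values below are arbitrary examples for
 * demonstration, not tuning recommendations):
 *
 *   $ RUBY_GC_HEAP_GROWTH_FACTOR=1.25 \
 *     RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO=0.40 \
 *     RUBY_GC_MALLOC_LIMIT=32m \
 *     ruby app.rb
 *
 * Size-valued variables accept the k/K, m/M and g/G suffixes parsed by
 * get_envparam_size(); ratio/factor variables go through get_envparam_double()
 * and are range-checked against the bounds passed in ruby_gc_set_params() below. */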
12102
12103void
12104ruby_gc_set_params(void)
12105{
12106 rb_objspace_t *objspace = &rb_objspace;
12107 /* RUBY_GC_HEAP_FREE_SLOTS */
12108 if (get_envparam_size("RUBY_GC_HEAP_FREE_SLOTS", &gc_params.heap_free_slots, 0)) {
12109 /* ok */
12110 }
12111
12112 gc_set_initial_pages(objspace);
12113
12114 get_envparam_double("RUBY_GC_HEAP_GROWTH_FACTOR", &gc_params.growth_factor, 1.0, 0.0, FALSE);
12115 get_envparam_size ("RUBY_GC_HEAP_GROWTH_MAX_SLOTS", &gc_params.growth_max_slots, 0);
12116 get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO", &gc_params.heap_free_slots_min_ratio,
12117 0.0, 1.0, FALSE);
12118 get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO", &gc_params.heap_free_slots_max_ratio,
12119 gc_params.heap_free_slots_min_ratio, 1.0, FALSE);
12120 get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO", &gc_params.heap_free_slots_goal_ratio,
12121 gc_params.heap_free_slots_min_ratio, gc_params.heap_free_slots_max_ratio, TRUE);
12122 get_envparam_double("RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR", &gc_params.oldobject_limit_factor, 0.0, 0.0, TRUE);
12123 get_envparam_double("RUBY_GC_HEAP_REMEMBERED_WB_UNPROTECTED_OBJECTS_LIMIT_RATIO", &gc_params.uncollectible_wb_unprotected_objects_limit_ratio, 0.0, 0.0, TRUE);
12124
12125 if (get_envparam_size("RUBY_GC_MALLOC_LIMIT", &gc_params.malloc_limit_min, 0)) {
12126 malloc_limit = gc_params.malloc_limit_min;
12127 }
12128 get_envparam_size ("RUBY_GC_MALLOC_LIMIT_MAX", &gc_params.malloc_limit_max, 0);
12129 if (!gc_params.malloc_limit_max) { /* ignore max-check if 0 */
12130 gc_params.malloc_limit_max = SIZE_MAX;
12131 }
12132 get_envparam_double("RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR", &gc_params.malloc_limit_growth_factor, 1.0, 0.0, FALSE);
12133
12134#if RGENGC_ESTIMATE_OLDMALLOC
12135 if (get_envparam_size("RUBY_GC_OLDMALLOC_LIMIT", &gc_params.oldmalloc_limit_min, 0)) {
12136 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
12137 }
12138 get_envparam_size ("RUBY_GC_OLDMALLOC_LIMIT_MAX", &gc_params.oldmalloc_limit_max, 0);
12139 get_envparam_double("RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR", &gc_params.oldmalloc_limit_growth_factor, 1.0, 0.0, FALSE);
12140#endif
12141}
12142
12143static void
12144reachable_objects_from_callback(VALUE obj)
12145{
12146 rb_ractor_t *cr = GET_RACTOR();
12147 cr->mfd->mark_func(obj, cr->mfd->data);
12148}
12149
12150void
12151rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data)
12152{
12153 rb_objspace_t *objspace = &rb_objspace;
12154
12155 RB_VM_LOCK_ENTER();
12156 {
12157 if (during_gc) rb_bug("rb_objspace_reachable_objects_from() is not supported while during_gc == true");
12158
12159 if (is_markable_object(obj)) {
12160 rb_ractor_t *cr = GET_RACTOR();
12161 struct gc_mark_func_data_struct mfd = {
12162 .mark_func = func,
12163 .data = data,
12164 }, *prev_mfd = cr->mfd;
12165
12166 cr->mfd = &mfd;
12167 gc_mark_children(objspace, obj);
12168 cr->mfd = prev_mfd;
12169 }
12170 }
12171 RB_VM_LOCK_LEAVE();
12172}
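/* Illustrative C-level caller of rb_objspace_reachable_objects_from() above
 * (a sketch with hypothetical names; ObjectSpace.reachable_objects_from in
 * ext/objspace is built on this entry point):
 *
 *   static void
 *   count_reachable(VALUE ref, void *data)
 *   {
 *       size_t *n = data;
 *       (*n)++;
 *   }
 *
 *   size_t n = 0;
 *   rb_objspace_reachable_objects_from(obj, count_reachable, &n);
 *
 * The callback runs while the VM lock is held (RB_VM_LOCK_ENTER above). */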
12173
12174struct root_objects_data {
12175    const char *category;
12176 void (*func)(const char *category, VALUE, void *);
12177 void *data;
12178};
12179
12180static void
12181root_objects_from(VALUE obj, void *ptr)
12182{
12183 const struct root_objects_data *data = (struct root_objects_data *)ptr;
12184 (*data->func)(data->category, obj, data->data);
12185}
12186
12187void
12188rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *passing_data)
12189{
12190 rb_objspace_t *objspace = &rb_objspace;
12191 objspace_reachable_objects_from_root(objspace, func, passing_data);
12192}
12193
12194static void
12195objspace_reachable_objects_from_root(rb_objspace_t *objspace, void (func)(const char *category, VALUE, void *), void *passing_data)
12196{
12197 if (during_gc) rb_bug("objspace_reachable_objects_from_root() is not supported while during_gc == true");
12198
12199 rb_ractor_t *cr = GET_RACTOR();
12200 struct root_objects_data data = {
12201 .func = func,
12202 .data = passing_data,
12203 };
12204 struct gc_mark_func_data_struct mfd = {
12205 .mark_func = root_objects_from,
12206 .data = &data,
12207 }, *prev_mfd = cr->mfd;
12208
12209 cr->mfd = &mfd;
12210 gc_mark_roots(objspace, &data.category);
12211 cr->mfd = prev_mfd;
12212}
12213
12214/*
12215 ------------------------ Extended allocator ------------------------
12216*/
12217
12218struct gc_raise_tag {
12219    VALUE exc;
12220 const char *fmt;
12221 va_list *ap;
12222};
12223
12224static void *
12225gc_vraise(void *ptr)
12226{
12227 struct gc_raise_tag *argv = ptr;
12228 rb_vraise(argv->exc, argv->fmt, *argv->ap);
12229 UNREACHABLE_RETURN(NULL);
12230}
12231
12232static void
12233gc_raise(VALUE exc, const char *fmt, ...)
12234{
12235 va_list ap;
12236 va_start(ap, fmt);
12237 struct gc_raise_tag argv = {
12238 exc, fmt, &ap,
12239 };
12240
12241 if (ruby_thread_has_gvl_p()) {
12242 gc_vraise(&argv);
12243        UNREACHABLE;
12244    }
12245 else if (ruby_native_thread_p()) {
12246 rb_thread_call_with_gvl(gc_vraise, &argv);
12247        UNREACHABLE;
12248    }
12249 else {
12250 /* Not in a ruby thread */
12251 fprintf(stderr, "%s", "[FATAL] ");
12252 vfprintf(stderr, fmt, ap);
12253 }
12254
12255 va_end(ap);
12256 abort();
12257}
12258
12259static void objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t size);
12260
12261static void
12262negative_size_allocation_error(const char *msg)
12263{
12264 gc_raise(rb_eNoMemError, "%s", msg);
12265}
12266
12267static void *
12268ruby_memerror_body(void *dummy)
12269{
12270 rb_memerror();
12271 return 0;
12272}
12273
12274NORETURN(static void ruby_memerror(void));
12276static void
12277ruby_memerror(void)
12278{
12279 if (ruby_thread_has_gvl_p()) {
12280 rb_memerror();
12281 }
12282 else {
12283 if (ruby_native_thread_p()) {
12284 rb_thread_call_with_gvl(ruby_memerror_body, 0);
12285 }
12286 else {
12287 /* no ruby thread */
12288 fprintf(stderr, "[FATAL] failed to allocate memory\n");
12289 }
12290 }
12291 exit(EXIT_FAILURE);
12292}
12293
12294void
12295rb_memerror(void)
12296{
12297 rb_execution_context_t *ec = GET_EC();
12298 rb_objspace_t *objspace = rb_objspace_of(rb_ec_vm_ptr(ec));
12299 VALUE exc;
12300
12301 if (0) {
12302        // Print out the pid and sleep, so you can attach a debugger to see what went wrong:
12303 fprintf(stderr, "rb_memerror pid=%"PRI_PIDT_PREFIX"d\n", getpid());
12304 sleep(60);
12305 }
12306
12307 if (during_gc) {
12308 // TODO: OMG!! How to implement it?
12309 gc_exit(objspace, gc_enter_event_rb_memerror, NULL);
12310 }
12311
12312 exc = nomem_error;
12313 if (!exc ||
12314 rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
12315 fprintf(stderr, "[FATAL] failed to allocate memory\n");
12316 exit(EXIT_FAILURE);
12317 }
12318 if (rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
12319 rb_ec_raised_clear(ec);
12320 }
12321 else {
12322 rb_ec_raised_set(ec, RAISED_NOMEMORY);
12323 exc = ruby_vm_special_exception_copy(exc);
12324 }
12325 ec->errinfo = exc;
12326 EC_JUMP_TAG(ec, TAG_RAISE);
12327}
12328
12329void *
12330rb_aligned_malloc(size_t alignment, size_t size)
12331{
12332 /* alignment must be a power of 2 */
12333 GC_ASSERT(((alignment - 1) & alignment) == 0);
12334 GC_ASSERT(alignment % sizeof(void*) == 0);
12335
12336 void *res;
12337
12338#if defined __MINGW32__
12339 res = __mingw_aligned_malloc(size, alignment);
12340#elif defined _WIN32
12341 void *_aligned_malloc(size_t, size_t);
12342 res = _aligned_malloc(size, alignment);
12343#elif defined(HAVE_POSIX_MEMALIGN)
12344 if (posix_memalign(&res, alignment, size) != 0) {
12345 return NULL;
12346 }
12347#elif defined(HAVE_MEMALIGN)
12348 res = memalign(alignment, size);
12349#else
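    /* Portable fallback (explanatory note): over-allocate by `alignment` plus one
     * pointer, round up to the next aligned address past that extra pointer slot,
     * and stash the original malloc() result immediately before the aligned region
     * so rb_aligned_free() can recover and free it. */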
12350 char* aligned;
12351 res = malloc(alignment + size + sizeof(void*));
12352 aligned = (char*)res + alignment + sizeof(void*);
12353 aligned -= ((VALUE)aligned & (alignment - 1));
12354 ((void**)aligned)[-1] = res;
12355 res = (void*)aligned;
12356#endif
12357
12358 GC_ASSERT((uintptr_t)res % alignment == 0);
12359
12360 return res;
12361}
12362
12363static void
12364rb_aligned_free(void *ptr, size_t size)
12365{
12366#if defined __MINGW32__
12367 __mingw_aligned_free(ptr);
12368#elif defined _WIN32
12369 _aligned_free(ptr);
12370#elif defined(HAVE_POSIX_MEMALIGN) || defined(HAVE_MEMALIGN)
12371 free(ptr);
12372#else
12373 free(((void**)ptr)[-1]);
12374#endif
12375}
12376
12377static inline size_t
12378objspace_malloc_size(rb_objspace_t *objspace, void *ptr, size_t hint)
12379{
12380#ifdef HAVE_MALLOC_USABLE_SIZE
12381 return malloc_usable_size(ptr);
12382#else
12383 return hint;
12384#endif
12385}
12386
12387enum memop_type {
12388 MEMOP_TYPE_MALLOC = 0,
12389 MEMOP_TYPE_FREE,
12390 MEMOP_TYPE_REALLOC
12391};
12392
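/* Explanatory note: atomically subtracts `sub` from `*var` using a compare-and-swap
 * retry loop, clamping at zero instead of letting the unsigned counter wrap around
 * when another thread has already shrunk it. */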
12393static inline void
12394atomic_sub_nounderflow(size_t *var, size_t sub)
12395{
12396 if (sub == 0) return;
12397
12398 while (1) {
12399 size_t val = *var;
12400 if (val < sub) sub = val;
12401 if (ATOMIC_SIZE_CAS(*var, val, val-sub) == val) break;
12402 }
12403}
12404
12405static void
12406objspace_malloc_gc_stress(rb_objspace_t *objspace)
12407{
12408 if (ruby_gc_stressful && ruby_native_thread_p()) {
12409 unsigned int reason = (GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP |
12410 GPR_FLAG_STRESS | GPR_FLAG_MALLOC);
12411
12412 if (gc_stress_full_mark_after_malloc_p()) {
12413 reason |= GPR_FLAG_FULL_MARK;
12414 }
12415 garbage_collect_with_gvl(objspace, reason);
12416 }
12417}
12418
12419static inline bool
12420objspace_malloc_increase_report(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
12421{
12422 if (0) fprintf(stderr, "increase - ptr: %p, type: %s, new_size: %"PRIdSIZE", old_size: %"PRIdSIZE"\n",
12423 mem,
12424 type == MEMOP_TYPE_MALLOC ? "malloc" :
12425 type == MEMOP_TYPE_FREE ? "free " :
12426 type == MEMOP_TYPE_REALLOC ? "realloc": "error",
12427 new_size, old_size);
12428 return false;
12429}
12430
12431static bool
12432objspace_malloc_increase_body(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
12433{
12434 if (new_size > old_size) {
12435 ATOMIC_SIZE_ADD(malloc_increase, new_size - old_size);
12436#if RGENGC_ESTIMATE_OLDMALLOC
12437 ATOMIC_SIZE_ADD(objspace->rgengc.oldmalloc_increase, new_size - old_size);
12438#endif
12439 }
12440 else {
12441 atomic_sub_nounderflow(&malloc_increase, old_size - new_size);
12442#if RGENGC_ESTIMATE_OLDMALLOC
12443 atomic_sub_nounderflow(&objspace->rgengc.oldmalloc_increase, old_size - new_size);
12444#endif
12445 }
12446
12447 if (type == MEMOP_TYPE_MALLOC) {
12448 retry:
12449 if (malloc_increase > malloc_limit && ruby_native_thread_p() && !dont_gc_val()) {
12450 if (ruby_thread_has_gvl_p() && is_lazy_sweeping(objspace)) {
12451 gc_rest(objspace); /* gc_rest can reduce malloc_increase */
12452 goto retry;
12453 }
12454 garbage_collect_with_gvl(objspace, GPR_FLAG_MALLOC);
12455 }
12456 }
12457
12458#if MALLOC_ALLOCATED_SIZE
12459 if (new_size >= old_size) {
12460 ATOMIC_SIZE_ADD(objspace->malloc_params.allocated_size, new_size - old_size);
12461 }
12462 else {
12463 size_t dec_size = old_size - new_size;
12464 size_t allocated_size = objspace->malloc_params.allocated_size;
12465
12466#if MALLOC_ALLOCATED_SIZE_CHECK
12467 if (allocated_size < dec_size) {
12468 rb_bug("objspace_malloc_increase: underflow malloc_params.allocated_size.");
12469 }
12470#endif
12471 atomic_sub_nounderflow(&objspace->malloc_params.allocated_size, dec_size);
12472 }
12473
12474 switch (type) {
12475 case MEMOP_TYPE_MALLOC:
12476 ATOMIC_SIZE_INC(objspace->malloc_params.allocations);
12477 break;
12478 case MEMOP_TYPE_FREE:
12479 {
12480 size_t allocations = objspace->malloc_params.allocations;
12481 if (allocations > 0) {
12482 atomic_sub_nounderflow(&objspace->malloc_params.allocations, 1);
12483 }
12484#if MALLOC_ALLOCATED_SIZE_CHECK
12485 else {
12486 GC_ASSERT(objspace->malloc_params.allocations > 0);
12487 }
12488#endif
12489 }
12490 break;
12491 case MEMOP_TYPE_REALLOC: /* ignore */ break;
12492 }
12493#endif
12494 return true;
12495}
12496
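/* Explanatory note: this macro is shaped as a degenerate for-loop so it can be used
 * either as a plain statement or with a trailing block.
 * objspace_malloc_increase_report() always returns false, so an attached block runs
 * exactly once, and the loop's "increment" then does the real accounting in
 * objspace_malloc_increase_body(). See objspace_xfree() below for the block form. */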
12497#define objspace_malloc_increase(...) \
12498 for (bool malloc_increase_done = objspace_malloc_increase_report(__VA_ARGS__); \
12499 !malloc_increase_done; \
12500 malloc_increase_done = objspace_malloc_increase_body(__VA_ARGS__))
12501
12502struct malloc_obj_info { /* 4 words */
12503 size_t size;
12504#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12505 size_t gen;
12506 const char *file;
12507 size_t line;
12508#endif
12509};
12510
12511#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12512const char *ruby_malloc_info_file;
12513int ruby_malloc_info_line;
12514#endif
12515
12516static inline size_t
12517objspace_malloc_prepare(rb_objspace_t *objspace, size_t size)
12518{
12519 if (size == 0) size = 1;
12520
12521#if CALC_EXACT_MALLOC_SIZE
12522 size += sizeof(struct malloc_obj_info);
12523#endif
12524
12525 return size;
12526}
12527
12528static bool
12529malloc_during_gc_p(rb_objspace_t *objspace)
12530{
12531 /* malloc is not allowed during GC when we're not using multiple ractors
12532 * (since ractors can run while another thread is sweeping) and when we
12533 * have the GVL (since if we don't have the GVL, we'll try to acquire the
12534 * GVL which will block and ensure the other thread finishes GC). */
12535 return during_gc && !dont_gc_val() && !rb_multi_ractor_p() && ruby_thread_has_gvl_p();
12536}
12537
12538static inline void *
12539objspace_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
12540{
12541 size = objspace_malloc_size(objspace, mem, size);
12542 objspace_malloc_increase(objspace, mem, size, 0, MEMOP_TYPE_MALLOC);
12543
12544#if CALC_EXACT_MALLOC_SIZE
12545 {
12546 struct malloc_obj_info *info = mem;
12547 info->size = size;
12548#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12549 info->gen = objspace->profile.count;
12550 info->file = ruby_malloc_info_file;
12551 info->line = info->file ? ruby_malloc_info_line : 0;
12552#endif
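        /* Explanatory note: hand back the address just past the bookkeeping header;
         * objspace_xrealloc() and objspace_xfree() step back by one
         * struct malloc_obj_info to find it again. */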
12553 mem = info + 1;
12554 }
12555#endif
12556
12557 return mem;
12558}
12559
12560#if defined(__GNUC__) && RUBY_DEBUG
12561#define RB_BUG_INSTEAD_OF_RB_MEMERROR 1
12562#endif
12563
12564#ifndef RB_BUG_INSTEAD_OF_RB_MEMERROR
12565# define RB_BUG_INSTEAD_OF_RB_MEMERROR 0
12566#endif
12567
12568#define GC_MEMERROR(...) \
12569 ((RB_BUG_INSTEAD_OF_RB_MEMERROR+0) ? rb_bug("" __VA_ARGS__) : rb_memerror())
12570
12571#define TRY_WITH_GC(siz, expr) do { \
12572 const gc_profile_record_flag gpr = \
12573 GPR_FLAG_FULL_MARK | \
12574 GPR_FLAG_IMMEDIATE_MARK | \
12575 GPR_FLAG_IMMEDIATE_SWEEP | \
12576 GPR_FLAG_MALLOC; \
12577 objspace_malloc_gc_stress(objspace); \
12578 \
12579 if (LIKELY((expr))) { \
12580 /* Success on 1st try */ \
12581 } \
12582 else if (!garbage_collect_with_gvl(objspace, gpr)) { \
12583 /* @shyouhei thinks this doesn't happen */ \
12584 GC_MEMERROR("TRY_WITH_GC: could not GC"); \
12585 } \
12586 else if ((expr)) { \
12587 /* Success on 2nd try */ \
12588 } \
12589 else { \
12590 GC_MEMERROR("TRY_WITH_GC: could not allocate:" \
12591 "%"PRIdSIZE" bytes for %s", \
12592 siz, # expr); \
12593 } \
12594 } while (0)
12595
12596static void
12597check_malloc_not_in_gc(rb_objspace_t *objspace, const char *msg)
12598{
12599 if (UNLIKELY(malloc_during_gc_p(objspace))) {
12600 dont_gc_on();
12601 during_gc = false;
12602 rb_bug("Cannot %s during GC", msg);
12603 }
12604}
12605
12606/* these shouldn't be called directly.
12607 * objspace_* functions do not check allocation size.
12608 */
12609static void *
12610objspace_xmalloc0(rb_objspace_t *objspace, size_t size)
12611{
12612 check_malloc_not_in_gc(objspace, "malloc");
12613
12614 void *mem;
12615
12616 size = objspace_malloc_prepare(objspace, size);
12617 TRY_WITH_GC(size, mem = malloc(size));
12618 RB_DEBUG_COUNTER_INC(heap_xmalloc);
12619 return objspace_malloc_fixup(objspace, mem, size);
12620}
12621
12622static inline size_t
12623xmalloc2_size(const size_t count, const size_t elsize)
12624{
12625 return size_mul_or_raise(count, elsize, rb_eArgError);
12626}
12627
12628static void *
12629objspace_xrealloc(rb_objspace_t *objspace, void *ptr, size_t new_size, size_t old_size)
12630{
12631 check_malloc_not_in_gc(objspace, "realloc");
12632
12633 void *mem;
12634
12635 if (!ptr) return objspace_xmalloc0(objspace, new_size);
12636
12637 /*
12638 * The behavior of realloc(ptr, 0) is implementation defined.
12639     * Therefore we don't use realloc(ptr, 0) for portability reasons.
12640 * see http://www.open-std.org/jtc1/sc22/wg14/www/docs/dr_400.htm
12641 */
12642 if (new_size == 0) {
12643 if ((mem = objspace_xmalloc0(objspace, 0)) != NULL) {
12644 /*
12645 * - OpenBSD's malloc(3) man page says that when 0 is passed, it
12646 * returns a non-NULL pointer to an access-protected memory page.
12647 * The returned pointer cannot be read / written at all, but
12648 * still be a valid argument of free().
12649 *
12650 * https://man.openbsd.org/malloc.3
12651 *
12652     * - Linux's malloc(3) man page says that it _may_ return
12653 * a non-NULL pointer when its argument is 0. That return value
12654 * is safe (and is expected) to be passed to free().
12655 *
12656 * https://man7.org/linux/man-pages/man3/malloc.3.html
12657 *
12658     * - As I read the implementation, jemalloc's malloc() returns a fully
12659     *   normal 16-byte memory region when its argument is 0.
12660     *
12661     * - As I read the implementation, musl libc's malloc() returns a fully
12662     *   normal 32-byte memory region when its argument is 0.
12663 *
12664 * - Other malloc implementations can also return non-NULL.
12665 */
12666 objspace_xfree(objspace, ptr, old_size);
12667 return mem;
12668 }
12669 else {
12670 /*
12671 * It is dangerous to return NULL here, because that could lead to
12672 * RCE. Fallback to 1 byte instead of zero.
12673 *
12674 * https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-11932
12675 */
12676 new_size = 1;
12677 }
12678 }
12679
12680#if CALC_EXACT_MALLOC_SIZE
12681 {
12682 struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
12683 new_size += sizeof(struct malloc_obj_info);
12684 ptr = info;
12685 old_size = info->size;
12686 }
12687#endif
12688
12689 old_size = objspace_malloc_size(objspace, ptr, old_size);
12690 TRY_WITH_GC(new_size, mem = RB_GNUC_EXTENSION_BLOCK(realloc(ptr, new_size)));
12691 new_size = objspace_malloc_size(objspace, mem, new_size);
12692
12693#if CALC_EXACT_MALLOC_SIZE
12694 {
12695 struct malloc_obj_info *info = mem;
12696 info->size = new_size;
12697 mem = info + 1;
12698 }
12699#endif
12700
12701 objspace_malloc_increase(objspace, mem, new_size, old_size, MEMOP_TYPE_REALLOC);
12702
12703 RB_DEBUG_COUNTER_INC(heap_xrealloc);
12704 return mem;
12705}
12706
12707#if CALC_EXACT_MALLOC_SIZE && USE_GC_MALLOC_OBJ_INFO_DETAILS
12708
12709#define MALLOC_INFO_GEN_SIZE 100
12710#define MALLOC_INFO_SIZE_SIZE 10
12711static size_t malloc_info_gen_cnt[MALLOC_INFO_GEN_SIZE];
12712static size_t malloc_info_gen_size[MALLOC_INFO_GEN_SIZE];
12713static size_t malloc_info_size[MALLOC_INFO_SIZE_SIZE+1];
12714static st_table *malloc_info_file_table;
12715
12716static int
12717mmalloc_info_file_i(st_data_t key, st_data_t val, st_data_t dmy)
12718{
12719 const char *file = (void *)key;
12720 const size_t *data = (void *)val;
12721
12722 fprintf(stderr, "%s\t%"PRIdSIZE"\t%"PRIdSIZE"\n", file, data[0], data[1]);
12723
12724 return ST_CONTINUE;
12725}
12726
12727__attribute__((destructor))
12728void
12729rb_malloc_info_show_results(void)
12730{
12731 int i;
12732
12733 fprintf(stderr, "* malloc_info gen statistics\n");
12734 for (i=0; i<MALLOC_INFO_GEN_SIZE; i++) {
12735 if (i == MALLOC_INFO_GEN_SIZE-1) {
12736 fprintf(stderr, "more\t%"PRIdSIZE"\t%"PRIdSIZE"\n", malloc_info_gen_cnt[i], malloc_info_gen_size[i]);
12737 }
12738 else {
12739 fprintf(stderr, "%d\t%"PRIdSIZE"\t%"PRIdSIZE"\n", i, malloc_info_gen_cnt[i], malloc_info_gen_size[i]);
12740 }
12741 }
12742
12743 fprintf(stderr, "* malloc_info size statistics\n");
12744 for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
12745 int s = 16 << i;
12746 fprintf(stderr, "%d\t%"PRIdSIZE"\n", s, malloc_info_size[i]);
12747 }
12748 fprintf(stderr, "more\t%"PRIdSIZE"\n", malloc_info_size[i]);
12749
12750 if (malloc_info_file_table) {
12751 fprintf(stderr, "* malloc_info file statistics\n");
12752 st_foreach(malloc_info_file_table, mmalloc_info_file_i, 0);
12753 }
12754}
12755#else
12756void
12757rb_malloc_info_show_results(void)
12758{
12759}
12760#endif
12761
12762static void
12763objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t old_size)
12764{
12765 if (!ptr) {
12766 /*
12767         * ISO/IEC 9899 says "If ptr is a null pointer, no action occurs" since
12768         * its first version. We had better follow that.
12769 */
12770 return;
12771 }
12772#if CALC_EXACT_MALLOC_SIZE
12773 struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
12774 ptr = info;
12775 old_size = info->size;
12776
12777#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12778 {
12779 int gen = (int)(objspace->profile.count - info->gen);
12780 int gen_index = gen >= MALLOC_INFO_GEN_SIZE ? MALLOC_INFO_GEN_SIZE-1 : gen;
12781 int i;
12782
12783 malloc_info_gen_cnt[gen_index]++;
12784 malloc_info_gen_size[gen_index] += info->size;
12785
12786 for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
12787 size_t s = 16 << i;
12788 if (info->size <= s) {
12789 malloc_info_size[i]++;
12790 goto found;
12791 }
12792 }
12793 malloc_info_size[i]++;
12794 found:;
12795
12796 {
12797 st_data_t key = (st_data_t)info->file, d;
12798 size_t *data;
12799
12800 if (malloc_info_file_table == NULL) {
12801 malloc_info_file_table = st_init_numtable_with_size(1024);
12802 }
12803 if (st_lookup(malloc_info_file_table, key, &d)) {
12804 /* hit */
12805 data = (size_t *)d;
12806 }
12807 else {
12808 data = malloc(xmalloc2_size(2, sizeof(size_t)));
12809 if (data == NULL) rb_bug("objspace_xfree: can not allocate memory");
12810 data[0] = data[1] = 0;
12811 st_insert(malloc_info_file_table, key, (st_data_t)data);
12812 }
12813 data[0] ++;
12814 data[1] += info->size;
12815 };
12816 if (0 && gen >= 2) { /* verbose output */
12817 if (info->file) {
12818 fprintf(stderr, "free - size:%"PRIdSIZE", gen:%d, pos: %s:%"PRIdSIZE"\n",
12819 info->size, gen, info->file, info->line);
12820 }
12821 else {
12822 fprintf(stderr, "free - size:%"PRIdSIZE", gen:%d\n",
12823 info->size, gen);
12824 }
12825 }
12826 }
12827#endif
12828#endif
12829 old_size = objspace_malloc_size(objspace, ptr, old_size);
12830
12831 objspace_malloc_increase(objspace, ptr, 0, old_size, MEMOP_TYPE_FREE) {
12832 free(ptr);
12833 ptr = NULL;
12834 RB_DEBUG_COUNTER_INC(heap_xfree);
12835 }
12836}
12837
12838static void *
12839ruby_xmalloc0(size_t size)
12840{
12841 return objspace_xmalloc0(&rb_objspace, size);
12842}
12843
12844void *
12845ruby_xmalloc_body(size_t size)
12846{
12847 if ((ssize_t)size < 0) {
12848 negative_size_allocation_error("too large allocation size");
12849 }
12850 return ruby_xmalloc0(size);
12851}
12852
12853void
12854ruby_malloc_size_overflow(size_t count, size_t elsize)
12855{
12856 rb_raise(rb_eArgError,
12857 "malloc: possible integer overflow (%"PRIuSIZE"*%"PRIuSIZE")",
12858 count, elsize);
12859}
12860
12861void *
12862ruby_xmalloc2_body(size_t n, size_t size)
12863{
12864 return objspace_xmalloc0(&rb_objspace, xmalloc2_size(n, size));
12865}
12866
12867static void *
12868objspace_xcalloc(rb_objspace_t *objspace, size_t size)
12869{
12870 if (UNLIKELY(malloc_during_gc_p(objspace))) {
12871 rb_warn("calloc during GC detected, this could cause crashes if it triggers another GC");
12872#if RGENGC_CHECK_MODE || RUBY_DEBUG
12873 rb_bug("Cannot calloc during GC");
12874#endif
12875 }
12876
12877 void *mem;
12878
12879 size = objspace_malloc_prepare(objspace, size);
12880 TRY_WITH_GC(size, mem = calloc1(size));
12881 return objspace_malloc_fixup(objspace, mem, size);
12882}
12883
12884void *
12885ruby_xcalloc_body(size_t n, size_t size)
12886{
12887 return objspace_xcalloc(&rb_objspace, xmalloc2_size(n, size));
12888}
12889
12890#ifdef ruby_sized_xrealloc
12891#undef ruby_sized_xrealloc
12892#endif
12893void *
12894ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size)
12895{
12896 if ((ssize_t)new_size < 0) {
12897 negative_size_allocation_error("too large allocation size");
12898 }
12899
12900 return objspace_xrealloc(&rb_objspace, ptr, new_size, old_size);
12901}
12902
12903void *
12904ruby_xrealloc_body(void *ptr, size_t new_size)
12905{
12906 return ruby_sized_xrealloc(ptr, new_size, 0);
12907}
12908
12909#ifdef ruby_sized_xrealloc2
12910#undef ruby_sized_xrealloc2
12911#endif
12912void *
12913ruby_sized_xrealloc2(void *ptr, size_t n, size_t size, size_t old_n)
12914{
12915 size_t len = xmalloc2_size(n, size);
12916 return objspace_xrealloc(&rb_objspace, ptr, len, old_n * size);
12917}
12918
12919void *
12920ruby_xrealloc2_body(void *ptr, size_t n, size_t size)
12921{
12922 return ruby_sized_xrealloc2(ptr, n, size, 0);
12923}
12924
12925#ifdef ruby_sized_xfree
12926#undef ruby_sized_xfree
12927#endif
12928void
12929ruby_sized_xfree(void *x, size_t size)
12930{
12931 if (LIKELY(x)) {
12932 /* It's possible for a C extension's pthread destructor function set by pthread_key_create
12933 * to be called after ruby_vm_destruct and attempt to free memory. Fall back to mimfree in
12934 * that case. */
12935 if (LIKELY(GET_VM())) {
12936 objspace_xfree(&rb_objspace, x, size);
12937 }
12938 else {
12939 ruby_mimfree(x);
12940 }
12941 }
12942}
12943
12944void
12945ruby_xfree(void *x)
12946{
12947 ruby_sized_xfree(x, 0);
12948}
12949
12950void *
12951rb_xmalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
12952{
12953 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
12954 return ruby_xmalloc(w);
12955}
12956
12957void *
12958rb_xcalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
12959{
12960 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
12961 return ruby_xcalloc(w, 1);
12962}
12963
12964void *
12965rb_xrealloc_mul_add(const void *p, size_t x, size_t y, size_t z) /* x * y + z */
12966{
12967 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
12968 return ruby_xrealloc((void *)p, w);
12969}
12970
12971void *
12972rb_xmalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
12973{
12974 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
12975 return ruby_xmalloc(u);
12976}
12977
12978void *
12979rb_xcalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
12980{
12981 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
12982 return ruby_xcalloc(u, 1);
12983}
12984
12985/* Mimics ruby_xmalloc, but does not require rb_objspace.
12986 * Should return a pointer suitable for ruby_xfree.
12987 */
12988void *
12989ruby_mimmalloc(size_t size)
12990{
12991 void *mem;
12992#if CALC_EXACT_MALLOC_SIZE
12993 size += sizeof(struct malloc_obj_info);
12994#endif
12995 mem = malloc(size);
12996#if CALC_EXACT_MALLOC_SIZE
12997 if (!mem) {
12998 return NULL;
12999 }
13000 else
13001 /* set 0 for consistency of allocated_size/allocations */
13002 {
13003 struct malloc_obj_info *info = mem;
13004 info->size = 0;
13005#if USE_GC_MALLOC_OBJ_INFO_DETAILS
13006 info->gen = 0;
13007 info->file = NULL;
13008 info->line = 0;
13009#endif
13010 mem = info + 1;
13011 }
13012#endif
13013 return mem;
13014}
13015
13016void
13017ruby_mimfree(void *ptr)
13018{
13019#if CALC_EXACT_MALLOC_SIZE
13020 struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
13021 ptr = info;
13022#endif
13023 free(ptr);
13024}
13025
13026void *
13027rb_alloc_tmp_buffer_with_count(volatile VALUE *store, size_t size, size_t cnt)
13028{
13029 void *ptr;
13030 VALUE imemo;
13031 rb_imemo_tmpbuf_t *tmpbuf;
13032
13033    /* Keep the order: allocate an empty imemo first, then xmalloc, to
13034     * avoid a potential memory leak. */
13035 imemo = rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(NULL, 0);
13036 *store = imemo;
13037 ptr = ruby_xmalloc0(size);
13038 tmpbuf = (rb_imemo_tmpbuf_t *)imemo;
13039 tmpbuf->ptr = ptr;
13040 tmpbuf->cnt = cnt;
13041 return ptr;
13042}
13043
13044void *
13045rb_alloc_tmp_buffer(volatile VALUE *store, long len)
13046{
13047 long cnt;
13048
13049 if (len < 0 || (cnt = (long)roomof(len, sizeof(VALUE))) < 0) {
13050 rb_raise(rb_eArgError, "negative buffer size (or size too big)");
13051 }
13052
13053 return rb_alloc_tmp_buffer_with_count(store, len, cnt);
13054}
13055
13056void
13057rb_free_tmp_buffer(volatile VALUE *store)
13058{
13059 rb_imemo_tmpbuf_t *s = (rb_imemo_tmpbuf_t*)ATOMIC_VALUE_EXCHANGE(*store, 0);
13060 if (s) {
13061 void *ptr = ATOMIC_PTR_EXCHANGE(s->ptr, 0);
13062 s->cnt = 0;
13063 ruby_xfree(ptr);
13064 }
13065}
13066
13067#if MALLOC_ALLOCATED_SIZE
13068/*
13069 * call-seq:
13070 * GC.malloc_allocated_size -> Integer
13071 *
13072 * Returns the size of memory allocated by malloc().
13073 *
13074 * Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
13075 */
13076
13077static VALUE
13078gc_malloc_allocated_size(VALUE self)
13079{
13080 return UINT2NUM(rb_objspace.malloc_params.allocated_size);
13081}
13082
13083/*
13084 * call-seq:
13085 * GC.malloc_allocations -> Integer
13086 *
13087 * Returns the number of malloc() allocations.
13088 *
13089 * Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
13090 */
13091
13092static VALUE
13093gc_malloc_allocations(VALUE self)
13094{
13095 return UINT2NUM(rb_objspace.malloc_params.allocations);
13096}
13097#endif
13098
13099void
13100rb_gc_adjust_memory_usage(ssize_t diff)
13101{
13102 unless_objspace(objspace) { return; }
13103
13104 if (diff > 0) {
13105 objspace_malloc_increase(objspace, 0, diff, 0, MEMOP_TYPE_REALLOC);
13106 }
13107 else if (diff < 0) {
13108 objspace_malloc_increase(objspace, 0, 0, -diff, MEMOP_TYPE_REALLOC);
13109 }
13110}
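/* Illustrative use from a C extension (a sketch; external_alloc/external_free are
 * hypothetical helpers):
 *
 *   void *buf = external_alloc(len);
 *   rb_gc_adjust_memory_usage((ssize_t)len);    // report off-heap growth
 *   ...
 *   external_free(buf);
 *   rb_gc_adjust_memory_usage(-(ssize_t)len);   // report the release
 *
 * Positive diffs are accounted like a realloc growth and negative diffs like a
 * shrink, via objspace_malloc_increase() above. */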
13111
13112/*
13113 ------------------------------ GC profiler ------------------------------
13114*/
13115
13116#define GC_PROFILE_RECORD_DEFAULT_SIZE 100
13117
13118static bool
13119current_process_time(struct timespec *ts)
13120{
13121#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_PROCESS_CPUTIME_ID)
13122 {
13123 static int try_clock_gettime = 1;
13124 if (try_clock_gettime && clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ts) == 0) {
13125 return true;
13126 }
13127 else {
13128 try_clock_gettime = 0;
13129 }
13130 }
13131#endif
13132
13133#ifdef RUSAGE_SELF
13134 {
13135 struct rusage usage;
13136 struct timeval time;
13137 if (getrusage(RUSAGE_SELF, &usage) == 0) {
13138 time = usage.ru_utime;
13139 ts->tv_sec = time.tv_sec;
13140 ts->tv_nsec = (int32_t)time.tv_usec * 1000;
13141 return true;
13142 }
13143 }
13144#endif
13145
13146#ifdef _WIN32
13147 {
13148 FILETIME creation_time, exit_time, kernel_time, user_time;
13149 ULARGE_INTEGER ui;
13150
13151 if (GetProcessTimes(GetCurrentProcess(),
13152 &creation_time, &exit_time, &kernel_time, &user_time) != 0) {
13153 memcpy(&ui, &user_time, sizeof(FILETIME));
13154#define PER100NSEC (uint64_t)(1000 * 1000 * 10)
13155 ts->tv_nsec = (long)(ui.QuadPart % PER100NSEC);
13156 ts->tv_sec = (time_t)(ui.QuadPart / PER100NSEC);
13157 return true;
13158 }
13159 }
13160#endif
13161
13162 return false;
13163}
13164
13165static double
13166getrusage_time(void)
13167{
13168 struct timespec ts;
13169 if (current_process_time(&ts)) {
13170 return ts.tv_sec + ts.tv_nsec * 1e-9;
13171 }
13172 else {
13173 return 0.0;
13174 }
13175}
13176
13177
13178static inline void
13179gc_prof_setup_new_record(rb_objspace_t *objspace, unsigned int reason)
13180{
13181 if (objspace->profile.run) {
13182 size_t index = objspace->profile.next_index;
13183 gc_profile_record *record;
13184
13185 /* create new record */
13186 objspace->profile.next_index++;
13187
13188 if (!objspace->profile.records) {
13189 objspace->profile.size = GC_PROFILE_RECORD_DEFAULT_SIZE;
13190 objspace->profile.records = malloc(xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));
13191 }
13192 if (index >= objspace->profile.size) {
13193 void *ptr;
13194 objspace->profile.size += 1000;
13195 ptr = realloc(objspace->profile.records, xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));
13196 if (!ptr) rb_memerror();
13197 objspace->profile.records = ptr;
13198 }
13199 if (!objspace->profile.records) {
13200 rb_bug("gc_profile malloc or realloc miss");
13201 }
13202 record = objspace->profile.current_record = &objspace->profile.records[objspace->profile.next_index - 1];
13203 MEMZERO(record, gc_profile_record, 1);
13204
13205 /* setup before-GC parameter */
13206 record->flags = reason | (ruby_gc_stressful ? GPR_FLAG_STRESS : 0);
13207#if MALLOC_ALLOCATED_SIZE
13208 record->allocated_size = malloc_allocated_size;
13209#endif
13210#if GC_PROFILE_MORE_DETAIL && GC_PROFILE_DETAIL_MEMORY
13211#ifdef RUSAGE_SELF
13212 {
13213 struct rusage usage;
13214 if (getrusage(RUSAGE_SELF, &usage) == 0) {
13215 record->maxrss = usage.ru_maxrss;
13216 record->minflt = usage.ru_minflt;
13217 record->majflt = usage.ru_majflt;
13218 }
13219 }
13220#endif
13221#endif
13222 }
13223}
13224
13225static inline void
13226gc_prof_timer_start(rb_objspace_t *objspace)
13227{
13228 if (gc_prof_enabled(objspace)) {
13229 gc_profile_record *record = gc_prof_record(objspace);
13230#if GC_PROFILE_MORE_DETAIL
13231 record->prepare_time = objspace->profile.prepare_time;
13232#endif
13233 record->gc_time = 0;
13234 record->gc_invoke_time = getrusage_time();
13235 }
13236}
13237
13238static double
13239elapsed_time_from(double time)
13240{
13241 double now = getrusage_time();
13242 if (now > time) {
13243 return now - time;
13244 }
13245 else {
13246 return 0;
13247 }
13248}
13249
13250static inline void
13251gc_prof_timer_stop(rb_objspace_t *objspace)
13252{
13253 if (gc_prof_enabled(objspace)) {
13254 gc_profile_record *record = gc_prof_record(objspace);
13255 record->gc_time = elapsed_time_from(record->gc_invoke_time);
13256 record->gc_invoke_time -= objspace->profile.invoke_time;
13257 }
13258}
13259
13260#define RUBY_DTRACE_GC_HOOK(name) \
13261 do {if (RUBY_DTRACE_GC_##name##_ENABLED()) RUBY_DTRACE_GC_##name();} while (0)
13262static inline void
13263gc_prof_mark_timer_start(rb_objspace_t *objspace)
13264{
13265 RUBY_DTRACE_GC_HOOK(MARK_BEGIN);
13266#if GC_PROFILE_MORE_DETAIL
13267 if (gc_prof_enabled(objspace)) {
13268 gc_prof_record(objspace)->gc_mark_time = getrusage_time();
13269 }
13270#endif
13271}
13272
13273static inline void
13274gc_prof_mark_timer_stop(rb_objspace_t *objspace)
13275{
13276 RUBY_DTRACE_GC_HOOK(MARK_END);
13277#if GC_PROFILE_MORE_DETAIL
13278 if (gc_prof_enabled(objspace)) {
13279 gc_profile_record *record = gc_prof_record(objspace);
13280 record->gc_mark_time = elapsed_time_from(record->gc_mark_time);
13281 }
13282#endif
13283}
13284
13285static inline void
13286gc_prof_sweep_timer_start(rb_objspace_t *objspace)
13287{
13288 RUBY_DTRACE_GC_HOOK(SWEEP_BEGIN);
13289 if (gc_prof_enabled(objspace)) {
13290 gc_profile_record *record = gc_prof_record(objspace);
13291
13292 if (record->gc_time > 0 || GC_PROFILE_MORE_DETAIL) {
13293 objspace->profile.gc_sweep_start_time = getrusage_time();
13294 }
13295 }
13296}
13297
13298static inline void
13299gc_prof_sweep_timer_stop(rb_objspace_t *objspace)
13300{
13301 RUBY_DTRACE_GC_HOOK(SWEEP_END);
13302
13303 if (gc_prof_enabled(objspace)) {
13304 double sweep_time;
13305 gc_profile_record *record = gc_prof_record(objspace);
13306
13307 if (record->gc_time > 0) {
13308 sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
13309 /* need to accumulate GC time for lazy sweep after gc() */
13310 record->gc_time += sweep_time;
13311 }
13312 else if (GC_PROFILE_MORE_DETAIL) {
13313 sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
13314 }
13315
13316#if GC_PROFILE_MORE_DETAIL
13317 record->gc_sweep_time += sweep_time;
13318 if (heap_pages_deferred_final) record->flags |= GPR_FLAG_HAVE_FINALIZE;
13319#endif
13320 if (heap_pages_deferred_final) objspace->profile.latest_gc_info |= GPR_FLAG_HAVE_FINALIZE;
13321 }
13322}
13323
13324static inline void
13325gc_prof_set_malloc_info(rb_objspace_t *objspace)
13326{
13327#if GC_PROFILE_MORE_DETAIL
13328 if (gc_prof_enabled(objspace)) {
13329 gc_profile_record *record = gc_prof_record(objspace);
13330 record->allocate_increase = malloc_increase;
13331 record->allocate_limit = malloc_limit;
13332 }
13333#endif
13334}
13335
13336static inline void
13337gc_prof_set_heap_info(rb_objspace_t *objspace)
13338{
13339 if (gc_prof_enabled(objspace)) {
13340 gc_profile_record *record = gc_prof_record(objspace);
13341 size_t live = objspace->profile.total_allocated_objects_at_gc_start - total_freed_objects(objspace);
13342 size_t total = objspace->profile.heap_used_at_gc_start * HEAP_PAGE_OBJ_LIMIT;
13343
13344#if GC_PROFILE_MORE_DETAIL
13345 record->heap_use_pages = objspace->profile.heap_used_at_gc_start;
13346 record->heap_live_objects = live;
13347 record->heap_free_objects = total - live;
13348#endif
13349
13350 record->heap_total_objects = total;
13351 record->heap_use_size = live * sizeof(RVALUE);
13352 record->heap_total_size = total * sizeof(RVALUE);
13353 }
13354}
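/*
 * Worked example: assuming a 40-byte base slot (sizeof(RVALUE) on a typical
 * 64-bit build, which matches the sample numbers in the GC::Profiler.raw_data
 * documentation below), 14724 total slots give
 * heap_total_size = 14724 * 40 = 588960 bytes, and 7241 live objects give
 * heap_use_size = 7241 * 40 = 289640 bytes.
 */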
13355
13356/*
13357 * call-seq:
13358 * GC::Profiler.clear -> nil
13359 *
13360 * Clears the \GC profiler data.
13361 *
13362 */
13363
13364static VALUE
13365gc_profile_clear(VALUE _)
13366{
13367 rb_objspace_t *objspace = &rb_objspace;
13368 void *p = objspace->profile.records;
13369 objspace->profile.records = NULL;
13370 objspace->profile.size = 0;
13371 objspace->profile.next_index = 0;
13372 objspace->profile.current_record = 0;
13373 free(p);
13374 return Qnil;
13375}
13376
13377/*
13378 * call-seq:
13379 * GC::Profiler.raw_data -> [Hash, ...]
13380 *
13381 * Returns an Array of individual raw profile data Hashes ordered
13382 * from earliest to latest by +:GC_INVOKE_TIME+.
13383 *
13384 * For example:
13385 *
13386 * [
13387 * {
13388 * :GC_TIME=>1.3000000000000858e-05,
13389 * :GC_INVOKE_TIME=>0.010634999999999999,
13390 * :HEAP_USE_SIZE=>289640,
13391 * :HEAP_TOTAL_SIZE=>588960,
13392 * :HEAP_TOTAL_OBJECTS=>14724,
13393 * :GC_IS_MARKED=>false
13394 * },
13395 * # ...
13396 * ]
13397 *
13398 * The keys mean:
13399 *
13400 * +:GC_TIME+::
13401 * Time elapsed in seconds for this GC run
13402 * +:GC_INVOKE_TIME+::
13403 * Time elapsed in seconds from startup to when the GC was invoked
13404 * +:HEAP_USE_SIZE+::
13405 * Total bytes of heap used
13406 * +:HEAP_TOTAL_SIZE+::
13407 * Total size of heap in bytes
13408 * +:HEAP_TOTAL_OBJECTS+::
13409 * Total number of objects
13410 * +:GC_IS_MARKED+::
13411 * Returns +true+ if the GC is in mark phase
13412 *
13413 * If ruby was built with +GC_PROFILE_MORE_DETAIL+, you will also have access
13414 * to the following hash keys:
13415 *
13416 * +:GC_MARK_TIME+::
13417 * +:GC_SWEEP_TIME+::
13418 * +:ALLOCATE_INCREASE+::
13419 * +:ALLOCATE_LIMIT+::
13420 * +:HEAP_USE_PAGES+::
13421 * +:HEAP_LIVE_OBJECTS+::
13422 * +:HEAP_FREE_OBJECTS+::
13423 * +:HAVE_FINALIZE+::
13424 *
13425 */
13426
13427static VALUE
13428gc_profile_record_get(VALUE _)
13429{
13430 VALUE prof;
13431 VALUE gc_profile = rb_ary_new();
13432 size_t i;
13433 rb_objspace_t *objspace = (&rb_objspace);
13434
13435 if (!objspace->profile.run) {
13436 return Qnil;
13437 }
13438
13439    for (i = 0; i < objspace->profile.next_index; i++) {
13440 gc_profile_record *record = &objspace->profile.records[i];
13441
13442 prof = rb_hash_new();
13443 rb_hash_aset(prof, ID2SYM(rb_intern("GC_FLAGS")), gc_info_decode(objspace, rb_hash_new(), record->flags));
13444 rb_hash_aset(prof, ID2SYM(rb_intern("GC_TIME")), DBL2NUM(record->gc_time));
13445 rb_hash_aset(prof, ID2SYM(rb_intern("GC_INVOKE_TIME")), DBL2NUM(record->gc_invoke_time));
13446 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SIZE")), SIZET2NUM(record->heap_use_size));
13447 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_SIZE")), SIZET2NUM(record->heap_total_size));
13448 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_OBJECTS")), SIZET2NUM(record->heap_total_objects));
13449 rb_hash_aset(prof, ID2SYM(rb_intern("MOVED_OBJECTS")), SIZET2NUM(record->moved_objects));
13450 rb_hash_aset(prof, ID2SYM(rb_intern("GC_IS_MARKED")), Qtrue);
13451#if GC_PROFILE_MORE_DETAIL
13452 rb_hash_aset(prof, ID2SYM(rb_intern("GC_MARK_TIME")), DBL2NUM(record->gc_mark_time));
13453 rb_hash_aset(prof, ID2SYM(rb_intern("GC_SWEEP_TIME")), DBL2NUM(record->gc_sweep_time));
13454 rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_INCREASE")), SIZET2NUM(record->allocate_increase));
13455 rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_LIMIT")), SIZET2NUM(record->allocate_limit));
13456 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_PAGES")), SIZET2NUM(record->heap_use_pages));
13457 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_LIVE_OBJECTS")), SIZET2NUM(record->heap_live_objects));
13458 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_FREE_OBJECTS")), SIZET2NUM(record->heap_free_objects));
13459
13460 rb_hash_aset(prof, ID2SYM(rb_intern("REMOVING_OBJECTS")), SIZET2NUM(record->removing_objects));
13461 rb_hash_aset(prof, ID2SYM(rb_intern("EMPTY_OBJECTS")), SIZET2NUM(record->empty_objects));
13462
13463 rb_hash_aset(prof, ID2SYM(rb_intern("HAVE_FINALIZE")), RBOOL(record->flags & GPR_FLAG_HAVE_FINALIZE));
13464#endif
13465
13466#if RGENGC_PROFILE > 0
13467 rb_hash_aset(prof, ID2SYM(rb_intern("OLD_OBJECTS")), SIZET2NUM(record->old_objects));
13468 rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_NORMAL_OBJECTS")), SIZET2NUM(record->remembered_normal_objects));
13469 rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_SHADY_OBJECTS")), SIZET2NUM(record->remembered_shady_objects));
13470#endif
13471 rb_ary_push(gc_profile, prof);
13472 }
13473
13474 return gc_profile;
13475}
13476
13477#if GC_PROFILE_MORE_DETAIL
13478#define MAJOR_REASON_MAX 0x10
13479
13480static char *
13481gc_profile_dump_major_reason(unsigned int flags, char *buff)
13482{
13483 unsigned int reason = flags & GPR_FLAG_MAJOR_MASK;
13484 int i = 0;
13485
13486 if (reason == GPR_FLAG_NONE) {
13487 buff[0] = '-';
13488 buff[1] = 0;
13489 }
13490 else {
13491#define C(x, s) \
13492 if (reason & GPR_FLAG_MAJOR_BY_##x) { \
13493 buff[i++] = #x[0]; \
13494 if (i >= MAJOR_REASON_MAX) rb_bug("gc_profile_dump_major_reason: overflow"); \
13495 buff[i] = 0; \
13496 }
13497 C(NOFREE, N);
13498 C(OLDGEN, O);
13499 C(SHADY, S);
13500#if RGENGC_ESTIMATE_OLDMALLOC
13501 C(OLDMALLOC, M);
13502#endif
13503#undef C
13504 }
13505 return buff;
13506}
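/*
 * For example, a major GC forced by both old-generation growth (OLDGEN) and
 * shady objects (SHADY) dumps as "OS", while a minor GC with no major-reason
 * bits set dumps as "-".
 */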
13507#endif
13508
13509static void
13510gc_profile_dump_on(VALUE out, VALUE (*append)(VALUE, VALUE))
13511{
13512 rb_objspace_t *objspace = &rb_objspace;
13513 size_t count = objspace->profile.next_index;
13514#ifdef MAJOR_REASON_MAX
13515 char reason_str[MAJOR_REASON_MAX];
13516#endif
13517
13518 if (objspace->profile.run && count /* > 1 */) {
13519 size_t i;
13520 const gc_profile_record *record;
13521
13522 append(out, rb_sprintf("GC %"PRIuSIZE" invokes.\n", objspace->profile.count));
13523 append(out, rb_str_new_cstr("Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC Time(ms)\n"));
13524
13525 for (i = 0; i < count; i++) {
13526 record = &objspace->profile.records[i];
13527 append(out, rb_sprintf("%5"PRIuSIZE" %19.3f %20"PRIuSIZE" %20"PRIuSIZE" %20"PRIuSIZE" %30.20f\n",
13528 i+1, record->gc_invoke_time, record->heap_use_size,
13529 record->heap_total_size, record->heap_total_objects, record->gc_time*1000));
13530 }
13531
13532#if GC_PROFILE_MORE_DETAIL
13533 const char *str = "\n\n" \
13534 "More detail.\n" \
13535            "Prepare Time = Previous GC's remaining sweep time\n"
13536 "Index Flags Allocate Inc. Allocate Limit"
13537#if CALC_EXACT_MALLOC_SIZE
13538 " Allocated Size"
13539#endif
13540 " Use Page Mark Time(ms) Sweep Time(ms) Prepare Time(ms) LivingObj FreeObj RemovedObj EmptyObj"
13541#if RGENGC_PROFILE
13542 " OldgenObj RemNormObj RemShadObj"
13543#endif
13544#if GC_PROFILE_DETAIL_MEMORY
13545 " MaxRSS(KB) MinorFLT MajorFLT"
13546#endif
13547 "\n";
13548 append(out, rb_str_new_cstr(str));
13549
13550 for (i = 0; i < count; i++) {
13551 record = &objspace->profile.records[i];
13552 append(out, rb_sprintf("%5"PRIuSIZE" %4s/%c/%6s%c %13"PRIuSIZE" %15"PRIuSIZE
13553#if CALC_EXACT_MALLOC_SIZE
13554 " %15"PRIuSIZE
13555#endif
13556 " %9"PRIuSIZE" %17.12f %17.12f %17.12f %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
13557#if RGENGC_PROFILE
13558 "%10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
13559#endif
13560#if GC_PROFILE_DETAIL_MEMORY
13561 "%11ld %8ld %8ld"
13562#endif
13563
13564 "\n",
13565 i+1,
13566 gc_profile_dump_major_reason(record->flags, reason_str),
13567 (record->flags & GPR_FLAG_HAVE_FINALIZE) ? 'F' : '.',
13568 (record->flags & GPR_FLAG_NEWOBJ) ? "NEWOBJ" :
13569 (record->flags & GPR_FLAG_MALLOC) ? "MALLOC" :
13570 (record->flags & GPR_FLAG_METHOD) ? "METHOD" :
13571 (record->flags & GPR_FLAG_CAPI) ? "CAPI__" : "??????",
13572 (record->flags & GPR_FLAG_STRESS) ? '!' : ' ',
13573 record->allocate_increase, record->allocate_limit,
13574#if CALC_EXACT_MALLOC_SIZE
13575 record->allocated_size,
13576#endif
13577 record->heap_use_pages,
13578 record->gc_mark_time*1000,
13579 record->gc_sweep_time*1000,
13580 record->prepare_time*1000,
13581
13582 record->heap_live_objects,
13583 record->heap_free_objects,
13584 record->removing_objects,
13585 record->empty_objects
13586#if RGENGC_PROFILE
13587 ,
13588 record->old_objects,
13589 record->remembered_normal_objects,
13590 record->remembered_shady_objects
13591#endif
13592#if GC_PROFILE_DETAIL_MEMORY
13593 ,
13594 record->maxrss / 1024,
13595 record->minflt,
13596 record->majflt
13597#endif
13598
13599 ));
13600 }
13601#endif
13602 }
13603}
13604
13605/*
13606 * call-seq:
13607 * GC::Profiler.result -> String
13608 *
13609 * Returns a profile data report such as:
13610 *
13611 * GC 1 invokes.
13612 * Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC time(ms)
13613 * 1 0.012 159240 212940 10647 0.00000000000001530000
13614 */
13615
13616static VALUE
13617gc_profile_result(VALUE _)
13618{
13619 VALUE str = rb_str_buf_new(0);
13620 gc_profile_dump_on(str, rb_str_buf_append);
13621 return str;
13622}
13623
13624/*
13625 * call-seq:
13626 * GC::Profiler.report
13627 * GC::Profiler.report(io)
13628 *
13629 * Writes the GC::Profiler.result to <tt>$stdout</tt> or the given IO object.
13630 *
13631 */
13632
13633static VALUE
13634gc_profile_report(int argc, VALUE *argv, VALUE self)
13635{
13636 VALUE out;
13637
13638 out = (!rb_check_arity(argc, 0, 1) ? rb_stdout : argv[0]);
13639 gc_profile_dump_on(out, rb_io_write);
13640
13641 return Qnil;
13642}
13643
13644/*
13645 * call-seq:
13646 * GC::Profiler.total_time -> float
13647 *
13648 * The total time used for garbage collection in seconds
13649 */
13650
13651static VALUE
13652gc_profile_total_time(VALUE self)
13653{
13654 double time = 0;
13655 rb_objspace_t *objspace = &rb_objspace;
13656
13657 if (objspace->profile.run && objspace->profile.next_index > 0) {
13658 size_t i;
13659 size_t count = objspace->profile.next_index;
13660
13661 for (i = 0; i < count; i++) {
13662 time += objspace->profile.records[i].gc_time;
13663 }
13664 }
13665 return DBL2NUM(time);
13666}
13667
13668/*
13669 * call-seq:
13670 * GC::Profiler.enabled? -> true or false
13671 *
13672 * The current status of \GC profile mode.
13673 */
13674
13675static VALUE
13676gc_profile_enable_get(VALUE self)
13677{
13678 rb_objspace_t *objspace = &rb_objspace;
13679 return RBOOL(objspace->profile.run);
13680}
13681
13682/*
13683 * call-seq:
13684 * GC::Profiler.enable -> nil
13685 *
13686 * Starts the \GC profiler.
13687 *
13688 */
13689
13690static VALUE
13691gc_profile_enable(VALUE _)
13692{
13693 rb_objspace_t *objspace = &rb_objspace;
13694 objspace->profile.run = TRUE;
13695 objspace->profile.current_record = 0;
13696 return Qnil;
13697}
13698
13699/*
13700 * call-seq:
13701 * GC::Profiler.disable -> nil
13702 *
13703 * Stops the \GC profiler.
13704 *
13705 */
13706
13707static VALUE
13708gc_profile_disable(VALUE _)
13709{
13710 rb_objspace_t *objspace = &rb_objspace;
13711
13712 objspace->profile.run = FALSE;
13713 objspace->profile.current_record = 0;
13714 return Qnil;
13715}
13716
13717/*
13718 ------------------------------ DEBUG ------------------------------
13719*/
13720
13721static const char *
13722type_name(int type, VALUE obj)
13723{
13724 switch (type) {
13725#define TYPE_NAME(t) case (t): return #t;
13726 TYPE_NAME(T_NONE);
13727 TYPE_NAME(T_OBJECT);
13728 TYPE_NAME(T_CLASS);
13729 TYPE_NAME(T_MODULE);
13730 TYPE_NAME(T_FLOAT);
13731 TYPE_NAME(T_STRING);
13732 TYPE_NAME(T_REGEXP);
13733 TYPE_NAME(T_ARRAY);
13734 TYPE_NAME(T_HASH);
13735 TYPE_NAME(T_STRUCT);
13736 TYPE_NAME(T_BIGNUM);
13737 TYPE_NAME(T_FILE);
13738 TYPE_NAME(T_MATCH);
13739 TYPE_NAME(T_COMPLEX);
13740 TYPE_NAME(T_RATIONAL);
13741 TYPE_NAME(T_NIL);
13742 TYPE_NAME(T_TRUE);
13743 TYPE_NAME(T_FALSE);
13744 TYPE_NAME(T_SYMBOL);
13745 TYPE_NAME(T_FIXNUM);
13746 TYPE_NAME(T_UNDEF);
13747 TYPE_NAME(T_IMEMO);
13748 TYPE_NAME(T_ICLASS);
13749 TYPE_NAME(T_MOVED);
13750 TYPE_NAME(T_ZOMBIE);
13751 case T_DATA:
13752 if (obj && rb_objspace_data_type_name(obj)) {
13753 return rb_objspace_data_type_name(obj);
13754 }
13755 return "T_DATA";
13756#undef TYPE_NAME
13757 }
13758 return "unknown";
13759}
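/*
 * The TYPE_NAME() stringizing macro above expands e.g. TYPE_NAME(T_ARRAY)
 * into `case (T_ARRAY): return "T_ARRAY";`, keeping the case labels and the
 * returned names in sync automatically.
 */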
13760
13761static const char *
13762obj_type_name(VALUE obj)
13763{
13764 return type_name(TYPE(obj), obj);
13765}
13766
13767const char *
13768rb_method_type_name(rb_method_type_t type)
13769{
13770 switch (type) {
13771 case VM_METHOD_TYPE_ISEQ: return "iseq";
13772      case VM_METHOD_TYPE_ATTRSET: return "attrset";
13773 case VM_METHOD_TYPE_IVAR: return "ivar";
13774 case VM_METHOD_TYPE_BMETHOD: return "bmethod";
13775 case VM_METHOD_TYPE_ALIAS: return "alias";
13776 case VM_METHOD_TYPE_REFINED: return "refined";
13777 case VM_METHOD_TYPE_CFUNC: return "cfunc";
13778 case VM_METHOD_TYPE_ZSUPER: return "zsuper";
13779 case VM_METHOD_TYPE_MISSING: return "missing";
13780 case VM_METHOD_TYPE_OPTIMIZED: return "optimized";
13781 case VM_METHOD_TYPE_UNDEF: return "undef";
13782 case VM_METHOD_TYPE_NOTIMPLEMENTED: return "notimplemented";
13783 }
13784 rb_bug("rb_method_type_name: unreachable (type: %d)", type);
13785}
13786
13787static void
13788rb_raw_iseq_info(char *const buff, const size_t buff_size, const rb_iseq_t *iseq)
13789{
13790 if (buff_size > 0 && ISEQ_BODY(iseq) && ISEQ_BODY(iseq)->location.label && !RB_TYPE_P(ISEQ_BODY(iseq)->location.pathobj, T_MOVED)) {
13791 VALUE path = rb_iseq_path(iseq);
13792 int n = ISEQ_BODY(iseq)->location.first_lineno;
13793 snprintf(buff, buff_size, " %s@%s:%d",
13794 RSTRING_PTR(ISEQ_BODY(iseq)->location.label),
13795 RSTRING_PTR(path), n);
13796 }
13797}
13798
13799static int
13800str_len_no_raise(VALUE str)
13801{
13802 long len = RSTRING_LEN(str);
13803 if (len < 0) return 0;
13804 if (len > INT_MAX) return INT_MAX;
13805 return (int)len;
13806}
13807
13808#define BUFF_ARGS buff + pos, buff_size - pos
13809#define APPEND_F(...) if ((pos += snprintf(BUFF_ARGS, "" __VA_ARGS__)) >= buff_size) goto end
13810#define APPEND_S(s) do { \
13811 if ((pos + (int)rb_strlen_lit(s)) >= buff_size) { \
13812 goto end; \
13813 } \
13814 else { \
13815 memcpy(buff + pos, (s), rb_strlen_lit(s) + 1); \
13816 } \
13817 } while (0)
13818#define TF(c) ((c) != 0 ? "true" : "false")
13819#define C(c, s) ((c) != 0 ? (s) : " ")
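/*
 * Usage sketch for the append helpers above: they assume the enclosing
 * function provides `buff`, `buff_size`, a running `pos` and an `end:` label,
 * as the rb_raw_obj_info_* functions below do.  APPEND_F() snprintf()s at
 * buff+pos and advances pos; APPEND_S() memcpy()s a string literal (plus its
 * NUL) at buff+pos; both jump to `end` when the buffer would overflow:
 *
 *   size_t pos = 0;
 *   APPEND_F("%s len: %ld", obj_type_name(obj), RARRAY_LEN(obj));
 *  end:
 *   return pos;
 */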
13820
13821static size_t
13822rb_raw_obj_info_common(char *const buff, const size_t buff_size, const VALUE obj)
13823{
13824 size_t pos = 0;
13825
13826 if (SPECIAL_CONST_P(obj)) {
13827 APPEND_F("%s", obj_type_name(obj));
13828
13829 if (FIXNUM_P(obj)) {
13830 APPEND_F(" %ld", FIX2LONG(obj));
13831 }
13832 else if (SYMBOL_P(obj)) {
13833 APPEND_F(" %s", rb_id2name(SYM2ID(obj)));
13834 }
13835 }
13836 else {
13837 const int age = RVALUE_AGE_GET(obj);
13838
13839 if (is_pointer_to_heap(&rb_objspace, (void *)obj)) {
13840 APPEND_F("%p [%d%s%s%s%s%s%s] %s ",
13841 (void *)obj, age,
13842 C(RVALUE_UNCOLLECTIBLE_BITMAP(obj), "L"),
13843 C(RVALUE_MARK_BITMAP(obj), "M"),
13844 C(RVALUE_PIN_BITMAP(obj), "P"),
13845 C(RVALUE_MARKING_BITMAP(obj), "R"),
13846 C(RVALUE_WB_UNPROTECTED_BITMAP(obj), "U"),
13847 C(rb_objspace_garbage_object_p(obj), "G"),
13848 obj_type_name(obj));
13849 }
13850 else {
13851 /* fake */
13852 APPEND_F("%p [%dXXXX] %s",
13853 (void *)obj, age,
13854 obj_type_name(obj));
13855 }
13856
13857 if (internal_object_p(obj)) {
13858 /* ignore */
13859 }
13860 else if (RBASIC(obj)->klass == 0) {
13861 APPEND_S("(temporary internal)");
13862 }
13863 else if (RTEST(RBASIC(obj)->klass)) {
13864 VALUE class_path = rb_class_path_cached(RBASIC(obj)->klass);
13865 if (!NIL_P(class_path)) {
13866 APPEND_F("(%s)", RSTRING_PTR(class_path));
13867 }
13868 }
13869
13870#if GC_DEBUG
13871 APPEND_F("@%s:%d", RANY(obj)->file, RANY(obj)->line);
13872#endif
13873 }
13874 end:
13875
13876 return pos;
13877}
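/*
 * Sample of the prefix built above for a heap object (the pointer value is
 * illustrative):
 *
 *   0x00007f3c6a8f50a0 [2LM    ] T_STRING (String)
 *
 * i.e. the object's age followed by one column per bit:
 * L(uncollectible), M(marked), P(pinned), R(marking), U(WB-unprotected),
 * G(garbage); unset bits print as spaces.
 */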
13878
13879static size_t
13880rb_raw_obj_info_buitin_type(char *const buff, const size_t buff_size, const VALUE obj, size_t pos)
13881{
13882 if (LIKELY(pos < buff_size) && !SPECIAL_CONST_P(obj)) {
13883 const enum ruby_value_type type = BUILTIN_TYPE(obj);
13884
13885 switch (type) {
13886 case T_NODE:
13887 UNEXPECTED_NODE(rb_raw_obj_info);
13888 break;
13889 case T_ARRAY:
13890 if (ARY_SHARED_P(obj)) {
13891 APPEND_S("shared -> ");
13892 rb_raw_obj_info(BUFF_ARGS, ARY_SHARED_ROOT(obj));
13893 }
13894 else if (ARY_EMBED_P(obj)) {
13895 APPEND_F("[%s%s] len: %ld (embed)",
13896 C(ARY_EMBED_P(obj), "E"),
13897 C(ARY_SHARED_P(obj), "S"),
13898 RARRAY_LEN(obj));
13899 }
13900 else {
13901 APPEND_F("[%s%s] len: %ld, capa:%ld ptr:%p",
13902 C(ARY_EMBED_P(obj), "E"),
13903 C(ARY_SHARED_P(obj), "S"),
13904 RARRAY_LEN(obj),
13905 ARY_EMBED_P(obj) ? -1L : RARRAY(obj)->as.heap.aux.capa,
13906 (void *)RARRAY_CONST_PTR(obj));
13907 }
13908 break;
13909 case T_STRING: {
13910 if (STR_SHARED_P(obj)) {
13911 APPEND_F(" [shared] len: %ld", RSTRING_LEN(obj));
13912 }
13913 else {
13914 if (STR_EMBED_P(obj)) APPEND_S(" [embed]");
13915
13916 APPEND_F(" len: %ld, capa: %" PRIdSIZE, RSTRING_LEN(obj), rb_str_capacity(obj));
13917 }
13918 APPEND_F(" \"%.*s\"", str_len_no_raise(obj), RSTRING_PTR(obj));
13919 break;
13920 }
13921 case T_SYMBOL: {
13922 VALUE fstr = RSYMBOL(obj)->fstr;
13923 ID id = RSYMBOL(obj)->id;
13924 if (RB_TYPE_P(fstr, T_STRING)) {
13925 APPEND_F(":%s id:%d", RSTRING_PTR(fstr), (unsigned int)id);
13926 }
13927 else {
13928 APPEND_F("(%p) id:%d", (void *)fstr, (unsigned int)id);
13929 }
13930 break;
13931 }
13932 case T_MOVED: {
13933 APPEND_F("-> %p", (void*)rb_gc_location(obj));
13934 break;
13935 }
13936 case T_HASH: {
13937 APPEND_F("[%c] %"PRIdSIZE,
13938 RHASH_AR_TABLE_P(obj) ? 'A' : 'S',
13939 RHASH_SIZE(obj));
13940 break;
13941 }
13942 case T_CLASS:
13943 case T_MODULE:
13944 {
13945 VALUE class_path = rb_class_path_cached(obj);
13946 if (!NIL_P(class_path)) {
13947 APPEND_F("%s", RSTRING_PTR(class_path));
13948 }
13949 else {
13950 APPEND_S("(anon)");
13951 }
13952 break;
13953 }
13954 case T_ICLASS:
13955 {
13956 VALUE class_path = rb_class_path_cached(RBASIC_CLASS(obj));
13957 if (!NIL_P(class_path)) {
13958 APPEND_F("src:%s", RSTRING_PTR(class_path));
13959 }
13960 break;
13961 }
13962 case T_OBJECT:
13963 {
13964 if (rb_shape_obj_too_complex(obj)) {
13965 size_t hash_len = rb_st_table_size(ROBJECT_IV_HASH(obj));
13966 APPEND_F("(too_complex) len:%zu", hash_len);
13967 }
13968 else {
13969 uint32_t len = ROBJECT_IV_CAPACITY(obj);
13970
13971 if (RANY(obj)->as.basic.flags & ROBJECT_EMBED) {
13972 APPEND_F("(embed) len:%d", len);
13973 }
13974 else {
13975 VALUE *ptr = ROBJECT_IVPTR(obj);
13976 APPEND_F("len:%d ptr:%p", len, (void *)ptr);
13977 }
13978 }
13979 }
13980 break;
13981 case T_DATA: {
13982 const struct rb_block *block;
13983 const rb_iseq_t *iseq;
13984 if (rb_obj_is_proc(obj) &&
13985 (block = vm_proc_block(obj)) != NULL &&
13986 (vm_block_type(block) == block_type_iseq) &&
13987 (iseq = vm_block_iseq(block)) != NULL) {
13988 rb_raw_iseq_info(BUFF_ARGS, iseq);
13989 }
13990 else if (rb_ractor_p(obj)) {
13991 rb_ractor_t *r = (void *)DATA_PTR(obj);
13992 if (r) {
13993 APPEND_F("r:%d", r->pub.id);
13994 }
13995 }
13996 else {
13997 const char * const type_name = rb_objspace_data_type_name(obj);
13998 if (type_name) {
13999 APPEND_F("%s", type_name);
14000 }
14001 }
14002 break;
14003 }
14004 case T_IMEMO: {
14005 APPEND_F("<%s> ", rb_imemo_name(imemo_type(obj)));
14006
14007 switch (imemo_type(obj)) {
14008 case imemo_ment:
14009 {
14010 const rb_method_entry_t *me = &RANY(obj)->as.imemo.ment;
14011
14012 APPEND_F(":%s (%s%s%s%s) type:%s aliased:%d owner:%p defined_class:%p",
14013 rb_id2name(me->called_id),
14014 METHOD_ENTRY_VISI(me) == METHOD_VISI_PUBLIC ? "pub" :
14015 METHOD_ENTRY_VISI(me) == METHOD_VISI_PRIVATE ? "pri" : "pro",
14016 METHOD_ENTRY_COMPLEMENTED(me) ? ",cmp" : "",
14017 METHOD_ENTRY_CACHED(me) ? ",cc" : "",
14018 METHOD_ENTRY_INVALIDATED(me) ? ",inv" : "",
14019 me->def ? rb_method_type_name(me->def->type) : "NULL",
14020 me->def ? me->def->aliased : -1,
14021 (void *)me->owner, // obj_info(me->owner),
14022 (void *)me->defined_class); //obj_info(me->defined_class)));
14023
14024 if (me->def) {
14025 switch (me->def->type) {
14026 case VM_METHOD_TYPE_ISEQ:
14027 APPEND_S(" (iseq:");
14028 rb_raw_obj_info(BUFF_ARGS, (VALUE)me->def->body.iseq.iseqptr);
14029 APPEND_S(")");
14030 break;
14031 default:
14032 break;
14033 }
14034 }
14035
14036 break;
14037 }
14038 case imemo_iseq: {
14039 const rb_iseq_t *iseq = (const rb_iseq_t *)obj;
14040 rb_raw_iseq_info(BUFF_ARGS, iseq);
14041 break;
14042 }
14043 case imemo_callinfo:
14044 {
14045 const struct rb_callinfo *ci = (const struct rb_callinfo *)obj;
14046 APPEND_F("(mid:%s, flag:%x argc:%d, kwarg:%s)",
14047 rb_id2name(vm_ci_mid(ci)),
14048 vm_ci_flag(ci),
14049 vm_ci_argc(ci),
14050 vm_ci_kwarg(ci) ? "available" : "NULL");
14051 break;
14052 }
14053 case imemo_callcache:
14054 {
14055 const struct rb_callcache *cc = (const struct rb_callcache *)obj;
14056 VALUE class_path = cc->klass ? rb_class_path_cached(cc->klass) : Qnil;
14057 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
14058
14059 APPEND_F("(klass:%s cme:%s%s (%p) call:%p",
14060 NIL_P(class_path) ? (cc->klass ? "??" : "<NULL>") : RSTRING_PTR(class_path),
14061 cme ? rb_id2name(cme->called_id) : "<NULL>",
14062 cme ? (METHOD_ENTRY_INVALIDATED(cme) ? " [inv]" : "") : "",
14063 (void *)cme,
14064 (void *)vm_cc_call(cc));
14065 break;
14066 }
14067 default:
14068 break;
14069 }
14070 }
14071 default:
14072 break;
14073 }
14074 }
14075 end:
14076
14077 return pos;
14078}
14079
14080#undef TF
14081#undef C
14082
14083const char *
14084rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj)
14085{
14086 asan_unpoisoning_object(obj) {
14087 size_t pos = rb_raw_obj_info_common(buff, buff_size, obj);
14088 pos = rb_raw_obj_info_buitin_type(buff, buff_size, obj, pos);
14089 if (pos >= buff_size) {} // truncated
14090 }
14091
14092 return buff;
14093}
14094
14095#undef APPEND_S
14096#undef APPEND_F
14097#undef BUFF_ARGS
14098
14099#if RGENGC_OBJ_INFO
14100#define OBJ_INFO_BUFFERS_NUM 10
14101#define OBJ_INFO_BUFFERS_SIZE 0x100
14102static rb_atomic_t obj_info_buffers_index = 0;
14103static char obj_info_buffers[OBJ_INFO_BUFFERS_NUM][OBJ_INFO_BUFFERS_SIZE];
14104
14105/* Increments *var atomically and resets *var to 0 when maxval is
14106 * reached. Returns the wraparound old *var value (0...maxval). */
14107static rb_atomic_t
14108atomic_inc_wraparound(rb_atomic_t *var, const rb_atomic_t maxval)
14109{
14110 rb_atomic_t oldval = RUBY_ATOMIC_FETCH_ADD(*var, 1);
14111 if (UNLIKELY(oldval >= maxval - 1)) { // wraparound *var
14112 const rb_atomic_t newval = oldval + 1;
14113 RUBY_ATOMIC_CAS(*var, newval, newval % maxval);
14114 oldval %= maxval;
14115 }
14116 return oldval;
14117}
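/*
 * For example, with maxval == OBJ_INFO_BUFFERS_NUM (10) and *var starting at
 * 0, successive calls return 0, 1, ..., 9, 0, 1, ... -- an atomically
 * maintained ring index, which obj_info() below uses to rotate through its
 * static buffers.
 */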
14118
14119static const char *
14120obj_info(VALUE obj)
14121{
14122 rb_atomic_t index = atomic_inc_wraparound(&obj_info_buffers_index, OBJ_INFO_BUFFERS_NUM);
14123 char *const buff = obj_info_buffers[index];
14124 return rb_raw_obj_info(buff, OBJ_INFO_BUFFERS_SIZE, obj);
14125}
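/*
 * Rotating through OBJ_INFO_BUFFERS_NUM static buffers keeps the last few
 * results alive at once, so several obj_info() strings can appear in a single
 * message, e.g.:
 *
 *   fprintf(stderr, "moved %s -> %s\n", obj_info(src), obj_info(dest));
 */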
14126#else
14127static const char *
14128obj_info(VALUE obj)
14129{
14130 return obj_type_name(obj);
14131}
14132#endif
14133
14134const char *
14135rb_obj_info(VALUE obj)
14136{
14137 return obj_info(obj);
14138}
14139
14140void
14141rb_obj_info_dump(VALUE obj)
14142{
14143 char buff[0x100];
14144 fprintf(stderr, "rb_obj_info_dump: %s\n", rb_raw_obj_info(buff, 0x100, obj));
14145}
14146
14147void
14148rb_obj_info_dump_loc(VALUE obj, const char *file, int line, const char *func)
14149{
14150 char buff[0x100];
14151 fprintf(stderr, "<OBJ_INFO:%s@%s:%d> %s\n", func, file, line, rb_raw_obj_info(buff, 0x100, obj));
14152}
14153
14154#if GC_DEBUG
14155
14156void
14157rb_gcdebug_print_obj_condition(VALUE obj)
14158{
14159 rb_objspace_t *objspace = &rb_objspace;
14160
14161 fprintf(stderr, "created at: %s:%d\n", RANY(obj)->file, RANY(obj)->line);
14162
14163 if (BUILTIN_TYPE(obj) == T_MOVED) {
14164 fprintf(stderr, "moved?: true\n");
14165 }
14166 else {
14167 fprintf(stderr, "moved?: false\n");
14168 }
14169 if (is_pointer_to_heap(objspace, (void *)obj)) {
14170 fprintf(stderr, "pointer to heap?: true\n");
14171 }
14172 else {
14173 fprintf(stderr, "pointer to heap?: false\n");
14174 return;
14175 }
14176
14177 fprintf(stderr, "marked? : %s\n", MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) ? "true" : "false");
14178 fprintf(stderr, "pinned? : %s\n", MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj) ? "true" : "false");
14179 fprintf(stderr, "age? : %d\n", RVALUE_AGE_GET(obj));
14180 fprintf(stderr, "old? : %s\n", RVALUE_OLD_P(obj) ? "true" : "false");
14181 fprintf(stderr, "WB-protected?: %s\n", RVALUE_WB_UNPROTECTED(obj) ? "false" : "true");
14182 fprintf(stderr, "remembered? : %s\n", RVALUE_REMEMBERED(obj) ? "true" : "false");
14183
14184 if (is_lazy_sweeping(objspace)) {
14185 fprintf(stderr, "lazy sweeping?: true\n");
14186 fprintf(stderr, "swept?: %s\n", is_swept_object(obj) ? "done" : "not yet");
14187 }
14188 else {
14189 fprintf(stderr, "lazy sweeping?: false\n");
14190 }
14191}
14192
14193static VALUE
14194gcdebug_sentinel(RB_BLOCK_CALL_FUNC_ARGLIST(obj, name))
14195{
14196 fprintf(stderr, "WARNING: object %s(%p) is inadvertently collected\n", (char *)name, (void *)obj);
14197 return Qnil;
14198}
14199
14200void
14201rb_gcdebug_sentinel(VALUE obj, const char *name)
14202{
14203 rb_define_finalizer(obj, rb_proc_new(gcdebug_sentinel, (VALUE)name));
14204}
14205
14206#endif /* GC_DEBUG */
14207
14208/*
14209 * call-seq:
14210 * GC.add_stress_to_class(class[, ...])
14211 *
14212 * Raises NoMemoryError when allocating an instance of the given classes.
14213 *
14214 */
14215static VALUE
14216rb_gcdebug_add_stress_to_class(int argc, VALUE *argv, VALUE self)
14217{
14218 rb_objspace_t *objspace = &rb_objspace;
14219
14220 if (!stress_to_class) {
14221 set_stress_to_class(rb_ary_hidden_new(argc));
14222 }
14223 rb_ary_cat(stress_to_class, argv, argc);
14224 return self;
14225}
14226
14227/*
14228 * call-seq:
14229 * GC.remove_stress_to_class(class[, ...])
14230 *
14231 * No longer raises NoMemoryError when allocating an instance of the
14232 * given classes.
14233 *
14234 */
14235static VALUE
14236rb_gcdebug_remove_stress_to_class(int argc, VALUE *argv, VALUE self)
14237{
14238 rb_objspace_t *objspace = &rb_objspace;
14239 int i;
14240
14241 if (stress_to_class) {
14242 for (i = 0; i < argc; ++i) {
14243 rb_ary_delete_same(stress_to_class, argv[i]);
14244 }
14245 if (RARRAY_LEN(stress_to_class) == 0) {
14246 set_stress_to_class(0);
14247 }
14248 }
14249 return Qnil;
14250}
14251
14252/*
14253 * Document-module: ObjectSpace
14254 *
14255 * The ObjectSpace module contains a number of routines
14256 * that interact with the garbage collection facility and allow you to
14257 * traverse all living objects with an iterator.
14258 *
14259 * ObjectSpace also provides support for object finalizers, procs that will be
14260 * called when a specific object is about to be destroyed by garbage
14261 * collection. See the documentation for
14262 * <code>ObjectSpace.define_finalizer</code> for important information on
14263 * how to use this method correctly.
14264 *
14265 * a = "A"
14266 * b = "B"
14267 *
14268 * ObjectSpace.define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" })
14269 * ObjectSpace.define_finalizer(b, proc {|id| puts "Finalizer two on #{id}" })
14270 *
14271 * a = nil
14272 * b = nil
14273 *
14274 * _produces:_
14275 *
14276 * Finalizer two on 537763470
14277 * Finalizer one on 537763480
14278 */
14279
14280/* Document-class: GC::Profiler
14281 *
14282 * The GC profiler provides access to information on GC runs including time,
14283 * length and object space size.
14284 *
14285 * Example:
14286 *
14287 * GC::Profiler.enable
14288 *
14289 * require 'rdoc/rdoc'
14290 *
14291 * GC::Profiler.report
14292 *
14293 * GC::Profiler.disable
14294 *
14295 * See also GC.count, GC.malloc_allocated_size and GC.malloc_allocations
14296 */
14297
14298#include "gc.rbinc"
14299
14300void
14301Init_GC(void)
14302{
14303#undef rb_intern
14304 malloc_offset = gc_compute_malloc_offset();
14305
14306 VALUE rb_mObjSpace;
14307 VALUE rb_mProfiler;
14308 VALUE gc_constants;
14309
14310 rb_mGC = rb_define_module("GC");
14311
14312 gc_constants = rb_hash_new();
14313 rb_hash_aset(gc_constants, ID2SYM(rb_intern("DEBUG")), RBOOL(GC_DEBUG));
14314 rb_hash_aset(gc_constants, ID2SYM(rb_intern("BASE_SLOT_SIZE")), SIZET2NUM(BASE_SLOT_SIZE - RVALUE_OVERHEAD));
14315 rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_OVERHEAD")), SIZET2NUM(RVALUE_OVERHEAD));
14316 rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_SIZE")), SIZET2NUM(sizeof(RVALUE)));
14317 rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_OBJ_LIMIT")), SIZET2NUM(HEAP_PAGE_OBJ_LIMIT));
14318 rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_BITMAP_SIZE")), SIZET2NUM(HEAP_PAGE_BITMAP_SIZE));
14319 rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_SIZE")), SIZET2NUM(HEAP_PAGE_SIZE));
14320 rb_hash_aset(gc_constants, ID2SYM(rb_intern("SIZE_POOL_COUNT")), LONG2FIX(SIZE_POOL_COUNT));
14321 rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVARGC_MAX_ALLOCATE_SIZE")), LONG2FIX(size_pool_slot_size(SIZE_POOL_COUNT - 1)));
14322 rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_OLD_AGE")), LONG2FIX(RVALUE_OLD_AGE));
14323 if (RB_BUG_INSTEAD_OF_RB_MEMERROR+0) {
14324 rb_hash_aset(gc_constants, ID2SYM(rb_intern("RB_BUG_INSTEAD_OF_RB_MEMERROR")), Qtrue);
14325 }
14326 OBJ_FREEZE(gc_constants);
14327 /* Internal constants in the garbage collector. */
14328 rb_define_const(rb_mGC, "INTERNAL_CONSTANTS", gc_constants);
14329
14330 rb_mProfiler = rb_define_module_under(rb_mGC, "Profiler");
14331 rb_define_singleton_method(rb_mProfiler, "enabled?", gc_profile_enable_get, 0);
14332 rb_define_singleton_method(rb_mProfiler, "enable", gc_profile_enable, 0);
14333 rb_define_singleton_method(rb_mProfiler, "raw_data", gc_profile_record_get, 0);
14334 rb_define_singleton_method(rb_mProfiler, "disable", gc_profile_disable, 0);
14335 rb_define_singleton_method(rb_mProfiler, "clear", gc_profile_clear, 0);
14336 rb_define_singleton_method(rb_mProfiler, "result", gc_profile_result, 0);
14337 rb_define_singleton_method(rb_mProfiler, "report", gc_profile_report, -1);
14338 rb_define_singleton_method(rb_mProfiler, "total_time", gc_profile_total_time, 0);
14339
14340 rb_mObjSpace = rb_define_module("ObjectSpace");
14341
14342 rb_define_module_function(rb_mObjSpace, "each_object", os_each_obj, -1);
14343
14344 rb_define_module_function(rb_mObjSpace, "define_finalizer", define_final, -1);
14345 rb_define_module_function(rb_mObjSpace, "undefine_finalizer", undefine_final, 1);
14346
14347 rb_define_module_function(rb_mObjSpace, "_id2ref", os_id2ref, 1);
14348
14349 rb_vm_register_special_exception(ruby_error_nomemory, rb_eNoMemError, "failed to allocate memory");
14350
14351 rb_define_method(rb_cBasicObject, "__id__", rb_obj_id, 0);
14352 rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0);
14353
14354 rb_define_module_function(rb_mObjSpace, "count_objects", count_objects, -1);
14355
14356 /* internal methods */
14357 rb_define_singleton_method(rb_mGC, "verify_internal_consistency", gc_verify_internal_consistency_m, 0);
14358#if MALLOC_ALLOCATED_SIZE
14359 rb_define_singleton_method(rb_mGC, "malloc_allocated_size", gc_malloc_allocated_size, 0);
14360 rb_define_singleton_method(rb_mGC, "malloc_allocations", gc_malloc_allocations, 0);
14361#endif
14362
14363 if (GC_COMPACTION_SUPPORTED) {
14364 rb_define_singleton_method(rb_mGC, "compact", gc_compact, 0);
14365 rb_define_singleton_method(rb_mGC, "auto_compact", gc_get_auto_compact, 0);
14366 rb_define_singleton_method(rb_mGC, "auto_compact=", gc_set_auto_compact, 1);
14367 rb_define_singleton_method(rb_mGC, "latest_compact_info", gc_compact_stats, 0);
14368 }
14369    else {
14370        rb_define_singleton_method(rb_mGC, "compact", rb_f_notimplement, 0);
14371        rb_define_singleton_method(rb_mGC, "auto_compact", rb_f_notimplement, 0);
14372        rb_define_singleton_method(rb_mGC, "auto_compact=", rb_f_notimplement, 1);
14373 rb_define_singleton_method(rb_mGC, "latest_compact_info", rb_f_notimplement, 0);
14374 /* When !GC_COMPACTION_SUPPORTED, this method is not defined in gc.rb */
14375 rb_define_singleton_method(rb_mGC, "verify_compaction_references", rb_f_notimplement, -1);
14376 }
14377
14378 if (GC_DEBUG_STRESS_TO_CLASS) {
14379 rb_define_singleton_method(rb_mGC, "add_stress_to_class", rb_gcdebug_add_stress_to_class, -1);
14380 rb_define_singleton_method(rb_mGC, "remove_stress_to_class", rb_gcdebug_remove_stress_to_class, -1);
14381 }
14382
14383 {
14384 VALUE opts;
14385 /* \GC build options */
14386 rb_define_const(rb_mGC, "OPTS", opts = rb_ary_new());
14387#define OPT(o) if (o) rb_ary_push(opts, rb_fstring_lit(#o))
14388 OPT(GC_DEBUG);
14389 OPT(USE_RGENGC);
14390 OPT(RGENGC_DEBUG);
14391 OPT(RGENGC_CHECK_MODE);
14392 OPT(RGENGC_PROFILE);
14393 OPT(RGENGC_ESTIMATE_OLDMALLOC);
14394 OPT(GC_PROFILE_MORE_DETAIL);
14395 OPT(GC_ENABLE_LAZY_SWEEP);
14396 OPT(CALC_EXACT_MALLOC_SIZE);
14397 OPT(MALLOC_ALLOCATED_SIZE);
14398 OPT(MALLOC_ALLOCATED_SIZE_CHECK);
14399 OPT(GC_PROFILE_DETAIL_MEMORY);
14400 OPT(GC_COMPACTION_SUPPORTED);
14401#undef OPT
14402 OBJ_FREEZE(opts);
14403 }
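/*
 * Each OPT() invocation above expands to a guarded push, e.g. OPT(GC_DEBUG)
 * becomes `if (GC_DEBUG) rb_ary_push(opts, rb_fstring_lit("GC_DEBUG"))`, so
 * GC::OPTS only lists the build-time options that were compiled in as
 * non-zero.
 */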
14404}
14405
14406#ifdef ruby_xmalloc
14407#undef ruby_xmalloc
14408#endif
14409#ifdef ruby_xmalloc2
14410#undef ruby_xmalloc2
14411#endif
14412#ifdef ruby_xcalloc
14413#undef ruby_xcalloc
14414#endif
14415#ifdef ruby_xrealloc
14416#undef ruby_xrealloc
14417#endif
14418#ifdef ruby_xrealloc2
14419#undef ruby_xrealloc2
14420#endif
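/*
 * The #undefs above strip any macro definitions of these names so that the
 * functions below are the real out-of-line entry points.  With
 * USE_GC_MALLOC_OBJ_INFO_DETAILS they merely record gc.c's own __FILE__ and
 * __LINE__ before delegating to the corresponding *_body() implementations.
 */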
14421
14422void *
14423ruby_xmalloc(size_t size)
14424{
14425#if USE_GC_MALLOC_OBJ_INFO_DETAILS
14426 ruby_malloc_info_file = __FILE__;
14427 ruby_malloc_info_line = __LINE__;
14428#endif
14429 return ruby_xmalloc_body(size);
14430}
14431
14432void *
14433ruby_xmalloc2(size_t n, size_t size)
14434{
14435#if USE_GC_MALLOC_OBJ_INFO_DETAILS
14436 ruby_malloc_info_file = __FILE__;
14437 ruby_malloc_info_line = __LINE__;
14438#endif
14439 return ruby_xmalloc2_body(n, size);
14440}
14441
14442void *
14443ruby_xcalloc(size_t n, size_t size)
14444{
14445#if USE_GC_MALLOC_OBJ_INFO_DETAILS
14446 ruby_malloc_info_file = __FILE__;
14447 ruby_malloc_info_line = __LINE__;
14448#endif
14449 return ruby_xcalloc_body(n, size);
14450}
14451
14452void *
14453ruby_xrealloc(void *ptr, size_t new_size)
14454{
14455#if USE_GC_MALLOC_OBJ_INFO_DETAILS
14456 ruby_malloc_info_file = __FILE__;
14457 ruby_malloc_info_line = __LINE__;
14458#endif
14459 return ruby_xrealloc_body(ptr, new_size);
14460}
14461
14462void *
14463ruby_xrealloc2(void *ptr, size_t n, size_t new_size)
14464{
14465#if USE_GC_MALLOC_OBJ_INFO_DETAILS
14466 ruby_malloc_info_file = __FILE__;
14467 ruby_malloc_info_line = __LINE__;
14468#endif
14469 return ruby_xrealloc2_body(ptr, n, new_size);
14470}