Ruby 3.3.2p78 (2024-05-30 revision e5a195edf62fe1bf7146a191da13fa1c4fecbd71)
thread.c
/**********************************************************************

  thread.c -

  $Author$

  Copyright (C) 2004-2007 Koichi Sasada

**********************************************************************/

/*
  YARV Thread Design

  model 1: Userlevel Thread
    Same as the traditional ruby thread.

  model 2: Native Thread with Global VM lock
    Uses native threads (pthreads or Windows threads); Ruby threads run
    concurrently.

  model 3: Native Thread with fine grain lock
    Uses native threads; Ruby threads run concurrently or in parallel.

  model 4: M:N User:Native threads with Global VM lock
    Combination of model 1 and 2.

  model 5: M:N User:Native thread with fine grain lock
    Combination of model 1 and 3.

------------------------------------------------------------------------

  model 2:
    A thread that holds the mutex (GVL: Global VM Lock or Giant VM Lock)
    can run.  When scheduling, the running thread releases the GVL.  If
    the running thread attempts a blocking operation, it must release the
    GVL so that another thread can continue running.  After the blocking
    operation, the thread must check for interrupts (RUBY_VM_CHECK_INTS).

    Every VM can run in parallel.

    Ruby threads are scheduled by the OS thread scheduler.

------------------------------------------------------------------------

  model 3:
    Every thread runs concurrently or in parallel, so exclusive access
    control is needed to touch shared objects.  For example, to access a
    String or Array object, a fine-grained lock must be taken every time.
 */
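/*
 * Illustrative sketch (not part of this file): how a C extension follows
 * the model-2 rule above.  The blocking read(2) runs with the GVL
 * released, so other Ruby threads keep running, and interrupts are
 * checked when the GVL is re-acquired.  The struct and function names
 * here are hypothetical; the API is the public one from ruby/thread.h.
 *
 *   #include <unistd.h>
 *   #include "ruby/thread.h"
 *
 *   struct read_args { int fd; void *buf; size_t len; ssize_t result; };
 *
 *   static void *
 *   blocking_read(void *ptr)
 *   {
 *       struct read_args *args = ptr;
 *       args->result = read(args->fd, args->buf, args->len); // no GVL here
 *       return NULL;
 *   }
 *
 *   static ssize_t
 *   ext_read(int fd, void *buf, size_t len)
 *   {
 *       struct read_args args = { fd, buf, len, -1 };
 *       rb_thread_call_without_gvl(blocking_read, &args, RUBY_UBF_IO, NULL);
 *       return args.result;
 *   }
 */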


/*
 * FD_SET, FD_CLR and FD_ISSET have a small sanity check when using glibc
 * 2.15 or later and set _FORTIFY_SOURCE > 0.
 * However, the implementation is wrong. Even though Linux's select(2)
 * supports large fd sizes (> FD_SETSIZE), it wrongly assumes fd is always
 * less than FD_SETSIZE (i.e. 1024). And then, when HAVE_RB_FD_INIT is
 * enabled, it doesn't work correctly and makes the program abort. Therefore
 * we need to disable FORTIFY_SOURCE until glibc fixes it.
 */
#undef _FORTIFY_SOURCE
#undef __USE_FORTIFY_LEVEL
#define __USE_FORTIFY_LEVEL 0

/* for model 2 */

#include "ruby/internal/config.h"

#ifdef __linux__
// Normally, gcc(1) translates calls to alloca() with inlined code. This is
// not done when either the -ansi, -std=c89, -std=c99, or the -std=c11
// option is given and the header <alloca.h> is not included.
# include <alloca.h>
#endif

#define TH_SCHED(th) (&(th)->ractor->threads.sched)

#include "eval_intern.h"
#include "hrtime.h"
#include "internal.h"
#include "internal/class.h"
#include "internal/cont.h"
#include "internal/error.h"
#include "internal/gc.h"
#include "internal/hash.h"
#include "internal/io.h"
#include "internal/object.h"
#include "internal/proc.h"
#include "ruby/fiber/scheduler.h"
#include "internal/signal.h"
#include "internal/thread.h"
#include "internal/time.h"
#include "internal/warnings.h"
#include "iseq.h"
#include "rjit.h"
#include "ruby/debug.h"
#include "ruby/io.h"
#include "ruby/thread.h"
#include "ruby/thread_native.h"
#include "timev.h"
#include "vm_core.h"
#include "ractor_core.h"
#include "vm_debug.h"
#include "vm_sync.h"

#if USE_RJIT && defined(HAVE_SYS_WAIT_H)
#include <sys/wait.h>
#endif

#ifndef USE_NATIVE_THREAD_PRIORITY
#define USE_NATIVE_THREAD_PRIORITY 0
#define RUBY_THREAD_PRIORITY_MAX 3
#define RUBY_THREAD_PRIORITY_MIN -3
#endif

static VALUE rb_cThreadShield;

static VALUE sym_immediate;
static VALUE sym_on_blocking;
static VALUE sym_never;

#define THREAD_LOCAL_STORAGE_INITIALISED FL_USER13
#define THREAD_LOCAL_STORAGE_INITIALISED_P(th) RB_FL_TEST_RAW((th), THREAD_LOCAL_STORAGE_INITIALISED)

static inline VALUE
rb_thread_local_storage(VALUE thread)
{
    if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
        rb_ivar_set(thread, idLocals, rb_hash_new());
        RB_FL_SET_RAW(thread, THREAD_LOCAL_STORAGE_INITIALISED);
    }
    return rb_ivar_get(thread, idLocals);
}
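/*
 * Usage sketch (hypothetical caller): the locals Hash is allocated
 * lazily, so threads that never touch thread-local storage pay nothing.
 *
 *   VALUE locals = rb_thread_local_storage(thread);
 *   rb_hash_aset(locals, ID2SYM(rb_intern("name")), rb_str_new_cstr("worker"));
 */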

enum SLEEP_FLAGS {
    SLEEP_DEADLOCKABLE   = 0x01,
    SLEEP_SPURIOUS_CHECK = 0x02,
    SLEEP_ALLOW_SPURIOUS = 0x04,
    SLEEP_NO_CHECKINTS   = 0x08,
};

static void sleep_forever(rb_thread_t *th, unsigned int fl);
static int sleep_hrtime(rb_thread_t *, rb_hrtime_t, unsigned int fl);

static void rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end);
static int rb_threadptr_dead(rb_thread_t *th);
static void rb_check_deadlock(rb_ractor_t *r);
static int rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th);
static const char *thread_status_name(rb_thread_t *th, int detail);
static int hrtime_update_expire(rb_hrtime_t *, const rb_hrtime_t);
NORETURN(static void async_bug_fd(const char *mesg, int errno_arg, int fd));
static int consume_communication_pipe(int fd);

static volatile int system_working = 1;
static rb_internal_thread_specific_key_t specific_key_count;

struct waiting_fd {
    struct ccan_list_node wfd_node; /* <=> vm.waiting_fds */
    rb_thread_t *th;
    int fd;
    struct rb_io_close_wait_list *busy;
};

/********************************************************************************/

#define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

struct rb_blocking_region_buffer {
    enum rb_thread_status prev_status;
};

static int unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted);
static void unblock_function_clear(rb_thread_t *th);

static inline int blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
                                        rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted);
static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);

#define THREAD_BLOCKING_BEGIN(th) do { \
  struct rb_thread_sched * const sched = TH_SCHED(th); \
  RB_VM_SAVE_MACHINE_CONTEXT(th); \
  thread_sched_to_waiting((sched), (th));

#define THREAD_BLOCKING_END(th) \
  thread_sched_to_running((sched), (th)); \
  rb_ractor_thread_switch(th->ractor, th); \
} while(0)

#ifdef __GNUC__
#ifdef HAVE_BUILTIN___BUILTIN_CHOOSE_EXPR_CONSTANT_P
#define only_if_constant(expr, notconst) __builtin_choose_expr(__builtin_constant_p(expr), (expr), (notconst))
#else
#define only_if_constant(expr, notconst) (__builtin_constant_p(expr) ? (expr) : (notconst))
#endif
#else
#define only_if_constant(expr, notconst) notconst
#endif
#define BLOCKING_REGION(th, exec, ubf, ubfarg, fail_if_interrupted) do { \
    struct rb_blocking_region_buffer __region; \
    if (blocking_region_begin(th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
        /* always return true unless fail_if_interrupted */ \
        !only_if_constant(fail_if_interrupted, TRUE)) { \
        /* Important that this is inlined into the macro, and not part of \
         * blocking_region_begin - see bug #20493 */ \
        RB_VM_SAVE_MACHINE_CONTEXT(th); \
        thread_sched_to_waiting(TH_SCHED(th), th); \
        exec; \
        blocking_region_end(th, &__region); \
    }; \
} while(0)
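/*
 * Usage sketch: BLOCKING_REGION wraps the code that must run without the
 * GVL.  A call site shaped like the one in rb_thread_io_blocking_call()
 * below looks like this (variable names hypothetical):
 *
 *   BLOCKING_REGION(th, {
 *       result = read(fd, buf, len);   // runs with the GVL released
 *       saved_errno = errno;
 *   }, ubf_select, th, FALSE);
 *
 * ubf_select is this file's unblocking function; it is invoked if another
 * thread interrupts us while we are blocked inside the region.
 */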

/*
 * returns true if this thread was spuriously interrupted, false otherwise
 * (e.g. hit by Thread#run or ran a Ruby-level Signal.trap handler)
 */
#define RUBY_VM_CHECK_INTS_BLOCKING(ec) vm_check_ints_blocking(ec)
static inline int
vm_check_ints_blocking(rb_execution_context_t *ec)
{
    rb_thread_t *th = rb_ec_thread_ptr(ec);

    if (LIKELY(rb_threadptr_pending_interrupt_empty_p(th))) {
        if (LIKELY(!RUBY_VM_INTERRUPTED_ANY(ec))) return FALSE;
    }
    else {
        th->pending_interrupt_queue_checked = 0;
        RUBY_VM_SET_INTERRUPT(ec);
    }
    return rb_threadptr_execute_interrupts(th, 1);
}

int
rb_vm_check_ints_blocking(rb_execution_context_t *ec)
{
    return vm_check_ints_blocking(ec);
}

/*
 * poll() is supported by many OSes, but so far Linux is the only
 * one we know of that supports using poll() in all places select()
 * would work.
 */
#if defined(HAVE_POLL)
# if defined(__linux__)
#  define USE_POLL
# endif
# if defined(__FreeBSD_version) && __FreeBSD_version >= 1100000
#  define USE_POLL
   /* FreeBSD does not set POLLOUT when POLLHUP happens */
#  define POLLERR_SET (POLLHUP | POLLERR)
# endif
#endif

static void
timeout_prepare(rb_hrtime_t **to, rb_hrtime_t *rel, rb_hrtime_t *end,
                const struct timeval *timeout)
{
    if (timeout) {
        *rel = rb_timeval2hrtime(timeout);
        *end = rb_hrtime_add(rb_hrtime_now(), *rel);
        *to = rel;
    }
    else {
        *to = 0;
    }
}
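/*
 * Usage sketch: callers hand in a nullable struct timeval; afterwards
 * *to is either NULL (wait forever) or points at the relative timeout,
 * which can be re-armed against the absolute deadline on spurious
 * wakeups.  A hypothetical caller (wait_up_to() is made up):
 *
 *   rb_hrtime_t rel, end, *to;
 *   timeout_prepare(&to, &rel, &end, timeout);
 *   do {
 *       wait_up_to(to);                  // wait at most *to, or forever
 *   } while (to && !hrtime_update_expire(to, end));
 */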

MAYBE_UNUSED(NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start)));

#include THREAD_IMPL_SRC

/*
 * TODO: somebody with win32 knowledge should be able to get rid of
 * timer-thread by busy-waiting on signals. And it should be possible
 * to make the GVL in thread_pthread.c be platform-independent.
 */
#ifndef BUSY_WAIT_SIGNALS
# define BUSY_WAIT_SIGNALS (0)
#endif

#ifndef USE_EVENTFD
# define USE_EVENTFD (0)
#endif

#include "thread_sync.c"

void
rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
{
    rb_native_mutex_initialize(lock);
}

void
rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
{
    rb_native_mutex_destroy(lock);
}

void
rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
{
    rb_native_mutex_lock(lock);
}

void
rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
{
    rb_native_mutex_unlock(lock);
}
static int
unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted)
{
    do {
        if (fail_if_interrupted) {
            if (RUBY_VM_INTERRUPTED_ANY(th->ec)) {
                return FALSE;
            }
        }
        else {
            RUBY_VM_CHECK_INTS(th->ec);
        }

        rb_native_mutex_lock(&th->interrupt_lock);
    } while (!th->ec->raised_flag && RUBY_VM_INTERRUPTED_ANY(th->ec) &&
             (rb_native_mutex_unlock(&th->interrupt_lock), TRUE));

    VM_ASSERT(th->unblock.func == NULL);

    th->unblock.func = func;
    th->unblock.arg = arg;
    rb_native_mutex_unlock(&th->interrupt_lock);

    return TRUE;
}

static void
unblock_function_clear(rb_thread_t *th)
{
    rb_native_mutex_lock(&th->interrupt_lock);
    th->unblock.func = 0;
    rb_native_mutex_unlock(&th->interrupt_lock);
}

static void
rb_threadptr_interrupt_common(rb_thread_t *th, int trap)
{
    RUBY_DEBUG_LOG("th:%u trap:%d", rb_th_serial(th), trap);

    rb_native_mutex_lock(&th->interrupt_lock);
    {
        if (trap) {
            RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
        }
        else {
            RUBY_VM_SET_INTERRUPT(th->ec);
        }

        if (th->unblock.func != NULL) {
            (th->unblock.func)(th->unblock.arg);
        }
        else {
            /* none */
        }
    }
    rb_native_mutex_unlock(&th->interrupt_lock);
}

void
rb_threadptr_interrupt(rb_thread_t *th)
{
    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
    rb_threadptr_interrupt_common(th, 0);
}

static void
threadptr_trap_interrupt(rb_thread_t *th)
{
    rb_threadptr_interrupt_common(th, 1);
}

static void
terminate_all(rb_ractor_t *r, const rb_thread_t *main_thread)
{
    rb_thread_t *th = 0;

    ccan_list_for_each(&r->threads.set, th, lt_node) {
        if (th != main_thread) {
            RUBY_DEBUG_LOG("terminate start th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));

            rb_threadptr_pending_interrupt_enque(th, RUBY_FATAL_THREAD_TERMINATED);
            rb_threadptr_interrupt(th);

            RUBY_DEBUG_LOG("terminate done th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));
        }
        else {
            RUBY_DEBUG_LOG("main thread th:%u", rb_th_serial(th));
        }
    }
}

static void
rb_threadptr_join_list_wakeup(rb_thread_t *thread)
{
    while (thread->join_list) {
        struct rb_waiting_list *join_list = thread->join_list;

        // Consume the entry from the join list:
        thread->join_list = join_list->next;

        rb_thread_t *target_thread = join_list->thread;

        if (target_thread->scheduler != Qnil && join_list->fiber) {
            rb_fiber_scheduler_unblock(target_thread->scheduler, target_thread->self, rb_fiberptr_self(join_list->fiber));
        }
        else {
            rb_threadptr_interrupt(target_thread);

            switch (target_thread->status) {
              case THREAD_STOPPED:
              case THREAD_STOPPED_FOREVER:
                target_thread->status = THREAD_RUNNABLE;
                break;
              default:
                break;
            }
        }
    }
}

void
rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
{
    while (th->keeping_mutexes) {
        rb_mutex_t *mutex = th->keeping_mutexes;
        th->keeping_mutexes = mutex->next_mutex;

        // rb_warn("mutex #<%p> was not unlocked by thread #<%p>", (void *)mutex, (void*)th);

        const char *error_message = rb_mutex_unlock_th(mutex, th, mutex->fiber);
        if (error_message) rb_bug("invalid keeping_mutexes: %s", error_message);
    }
}

void
rb_thread_terminate_all(rb_thread_t *th)
{
    rb_ractor_t *cr = th->ractor;
    rb_execution_context_t * volatile ec = th->ec;
    volatile int sleeping = 0;

    if (cr->threads.main != th) {
        rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
               (void *)cr->threads.main, (void *)th);
    }

    /* unlock all locking mutexes */
    rb_threadptr_unlock_all_locking_mutexes(th);

    EC_PUSH_TAG(ec);
    if (EC_EXEC_TAG() == TAG_NONE) {
      retry:
        RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));

        terminate_all(cr, th);

        while (rb_ractor_living_thread_num(cr) > 1) {
            rb_hrtime_t rel = RB_HRTIME_PER_SEC;
            /*
             * The thread-exit routine in thread_start_func_2 notifies
             * us when the last sub-thread exits.
             */
            sleeping = 1;
            native_sleep(th, &rel);
            RUBY_VM_CHECK_INTS_BLOCKING(ec);
            sleeping = 0;
        }
    }
    else {
        /*
         * When an exception is caught (e.g. Ctrl+C), broadcast the kill
         * request again to ensure all threads are killed even if they
         * are blocked on sleep, mutex, etc.
         */
        if (sleeping) {
            sleeping = 0;
            goto retry;
        }
    }
    EC_POP_TAG();
}

void rb_threadptr_root_fiber_terminate(rb_thread_t *th);

static void
thread_cleanup_func_before_exec(void *th_ptr)
{
    rb_thread_t *th = th_ptr;
    th->status = THREAD_KILLED;

    // The thread stack doesn't exist in the forked process:
    th->ec->machine.stack_start = th->ec->machine.stack_end = NULL;

    rb_threadptr_root_fiber_terminate(th);
}

static void
thread_cleanup_func(void *th_ptr, int atfork)
{
    rb_thread_t *th = th_ptr;

    th->locking_mutex = Qfalse;
    thread_cleanup_func_before_exec(th_ptr);

    /*
     * Unfortunately, we can't release native threading resources at fork
     * because libc may be in an unstable locking state, so touching a
     * threading resource may cause a deadlock.
     */
    if (atfork) {
        th->nt = NULL;
        return;
    }

    rb_native_mutex_destroy(&th->interrupt_lock);
}

static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);
static VALUE rb_thread_to_s(VALUE thread);

void
ruby_thread_init_stack(rb_thread_t *th)
{
    native_thread_init_stack(th);
}

const VALUE *
rb_vm_proc_local_ep(VALUE proc)
{
    const VALUE *ep = vm_proc_ep(proc);

    if (ep) {
        return rb_vm_ep_local_ep(ep);
    }
    else {
        return NULL;
    }
}

// for ractor, defined in vm.c
VALUE rb_vm_invoke_proc_with_self(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
                                  int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler);

static VALUE
thread_do_start_proc(rb_thread_t *th)
{
    VALUE args = th->invoke_arg.proc.args;
    const VALUE *args_ptr;
    int args_len;
    VALUE procval = th->invoke_arg.proc.proc;
    rb_proc_t *proc;
    GetProcPtr(procval, proc);

    th->ec->errinfo = Qnil;
    th->ec->root_lep = rb_vm_proc_local_ep(procval);
    th->ec->root_svar = Qfalse;

    vm_check_ints_blocking(th->ec);

    if (th->invoke_type == thread_invoke_type_ractor_proc) {
        VALUE self = rb_ractor_self(th->ractor);
        VM_ASSERT(FIXNUM_P(args));
        args_len = FIX2INT(args);
        args_ptr = ALLOCA_N(VALUE, args_len);
        rb_ractor_receive_parameters(th->ec, th->ractor, args_len, (VALUE *)args_ptr);
        vm_check_ints_blocking(th->ec);

        return rb_vm_invoke_proc_with_self(
            th->ec, proc, self,
            args_len, args_ptr,
            th->invoke_arg.proc.kw_splat,
            VM_BLOCK_HANDLER_NONE
        );
    }
    else {
        args_len = RARRAY_LENINT(args);
        if (args_len < 8) {
            /* free proc.args if the length is small enough */
            args_ptr = ALLOCA_N(VALUE, args_len);
            MEMCPY((VALUE *)args_ptr, RARRAY_CONST_PTR(args), VALUE, args_len);
            th->invoke_arg.proc.args = Qnil;
        }
        else {
            args_ptr = RARRAY_CONST_PTR(args);
        }

        vm_check_ints_blocking(th->ec);

        return rb_vm_invoke_proc(
            th->ec, proc,
            args_len, args_ptr,
            th->invoke_arg.proc.kw_splat,
            VM_BLOCK_HANDLER_NONE
        );
    }
}

static VALUE
thread_do_start(rb_thread_t *th)
{
    native_set_thread_name(th);
    VALUE result = Qundef;

    switch (th->invoke_type) {
      case thread_invoke_type_proc:
        result = thread_do_start_proc(th);
        break;

      case thread_invoke_type_ractor_proc:
        result = thread_do_start_proc(th);
        rb_ractor_atexit(th->ec, result);
        break;

      case thread_invoke_type_func:
        result = (*th->invoke_arg.func.func)(th->invoke_arg.func.arg);
        break;

      case thread_invoke_type_none:
        rb_bug("unreachable");
    }

    return result;
}

void rb_ec_clear_current_thread_trace_func(const rb_execution_context_t *ec);

static int
thread_start_func_2(rb_thread_t *th, VALUE *stack_start)
{
    STACK_GROW_DIR_DETECTION;

    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
    VM_ASSERT(th != th->vm->ractor.main_thread);

    enum ruby_tag_type state;
    VALUE errinfo = Qnil;
    rb_thread_t *ractor_main_th = th->ractor->threads.main;

    // setup ractor
    if (rb_ractor_status_p(th->ractor, ractor_blocking)) {
        RB_VM_LOCK();
        {
            rb_vm_ractor_blocking_cnt_dec(th->vm, th->ractor, __FILE__, __LINE__);
            rb_ractor_t *r = th->ractor;
            r->r_stdin = rb_io_prep_stdin();
            r->r_stdout = rb_io_prep_stdout();
            r->r_stderr = rb_io_prep_stderr();
        }
        RB_VM_UNLOCK();
    }

    // Ensure that we are not joinable.
    VM_ASSERT(UNDEF_P(th->value));

    int fiber_scheduler_closed = 0, event_thread_end_hooked = 0;
    VALUE result = Qundef;

    EC_PUSH_TAG(th->ec);

    if ((state = EC_EXEC_TAG()) == TAG_NONE) {
        EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_BEGIN, th->self, 0, 0, 0, Qundef);

        SAVE_ROOT_JMPBUF(th, result = thread_do_start(th));
    }

    if (!fiber_scheduler_closed) {
        fiber_scheduler_closed = 1;
        rb_fiber_scheduler_set(Qnil);
    }

    if (!event_thread_end_hooked) {
        event_thread_end_hooked = 1;
        EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_END, th->self, 0, 0, 0, Qundef);
    }

    if (state == TAG_NONE) {
        // This must be set AFTER doing all user-level code. At this point, the thread is effectively finished and calls to `Thread#join` will succeed.
        th->value = result;
    }
    else {
        errinfo = th->ec->errinfo;

        VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, Qundef);
        if (!NIL_P(exc)) errinfo = exc;

        if (state == TAG_FATAL) {
            if (th->invoke_type == thread_invoke_type_ractor_proc) {
                rb_ractor_atexit(th->ec, Qnil);
            }
            /* fatal error within this thread, need to stop whole script */
        }
        else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
            /* exit on main_thread. */
        }
        else {
            if (th->report_on_exception) {
                VALUE mesg = rb_thread_to_s(th->self);
                rb_str_cat_cstr(mesg, " terminated with exception (report_on_exception is true):\n");
                rb_write_error_str(mesg);
                rb_ec_error_print(th->ec, errinfo);
            }

            if (th->invoke_type == thread_invoke_type_ractor_proc) {
                rb_ractor_atexit_exception(th->ec);
            }

            if (th->vm->thread_abort_on_exception ||
                th->abort_on_exception || RTEST(ruby_debug)) {
                /* exit on main_thread */
            }
            else {
                errinfo = Qnil;
            }
        }
        th->value = Qnil;
    }

    // The thread is effectively finished and can be joined.
    VM_ASSERT(!UNDEF_P(th->value));

    rb_threadptr_join_list_wakeup(th);
    rb_threadptr_unlock_all_locking_mutexes(th);

    if (th->invoke_type == thread_invoke_type_ractor_proc) {
        rb_thread_terminate_all(th);
        rb_ractor_teardown(th->ec);
    }

    th->status = THREAD_KILLED;
    RUBY_DEBUG_LOG("killed th:%u", rb_th_serial(th));

    if (th->vm->ractor.main_thread == th) {
        ruby_stop(0);
    }

    if (RB_TYPE_P(errinfo, T_OBJECT)) {
        /* treat with normal error object */
        rb_threadptr_raise(ractor_main_th, 1, &errinfo);
    }

    EC_POP_TAG();

    rb_ec_clear_current_thread_trace_func(th->ec);

    /* locking_mutex must be Qfalse */
    if (th->locking_mutex != Qfalse) {
        rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
               (void *)th, th->locking_mutex);
    }

    if (ractor_main_th->status == THREAD_KILLED &&
        th->ractor->threads.cnt <= 2 /* main thread and this thread */) {
        /* I'm the last thread. Wake up the main thread from rb_thread_terminate_all. */
        rb_threadptr_interrupt(ractor_main_th);
    }

    rb_check_deadlock(th->ractor);

    rb_fiber_close(th->ec->fiber_ptr);

    thread_cleanup_func(th, FALSE);
    VM_ASSERT(th->ec->vm_stack == NULL);

    if (th->invoke_type == thread_invoke_type_ractor_proc) {
        // after rb_ractor_living_threads_remove()
        // GC can happen at any time and this ractor can be collected (destroying the GVL),
        // so gvl_release() should come before it.
        thread_sched_to_dead(TH_SCHED(th), th);
        rb_ractor_living_threads_remove(th->ractor, th);
    }
    else {
        rb_ractor_living_threads_remove(th->ractor, th);
        thread_sched_to_dead(TH_SCHED(th), th);
    }

    return 0;
}

struct thread_create_params {
    enum thread_invoke_type type;

    // for normal proc thread
    VALUE args;
    VALUE proc;

    // for ractor
    rb_ractor_t *g;

    // for func
    VALUE (*fn)(void *);
};

static void thread_specific_storage_alloc(rb_thread_t *th);

static VALUE
thread_create_core(VALUE thval, struct thread_create_params *params)
{
    rb_execution_context_t *ec = GET_EC();
    rb_thread_t *th = rb_thread_ptr(thval), *current_th = rb_ec_thread_ptr(ec);
    int err;

    thread_specific_storage_alloc(th);

    if (OBJ_FROZEN(current_th->thgroup)) {
        rb_raise(rb_eThreadError,
                 "can't start a new thread (frozen ThreadGroup)");
    }

    rb_fiber_inherit_storage(ec, th->ec->fiber_ptr);

    switch (params->type) {
      case thread_invoke_type_proc:
        th->invoke_type = thread_invoke_type_proc;
        th->invoke_arg.proc.args = params->args;
        th->invoke_arg.proc.proc = params->proc;
        th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
        break;

      case thread_invoke_type_ractor_proc:
#if RACTOR_CHECK_MODE > 0
        rb_ractor_setup_belonging_to(thval, rb_ractor_id(params->g));
#endif
        th->invoke_type = thread_invoke_type_ractor_proc;
        th->ractor = params->g;
        th->ractor->threads.main = th;
        th->invoke_arg.proc.proc = rb_proc_isolate_bang(params->proc);
        th->invoke_arg.proc.args = INT2FIX(RARRAY_LENINT(params->args));
        th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
        rb_ractor_send_parameters(ec, params->g, params->args);
        break;

      case thread_invoke_type_func:
        th->invoke_type = thread_invoke_type_func;
        th->invoke_arg.func.func = params->fn;
        th->invoke_arg.func.arg = (void *)params->args;
        break;

      default:
        rb_bug("unreachable");
    }

    th->priority = current_th->priority;
    th->thgroup = current_th->thgroup;

    th->pending_interrupt_queue = rb_ary_hidden_new(0);
    th->pending_interrupt_queue_checked = 0;
    th->pending_interrupt_mask_stack = rb_ary_dup(current_th->pending_interrupt_mask_stack);
    RBASIC_CLEAR_CLASS(th->pending_interrupt_mask_stack);

    rb_native_mutex_initialize(&th->interrupt_lock);

    RUBY_DEBUG_LOG("r:%u th:%u", rb_ractor_id(th->ractor), rb_th_serial(th));

    rb_ractor_living_threads_insert(th->ractor, th);

    /* kick thread */
    err = native_thread_create(th);
    if (err) {
        th->status = THREAD_KILLED;
        rb_ractor_living_threads_remove(th->ractor, th);
        rb_raise(rb_eThreadError, "can't create Thread: %s", strerror(err));
    }
    return thval;
}

#define threadptr_initialized(th) ((th)->invoke_type != thread_invoke_type_none)

/*
 * call-seq:
 *  Thread.new { ... } -> thread
 *  Thread.new(*args, &proc) -> thread
 *  Thread.new(*args) { |args| ... } -> thread
 *
 *  Creates a new thread executing the given block.
 *
 *  Any +args+ given to ::new will be passed to the block:
 *
 *     arr = []
 *     a, b, c = 1, 2, 3
 *     Thread.new(a,b,c) { |d,e,f| arr << d << e << f }.join
 *     arr #=> [1, 2, 3]
 *
 *  A ThreadError exception is raised if ::new is called without a block.
 *
 *  If you're going to subclass Thread, be sure to call super in your
 *  +initialize+ method, otherwise a ThreadError will be raised.
 */
static VALUE
thread_s_new(int argc, VALUE *argv, VALUE klass)
{
    rb_thread_t *th;
    VALUE thread = rb_thread_alloc(klass);

    if (GET_RACTOR()->threads.main->status == THREAD_KILLED) {
        rb_raise(rb_eThreadError, "can't alloc thread");
    }

    rb_obj_call_init_kw(thread, argc, argv, RB_PASS_CALLED_KEYWORDS);
    th = rb_thread_ptr(thread);
    if (!threadptr_initialized(th)) {
        rb_raise(rb_eThreadError, "uninitialized thread - check `%"PRIsVALUE"#initialize'",
                 klass);
    }
    return thread;
}

/*
 * call-seq:
 *  Thread.start([args]*) {|args| block } -> thread
 *  Thread.fork([args]*) {|args| block } -> thread
 *
 *  Basically the same as ::new. However, if class Thread is subclassed, then
 *  calling +start+ in that subclass will not invoke the subclass's
 *  +initialize+ method.
 */

static VALUE
thread_start(VALUE klass, VALUE args)
{
    struct thread_create_params params = {
        .type = thread_invoke_type_proc,
        .args = args,
        .proc = rb_block_proc(),
    };
    return thread_create_core(rb_thread_alloc(klass), &params);
}

static VALUE
threadptr_invoke_proc_location(rb_thread_t *th)
{
    if (th->invoke_type == thread_invoke_type_proc) {
        return rb_proc_location(th->invoke_arg.proc.proc);
    }
    else {
        return Qnil;
    }
}

/* :nodoc: */
static VALUE
thread_initialize(VALUE thread, VALUE args)
{
    rb_thread_t *th = rb_thread_ptr(thread);

    if (!rb_block_given_p()) {
        rb_raise(rb_eThreadError, "must be called with a block");
    }
    else if (th->invoke_type != thread_invoke_type_none) {
        VALUE loc = threadptr_invoke_proc_location(th);
        if (!NIL_P(loc)) {
            rb_raise(rb_eThreadError,
                     "already initialized thread - %"PRIsVALUE":%"PRIsVALUE,
                     RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
        }
        else {
            rb_raise(rb_eThreadError, "already initialized thread");
        }
    }
    else {
        struct thread_create_params params = {
            .type = thread_invoke_type_proc,
            .args = args,
            .proc = rb_block_proc(),
        };
        return thread_create_core(thread, &params);
    }
}

VALUE
rb_thread_create(VALUE (*fn)(void *), void *arg)
{
    struct thread_create_params params = {
        .type = thread_invoke_type_func,
        .fn = fn,
        .args = (VALUE)arg,
    };
    return thread_create_core(rb_thread_alloc(rb_cThread), &params);
}

VALUE
rb_thread_create_ractor(rb_ractor_t *r, VALUE args, VALUE proc)
{
    struct thread_create_params params = {
        .type = thread_invoke_type_ractor_proc,
        .g = r,
        .args = args,
        .proc = proc,
    };
    return thread_create_core(rb_thread_alloc(rb_cThread), &params);
}

struct join_arg {
    struct rb_waiting_list *waiter;
    rb_thread_t *target;
    VALUE timeout;
    rb_hrtime_t *limit;
};

static VALUE
remove_from_join_list(VALUE arg)
{
    struct join_arg *p = (struct join_arg *)arg;
    rb_thread_t *target_thread = p->target;

    if (target_thread->status != THREAD_KILLED) {
        struct rb_waiting_list **join_list = &target_thread->join_list;

        while (*join_list) {
            if (*join_list == p->waiter) {
                *join_list = (*join_list)->next;
                break;
            }

            join_list = &(*join_list)->next;
        }
    }

    return Qnil;
}

static int
thread_finished(rb_thread_t *th)
{
    return th->status == THREAD_KILLED || !UNDEF_P(th->value);
}

static VALUE
thread_join_sleep(VALUE arg)
{
    struct join_arg *p = (struct join_arg *)arg;
    rb_thread_t *target_th = p->target, *th = p->waiter->thread;
    rb_hrtime_t end = 0, *limit = p->limit;

    if (limit) {
        end = rb_hrtime_add(*limit, rb_hrtime_now());
    }

    while (!thread_finished(target_th)) {
        VALUE scheduler = rb_fiber_scheduler_current();

        if (scheduler != Qnil) {
            rb_fiber_scheduler_block(scheduler, target_th->self, p->timeout);
            // Check if the target thread is finished after blocking:
            if (thread_finished(target_th)) break;
            // Otherwise, a timeout occurred:
            else return Qfalse;
        }
        else if (!limit) {
            sleep_forever(th, SLEEP_DEADLOCKABLE | SLEEP_ALLOW_SPURIOUS | SLEEP_NO_CHECKINTS);
        }
        else {
            if (hrtime_update_expire(limit, end)) {
                RUBY_DEBUG_LOG("timeout target_th:%u", rb_th_serial(target_th));
                return Qfalse;
            }
            th->status = THREAD_STOPPED;
            native_sleep(th, limit);
        }
        RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
        th->status = THREAD_RUNNABLE;

        RUBY_DEBUG_LOG("interrupted target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
    }

    return Qtrue;
}

static VALUE
thread_join(rb_thread_t *target_th, VALUE timeout, rb_hrtime_t *limit)
{
    rb_execution_context_t *ec = GET_EC();
    rb_thread_t *th = ec->thread_ptr;
    rb_fiber_t *fiber = ec->fiber_ptr;

    if (th == target_th) {
        rb_raise(rb_eThreadError, "Target thread must not be current thread");
    }

    if (th->ractor->threads.main == target_th) {
        rb_raise(rb_eThreadError, "Target thread must not be main thread");
    }

    RUBY_DEBUG_LOG("target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));

    if (target_th->status != THREAD_KILLED) {
        struct rb_waiting_list waiter;
        waiter.next = target_th->join_list;
        waiter.thread = th;
        waiter.fiber = rb_fiberptr_blocking(fiber) ? NULL : fiber;
        target_th->join_list = &waiter;

        struct join_arg arg;
        arg.waiter = &waiter;
        arg.target = target_th;
        arg.timeout = timeout;
        arg.limit = limit;

        if (!rb_ensure(thread_join_sleep, (VALUE)&arg, remove_from_join_list, (VALUE)&arg)) {
            return Qnil;
        }
    }

    RUBY_DEBUG_LOG("success target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));

    if (target_th->ec->errinfo != Qnil) {
        VALUE err = target_th->ec->errinfo;

        if (FIXNUM_P(err)) {
            switch (err) {
              case INT2FIX(TAG_FATAL):
                RUBY_DEBUG_LOG("terminated target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));

                /* OK. killed. */
                break;
              default:
                rb_bug("thread_join: Fixnum (%d) should not reach here.", FIX2INT(err));
            }
        }
        else if (THROW_DATA_P(target_th->ec->errinfo)) {
            rb_bug("thread_join: THROW_DATA should not reach here.");
        }
        else {
            /* normal exception */
            rb_exc_raise(err);
        }
    }
    return target_th->self;
}

/*
 * call-seq:
 *   thr.join          -> thr
 *   thr.join(limit)   -> thr
 *
 *  The calling thread will suspend execution and run this +thr+.
 *
 *  Does not return until +thr+ exits or until the given +limit+ seconds have
 *  passed.
 *
 *  If the time limit expires, +nil+ will be returned, otherwise +thr+ is
 *  returned.
 *
 *  Any threads not joined will be killed when the main program exits.
 *
 *  If +thr+ had previously raised an exception and the ::abort_on_exception or
 *  $DEBUG flags are not set, (so the exception has not yet been processed), it
 *  will be processed at this time.
 *
 *     a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
 *     x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
 *     x.join # Let thread x finish, thread a will be killed on exit.
 *     #=> "axyz"
 *
 *  The following example illustrates the +limit+ parameter.
 *
 *     y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
 *     puts "Waiting" until y.join(0.15)
 *
 *  This will produce:
 *
 *     tick...
 *     Waiting
 *     tick...
 *     Waiting
 *     tick...
 *     tick...
 */

static VALUE
thread_join_m(int argc, VALUE *argv, VALUE self)
{
    VALUE timeout = Qnil;
    rb_hrtime_t rel = 0, *limit = 0;

    if (rb_check_arity(argc, 0, 1)) {
        timeout = argv[0];
    }

    // Convert the timeout eagerly, so it's always converted and deterministic
    /*
     * This supports INFINITY and negative values, so we can't use
     * rb_time_interval right now...
     */
    if (NIL_P(timeout)) {
        /* unlimited */
    }
    else if (FIXNUM_P(timeout)) {
        rel = rb_sec2hrtime(NUM2TIMET(timeout));
        limit = &rel;
    }
    else {
        limit = double2hrtime(&rel, rb_num2dbl(timeout));
    }

    return thread_join(rb_thread_ptr(self), timeout, limit);
}

/*
 * call-seq:
 *   thr.value   -> obj
 *
 *  Waits for +thr+ to complete, using #join, and returns its value or raises
 *  the exception which terminated the thread.
 *
 *     a = Thread.new { 2 + 2 }
 *     a.value   #=> 4
 *
 *     b = Thread.new { raise 'something went wrong' }
 *     b.value   #=> RuntimeError: something went wrong
 */

static VALUE
thread_value(VALUE self)
{
    rb_thread_t *th = rb_thread_ptr(self);
    thread_join(th, Qnil, 0);
    if (UNDEF_P(th->value)) {
        // If the thread is dead because we forked, th->value is still Qundef.
        return Qnil;
    }
    return th->value;
}

/*
 * Thread Scheduling
 */

static void
getclockofday(struct timespec *ts)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    if (clock_gettime(CLOCK_MONOTONIC, ts) == 0)
        return;
#endif
    rb_timespec_now(ts);
}

/*
 * Don't inline this, since the library call is already time consuming
 * and we don't want "struct timespec" on the stack too long for GC.
 */
NOINLINE(rb_hrtime_t rb_hrtime_now(void));
rb_hrtime_t
rb_hrtime_now(void)
{
    struct timespec ts;

    getclockofday(&ts);
    return rb_timespec2hrtime(&ts);
}

/*
 * at least gcc 7.2 and 7.3 complains about "rb_hrtime_t end"
 * being uninitialized, maybe other versions, too.
 */
COMPILER_WARNING_PUSH
#if defined(__GNUC__) && __GNUC__ == 7 && __GNUC_MINOR__ <= 3
COMPILER_WARNING_IGNORED(-Wmaybe-uninitialized)
#endif
#ifndef PRIu64
#define PRIu64 PRI_64_PREFIX "u"
#endif
/*
 * @end is the absolute time when @ts is set to expire.
 * Returns true if @end has passed.
 * Updates @ts and returns false otherwise.
 */
static int
hrtime_update_expire(rb_hrtime_t *timeout, const rb_hrtime_t end)
{
    rb_hrtime_t now = rb_hrtime_now();

    if (now > end) return 1;

    RUBY_DEBUG_LOG("%"PRIu64" > %"PRIu64"", (uint64_t)end, (uint64_t)now);

    *timeout = end - now;
    return 0;
}
COMPILER_WARNING_POP
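/*
 * Usage sketch: the canonical caller loop (see sleep_hrtime below)
 * computes an absolute deadline once, then re-arms the relative timeout
 * after every spurious wakeup (wait_up_to() is hypothetical):
 *
 *   rb_hrtime_t rel = RB_HRTIME_PER_SEC;                // 1 second
 *   rb_hrtime_t end = rb_hrtime_add(rb_hrtime_now(), rel);
 *   while (still_waiting) {
 *       wait_up_to(&rel);                               // may wake early
 *       if (hrtime_update_expire(&rel, end)) break;     // deadline reached
 *   }
 */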

static int
sleep_hrtime(rb_thread_t *th, rb_hrtime_t rel, unsigned int fl)
{
    enum rb_thread_status prev_status = th->status;
    int woke;
    rb_hrtime_t end = rb_hrtime_add(rb_hrtime_now(), rel);

    th->status = THREAD_STOPPED;
    RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
    while (th->status == THREAD_STOPPED) {
        native_sleep(th, &rel);
        woke = vm_check_ints_blocking(th->ec);
        if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
            break;
        if (hrtime_update_expire(&rel, end))
            break;
        woke = 1;
    }
    th->status = prev_status;
    return woke;
}

static int
sleep_hrtime_until(rb_thread_t *th, rb_hrtime_t end, unsigned int fl)
{
    enum rb_thread_status prev_status = th->status;
    int woke;
    rb_hrtime_t rel = rb_hrtime_sub(end, rb_hrtime_now());

    th->status = THREAD_STOPPED;
    RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
    while (th->status == THREAD_STOPPED) {
        native_sleep(th, &rel);
        woke = vm_check_ints_blocking(th->ec);
        if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
            break;
        if (hrtime_update_expire(&rel, end))
            break;
        woke = 1;
    }
    th->status = prev_status;
    return woke;
}

static void
sleep_forever(rb_thread_t *th, unsigned int fl)
{
    enum rb_thread_status prev_status = th->status;
    enum rb_thread_status status;
    int woke;

    status = fl & SLEEP_DEADLOCKABLE ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
    th->status = status;

    if (!(fl & SLEEP_NO_CHECKINTS)) RUBY_VM_CHECK_INTS_BLOCKING(th->ec);

    while (th->status == status) {
        if (fl & SLEEP_DEADLOCKABLE) {
            rb_ractor_sleeper_threads_inc(th->ractor);
            rb_check_deadlock(th->ractor);
        }
        {
            native_sleep(th, 0);
        }
        if (fl & SLEEP_DEADLOCKABLE) {
            rb_ractor_sleeper_threads_dec(th->ractor);
        }
        if (fl & SLEEP_ALLOW_SPURIOUS) {
            break;
        }

        woke = vm_check_ints_blocking(th->ec);

        if (woke && !(fl & SLEEP_SPURIOUS_CHECK)) {
            break;
        }
    }
    th->status = prev_status;
}

void
rb_thread_sleep_forever(void)
{
    RUBY_DEBUG_LOG("forever");
    sleep_forever(GET_THREAD(), SLEEP_SPURIOUS_CHECK);
}

void
rb_thread_sleep_deadly(void)
{
    RUBY_DEBUG_LOG("deadly");
    sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE|SLEEP_SPURIOUS_CHECK);
}

static void
rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end)
{
    VALUE scheduler = rb_fiber_scheduler_current();
    if (scheduler != Qnil) {
        rb_fiber_scheduler_block(scheduler, blocker, timeout);
    }
    else {
        RUBY_DEBUG_LOG("...");
        if (end) {
            sleep_hrtime_until(GET_THREAD(), end, SLEEP_SPURIOUS_CHECK);
        }
        else {
            sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE);
        }
    }
}

void
rb_thread_wait_for(struct timeval time)
{
    rb_thread_t *th = GET_THREAD();

    sleep_hrtime(th, rb_timeval2hrtime(&time), SLEEP_SPURIOUS_CHECK);
}

/*
 * CAUTION: This function causes thread switching.
 * rb_thread_check_ints() checks ruby's interrupts.
 * Some interrupts need thread switching, invoking handlers,
 * and so on.
 */

void
rb_thread_check_ints(void)
{
    RUBY_VM_CHECK_INTS_BLOCKING(GET_EC());
}

/*
 * Hidden API for tcl/tk wrapper.
 * There is no guarantee to perpetuate it.
 */
int
rb_thread_check_trap_pending(void)
{
    return rb_signal_buff_size() != 0;
}

/* This function can be called in blocking region. */
int
rb_thread_interrupted(VALUE thval)
{
    return (int)RUBY_VM_INTERRUPTED(rb_thread_ptr(thval)->ec);
}

void
rb_thread_sleep(int sec)
{
    rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
}

static void
rb_thread_schedule_limits(uint32_t limits_us)
{
    if (!rb_thread_alone()) {
        rb_thread_t *th = GET_THREAD();
        RUBY_DEBUG_LOG("us:%u", (unsigned int)limits_us);

        if (th->running_time_us >= limits_us) {
            RUBY_DEBUG_LOG("switch %s", "start");

            RB_VM_SAVE_MACHINE_CONTEXT(th);
            thread_sched_yield(TH_SCHED(th), th);
            rb_ractor_thread_switch(th->ractor, th);

            RUBY_DEBUG_LOG("switch %s", "done");
        }
    }
}

void
rb_thread_schedule(void)
{
    rb_thread_schedule_limits(0);
    RUBY_VM_CHECK_INTS(GET_EC());
}
1471/* blocking region */
1472
1473static inline int
1474blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
1475 rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted)
1476{
1477#ifdef RUBY_VM_CRITICAL_SECTION
1478 VM_ASSERT(ruby_assert_critical_section_entered == 0);
1479#endif
1480 VM_ASSERT(th == GET_THREAD());
1481
1482 region->prev_status = th->status;
1483 if (unblock_function_set(th, ubf, arg, fail_if_interrupted)) {
1484 th->blocking_region_buffer = region;
1485 th->status = THREAD_STOPPED;
1486 rb_ractor_blocking_threads_inc(th->ractor, __FILE__, __LINE__);
1487
1488 RUBY_DEBUG_LOG("thread_id:%p", (void *)th->nt->thread_id);
1489 return TRUE;
1490 }
1491 else {
1492 return FALSE;
1493 }
1494}
1495
1496static inline void
1497blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
1498{
1499 /* entry to ubf_list still permitted at this point, make it impossible: */
1500 unblock_function_clear(th);
1501 /* entry to ubf_list impossible at this point, so unregister is safe: */
1502 unregister_ubf_list(th);
1503
1504 thread_sched_to_running(TH_SCHED(th), th);
1505 rb_ractor_thread_switch(th->ractor, th);
1506
1507 th->blocking_region_buffer = 0;
1508 rb_ractor_blocking_threads_dec(th->ractor, __FILE__, __LINE__);
1509 if (th->status == THREAD_STOPPED) {
1510 th->status = region->prev_status;
1511 }
1512
1513 RUBY_DEBUG_LOG("end");
1514
1515#ifndef _WIN32
1516 // GET_THREAD() clears WSAGetLastError()
1517 VM_ASSERT(th == GET_THREAD());
1518#endif
1519}
1520
1521void *
1522rb_nogvl(void *(*func)(void *), void *data1,
1523 rb_unblock_function_t *ubf, void *data2,
1524 int flags)
1525{
1526 void *val = 0;
1527 rb_execution_context_t *ec = GET_EC();
1528 rb_thread_t *th = rb_ec_thread_ptr(ec);
1529 rb_vm_t *vm = rb_ec_vm_ptr(ec);
1530 bool is_main_thread = vm->ractor.main_thread == th;
1531 int saved_errno = 0;
1532 VALUE ubf_th = Qfalse;
1533
1534 if ((ubf == RUBY_UBF_IO) || (ubf == RUBY_UBF_PROCESS)) {
1535 ubf = ubf_select;
1536 data2 = th;
1537 }
1538 else if (ubf && rb_ractor_living_thread_num(th->ractor) == 1 && is_main_thread) {
1539 if (flags & RB_NOGVL_UBF_ASYNC_SAFE) {
1540 vm->ubf_async_safe = 1;
1541 }
1542 }
1543
1544 BLOCKING_REGION(th, {
1545 val = func(data1);
1546 saved_errno = rb_errno();
1547 }, ubf, data2, flags & RB_NOGVL_INTR_FAIL);
1548
1549 if (is_main_thread) vm->ubf_async_safe = 0;
1550
1551 if ((flags & RB_NOGVL_INTR_FAIL) == 0) {
1552 RUBY_VM_CHECK_INTS_BLOCKING(ec);
1553 }
1554
1555 if (ubf_th != Qfalse) {
1556 thread_value(rb_thread_kill(ubf_th));
1557 }
1558
1559 rb_errno_set(saved_errno);
1560
1561 return val;
1562}

/*
 * rb_thread_call_without_gvl - permit concurrent/parallel execution.
 * rb_thread_call_without_gvl2 - permit concurrent/parallel execution
 *                               without interrupt processing.
 *
 * rb_thread_call_without_gvl() does:
 *   (1) Check interrupts.
 *   (2) release GVL.
 *       Other Ruby threads may run in parallel.
 *   (3) call func with data1
 *   (4) acquire GVL.
 *       Other Ruby threads can not run in parallel any more.
 *   (5) Check interrupts.
 *
 * rb_thread_call_without_gvl2() does:
 *   (1) Check interrupt and return if interrupted.
 *   (2) release GVL.
 *   (3) call func with data1 and a pointer to the flags.
 *   (4) acquire GVL.
 *
 * If another thread interrupts this thread (Thread#kill, signal delivery,
 * VM-shutdown request, and so on), `ubf()' is called (`ubf()' means
 * "un-blocking function"). `ubf()' should interrupt `func()' execution by
 * toggling a cancellation flag, canceling the invocation of a call inside
 * `func()' or similar. Note that `ubf()' may not be called with the GVL.
 *
 * There are built-in ubfs and you can specify these ubfs:
 *
 * * RUBY_UBF_IO: ubf for IO operation
 * * RUBY_UBF_PROCESS: ubf for process operation
 *
 * However, we can not guarantee our built-in ubfs interrupt your `func()'
 * correctly. Be careful when using rb_thread_call_without_gvl(). If you
 * don't provide a proper ubf(), your program will not stop for Control+C
 * or other shutdown events.
 *
 * "Check interrupts" on the above list means checking asynchronous
 * interrupt events (such as Thread#kill, signal delivery, VM-shutdown
 * request, and so on) and calling corresponding procedures
 * (such as `trap' for signals, raise an exception for Thread#raise).
 * If `func()' finished and received interrupts, you may skip interrupt
 * checking.  For example, assume the following func() reads data from a
 * file.
 *
 *   read_func(...) {
 *                   // (a) before read
 *     read(buffer); // (b) reading
 *                   // (c) after read
 *   }
 *
 * If an interrupt occurs at (a) or (b), then `ubf()' cancels this
 * `read_func()' and interrupts are checked. However, if an interrupt occurs
 * at (c), after the *read* operation is completed, checking interrupts is
 * harmful because it causes an irrevocable side effect: the read data will
 * vanish.  To avoid this problem, the `read_func()' should be used with
 * `rb_thread_call_without_gvl2()'.
 *
 * If `rb_thread_call_without_gvl2()' detects an interrupt, it returns
 * immediately. This function does not show when the execution was interrupted.
 * For example, there are 4 possible timings: (a), (b), (c), and before calling
 * read_func(). You need to record the progress of read_func() and check
 * the progress after `rb_thread_call_without_gvl2()'. You may need to call
 * `rb_thread_check_ints()' correctly, or your program will not handle
 * events such as `trap' properly.
 *
 * NOTE: You can not execute most of the Ruby C API or touch Ruby
 *       objects in `func()' and `ubf()', including raising an
 *       exception, because the current thread doesn't hold the GVL
 *       (it causes synchronization problems).  If you need to
 *       call ruby functions, either use rb_thread_call_with_gvl()
 *       or read the source code of the C APIs and confirm safety by
 *       yourself.
 *
 * NOTE: In short, this API is difficult to use safely.  I recommend you
 *       use other ways if you can.  We lack experience using this API.
 *       Please report your problems related to it.
 *
 * NOTE: Releasing the GVL and re-acquiring the GVL may be expensive
 *       operations for a short-running `func()'. Be sure to benchmark and
 *       use this mechanism only when `func()' consumes enough time.
 *
 * Safe C API:
 * * rb_thread_interrupted() - check interrupt flag
 * * ruby_xmalloc(), ruby_xrealloc(), ruby_xfree() -
 *   they will work without GVL, and may acquire GVL when GC is needed.
 */
void *
rb_thread_call_without_gvl2(void *(*func)(void *), void *data1,
                            rb_unblock_function_t *ubf, void *data2)
{
    return rb_nogvl(func, data1, ubf, data2, RB_NOGVL_INTR_FAIL);
}

void *
rb_thread_call_without_gvl(void *(*func)(void *data), void *data1,
                           rb_unblock_function_t *ubf, void *data2)
{
    return rb_nogvl(func, data1, ubf, data2, 0);
}
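/*
 * Illustrative sketch (hypothetical extension code): a custom ubf that
 * cancels a pollable wait, following the cancellation-flag advice in the
 * comment above.  The ubf only touches a flag and a pipe; it must not
 * call Ruby APIs since it may run without the GVL.  work_ready() and
 * poll_once() are made-up helpers.
 *
 *   struct wait_ctx { volatile int cancelled; int wake_fd; };
 *
 *   static void
 *   wake_up(void *ptr)                  // the ubf
 *   {
 *       struct wait_ctx *ctx = ptr;
 *       ctx->cancelled = 1;
 *       write(ctx->wake_fd, "", 1);     // async-signal-safe wakeup
 *   }
 *
 *   static void *
 *   wait_func(void *ptr)                // runs without the GVL
 *   {
 *       struct wait_ctx *ctx = ptr;
 *       while (!ctx->cancelled && !work_ready())
 *           poll_once(ctx);
 *       return NULL;
 *   }
 *
 *   // rb_thread_call_without_gvl(wait_func, &ctx, wake_up, &ctx);
 */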

static int
waitfd_to_waiting_flag(int wfd_event)
{
    return wfd_event << 1;
}

static void
thread_io_setup_wfd(rb_thread_t *th, int fd, struct waiting_fd *wfd)
{
    wfd->fd = fd;
    wfd->th = th;
    wfd->busy = NULL;

    RB_VM_LOCK_ENTER();
    {
        ccan_list_add(&th->vm->waiting_fds, &wfd->wfd_node);
    }
    RB_VM_LOCK_LEAVE();
}

static void
thread_io_wake_pending_closer(struct waiting_fd *wfd)
{
    bool has_waiter = wfd->busy && RB_TEST(wfd->busy->wakeup_mutex);
    if (has_waiter) {
        rb_mutex_lock(wfd->busy->wakeup_mutex);
    }

    /* Needs to be protected with RB_VM_LOCK because we don't know if
       wfd is on the global list of pending FD ops or if it's on a
       struct rb_io_close_wait_list close-waiter. */
    RB_VM_LOCK_ENTER();
    ccan_list_del(&wfd->wfd_node);
    RB_VM_LOCK_LEAVE();

    if (has_waiter) {
        rb_thread_wakeup(wfd->busy->closing_thread);
        rb_mutex_unlock(wfd->busy->wakeup_mutex);
    }
}

static int
thread_io_wait_events(rb_thread_t *th, rb_execution_context_t *ec, int fd, int events, struct timeval *timeout, struct waiting_fd *wfd)
{
#if defined(USE_MN_THREADS) && USE_MN_THREADS
    if (!th_has_dedicated_nt(th) &&
        (events || timeout) &&
        th->blocking // no fiber scheduler
        ) {
        int r;
        rb_hrtime_t rel, *prel;

        if (timeout) {
            rel = rb_timeval2hrtime(timeout);
            prel = &rel;
        }
        else {
            prel = NULL;
        }

        VM_ASSERT(prel || (events & (RB_WAITFD_IN | RB_WAITFD_OUT)));

        thread_io_setup_wfd(th, fd, wfd);
        {
            // wait readable/writable
            r = thread_sched_wait_events(TH_SCHED(th), th, fd, waitfd_to_waiting_flag(events), prel);
        }
        thread_io_wake_pending_closer(wfd);

        RUBY_VM_CHECK_INTS_BLOCKING(ec);

        return r;
    }
#endif // defined(USE_MN_THREADS) && USE_MN_THREADS

    return 0;
}

VALUE
rb_thread_io_blocking_call(rb_blocking_function_t *func, void *data1, int fd, int events)
{
    rb_execution_context_t * volatile ec = GET_EC();
    rb_thread_t *th = rb_ec_thread_ptr(ec);

    RUBY_DEBUG_LOG("th:%u fd:%d ev:%d", rb_th_serial(th), fd, events);

    struct waiting_fd waiting_fd;

    thread_io_wait_events(th, ec, fd, events, NULL, &waiting_fd);

    volatile VALUE val = Qundef; /* shouldn't be used */
    volatile int saved_errno = 0;
    enum ruby_tag_type state;

    // `errno` is only valid when there is an actual error - but we can't
    // extract that from the return value of `func` alone, so we clear any
    // prior `errno` value here so that we can later check if it was set by
    // `func` or not (as opposed to some previously set value).
    errno = 0;

    thread_io_setup_wfd(th, fd, &waiting_fd);

    EC_PUSH_TAG(ec);
    if ((state = EC_EXEC_TAG()) == TAG_NONE) {
        BLOCKING_REGION(waiting_fd.th, {
            val = func(data1);
            saved_errno = errno;
        }, ubf_select, waiting_fd.th, FALSE);
    }
    EC_POP_TAG();

    /*
     * must be deleted before jump
     * this will delete either from waiting_fds or on-stack struct rb_io_close_wait_list
     */
    thread_io_wake_pending_closer(&waiting_fd);

    if (state) {
        EC_JUMP_TAG(ec, state);
    }
    /* TODO: check func() */
    RUBY_VM_CHECK_INTS_BLOCKING(ec);

    // If the error was a timeout, we raise a specific exception for that:
    if (saved_errno == ETIMEDOUT) {
        rb_raise(rb_eIOTimeoutError, "Blocking operation timed out!");
    }

    errno = saved_errno;

    return val;
}

VALUE
rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
{
    return rb_thread_io_blocking_call(func, data1, fd, 0);
}
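/*
 * Usage sketch (hypothetical caller): the fd and the desired events are
 * passed alongside the blocking function so the VM can wake the thread
 * if the fd is closed by another thread.  io_read_blocking and
 * struct io_args are made-up names.
 *
 *   static VALUE
 *   io_read_blocking(void *ptr)
 *   {
 *       struct io_args *args = ptr;
 *       return (VALUE)read(args->fd, args->buf, args->len);
 *   }
 *
 *   // rb_thread_io_blocking_call(io_read_blocking, &args, args.fd, RB_WAITFD_IN);
 */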

/*
 * rb_thread_call_with_gvl - re-enter the Ruby world after GVL release.
 *
 * After releasing the GVL using
 * rb_thread_call_without_gvl() you can not access Ruby values or invoke
 * methods. If you need to access Ruby, you must use this function
 * rb_thread_call_with_gvl().
 *
 * This function rb_thread_call_with_gvl() does:
 * (1) acquire GVL.
 * (2) call passed function `func'.
 * (3) release GVL.
 * (4) return a value which is returned at (2).
 *
 * NOTE: You should not return a Ruby object at (2) because such an object
 *       will not be marked.
 *
 * NOTE: If an exception is raised in `func', this function DOES NOT
 *       protect (catch) the exception.  If you have any resources
 *       which should be freed before throwing an exception, you need to
 *       use rb_protect() in `func' and return a value which represents
 *       that an exception was raised.
 *
 * NOTE: This function should not be called by a thread which was not
 *       created as a Ruby thread (created by Thread.new or so).  In other
 *       words, this function *DOES NOT* associate or convert a NON-Ruby
 *       thread to a Ruby thread.
 */
void *
rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
{
    rb_thread_t *th = ruby_thread_from_native();
    struct rb_blocking_region_buffer *brb;
    struct rb_unblock_callback prev_unblock;
    void *r;

    if (th == 0) {
        /* Error has occurred, but we can't use rb_bug()
         * because this thread is not Ruby's thread.
         * What should we do?
         */
        bp();
        fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
        exit(EXIT_FAILURE);
    }

    brb = (struct rb_blocking_region_buffer *)th->blocking_region_buffer;
    prev_unblock = th->unblock;

    if (brb == 0) {
        rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
    }

    blocking_region_end(th, brb);
    /* enter to Ruby world: You can access Ruby values, methods and so on. */
    r = (*func)(data1);
    /* leave from Ruby world: You can not access Ruby values, etc. */
    int released = blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg, FALSE);
    RUBY_ASSERT_ALWAYS(released);
    RB_VM_SAVE_MACHINE_CONTEXT(th);
    thread_sched_to_waiting(TH_SCHED(th), th);
    return r;
}
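/*
 * Usage sketch (hypothetical extension code): re-entering Ruby from
 * inside a rb_thread_call_without_gvl() callback, e.g. to log progress.
 * compute_part_one/two are made-up helpers.
 *
 *   static void *
 *   log_progress(void *msg)            // runs WITH the GVL
 *   {
 *       rb_funcall(rb_stderr, rb_intern("puts"), 1, rb_str_new_cstr(msg));
 *       return NULL;
 *   }
 *
 *   static void *
 *   heavy_work(void *arg)              // runs WITHOUT the GVL
 *   {
 *       compute_part_one(arg);
 *       rb_thread_call_with_gvl(log_progress, (void *)"half way");
 *       compute_part_two(arg);
 *       return NULL;
 *   }
 */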

/*
 * ruby_thread_has_gvl_p - check if current native thread has GVL.
 *
 ***
 *** This API is EXPERIMENTAL!
 *** We do not guarantee that this API remains in ruby 1.9.2 or later.
 ***
 */

int
ruby_thread_has_gvl_p(void)
{
    rb_thread_t *th = ruby_thread_from_native();

    if (th && th->blocking_region_buffer == 0) {
        return 1;
    }
    else {
        return 0;
    }
}

/*
 * call-seq:
 *    Thread.pass   -> nil
 *
 * Give the thread scheduler a hint to pass execution to another thread.
 * A running thread may or may not switch, it depends on OS and processor.
 */

static VALUE
thread_s_pass(VALUE klass)
{
    rb_thread_schedule();
    return Qnil;
}

/*****************************************************/

/*
 * rb_threadptr_pending_interrupt_* - manage asynchronous error queue
 *
 * Async events such as an exception thrown by Thread#raise,
 * Thread#kill and thread termination (after main thread termination)
 * will be queued to th->pending_interrupt_queue.
 * - clear: clear the queue.
 * - enque: enqueue an err object into the queue.
 * - deque: dequeue an err object from the queue.
 * - active_p: return 1 if the queue should be checked.
 *
 * All rb_threadptr_pending_interrupt_* functions are called by
 * a thread which has acquired the GVL, of course.
 * Note that all "rb_" prefixed APIs need the GVL to call.
 */
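/*
 * Delivery sketch: this is how terminate_all() above uses the queue to
 * deliver an asynchronous event to another thread; Thread#raise follows
 * the same two-step pattern (enqueue the event, then poke the target so
 * it checks its interrupt flags):
 *
 *   rb_threadptr_pending_interrupt_enque(target_th, err_object);
 *   rb_threadptr_interrupt(target_th);  // target dequeues and raises at
 *                                       // its next RUBY_VM_CHECK_INTS point
 */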
1920
1921void
1922rb_threadptr_pending_interrupt_clear(rb_thread_t *th)
1923{
1924 rb_ary_clear(th->pending_interrupt_queue);
1925}
1926
1927void
1928rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
1929{
1930 rb_ary_push(th->pending_interrupt_queue, v);
1931 th->pending_interrupt_queue_checked = 0;
1932}
1933
1934static void
1935threadptr_check_pending_interrupt_queue(rb_thread_t *th)
1936{
1937 if (!th->pending_interrupt_queue) {
1938 rb_raise(rb_eThreadError, "uninitialized thread");
1939 }
1940}
1941
1942enum handle_interrupt_timing {
1943 INTERRUPT_NONE,
1944 INTERRUPT_IMMEDIATE,
1945 INTERRUPT_ON_BLOCKING,
1946 INTERRUPT_NEVER
1947};
1948
1949static enum handle_interrupt_timing
1950rb_threadptr_pending_interrupt_from_symbol(rb_thread_t *th, VALUE sym)
1951{
1952 if (sym == sym_immediate) {
1953 return INTERRUPT_IMMEDIATE;
1954 }
1955 else if (sym == sym_on_blocking) {
1956 return INTERRUPT_ON_BLOCKING;
1957 }
1958 else if (sym == sym_never) {
1959 return INTERRUPT_NEVER;
1960 }
1961 else {
1962 rb_raise(rb_eThreadError, "unknown mask signature");
1963 }
1964}
1965
1966static enum handle_interrupt_timing
1967rb_threadptr_pending_interrupt_check_mask(rb_thread_t *th, VALUE err)
1968{
1969 VALUE mask;
1970 long mask_stack_len = RARRAY_LEN(th->pending_interrupt_mask_stack);
1971 const VALUE *mask_stack = RARRAY_CONST_PTR(th->pending_interrupt_mask_stack);
1972 VALUE mod;
1973 long i;
1974
1975 for (i=0; i<mask_stack_len; i++) {
1976 mask = mask_stack[mask_stack_len-(i+1)];
1977
1978 if (SYMBOL_P(mask)) {
1979 /* do not match RUBY_FATAL_THREAD_KILLED etc */
1980 if (err != rb_cInteger) {
1981 return rb_threadptr_pending_interrupt_from_symbol(th, mask);
1982 }
1983 else {
1984 continue;
1985 }
1986 }
1987
1988 for (mod = err; mod; mod = RCLASS_SUPER(mod)) {
1989 VALUE klass = mod;
1990 VALUE sym;
1991
1992 if (BUILTIN_TYPE(mod) == T_ICLASS) {
1993 klass = RBASIC(mod)->klass;
1994 }
1995 else if (mod != RCLASS_ORIGIN(mod)) {
1996 continue;
1997 }
1998
1999 if ((sym = rb_hash_aref(mask, klass)) != Qnil) {
2000 return rb_threadptr_pending_interrupt_from_symbol(th, sym);
2001 }
2002 }
2003 /* try the next mask */
2004 }
2005 return INTERRUPT_NONE;
2006}
2007
2008static int
2009rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th)
2010{
2011 return RARRAY_LEN(th->pending_interrupt_queue) == 0;
2012}
2013
2014static int
2015rb_threadptr_pending_interrupt_include_p(rb_thread_t *th, VALUE err)
2016{
2017 int i;
2018 for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
2019 VALUE e = RARRAY_AREF(th->pending_interrupt_queue, i);
2020 if (rb_obj_is_kind_of(e, err)) {
2021 return TRUE;
2022 }
2023 }
2024 return FALSE;
2025}
2026
2027static VALUE
2028rb_threadptr_pending_interrupt_deque(rb_thread_t *th, enum handle_interrupt_timing timing)
2029{
2030#if 1 /* 1 to enable Thread#handle_interrupt, 0 to ignore it */
2031 int i;
2032
2033 for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
2034 VALUE err = RARRAY_AREF(th->pending_interrupt_queue, i);
2035
2036 enum handle_interrupt_timing mask_timing = rb_threadptr_pending_interrupt_check_mask(th, CLASS_OF(err));
2037
2038 switch (mask_timing) {
2039 case INTERRUPT_ON_BLOCKING:
2040 if (timing != INTERRUPT_ON_BLOCKING) {
2041 break;
2042 }
2043 /* fall through */
2044 case INTERRUPT_NONE: /* default: IMMEDIATE */
2045 case INTERRUPT_IMMEDIATE:
2046 rb_ary_delete_at(th->pending_interrupt_queue, i);
2047 return err;
2048 case INTERRUPT_NEVER:
2049 break;
2050 }
2051 }
2052
2053 th->pending_interrupt_queue_checked = 1;
2054 return Qundef;
2055#else
2056 VALUE err = rb_ary_shift(th->pending_interrupt_queue);
2057 if (rb_threadptr_pending_interrupt_empty_p(th)) {
2058 th->pending_interrupt_queue_checked = 1;
2059 }
2060 return err;
2061#endif
2062}
2063
2064static int
2065threadptr_pending_interrupt_active_p(rb_thread_t *th)
2066{
2067 /*
2068 * As an optimization, we don't check the async errinfo queue
2069 * if neither the queue nor the thread interrupt mask has changed
2070 * since the last check.
2071 */
2072 if (th->pending_interrupt_queue_checked) {
2073 return 0;
2074 }
2075
2076 if (rb_threadptr_pending_interrupt_empty_p(th)) {
2077 return 0;
2078 }
2079
2080 return 1;
2081}
2082
2083static int
2084handle_interrupt_arg_check_i(VALUE key, VALUE val, VALUE args)
2085{
2086 VALUE *maskp = (VALUE *)args;
2087
2088 if (val != sym_immediate && val != sym_on_blocking && val != sym_never) {
2089 rb_raise(rb_eArgError, "unknown mask signature");
2090 }
2091
2092 if (key == rb_eException && (UNDEF_P(*maskp) || NIL_P(*maskp))) {
2093 *maskp = val;
2094 return ST_CONTINUE;
2095 }
2096
2097 if (RTEST(*maskp)) {
2098 if (!RB_TYPE_P(*maskp, T_HASH)) {
2099 VALUE prev = *maskp;
2100 *maskp = rb_ident_hash_new();
2101 if (SYMBOL_P(prev)) {
2102 rb_hash_aset(*maskp, rb_eException, prev);
2103 }
2104 }
2105 rb_hash_aset(*maskp, key, val);
2106 }
2107 else {
2108 *maskp = Qfalse;
2109 }
2110
2111 return ST_CONTINUE;
2112}
2113
2114/*
2115 * call-seq:
2116 * Thread.handle_interrupt(hash) { ... } -> result of the block
2117 *
2118 * Changes asynchronous interrupt timing.
2119 *
2120 * _interrupt_ means an asynchronous event and its corresponding procedure,
2121 * triggered by Thread#raise, Thread#kill, signal trap (not supported yet)
2122 * and main thread termination (if the main thread terminates, then all
2123 * other threads will be killed).
2124 *
2125 * The given +hash+ has pairs like <code>ExceptionClass =>
2126 * :TimingSymbol</code>, where ExceptionClass is the interrupt handled by
2127 * the given block and TimingSymbol is one of the following symbols:
2128 *
2129 * [+:immediate+] Invoke interrupts immediately.
2130 * [+:on_blocking+] Invoke interrupts while in a _BlockingOperation_.
2131 * [+:never+] Never invoke interrupts.
2132 *
2133 * _BlockingOperation_ means that the operation will block the calling thread,
2134 * such as read and write. On the CRuby implementation, a _BlockingOperation_ is
2135 * any operation executed without the GVL.
2136 *
2137 * Masked asynchronous interrupts are delayed until they are enabled.
2138 * This method is similar to sigprocmask(3).
2139 *
2140 * === NOTE
2141 *
2142 * Asynchronous interrupts are difficult to use.
2143 *
2144 * If you need to communicate between threads, consider using another mechanism such as Queue.
2145 *
2146 * Or use them only with a deep understanding of this method.
2147 *
2148 * === Usage
2149 *
2150 * In this example, we can guard against Thread#raise exceptions.
2151 *
2152 * Using the +:never+ TimingSymbol the RuntimeError exception will always be
2153 * ignored in the first block of the main thread. In the second
2154 * ::handle_interrupt block we can purposefully handle RuntimeError exceptions.
2155 *
2156 * th = Thread.new do
2157 * Thread.handle_interrupt(RuntimeError => :never) {
2158 * begin
2159 * # You can write resource allocation code safely.
2160 * Thread.handle_interrupt(RuntimeError => :immediate) {
2161 * # ...
2162 * }
2163 * ensure
2164 * # You can write resource deallocation code safely.
2165 * end
2166 * }
2167 * end
2168 * Thread.pass
2169 * # ...
2170 * th.raise "stop"
2171 *
2172 * While we are ignoring the RuntimeError exception, it's safe to write our
2173 * resource allocation code. Then, the ensure block is where we can safely
2174 * deallocate our resources.
2175 *
2176 * ==== Guarding from Timeout::Error
2177 *
2178 * In the next example, we will guard against the Timeout::Error exception. This
2179 * helps prevent leaking resources when Timeout::Error exceptions occur
2180 * during a normal ensure clause. For this example we use the
2181 * standard library Timeout, from lib/timeout.rb.
2182 *
2183 * require 'timeout'
2184 * Thread.handle_interrupt(Timeout::Error => :never) {
2185 *   Timeout.timeout(10) {
2186 * # Timeout::Error doesn't occur here
2187 * Thread.handle_interrupt(Timeout::Error => :on_blocking) {
2188 * # possible to be killed by Timeout::Error
2189 * # while blocking operation
2190 * }
2191 * # Timeout::Error doesn't occur here
2192 * }
2193 * }
2194 *
2195 * In the first part of the +Timeout.timeout+ block, we can rely on Timeout::Error being
2196 * ignored. Then in the <code>Timeout::Error => :on_blocking</code> block, any
2197 * operation that will block the calling thread is susceptible to a
2198 * Timeout::Error exception being raised.
2199 *
2200 * ==== Stack control settings
2201 *
2202 * It's possible to stack multiple levels of ::handle_interrupt blocks in order
2203 * to control more than one ExceptionClass and TimingSymbol at a time.
2204 *
2205 * Thread.handle_interrupt(FooError => :never) {
2206 * Thread.handle_interrupt(BarError => :never) {
2207 * # FooError and BarError are prohibited.
2208 * }
2209 * }
2210 *
2211 * ==== Inheritance with ExceptionClass
2212 *
2213 * All exceptions inherited from the ExceptionClass parameter will be considered.
2214 *
2215 * Thread.handle_interrupt(Exception => :never) {
2216 * # all exceptions inherited from Exception are prohibited.
2217 * }
2218 *
2219 * For handling all interrupts, use +Object+ and not +Exception+
2220 * as the ExceptionClass, as kill/terminate interrupts are not handled by +Exception+.
2221 */
2222static VALUE
2223rb_thread_s_handle_interrupt(VALUE self, VALUE mask_arg)
2224{
2225 VALUE mask = Qundef;
2226 rb_execution_context_t * volatile ec = GET_EC();
2227 rb_thread_t * volatile th = rb_ec_thread_ptr(ec);
2228 volatile VALUE r = Qnil;
2229 enum ruby_tag_type state;
2230
2231 if (!rb_block_given_p()) {
2232 rb_raise(rb_eArgError, "block is needed.");
2233 }
2234
2235 mask_arg = rb_to_hash_type(mask_arg);
2236
2237 if (OBJ_FROZEN(mask_arg) && rb_hash_compare_by_id_p(mask_arg)) {
2238 mask = Qnil;
2239 }
2240
2241 rb_hash_foreach(mask_arg, handle_interrupt_arg_check_i, (VALUE)&mask);
2242
2243 if (UNDEF_P(mask)) {
2244 return rb_yield(Qnil);
2245 }
2246
2247 if (!RTEST(mask)) {
2248 mask = mask_arg;
2249 }
2250 else if (RB_TYPE_P(mask, T_HASH)) {
2251 OBJ_FREEZE_RAW(mask);
2252 }
2253
2254 rb_ary_push(th->pending_interrupt_mask_stack, mask);
2255 if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2256 th->pending_interrupt_queue_checked = 0;
2257 RUBY_VM_SET_INTERRUPT(th->ec);
2258 }
2259
2260 EC_PUSH_TAG(th->ec);
2261 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
2262 r = rb_yield(Qnil);
2263 }
2264 EC_POP_TAG();
2265
2266 rb_ary_pop(th->pending_interrupt_mask_stack);
2267 if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2268 th->pending_interrupt_queue_checked = 0;
2269 RUBY_VM_SET_INTERRUPT(th->ec);
2270 }
2271
2272 RUBY_VM_CHECK_INTS(th->ec);
2273
2274 if (state) {
2275 EC_JUMP_TAG(th->ec, state);
2276 }
2277
2278 return r;
2279}
2280
2281/*
2282 * call-seq:
2283 * target_thread.pending_interrupt?(error = nil) -> true/false
2284 *
2285 * Returns +true+ if the asynchronous event queue of the target thread is not empty.
2286 *
2287 * If +error+ is given, then check only for +error+ type deferred events.
2288 *
2289 * See ::pending_interrupt? for more information.
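 *
 * For example (illustrative; the result depends on timing):
 *
 *    th = Thread.new do
 *      Thread.handle_interrupt(RuntimeError => :never) { sleep }
 *    end
 *    th.raise "stop"
 *    th.pending_interrupt?               #=> true
 *    th.pending_interrupt?(RuntimeError) #=> true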
2290 */
2291static VALUE
2292rb_thread_pending_interrupt_p(int argc, VALUE *argv, VALUE target_thread)
2293{
2294 rb_thread_t *target_th = rb_thread_ptr(target_thread);
2295
2296 if (!target_th->pending_interrupt_queue) {
2297 return Qfalse;
2298 }
2299 if (rb_threadptr_pending_interrupt_empty_p(target_th)) {
2300 return Qfalse;
2301 }
2302 if (rb_check_arity(argc, 0, 1)) {
2303 VALUE err = argv[0];
2304 if (!rb_obj_is_kind_of(err, rb_cModule)) {
2305 rb_raise(rb_eTypeError, "class or module required for rescue clause");
2306 }
2307 return RBOOL(rb_threadptr_pending_interrupt_include_p(target_th, err));
2308 }
2309 else {
2310 return Qtrue;
2311 }
2312}
2313
2314/*
2315 * call-seq:
2316 * Thread.pending_interrupt?(error = nil) -> true/false
2317 *
2318 * Returns +true+ if the asynchronous event queue of the current thread is not empty.
2319 *
2320 * Since Thread::handle_interrupt can be used to defer asynchronous events,
2321 * this method can be used to determine if there are any deferred events.
2322 *
2323 * If this method returns +true+, you may want to finish the +:never+ block.
2324 *
2325 * For example, the following method processes deferred asynchronous events
2326 * immediately.
2327 *
2328 * def Thread.kick_interrupt_immediately
2329 * Thread.handle_interrupt(Object => :immediate) {
2330 * Thread.pass
2331 * }
2332 * end
2333 *
2334 * If +error+ is given, then check only for +error+ type deferred events.
2335 *
2336 * === Usage
2337 *
2338 * th = Thread.new{
2339 * Thread.handle_interrupt(RuntimeError => :on_blocking){
2340 * while true
2341 * ...
2342 * # reach safe point to invoke interrupt
2343 * if Thread.pending_interrupt?
2344 * Thread.handle_interrupt(Object => :immediate){}
2345 * end
2346 * ...
2347 * end
2348 * }
2349 * }
2350 * ...
2351 * th.raise # stop thread
2352 *
2353 * This example can also be written as follows, which you should use if
2354 * you want to avoid asynchronous interrupts altogether.
2355 *
2356 * flag = true
2357 * th = Thread.new{
2358 * Thread.handle_interrupt(RuntimeError => :on_blocking){
2359 * while true
2360 * ...
2361 * # reach safe point to invoke interrupt
2362 * break if flag == false
2363 * ...
2364 * end
2365 * }
2366 * }
2367 * ...
2368 * flag = false # stop thread
2369 */
2370
2371static VALUE
2372rb_thread_s_pending_interrupt_p(int argc, VALUE *argv, VALUE self)
2373{
2374 return rb_thread_pending_interrupt_p(argc, argv, GET_THREAD()->self);
2375}
2376
2377NORETURN(static void rb_threadptr_to_kill(rb_thread_t *th));
2378
2379static void
2380rb_threadptr_to_kill(rb_thread_t *th)
2381{
2382 rb_threadptr_pending_interrupt_clear(th);
2383 th->status = THREAD_RUNNABLE;
2384 th->to_kill = 1;
2385 th->ec->errinfo = INT2FIX(TAG_FATAL);
2386 EC_JUMP_TAG(th->ec, TAG_FATAL);
2387}
2388
2389static inline rb_atomic_t
2390threadptr_get_interrupts(rb_thread_t *th)
2391{
2392 rb_execution_context_t *ec = th->ec;
2393 rb_atomic_t interrupt;
2394 rb_atomic_t old;
2395
2396 do {
2397 interrupt = ec->interrupt_flag;
2398 old = ATOMIC_CAS(ec->interrupt_flag, interrupt, interrupt & ec->interrupt_mask);
2399 } while (old != interrupt);
2400 return interrupt & (rb_atomic_t)~ec->interrupt_mask;
2401}
2402
2403int
2404rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
2405{
2406 rb_atomic_t interrupt;
2407 int postponed_job_interrupt = 0;
2408 int ret = FALSE;
2409
2410 if (th->ec->raised_flag) return ret;
2411
2412 while ((interrupt = threadptr_get_interrupts(th)) != 0) {
2413 int sig;
2414 int timer_interrupt;
2415 int pending_interrupt;
2416 int trap_interrupt;
2417 int terminate_interrupt;
2418
2419 timer_interrupt = interrupt & TIMER_INTERRUPT_MASK;
2420 pending_interrupt = interrupt & PENDING_INTERRUPT_MASK;
2421 postponed_job_interrupt = interrupt & POSTPONED_JOB_INTERRUPT_MASK;
2422 trap_interrupt = interrupt & TRAP_INTERRUPT_MASK;
2423 terminate_interrupt = interrupt & TERMINATE_INTERRUPT_MASK; // request from other ractors
2424
2425 if (interrupt & VM_BARRIER_INTERRUPT_MASK) {
2426 RB_VM_LOCK_ENTER();
2427 RB_VM_LOCK_LEAVE();
2428 }
2429
2430 if (postponed_job_interrupt) {
2431 rb_postponed_job_flush(th->vm);
2432 }
2433
2434 /* signal handling */
2435 if (trap_interrupt && (th == th->vm->ractor.main_thread)) {
2436 enum rb_thread_status prev_status = th->status;
2437
2438 th->status = THREAD_RUNNABLE;
2439 {
2440 while ((sig = rb_get_next_signal()) != 0) {
2441 ret |= rb_signal_exec(th, sig);
2442 }
2443 }
2444 th->status = prev_status;
2445 }
2446
2447 /* exception from another thread */
2448 if (pending_interrupt && threadptr_pending_interrupt_active_p(th)) {
2449 VALUE err = rb_threadptr_pending_interrupt_deque(th, blocking_timing ? INTERRUPT_ON_BLOCKING : INTERRUPT_NONE);
2450 RUBY_DEBUG_LOG("err:%"PRIdVALUE, err);
2451 ret = TRUE;
2452
2453 if (UNDEF_P(err)) {
2454 /* no error */
2455 }
2456 else if (err == RUBY_FATAL_THREAD_KILLED /* Thread#kill received */ ||
2457 err == RUBY_FATAL_THREAD_TERMINATED /* Terminate thread */ ||
2458 err == INT2FIX(TAG_FATAL) /* Thread.exit etc. */ ) {
2459 terminate_interrupt = 1;
2460 }
2461 else {
2462 if (err == th->vm->special_exceptions[ruby_error_stream_closed]) {
2463 /* the only special exception to be queued across threads */
2464 err = ruby_vm_special_exception_copy(err);
2465 }
2466 /* set runnable if th was sleeping */
2467 if (th->status == THREAD_STOPPED ||
2468 th->status == THREAD_STOPPED_FOREVER)
2469 th->status = THREAD_RUNNABLE;
2470 rb_exc_raise(err);
2471 }
2472 }
2473
2474 if (terminate_interrupt) {
2475 rb_threadptr_to_kill(th);
2476 }
2477
2478 if (timer_interrupt) {
2479 uint32_t limits_us = TIME_QUANTUM_USEC;
2480
2481 if (th->priority > 0)
2482 limits_us <<= th->priority;
2483 else
2484 limits_us >>= -th->priority;
2485
2486 if (th->status == THREAD_RUNNABLE)
2487 th->running_time_us += 10 * 1000; // 10ms = 10_000us // TODO: use macro
2488
2489 VM_ASSERT(th->ec->cfp);
2490 EXEC_EVENT_HOOK(th->ec, RUBY_INTERNAL_EVENT_SWITCH, th->ec->cfp->self,
2491 0, 0, 0, Qundef);
2492
2493 rb_thread_schedule_limits(limits_us);
2494 }
2495 }
2496 return ret;
2497}
2498
2499void
2500rb_thread_execute_interrupts(VALUE thval)
2501{
2502 rb_threadptr_execute_interrupts(rb_thread_ptr(thval), 1);
2503}
2504
2505static void
2506rb_threadptr_ready(rb_thread_t *th)
2507{
2508 rb_threadptr_interrupt(th);
2509}
2510
2511static VALUE
2512rb_threadptr_raise(rb_thread_t *target_th, int argc, VALUE *argv)
2513{
2514 VALUE exc;
2515
2516 if (rb_threadptr_dead(target_th)) {
2517 return Qnil;
2518 }
2519
2520 if (argc == 0) {
2521 exc = rb_exc_new(rb_eRuntimeError, 0, 0);
2522 }
2523 else {
2524 exc = rb_make_exception(argc, argv);
2525 }
2526
2527 /* making an exception object can switch threads,
2528 so we need to check thread deadness again */
2529 if (rb_threadptr_dead(target_th)) {
2530 return Qnil;
2531 }
2532
2533 rb_ec_setup_exception(GET_EC(), exc, Qundef);
2534 rb_threadptr_pending_interrupt_enque(target_th, exc);
2535 rb_threadptr_interrupt(target_th);
2536 return Qnil;
2537}
2538
2539void
2540rb_threadptr_signal_raise(rb_thread_t *th, int sig)
2541{
2542 VALUE argv[2];
2543
2544 argv[0] = rb_eSignal;
2545 argv[1] = INT2FIX(sig);
2546 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2547}
2548
2549void
2550rb_threadptr_signal_exit(rb_thread_t *th)
2551{
2552 VALUE argv[2];
2553
2554 argv[0] = rb_eSystemExit;
2555 argv[1] = rb_str_new2("exit");
2556
2557 // TODO: check signal raise delivery
2558 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2559}
2560
2561int
2562rb_ec_set_raised(rb_execution_context_t *ec)
2563{
2564 if (ec->raised_flag & RAISED_EXCEPTION) {
2565 return 1;
2566 }
2567 ec->raised_flag |= RAISED_EXCEPTION;
2568 return 0;
2569}
2570
2571int
2572rb_ec_reset_raised(rb_execution_context_t *ec)
2573{
2574 if (!(ec->raised_flag & RAISED_EXCEPTION)) {
2575 return 0;
2576 }
2577 ec->raised_flag &= ~RAISED_EXCEPTION;
2578 return 1;
2579}
2580
2581int
2582rb_notify_fd_close(int fd, struct rb_io_close_wait_list *busy)
2583{
2584 rb_vm_t *vm = GET_THREAD()->vm;
2585 struct waiting_fd *wfd = 0, *next;
2586 ccan_list_head_init(&busy->pending_fd_users);
2587 int has_any;
2588 VALUE wakeup_mutex;
2589
2590 RB_VM_LOCK_ENTER();
2591 {
2592 ccan_list_for_each_safe(&vm->waiting_fds, wfd, next, wfd_node) {
2593 if (wfd->fd == fd) {
2594 rb_thread_t *th = wfd->th;
2595 VALUE err;
2596
2597 ccan_list_del(&wfd->wfd_node);
2598 ccan_list_add(&busy->pending_fd_users, &wfd->wfd_node);
2599
2600 wfd->busy = busy;
2601 err = th->vm->special_exceptions[ruby_error_stream_closed];
2602 rb_threadptr_pending_interrupt_enque(th, err);
2603 rb_threadptr_interrupt(th);
2604 }
2605 }
2606 }
2607
2608 has_any = !ccan_list_empty(&busy->pending_fd_users);
2609 busy->closing_thread = rb_thread_current();
2610 wakeup_mutex = Qnil;
2611 if (has_any) {
2612 wakeup_mutex = rb_mutex_new();
2613 RBASIC_CLEAR_CLASS(wakeup_mutex); /* hide from ObjectSpace */
2614 }
2615 busy->wakeup_mutex = wakeup_mutex;
2616
2617 RB_VM_LOCK_LEAVE();
2618
2619 /* If the caller didn't pass *busy as a pointer to something on the stack,
2620 we need to guard this mutex object on _our_ C stack for the duration
2621 of this function. */
2622 RB_GC_GUARD(wakeup_mutex);
2623 return has_any;
2624}
2625
2626void
2627rb_notify_fd_close_wait(struct rb_io_close_wait_list *busy)
2628{
2629 if (!RB_TEST(busy->wakeup_mutex)) {
2630 /* There was nobody else using this file when we closed it, so we
2631 never bothered to allocate a mutex */
2632 return;
2633 }
2634
2635 rb_mutex_lock(busy->wakeup_mutex);
2636 while (!ccan_list_empty(&busy->pending_fd_users)) {
2637 rb_mutex_sleep(busy->wakeup_mutex, Qnil);
2638 }
2639 rb_mutex_unlock(busy->wakeup_mutex);
2640}
2641
2642void
2643rb_thread_fd_close(int fd)
2644{
2645 struct rb_io_close_wait_list busy;
2646
2647 if (rb_notify_fd_close(fd, &busy)) {
2648 rb_notify_fd_close_wait(&busy);
2649 }
2650}
2651
2652/*
2653 * call-seq:
2654 * thr.raise
2655 * thr.raise(string)
2656 * thr.raise(exception [, string [, array]])
2657 *
2658 * Raises an exception from the given thread. The caller does not have to be
2659 * +thr+. See Kernel#raise for more information.
2660 *
2661 * Thread.abort_on_exception = true
2662 * a = Thread.new { sleep(200) }
2663 * a.raise("Gotcha")
2664 *
2665 * This will produce:
2666 *
2667 * prog.rb:3: Gotcha (RuntimeError)
2668 * from prog.rb:2:in `initialize'
2669 * from prog.rb:2:in `new'
2670 * from prog.rb:2
2671 */
2672
2673static VALUE
2674thread_raise_m(int argc, VALUE *argv, VALUE self)
2675{
2676 rb_thread_t *target_th = rb_thread_ptr(self);
2677 const rb_thread_t *current_th = GET_THREAD();
2678
2679 threadptr_check_pending_interrupt_queue(target_th);
2680 rb_threadptr_raise(target_th, argc, argv);
2681
2682 /* To perform Thread.current.raise as Kernel.raise */
2683 if (current_th == target_th) {
2684 RUBY_VM_CHECK_INTS(target_th->ec);
2685 }
2686 return Qnil;
2687}
2688
2689
2690/*
2691 * call-seq:
2692 * thr.exit -> thr
2693 * thr.kill -> thr
2694 * thr.terminate -> thr
2695 *
2696 * Terminates +thr+ and schedules another thread to be run, returning
2697 * the terminated Thread. If this is the main thread, or the last
2698 * thread, exits the process.
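 *
 * For example (the inspect output is illustrative):
 *
 *    th = Thread.new { sleep }
 *    th.kill      #=> #<Thread:0x401b3d30 aborting>
 *    th.join
 *    th.alive?    #=> false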
2699 */
2700
2701VALUE
2702rb_thread_kill(VALUE thread)
2703{
2704 rb_thread_t *target_th = rb_thread_ptr(thread);
2705
2706 if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2707 return thread;
2708 }
2709 if (target_th == target_th->vm->ractor.main_thread) {
2710 rb_exit(EXIT_SUCCESS);
2711 }
2712
2713 RUBY_DEBUG_LOG("target_th:%u", rb_th_serial(target_th));
2714
2715 if (target_th == GET_THREAD()) {
2716 /* kill myself immediately */
2717 rb_threadptr_to_kill(target_th);
2718 }
2719 else {
2720 threadptr_check_pending_interrupt_queue(target_th);
2721 rb_threadptr_pending_interrupt_enque(target_th, RUBY_FATAL_THREAD_KILLED);
2722 rb_threadptr_interrupt(target_th);
2723 }
2724
2725 return thread;
2726}
2727
2728int
2729rb_thread_to_be_killed(VALUE thread)
2730{
2731 rb_thread_t *target_th = rb_thread_ptr(thread);
2732
2733 if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2734 return TRUE;
2735 }
2736 return FALSE;
2737}
2738
2739/*
2740 * call-seq:
2741 * Thread.kill(thread) -> thread
2742 *
2743 * Causes the given +thread+ to exit; see also Thread::exit.
2744 *
2745 * count = 0
2746 * a = Thread.new { loop { count += 1 } }
2747 * sleep(0.1) #=> 0
2748 * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
2749 * count #=> 93947
2750 * a.alive? #=> false
2751 */
2752
2753static VALUE
2754rb_thread_s_kill(VALUE obj, VALUE th)
2755{
2756 return rb_thread_kill(th);
2757}
2758
2759
2760/*
2761 * call-seq:
2762 * Thread.exit -> thread
2763 *
2764 * Terminates the currently running thread and schedules another thread to be
2765 * run.
2766 *
2767 * If this thread is already marked to be killed, ::exit returns the Thread.
2768 *
2769 * If this is the main thread, or the last thread, exit the process.
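 *
 * For example (illustrative):
 *
 *    Thread.new { Thread.exit; puts "never reached" }.join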
2770 */
2771
2772static VALUE
2773rb_thread_exit(VALUE _)
2774{
2775 rb_thread_t *th = GET_THREAD();
2776 return rb_thread_kill(th->self);
2777}
2778
2779
2780/*
2781 * call-seq:
2782 * thr.wakeup -> thr
2783 *
2784 * Marks a given thread as eligible for scheduling; however, it may still
2785 * remain blocked on I/O.
2786 *
2787 * *Note:* This does not invoke the scheduler, see #run for more information.
2788 *
2789 * c = Thread.new { Thread.stop; puts "hey!" }
2790 * sleep 0.1 while c.status!='sleep'
2791 * c.wakeup
2792 * c.join
2793 * #=> "hey!"
2794 */
2795
2796VALUE
2797rb_thread_wakeup(VALUE thread)
2798{
2799 if (!RTEST(rb_thread_wakeup_alive(thread))) {
2800 rb_raise(rb_eThreadError, "killed thread");
2801 }
2802 return thread;
2803}
2804
2805VALUE
2806rb_thread_wakeup_alive(VALUE thread)
2807{
2808 rb_thread_t *target_th = rb_thread_ptr(thread);
2809 if (target_th->status == THREAD_KILLED) return Qnil;
2810
2811 rb_threadptr_ready(target_th);
2812
2813 if (target_th->status == THREAD_STOPPED ||
2814 target_th->status == THREAD_STOPPED_FOREVER) {
2815 target_th->status = THREAD_RUNNABLE;
2816 }
2817
2818 return thread;
2819}
2820
2821
2822/*
2823 * call-seq:
2824 * thr.run -> thr
2825 *
2826 * Wakes up +thr+, making it eligible for scheduling.
2827 *
2828 * a = Thread.new { puts "a"; Thread.stop; puts "c" }
2829 * sleep 0.1 while a.status!='sleep'
2830 * puts "Got here"
2831 * a.run
2832 * a.join
2833 *
2834 * This will produce:
2835 *
2836 * a
2837 * Got here
2838 * c
2839 *
2840 * See also the instance method #wakeup.
2841 */
2842
2843VALUE
2844rb_thread_run(VALUE thread)
2845{
2846 rb_thread_wakeup(thread);
2847 rb_thread_schedule();
2848 return thread;
2849}
2850
2851
2852VALUE
2853rb_thread_stop(void)
2854{
2855 if (rb_thread_alone()) {
2856 rb_raise(rb_eThreadError,
2857 "stopping only thread\n\tnote: use sleep to stop forever");
2858 }
2859 rb_thread_sleep_forever();
2860 return Qnil;
2861}
2862
2863/*
2864 * call-seq:
2865 * Thread.stop -> nil
2866 *
2867 * Stops execution of the current thread, putting it into a ``sleep'' state,
2868 * and schedules execution of another thread.
2869 *
2870 * a = Thread.new { print "a"; Thread.stop; print "c" }
2871 * sleep 0.1 while a.status!='sleep'
2872 * print "b"
2873 * a.run
2874 * a.join
2875 * #=> "abc"
2876 */
2877
2878static VALUE
2879thread_stop(VALUE _)
2880{
2881 return rb_thread_stop();
2882}
2883
2884/********************************************************************/
2885
2886VALUE
2887rb_thread_list(void)
2888{
2889 // TODO
2890 return rb_ractor_thread_list();
2891}
2892
2893/*
2894 * call-seq:
2895 * Thread.list -> array
2896 *
2897 * Returns an array of Thread objects for all threads that are either runnable
2898 * or stopped.
2899 *
2900 * Thread.new { sleep(200) }
2901 * Thread.new { 1000000.times {|i| i*i } }
2902 * Thread.new { Thread.stop }
2903 * Thread.list.each {|t| p t}
2904 *
2905 * This will produce:
2906 *
2907 * #<Thread:0x401b3e84 sleep>
2908 * #<Thread:0x401b3f38 run>
2909 * #<Thread:0x401b3fb0 sleep>
2910 * #<Thread:0x401bdf4c run>
2911 */
2912
2913static VALUE
2914thread_list(VALUE _)
2915{
2916 return rb_thread_list();
2917}
2918
2919VALUE
2920rb_thread_current(void)
2921{
2922 return GET_THREAD()->self;
2923}
2924
2925/*
2926 * call-seq:
2927 * Thread.current -> thread
2928 *
2929 * Returns the currently executing thread.
2930 *
2931 * Thread.current #=> #<Thread:0x401bdf4c run>
2932 */
2933
2934static VALUE
2935thread_s_current(VALUE klass)
2936{
2937 return rb_thread_current();
2938}
2939
2940VALUE
2941rb_thread_main(void)
2942{
2943 return GET_RACTOR()->threads.main->self;
2944}
2945
2946/*
2947 * call-seq:
2948 * Thread.main -> thread
2949 *
2950 * Returns the main thread.
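 *
 *    Thread.main   #=> #<Thread:0x401bdf4c run>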
2951 */
2952
2953static VALUE
2954rb_thread_s_main(VALUE klass)
2955{
2956 return rb_thread_main();
2957}
2958
2959
2960/*
2961 * call-seq:
2962 * Thread.abort_on_exception -> true or false
2963 *
2964 * Returns the status of the global ``abort on exception'' condition.
2965 *
2966 * The default is +false+.
2967 *
2968 * When set to +true+, if any thread is aborted by an exception, the
2969 * raised exception will be re-raised in the main thread.
2970 *
2971 * Can also be specified by the global $DEBUG flag or command line option
2972 * +-d+.
2973 *
2974 * See also ::abort_on_exception=.
2975 *
2976 * There is also an instance level method to set this for a specific thread,
2977 * see #abort_on_exception.
2978 */
2979
2980static VALUE
2981rb_thread_s_abort_exc(VALUE _)
2982{
2983 return RBOOL(GET_THREAD()->vm->thread_abort_on_exception);
2984}
2985
2986
2987/*
2988 * call-seq:
2989 * Thread.abort_on_exception= boolean -> true or false
2990 *
2991 * When set to +true+, if any thread is aborted by an exception, the
2992 * raised exception will be re-raised in the main thread.
2993 * Returns the new state.
2994 *
2995 * Thread.abort_on_exception = true
2996 * t1 = Thread.new do
2997 * puts "In new thread"
2998 * raise "Exception from thread"
2999 * end
3000 * sleep(1)
3001 * puts "not reached"
3002 *
3003 * This will produce:
3004 *
3005 * In new thread
3006 * prog.rb:4: Exception from thread (RuntimeError)
3007 * from prog.rb:2:in `initialize'
3008 * from prog.rb:2:in `new'
3009 * from prog.rb:2
3010 *
3011 * See also ::abort_on_exception.
3012 *
3013 * There is also an instance level method to set this for a specific thread,
3014 * see #abort_on_exception=.
3015 */
3016
3017static VALUE
3018rb_thread_s_abort_exc_set(VALUE self, VALUE val)
3019{
3020 GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
3021 return val;
3022}
3023
3024
3025/*
3026 * call-seq:
3027 * thr.abort_on_exception -> true or false
3028 *
3029 * Returns the status of the thread-local ``abort on exception'' condition for
3030 * this +thr+.
3031 *
3032 * The default is +false+.
3033 *
3034 * See also #abort_on_exception=.
3035 *
3036 * There is also a class level method to set this for all threads, see
3037 * ::abort_on_exception.
3038 */
3039
3040static VALUE
3041rb_thread_abort_exc(VALUE thread)
3042{
3043 return RBOOL(rb_thread_ptr(thread)->abort_on_exception);
3044}
3045
3046
3047/*
3048 * call-seq:
3049 * thr.abort_on_exception= boolean -> true or false
3050 *
3051 * When set to +true+, if this +thr+ is aborted by an exception, the
3052 * raised exception will be re-raised in the main thread.
3053 *
3054 * See also #abort_on_exception.
3055 *
3056 * There is also a class level method to set this for all threads, see
3057 * ::abort_on_exception=.
3058 */
3059
3060static VALUE
3061rb_thread_abort_exc_set(VALUE thread, VALUE val)
3062{
3063 rb_thread_ptr(thread)->abort_on_exception = RTEST(val);
3064 return val;
3065}
3066
3067
3068/*
3069 * call-seq:
3070 * Thread.report_on_exception -> true or false
3071 *
3072 * Returns the status of the global ``report on exception'' condition.
3073 *
3074 * The default is +true+ since Ruby 2.5.
3075 *
3076 * All threads created when this flag is true will report
3077 * a message on $stderr if an exception kills the thread.
3078 *
3079 * Thread.new { 1.times { raise } }
3080 *
3081 * will produce this output on $stderr:
3082 *
3083 * #<Thread:...> terminated with exception (report_on_exception is true):
3084 * Traceback (most recent call last):
3085 * 2: from -e:1:in `block in <main>'
3086 * 1: from -e:1:in `times'
3087 *
3088 * This is done to catch errors in threads early.
3089 * In some cases, you might not want this output.
3090 * There are multiple ways to avoid the extra output:
3091 *
3092 * * If the exception is not intended, the best is to fix the cause of
3093 * the exception so it does not happen anymore.
3094 * * If the exception is intended, it might be better to rescue it closer to
3095 * where it is raised rather than let it kill the Thread.
3096 * * If it is guaranteed the Thread will be joined with Thread#join or
3097 * Thread#value, then it is safe to disable this report with
3098 * <code>Thread.current.report_on_exception = false</code>
3099 * when starting the Thread.
3100 * However, this might handle the exception much later, or not at all
3101 * if the Thread is never joined due to the parent thread being blocked, etc.
3102 *
3103 * See also ::report_on_exception=.
3104 *
3105 * There is also an instance level method to set this for a specific thread,
3106 * see #report_on_exception=.
3107 *
3108 */
3109
3110static VALUE
3111rb_thread_s_report_exc(VALUE _)
3112{
3113 return RBOOL(GET_THREAD()->vm->thread_report_on_exception);
3114}
3115
3116
3117/*
3118 * call-seq:
3119 * Thread.report_on_exception= boolean -> true or false
3120 *
3121 * Returns the new state.
3122 * When set to +true+, all threads created afterwards will inherit the
3123 * condition and report a message on $stderr if an exception kills a thread:
3124 *
3125 * Thread.report_on_exception = true
3126 * t1 = Thread.new do
3127 * puts "In new thread"
3128 * raise "Exception from thread"
3129 * end
3130 * sleep(1)
3131 * puts "In the main thread"
3132 *
3133 * This will produce:
3134 *
3135 * In new thread
3136 * #<Thread:...prog.rb:2> terminated with exception (report_on_exception is true):
3137 * Traceback (most recent call last):
3138 * prog.rb:4:in `block in <main>': Exception from thread (RuntimeError)
3139 * In the main thread
3140 *
3141 * See also ::report_on_exception.
3142 *
3143 * There is also an instance level method to set this for a specific thread,
3144 * see #report_on_exception=.
3145 */
3146
3147static VALUE
3148rb_thread_s_report_exc_set(VALUE self, VALUE val)
3149{
3150 GET_THREAD()->vm->thread_report_on_exception = RTEST(val);
3151 return val;
3152}
3153
3154
3155/*
3156 * call-seq:
3157 * Thread.ignore_deadlock -> true or false
3158 *
3159 * Returns the status of the global ``ignore deadlock'' condition.
3160 * The default is +false+, so that deadlock conditions are not ignored.
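 *
 *    Thread.ignore_deadlock   #=> false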
3161 *
3162 * See also ::ignore_deadlock=.
3163 *
3164 */
3165
3166static VALUE
3167rb_thread_s_ignore_deadlock(VALUE _)
3168{
3169 return RBOOL(GET_THREAD()->vm->thread_ignore_deadlock);
3170}
3171
3172
3173/*
3174 * call-seq:
3175 * Thread.ignore_deadlock = boolean -> true or false
3176 *
3177 * Returns the new state.
3178 * When set to +true+, the VM will not check for deadlock conditions.
3179 * It is only useful to set this if your application can break a
3180 * deadlock condition via some other means, such as a signal.
3181 *
3182 * Thread.ignore_deadlock = true
3183 * queue = Thread::Queue.new
3184 *
3185 * trap(:SIGUSR1){queue.push "Received signal"}
3186 *
3187 * # raises fatal error unless ignoring deadlock
3188 * puts queue.pop
3189 *
3190 * See also ::ignore_deadlock.
3191 */
3192
3193static VALUE
3194rb_thread_s_ignore_deadlock_set(VALUE self, VALUE val)
3195{
3196 GET_THREAD()->vm->thread_ignore_deadlock = RTEST(val);
3197 return val;
3198}
3199
3200
3201/*
3202 * call-seq:
3203 * thr.report_on_exception -> true or false
3204 *
3205 * Returns the status of the thread-local ``report on exception'' condition for
3206 * this +thr+.
3207 *
3208 * The default value when creating a Thread is the value of
3209 * the global flag Thread.report_on_exception.
3210 *
3211 * See also #report_on_exception=.
3212 *
3213 * There is also a class level method to set this for all new threads, see
3214 * ::report_on_exception=.
3215 */
3216
3217static VALUE
3218rb_thread_report_exc(VALUE thread)
3219{
3220 return RBOOL(rb_thread_ptr(thread)->report_on_exception);
3221}
3222
3223
3224/*
3225 * call-seq:
3226 * thr.report_on_exception= boolean -> true or false
3227 *
3228 * When set to +true+, a message is printed on $stderr if an exception
3229 * kills this +thr+. See ::report_on_exception for details.
3230 *
3231 * See also #report_on_exception.
3232 *
3233 * There is also a class level method to set this for all new threads, see
3234 * ::report_on_exception=.
3235 */
3236
3237static VALUE
3238rb_thread_report_exc_set(VALUE thread, VALUE val)
3239{
3240 rb_thread_ptr(thread)->report_on_exception = RTEST(val);
3241 return val;
3242}
3243
3244
3245/*
3246 * call-seq:
3247 * thr.group -> thgrp or nil
3248 *
3249 * Returns the ThreadGroup which contains the given thread.
3250 *
3251 * Thread.main.group #=> #<ThreadGroup:0x4029d914>
3252 */
3253
3254VALUE
3255rb_thread_group(VALUE thread)
3256{
3257 return rb_thread_ptr(thread)->thgroup;
3258}
3259
3260static const char *
3261thread_status_name(rb_thread_t *th, int detail)
3262{
3263 switch (th->status) {
3264 case THREAD_RUNNABLE:
3265 return th->to_kill ? "aborting" : "run";
3266 case THREAD_STOPPED_FOREVER:
3267 if (detail) return "sleep_forever";
3268 case THREAD_STOPPED:
3269 return "sleep";
3270 case THREAD_KILLED:
3271 return "dead";
3272 default:
3273 return "unknown";
3274 }
3275}
3276
3277static int
3278rb_threadptr_dead(rb_thread_t *th)
3279{
3280 return th->status == THREAD_KILLED;
3281}
3282
3283
3284/*
3285 * call-seq:
3286 * thr.status -> string, false or nil
3287 *
3288 * Returns the status of +thr+.
3289 *
3290 * [<tt>"sleep"</tt>]
3291 * Returned if this thread is sleeping or waiting on I/O
3292 * [<tt>"run"</tt>]
3293 * When this thread is executing
3294 * [<tt>"aborting"</tt>]
3295 * If this thread is aborting
3296 * [+false+]
3297 * When this thread is terminated normally
3298 * [+nil+]
3299 * If terminated with an exception.
3300 *
3301 * a = Thread.new { raise("die now") }
3302 * b = Thread.new { Thread.stop }
3303 * c = Thread.new { Thread.exit }
3304 * d = Thread.new { sleep }
3305 * d.kill #=> #<Thread:0x401b3678 aborting>
3306 * a.status #=> nil
3307 * b.status #=> "sleep"
3308 * c.status #=> false
3309 * d.status #=> "aborting"
3310 * Thread.current.status #=> "run"
3311 *
3312 * See also the instance methods #alive? and #stop?
3313 */
3314
3315static VALUE
3316rb_thread_status(VALUE thread)
3317{
3318 rb_thread_t *target_th = rb_thread_ptr(thread);
3319
3320 if (rb_threadptr_dead(target_th)) {
3321 if (!NIL_P(target_th->ec->errinfo) &&
3322 !FIXNUM_P(target_th->ec->errinfo)) {
3323 return Qnil;
3324 }
3325 else {
3326 return Qfalse;
3327 }
3328 }
3329 else {
3330 return rb_str_new2(thread_status_name(target_th, FALSE));
3331 }
3332}
3333
3334
3335/*
3336 * call-seq:
3337 * thr.alive? -> true or false
3338 *
3339 * Returns +true+ if +thr+ is running or sleeping.
3340 *
3341 * thr = Thread.new { }
3342 * thr.join #=> #<Thread:0x401b3fb0 dead>
3343 * Thread.current.alive? #=> true
3344 * thr.alive? #=> false
3345 *
3346 * See also #stop? and #status.
3347 */
3348
3349static VALUE
3350rb_thread_alive_p(VALUE thread)
3351{
3352 return RBOOL(!thread_finished(rb_thread_ptr(thread)));
3353}
3354
3355/*
3356 * call-seq:
3357 * thr.stop? -> true or false
3358 *
3359 * Returns +true+ if +thr+ is dead or sleeping.
3360 *
3361 * a = Thread.new { Thread.stop }
3362 * b = Thread.current
3363 * a.stop? #=> true
3364 * b.stop? #=> false
3365 *
3366 * See also #alive? and #status.
3367 */
3368
3369static VALUE
3370rb_thread_stop_p(VALUE thread)
3371{
3372 rb_thread_t *th = rb_thread_ptr(thread);
3373
3374 if (rb_threadptr_dead(th)) {
3375 return Qtrue;
3376 }
3377 return RBOOL(th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER);
3378}
3379
3380/*
3381 * call-seq:
3382 * thr.name -> string
3383 *
3384 * Returns the name of the thread, or +nil+ if the thread has no name.
3385 */
3386
3387static VALUE
3388rb_thread_getname(VALUE thread)
3389{
3390 return rb_thread_ptr(thread)->name;
3391}
3392
3393/*
3394 * call-seq:
3395 * thr.name=(name) -> string
3396 *
3397 * Sets the given name to the Ruby thread.
3398 * On some platforms, it may also set the name of the underlying pthread and/or kernel thread.
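 *
 * For example:
 *
 *    Thread.current.name = "worker"
 *    Thread.current.name   #=> "worker"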
3399 */
3400
3401static VALUE
3402rb_thread_setname(VALUE thread, VALUE name)
3403{
3404 rb_thread_t *target_th = rb_thread_ptr(thread);
3405
3406 if (!NIL_P(name)) {
3407 rb_encoding *enc;
3408 StringValueCStr(name);
3409 enc = rb_enc_get(name);
3410 if (!rb_enc_asciicompat(enc)) {
3411 rb_raise(rb_eArgError, "ASCII incompatible encoding (%s)",
3412 rb_enc_name(enc));
3413 }
3414 name = rb_str_new_frozen(name);
3415 }
3416 target_th->name = name;
3417 if (threadptr_initialized(target_th) && target_th->has_dedicated_nt) {
3418 native_set_another_thread_name(target_th->nt->thread_id, name);
3419 }
3420 return name;
3421}
3422
3423#if USE_NATIVE_THREAD_NATIVE_THREAD_ID
3424/*
3425 * call-seq:
3426 * thr.native_thread_id -> integer
3427 *
3428 * Return the native thread ID which is used by the Ruby thread.
3429 *
3430 * The ID depends on the OS. (not POSIX thread ID returned by pthread_self(3))
3431 * * On Linux it is TID returned by gettid(2).
3432 * * On macOS it is the system-wide unique integral ID of thread returned
3433 * by pthread_threadid_np(3).
3434 * * On FreeBSD it is the unique integral ID of the thread returned by
3435 * pthread_getthreadid_np(3).
3436 * * On Windows it is the thread identifier returned by GetThreadId().
3437 * * On other platforms, it raises NotImplementedError.
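 *
 * For example, on Linux (the returned value is illustrative):
 *
 *    Thread.current.native_thread_id   #=> 22222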
3438 *
3439 * NOTE:
3440 * If the thread is not yet associated with, or has already been dissociated
3441 * from, a native thread, it returns _nil_.
3442 * If the Ruby implementation uses an M:N thread model, the ID may change
3443 * depending on the timing.
3444 */
3445
3446static VALUE
3447rb_thread_native_thread_id(VALUE thread)
3448{
3449 rb_thread_t *target_th = rb_thread_ptr(thread);
3450 if (rb_threadptr_dead(target_th)) return Qnil;
3451 return native_thread_native_thread_id(target_th);
3452}
3453#else
3454# define rb_thread_native_thread_id rb_f_notimplement
3455#endif
3456
3457/*
3458 * call-seq:
3459 * thr.to_s -> string
3460 *
3461 * Dumps the name, id, and status of _thr_ to a string.
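 *
 * For example (the exact output is illustrative):
 *
 *    Thread.current.to_s   #=> "#<Thread:0x00007f8b1d0a0820 run>"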
3462 */
3463
3464static VALUE
3465rb_thread_to_s(VALUE thread)
3466{
3467 VALUE cname = rb_class_path(rb_obj_class(thread));
3468 rb_thread_t *target_th = rb_thread_ptr(thread);
3469 const char *status;
3470 VALUE str, loc;
3471
3472 status = thread_status_name(target_th, TRUE);
3473 str = rb_sprintf("#<%"PRIsVALUE":%p", cname, (void *)thread);
3474 if (!NIL_P(target_th->name)) {
3475 rb_str_catf(str, "@%"PRIsVALUE, target_th->name);
3476 }
3477 if ((loc = threadptr_invoke_proc_location(target_th)) != Qnil) {
3478 rb_str_catf(str, " %"PRIsVALUE":%"PRIsVALUE,
3479 RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
3480 }
3481 rb_str_catf(str, " %s>", status);
3482
3483 return str;
3484}
3485
3486/* variables for recursive traversals */
3487#define recursive_key id__recursive_key__
3488
3489static VALUE
3490threadptr_local_aref(rb_thread_t *th, ID id)
3491{
3492 if (id == recursive_key) {
3493 return th->ec->local_storage_recursive_hash;
3494 }
3495 else {
3496 VALUE val;
3497 struct rb_id_table *local_storage = th->ec->local_storage;
3498
3499 if (local_storage != NULL && rb_id_table_lookup(local_storage, id, &val)) {
3500 return val;
3501 }
3502 else {
3503 return Qnil;
3504 }
3505 }
3506}
3507
3508VALUE
3509rb_thread_local_aref(VALUE thread, ID id)
3510{
3511 return threadptr_local_aref(rb_thread_ptr(thread), id);
3512}
3513
3514/*
3515 * call-seq:
3516 * thr[sym] -> obj or nil
3517 *
3518 * Attribute Reference---Returns the value of a fiber-local variable (current thread's root fiber
3519 * if not explicitly inside a Fiber), using either a symbol or a string name.
3520 * If the specified variable does not exist, returns +nil+.
3521 *
3522 * [
3523 * Thread.new { Thread.current["name"] = "A" },
3524 * Thread.new { Thread.current[:name] = "B" },
3525 * Thread.new { Thread.current["name"] = "C" }
3526 * ].each do |th|
3527 * th.join
3528 * puts "#{th.inspect}: #{th[:name]}"
3529 * end
3530 *
3531 * This will produce:
3532 *
3533 * #<Thread:0x00000002a54220 dead>: A
3534 * #<Thread:0x00000002a541a8 dead>: B
3535 * #<Thread:0x00000002a54130 dead>: C
3536 *
3537 * Thread#[] and Thread#[]= are not thread-local but fiber-local.
3538 * This confusion did not exist in Ruby 1.8 because
3539 * fibers were only introduced in Ruby 1.9.
3540 * Ruby 1.9 made these methods fiber-local in order to preserve
3541 * the following idiom for dynamic scope.
3542 *
3543 * def meth(newvalue)
3544 * begin
3545 * oldvalue = Thread.current[:name]
3546 * Thread.current[:name] = newvalue
3547 * yield
3548 * ensure
3549 * Thread.current[:name] = oldvalue
3550 * end
3551 * end
3552 *
3553 * The idiom would not work as dynamic scope if the methods were thread-local
3554 * and a given block switched fibers.
3555 *
3556 * f = Fiber.new {
3557 * meth(1) {
3558 * Fiber.yield
3559 * }
3560 * }
3561 * meth(2) {
3562 * f.resume
3563 * }
3564 * f.resume
3565 * p Thread.current[:name]
3566 * #=> nil if fiber-local
3567 * #=> 2 if thread-local (The value 2 is leaked to outside of meth method.)
3568 *
3569 * For thread-local variables, please see #thread_variable_get and
3570 * #thread_variable_set.
3571 *
3572 */
3573
3574static VALUE
3575rb_thread_aref(VALUE thread, VALUE key)
3576{
3577 ID id = rb_check_id(&key);
3578 if (!id) return Qnil;
3579 return rb_thread_local_aref(thread, id);
3580}
3581
3582/*
3583 * call-seq:
3584 * thr.fetch(sym) -> obj
3585 * thr.fetch(sym) { } -> obj
3586 * thr.fetch(sym, default) -> obj
3587 *
3588 * Returns a fiber-local for the given key. If the key can't be
3589 * found, there are several options: With no other arguments, it will
3590 * raise a KeyError exception; if <i>default</i> is given, then that
3591 * will be returned; if the optional code block is specified, then
3592 * that will be run and its result returned. See Thread#[] and
3593 * Hash#fetch.
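 *
 * For example:
 *
 *    Thread.current[:name] = "A"
 *    Thread.current.fetch(:name)                   #=> "A"
 *    Thread.current.fetch(:missing, "default")     #=> "default"
 *    Thread.current.fetch(:missing) { |k| k.to_s } #=> "missing"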
3594 */
3595static VALUE
3596rb_thread_fetch(int argc, VALUE *argv, VALUE self)
3597{
3598 VALUE key, val;
3599 ID id;
3600 rb_thread_t *target_th = rb_thread_ptr(self);
3601 int block_given;
3602
3603 rb_check_arity(argc, 1, 2);
3604 key = argv[0];
3605
3606 block_given = rb_block_given_p();
3607 if (block_given && argc == 2) {
3608 rb_warn("block supersedes default value argument");
3609 }
3610
3611 id = rb_check_id(&key);
3612
3613 if (id == recursive_key) {
3614 return target_th->ec->local_storage_recursive_hash;
3615 }
3616 else if (id && target_th->ec->local_storage &&
3617 rb_id_table_lookup(target_th->ec->local_storage, id, &val)) {
3618 return val;
3619 }
3620 else if (block_given) {
3621 return rb_yield(key);
3622 }
3623 else if (argc == 1) {
3624 rb_key_err_raise(rb_sprintf("key not found: %+"PRIsVALUE, key), self, key);
3625 }
3626 else {
3627 return argv[1];
3628 }
3629}
3630
3631static VALUE
3632threadptr_local_aset(rb_thread_t *th, ID id, VALUE val)
3633{
3634 if (id == recursive_key) {
3635 th->ec->local_storage_recursive_hash = val;
3636 return val;
3637 }
3638 else {
3639 struct rb_id_table *local_storage = th->ec->local_storage;
3640
3641 if (NIL_P(val)) {
3642 if (!local_storage) return Qnil;
3643 rb_id_table_delete(local_storage, id);
3644 return Qnil;
3645 }
3646 else {
3647 if (local_storage == NULL) {
3648 th->ec->local_storage = local_storage = rb_id_table_create(0);
3649 }
3650 rb_id_table_insert(local_storage, id, val);
3651 return val;
3652 }
3653 }
3654}
3655
3656VALUE
3657rb_thread_local_aset(VALUE thread, ID id, VALUE val)
3658{
3659 if (OBJ_FROZEN(thread)) {
3660 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3661 }
3662
3663 return threadptr_local_aset(rb_thread_ptr(thread), id, val);
3664}
3665
3666/*
3667 * call-seq:
3668 * thr[sym] = obj -> obj
3669 *
3670 * Attribute Assignment---Sets or creates the value of a fiber-local variable,
3671 * using either a symbol or a string.
3672 *
3673 * See also Thread#[].
3674 *
3675 * For thread-local variables, please see #thread_variable_set and
3676 * #thread_variable_get.
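 *
 * For example:
 *
 *    Thread.current[:status] = "ready"
 *    Thread.current[:status]   #=> "ready"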
3677 */
3678
3679static VALUE
3680rb_thread_aset(VALUE self, VALUE id, VALUE val)
3681{
3682 return rb_thread_local_aset(self, rb_to_id(id), val);
3683}
3684
3685/*
3686 * call-seq:
3687 * thr.thread_variable_get(key) -> obj or nil
3688 *
3689 * Returns the value of a thread local variable that has been set. Note that
3690 * these are different than fiber local values. For fiber local values,
3691 * please see Thread#[] and Thread#[]=.
3692 *
3693 * Thread local values are carried along with threads, and do not respect
3694 * fibers. For example:
3695 *
3696 * Thread.new {
3697 * Thread.current.thread_variable_set("foo", "bar") # set a thread local
3698 * Thread.current["foo"] = "bar" # set a fiber local
3699 *
3700 * Fiber.new {
3701 * Fiber.yield [
3702 * Thread.current.thread_variable_get("foo"), # get the thread local
3703 * Thread.current["foo"], # get the fiber local
3704 * ]
3705 * }.resume
3706 * }.join.value # => ['bar', nil]
3707 *
3708 * The value "bar" is returned for the thread local, where nil is returned
3709 * for the fiber local. The fiber is executed in the same thread, so the
3710 * thread local values are available.
3711 */
3712
3713static VALUE
3714rb_thread_variable_get(VALUE thread, VALUE key)
3715{
3716 VALUE locals;
3717
3718 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3719 return Qnil;
3720 }
3721 locals = rb_thread_local_storage(thread);
3722 return rb_hash_aref(locals, rb_to_symbol(key));
3723}
3724
3725/*
3726 * call-seq:
3727 * thr.thread_variable_set(key, value)
3728 *
3729 * Sets a thread local with +key+ to +value+. Note that these are local to
3730 * threads, and not to fibers. Please see Thread#thread_variable_get and
3731 * Thread#[] for more information.
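 *
 * For example:
 *
 *    Thread.current.thread_variable_set(:debug, true)
 *    Thread.current.thread_variable_get(:debug)   #=> true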
3732 */
3733
3734static VALUE
3735rb_thread_variable_set(VALUE thread, VALUE key, VALUE val)
3736{
3737 VALUE locals;
3738
3739 if (OBJ_FROZEN(thread)) {
3740 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3741 }
3742
3743 locals = rb_thread_local_storage(thread);
3744 return rb_hash_aset(locals, rb_to_symbol(key), val);
3745}
3746
3747/*
3748 * call-seq:
3749 * thr.key?(sym) -> true or false
3750 *
3751 * Returns +true+ if the given string (or symbol) exists as a fiber-local
3752 * variable.
3753 *
3754 * me = Thread.current
3755 * me[:oliver] = "a"
3756 * me.key?(:oliver) #=> true
3757 * me.key?(:stanley) #=> false
3758 */
3759
3760static VALUE
3761rb_thread_key_p(VALUE self, VALUE key)
3762{
3763 VALUE val;
3764 ID id = rb_check_id(&key);
3765 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
3766
3767 if (!id || local_storage == NULL) {
3768 return Qfalse;
3769 }
3770 return RBOOL(rb_id_table_lookup(local_storage, id, &val));
3771}
3772
3773static enum rb_id_table_iterator_result
3774thread_keys_i(ID key, VALUE value, void *ary)
3775{
3776 rb_ary_push((VALUE)ary, ID2SYM(key));
3777 return ID_TABLE_CONTINUE;
3778}
3779
3780int
3781rb_thread_alone(void)
3782{
3783 // TODO
3784 return rb_ractor_living_thread_num(GET_RACTOR()) == 1;
3785}
3786
3787/*
3788 * call-seq:
3789 * thr.keys -> array
3790 *
3791 * Returns an array of the names of the fiber-local variables (as Symbols).
3792 *
3793 * thr = Thread.new do
3794 * Thread.current[:cat] = 'meow'
3795 * Thread.current["dog"] = 'woof'
3796 * end
3797 * thr.join #=> #<Thread:0x401b3f10 dead>
3798 * thr.keys #=> [:dog, :cat]
3799 */
3800
3801static VALUE
3802rb_thread_keys(VALUE self)
3803{
3804 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
3805 VALUE ary = rb_ary_new();
3806
3807 if (local_storage) {
3808 rb_id_table_foreach(local_storage, thread_keys_i, (void *)ary);
3809 }
3810 return ary;
3811}
3812
3813static int
3814keys_i(VALUE key, VALUE value, VALUE ary)
3815{
3816 rb_ary_push(ary, key);
3817 return ST_CONTINUE;
3818}
3819
3820/*
3821 * call-seq:
3822 * thr.thread_variables -> array
3823 *
3824 * Returns an array of the names of the thread-local variables (as Symbols).
3825 *
3826 * thr = Thread.new do
3827 * Thread.current.thread_variable_set(:cat, 'meow')
3828 * Thread.current.thread_variable_set("dog", 'woof')
3829 * end
3830 * thr.join #=> #<Thread:0x401b3f10 dead>
3831 * thr.thread_variables #=> [:dog, :cat]
3832 *
3833 * Note that these are not fiber local variables. Please see Thread#[] and
3834 * Thread#thread_variable_get for more details.
3835 */
3836
3837static VALUE
3838rb_thread_variables(VALUE thread)
3839{
3840 VALUE locals;
3841 VALUE ary;
3842
3843 ary = rb_ary_new();
3844 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3845 return ary;
3846 }
3847 locals = rb_thread_local_storage(thread);
3848 rb_hash_foreach(locals, keys_i, ary);
3849
3850 return ary;
3851}
3852
3853/*
3854 * call-seq:
3855 * thr.thread_variable?(key) -> true or false
3856 *
3857 * Returns +true+ if the given string (or symbol) exists as a thread-local
3858 * variable.
3859 *
3860 * me = Thread.current
3861 * me.thread_variable_set(:oliver, "a")
3862 * me.thread_variable?(:oliver) #=> true
3863 * me.thread_variable?(:stanley) #=> false
3864 *
3865 * Note that these are not fiber local variables. Please see Thread#[] and
3866 * Thread#thread_variable_get for more details.
3867 */
3868
3869static VALUE
3870rb_thread_variable_p(VALUE thread, VALUE key)
3871{
3872 VALUE locals;
3873
3874 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3875 return Qfalse;
3876 }
3877 locals = rb_thread_local_storage(thread);
3878
3879 return RBOOL(rb_hash_lookup(locals, rb_to_symbol(key)) != Qnil);
3880}
3881
3882/*
3883 * call-seq:
3884 * thr.priority -> integer
3885 *
3886 * Returns the priority of <i>thr</i>. The default is inherited from the
3887 * current thread that created the new thread, or zero for the
3888 * initial main thread; higher-priority threads will run more frequently
3889 * than lower-priority threads (but lower-priority threads can also run).
3890 *
3891 * This is just a hint for the Ruby thread scheduler. It may be ignored on
3892 * some platforms.
3893 *
3894 * Thread.current.priority #=> 0
3895 */
3896
3897static VALUE
3898rb_thread_priority(VALUE thread)
3899{
3900 return INT2NUM(rb_thread_ptr(thread)->priority);
3901}
3902
3903
3904/*
3905 * call-seq:
3906 * thr.priority= integer -> thr
3907 *
3908 * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
3909 * will run more frequently than lower-priority threads (but lower-priority
3910 * threads can also run).
3911 *
3912 * This is just a hint for the Ruby thread scheduler. It may be ignored on
3913 * some platforms.
3914 *
3915 * count1 = count2 = 0
3916 * a = Thread.new do
3917 * loop { count1 += 1 }
3918 * end
3919 * a.priority = -1
3920 *
3921 * b = Thread.new do
3922 * loop { count2 += 1 }
3923 * end
3924 * b.priority = -2
3925 * sleep 1 #=> 1
3926 * count1 #=> 622504
3927 * count2 #=> 5832
3928 */
3929
3930static VALUE
3931rb_thread_priority_set(VALUE thread, VALUE prio)
3932{
3933 rb_thread_t *target_th = rb_thread_ptr(thread);
3934 int priority;
3935
3936#if USE_NATIVE_THREAD_PRIORITY
3937 target_th->priority = NUM2INT(prio);
3938 native_thread_apply_priority(th);
3939#else
3940 priority = NUM2INT(prio);
3941 if (priority > RUBY_THREAD_PRIORITY_MAX) {
3942 priority = RUBY_THREAD_PRIORITY_MAX;
3943 }
3944 else if (priority < RUBY_THREAD_PRIORITY_MIN) {
3945 priority = RUBY_THREAD_PRIORITY_MIN;
3946 }
3947 target_th->priority = (int8_t)priority;
3948#endif
3949 return INT2NUM(target_th->priority);
3950}
3951
3952/* for IO */
3953
3954#if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
3955
3956/*
3957 * several Unix platforms support file descriptors bigger than FD_SETSIZE
3958 * in select(2) system call.
3959 *
3960 * - Linux 2.2.12 (?)
3961 * - NetBSD 1.2 (src/sys/kern/sys_generic.c:1.25)
3962 * select(2) documents how to allocate fd_set dynamically.
3963 * http://netbsd.gw.com/cgi-bin/man-cgi?select++NetBSD-4.0
3964 * - FreeBSD 2.2 (src/sys/kern/sys_generic.c:1.19)
3965 * - OpenBSD 2.0 (src/sys/kern/sys_generic.c:1.4)
3966 * select(2) documents how to allocate fd_set dynamically.
3967 * http://www.openbsd.org/cgi-bin/man.cgi?query=select&manpath=OpenBSD+4.4
3968 * - Solaris 8 has select_large_fdset
3969 * - Mac OS X 10.7 (Lion)
3970 * select(2) returns EINVAL if nfds is greater than FD_SETSIZE and
3971 * _DARWIN_UNLIMITED_SELECT (or _DARWIN_C_SOURCE) isn't defined.
3972 * https://developer.apple.com/library/archive/releasenotes/Darwin/SymbolVariantsRelNotes/index.html
3973 *
3974 * When fd_set is not big enough to hold big file descriptors,
3975 * it should be allocated dynamically.
3976 * Note that this assumes fd_set is structured as a bitmap.
3977 *
3978 * rb_fd_init allocates the memory.
3979 * rb_fd_term frees the memory.
3980 * rb_fd_set may re-allocate the bitmap.
3981 *
3982 * So rb_fd_set doesn't reject file descriptors bigger than FD_SETSIZE.
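 *
 * A minimal usage sketch (illustrative; assumes fd is a valid file
 * descriptor, possibly >= FD_SETSIZE):
 *
 *     rb_fdset_t fds;
 *     rb_fd_init(&fds);           // allocate the initial bitmap
 *     rb_fd_set(fd, &fds);        // re-allocates the bitmap if fd is large
 *     if (rb_fd_isset(fd, &fds)) {
 *         // ... hand &fds to rb_fd_select() ...
 *     }
 *     rb_fd_term(&fds);           // free the memory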
3983 */
3984
3985void
3986rb_fd_init(rb_fdset_t *fds)
3987{
3988 fds->maxfd = 0;
3989 fds->fdset = ALLOC(fd_set);
3990 FD_ZERO(fds->fdset);
3991}
3992
3993void
3994rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
3995{
3996 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
3997
3998 if (size < sizeof(fd_set))
3999 size = sizeof(fd_set);
4000 dst->maxfd = src->maxfd;
4001 dst->fdset = xmalloc(size);
4002 memcpy(dst->fdset, src->fdset, size);
4003}
4004
4005void
4006rb_fd_term(rb_fdset_t *fds)
4007{
4008 xfree(fds->fdset);
4009 fds->maxfd = 0;
4010 fds->fdset = 0;
4011}
4012
4013void
4014rb_fd_zero(rb_fdset_t *fds)
4015{
4016 if (fds->fdset)
4017 MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
4018}
4019
4020static void
4021rb_fd_resize(int n, rb_fdset_t *fds)
4022{
4023 size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
4024 size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
4025
4026 if (m < sizeof(fd_set)) m = sizeof(fd_set);
4027 if (o < sizeof(fd_set)) o = sizeof(fd_set);
4028
4029 if (m > o) {
4030 fds->fdset = xrealloc(fds->fdset, m);
4031 memset((char *)fds->fdset + o, 0, m - o);
4032 }
4033 if (n >= fds->maxfd) fds->maxfd = n + 1;
4034}
4035
4036void
4037rb_fd_set(int n, rb_fdset_t *fds)
4038{
4039 rb_fd_resize(n, fds);
4040 FD_SET(n, fds->fdset);
4041}
4042
4043void
4044rb_fd_clr(int n, rb_fdset_t *fds)
4045{
4046 if (n >= fds->maxfd) return;
4047 FD_CLR(n, fds->fdset);
4048}
4049
4050int
4051rb_fd_isset(int n, const rb_fdset_t *fds)
4052{
4053 if (n >= fds->maxfd) return 0;
4054 return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
4055}
4056
4057void
4058rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
4059{
4060 size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);
4061
4062 if (size < sizeof(fd_set)) size = sizeof(fd_set);
4063 dst->maxfd = max;
4064 dst->fdset = xrealloc(dst->fdset, size);
4065 memcpy(dst->fdset, src, size);
4066}
4067
4068void
4069rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
4070{
4071 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
4072
4073 if (size < sizeof(fd_set))
4074 size = sizeof(fd_set);
4075 dst->maxfd = src->maxfd;
4076 dst->fdset = xrealloc(dst->fdset, size);
4077 memcpy(dst->fdset, src->fdset, size);
4078}
4079
4080int
4081rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
4082{
4083 fd_set *r = NULL, *w = NULL, *e = NULL;
4084 if (readfds) {
4085 rb_fd_resize(n - 1, readfds);
4086 r = rb_fd_ptr(readfds);
4087 }
4088 if (writefds) {
4089 rb_fd_resize(n - 1, writefds);
4090 w = rb_fd_ptr(writefds);
4091 }
4092 if (exceptfds) {
4093 rb_fd_resize(n - 1, exceptfds);
4094 e = rb_fd_ptr(exceptfds);
4095 }
4096 return select(n, r, w, e, timeout);
4097}
4098
4099#define rb_fd_no_init(fds) ((void)((fds)->fdset = 0), (void)((fds)->maxfd = 0))
4100
4101#undef FD_ZERO
4102#undef FD_SET
4103#undef FD_CLR
4104#undef FD_ISSET
4105
4106#define FD_ZERO(f) rb_fd_zero(f)
4107#define FD_SET(i, f) rb_fd_set((i), (f))
4108#define FD_CLR(i, f) rb_fd_clr((i), (f))
4109#define FD_ISSET(i, f) rb_fd_isset((i), (f))
4110
4111#elif defined(_WIN32)
4112
4113void
4114rb_fd_init(rb_fdset_t *set)
4115{
4116 set->capa = FD_SETSIZE;
4117 set->fdset = ALLOC(fd_set);
4118 FD_ZERO(set->fdset);
4119}
4120
4121void
4122rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
4123{
4124 rb_fd_init(dst);
4125 rb_fd_dup(dst, src);
4126}
4127
4128void
4129rb_fd_term(rb_fdset_t *set)
4130{
4131 xfree(set->fdset);
4132 set->fdset = NULL;
4133 set->capa = 0;
4134}
4135
4136void
4137rb_fd_set(int fd, rb_fdset_t *set)
4138{
4139 unsigned int i;
4140 SOCKET s = rb_w32_get_osfhandle(fd);
4141
4142 for (i = 0; i < set->fdset->fd_count; i++) {
4143 if (set->fdset->fd_array[i] == s) {
4144 return;
4145 }
4146 }
4147 if (set->fdset->fd_count >= (unsigned)set->capa) {
4148 set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
4149 set->fdset =
4150 rb_xrealloc_mul_add(
4151 set->fdset, set->capa, sizeof(SOCKET), sizeof(unsigned int));
4152 }
4153 set->fdset->fd_array[set->fdset->fd_count++] = s;
4154}
4155
4156#undef FD_ZERO
4157#undef FD_SET
4158#undef FD_CLR
4159#undef FD_ISSET
4160
4161#define FD_ZERO(f) rb_fd_zero(f)
4162#define FD_SET(i, f) rb_fd_set((i), (f))
4163#define FD_CLR(i, f) rb_fd_clr((i), (f))
4164#define FD_ISSET(i, f) rb_fd_isset((i), (f))
4165
4166#define rb_fd_no_init(fds) (void)((fds)->fdset = 0)
4167
4168#endif
4169
4170#ifndef rb_fd_no_init
4171#define rb_fd_no_init(fds) (void)(fds)
4172#endif
4173
4174static int
4175wait_retryable(volatile int *result, int errnum, rb_hrtime_t *rel, rb_hrtime_t end)
4176{
4177 if (*result < 0) {
4178 switch (errnum) {
4179 case EINTR:
4180#ifdef ERESTART
4181 case ERESTART:
4182#endif
4183 *result = 0;
4184 if (rel && hrtime_update_expire(rel, end)) {
4185 *rel = 0;
4186 }
4187 return TRUE;
4188 }
4189 return FALSE;
4190 }
4191 else if (*result == 0) {
4192 /* check for spurious wakeup */
4193 if (rel) {
4194 return !hrtime_update_expire(rel, end);
4195 }
4196 return TRUE;
4197 }
4198 return FALSE;
4199}
4201struct select_set {
4202 int max;
4203 rb_thread_t *th;
4204 rb_fdset_t *rset;
4205 rb_fdset_t *wset;
4206 rb_fdset_t *eset;
4207 rb_fdset_t orig_rset;
4208 rb_fdset_t orig_wset;
4209 rb_fdset_t orig_eset;
4210 struct timeval *timeout;
4211};
4212
4213static VALUE
4214select_set_free(VALUE p)
4215{
4216 struct select_set *set = (struct select_set *)p;
4217
4218 rb_fd_term(&set->orig_rset);
4219 rb_fd_term(&set->orig_wset);
4220 rb_fd_term(&set->orig_eset);
4221
4222 return Qfalse;
4223}
4224
4225static VALUE
4226do_select(VALUE p)
4227{
4228 struct select_set *set = (struct select_set *)p;
4229 volatile int result = 0;
4230 int lerrno;
4231 rb_hrtime_t *to, rel, end = 0;
4232
4233 timeout_prepare(&to, &rel, &end, set->timeout);
4234 volatile rb_hrtime_t endtime = end;
4235#define restore_fdset(dst, src) \
4236 ((dst) ? rb_fd_dup(dst, src) : (void)0)
4237#define do_select_update() \
4238 (restore_fdset(set->rset, &set->orig_rset), \
4239 restore_fdset(set->wset, &set->orig_wset), \
4240 restore_fdset(set->eset, &set->orig_eset), \
4241 TRUE)
4242
4243 do {
4244 lerrno = 0;
4245
4246 BLOCKING_REGION(set->th, {
4247 struct timeval tv;
4248
4249 if (!RUBY_VM_INTERRUPTED(set->th->ec)) {
4250 result = native_fd_select(set->max,
4251 set->rset, set->wset, set->eset,
4252 rb_hrtime2timeval(&tv, to), set->th);
4253 if (result < 0) lerrno = errno;
4254 }
4255 }, ubf_select, set->th, TRUE);
4256
4257 RUBY_VM_CHECK_INTS_BLOCKING(set->th->ec); /* may raise */
4258 } while (wait_retryable(&result, lerrno, to, endtime) && do_select_update());
4259
4260 if (result < 0) {
4261 errno = lerrno;
4262 }
4263
4264 return (VALUE)result;
4265}
4266
4267int
4268rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t * except,
4269 struct timeval *timeout)
4270{
4271 struct select_set set;
4272
4273 set.th = GET_THREAD();
4274 RUBY_VM_CHECK_INTS_BLOCKING(set.th->ec);
4275 set.max = max;
4276 set.rset = read;
4277 set.wset = write;
4278 set.eset = except;
4279 set.timeout = timeout;
4280
4281 if (!set.rset && !set.wset && !set.eset) {
4282        if (!timeout) {
4283            rb_thread_sleep_forever();
4284 return 0;
4285 }
4286 rb_thread_wait_for(*timeout);
4287 return 0;
4288 }
4289
4290#define fd_init_copy(f) do { \
4291 if (set.f) { \
4292 rb_fd_resize(set.max - 1, set.f); \
4293 if (&set.orig_##f != set.f) { /* sigwait_fd */ \
4294 rb_fd_init_copy(&set.orig_##f, set.f); \
4295 } \
4296 } \
4297 else { \
4298 rb_fd_no_init(&set.orig_##f); \
4299 } \
4300 } while (0)
4301 fd_init_copy(rset);
4302 fd_init_copy(wset);
4303 fd_init_copy(eset);
4304#undef fd_init_copy
4305
4306 return (int)rb_ensure(do_select, (VALUE)&set, select_set_free, (VALUE)&set);
4307}
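/*
 * A usage sketch (`fd` is a hypothetical open descriptor; the pattern
 * mirrors select_single() further below):
 *
 *     rb_fdset_t rfds;
 *     rb_fd_init(&rfds);
 *     rb_fd_set(fd, &rfds);
 *     int n = rb_thread_fd_select(fd + 1, &rfds, NULL, NULL, NULL);
 *     if (n > 0 && rb_fd_isset(fd, &rfds)) {
 *         // fd is readable
 *     }
 *     rb_fd_term(&rfds);
 */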
4308
4309#ifdef USE_POLL
4310
4311/* Same as the Linux kernel's definitions. TODO: make platform-independent definitions. */
4312#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
4313#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
4314#define POLLEX_SET (POLLPRI)
4315
4316#ifndef POLLERR_SET /* defined for FreeBSD for now */
4317# define POLLERR_SET (0)
4318#endif
4319
4320/*
4321 * returns a mask of events
4322 */
4323int
4324rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
4325{
4326 struct pollfd fds[1];
4327 volatile int result = 0;
4328 nfds_t nfds;
4329 struct waiting_fd wfd;
4330 int state;
4331 volatile int lerrno;
4332
4333 rb_execution_context_t *ec = GET_EC();
4334 rb_thread_t *th = rb_ec_thread_ptr(ec);
4335
4336 if (thread_io_wait_events(th, ec, fd, events, timeout, &wfd)) {
4337 return 0; // timeout
4338 }
4339
4340 thread_io_setup_wfd(th, fd, &wfd);
4341
4342 EC_PUSH_TAG(wfd.th->ec);
4343 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
4344 rb_hrtime_t *to, rel, end = 0;
4345 RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
4346 timeout_prepare(&to, &rel, &end, timeout);
4347 volatile rb_hrtime_t endtime = end;
4348 fds[0].fd = fd;
4349 fds[0].events = (short)events;
4350 fds[0].revents = 0;
4351 do {
4352 nfds = 1;
4353
4354 lerrno = 0;
4355 BLOCKING_REGION(wfd.th, {
4356 struct timespec ts;
4357
4358 if (!RUBY_VM_INTERRUPTED(wfd.th->ec)) {
4359 result = ppoll(fds, nfds, rb_hrtime2timespec(&ts, to), 0);
4360 if (result < 0) lerrno = errno;
4361 }
4362 }, ubf_select, wfd.th, TRUE);
4363
4364 RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
4365 } while (wait_retryable(&result, lerrno, to, endtime));
4366 }
4367 EC_POP_TAG();
4368
4369 thread_io_wake_pending_closer(&wfd);
4370
4371 if (state) {
4372 EC_JUMP_TAG(wfd.th->ec, state);
4373 }
4374
4375 if (result < 0) {
4376 errno = lerrno;
4377 return -1;
4378 }
4379
4380 if (fds[0].revents & POLLNVAL) {
4381 errno = EBADF;
4382 return -1;
4383 }
4384
4385    /*
4386     * POLLIN and POLLOUT have different meanings from select(2)'s
4387     * read/write bits, so we need to fix them up.
4388     */
4389 result = 0;
4390 if (fds[0].revents & POLLIN_SET)
4391 result |= RB_WAITFD_IN;
4392 if (fds[0].revents & POLLOUT_SET)
4393 result |= RB_WAITFD_OUT;
4394 if (fds[0].revents & POLLEX_SET)
4395 result |= RB_WAITFD_PRI;
4396
4397 /* all requested events are ready if there is an error */
4398 if (fds[0].revents & POLLERR_SET)
4399 result |= events;
4400
4401 return result;
4402}
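/*
 * A usage sketch (assumes `fd` is a valid open descriptor):
 *
 *     struct timeval tv = { 1, 0 };    // wait at most one second
 *     int revents = rb_thread_wait_for_single_fd(fd, RB_WAITFD_IN, &tv);
 *     if (revents < 0) {
 *         rb_sys_fail(0);              // errno has been restored
 *     }
 *     else if (revents == 0) {
 *         // timeout expired
 *     }
 *     else if (revents & RB_WAITFD_IN) {
 *         // fd is ready to read
 *     }
 */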
4403#else /* ! USE_POLL - implement rb_io_poll_fd() using select() */
4404struct select_args {
4405 union {
4406 int fd;
4407 int error;
4408 } as;
4409 rb_fdset_t *read;
4410 rb_fdset_t *write;
4411 rb_fdset_t *except;
4412 struct waiting_fd wfd;
4413 struct timeval *tv;
4414};
4415
4416static VALUE
4417select_single(VALUE ptr)
4418{
4419 struct select_args *args = (struct select_args *)ptr;
4420 int r;
4421
4422 r = rb_thread_fd_select(args->as.fd + 1,
4423 args->read, args->write, args->except, args->tv);
4424 if (r == -1)
4425 args->as.error = errno;
4426 if (r > 0) {
4427 r = 0;
4428 if (args->read && rb_fd_isset(args->as.fd, args->read))
4429 r |= RB_WAITFD_IN;
4430 if (args->write && rb_fd_isset(args->as.fd, args->write))
4431 r |= RB_WAITFD_OUT;
4432 if (args->except && rb_fd_isset(args->as.fd, args->except))
4433 r |= RB_WAITFD_PRI;
4434 }
4435 return (VALUE)r;
4436}
4437
4438static VALUE
4439select_single_cleanup(VALUE ptr)
4440{
4441 struct select_args *args = (struct select_args *)ptr;
4442
4443 thread_io_wake_pending_closer(&args->wfd);
4444 if (args->read) rb_fd_term(args->read);
4445 if (args->write) rb_fd_term(args->write);
4446 if (args->except) rb_fd_term(args->except);
4447
4448 return (VALUE)-1;
4449}
4450
4451static rb_fdset_t *
4452init_set_fd(int fd, rb_fdset_t *fds)
4453{
4454 if (fd < 0) {
4455 return 0;
4456 }
4457 rb_fd_init(fds);
4458 rb_fd_set(fd, fds);
4459
4460 return fds;
4461}
4462
4463int
4464rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
4465{
4466 rb_fdset_t rfds, wfds, efds;
4467 struct select_args args;
4468 int r;
4469 VALUE ptr = (VALUE)&args;
4470 rb_execution_context_t *ec = GET_EC();
4471 rb_thread_t *th = rb_ec_thread_ptr(ec);
4472
4473 if (thread_io_wait_events(th, ec, fd, events, timeout, &args.wfd)) {
4474 return 0; // timeout
4475 }
4476
4477 args.as.fd = fd;
4478 args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
4479 args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
4480 args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;
4481 args.tv = timeout;
4482 args.wfd.fd = fd;
4483 args.wfd.th = th;
4484 args.wfd.busy = NULL;
4485
4486 RB_VM_LOCK_ENTER();
4487 {
4488 ccan_list_add(&args.wfd.th->vm->waiting_fds, &args.wfd.wfd_node);
4489 }
4490 RB_VM_LOCK_LEAVE();
4491
4492 r = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);
4493 if (r == -1)
4494 errno = args.as.error;
4495
4496 return r;
4497}
4498#endif /* ! USE_POLL */
4499
4500/*
4501 * for GC
4502 */
4503
4504#ifdef USE_CONSERVATIVE_STACK_END
4505void
4506rb_gc_set_stack_end(VALUE **stack_end_p)
4507{
4508 VALUE stack_end;
4509 *stack_end_p = &stack_end;
4510}
4511#endif
4512
4513/*
4514 *
4515 */
4516
4517void
4518rb_threadptr_check_signal(rb_thread_t *mth)
4519{
4520 /* mth must be main_thread */
4521 if (rb_signal_buff_size() > 0) {
4522 /* wakeup main thread */
4523 threadptr_trap_interrupt(mth);
4524 }
4525}
4526
4527static void
4528async_bug_fd(const char *mesg, int errno_arg, int fd)
4529{
4530 char buff[64];
4531 size_t n = strlcpy(buff, mesg, sizeof(buff));
4532 if (n < sizeof(buff)-3) {
4533 ruby_snprintf(buff+n, sizeof(buff)-n, "(%d)", fd);
4534 }
4535 rb_async_bug_errno(buff, errno_arg);
4536}
4537
4538/* VM-dependent APIs are not available in this function */
4539static int
4540consume_communication_pipe(int fd)
4541{
4542#if USE_EVENTFD
4543 uint64_t buff[1];
4544#else
4545    /* the buffer can be shared because no one else refers to it. */
4546 static char buff[1024];
4547#endif
4548 ssize_t result;
4549 int ret = FALSE; /* for rb_sigwait_sleep */
4550
4551 while (1) {
4552 result = read(fd, buff, sizeof(buff));
4553#if USE_EVENTFD
4554        RUBY_DEBUG_LOG("result:%d buff:%lu", (int)result, (unsigned long)buff[0]);
4555#else
4556 RUBY_DEBUG_LOG("result:%d", (int)result);
4557#endif
4558 if (result > 0) {
4559 ret = TRUE;
4560 if (USE_EVENTFD || result < (ssize_t)sizeof(buff)) {
4561 return ret;
4562 }
4563 }
4564 else if (result == 0) {
4565 return ret;
4566 }
4567 else if (result < 0) {
4568 int e = errno;
4569 switch (e) {
4570 case EINTR:
4571 continue; /* retry */
4572 case EAGAIN:
4573#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
4574 case EWOULDBLOCK:
4575#endif
4576 return ret;
4577 default:
4578 async_bug_fd("consume_communication_pipe: read", e, fd);
4579 }
4580 }
4581 }
4582}
4583
4584void
4585rb_thread_stop_timer_thread(void)
4586{
4587 if (TIMER_THREAD_CREATED_P() && native_stop_timer_thread()) {
4588 native_reset_timer_thread();
4589 }
4590}
4591
4592void
4593rb_thread_reset_timer_thread(void)
4594{
4595 native_reset_timer_thread();
4596}
4597
4598void
4599rb_thread_start_timer_thread(void)
4600{
4601 system_working = 1;
4602 rb_thread_create_timer_thread();
4603}
4604
4605static int
4606clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
4607{
4608 int i;
4609 VALUE coverage = (VALUE)val;
4610 VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
4611 VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
4612
4613 if (lines) {
4614 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
4615 rb_ary_clear(lines);
4616 }
4617 else {
4618 int i;
4619 for (i = 0; i < RARRAY_LEN(lines); i++) {
4620 if (RARRAY_AREF(lines, i) != Qnil)
4621 RARRAY_ASET(lines, i, INT2FIX(0));
4622 }
4623 }
4624 }
4625 if (branches) {
4626 VALUE counters = RARRAY_AREF(branches, 1);
4627 for (i = 0; i < RARRAY_LEN(counters); i++) {
4628 RARRAY_ASET(counters, i, INT2FIX(0));
4629 }
4630 }
4631
4632 return ST_CONTINUE;
4633}
4634
4635void
4636rb_clear_coverages(void)
4637{
4638 VALUE coverages = rb_get_coverages();
4639 if (RTEST(coverages)) {
4640 rb_hash_foreach(coverages, clear_coverage_i, 0);
4641 }
4642}
4643
4644#if defined(HAVE_WORKING_FORK)
4645
4646static void
4647rb_thread_atfork_internal(rb_thread_t *th, void (*atfork)(rb_thread_t *, const rb_thread_t *))
4648{
4649 rb_thread_t *i = 0;
4650 rb_vm_t *vm = th->vm;
4651 rb_ractor_t *r = th->ractor;
4652 vm->ractor.main_ractor = r;
4653 vm->ractor.main_thread = th;
4654 r->threads.main = th;
4655 r->status_ = ractor_created;
4656
4657 thread_sched_atfork(TH_SCHED(th));
4658 ubf_list_atfork();
4659
4660    // OK: after fork only this thread is running, so it alone walks the thread lists:
4661 ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
4662 ccan_list_for_each(&r->threads.set, i, lt_node) {
4663 atfork(i, th);
4664 }
4665 }
4666 rb_vm_living_threads_init(vm);
4667
4668 rb_ractor_atfork(vm, th);
4669 rb_vm_postponed_job_atfork();
4670
4671 /* may be held by RJIT threads in parent */
4672 rb_native_mutex_initialize(&vm->workqueue_lock);
4673
4674 /* may be held by any thread in parent */
4675 rb_native_mutex_initialize(&th->interrupt_lock);
4676
4677 vm->fork_gen++;
4678 rb_ractor_sleeper_threads_clear(th->ractor);
4679 rb_clear_coverages();
4680
4681    // restart the timer thread (timer threads access `vm->waitpid_lock` and so on).
4682 rb_thread_reset_timer_thread();
4683 rb_thread_start_timer_thread();
4684
4685 VM_ASSERT(vm->ractor.blocking_cnt == 0);
4686 VM_ASSERT(vm->ractor.cnt == 1);
4687}
4688
4689static void
4690terminate_atfork_i(rb_thread_t *th, const rb_thread_t *current_th)
4691{
4692 if (th != current_th) {
4693 rb_mutex_abandon_keeping_mutexes(th);
4694 rb_mutex_abandon_locking_mutex(th);
4695 thread_cleanup_func(th, TRUE);
4696 }
4697}
4698
4699void rb_fiber_atfork(rb_thread_t *);
4700void
4701rb_thread_atfork(void)
4702{
4703 rb_thread_t *th = GET_THREAD();
4704 rb_threadptr_pending_interrupt_clear(th);
4705 rb_thread_atfork_internal(th, terminate_atfork_i);
4706 th->join_list = NULL;
4707 rb_fiber_atfork(th);
4708
4709    /* We don't want to reproduce CVE-2003-0900. */
4710    rb_reset_random_seed();
4711}
4712
4713static void
4714terminate_atfork_before_exec_i(rb_thread_t *th, const rb_thread_t *current_th)
4715{
4716 if (th != current_th) {
4717 thread_cleanup_func_before_exec(th);
4718 }
4719}
4720
4721void
4722rb_thread_atfork_before_exec(void)
4723{
4724 rb_thread_t *th = GET_THREAD();
4725 rb_thread_atfork_internal(th, terminate_atfork_before_exec_i);
4726}
4727#else
4728void
4729rb_thread_atfork(void)
4730{
4731}
4732
4733void
4734rb_thread_atfork_before_exec(void)
4735{
4736}
4737#endif
4739struct thgroup {
4740 int enclosed;
4741};
4742
4743static const rb_data_type_t thgroup_data_type = {
4744 "thgroup",
4745 {
4746        0,
4747        RUBY_TYPED_DEFAULT_FREE,
4748        NULL, // No external memory to report
4749 },
4750 0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
4751};
4752
4753/*
4754 * Document-class: ThreadGroup
4755 *
4756 * ThreadGroup provides a means of keeping track of a number of threads as a
4757 * group.
4758 *
4759 * A given Thread object can only belong to one ThreadGroup at a time; adding
4760 * a thread to a new group will remove it from any previous group.
4761 *
4762 * Newly created threads belong to the same group as the thread from which they
4763 * were created.
4764 */
4765
4766/*
4767 * Document-const: Default
4768 *
4769 * The default ThreadGroup created when Ruby starts; all Threads belong to it
4770 * by default.
4771 */
4772static VALUE
4773thgroup_s_alloc(VALUE klass)
4774{
4775 VALUE group;
4776 struct thgroup *data;
4777
4778 group = TypedData_Make_Struct(klass, struct thgroup, &thgroup_data_type, data);
4779 data->enclosed = 0;
4780
4781 return group;
4782}
4783
4784/*
4785 * call-seq:
4786 * thgrp.list -> array
4787 *
4788 * Returns an array of all existing Thread objects that belong to this group.
4789 *
4790 * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
4791 */
4792
4793static VALUE
4794thgroup_list(VALUE group)
4795{
4796 VALUE ary = rb_ary_new();
4797 rb_thread_t *th = 0;
4798 rb_ractor_t *r = GET_RACTOR();
4799
4800 ccan_list_for_each(&r->threads.set, th, lt_node) {
4801 if (th->thgroup == group) {
4802 rb_ary_push(ary, th->self);
4803 }
4804 }
4805 return ary;
4806}
4807
4808
4809/*
4810 * call-seq:
4811 * thgrp.enclose -> thgrp
4812 *
4813 * Prevents threads from being added to or removed from the receiving
4814 * ThreadGroup.
4815 *
4816 * New threads can still be started in an enclosed ThreadGroup.
4817 *
4818 * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
4819 * thr = Thread.new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
4820 * tg = ThreadGroup.new #=> #<ThreadGroup:0x402752d4>
4821 * tg.add thr
4822 * #=> ThreadError: can't move from the enclosed thread group
4823 */
4824
4825static VALUE
4826thgroup_enclose(VALUE group)
4827{
4828 struct thgroup *data;
4829
4830 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4831 data->enclosed = 1;
4832
4833 return group;
4834}
4835
4836
4837/*
4838 * call-seq:
4839 * thgrp.enclosed? -> true or false
4840 *
4841 * Returns +true+ if the +thgrp+ is enclosed. See also ThreadGroup#enclose.
4842 */
4843
4844static VALUE
4845thgroup_enclosed_p(VALUE group)
4846{
4847 struct thgroup *data;
4848
4849 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4850 return RBOOL(data->enclosed);
4851}
4852
4853
4854/*
4855 * call-seq:
4856 * thgrp.add(thread) -> thgrp
4857 *
4858 * Adds the given +thread+ to this group, removing it from any other
4859 * group to which it may have previously been a member.
4860 *
4861 * puts "Initial group is #{ThreadGroup::Default.list}"
4862 * tg = ThreadGroup.new
4863 * t1 = Thread.new { sleep }
4864 * t2 = Thread.new { sleep }
4865 * puts "t1 is #{t1}"
4866 * puts "t2 is #{t2}"
4867 * tg.add(t1)
4868 * puts "Initial group now #{ThreadGroup::Default.list}"
4869 * puts "tg group now #{tg.list}"
4870 *
4871 * This will produce:
4872 *
4873 * Initial group is #<Thread:0x401bdf4c>
4874 * t1 is #<Thread:0x401b3c90>
4875 * t2 is #<Thread:0x401b3c18>
4876 * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
4877 * tg group now #<Thread:0x401b3c90>
4878 */
4879
4880static VALUE
4881thgroup_add(VALUE group, VALUE thread)
4882{
4883 rb_thread_t *target_th = rb_thread_ptr(thread);
4884 struct thgroup *data;
4885
4886 if (OBJ_FROZEN(group)) {
4887 rb_raise(rb_eThreadError, "can't move to the frozen thread group");
4888 }
4889 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4890 if (data->enclosed) {
4891 rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
4892 }
4893
4894 if (OBJ_FROZEN(target_th->thgroup)) {
4895 rb_raise(rb_eThreadError, "can't move from the frozen thread group");
4896 }
4897 TypedData_Get_Struct(target_th->thgroup, struct thgroup, &thgroup_data_type, data);
4898 if (data->enclosed) {
4899 rb_raise(rb_eThreadError,
4900 "can't move from the enclosed thread group");
4901 }
4902
4903 target_th->thgroup = group;
4904 return group;
4905}
4906
4907/*
4908 * Document-class: ThreadShield
4909 */
4910static void
4911thread_shield_mark(void *ptr)
4912{
4913 rb_gc_mark((VALUE)ptr);
4914}
4915
4916static const rb_data_type_t thread_shield_data_type = {
4917 "thread_shield",
4918 {thread_shield_mark, 0, 0,},
4919 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
4920};
4921
4922static VALUE
4923thread_shield_alloc(VALUE klass)
4924{
4925 return TypedData_Wrap_Struct(klass, &thread_shield_data_type, (void *)mutex_alloc(0));
4926}
4927
4928#define GetThreadShieldPtr(obj) ((VALUE)rb_check_typeddata((obj), &thread_shield_data_type))
4929#define THREAD_SHIELD_WAITING_MASK (((FL_USER19-1)&~(FL_USER0-1))|FL_USER19)
4930#define THREAD_SHIELD_WAITING_SHIFT (FL_USHIFT)
4931#define THREAD_SHIELD_WAITING_MAX (THREAD_SHIELD_WAITING_MASK>>THREAD_SHIELD_WAITING_SHIFT)
4932STATIC_ASSERT(THREAD_SHIELD_WAITING_MAX, THREAD_SHIELD_WAITING_MAX <= UINT_MAX);
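/*
 * The number of threads currently waiting on a shield is packed into the
 * FL_USER0..FL_USER19 bits of the shield object's own flags, so no extra
 * counter field is needed. rb_thread_shield_waiting() extracts the count,
 * and the inc/dec helpers below rewrite the masked bit field in place,
 * capping the number of waiters at THREAD_SHIELD_WAITING_MAX.
 */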
4933static inline unsigned int
4934rb_thread_shield_waiting(VALUE b)
4935{
4936 return ((RBASIC(b)->flags&THREAD_SHIELD_WAITING_MASK)>>THREAD_SHIELD_WAITING_SHIFT);
4937}
4938
4939static inline void
4940rb_thread_shield_waiting_inc(VALUE b)
4941{
4942 unsigned int w = rb_thread_shield_waiting(b);
4943 w++;
4944 if (w > THREAD_SHIELD_WAITING_MAX)
4945 rb_raise(rb_eRuntimeError, "waiting count overflow");
4946 RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
4947 RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
4948}
4949
4950static inline void
4951rb_thread_shield_waiting_dec(VALUE b)
4952{
4953 unsigned int w = rb_thread_shield_waiting(b);
4954 if (!w) rb_raise(rb_eRuntimeError, "waiting count underflow");
4955 w--;
4956 RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
4957 RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
4958}
4959
4960VALUE
4961rb_thread_shield_new(void)
4962{
4963 VALUE thread_shield = thread_shield_alloc(rb_cThreadShield);
4964 rb_mutex_lock((VALUE)DATA_PTR(thread_shield));
4965 return thread_shield;
4966}
4967
4968bool
4969rb_thread_shield_owned(VALUE self)
4970{
4971 VALUE mutex = GetThreadShieldPtr(self);
4972 if (!mutex) return false;
4973
4974 rb_mutex_t *m = mutex_ptr(mutex);
4975
4976 return m->fiber == GET_EC()->fiber_ptr;
4977}
4978
4979/*
4980 * Wait for a thread shield.
4981 *
4982 * Returns
4983 *  true: acquired the thread shield
4984 *  false: the thread shield was destroyed and no other threads are waiting
4985 *  nil: the thread shield was destroyed but it is still in use
4986 */
4987VALUE
4988rb_thread_shield_wait(VALUE self)
4989{
4990 VALUE mutex = GetThreadShieldPtr(self);
4991 rb_mutex_t *m;
4992
4993 if (!mutex) return Qfalse;
4994 m = mutex_ptr(mutex);
4995 if (m->fiber == GET_EC()->fiber_ptr) return Qnil;
4996 rb_thread_shield_waiting_inc(self);
4997 rb_mutex_lock(mutex);
4998 rb_thread_shield_waiting_dec(self);
4999 if (DATA_PTR(self)) return Qtrue;
5000 rb_mutex_unlock(mutex);
5001 return rb_thread_shield_waiting(self) > 0 ? Qnil : Qfalse;
5002}
5003
5004static VALUE
5005thread_shield_get_mutex(VALUE self)
5006{
5007 VALUE mutex = GetThreadShieldPtr(self);
5008 if (!mutex)
5009 rb_raise(rb_eThreadError, "destroyed thread shield - %p", (void *)self);
5010 return mutex;
5011}
5012
5013/*
5014 * Release a thread shield, and return true if it has waiting threads.
5015 */
5016VALUE
5017rb_thread_shield_release(VALUE self)
5018{
5019 VALUE mutex = thread_shield_get_mutex(self);
5020 rb_mutex_unlock(mutex);
5021 return RBOOL(rb_thread_shield_waiting(self) > 0);
5022}
5023
5024/*
5025 * Release and destroy a thread shield, and return true if it has waiting threads.
5026 */
5027VALUE
5028rb_thread_shield_destroy(VALUE self)
5029{
5030 VALUE mutex = thread_shield_get_mutex(self);
5031 DATA_PTR(self) = 0;
5032 rb_mutex_unlock(mutex);
5033 return RBOOL(rb_thread_shield_waiting(self) > 0);
5034}
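/*
 * A lifecycle sketch (a hypothetical owner/waiter split; variable names
 * are illustrative only):
 *
 *     // owner thread:
 *     VALUE shield = rb_thread_shield_new();   // locks the inner mutex
 *     // ... do the work other threads must wait for ...
 *     rb_thread_shield_destroy(shield);        // wakes all waiters
 *
 *     // waiting threads:
 *     VALUE r = rb_thread_shield_wait(shield); // Qtrue if acquired, or
 *                                              // Qnil/Qfalse after destroy
 */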
5035
5036static VALUE
5037threadptr_recursive_hash(rb_thread_t *th)
5038{
5039 return th->ec->local_storage_recursive_hash;
5040}
5041
5042static void
5043threadptr_recursive_hash_set(rb_thread_t *th, VALUE hash)
5044{
5045 th->ec->local_storage_recursive_hash = hash;
5046}
5047
5049
5050/*
5051 * Returns the current "recursive list" used to detect recursion.
5052 * This list is a hash table, unique for the current thread and for
5053 * the current __callee__.
5054 */
5055
5056static VALUE
5057recursive_list_access(VALUE sym)
5058{
5059 rb_thread_t *th = GET_THREAD();
5060 VALUE hash = threadptr_recursive_hash(th);
5061 VALUE list;
5062 if (NIL_P(hash) || !RB_TYPE_P(hash, T_HASH)) {
5063 hash = rb_ident_hash_new();
5064 threadptr_recursive_hash_set(th, hash);
5065 list = Qnil;
5066 }
5067 else {
5068 list = rb_hash_aref(hash, sym);
5069 }
5070 if (NIL_P(list) || !RB_TYPE_P(list, T_HASH)) {
5071 list = rb_ident_hash_new();
5072 rb_hash_aset(hash, sym, list);
5073 }
5074 return list;
5075}
5076
5077/*
5078 * Returns Qtrue if and only if obj (or the pair <obj, paired_obj>) is already
5079 * in the recursion list.
5080 * Assumes the recursion list is valid.
5081 */
5082
5083static VALUE
5084recursive_check(VALUE list, VALUE obj, VALUE paired_obj_id)
5085{
5086#if SIZEOF_LONG == SIZEOF_VOIDP
5087 #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
5088#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
5089 #define OBJ_ID_EQL(obj_id, other) (RB_BIGNUM_TYPE_P((obj_id)) ? \
5090 rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
5091#endif
5092
5093 VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5094 if (UNDEF_P(pair_list))
5095 return Qfalse;
5096 if (paired_obj_id) {
5097 if (!RB_TYPE_P(pair_list, T_HASH)) {
5098 if (!OBJ_ID_EQL(paired_obj_id, pair_list))
5099 return Qfalse;
5100 }
5101 else {
5102 if (NIL_P(rb_hash_lookup(pair_list, paired_obj_id)))
5103 return Qfalse;
5104 }
5105 }
5106 return Qtrue;
5107}
5108
5109/*
5110 * Pushes obj (or the pair <obj, paired_obj>) in the recursion list.
5111 * For a single obj, it sets list[obj] to Qtrue.
5112 * For a pair, it sets list[obj] to paired_obj_id if possible,
5113 * otherwise list[obj] becomes a hash like:
5114 * {paired_obj_id_1 => true, paired_obj_id_2 => true, ... }
5115 * Assumes the recursion list is valid.
5116 */
5117
5118static void
5119recursive_push(VALUE list, VALUE obj, VALUE paired_obj)
5120{
5121 VALUE pair_list;
5122
5123 if (!paired_obj) {
5124 rb_hash_aset(list, obj, Qtrue);
5125 }
5126 else if (UNDEF_P(pair_list = rb_hash_lookup2(list, obj, Qundef))) {
5127 rb_hash_aset(list, obj, paired_obj);
5128 }
5129 else {
5130 if (!RB_TYPE_P(pair_list, T_HASH)){
5131 VALUE other_paired_obj = pair_list;
5132 pair_list = rb_hash_new();
5133 rb_hash_aset(pair_list, other_paired_obj, Qtrue);
5134 rb_hash_aset(list, obj, pair_list);
5135 }
5136 rb_hash_aset(pair_list, paired_obj, Qtrue);
5137 }
5138}
5139
5140/*
5141 * Pops obj (or the pair <obj, paired_obj>) from the recursion list.
5142 * For a pair, if list[obj] is a hash, then paired_obj_id is
5143 * removed from the hash, and no attempt is made to simplify
5144 * list[obj] from {only_one_paired_id => true} to only_one_paired_id.
5145 * Assumes the recursion list is valid.
5146 */
5147
5148static int
5149recursive_pop(VALUE list, VALUE obj, VALUE paired_obj)
5150{
5151 if (paired_obj) {
5152 VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5153 if (UNDEF_P(pair_list)) {
5154 return 0;
5155 }
5156 if (RB_TYPE_P(pair_list, T_HASH)) {
5157 rb_hash_delete_entry(pair_list, paired_obj);
5158 if (!RHASH_EMPTY_P(pair_list)) {
5159                return 1; /* keep the hash until it is empty */
5160 }
5161 }
5162 }
5163 rb_hash_delete_entry(list, obj);
5164 return 1;
5165}
5167struct exec_recursive_params {
5168 VALUE (*func) (VALUE, VALUE, int);
5169 VALUE list;
5170 VALUE obj;
5171 VALUE pairid;
5172 VALUE arg;
5173};
5174
5175static VALUE
5176exec_recursive_i(RB_BLOCK_CALL_FUNC_ARGLIST(tag, data))
5177{
5178 struct exec_recursive_params *p = (void *)data;
5179 return (*p->func)(p->obj, p->arg, FALSE);
5180}
5181
5182/*
5183 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5184 * current method is called recursively on obj, or on the pair <obj, pairid>.
5185 * If outer is 0, then the innermost func will be called with recursive set
5186 * to Qtrue; otherwise the outermost func will be called. In the latter case,
5187 * all inner calls to func are short-circuited by throw.
5188 * Implementation detail: the value thrown is the recursive list, which is
5189 * specific to the current method and unlikely to be caught anywhere else.
5190 * list[recursive_key] is used as a flag for the outermost call.
5191 */
5192
5193static VALUE
5194exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer, ID mid)
5195{
5196 VALUE result = Qundef;
5197 const VALUE sym = mid ? ID2SYM(mid) : ID2SYM(idNULL);
5198 struct exec_recursive_params p;
5199 int outermost;
5200 p.list = recursive_list_access(sym);
5201 p.obj = obj;
5202 p.pairid = pairid;
5203 p.arg = arg;
5204 outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);
5205
5206 if (recursive_check(p.list, p.obj, pairid)) {
5207 if (outer && !outermost) {
5208 rb_throw_obj(p.list, p.list);
5209 }
5210 return (*func)(obj, arg, TRUE);
5211 }
5212 else {
5213 enum ruby_tag_type state;
5214
5215 p.func = func;
5216
5217 if (outermost) {
5218 recursive_push(p.list, ID2SYM(recursive_key), 0);
5219 recursive_push(p.list, p.obj, p.pairid);
5220 result = rb_catch_protect(p.list, exec_recursive_i, (VALUE)&p, &state);
5221 if (!recursive_pop(p.list, p.obj, p.pairid)) goto invalid;
5222 if (!recursive_pop(p.list, ID2SYM(recursive_key), 0)) goto invalid;
5223 if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5224 if (result == p.list) {
5225 result = (*func)(obj, arg, TRUE);
5226 }
5227 }
5228 else {
5229 volatile VALUE ret = Qundef;
5230 recursive_push(p.list, p.obj, p.pairid);
5231 EC_PUSH_TAG(GET_EC());
5232 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
5233 ret = (*func)(obj, arg, FALSE);
5234 }
5235 EC_POP_TAG();
5236 if (!recursive_pop(p.list, p.obj, p.pairid)) {
5237 goto invalid;
5238 }
5239 if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5240 result = ret;
5241 }
5242 }
5243    *(volatile struct exec_recursive_params *)&p; /* touch p as volatile so it survives the tag jumps above */
5244 return result;
5245
5246 invalid:
5247 rb_raise(rb_eTypeError, "invalid inspect_tbl pair_list "
5248 "for %+"PRIsVALUE" in %+"PRIsVALUE,
5249             sym, rb_thread_current());
5250    UNREACHABLE_RETURN(Qundef);
5251}
5252
5253/*
5254 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5255 * current method is called recursively on obj
5256 */
5257
5258VALUE
5259rb_exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5260{
5261 return exec_recursive(func, obj, 0, arg, 0, rb_frame_last_func());
5262}
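/*
 * A sketch of the usual pattern built on this entry point (helper names
 * are hypothetical; this is how recursion-safe inspect-style methods are
 * commonly structured):
 *
 *     static VALUE
 *     inspect_body(VALUE obj, VALUE arg, int recursive)
 *     {
 *         if (recursive) return rb_str_new_cstr("[...]");
 *         return real_inspect_logic(obj, arg);
 *     }
 *
 *     // ... return rb_exec_recursive(inspect_body, obj, arg);
 */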
5263
5264/*
5265 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5266 * current method is called recursively on the ordered pair <obj, paired_obj>
5267 */
5268
5269VALUE
5270rb_exec_recursive_paired(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5271{
5272 return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 0, rb_frame_last_func());
5273}
5274
5275/*
5276 * If recursion is detected on the current method and obj, the outermost
5277 * func will be called with (obj, arg, Qtrue). All inner calls to func will
5278 * be short-circuited using throw.
5279 */
5280
5281VALUE
5282rb_exec_recursive_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5283{
5284 return exec_recursive(func, obj, 0, arg, 1, rb_frame_last_func());
5285}
5286
5287VALUE
5288rb_exec_recursive_outer_mid(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg, ID mid)
5289{
5290 return exec_recursive(func, obj, 0, arg, 1, mid);
5291}
5292
5293/*
5294 * If recursion is detected on the current method, obj and paired_obj,
5295 * the outermost func will be called with (obj, arg, Qtrue). All inner
5296 * calls to func will be short-circuited using throw.
5297 */
5298
5299VALUE
5300rb_exec_recursive_paired_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5301{
5302 return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 1, rb_frame_last_func());
5303}
5304
5305/*
5306 * call-seq:
5307 * thread.backtrace -> array or nil
5308 *
5309 * Returns the current backtrace of the target thread.
5310 *
5311 */
5312
5313static VALUE
5314rb_thread_backtrace_m(int argc, VALUE *argv, VALUE thval)
5315{
5316 return rb_vm_thread_backtrace(argc, argv, thval);
5317}
5318
5319/* call-seq:
5320 * thread.backtrace_locations(*args) -> array or nil
5321 *
5322 * Returns the execution stack for the target thread---an array containing
5323 * backtrace location objects.
5324 *
5325 * See Thread::Backtrace::Location for more information.
5326 *
5327 * This method behaves similarly to Kernel#caller_locations except it applies
5328 * to a specific thread.
5329 */
5330static VALUE
5331rb_thread_backtrace_locations_m(int argc, VALUE *argv, VALUE thval)
5332{
5333 return rb_vm_thread_backtrace_locations(argc, argv, thval);
5334}
5335
5336void
5337Init_Thread_Mutex(void)
5338{
5339 rb_thread_t *th = GET_THREAD();
5340
5341 rb_native_mutex_initialize(&th->vm->workqueue_lock);
5342 rb_native_mutex_initialize(&th->interrupt_lock);
5343}
5344
5345/*
5346 * Document-class: ThreadError
5347 *
5348 * Raised when an invalid operation is attempted on a thread.
5349 *
5350 * For example, when no other thread has been started:
5351 *
5352 * Thread.stop
5353 *
5354 * This will raise the following exception:
5355 *
5356 * ThreadError: stopping only thread
5357 * note: use sleep to stop forever
5358 */
5359
5360void
5361Init_Thread(void)
5362{
5363 VALUE cThGroup;
5364 rb_thread_t *th = GET_THREAD();
5365
5366 sym_never = ID2SYM(rb_intern_const("never"));
5367 sym_immediate = ID2SYM(rb_intern_const("immediate"));
5368 sym_on_blocking = ID2SYM(rb_intern_const("on_blocking"));
5369
5370 rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
5371 rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
5372 rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);
5373 rb_define_singleton_method(rb_cThread, "main", rb_thread_s_main, 0);
5374 rb_define_singleton_method(rb_cThread, "current", thread_s_current, 0);
5375 rb_define_singleton_method(rb_cThread, "stop", thread_stop, 0);
5376 rb_define_singleton_method(rb_cThread, "kill", rb_thread_s_kill, 1);
5377 rb_define_singleton_method(rb_cThread, "exit", rb_thread_exit, 0);
5378 rb_define_singleton_method(rb_cThread, "pass", thread_s_pass, 0);
5379 rb_define_singleton_method(rb_cThread, "list", thread_list, 0);
5380 rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
5381 rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
5382 rb_define_singleton_method(rb_cThread, "report_on_exception", rb_thread_s_report_exc, 0);
5383 rb_define_singleton_method(rb_cThread, "report_on_exception=", rb_thread_s_report_exc_set, 1);
5384 rb_define_singleton_method(rb_cThread, "ignore_deadlock", rb_thread_s_ignore_deadlock, 0);
5385 rb_define_singleton_method(rb_cThread, "ignore_deadlock=", rb_thread_s_ignore_deadlock_set, 1);
5386 rb_define_singleton_method(rb_cThread, "handle_interrupt", rb_thread_s_handle_interrupt, 1);
5387 rb_define_singleton_method(rb_cThread, "pending_interrupt?", rb_thread_s_pending_interrupt_p, -1);
5388 rb_define_method(rb_cThread, "pending_interrupt?", rb_thread_pending_interrupt_p, -1);
5389
5390 rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
5391 rb_define_method(rb_cThread, "raise", thread_raise_m, -1);
5392 rb_define_method(rb_cThread, "join", thread_join_m, -1);
5393 rb_define_method(rb_cThread, "value", thread_value, 0);
5394 rb_define_method(rb_cThread, "kill", rb_thread_kill, 0);
5395 rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
5396 rb_define_method(rb_cThread, "exit", rb_thread_kill, 0);
5397 rb_define_method(rb_cThread, "run", rb_thread_run, 0);
5398 rb_define_method(rb_cThread, "wakeup", rb_thread_wakeup, 0);
5399 rb_define_method(rb_cThread, "[]", rb_thread_aref, 1);
5400 rb_define_method(rb_cThread, "[]=", rb_thread_aset, 2);
5401 rb_define_method(rb_cThread, "fetch", rb_thread_fetch, -1);
5402 rb_define_method(rb_cThread, "key?", rb_thread_key_p, 1);
5403 rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
5404 rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
5405 rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
5406 rb_define_method(rb_cThread, "status", rb_thread_status, 0);
5407 rb_define_method(rb_cThread, "thread_variable_get", rb_thread_variable_get, 1);
5408 rb_define_method(rb_cThread, "thread_variable_set", rb_thread_variable_set, 2);
5409 rb_define_method(rb_cThread, "thread_variables", rb_thread_variables, 0);
5410 rb_define_method(rb_cThread, "thread_variable?", rb_thread_variable_p, 1);
5411 rb_define_method(rb_cThread, "alive?", rb_thread_alive_p, 0);
5412 rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
5413 rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
5414 rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
5415 rb_define_method(rb_cThread, "report_on_exception", rb_thread_report_exc, 0);
5416 rb_define_method(rb_cThread, "report_on_exception=", rb_thread_report_exc_set, 1);
5417 rb_define_method(rb_cThread, "group", rb_thread_group, 0);
5418 rb_define_method(rb_cThread, "backtrace", rb_thread_backtrace_m, -1);
5419 rb_define_method(rb_cThread, "backtrace_locations", rb_thread_backtrace_locations_m, -1);
5420
5421 rb_define_method(rb_cThread, "name", rb_thread_getname, 0);
5422 rb_define_method(rb_cThread, "name=", rb_thread_setname, 1);
5423 rb_define_method(rb_cThread, "native_thread_id", rb_thread_native_thread_id, 0);
5424 rb_define_method(rb_cThread, "to_s", rb_thread_to_s, 0);
5425 rb_define_alias(rb_cThread, "inspect", "to_s");
5426
5427 rb_vm_register_special_exception(ruby_error_stream_closed, rb_eIOError,
5428 "stream closed in another thread");
5429
5430 cThGroup = rb_define_class("ThreadGroup", rb_cObject);
5431 rb_define_alloc_func(cThGroup, thgroup_s_alloc);
5432 rb_define_method(cThGroup, "list", thgroup_list, 0);
5433 rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
5434 rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
5435 rb_define_method(cThGroup, "add", thgroup_add, 1);
5436
5437 {
5438 th->thgroup = th->ractor->thgroup_default = rb_obj_alloc(cThGroup);
5439 rb_define_const(cThGroup, "Default", th->thgroup);
5440 }
5441
5443
5444 /* init thread core */
5445 {
5446 /* main thread setting */
5447 {
5448 /* acquire global vm lock */
5449#ifdef HAVE_PTHREAD_NP_H
5450 VM_ASSERT(TH_SCHED(th)->running == th);
5451#endif
5452 // thread_sched_to_running() should not be called because
5453 // it assumes blocked by thread_sched_to_waiting().
5454 // thread_sched_to_running(sched, th);
5455
5456 th->pending_interrupt_queue = rb_ary_hidden_new(0);
5457 th->pending_interrupt_queue_checked = 0;
5458 th->pending_interrupt_mask_stack = rb_ary_hidden_new(0);
5459 }
5460 }
5461
5462 rb_thread_create_timer_thread();
5463
5464 Init_thread_sync();
5465
5466 // TODO: Suppress unused function warning for now
5467 // if (0) rb_thread_sched_destroy(NULL);
5468}
5469
5470int
5471ruby_native_thread_p(void)
5472{
5473 rb_thread_t *th = ruby_thread_from_native();
5474
5475 return th != 0;
5476}
5477
5478#ifdef NON_SCALAR_THREAD_ID
5479 #define thread_id_str(th) (NULL)
5480#else
5481 #define thread_id_str(th) ((void *)(uintptr_t)(th)->nt->thread_id)
5482#endif
5483
5484static void
5485debug_deadlock_check(rb_ractor_t *r, VALUE msg)
5486{
5487 rb_thread_t *th = 0;
5488 VALUE sep = rb_str_new_cstr("\n ");
5489
5490 rb_str_catf(msg, "\n%d threads, %d sleeps current:%p main thread:%p\n",
5491 rb_ractor_living_thread_num(r), rb_ractor_sleeper_thread_num(r),
5492 (void *)GET_THREAD(), (void *)r->threads.main);
5493
5494 ccan_list_for_each(&r->threads.set, th, lt_node) {
5495 rb_str_catf(msg, "* %+"PRIsVALUE"\n rb_thread_t:%p "
5496 "native:%p int:%u",
5497 th->self, (void *)th, th->nt ? thread_id_str(th) : "N/A", th->ec->interrupt_flag);
5498
5499 if (th->locking_mutex) {
5500 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5501 rb_str_catf(msg, " mutex:%p cond:%"PRIuSIZE,
5502 (void *)mutex->fiber, rb_mutex_num_waiting(mutex));
5503 }
5504
5505 {
5506 struct rb_waiting_list *list = th->join_list;
5507 while (list) {
5508                rb_str_catf(msg, "\n depended by: rb_thread_t:%p", (void *)list->thread);
5509 list = list->next;
5510 }
5511 }
5512 rb_str_catf(msg, "\n ");
5513 rb_str_concat(msg, rb_ary_join(rb_ec_backtrace_str_ary(th->ec, 0, 0), sep));
5514 rb_str_catf(msg, "\n");
5515 }
5516}
5517
5518static void
5519rb_check_deadlock(rb_ractor_t *r)
5520{
5521 if (GET_THREAD()->vm->thread_ignore_deadlock) return;
5522
5523#ifdef RUBY_THREAD_PTHREAD_H
5524 if (r->threads.sched.readyq_cnt > 0) return;
5525#endif
5526
5527 int sleeper_num = rb_ractor_sleeper_thread_num(r);
5528 int ltnum = rb_ractor_living_thread_num(r);
5529
5530 if (ltnum > sleeper_num) return;
5531 if (ltnum < sleeper_num) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
5532
5533 int found = 0;
5534 rb_thread_t *th = NULL;
5535
5536 ccan_list_for_each(&r->threads.set, th, lt_node) {
5537 if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th->ec)) {
5538 found = 1;
5539 }
5540 else if (th->locking_mutex) {
5541 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5542 if (mutex->fiber == th->ec->fiber_ptr || (!mutex->fiber && !ccan_list_empty(&mutex->waitq))) {
5543 found = 1;
5544 }
5545 }
5546 if (found)
5547 break;
5548 }
5549
5550 if (!found) {
5551 VALUE argv[2];
5552 argv[0] = rb_eFatal;
5553 argv[1] = rb_str_new2("No live threads left. Deadlock?");
5554 debug_deadlock_check(r, argv[1]);
5555 rb_ractor_sleeper_threads_dec(GET_RACTOR());
5556 rb_threadptr_raise(r->threads.main, 2, argv);
5557 }
5558}
5559
5560// Used for VM memsize reporting. Returns the size of a list of waiting_fd
5561// structs. Defined here because the struct definition lives here as well.
5562size_t
5563rb_vm_memsize_waiting_fds(struct ccan_list_head *waiting_fds)
5564{
5565 struct waiting_fd *waitfd = 0;
5566 size_t size = 0;
5567
5568 ccan_list_for_each(waiting_fds, waitfd, wfd_node) {
5569 size += sizeof(struct waiting_fd);
5570 }
5571
5572 return size;
5573}
5574
5575static void
5576update_line_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5577{
5578 const rb_control_frame_t *cfp = GET_EC()->cfp;
5579 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5580 if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5581 VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
5582 if (lines) {
5583 long line = rb_sourceline() - 1;
5584 long count;
5585 VALUE num;
5586 void rb_iseq_clear_event_flags(const rb_iseq_t *iseq, size_t pos, rb_event_flag_t reset);
5587 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
5588 rb_iseq_clear_event_flags(cfp->iseq, cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1, RUBY_EVENT_COVERAGE_LINE);
5589 rb_ary_push(lines, LONG2FIX(line + 1));
5590 return;
5591 }
5592 if (line >= RARRAY_LEN(lines)) { /* no longer tracked */
5593 return;
5594 }
5595 num = RARRAY_AREF(lines, line);
5596 if (!FIXNUM_P(num)) return;
5597 count = FIX2LONG(num) + 1;
5598 if (POSFIXABLE(count)) {
5599 RARRAY_ASET(lines, line, LONG2FIX(count));
5600 }
5601 }
5602 }
5603}
5604
5605static void
5606update_branch_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5607{
5608 const rb_control_frame_t *cfp = GET_EC()->cfp;
5609 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5610 if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5611 VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
5612 if (branches) {
5613 long pc = cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1;
5614 long idx = FIX2INT(RARRAY_AREF(ISEQ_PC2BRANCHINDEX(cfp->iseq), pc)), count;
5615 VALUE counters = RARRAY_AREF(branches, 1);
5616 VALUE num = RARRAY_AREF(counters, idx);
5617 count = FIX2LONG(num) + 1;
5618 if (POSFIXABLE(count)) {
5619 RARRAY_ASET(counters, idx, LONG2FIX(count));
5620 }
5621 }
5622 }
5623}
5624
5625const rb_method_entry_t *
5626rb_resolve_me_location(const rb_method_entry_t *me, VALUE resolved_location[5])
5627{
5628 VALUE path, beg_pos_lineno, beg_pos_column, end_pos_lineno, end_pos_column;
5629
5630 if (!me->def) return NULL; // negative cme
5631
5632 retry:
5633 switch (me->def->type) {
5634 case VM_METHOD_TYPE_ISEQ: {
5635 const rb_iseq_t *iseq = me->def->body.iseq.iseqptr;
5636 rb_iseq_location_t *loc = &ISEQ_BODY(iseq)->location;
5637 path = rb_iseq_path(iseq);
5638 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5639 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5640 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5641 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5642 break;
5643 }
5644 case VM_METHOD_TYPE_BMETHOD: {
5645 const rb_iseq_t *iseq = rb_proc_get_iseq(me->def->body.bmethod.proc, 0);
5646 if (iseq) {
5647 rb_iseq_location_t *loc;
5648 rb_iseq_check(iseq);
5649 path = rb_iseq_path(iseq);
5650 loc = &ISEQ_BODY(iseq)->location;
5651 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5652 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5653 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5654 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5655 break;
5656 }
5657 return NULL;
5658 }
5659 case VM_METHOD_TYPE_ALIAS:
5660 me = me->def->body.alias.original_me;
5661 goto retry;
5662 case VM_METHOD_TYPE_REFINED:
5663 me = me->def->body.refined.orig_me;
5664 if (!me) return NULL;
5665 goto retry;
5666 default:
5667 return NULL;
5668 }
5669
5670 /* found */
5671 if (RB_TYPE_P(path, T_ARRAY)) {
5672 path = rb_ary_entry(path, 1);
5673 if (!RB_TYPE_P(path, T_STRING)) return NULL; /* just for the case... */
5674 }
5675 if (resolved_location) {
5676 resolved_location[0] = path;
5677 resolved_location[1] = beg_pos_lineno;
5678 resolved_location[2] = beg_pos_column;
5679 resolved_location[3] = end_pos_lineno;
5680 resolved_location[4] = end_pos_column;
5681 }
5682 return me;
5683}
5684
5685static void
5686update_method_coverage(VALUE me2counter, rb_trace_arg_t *trace_arg)
5687{
5688 const rb_control_frame_t *cfp = GET_EC()->cfp;
5689 const rb_callable_method_entry_t *cme = rb_vm_frame_method_entry(cfp);
5690 const rb_method_entry_t *me = (const rb_method_entry_t *)cme;
5691 VALUE rcount;
5692 long count;
5693
5694 me = rb_resolve_me_location(me, 0);
5695 if (!me) return;
5696
5697 rcount = rb_hash_aref(me2counter, (VALUE) me);
5698 count = FIXNUM_P(rcount) ? FIX2LONG(rcount) + 1 : 1;
5699 if (POSFIXABLE(count)) {
5700 rb_hash_aset(me2counter, (VALUE) me, LONG2FIX(count));
5701 }
5702}
5703
5704VALUE
5705rb_get_coverages(void)
5706{
5707 return GET_VM()->coverages;
5708}
5709
5710int
5711rb_get_coverage_mode(void)
5712{
5713 return GET_VM()->coverage_mode;
5714}
5715
5716void
5717rb_set_coverages(VALUE coverages, int mode, VALUE me2counter)
5718{
5719 GET_VM()->coverages = coverages;
5720 GET_VM()->me2counter = me2counter;
5721 GET_VM()->coverage_mode = mode;
5722}
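/*
 * These entry points back the Coverage extension. A condensed sketch of
 * the call order it uses (a minimal sketch; mode flags may be OR'ed
 * together, and error handling is omitted):
 *
 *     rb_set_coverages(rb_hash_new(), COVERAGE_TARGET_LINES, Qnil);
 *     rb_resume_coverages();    // installs the update_* hooks defined above
 *     // ... load and run code; the hooks bump the per-file counters ...
 *     rb_suspend_coverages();   // removes the hooks
 *     rb_reset_coverages();     // drops the collected data entirely
 */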
5723
5724void
5725rb_resume_coverages(void)
5726{
5727 int mode = GET_VM()->coverage_mode;
5728 VALUE me2counter = GET_VM()->me2counter;
5729 rb_add_event_hook2((rb_event_hook_func_t) update_line_coverage, RUBY_EVENT_COVERAGE_LINE, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5730 if (mode & COVERAGE_TARGET_BRANCHES) {
5731 rb_add_event_hook2((rb_event_hook_func_t) update_branch_coverage, RUBY_EVENT_COVERAGE_BRANCH, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5732 }
5733 if (mode & COVERAGE_TARGET_METHODS) {
5734 rb_add_event_hook2((rb_event_hook_func_t) update_method_coverage, RUBY_EVENT_CALL, me2counter, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5735 }
5736}
5737
5738void
5739rb_suspend_coverages(void)
5740{
5741 rb_remove_event_hook((rb_event_hook_func_t) update_line_coverage);
5742 if (GET_VM()->coverage_mode & COVERAGE_TARGET_BRANCHES) {
5743 rb_remove_event_hook((rb_event_hook_func_t) update_branch_coverage);
5744 }
5745 if (GET_VM()->coverage_mode & COVERAGE_TARGET_METHODS) {
5746 rb_remove_event_hook((rb_event_hook_func_t) update_method_coverage);
5747 }
5748}
5749
5750/* Make coverage arrays empty so old covered files are no longer tracked. */
5751void
5752rb_reset_coverages(void)
5753{
5754 rb_clear_coverages();
5755 rb_iseq_remove_coverage_all();
5756 GET_VM()->coverages = Qfalse;
5757}
5758
5759VALUE
5760rb_default_coverage(int n)
5761{
5762 VALUE coverage = rb_ary_hidden_new_fill(3);
5763 VALUE lines = Qfalse, branches = Qfalse;
5764 int mode = GET_VM()->coverage_mode;
5765
5766 if (mode & COVERAGE_TARGET_LINES) {
5767 lines = n > 0 ? rb_ary_hidden_new_fill(n) : rb_ary_hidden_new(0);
5768 }
5769 RARRAY_ASET(coverage, COVERAGE_INDEX_LINES, lines);
5770
5771 if (mode & COVERAGE_TARGET_BRANCHES) {
5772 branches = rb_ary_hidden_new_fill(2);
5773 /* internal data structures for branch coverage:
5774 *
5775 * { branch base node =>
5776 * [base_type, base_first_lineno, base_first_column, base_last_lineno, base_last_column, {
5777 * branch target id =>
5778 * [target_type, target_first_lineno, target_first_column, target_last_lineno, target_last_column, target_counter_index],
5779 * ...
5780 * }],
5781 * ...
5782 * }
5783 *
5784 * Example:
5785 * { NODE_CASE =>
5786 * [1, 0, 4, 3, {
5787 * NODE_WHEN => [2, 8, 2, 9, 0],
5788 * NODE_WHEN => [3, 8, 3, 9, 1],
5789 * ...
5790 * }],
5791 * ...
5792 * }
5793 */
5794 VALUE structure = rb_hash_new();
5795 rb_obj_hide(structure);
5796 RARRAY_ASET(branches, 0, structure);
5797 /* branch execution counters */
5798 RARRAY_ASET(branches, 1, rb_ary_hidden_new(0));
5799 }
5800 RARRAY_ASET(coverage, COVERAGE_INDEX_BRANCHES, branches);
5801
5802 return coverage;
5803}
5804
5805static VALUE
5806uninterruptible_exit(VALUE v)
5807{
5808 rb_thread_t *cur_th = GET_THREAD();
5809 rb_ary_pop(cur_th->pending_interrupt_mask_stack);
5810
5811 cur_th->pending_interrupt_queue_checked = 0;
5812 if (!rb_threadptr_pending_interrupt_empty_p(cur_th)) {
5813 RUBY_VM_SET_INTERRUPT(cur_th->ec);
5814 }
5815 return Qnil;
5816}
5817
5818VALUE
5819rb_uninterruptible(VALUE (*b_proc)(VALUE), VALUE data)
5820{
5821 VALUE interrupt_mask = rb_ident_hash_new();
5822 rb_thread_t *cur_th = GET_THREAD();
5823
5824 rb_hash_aset(interrupt_mask, rb_cObject, sym_never);
5825 OBJ_FREEZE_RAW(interrupt_mask);
5826 rb_ary_push(cur_th->pending_interrupt_mask_stack, interrupt_mask);
5827
5828 VALUE ret = rb_ensure(b_proc, data, uninterruptible_exit, Qnil);
5829
5830 RUBY_VM_CHECK_INTS(cur_th->ec);
5831 return ret;
5832}
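/*
 * A usage sketch (the callback name is hypothetical). This is the C-level
 * counterpart of wrapping a block in
 * Thread.handle_interrupt(Object => :never):
 *
 *     static VALUE
 *     critical_body(VALUE arg)
 *     {
 *         // runs with asynchronous interrupts (Thread#raise, Thread#kill)
 *         // deferred until rb_uninterruptible returns
 *         return Qnil;
 *     }
 *
 *     // ... rb_uninterruptible(critical_body, (VALUE)data);
 */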
5833
5834static void
5835thread_specific_storage_alloc(rb_thread_t *th)
5836{
5837 VM_ASSERT(th->specific_storage == NULL);
5838
5839 if (UNLIKELY(specific_key_count > 0)) {
5840 th->specific_storage = ZALLOC_N(void *, RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
5841 }
5842}
5843
5844rb_internal_thread_specific_key_t
5845rb_internal_thread_specific_key_create(void)
5846{
5847 rb_vm_t *vm = GET_VM();
5848
5849 if (specific_key_count == 0 && vm->ractor.cnt > 1) {
5850 rb_raise(rb_eThreadError, "The first rb_internal_thread_specific_key_create() is called with multiple ractors");
5851 }
5852 else if (specific_key_count > RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX) {
5853 rb_raise(rb_eThreadError, "rb_internal_thread_specific_key_create() is called more than %d times", RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
5854 }
5855 else {
5856 rb_internal_thread_specific_key_t key = specific_key_count++;
5857
5858 if (key == 0) {
5859 // allocate
5860 rb_ractor_t *cr = GET_RACTOR();
5861 rb_thread_t *th;
5862
5863 ccan_list_for_each(&cr->threads.set, th, lt_node) {
5864 thread_specific_storage_alloc(th);
5865 }
5866 }
5867 return key;
5868 }
5869}
5870
5871// async and native thread safe.
5872void *
5873rb_internal_thread_specific_get(VALUE thread_val, rb_internal_thread_specific_key_t key)
5874{
5875 rb_thread_t *th = DATA_PTR(thread_val);
5876
5877 VM_ASSERT(rb_thread_ptr(thread_val) == th);
5878 VM_ASSERT(key < RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
5879 VM_ASSERT(th->specific_storage);
5880
5881 return th->specific_storage[key];
5882}
5883
5884// async and native thread safe.
5885void
5886rb_internal_thread_specific_set(VALUE thread_val, rb_internal_thread_specific_key_t key, void *data)
5887{
5888 rb_thread_t *th = DATA_PTR(thread_val);
5889
5890 VM_ASSERT(rb_thread_ptr(thread_val) == th);
5891 VM_ASSERT(key < RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
5892 VM_ASSERT(th->specific_storage);
5893
5894 th->specific_storage[key] = data;
5895}
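/*
 * A usage sketch for the key/value API above (the stored pointer is
 * hypothetical; as enforced above, the first key must be created before
 * additional ractors exist):
 *
 *     static rb_internal_thread_specific_key_t key;
 *     key = rb_internal_thread_specific_key_create();
 *
 *     rb_internal_thread_specific_set(thread, key, some_ptr);
 *     void *p = rb_internal_thread_specific_get(thread, key);
 */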
#define RUBY_ASSERT_ALWAYS(expr)
A variant of RUBY_ASSERT that does not interface with RUBY_DEBUG.
Definition assert.h:167
std::atomic< unsigned > rb_atomic_t
Type that is eligible for atomic operations.
Definition atomic.h:69
#define rb_define_method(klass, mid, func, arity)
Defines klass#mid.
#define rb_define_singleton_method(klass, mid, func, arity)
Defines klass.mid.
#define RUBY_INTERNAL_EVENT_SWITCH
Thread switched.
Definition event.h:90
int rb_remove_event_hook(rb_event_hook_func_t func)
Removes the passed function from the list of event hooks.
Definition vm_trace.c:315
#define RUBY_EVENT_THREAD_BEGIN
Encountered a new thread.
Definition event.h:57
void(* rb_event_hook_func_t)(rb_event_flag_t evflag, VALUE data, VALUE self, ID mid, VALUE klass)
Type of event hooks.
Definition event.h:120
uint32_t rb_event_flag_t
Represents event(s).
Definition event.h:108
#define RUBY_EVENT_CALL
A method, written in Ruby, is called.
Definition event.h:41
#define RUBY_EVENT_THREAD_END
Encountered an end of a thread.
Definition event.h:58
static void RB_FL_SET_RAW(VALUE obj, VALUE flags)
This is an implementation detail of RB_FL_SET().
Definition fl_type.h:606
VALUE rb_define_class(const char *name, VALUE super)
Defines a top-level class.
Definition class.c:970
void rb_define_alias(VALUE klass, const char *name1, const char *name2)
Defines an alias of a method.
Definition class.c:2336
ID rb_frame_last_func(void)
Returns the ID of the last method in the call stack.
Definition eval.c:1096
int rb_keyword_given_p(void)
Determines if the current method is given a keyword argument.
Definition eval.c:879
int rb_block_given_p(void)
Determines if the current method is given a block.
Definition eval.c:866
#define rb_str_new2
Old name of rb_str_new_cstr.
Definition string.h:1675
#define ALLOC
Old name of RB_ALLOC.
Definition memory.h:394
#define T_STRING
Old name of RUBY_T_STRING.
Definition value_type.h:78
#define xfree
Old name of ruby_xfree.
Definition xmalloc.h:58
#define Qundef
Old name of RUBY_Qundef.
#define INT2FIX
Old name of RB_INT2FIX.
Definition long.h:48
#define OBJ_FROZEN
Old name of RB_OBJ_FROZEN.
Definition fl_type.h:137
#define xrealloc
Old name of ruby_xrealloc.
Definition xmalloc.h:56
#define ID2SYM
Old name of RB_ID2SYM.
Definition symbol.h:44
#define OBJ_FREEZE_RAW
Old name of RB_OBJ_FREEZE_RAW.
Definition fl_type.h:136
#define UNREACHABLE_RETURN
Old name of RBIMPL_UNREACHABLE_RETURN.
Definition assume.h:29
#define CLASS_OF
Old name of rb_class_of.
Definition globals.h:203
#define xmalloc
Old name of ruby_xmalloc.
Definition xmalloc.h:53
#define LONG2FIX
Old name of RB_INT2FIX.
Definition long.h:49
#define FIX2INT
Old name of RB_FIX2INT.
Definition int.h:41
#define ZALLOC_N
Old name of RB_ZALLOC_N.
Definition memory.h:395
#define T_ICLASS
Old name of RUBY_T_ICLASS.
Definition value_type.h:66
#define T_HASH
Old name of RUBY_T_HASH.
Definition value_type.h:65
#define Qtrue
Old name of RUBY_Qtrue.
#define NUM2INT
Old name of RB_NUM2INT.
Definition int.h:44
#define INT2NUM
Old name of RB_INT2NUM.
Definition int.h:43
#define Qnil
Old name of RUBY_Qnil.
#define Qfalse
Old name of RUBY_Qfalse.
#define FIX2LONG
Old name of RB_FIX2LONG.
Definition long.h:46
#define T_ARRAY
Old name of RUBY_T_ARRAY.
Definition value_type.h:56
#define T_OBJECT
Old name of RUBY_T_OBJECT.
Definition value_type.h:75
#define NIL_P
Old name of RB_NIL_P.
#define POSFIXABLE
Old name of RB_POSFIXABLE.
Definition fixnum.h:29
#define BUILTIN_TYPE
Old name of RB_BUILTIN_TYPE.
Definition value_type.h:85
#define FIXNUM_P
Old name of RB_FIXNUM_P.
#define SYMBOL_P
Old name of RB_SYMBOL_P.
Definition value_type.h:88
void ruby_stop(int ex)
Calls ruby_cleanup() and exits the process.
Definition eval.c:296
#define ruby_debug
This variable controls whether the interpreter is in debug mode.
Definition error.h:482
VALUE rb_eSystemExit
SystemExit exception.
Definition error.c:1337
VALUE rb_eIOError
IOError exception.
Definition io.c:178
VALUE rb_eStandardError
StandardError exception.
Definition error.c:1341
VALUE rb_eTypeError
TypeError exception.
Definition error.c:1344
void rb_frozen_error_raise(VALUE frozen_obj, const char *fmt,...)
Raises an instance of rb_eFrozenError.
Definition error.c:3779
VALUE rb_eFatal
fatal exception.
Definition error.c:1340
VALUE rb_eRuntimeError
RuntimeError exception.
Definition error.c:1342
void rb_warn(const char *fmt,...)
Identical to rb_warning(), except it reports unless $VERBOSE is nil.
Definition error.c:423
VALUE rb_exc_new(VALUE etype, const char *ptr, long len)
Creates an instance of the passed exception class.
Definition error.c:1382
VALUE rb_eException
Mother of all exceptions.
Definition error.c:1336
VALUE rb_eThreadError
ThreadError exception.
Definition eval.c:884
void rb_exit(int status)
Terminates the current execution context.
Definition process.c:4454
VALUE rb_eSignal
SignalException exception.
Definition error.c:1339
VALUE rb_obj_alloc(VALUE klass)
Allocates an instance of the given class.
Definition object.c:2058
VALUE rb_cInteger
Integer class.
Definition numeric.c:198
VALUE rb_obj_class(VALUE obj)
Queries the class of an object.
Definition object.c:215
VALUE rb_cThread
Thread class.
Definition vm.c:524
VALUE rb_cModule
Module class.
Definition object.c:65
double rb_num2dbl(VALUE num)
Converts an instance of rb_cNumeric into C's double.
Definition object.c:3638
VALUE rb_obj_is_kind_of(VALUE obj, VALUE klass)
Queries if the given object is an instance of the given class (or of one of its subclasses).
Definition object.c:830
static int rb_check_arity(int argc, int min, int max)
Ensures that the passed integer is in the passed range.
Definition error.h:280
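A minimal sketch of rb_check_arity() inside a C method registered with arity -1 (variadic); the names are hypothetical:
static VALUE
demo_fetch(int argc, VALUE *argv, VALUE self)
{
    rb_check_arity(argc, 1, 2);  /* raises ArgumentError unless 1 <= argc <= 2 */
    VALUE fallback = (argc > 1) ? argv[1] : Qnil;
    return fallback;
}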
VALUE rb_block_proc(void)
Constructs a Proc object from implicitly passed components.
Definition proc.c:807
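A minimal sketch combining rb_block_given_p() and rb_block_proc(): the method captures its implicit block as a Proc and calls it (names hypothetical):
static VALUE
demo_run(VALUE self)
{
    if (!rb_block_given_p()) return Qnil;    /* no block was passed */
    VALUE blk = rb_block_proc();             /* capture the implicit block */
    return rb_proc_call(blk, rb_ary_new());  /* equivalent to blk.call */
}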
void rb_reset_random_seed(void)
Resets the RNG behind rb_genrand_int32()/rb_genrand_real().
Definition random.c:1782
VALUE rb_str_concat(VALUE dst, VALUE src)
Identical to rb_str_append(), except it also accepts an integer as a codepoint.
Definition string.c:3505
#define rb_str_cat_cstr(buf, str)
Identical to rb_str_cat(), except it assumes the passed pointer is a pointer to a C string.
Definition string.h:1656
#define rb_str_new_cstr(str)
Identical to rb_str_new, except it assumes the passed pointer is a pointer to a C string.
Definition string.h:1514
int rb_thread_interrupted(VALUE thval)
Checks if the thread's execution was recently interrupted.
Definition thread.c:1433
VALUE rb_thread_local_aref(VALUE thread, ID key)
This badly named function reads from Fiber-local storage (not Thread-local, despite the name).
Definition thread.c:3508
VALUE rb_mutex_new(void)
Creates a mutex.
VALUE rb_thread_kill(VALUE thread)
Terminates the given thread.
Definition thread.c:2701
#define RUBY_UBF_IO
A special UBF for blocking IO operations.
Definition thread.h:382
VALUE rb_thread_main(void)
Obtains the "main" thread.
Definition thread.c:2940
VALUE rb_exec_recursive(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE h)
"Recursion" API entry point.
Definition thread.c:5258
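A minimal sketch of the recursion API guarding a walk over a possibly self-referential object (names hypothetical):
/* `recursive` is nonzero when obj is already being visited on this stack. */
static VALUE
demo_walk(VALUE obj, VALUE arg, int recursive)
{
    if (recursive) return rb_str_new_cstr("[...]");  /* cycle detected */
    return rb_inspect(obj);
}

static VALUE
demo_safe_inspect(VALUE obj)
{
    return rb_exec_recursive(demo_walk, obj, 0);
}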
void rb_thread_sleep_forever(void)
Blocks indefinitely.
Definition thread.c:1369
void rb_thread_fd_close(int fd)
Notifies a closing of a file descriptor to other threads.
Definition thread.c:2642
void rb_thread_wait_for(struct timeval time)
Identical to rb_thread_sleep(), except it takes struct timeval instead.
Definition thread.c:1401
VALUE rb_thread_stop(void)
Stops the current thread.
Definition thread.c:2852
VALUE rb_mutex_sleep(VALUE self, VALUE timeout)
Releases the lock held in the mutex and waits for the period of time; reacquires the lock on wakeup.
VALUE rb_exec_recursive_paired(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE p, VALUE h)
Identical to rb_exec_recursive(), except it checks for the recursion on the ordered pair of { g, p }.
Definition thread.c:5269
void rb_unblock_function_t(void *)
This is the type of UBFs.
Definition thread.h:336
void rb_thread_atfork_before_exec(void)
FIXME: the situation of this function is unclear.
Definition thread.c:4733
void rb_thread_check_ints(void)
Checks for interrupts.
Definition thread.c:1416
VALUE rb_thread_run(VALUE thread)
This is a rb_thread_wakeup() + rb_thread_schedule() combo.
Definition thread.c:2843
VALUE rb_thread_wakeup(VALUE thread)
Marks a given thread as eligible for scheduling.
Definition thread.c:2796
VALUE rb_mutex_unlock(VALUE mutex)
Releases the mutex.
VALUE rb_exec_recursive_paired_outer(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE p, VALUE h)
Identical to rb_exec_recursive_outer(), except it checks for the recursion on the ordered pair of { g, p }.
Definition thread.c:5299
void rb_thread_sleep_deadly(void)
Identical to rb_thread_sleep_forever(), except the thread calling this function is considered "dead" when the deadlock checker is triggered.
Definition thread.c:1376
void rb_thread_atfork(void)
A pthread_atfork(3posix)-like API.
Definition thread.c:4728
VALUE rb_thread_current(void)
Obtains the "current" thread.
Definition thread.c:2919
int rb_thread_alone(void)
Checks if the thread this function is running is the only thread that is currently alive.
Definition thread.c:3780
VALUE rb_thread_local_aset(VALUE thread, ID key, VALUE val)
This badly named function writes to Fiber-local storage (not Thread-local, despite the name).
Definition thread.c:3656
void rb_thread_schedule(void)
Tries to switch to another thread.
Definition thread.c:1464
#define RUBY_UBF_PROCESS
A special UBF for blocking process operations.
Definition thread.h:389
VALUE rb_exec_recursive_outer(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE h)
Identical to rb_exec_recursive(), except it calls f for outermost recursion only.
Definition thread.c:5281
VALUE rb_thread_wakeup_alive(VALUE thread)
Identical to rb_thread_wakeup(), except it doesn't raise on an already killed thread.
Definition thread.c:2805
VALUE rb_mutex_lock(VALUE mutex)
Attempts to lock the mutex.
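A minimal sketch tying the Mutex functions together; rb_mutex_sleep() drops the lock for the duration of the wait and reacquires it before returning:
static void
demo_mutex(void)
{
    VALUE mutex = rb_mutex_new();
    rb_mutex_lock(mutex);
    rb_mutex_sleep(mutex, DBL2NUM(0.1));  /* unlock, sleep ~0.1s, then relock */
    rb_mutex_unlock(mutex);
}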
void rb_thread_sleep(int sec)
Blocks for the given period of time.
Definition thread.c:1439
void rb_timespec_now(struct timespec *ts)
Fills the current time into the given struct.
Definition time.c:1943
struct timeval rb_time_timeval(VALUE time)
Converts an instance of rb_cTime to a struct timeval that represents the identical point of time.
Definition time.c:2881
VALUE rb_ivar_set(VALUE obj, ID name, VALUE val)
Identical to rb_iv_set(), except it accepts the name as an ID instead of a C string.
Definition variable.c:1854
VALUE rb_ivar_get(VALUE obj, ID name)
Identical to rb_iv_get(), except it accepts the name as an ID instead of a C string.
Definition variable.c:1340
VALUE rb_class_path(VALUE mod)
Identical to rb_mod_name(), except it returns #<Class: ...> style inspection for anonymous modules.
Definition variable.c:283
void rb_define_alloc_func(VALUE klass, rb_alloc_func_t func)
Sets the allocator function of a class.
int rb_sourceline(void)
Resembles __LINE__.
Definition vm.c:1844
static ID rb_intern_const(const char *str)
This is a "tiny optimisation" over rb_intern().
Definition symbol.h:276
ID rb_check_id(volatile VALUE *namep)
Detects if the given name is already interned or not.
Definition symbol.c:1095
VALUE rb_to_symbol(VALUE name)
Identical to rb_intern_str(), except it generates a dynamic symbol if necessary.
Definition string.c:12047
ID rb_to_id(VALUE str)
Converts the given name (typically a Symbol or String) into an ID.
Definition string.c:12037
void rb_define_const(VALUE klass, const char *name, VALUE val)
Defines a Ruby level constant under a namespace.
Definition variable.c:3690
VALUE rb_eIOTimeoutError
Indicates that a timeout has occurred while performing an IO operation.
Definition io.c:179
#define RB_NOGVL_UBF_ASYNC_SAFE
Passing this flag to rb_nogvl() indicates that the passed UBF is async-signal-safe.
Definition thread.h:60
void * rb_internal_thread_specific_get(VALUE thread_val, rb_internal_thread_specific_key_t key)
Get thread and tool specific data.
Definition thread.c:5872
#define RB_NOGVL_INTR_FAIL
Passing this flag to rb_nogvl() prevents it from checking interrupts.
Definition thread.h:48
void rb_internal_thread_specific_set(VALUE thread_val, rb_internal_thread_specific_key_t key, void *data)
Set thread and tool specific data.
Definition thread.c:5885
rb_internal_thread_specific_key_t rb_internal_thread_specific_key_create(void)
Create a key to store thread specific data.
Definition thread.c:5844
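A minimal sketch of the internal thread-specific API (intended for tools such as profilers): create the key once, then attach and read native data per thread. Names are hypothetical:
static rb_internal_thread_specific_key_t demo_key;

static void
demo_init(void)
{
    demo_key = rb_internal_thread_specific_key_create();  /* once per process */
}

static void
demo_tag(VALUE thread, void *payload)
{
    rb_internal_thread_specific_set(thread, demo_key, payload);
}

static void *
demo_peek(VALUE thread)
{
    return rb_internal_thread_specific_get(thread, demo_key);
}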
void * rb_nogvl(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2, int flags)
Identical to rb_thread_call_without_gvl(), except it additionally takes "flags" that change the behaviour.
Definition thread.c:1521
void * rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
(Re-)acquires the GVL.
Definition thread.c:1830
void * rb_thread_call_without_gvl2(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2)
Identical to rb_thread_call_without_gvl(), except it does not interface with signals etc.
Definition thread.c:1649
void * rb_thread_call_without_gvl(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2)
Allows the passed function to run in parallel with other Ruby threads.
Definition thread.c:1656
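A minimal sketch of the GVL-release pattern: the blocking read(2) runs without the GVL, RUBY_UBF_IO lets other threads interrupt it, and Ruby objects are only touched once the GVL is held again. All names are hypothetical; if the non-GVL callback itself needed Ruby APIs, it would re-enter via rb_thread_call_with_gvl() from inside:
#include <unistd.h>
#include "ruby/thread.h"

struct demo_read { int fd; char buf[4096]; ssize_t n; };

static void *
demo_blocking_read(void *p)  /* runs with the GVL released */
{
    struct demo_read *r = p;
    r->n = read(r->fd, r->buf, sizeof(r->buf));  /* may block */
    return NULL;
}

static VALUE
demo_read_fd(int fd)
{
    struct demo_read r = { fd, {0}, 0 };
    rb_thread_call_without_gvl(demo_blocking_read, &r, RUBY_UBF_IO, NULL);
    if (r.n < 0) rb_sys_fail("read");
    return rb_str_new(r.buf, r.n);  /* GVL is held again here */
}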
#define RB_BLOCK_CALL_FUNC_ARGLIST(yielded_arg, callback_arg)
Shim for block function parameters.
Definition iterator.h:58
VALUE rb_yield(VALUE val)
Yields the block.
Definition vm_eval.c:1376
void rb_throw_obj(VALUE tag, VALUE val)
Identical to rb_throw(), except it allows arbitrary Ruby object to become a tag.
Definition vm_eval.c:2254
static int rb_fd_max(const rb_fdset_t *f)
It seems this function has no use.
Definition largesize.h:209
void rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
Destructively overwrites an fdset with another.
void rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
Identical to rb_fd_copy(), except it copies an unlimited number of file descriptors.
void rb_fd_term(rb_fdset_t *f)
Destroys the rb_fdset_t, releasing any memory and resources it used.
static fd_set * rb_fd_ptr(const rb_fdset_t *f)
Raw pointer to fd_set.
Definition largesize.h:195
#define MEMCPY(p1, p2, type, n)
Handy macro to call memcpy.
Definition memory.h:366
#define ALLOCA_N(type, n)
Allocates an array of n elements of the given type on the stack, via alloca.
Definition memory.h:286
#define MEMZERO(p, type, n)
Handy macro to erase a region of memory.
Definition memory.h:354
#define RB_GC_GUARD(v)
Prevents premature destruction of local objects.
Definition memory.h:161
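A minimal sketch of why RB_GC_GUARD exists: after the last use of str the compiler may consider the variable dead, so the guard pins it until the raw pointer is no longer needed (demo_consume is hypothetical):
static void demo_consume(const char *p, long len);  /* hypothetical consumer */

static void
demo_guard(void)
{
    VALUE str = rb_str_new_cstr("payload");
    const char *p = RSTRING_PTR(str);
    demo_consume(p, RSTRING_LEN(str));  /* uses the raw bytes */
    RB_GC_GUARD(str);                   /* str must outlive every use of p */
}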
VALUE rb_thread_create(type *q, void *w)
Creates a rb_cThread instance.
void rb_hash_foreach(VALUE q, int_type *w, VALUE e)
Iterates over the given hash.
VALUE rb_ensure(type *q, VALUE w, type *e, VALUE r)
An equivalent of the ensure clause.
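A minimal sketch combining the two: a thread body whose cleanup runs via rb_ensure() whether the work returns or raises (names hypothetical):
static VALUE
demo_work(VALUE arg)
{
    rb_thread_sleep(1);
    return arg;
}

static VALUE
demo_cleanup(VALUE arg)
{
    return Qnil;  /* runs on normal return and on exception alike */
}

static VALUE
demo_thread_body(void *arg)
{
    return rb_ensure(demo_work, (VALUE)arg, demo_cleanup, Qnil);
}

static VALUE
demo_spawn(void)
{
    return rb_thread_create(demo_thread_body, NULL);
}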
#define rb_fd_isset
Queries if the given fd is in the rb_fdset_t.
Definition posix.h:60
#define rb_fd_select
Waits for multiple file descriptors at once.
Definition posix.h:66
#define rb_fd_init
Initialises the given rb_fdset_t.
Definition posix.h:63
#define rb_fd_set
Sets the given fd to the rb_fdset_t.
Definition posix.h:54
#define rb_fd_zero
Clears the given rb_fdset_t.
Definition posix.h:51
#define rb_fd_clr
Unsets the given fd from the rb_fdset_t.
Definition posix.h:57
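A minimal sketch of the rb_fdset_t lifecycle around rb_thread_fd_select() (documented further below), which, unlike a bare select(2), cooperates with the thread scheduler:
static int
demo_wait_readable(int fd)
{
    rb_fdset_t set;
    int n;

    rb_fd_init(&set);
    rb_fd_set(fd, &set);
    n = rb_thread_fd_select(fd + 1, &set, NULL, NULL, NULL);  /* NULL = no timeout */
    rb_fd_term(&set);
    return n;  /* >0 if fd became readable, negative on error */
}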
#define RARRAY_LEN
Just another name of rb_array_len.
Definition rarray.h:51
static int RARRAY_LENINT(VALUE ary)
Identical to rb_array_len(), except its return type is int rather than long.
Definition rarray.h:281
static void RARRAY_ASET(VALUE ary, long i, VALUE v)
Assigns an object in an array.
Definition rarray.h:386
#define RARRAY_AREF(a, i)
Queries the object at the given index of the array.
Definition rarray.h:403
#define RARRAY_CONST_PTR
Just another name of rb_array_const_ptr.
Definition rarray.h:52
static VALUE RBASIC_CLASS(VALUE obj)
Queries the class of an object.
Definition rbasic.h:152
#define RBASIC(obj)
Convenient casting macro.
Definition rbasic.h:40
#define RCLASS_SUPER
Just another name of rb_class_get_superclass.
Definition rclass.h:44
#define DATA_PTR(obj)
Convenient getter macro.
Definition rdata.h:71
#define RHASH_EMPTY_P(h)
Checks if the hash is empty.
Definition rhash.h:79
#define StringValueCStr(v)
Identical to StringValuePtr, except it additionally checks the contents for viability as a C string.
Definition rstring.h:89
#define RUBY_TYPED_DEFAULT_FREE
This is a value you can set to rb_data_type_struct::dfree.
Definition rtypeddata.h:79
#define TypedData_Get_Struct(obj, type, data_type, sval)
Obtains a C struct from inside of a wrapper Ruby object.
Definition rtypeddata.h:515
#define TypedData_Wrap_Struct(klass, data_type, sval)
Converts sval, a pointer to your struct, into a Ruby object.
Definition rtypeddata.h:449
#define TypedData_Make_Struct(klass, type, data_type, sval)
Identical to TypedData_Wrap_Struct, except it allocates a new data region internally instead of taking an existing one.
Definition rtypeddata.h:497
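A minimal sketch of the TypedData lifecycle: a C struct allocated inside a wrapper object (RUBY_TYPED_DEFAULT_FREE as its dfree), then unwrapped in a method. All names are hypothetical:
struct demo { long counter; };

static const rb_data_type_t demo_type = {
    "demo",
    { 0, RUBY_TYPED_DEFAULT_FREE, 0 },  /* no mark, default free, no size */
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY,
};

static VALUE
demo_alloc(VALUE klass)  /* registered with rb_define_alloc_func() */
{
    struct demo *d;
    return TypedData_Make_Struct(klass, struct demo, &demo_type, d);
}

static VALUE
demo_counter(VALUE self)
{
    struct demo *d;
    TypedData_Get_Struct(self, struct demo, &demo_type, d);
    return LONG2FIX(d->counter);
}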
#define errno
Ractor-aware version of errno.
Definition ruby.h:388
int ruby_native_thread_p(void)
Queries if the thread which calls this function is a Ruby thread.
Definition thread.c:5470
#define RB_PASS_CALLED_KEYWORDS
Pass keywords if current method is called with keywords, useful for argument delegation.
Definition scan_args.h:78
Scheduler APIs.
VALUE rb_fiber_scheduler_current(void)
Identical to rb_fiber_scheduler_get(), except it also returns RUBY_Qnil in case of a blocking fiber.
Definition scheduler.c:219
VALUE rb_fiber_scheduler_block(VALUE scheduler, VALUE blocker, VALUE timeout)
Non-blocking wait for the passed "blocker", which is for instance Thread.join or Mutex.lock.
Definition scheduler.c:383
VALUE rb_fiber_scheduler_set(VALUE scheduler)
Destructively assigns the passed scheduler to that of the current thread that is calling this function.
Definition scheduler.c:180
VALUE rb_fiber_scheduler_unblock(VALUE scheduler, VALUE blocker, VALUE fiber)
Wakes up a fiber previously blocked using rb_fiber_scheduler_block().
Definition scheduler.c:402
int rb_thread_fd_select(int nfds, rb_fdset_t *rfds, rb_fdset_t *wfds, rb_fdset_t *efds, struct timeval *timeout)
Waits for multiple file descriptors at once.
Definition thread.c:4267
#define rb_fd_resize(n, f)
Does nothing (defined for compatibility).
Definition select.h:43
static bool RB_TEST(VALUE obj)
Emulates Ruby's "if" statement.
#define RTEST
This is an old name of RB_TEST.
#define _(args)
This was a transition path from K&R to ANSI.
Definition stdarg.h:35
rb_data_type_struct
This is the struct that holds the necessary info for a typed struct: its name, callbacks, and flags.
Definition rtypeddata.h:200
rb_fdset_t
The data structure which wraps the fd_set bitmap used by select(2).
Definition largesize.h:71
int maxfd
Maximum allowed number of FDs.
Definition largesize.h:72
fd_set * fdset
File descriptors buffer.
Definition largesize.h:73
int capa
Maximum allowed number of FDs.
Definition win32.h:50
const rb_iseq_t * iseqptr
iseq pointer, should be separated from iseqval
Definition method.h:135
void rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
Blocks until the current thread obtains a lock.
Definition thread.c:296
void rb_native_mutex_lock(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_lock.
void rb_native_mutex_initialize(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_initialize.
void rb_native_mutex_unlock(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_unlock.
void rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
Releases a lock.
Definition thread.c:302
void rb_native_mutex_destroy(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_destroy
void rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
Fills the passed lock with an initial value.
Definition thread.c:284
void rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
Destroys the passed mutex.
Definition thread.c:290
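A minimal sketch of the native-lock functions protecting C-level state; unlike a Ruby Mutex, this does not interact with the GVL or the interrupt machinery (names hypothetical):
static rb_nativethread_lock_t demo_lock;  /* initialise once, e.g. at Init_ time */
static long demo_counter;

static void
demo_setup(void)
{
    rb_nativethread_lock_initialize(&demo_lock);
}

static void
demo_increment(void)
{
    rb_nativethread_lock_lock(&demo_lock);
    demo_counter++;  /* critical section */
    rb_nativethread_lock_unlock(&demo_lock);
}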
uintptr_t ID
Type that represents a Ruby identifier such as a variable name.
Definition value.h:52
uintptr_t VALUE
Type that represents a Ruby object.
Definition value.h:40