Ruby 3.3.2p78 (2024-05-30 revision e5a195edf62fe1bf7146a191da13fa1c4fecbd71)
thread_pthread.c
1/* -*-c-*- */
2/**********************************************************************
3
4 thread_pthread.c -
5
6 $Author$
7
8 Copyright (C) 2004-2007 Koichi Sasada
9
10**********************************************************************/
11
12#ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
13
14#include "internal/gc.h"
15#include "rjit.h"
16
17#ifdef HAVE_SYS_RESOURCE_H
18#include <sys/resource.h>
19#endif
20#ifdef HAVE_THR_STKSEGMENT
21#include <thread.h>
22#endif
23#if defined(HAVE_FCNTL_H)
24#include <fcntl.h>
25#elif defined(HAVE_SYS_FCNTL_H)
26#include <sys/fcntl.h>
27#endif
28#ifdef HAVE_SYS_PRCTL_H
29#include <sys/prctl.h>
30#endif
31#if defined(HAVE_SYS_TIME_H)
32#include <sys/time.h>
33#endif
34#if defined(__HAIKU__)
35#include <kernel/OS.h>
36#endif
37#ifdef __linux__
38#include <sys/syscall.h> /* for SYS_gettid */
39#endif
40#include <time.h>
41#include <signal.h>
42
43#if defined __APPLE__
44# include <AvailabilityMacros.h>
45#endif
46
47#if defined(HAVE_SYS_EVENTFD_H) && defined(HAVE_EVENTFD)
48# define USE_EVENTFD (1)
49# include <sys/eventfd.h>
50#else
51# define USE_EVENTFD (0)
52#endif
53
54#if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK) && \
55 defined(CLOCK_REALTIME) && defined(CLOCK_MONOTONIC) && \
56 defined(HAVE_CLOCK_GETTIME)
57static pthread_condattr_t condattr_mono;
58static pthread_condattr_t *condattr_monotonic = &condattr_mono;
59#else
60static const void *const condattr_monotonic = NULL;
61#endif
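// When pthread_condattr_setclock() and CLOCK_MONOTONIC are available, the
// condition variables below are created against the monotonic clock (see
// rb_native_cond_initialize() and native_cond_timeout()), so timed waits are
// immune to wall-clock adjustments. Otherwise condattr_monotonic stays NULL
// and absolute timeouts fall back to CLOCK_REALTIME timestamps.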
62
63#include COROUTINE_H
64
65#ifndef HAVE_SYS_EVENT_H
66#define HAVE_SYS_EVENT_H 0
67#endif
68
69#ifndef HAVE_SYS_EPOLL_H
70#define HAVE_SYS_EPOLL_H 0
71#else
72// force setting for debug
73// #undef HAVE_SYS_EPOLL_H
74// #define HAVE_SYS_EPOLL_H 0
75#endif
76
77#ifndef USE_MN_THREADS
78 #if defined(__EMSCRIPTEN__) || defined(COROUTINE_PTHREAD_CONTEXT)
79 // __EMSCRIPTEN__ provides epoll* declarations, but no implementations.
80 // with COROUTINE_PTHREAD_CONTEXT, M:N threads are not worth using.
81 #define USE_MN_THREADS 0
82 #elif HAVE_SYS_EPOLL_H
83 #include <sys/epoll.h>
84 #define USE_MN_THREADS 1
85 #elif HAVE_SYS_EVENT_H
86 #include <sys/event.h>
87 #define USE_MN_THREADS 1
88 #else
89 #define USE_MN_THREADS 0
90 #endif
91#endif
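// With USE_MN_THREADS, many Ruby threads (M) can be multiplexed onto fewer
// native threads (N). M:N scheduling needs an event multiplexer for threads
// blocked on I/O: epoll (sys/epoll.h) on Linux or kqueue (sys/event.h) on
// BSD/macOS. Without either, or on __EMSCRIPTEN__/COROUTINE_PTHREAD_CONTEXT,
// it is disabled.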
92
93// native thread wrappers
94
95#define NATIVE_MUTEX_LOCK_DEBUG 0
96
97static void
98mutex_debug(const char *msg, void *lock)
99{
100 if (NATIVE_MUTEX_LOCK_DEBUG) {
101 int r;
102 static pthread_mutex_t dbglock = PTHREAD_MUTEX_INITIALIZER;
103
104 if ((r = pthread_mutex_lock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
105 fprintf(stdout, "%s: %p\n", msg, lock);
106 if ((r = pthread_mutex_unlock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
107 }
108}
109
110void
111rb_native_mutex_lock(pthread_mutex_t *lock)
112{
113 int r;
114 mutex_debug("lock", lock);
115 if ((r = pthread_mutex_lock(lock)) != 0) {
116 rb_bug_errno("pthread_mutex_lock", r);
117 }
118}
119
120void
121rb_native_mutex_unlock(pthread_mutex_t *lock)
122{
123 int r;
124 mutex_debug("unlock", lock);
125 if ((r = pthread_mutex_unlock(lock)) != 0) {
126 rb_bug_errno("pthread_mutex_unlock", r);
127 }
128}
129
130int
131rb_native_mutex_trylock(pthread_mutex_t *lock)
132{
133 int r;
134 mutex_debug("trylock", lock);
135 if ((r = pthread_mutex_trylock(lock)) != 0) {
136 if (r == EBUSY) {
137 return EBUSY;
138 }
139 else {
140 rb_bug_errno("pthread_mutex_trylock", r);
141 }
142 }
143 return 0;
144}
145
146void
147rb_native_mutex_initialize(pthread_mutex_t *lock)
148{
149 int r = pthread_mutex_init(lock, 0);
150 mutex_debug("init", lock);
151 if (r != 0) {
152 rb_bug_errno("pthread_mutex_init", r);
153 }
154}
155
156void
157rb_native_mutex_destroy(pthread_mutex_t *lock)
158{
159 int r = pthread_mutex_destroy(lock);
160 mutex_debug("destroy", lock);
161 if (r != 0) {
162 rb_bug_errno("pthread_mutex_destroy", r);
163 }
164}
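// Illustrative sketch (not part of the original source; names are hypothetical):
// the wrappers above form a plain critical section and abort the process via
// rb_bug_errno() on any unexpected pthread error.
#if 0
static pthread_mutex_t example_lock = PTHREAD_MUTEX_INITIALIZER;
static int example_counter;

static void
example_increment(void)
{
    rb_native_mutex_lock(&example_lock);
    example_counter++;
    rb_native_mutex_unlock(&example_lock);
}
#endif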
165
166void
167rb_native_cond_initialize(rb_nativethread_cond_t *cond)
168{
169 int r = pthread_cond_init(cond, condattr_monotonic);
170 if (r != 0) {
171 rb_bug_errno("pthread_cond_init", r);
172 }
173}
174
175void
176rb_native_cond_destroy(rb_nativethread_cond_t *cond)
177{
178 int r = pthread_cond_destroy(cond);
179 if (r != 0) {
180 rb_bug_errno("pthread_cond_destroy", r);
181 }
182}
183
184/*
185 * In OS X 10.7 (Lion), pthread_cond_signal and pthread_cond_broadcast return
186 * EAGAIN after retrying 8192 times. You can see this in the following source:
187 *
188 * http://www.opensource.apple.com/source/Libc/Libc-763.11/pthreads/pthread_cond.c
189 *
190 * The following rb_native_cond_signal and rb_native_cond_broadcast functions
191 * need to retry until the pthread functions no longer return EAGAIN.
192 */
193
194void
195rb_native_cond_signal(rb_nativethread_cond_t *cond)
196{
197 int r;
198 do {
199 r = pthread_cond_signal(cond);
200 } while (r == EAGAIN);
201 if (r != 0) {
202 rb_bug_errno("pthread_cond_signal", r);
203 }
204}
205
206void
207rb_native_cond_broadcast(rb_nativethread_cond_t *cond)
208{
209 int r;
210 do {
211 r = pthread_cond_broadcast(cond);
212 } while (r == EAGAIN);
213 if (r != 0) {
214 rb_bug_errno("rb_native_cond_broadcast", r);
215 }
216}
217
218void
219rb_native_cond_wait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex)
220{
221 int r = pthread_cond_wait(cond, mutex);
222 if (r != 0) {
223 rb_bug_errno("pthread_cond_wait", r);
224 }
225}
226
227static int
228native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex, const rb_hrtime_t *abs)
229{
230 int r;
231 struct timespec ts;
232
233 /*
234 * An old Linux kernel may return EINTR, even though POSIX says
235 * "These functions shall not return an error code of [EINTR]":
236 * http://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_cond_timedwait.html
237 * Let's hide it from the architecture-generic code.
238 */
239 do {
240 rb_hrtime2timespec(&ts, abs);
241 r = pthread_cond_timedwait(cond, mutex, &ts);
242 } while (r == EINTR);
243
244 if (r != 0 && r != ETIMEDOUT) {
245 rb_bug_errno("pthread_cond_timedwait", r);
246 }
247
248 return r;
249}
250
251static rb_hrtime_t
252native_cond_timeout(rb_nativethread_cond_t *cond, const rb_hrtime_t rel)
253{
254 if (condattr_monotonic) {
255 return rb_hrtime_add(rb_hrtime_now(), rel);
256 }
257 else {
258 struct timespec ts;
259
260 rb_timespec_now(&ts);
261 return rb_hrtime_add(rb_timespec2hrtime(&ts), rel);
262 }
263}
264
265void
266rb_native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex, unsigned long msec)
267{
268 rb_hrtime_t hrmsec = native_cond_timeout(cond, RB_HRTIME_PER_MSEC * msec);
269 native_cond_timedwait(cond, mutex, &hrmsec);
270}
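// Illustrative sketch (not part of the original source; names are hypothetical):
// waiting for a flag with a bounded wait using the wrappers above.
// rb_native_cond_timedwait() turns the relative timeout in milliseconds into
// an absolute deadline internally, so spurious wakeups are harmless here.
#if 0
static void
example_wait_for_flag(rb_nativethread_cond_t *cond, pthread_mutex_t *lock, volatile int *flag)
{
    rb_native_mutex_lock(lock);
    while (!*flag) {
        rb_native_cond_timedwait(cond, lock, 100); // recheck at least every 100 ms
    }
    rb_native_mutex_unlock(lock);
}
#endif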
271
272// thread scheduling
273
274static rb_internal_thread_event_hook_t *rb_internal_thread_event_hooks = NULL;
275static void rb_thread_execute_hooks(rb_event_flag_t event, rb_thread_t *th);
276
277#if 0
278static const char *
279event_name(rb_event_flag_t event)
280{
281 switch (event) {
282 case RUBY_INTERNAL_THREAD_EVENT_STARTED:
283 return "STARTED";
284 case RUBY_INTERNAL_THREAD_EVENT_READY:
285 return "READY";
286 case RUBY_INTERNAL_THREAD_EVENT_RESUMED:
287 return "RESUMED";
288 case RUBY_INTERNAL_THREAD_EVENT_SUSPENDED:
289 return "SUSPENDED";
290 case RUBY_INTERNAL_THREAD_EVENT_EXITED:
291 return "EXITED";
292 }
293 return "no-event";
294}
295
296#define RB_INTERNAL_THREAD_HOOK(event, th) \
297 if (UNLIKELY(rb_internal_thread_event_hooks)) { \
298 fprintf(stderr, "[thread=%"PRIxVALUE"] %s in %s (%s:%d)\n", th->self, event_name(event), __func__, __FILE__, __LINE__); \
299 rb_thread_execute_hooks(event, th); \
300 }
301#else
302#define RB_INTERNAL_THREAD_HOOK(event, th) if (UNLIKELY(rb_internal_thread_event_hooks)) { rb_thread_execute_hooks(event, th); }
303#endif
304
305static rb_serial_t current_fork_gen = 1; /* We can't use GET_VM()->fork_gen */
306
307#if defined(SIGVTALRM) && !defined(__CYGWIN__) && !defined(__EMSCRIPTEN__)
308# define USE_UBF_LIST 1
309#endif
310
311static void threadptr_trap_interrupt(rb_thread_t *);
312
313#ifdef HAVE_SCHED_YIELD
314#define native_thread_yield() (void)sched_yield()
315#else
316#define native_thread_yield() ((void)0)
317#endif
318
319/* 100ms. 10ms is too small for user level thread scheduling
320 * on recent Linux (tested on 2.6.35)
321 */
322#define TIME_QUANTUM_MSEC (100)
323#define TIME_QUANTUM_USEC (TIME_QUANTUM_MSEC * 1000)
324#define TIME_QUANTUM_NSEC (TIME_QUANTUM_USEC * 1000)
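// i.e. a 100 ms timeslice, also expressed as 100,000 us and 100,000,000 ns
// for the code paths that need those units.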
325
326static void native_thread_dedicated_inc(rb_vm_t *vm, rb_ractor_t *cr, struct rb_native_thread *nt);
327static void native_thread_dedicated_dec(rb_vm_t *vm, rb_ractor_t *cr, struct rb_native_thread *nt);
328static void native_thread_assign(struct rb_native_thread *nt, rb_thread_t *th);
329
330static void ractor_sched_enq(rb_vm_t *vm, rb_ractor_t *r);
331static void timer_thread_wakeup(void);
332static void timer_thread_wakeup_locked(rb_vm_t *vm);
333static void timer_thread_wakeup_force(void);
334static void thread_sched_switch(rb_thread_t *cth, rb_thread_t *next_th);
335
336#define thread_sched_dump(s) thread_sched_dump_(__FILE__, __LINE__, s)
337
338static bool
339th_has_dedicated_nt(const rb_thread_t *th)
340{
341 // TODO: th->has_dedicated_nt
342 return th->nt->dedicated > 0;
343}
344
345RBIMPL_ATTR_MAYBE_UNUSED()
346static void
347thread_sched_dump_(const char *file, int line, struct rb_thread_sched *sched)
348{
349 fprintf(stderr, "@%s:%d running:%d\n", file, line, sched->running ? (int)sched->running->serial : -1);
350 rb_thread_t *th;
351 int i = 0;
352 ccan_list_for_each(&sched->readyq, th, sched.node.readyq) {
353 i++; if (i>10) rb_bug("too many");
354 fprintf(stderr, " ready:%d (%sNT:%d)\n", th->serial,
355 th->nt ? (th->nt->dedicated ? "D" : "S") : "x",
356 th->nt ? (int)th->nt->serial : -1);
357 }
358}
359
360#define ractor_sched_dump(s) ractor_sched_dump_(__FILE__, __LINE__, s)
361
362RBIMPL_ATTR_MAYBE_UNUSED()
363static void
364ractor_sched_dump_(const char *file, int line, rb_vm_t *vm)
365{
366 rb_ractor_t *r;
367
368 fprintf(stderr, "ractor_sched_dump %s:%d\n", file, line);
369
370 int i = 0;
371 ccan_list_for_each(&vm->ractor.sched.grq, r, threads.sched.grq_node) {
372 i++;
373 if (i>10) rb_bug("!!");
374 fprintf(stderr, " %d ready:%d\n", i, rb_ractor_id(r));
375 }
376}
377
378#define thread_sched_lock(a, b) thread_sched_lock_(a, b, __FILE__, __LINE__)
379#define thread_sched_unlock(a, b) thread_sched_unlock_(a, b, __FILE__, __LINE__)
380
381static void
382thread_sched_lock_(struct rb_thread_sched *sched, rb_thread_t *th, const char *file, int line)
383{
384 rb_native_mutex_lock(&sched->lock_);
385
386#if VM_CHECK_MODE
387 RUBY_DEBUG_LOG2(file, line, "th:%u prev_owner:%u", rb_th_serial(th), rb_th_serial(sched->lock_owner));
388 VM_ASSERT(sched->lock_owner == NULL);
389 sched->lock_owner = th;
390#else
391 RUBY_DEBUG_LOG2(file, line, "th:%u", rb_th_serial(th));
392#endif
393}
394
395static void
396thread_sched_unlock_(struct rb_thread_sched *sched, rb_thread_t *th, const char *file, int line)
397{
398 RUBY_DEBUG_LOG2(file, line, "th:%u", rb_th_serial(th));
399
400#if VM_CHECK_MODE
401 VM_ASSERT(sched->lock_owner == th);
402 sched->lock_owner = NULL;
403#endif
404
405 rb_native_mutex_unlock(&sched->lock_);
406}
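// Typical locking pattern throughout this file (see e.g. rb_add_running_thread()):
//
//     thread_sched_lock(sched, th);
//     {
//         ... manipulate sched->readyq / sched->running ...
//     }
//     thread_sched_unlock(sched, th);
//
// Under VM_CHECK_MODE the owner is recorded so ASSERT_thread_sched_locked()
// below can verify that the current thread really holds the lock.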
407
408static void
409thread_sched_set_lock_owner(struct rb_thread_sched *sched, rb_thread_t *th)
410{
411 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
412
413#if VM_CHECK_MODE > 0
414 sched->lock_owner = th;
415#endif
416}
417
418static void
419ASSERT_thread_sched_locked(struct rb_thread_sched *sched, rb_thread_t *th)
420{
421 VM_ASSERT(rb_native_mutex_trylock(&sched->lock_) == EBUSY);
422
423#if VM_CHECK_MODE
424 if (th) {
425 VM_ASSERT(sched->lock_owner == th);
426 }
427 else {
428 VM_ASSERT(sched->lock_owner != NULL);
429 }
430#endif
431}
432
433#define ractor_sched_lock(a, b) ractor_sched_lock_(a, b, __FILE__, __LINE__)
434#define ractor_sched_unlock(a, b) ractor_sched_unlock_(a, b, __FILE__, __LINE__)
435
436RBIMPL_ATTR_MAYBE_UNUSED()
437static unsigned int
438rb_ractor_serial(const rb_ractor_t *r) {
439 if (r) {
440 return rb_ractor_id(r);
441 }
442 else {
443 return 0;
444 }
445}
446
447static void
448ractor_sched_set_locked(rb_vm_t *vm, rb_ractor_t *cr)
449{
450#if VM_CHECK_MODE > 0
451 VM_ASSERT(vm->ractor.sched.lock_owner == NULL);
452 VM_ASSERT(vm->ractor.sched.locked == false);
453
454 vm->ractor.sched.lock_owner = cr;
455 vm->ractor.sched.locked = true;
456#endif
457}
458
459static void
460ractor_sched_set_unlocked(rb_vm_t *vm, rb_ractor_t *cr)
461{
462#if VM_CHECK_MODE > 0
463 VM_ASSERT(vm->ractor.sched.locked);
464 VM_ASSERT(vm->ractor.sched.lock_owner == cr);
465
466 vm->ractor.sched.locked = false;
467 vm->ractor.sched.lock_owner = NULL;
468#endif
469}
470
471static void
472ractor_sched_lock_(rb_vm_t *vm, rb_ractor_t *cr, const char *file, int line)
473{
474 rb_native_mutex_lock(&vm->ractor.sched.lock);
475
476#if VM_CHECK_MODE
477 RUBY_DEBUG_LOG2(file, line, "cr:%u prev_owner:%u", rb_ractor_serial(cr), rb_ractor_serial(vm->ractor.sched.lock_owner));
478#else
479 RUBY_DEBUG_LOG2(file, line, "cr:%u", rb_ractor_serial(cr));
480#endif
481
482 ractor_sched_set_locked(vm, cr);
483}
484
485static void
486ractor_sched_unlock_(rb_vm_t *vm, rb_ractor_t *cr, const char *file, int line)
487{
488 RUBY_DEBUG_LOG2(file, line, "cr:%u", rb_ractor_serial(cr));
489
490 ractor_sched_set_unlocked(vm, cr);
491 rb_native_mutex_unlock(&vm->ractor.sched.lock);
492}
493
494static void
495ASSERT_ractor_sched_locked(rb_vm_t *vm, rb_ractor_t *cr)
496{
497 VM_ASSERT(rb_native_mutex_trylock(&vm->ractor.sched.lock) == EBUSY);
498 VM_ASSERT(vm->ractor.sched.locked);
499 VM_ASSERT(cr == NULL || vm->ractor.sched.lock_owner == cr);
500}
501
502RBIMPL_ATTR_MAYBE_UNUSED()
503static bool
504ractor_sched_running_threads_contain_p(rb_vm_t *vm, rb_thread_t *th)
505{
506 rb_thread_t *rth;
507 ccan_list_for_each(&vm->ractor.sched.running_threads, rth, sched.node.running_threads) {
508 if (rth == th) return true;
509 }
510 return false;
511}
512
513RBIMPL_ATTR_MAYBE_UNUSED()
514static unsigned int
515ractor_sched_running_threads_size(rb_vm_t *vm)
516{
517 rb_thread_t *th;
518 unsigned int i = 0;
519 ccan_list_for_each(&vm->ractor.sched.running_threads, th, sched.node.running_threads) {
520 i++;
521 }
522 return i;
523}
524
525RBIMPL_ATTR_MAYBE_UNUSED()
526static unsigned int
527ractor_sched_timeslice_threads_size(rb_vm_t *vm)
528{
529 rb_thread_t *th;
530 unsigned int i = 0;
531 ccan_list_for_each(&vm->ractor.sched.timeslice_threads, th, sched.node.timeslice_threads) {
532 i++;
533 }
534 return i;
535}
536
537RBIMPL_ATTR_MAYBE_UNUSED()
538static bool
539ractor_sched_timeslice_threads_contain_p(rb_vm_t *vm, rb_thread_t *th)
540{
541 rb_thread_t *rth;
542 ccan_list_for_each(&vm->ractor.sched.timeslice_threads, rth, sched.node.timeslice_threads) {
543 if (rth == th) return true;
544 }
545 return false;
546}
547
548static void ractor_sched_barrier_join_signal_locked(rb_vm_t *vm);
549static void ractor_sched_barrier_join_wait_locked(rb_vm_t *vm, rb_thread_t *th);
550
551// setup timeslice signals by the timer thread.
552static void
553thread_sched_setup_running_threads(struct rb_thread_sched *sched, rb_ractor_t *cr, rb_vm_t *vm,
554 rb_thread_t *add_th, rb_thread_t *del_th, rb_thread_t *add_timeslice_th)
555{
556#if USE_RUBY_DEBUG_LOG
557 unsigned int prev_running_cnt = vm->ractor.sched.running_cnt;
558#endif
559
560 rb_thread_t *del_timeslice_th;
561
562 if (del_th && sched->is_running_timeslice) {
563 del_timeslice_th = del_th;
564 sched->is_running_timeslice = false;
565 }
566 else {
567 del_timeslice_th = NULL;
568 }
569
570 RUBY_DEBUG_LOG("+:%u -:%u +ts:%u -ts:%u",
571 rb_th_serial(add_th), rb_th_serial(del_th),
572 rb_th_serial(add_timeslice_th), rb_th_serial(del_timeslice_th));
573
574 ractor_sched_lock(vm, cr);
575 {
576 // update running_threads
577 if (del_th) {
578 VM_ASSERT(ractor_sched_running_threads_contain_p(vm, del_th));
579 VM_ASSERT(del_timeslice_th != NULL ||
580 !ractor_sched_timeslice_threads_contain_p(vm, del_th));
581
582 ccan_list_del_init(&del_th->sched.node.running_threads);
583 vm->ractor.sched.running_cnt--;
584
585 if (UNLIKELY(vm->ractor.sched.barrier_waiting)) {
586 ractor_sched_barrier_join_signal_locked(vm);
587 }
588 sched->is_running = false;
589 }
590
591 if (add_th) {
592 if (UNLIKELY(vm->ractor.sched.barrier_waiting)) {
593 RUBY_DEBUG_LOG("barrier-wait");
594
595 ractor_sched_barrier_join_signal_locked(vm);
596 ractor_sched_barrier_join_wait_locked(vm, add_th);
597 }
598
599 VM_ASSERT(!ractor_sched_running_threads_contain_p(vm, add_th));
600 VM_ASSERT(!ractor_sched_timeslice_threads_contain_p(vm, add_th));
601
602 ccan_list_add(&vm->ractor.sched.running_threads, &add_th->sched.node.running_threads);
603 vm->ractor.sched.running_cnt++;
604 sched->is_running = true;
605 }
606
607 if (add_timeslice_th) {
608 // update timeslice threads
609 int was_empty = ccan_list_empty(&vm->ractor.sched.timeslice_threads);
610 VM_ASSERT(!ractor_sched_timeslice_threads_contain_p(vm, add_timeslice_th));
611 ccan_list_add(&vm->ractor.sched.timeslice_threads, &add_timeslice_th->sched.node.timeslice_threads);
612 sched->is_running_timeslice = true;
613 if (was_empty) {
614 timer_thread_wakeup_locked(vm);
615 }
616 }
617
618 if (del_timeslice_th) {
619 VM_ASSERT(ractor_sched_timeslice_threads_contain_p(vm, del_timeslice_th));
620 ccan_list_del_init(&del_timeslice_th->sched.node.timeslice_threads);
621 }
622
623 VM_ASSERT(ractor_sched_running_threads_size(vm) == vm->ractor.sched.running_cnt);
624 VM_ASSERT(ractor_sched_timeslice_threads_size(vm) <= vm->ractor.sched.running_cnt);
625 }
626 ractor_sched_unlock(vm, cr);
627
628 if (add_th && !del_th && UNLIKELY(vm->ractor.sync.lock_owner != NULL)) {
629 // this can happen right after barrier synchronization by another ractor
630 rb_thread_t *lock_owner = NULL;
631#if VM_CHECK_MODE
632 lock_owner = sched->lock_owner;
633#endif
634 thread_sched_unlock(sched, lock_owner);
635 {
636 RB_VM_LOCK_ENTER();
637 RB_VM_LOCK_LEAVE();
638 }
639 thread_sched_lock(sched, lock_owner);
640 }
641
642 //RUBY_DEBUG_LOG("+:%u -:%u +ts:%u -ts:%u run:%u->%u",
643 // rb_th_serial(add_th), rb_th_serial(del_th),
644 // rb_th_serial(add_timeslice_th), rb_th_serial(del_timeslice_th),
645 RUBY_DEBUG_LOG("run:%u->%u", prev_running_cnt, vm->ractor.sched.running_cnt);
646}
647
648static void
649thread_sched_add_running_thread(struct rb_thread_sched *sched, rb_thread_t *th)
650{
651 ASSERT_thread_sched_locked(sched, th);
652 VM_ASSERT(sched->running == th);
653
654 rb_vm_t *vm = th->vm;
655 thread_sched_setup_running_threads(sched, th->ractor, vm, th, NULL, ccan_list_empty(&sched->readyq) ? NULL : th);
656}
657
658static void
659thread_sched_del_running_thread(struct rb_thread_sched *sched, rb_thread_t *th)
660{
661 ASSERT_thread_sched_locked(sched, th);
662
663 rb_vm_t *vm = th->vm;
664 thread_sched_setup_running_threads(sched, th->ractor, vm, NULL, th, NULL);
665}
666
667void
668rb_add_running_thread(rb_thread_t *th)
669{
670 struct rb_thread_sched *sched = TH_SCHED(th);
671
672 thread_sched_lock(sched, th);
673 {
674 thread_sched_add_running_thread(sched, th);
675 }
676 thread_sched_unlock(sched, th);
677}
678
679void
680rb_del_running_thread(rb_thread_t *th)
681{
682 struct rb_thread_sched *sched = TH_SCHED(th);
683
684 thread_sched_lock(sched, th);
685 {
686 thread_sched_del_running_thread(sched, th);
687 }
688 thread_sched_unlock(sched, th);
689}
690
691// setup the current or next running thread
692// sched->running should be set only in this function.
693//
694// if th is NULL, there are no running threads.
695static void
696thread_sched_set_running(struct rb_thread_sched *sched, rb_thread_t *th)
697{
698 RUBY_DEBUG_LOG("th:%u->th:%u", rb_th_serial(sched->running), rb_th_serial(th));
699 VM_ASSERT(sched->running != th);
700
701 sched->running = th;
702}
703
704RBIMPL_ATTR_MAYBE_UNUSED()
705static bool
706thread_sched_readyq_contain_p(struct rb_thread_sched *sched, rb_thread_t *th)
707{
708 rb_thread_t *rth;
709 ccan_list_for_each(&sched->readyq, rth, sched.node.readyq) {
710 if (rth == th) return true;
711 }
712 return false;
713}
714
715// dequeue a thread from the ready queue.
716// if the ready queue is empty, return NULL.
717//
718// return the dequeued thread (or NULL).
719static rb_thread_t *
720thread_sched_deq(struct rb_thread_sched *sched)
721{
722 ASSERT_thread_sched_locked(sched, NULL);
723 rb_thread_t *next_th;
724
725 VM_ASSERT(sched->running != NULL);
726
727 if (ccan_list_empty(&sched->readyq)) {
728 next_th = NULL;
729 }
730 else {
731 next_th = ccan_list_pop(&sched->readyq, rb_thread_t, sched.node.readyq);
732
733 VM_ASSERT(sched->readyq_cnt > 0);
734 sched->readyq_cnt--;
735 ccan_list_node_init(&next_th->sched.node.readyq);
736 }
737
738 RUBY_DEBUG_LOG("next_th:%u readyq_cnt:%d", rb_th_serial(next_th), sched->readyq_cnt);
739
740 return next_th;
741}
742
743// enqueue ready thread to the ready queue.
744static void
745thread_sched_enq(struct rb_thread_sched *sched, rb_thread_t *ready_th)
746{
747 ASSERT_thread_sched_locked(sched, NULL);
748 RUBY_DEBUG_LOG("ready_th:%u readyq_cnt:%d", rb_th_serial(ready_th), sched->readyq_cnt);
749
750 VM_ASSERT(sched->running != NULL);
751 VM_ASSERT(!thread_sched_readyq_contain_p(sched, ready_th));
752
753 if (sched->is_running) {
754 if (ccan_list_empty(&sched->readyq)) {
755 // add sched->running to timeslice
756 thread_sched_setup_running_threads(sched, ready_th->ractor, ready_th->vm, NULL, NULL, sched->running);
757 }
758 }
759 else {
760 VM_ASSERT(!ractor_sched_timeslice_threads_contain_p(ready_th->vm, sched->running));
761 }
762
763 ccan_list_add_tail(&sched->readyq, &ready_th->sched.node.readyq);
764 sched->readyq_cnt++;
765}
766
767// DNT: kick condvar
768// SNT: TODO
769static void
770thread_sched_wakeup_running_thread(struct rb_thread_sched *sched, rb_thread_t *next_th, bool will_switch)
771{
772 ASSERT_thread_sched_locked(sched, NULL);
773 VM_ASSERT(sched->running == next_th);
774
775 if (next_th) {
776 if (next_th->nt) {
777 if (th_has_dedicated_nt(next_th)) {
778 RUBY_DEBUG_LOG("pinning th:%u", next_th->serial);
779 rb_native_cond_signal(&next_th->nt->cond.readyq);
780 }
781 else {
782 // TODO
783 RUBY_DEBUG_LOG("th:%u is already running.", next_th->serial);
784 }
785 }
786 else {
787 if (will_switch) {
788 RUBY_DEBUG_LOG("th:%u (do nothing)", rb_th_serial(next_th));
789 }
790 else {
791 RUBY_DEBUG_LOG("th:%u (enq)", rb_th_serial(next_th));
792 ractor_sched_enq(next_th->vm, next_th->ractor);
793 }
794 }
795 }
796 else {
797 RUBY_DEBUG_LOG("no waiting threads%s", "");
798 }
799}
800
801// waiting -> ready (locked)
802static void
803thread_sched_to_ready_common(struct rb_thread_sched *sched, rb_thread_t *th, bool wakeup, bool will_switch)
804{
805 RUBY_DEBUG_LOG("th:%u running:%u redyq_cnt:%d", rb_th_serial(th), rb_th_serial(sched->running), sched->readyq_cnt);
806
807 VM_ASSERT(sched->running != th);
808 VM_ASSERT(!thread_sched_readyq_contain_p(sched, th));
809 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_READY, th);
810
811 if (sched->running == NULL) {
812 thread_sched_set_running(sched, th);
813 if (wakeup) thread_sched_wakeup_running_thread(sched, th, will_switch);
814 }
815 else {
816 thread_sched_enq(sched, th);
817 }
818}
819
820// waiting -> ready
821//
822// `th` was put into the "waiting" state by `thread_sched_to_waiting`,
823// and `thread_sched_to_ready` enqueues `th` onto the thread ready queue.
824RBIMPL_ATTR_MAYBE_UNUSED()
825static void
826thread_sched_to_ready(struct rb_thread_sched *sched, rb_thread_t *th)
827{
828 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
829
830 thread_sched_lock(sched, th);
831 {
832 thread_sched_to_ready_common(sched, th, true, false);
833 }
834 thread_sched_unlock(sched, th);
835}
836
837// wait until sched->running is `th`.
838static void
839thread_sched_wait_running_turn(struct rb_thread_sched *sched, rb_thread_t *th, bool can_direct_transfer)
840{
841 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
842
843 ASSERT_thread_sched_locked(sched, th);
844 VM_ASSERT(th == GET_THREAD());
845
846 if (th != sched->running) {
847 // already deleted from running threads
848 // VM_ASSERT(!ractor_sched_running_threads_contain_p(th->vm, th)); // need locking
849
850 // wait for execution right
851 rb_thread_t *next_th;
852 while((next_th = sched->running) != th) {
853 if (th_has_dedicated_nt(th)) {
854 RUBY_DEBUG_LOG("(nt) sleep th:%u running:%u", rb_th_serial(th), rb_th_serial(sched->running));
855
856 thread_sched_set_lock_owner(sched, NULL);
857 {
858 RUBY_DEBUG_LOG("nt:%d cond:%p", th->nt->serial, &th->nt->cond.readyq);
859 rb_native_cond_wait(&th->nt->cond.readyq, &sched->lock_);
860 }
861 thread_sched_set_lock_owner(sched, th);
862
863 RUBY_DEBUG_LOG("(nt) wakeup %s", sched->running == th ? "success" : "failed");
864 if (th == sched->running) {
865 rb_ractor_thread_switch(th->ractor, th);
866 }
867 }
868 else {
869 // search another ready thread
870 if (can_direct_transfer &&
871 (next_th = sched->running) != NULL &&
872 !next_th->nt // next_th is running or has dedicated nt
873 ) {
874
875 RUBY_DEBUG_LOG("th:%u->%u (direct)", rb_th_serial(th), rb_th_serial(next_th));
876
877 thread_sched_set_lock_owner(sched, NULL);
878 {
879 rb_ractor_set_current_ec(th->ractor, NULL);
880 thread_sched_switch(th, next_th);
881 }
882 thread_sched_set_lock_owner(sched, th);
883 }
884 else {
885 // search another ready ractor
886 struct rb_native_thread *nt = th->nt;
887 native_thread_assign(NULL, th);
888
889 RUBY_DEBUG_LOG("th:%u->%u (ractor scheduling)", rb_th_serial(th), rb_th_serial(next_th));
890
891 thread_sched_set_lock_owner(sched, NULL);
892 {
893 rb_ractor_set_current_ec(th->ractor, NULL);
894 coroutine_transfer(th->sched.context, nt->nt_context);
895 }
896 thread_sched_set_lock_owner(sched, th);
897 }
898
899 VM_ASSERT(GET_EC() == th->ec);
900 }
901 }
902
903 VM_ASSERT(th->nt != NULL);
904 VM_ASSERT(GET_EC() == th->ec);
905 VM_ASSERT(th->sched.waiting_reason.flags == thread_sched_waiting_none);
906
907 // add th to running threads
908 thread_sched_add_running_thread(sched, th);
909 }
910
911 // VM_ASSERT(ractor_sched_running_threads_contain_p(th->vm, th)); need locking
912 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_RESUMED, th);
913}
914
915// waiting -> ready -> running (locked)
916static void
917thread_sched_to_running_common(struct rb_thread_sched *sched, rb_thread_t *th)
918{
919 RUBY_DEBUG_LOG("th:%u dedicated:%d", rb_th_serial(th), th_has_dedicated_nt(th));
920
921 VM_ASSERT(sched->running != th);
922 VM_ASSERT(th_has_dedicated_nt(th));
923 VM_ASSERT(GET_THREAD() == th);
924
925 native_thread_dedicated_dec(th->vm, th->ractor, th->nt);
926
927 // waiting -> ready
928 thread_sched_to_ready_common(sched, th, false, false);
929
930 if (sched->running == th) {
931 thread_sched_add_running_thread(sched, th);
932 }
933
934 // TODO: check SNT number
935 thread_sched_wait_running_turn(sched, th, false);
936}
937
938// waiting -> ready -> running
939//
940// `th` went to the "waiting" state via `thread_sched_to_waiting()`
941// and ran a dedicated task (like waitpid and so on).
942// After the dedicated task, this function is called
943// so the thread rejoins normal thread scheduling.
944static void
945thread_sched_to_running(struct rb_thread_sched *sched, rb_thread_t *th)
946{
947 thread_sched_lock(sched, th);
948 {
949 thread_sched_to_running_common(sched, th);
950 }
951 thread_sched_unlock(sched, th);
952}
953
954// resume the next thread in the thread ready queue.
955//
956// dequeue the next thread from the ready queue and
957// resume it if available.
958//
959// If the next thread has a dedicated native thread, simply signal it to resume.
960// Otherwise, make the ractor ready so another nt will run the ractor and the thread.
961static void
962thread_sched_wakeup_next_thread(struct rb_thread_sched *sched, rb_thread_t *th, bool will_switch)
963{
964 ASSERT_thread_sched_locked(sched, th);
965
966 VM_ASSERT(sched->running == th);
967 VM_ASSERT(sched->running->nt != NULL);
968
969 rb_thread_t *next_th = thread_sched_deq(sched);
970
971 RUBY_DEBUG_LOG("next_th:%u", rb_th_serial(next_th));
972 VM_ASSERT(th != next_th);
973
974 thread_sched_set_running(sched, next_th);
975 VM_ASSERT(next_th == sched->running);
976 thread_sched_wakeup_running_thread(sched, next_th, will_switch);
977
978 if (th != next_th) {
979 thread_sched_del_running_thread(sched, th);
980 }
981}
982
983// running -> waiting
984//
985// to_dead: false
986// th will run a dedicated task.
987// run another ready thread.
988// to_dead: true
989// th will be dead.
990// run another ready thread.
991static void
992thread_sched_to_waiting_common0(struct rb_thread_sched *sched, rb_thread_t *th, bool to_dead)
993{
994 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED, th);
995
996 if (!to_dead) native_thread_dedicated_inc(th->vm, th->ractor, th->nt);
997
998 RUBY_DEBUG_LOG("%sth:%u", to_dead ? "to_dead " : "", rb_th_serial(th));
999
1000 bool can_switch = to_dead ? !th_has_dedicated_nt(th) : false;
1001 thread_sched_wakeup_next_thread(sched, th, can_switch);
1002}
1003
1004// running -> dead (locked)
1005static void
1006thread_sched_to_dead_common(struct rb_thread_sched *sched, rb_thread_t *th)
1007{
1008 RUBY_DEBUG_LOG("dedicated:%d", th->nt->dedicated);
1009 thread_sched_to_waiting_common0(sched, th, true);
1010 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_EXITED, th);
1011}
1012
1013// running -> dead
1014static void
1015thread_sched_to_dead(struct rb_thread_sched *sched, rb_thread_t *th)
1016{
1017 thread_sched_lock(sched, th);
1018 {
1019 thread_sched_to_dead_common(sched, th);
1020 }
1021 thread_sched_unlock(sched, th);
1022}
1023
1024// running -> waiting (locked)
1025//
1026// This thread will run a dedicated task (th->nt->dedicated++).
1027static void
1028thread_sched_to_waiting_common(struct rb_thread_sched *sched, rb_thread_t *th)
1029{
1030 RUBY_DEBUG_LOG("dedicated:%d", th->nt->dedicated);
1031 thread_sched_to_waiting_common0(sched, th, false);
1032}
1033
1034// running -> waiting
1035//
1036// This thread will run a dedicated task.
1037static void
1038thread_sched_to_waiting(struct rb_thread_sched *sched, rb_thread_t *th)
1039{
1040 thread_sched_lock(sched, th);
1041 {
1042 thread_sched_to_waiting_common(sched, th);
1043 }
1044 thread_sched_unlock(sched, th);
1045}
1046
1047// mini utility func
1048static void
1049setup_ubf(rb_thread_t *th, rb_unblock_function_t *func, void *arg)
1050{
1051 rb_native_mutex_lock(&th->interrupt_lock);
1052 {
1053 th->unblock.func = func;
1054 th->unblock.arg = arg;
1055 }
1056 rb_native_mutex_unlock(&th->interrupt_lock);
1057}
1058
1059static void
1060ubf_waiting(void *ptr)
1061{
1062 rb_thread_t *th = (rb_thread_t *)ptr;
1063 struct rb_thread_sched *sched = TH_SCHED(th);
1064
1065 // only once. it is safe because th->interrupt_lock is already acquired.
1066 th->unblock.func = NULL;
1067 th->unblock.arg = NULL;
1068
1069 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
1070
1071 thread_sched_lock(sched, th);
1072 {
1073 if (sched->running == th) {
1074 // not sleeping yet.
1075 }
1076 else {
1077 thread_sched_to_ready_common(sched, th, true, false);
1078 }
1079 }
1080 thread_sched_unlock(sched, th);
1081}
1082
1083// running -> waiting
1084//
1085// This thread will sleep until another thread wakes it up.
1086static void
1087thread_sched_to_waiting_until_wakeup(struct rb_thread_sched *sched, rb_thread_t *th)
1088{
1089 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
1090
1091 RB_VM_SAVE_MACHINE_CONTEXT(th);
1092 setup_ubf(th, ubf_waiting, (void *)th);
1093
1094 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED, th);
1095
1096 thread_sched_lock(sched, th);
1097 {
1098 if (!RUBY_VM_INTERRUPTED(th->ec)) {
1099 bool can_direct_transfer = !th_has_dedicated_nt(th);
1100 thread_sched_wakeup_next_thread(sched, th, can_direct_transfer);
1101 thread_sched_wait_running_turn(sched, th, can_direct_transfer);
1102 }
1103 else {
1104 RUBY_DEBUG_LOG("th:%u interrupted", rb_th_serial(th));
1105 }
1106 }
1107 thread_sched_unlock(sched, th);
1108
1109 setup_ubf(th, NULL, NULL);
1110}
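// Sleep/wakeup handshake used above: the sleeping side registers ubf_waiting()
// via setup_ubf() and then waits for its running turn. When the thread is
// interrupted, the registered unblock function runs and re-enqueues the thread
// with thread_sched_to_ready_common(), so thread_sched_wait_running_turn()
// returns once the scheduler selects the thread again.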
1111
1112// run another thread in the ready queue.
1113// continue to run if there are no ready threads.
1114static void
1115thread_sched_yield(struct rb_thread_sched *sched, rb_thread_t *th)
1116{
1117 RUBY_DEBUG_LOG("th:%d sched->readyq_cnt:%d", (int)th->serial, sched->readyq_cnt);
1118
1119 thread_sched_lock(sched, th);
1120 {
1121 if (!ccan_list_empty(&sched->readyq)) {
1122 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED, th);
1123 thread_sched_wakeup_next_thread(sched, th, !th_has_dedicated_nt(th));
1124 bool can_direct_transfer = !th_has_dedicated_nt(th);
1125 thread_sched_to_ready_common(sched, th, false, can_direct_transfer);
1126 thread_sched_wait_running_turn(sched, th, can_direct_transfer);
1127 }
1128 else {
1129 VM_ASSERT(sched->readyq_cnt == 0);
1130 }
1131 }
1132 thread_sched_unlock(sched, th);
1133}
1134
1135void
1136rb_thread_sched_init(struct rb_thread_sched *sched, bool atfork)
1137{
1138 rb_native_mutex_initialize(&sched->lock_);
1139
1140#if VM_CHECK_MODE
1141 sched->lock_owner = NULL;
1142#endif
1143
1144 ccan_list_head_init(&sched->readyq);
1145 sched->readyq_cnt = 0;
1146
1147#if USE_MN_THREADS
1148 if (!atfork) sched->enable_mn_threads = true; // MN is enabled on Ractors
1149#endif
1150}
1151
1152static void
1153thread_sched_switch0(struct coroutine_context *current_cont, rb_thread_t *next_th, struct rb_native_thread *nt)
1154{
1155 VM_ASSERT(!nt->dedicated);
1156 VM_ASSERT(next_th->nt == NULL);
1157
1158 RUBY_DEBUG_LOG("next_th:%u", rb_th_serial(next_th));
1159
1160 ruby_thread_set_native(next_th);
1161 native_thread_assign(nt, next_th);
1162 coroutine_transfer(current_cont, next_th->sched.context);
1163}
1164
1165static void
1166thread_sched_switch(rb_thread_t *cth, rb_thread_t *next_th)
1167{
1168 struct rb_native_thread *nt = cth->nt;
1169 native_thread_assign(NULL, cth);
1170 RUBY_DEBUG_LOG("th:%u->%u on nt:%d", rb_th_serial(cth), rb_th_serial(next_th), nt->serial);
1171 thread_sched_switch0(cth->sched.context, next_th, nt);
1172}
1173
1174#if VM_CHECK_MODE > 0
1176static unsigned int
1177grq_size(rb_vm_t *vm, rb_ractor_t *cr)
1178{
1179 ASSERT_ractor_sched_locked(vm, cr);
1180
1181 rb_ractor_t *r, *prev_r = NULL;
1182 unsigned int i = 0;
1183
1184 ccan_list_for_each(&vm->ractor.sched.grq, r, threads.sched.grq_node) {
1185 i++;
1186
1187 VM_ASSERT(r != prev_r);
1188 prev_r = r;
1189 }
1190 return i;
1191}
1192#endif
1193
1194static void
1195ractor_sched_enq(rb_vm_t *vm, rb_ractor_t *r)
1196{
1197 struct rb_thread_sched *sched = &r->threads.sched;
1198 rb_ractor_t *cr = NULL; // timer thread can call this function
1199
1200 VM_ASSERT(sched->running != NULL);
1201 VM_ASSERT(sched->running->nt == NULL);
1202
1203 ractor_sched_lock(vm, cr);
1204 {
1205#if VM_CHECK_MODE > 0
1206 // check if grq contains r
1207 rb_ractor_t *tr;
1208 ccan_list_for_each(&vm->ractor.sched.grq, tr, threads.sched.grq_node) {
1209 VM_ASSERT(r != tr);
1210 }
1211#endif
1212
1213 ccan_list_add_tail(&vm->ractor.sched.grq, &sched->grq_node);
1214 vm->ractor.sched.grq_cnt++;
1215 VM_ASSERT(grq_size(vm, cr) == vm->ractor.sched.grq_cnt);
1216
1217 RUBY_DEBUG_LOG("r:%u th:%u grq_cnt:%u", rb_ractor_id(r), rb_th_serial(sched->running), vm->ractor.sched.grq_cnt);
1218
1219 rb_native_cond_signal(&vm->ractor.sched.cond);
1220
1221 // ractor_sched_dump(vm);
1222 }
1223 ractor_sched_unlock(vm, cr);
1224}
1225
1226
1227#ifndef SNT_KEEP_SECONDS
1228#define SNT_KEEP_SECONDS 0
1229#endif
1230
1231#ifndef MINIMUM_SNT
1232// make at least MINIMUM_SNT snts for debug.
1233#define MINIMUM_SNT 0
1234#endif
1235
1236static rb_ractor_t *
1237ractor_sched_deq(rb_vm_t *vm, rb_ractor_t *cr)
1238{
1239 rb_ractor_t *r;
1240
1241 ractor_sched_lock(vm, cr);
1242 {
1243 RUBY_DEBUG_LOG("empty? %d", ccan_list_empty(&vm->ractor.sched.grq));
1244 // ractor_sched_dump(vm);
1245
1246 VM_ASSERT(rb_current_execution_context(false) == NULL);
1247 VM_ASSERT(grq_size(vm, cr) == vm->ractor.sched.grq_cnt);
1248
1249 while ((r = ccan_list_pop(&vm->ractor.sched.grq, rb_ractor_t, threads.sched.grq_node)) == NULL) {
1250 RUBY_DEBUG_LOG("wait grq_cnt:%d", (int)vm->ractor.sched.grq_cnt);
1251
1252#if SNT_KEEP_SECONDS > 0
1253 rb_hrtime_t abs = rb_hrtime_add(rb_hrtime_now(), RB_HRTIME_PER_SEC * SNT_KEEP_SECONDS);
1254 if (native_cond_timedwait(&vm->ractor.sched.cond, &vm->ractor.sched.lock, &abs) == ETIMEDOUT) {
1255 RUBY_DEBUG_LOG("timeout, grq_cnt:%d", (int)vm->ractor.sched.grq_cnt);
1256 VM_ASSERT(r == NULL);
1257 vm->ractor.sched.snt_cnt--;
1258 vm->ractor.sched.running_cnt--;
1259 break;
1260 }
1261 else {
1262 RUBY_DEBUG_LOG("wakeup grq_cnt:%d", (int)vm->ractor.sched.grq_cnt);
1263 }
1264#else
1265 ractor_sched_set_unlocked(vm, cr);
1266 rb_native_cond_wait(&vm->ractor.sched.cond, &vm->ractor.sched.lock);
1267 ractor_sched_set_locked(vm, cr);
1268
1269 RUBY_DEBUG_LOG("wakeup grq_cnt:%d", (int)vm->ractor.sched.grq_cnt);
1270#endif
1271 }
1272
1273 VM_ASSERT(rb_current_execution_context(false) == NULL);
1274
1275 if (r) {
1276 VM_ASSERT(vm->ractor.sched.grq_cnt > 0);
1277 vm->ractor.sched.grq_cnt--;
1278 RUBY_DEBUG_LOG("r:%d grq_cnt:%u", (int)rb_ractor_id(r), vm->ractor.sched.grq_cnt);
1279 }
1280 else {
1281 VM_ASSERT(SNT_KEEP_SECONDS > 0);
1282 // timeout
1283 }
1284 }
1285 ractor_sched_unlock(vm, cr);
1286
1287 return r;
1288}
1289
1290void rb_ractor_lock_self(rb_ractor_t *r);
1291void rb_ractor_unlock_self(rb_ractor_t *r);
1292
1293void
1294rb_ractor_sched_sleep(rb_execution_context_t *ec, rb_ractor_t *cr, rb_unblock_function_t *ubf)
1295{
1296 // ractor lock of cr is acquired
1297 // cr is in sleeping status
1298 rb_thread_t *th = rb_ec_thread_ptr(ec);
1299 struct rb_thread_sched *sched = TH_SCHED(th);
1300 cr->sync.wait.waiting_thread = th; // TODO: multi-thread
1301
1302 setup_ubf(th, ubf, (void *)cr);
1303
1304 thread_sched_lock(sched, th);
1305 {
1306 rb_ractor_unlock_self(cr);
1307 {
1308 if (RUBY_VM_INTERRUPTED(th->ec)) {
1309 RUBY_DEBUG_LOG("interrupted");
1310 }
1311 else if (cr->sync.wait.wakeup_status != wakeup_none) {
1312 RUBY_DEBUG_LOG("awaken:%d", (int)cr->sync.wait.wakeup_status);
1313 }
1314 else {
1315 // sleep
1316 RB_VM_SAVE_MACHINE_CONTEXT(th);
1317 th->status = THREAD_STOPPED_FOREVER;
1318
1319 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED, th);
1320
1321 bool can_direct_transfer = !th_has_dedicated_nt(th);
1322 thread_sched_wakeup_next_thread(sched, th, can_direct_transfer);
1323 thread_sched_wait_running_turn(sched, th, can_direct_transfer);
1324 th->status = THREAD_RUNNABLE;
1325 // wakeup
1326 }
1327 }
1328 }
1329 thread_sched_unlock(sched, th);
1330
1331 setup_ubf(th, NULL, NULL);
1332
1333 rb_ractor_lock_self(cr);
1334 cr->sync.wait.waiting_thread = NULL;
1335}
1336
1337void
1338rb_ractor_sched_wakeup(rb_ractor_t *r)
1339{
1340 rb_thread_t *r_th = r->sync.wait.waiting_thread;
1341 // ractor lock of r is acquired
1342 struct rb_thread_sched *sched = TH_SCHED(r_th);
1343
1344 VM_ASSERT(r->sync.wait.wakeup_status != 0);
1345
1346 thread_sched_lock(sched, r_th);
1347 {
1348 if (r_th->status == THREAD_STOPPED_FOREVER) {
1349 thread_sched_to_ready_common(sched, r_th, true, false);
1350 }
1351 }
1352 thread_sched_unlock(sched, r_th);
1353}
1354
1355static bool
1356ractor_sched_barrier_completed_p(rb_vm_t *vm)
1357{
1358 RUBY_DEBUG_LOG("run:%u wait:%u", vm->ractor.sched.running_cnt, vm->ractor.sched.barrier_waiting_cnt);
1359 VM_ASSERT(vm->ractor.sched.running_cnt - 1 >= vm->ractor.sched.barrier_waiting_cnt);
1360 return (vm->ractor.sched.running_cnt - vm->ractor.sched.barrier_waiting_cnt) == 1;
1361}
1362
1363void
1364rb_ractor_sched_barrier_start(rb_vm_t *vm, rb_ractor_t *cr)
1365{
1366 VM_ASSERT(cr == GET_RACTOR());
1367 VM_ASSERT(vm->ractor.sync.lock_owner == cr); // VM is locked
1368 VM_ASSERT(!vm->ractor.sched.barrier_waiting);
1369 VM_ASSERT(vm->ractor.sched.barrier_waiting_cnt == 0);
1370
1371 RUBY_DEBUG_LOG("start serial:%u", vm->ractor.sched.barrier_serial);
1372
1373 unsigned int lock_rec;
1374
1375 ractor_sched_lock(vm, cr);
1376 {
1377 vm->ractor.sched.barrier_waiting = true;
1378
1379 // release VM lock
1380 lock_rec = vm->ractor.sync.lock_rec;
1381 vm->ractor.sync.lock_rec = 0;
1382 vm->ractor.sync.lock_owner = NULL;
1383 rb_native_mutex_unlock(&vm->ractor.sync.lock);
1384 {
1385 // interrupts all running threads
1386 rb_thread_t *ith;
1387 ccan_list_for_each(&vm->ractor.sched.running_threads, ith, sched.node.running_threads) {
1388 if (ith->ractor != cr) {
1389 RUBY_DEBUG_LOG("barrier int:%u", rb_th_serial(ith));
1390 RUBY_VM_SET_VM_BARRIER_INTERRUPT(ith->ec);
1391 }
1392 }
1393
1394 // wait for other ractors
1395 while (!ractor_sched_barrier_completed_p(vm)) {
1396 ractor_sched_set_unlocked(vm, cr);
1397 rb_native_cond_wait(&vm->ractor.sched.barrier_complete_cond, &vm->ractor.sched.lock);
1398 ractor_sched_set_locked(vm, cr);
1399 }
1400 }
1401 }
1402 ractor_sched_unlock(vm, cr);
1403
1404 // acquire VM lock
1405 rb_native_mutex_lock(&vm->ractor.sync.lock);
1406 vm->ractor.sync.lock_rec = lock_rec;
1407 vm->ractor.sync.lock_owner = cr;
1408
1409 RUBY_DEBUG_LOG("completed seirial:%u", vm->ractor.sched.barrier_serial);
1410
1411 ractor_sched_lock(vm, cr);
1412 {
1413 vm->ractor.sched.barrier_waiting = false;
1414 vm->ractor.sched.barrier_serial++;
1415 vm->ractor.sched.barrier_waiting_cnt = 0;
1416 rb_native_cond_broadcast(&vm->ractor.sched.barrier_release_cond);
1417 }
1418 ractor_sched_unlock(vm, cr);
1419}
1420
1421static void
1422ractor_sched_barrier_join_signal_locked(rb_vm_t *vm)
1423{
1424 if (ractor_sched_barrier_completed_p(vm)) {
1425 rb_native_cond_signal(&vm->ractor.sched.barrier_complete_cond);
1426 }
1427}
1428
1429static void
1430ractor_sched_barrier_join_wait_locked(rb_vm_t *vm, rb_thread_t *th)
1431{
1432 VM_ASSERT(vm->ractor.sched.barrier_waiting);
1433
1434 unsigned int barrier_serial = vm->ractor.sched.barrier_serial;
1435
1436 while (vm->ractor.sched.barrier_serial == barrier_serial) {
1437 RUBY_DEBUG_LOG("sleep serial:%u", barrier_serial);
1438 RB_VM_SAVE_MACHINE_CONTEXT(th);
1439
1440 rb_ractor_t *cr = th->ractor;
1441 ractor_sched_set_unlocked(vm, cr);
1442 rb_native_cond_wait(&vm->ractor.sched.barrier_release_cond, &vm->ractor.sched.lock);
1443 ractor_sched_set_locked(vm, cr);
1444
1445 RUBY_DEBUG_LOG("wakeup serial:%u", barrier_serial);
1446 }
1447}
1448
1449void
1450rb_ractor_sched_barrier_join(rb_vm_t *vm, rb_ractor_t *cr)
1451{
1452 VM_ASSERT(cr->threads.sched.running != NULL); // running ractor
1453 VM_ASSERT(cr == GET_RACTOR());
1454 VM_ASSERT(vm->ractor.sync.lock_owner == NULL); // VM is locked, but owner == NULL
1455 VM_ASSERT(vm->ractor.sched.barrier_waiting); // VM needs barrier sync
1456
1457#if USE_RUBY_DEBUG_LOG || VM_CHECK_MODE > 0
1458 unsigned int barrier_serial = vm->ractor.sched.barrier_serial;
1459#endif
1460
1461 RUBY_DEBUG_LOG("join");
1462
1463 rb_native_mutex_unlock(&vm->ractor.sync.lock);
1464 {
1465 VM_ASSERT(vm->ractor.sched.barrier_waiting); // VM needs barrier sync
1466 VM_ASSERT(vm->ractor.sched.barrier_serial == barrier_serial);
1467
1468 ractor_sched_lock(vm, cr);
1469 {
1470 // running_cnt
1471 vm->ractor.sched.barrier_waiting_cnt++;
1472 RUBY_DEBUG_LOG("waiting_cnt:%u serial:%u", vm->ractor.sched.barrier_waiting_cnt, barrier_serial);
1473
1474 ractor_sched_barrier_join_signal_locked(vm);
1475 ractor_sched_barrier_join_wait_locked(vm, cr->threads.sched.running);
1476 }
1477 ractor_sched_unlock(vm, cr);
1478 }
1479
1480 rb_native_mutex_lock(&vm->ractor.sync.lock);
1481 // VM locked here
1482}
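// Barrier protocol summary: rb_ractor_sched_barrier_start() sets
// barrier_waiting, interrupts the running threads of other ractors, and waits
// on barrier_complete_cond until running_cnt - barrier_waiting_cnt == 1.
// Joining threads increment barrier_waiting_cnt, signal barrier_complete_cond,
// and sleep on barrier_release_cond until barrier_serial changes, which
// happens when the initiating ractor bumps the serial and broadcasts the
// release above.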
1483
1484#if 0
1485// TODO
1486
1487static void clear_thread_cache_altstack(void);
1488
1489static void
1490rb_thread_sched_destroy(struct rb_thread_sched *sched)
1491{
1492 /*
1493 * only called once at VM shutdown (not atfork), another thread
1494 * may still grab vm->gvl.lock when calling gvl_release at
1495 * the end of thread_start_func_2
1496 */
1497 if (0) {
1498 rb_native_mutex_destroy(&sched->lock);
1499 }
1500 clear_thread_cache_altstack();
1501}
1502#endif
1503
1504#ifdef RB_THREAD_T_HAS_NATIVE_ID
1505static int
1506get_native_thread_id(void)
1507{
1508#ifdef __linux__
1509 return (int)syscall(SYS_gettid);
1510#elif defined(__FreeBSD__)
1511 return pthread_getthreadid_np();
1512#endif
1513}
1514#endif
1515
1516#if defined(HAVE_WORKING_FORK)
1517static void
1518thread_sched_atfork(struct rb_thread_sched *sched)
1519{
1520 current_fork_gen++;
1521 rb_thread_sched_init(sched, true);
1522 rb_thread_t *th = GET_THREAD();
1523 rb_vm_t *vm = GET_VM();
1524
1525 if (th_has_dedicated_nt(th)) {
1526 vm->ractor.sched.snt_cnt = 0;
1527 }
1528 else {
1529 vm->ractor.sched.snt_cnt = 1;
1530 }
1531 vm->ractor.sched.running_cnt = 0;
1532
1533 // rb_native_cond_destroy(&vm->ractor.sched.cond);
1534 rb_native_cond_initialize(&vm->ractor.sched.cond);
1535 rb_native_cond_initialize(&vm->ractor.sched.barrier_complete_cond);
1536 rb_native_cond_initialize(&vm->ractor.sched.barrier_release_cond);
1537
1538 ccan_list_head_init(&vm->ractor.sched.grq);
1539 ccan_list_head_init(&vm->ractor.sched.timeslice_threads);
1540 ccan_list_head_init(&vm->ractor.sched.running_threads);
1541
1542 VM_ASSERT(sched->is_running);
1543 sched->is_running_timeslice = false;
1544
1545 if (sched->running != th) {
1546 thread_sched_to_running(sched, th);
1547 }
1548 else {
1549 thread_sched_setup_running_threads(sched, th->ractor, vm, th, NULL, NULL);
1550 }
1551
1552#ifdef RB_THREAD_T_HAS_NATIVE_ID
1553 if (th->nt) {
1554 th->nt->tid = get_native_thread_id();
1555 }
1556#endif
1557}
1558
1559#endif
1560
1561#ifdef RB_THREAD_LOCAL_SPECIFIER
1562static RB_THREAD_LOCAL_SPECIFIER rb_thread_t *ruby_native_thread;
1563#else
1564static pthread_key_t ruby_native_thread_key;
1565#endif
1566
1567static void
1568null_func(int i)
1569{
1570 /* null */
1572 // This function can be called from a signal handler
1572 // RUBY_DEBUG_LOG("i:%d", i);
1573}
1574
1575rb_thread_t *
1576ruby_thread_from_native(void)
1577{
1578#ifdef RB_THREAD_LOCAL_SPECIFIER
1579 return ruby_native_thread;
1580#else
1581 return pthread_getspecific(ruby_native_thread_key);
1582#endif
1583}
1584
1585int
1586ruby_thread_set_native(rb_thread_t *th)
1587{
1588 if (th) {
1589#ifdef USE_UBF_LIST
1590 ccan_list_node_init(&th->sched.node.ubf);
1591#endif
1592 }
1593
1594 // setup TLS
1595
1596 if (th && th->ec) {
1597 rb_ractor_set_current_ec(th->ractor, th->ec);
1598 }
1599#ifdef RB_THREAD_LOCAL_SPECIFIER
1600 ruby_native_thread = th;
1601 return 1;
1602#else
1603 return pthread_setspecific(ruby_native_thread_key, th) == 0;
1604#endif
1605}
1606
1607static void native_thread_setup(struct rb_native_thread *nt);
1608static void native_thread_setup_on_thread(struct rb_native_thread *nt);
1609
1610void
1611Init_native_thread(rb_thread_t *main_th)
1612{
1613#if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK)
1614 if (condattr_monotonic) {
1615 int r = pthread_condattr_init(condattr_monotonic);
1616 if (r == 0) {
1617 r = pthread_condattr_setclock(condattr_monotonic, CLOCK_MONOTONIC);
1618 }
1619 if (r) condattr_monotonic = NULL;
1620 }
1621#endif
1622
1623#ifndef RB_THREAD_LOCAL_SPECIFIER
1624 if (pthread_key_create(&ruby_native_thread_key, 0) == EAGAIN) {
1625 rb_bug("pthread_key_create failed (ruby_native_thread_key)");
1626 }
1627 if (pthread_key_create(&ruby_current_ec_key, 0) == EAGAIN) {
1628 rb_bug("pthread_key_create failed (ruby_current_ec_key)");
1629 }
1630#endif
1631 ruby_posix_signal(SIGVTALRM, null_func);
1632
1633 // setup vm
1634 rb_vm_t *vm = main_th->vm;
1635 rb_native_mutex_initialize(&vm->ractor.sched.lock);
1636 rb_native_cond_initialize(&vm->ractor.sched.cond);
1637 rb_native_cond_initialize(&vm->ractor.sched.barrier_complete_cond);
1638 rb_native_cond_initialize(&vm->ractor.sched.barrier_release_cond);
1639
1640 ccan_list_head_init(&vm->ractor.sched.grq);
1641 ccan_list_head_init(&vm->ractor.sched.timeslice_threads);
1642 ccan_list_head_init(&vm->ractor.sched.running_threads);
1643
1644 // setup main thread
1645 main_th->nt->thread_id = pthread_self();
1646 main_th->nt->serial = 1;
1647#ifdef RUBY_NT_SERIAL
1648 ruby_nt_serial = 1;
1649#endif
1650 ruby_thread_set_native(main_th);
1651 native_thread_setup(main_th->nt);
1652 native_thread_setup_on_thread(main_th->nt);
1653
1654 TH_SCHED(main_th)->running = main_th;
1655 main_th->has_dedicated_nt = 1;
1656
1657 thread_sched_setup_running_threads(TH_SCHED(main_th), main_th->ractor, vm, main_th, NULL, NULL);
1658
1659 // setup main NT
1660 main_th->nt->dedicated = 1;
1661 main_th->nt->vm = vm;
1662
1663 // setup mn
1664 vm->ractor.sched.dnt_cnt = 1;
1665}
1666
1667extern int ruby_mn_threads_enabled;
1668
1669void
1670ruby_mn_threads_params(void)
1671{
1672 rb_vm_t *vm = GET_VM();
1673 rb_ractor_t *main_ractor = GET_RACTOR();
1674
1675 const char *mn_threads_cstr = getenv("RUBY_MN_THREADS");
1676 bool enable_mn_threads = false;
1677
1678 if (USE_MN_THREADS && mn_threads_cstr && (enable_mn_threads = atoi(mn_threads_cstr) > 0)) {
1679 // enabled
1680 ruby_mn_threads_enabled = 1;
1681 }
1682 main_ractor->threads.sched.enable_mn_threads = enable_mn_threads;
1683
1684 const char *max_cpu_cstr = getenv("RUBY_MAX_CPU");
1685 const int default_max_cpu = 8; // TODO: CPU num?
1686 int max_cpu = default_max_cpu;
1687
1688 if (USE_MN_THREADS && max_cpu_cstr && atoi(max_cpu_cstr) > 0) {
1689 max_cpu = atoi(max_cpu_cstr);
1690 }
1691
1692 vm->ractor.sched.max_cpu = max_cpu;
1693}
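// Example (hypothetical invocation): enable M:N threads on the main Ractor
// and set the RUBY_MAX_CPU limit to 4 native threads:
//
//     $ RUBY_MN_THREADS=1 RUBY_MAX_CPU=4 ruby script.rb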
1694
1695static void
1696native_thread_dedicated_inc(rb_vm_t *vm, rb_ractor_t *cr, struct rb_native_thread *nt)
1697{
1698 RUBY_DEBUG_LOG("nt:%d %d->%d", nt->serial, nt->dedicated, nt->dedicated + 1);
1699
1700 if (nt->dedicated == 0) {
1701 ractor_sched_lock(vm, cr);
1702 {
1703 vm->ractor.sched.snt_cnt--;
1704 vm->ractor.sched.dnt_cnt++;
1705 }
1706 ractor_sched_unlock(vm, cr);
1707 }
1708
1709 nt->dedicated++;
1710}
1711
1712static void
1713native_thread_dedicated_dec(rb_vm_t *vm, rb_ractor_t *cr, struct rb_native_thread *nt)
1714{
1715 RUBY_DEBUG_LOG("nt:%d %d->%d", nt->serial, nt->dedicated, nt->dedicated - 1);
1716 VM_ASSERT(nt->dedicated > 0);
1717 nt->dedicated--;
1718
1719 if (nt->dedicated == 0) {
1720 ractor_sched_lock(vm, cr);
1721 {
1722 nt->vm->ractor.sched.snt_cnt++;
1723 nt->vm->ractor.sched.dnt_cnt--;
1724 }
1725 ractor_sched_unlock(vm, cr);
1726 }
1727}
1728
1729static void
1730native_thread_assign(struct rb_native_thread *nt, rb_thread_t *th)
1731{
1732#if USE_RUBY_DEBUG_LOG
1733 if (nt) {
1734 if (th->nt) {
1735 RUBY_DEBUG_LOG("th:%d nt:%d->%d", (int)th->serial, (int)th->nt->serial, (int)nt->serial);
1736 }
1737 else {
1738 RUBY_DEBUG_LOG("th:%d nt:NULL->%d", (int)th->serial, (int)nt->serial);
1739 }
1740 }
1741 else {
1742 if (th->nt) {
1743 RUBY_DEBUG_LOG("th:%d nt:%d->NULL", (int)th->serial, (int)th->nt->serial);
1744 }
1745 else {
1746 RUBY_DEBUG_LOG("th:%d nt:NULL->NULL", (int)th->serial);
1747 }
1748 }
1749#endif
1750
1751 th->nt = nt;
1752}
1753
1754static void
1755native_thread_destroy(struct rb_native_thread *nt)
1756{
1757 if (nt) {
1758 rb_native_cond_destroy(&nt->cond.readyq);
1759
1760 if (&nt->cond.readyq != &nt->cond.intr) {
1761 rb_native_cond_destroy(&nt->cond.intr);
1762 }
1763
1764 RB_ALTSTACK_FREE(nt->altstack);
1765 ruby_xfree(nt->nt_context);
1766 ruby_xfree(nt);
1767 }
1768}
1769
1770#if defined HAVE_PTHREAD_GETATTR_NP || defined HAVE_PTHREAD_ATTR_GET_NP
1771#define STACKADDR_AVAILABLE 1
1772#elif defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP
1773#define STACKADDR_AVAILABLE 1
1774#undef MAINSTACKADDR_AVAILABLE
1775#define MAINSTACKADDR_AVAILABLE 1
1776void *pthread_get_stackaddr_np(pthread_t);
1777size_t pthread_get_stacksize_np(pthread_t);
1778#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
1779#define STACKADDR_AVAILABLE 1
1780#elif defined HAVE_PTHREAD_GETTHRDS_NP
1781#define STACKADDR_AVAILABLE 1
1782#elif defined __HAIKU__
1783#define STACKADDR_AVAILABLE 1
1784#endif
1785
1786#ifndef MAINSTACKADDR_AVAILABLE
1787# ifdef STACKADDR_AVAILABLE
1788# define MAINSTACKADDR_AVAILABLE 1
1789# else
1790# define MAINSTACKADDR_AVAILABLE 0
1791# endif
1792#endif
1793#if MAINSTACKADDR_AVAILABLE && !defined(get_main_stack)
1794# define get_main_stack(addr, size) get_stack(addr, size)
1795#endif
1796
1797#ifdef STACKADDR_AVAILABLE
1798/*
1799 * Get the initial address and size of current thread's stack
1800 */
1801static int
1802get_stack(void **addr, size_t *size)
1803{
1804#define CHECK_ERR(expr) \
1805 {int err = (expr); if (err) return err;}
1806#ifdef HAVE_PTHREAD_GETATTR_NP /* Linux */
1807 pthread_attr_t attr;
1808 size_t guard = 0;
1809 STACK_GROW_DIR_DETECTION;
1810 CHECK_ERR(pthread_getattr_np(pthread_self(), &attr));
1811# ifdef HAVE_PTHREAD_ATTR_GETSTACK
1812 CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
1813 STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
1814# else
1815 CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
1816 CHECK_ERR(pthread_attr_getstacksize(&attr, size));
1817# endif
1818# ifdef HAVE_PTHREAD_ATTR_GETGUARDSIZE
1819 CHECK_ERR(pthread_attr_getguardsize(&attr, &guard));
1820# else
1821 guard = getpagesize();
1822# endif
1823 *size -= guard;
1824 pthread_attr_destroy(&attr);
1825#elif defined HAVE_PTHREAD_ATTR_GET_NP /* FreeBSD, DragonFly BSD, NetBSD */
1826 pthread_attr_t attr;
1827 CHECK_ERR(pthread_attr_init(&attr));
1828 CHECK_ERR(pthread_attr_get_np(pthread_self(), &attr));
1829# ifdef HAVE_PTHREAD_ATTR_GETSTACK
1830 CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
1831# else
1832 CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
1833 CHECK_ERR(pthread_attr_getstacksize(&attr, size));
1834# endif
1835 STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
1836 pthread_attr_destroy(&attr);
1837#elif (defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP) /* MacOS X */
1838 pthread_t th = pthread_self();
1839 *addr = pthread_get_stackaddr_np(th);
1840 *size = pthread_get_stacksize_np(th);
1841#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
1842 stack_t stk;
1843# if defined HAVE_THR_STKSEGMENT /* Solaris */
1844 CHECK_ERR(thr_stksegment(&stk));
1845# else /* OpenBSD */
1846 CHECK_ERR(pthread_stackseg_np(pthread_self(), &stk));
1847# endif
1848 *addr = stk.ss_sp;
1849 *size = stk.ss_size;
1850#elif defined HAVE_PTHREAD_GETTHRDS_NP /* AIX */
1851 pthread_t th = pthread_self();
1852 struct __pthrdsinfo thinfo;
1853 char reg[256];
1854 int regsiz=sizeof(reg);
1855 CHECK_ERR(pthread_getthrds_np(&th, PTHRDSINFO_QUERY_ALL,
1856 &thinfo, sizeof(thinfo),
1857 &reg, &regsiz));
1858 *addr = thinfo.__pi_stackaddr;
1859 /* Must not use thinfo.__pi_stacksize for size.
1860 It is around 3KB smaller than the correct size
1861 calculated by thinfo.__pi_stackend - thinfo.__pi_stackaddr. */
1862 *size = thinfo.__pi_stackend - thinfo.__pi_stackaddr;
1863 STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
1864#elif defined __HAIKU__
1865 thread_info info;
1866 STACK_GROW_DIR_DETECTION;
1867 CHECK_ERR(get_thread_info(find_thread(NULL), &info));
1868 *addr = info.stack_base;
1869 *size = (uintptr_t)info.stack_end - (uintptr_t)info.stack_base;
1870 STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
1871#else
1872#error STACKADDR_AVAILABLE is defined but not implemented.
1873#endif
1874 return 0;
1875#undef CHECK_ERR
1876}
1877#endif
1878
1879static struct {
1880 rb_nativethread_id_t id;
1881 size_t stack_maxsize;
1882 VALUE *stack_start;
1883} native_main_thread;
1884
1885#ifdef STACK_END_ADDRESS
1886extern void *STACK_END_ADDRESS;
1887#endif
1888
1889enum {
1890 RUBY_STACK_SPACE_LIMIT = 1024 * 1024, /* 1024KB */
1891 RUBY_STACK_SPACE_RATIO = 5
1892};
1893
1894static size_t
1895space_size(size_t stack_size)
1896{
1897 size_t space_size = stack_size / RUBY_STACK_SPACE_RATIO;
1898 if (space_size > RUBY_STACK_SPACE_LIMIT) {
1899 return RUBY_STACK_SPACE_LIMIT;
1900 }
1901 else {
1902 return space_size;
1903 }
1904}
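// e.g. an 8 MiB stack would reserve 8 MiB / 5 (about 1.6 MiB), which is then
// clamped to the 1 MiB RUBY_STACK_SPACE_LIMIT.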
1905
1906#ifdef __linux__
1907static __attribute__((noinline)) void
1908reserve_stack(volatile char *limit, size_t size)
1909{
1910# ifdef C_ALLOCA
1911# error needs alloca()
1912# endif
1913 struct rlimit rl;
1914 volatile char buf[0x100];
1915 enum {stack_check_margin = 0x1000}; /* for -fstack-check */
1916
1917 STACK_GROW_DIR_DETECTION;
1918
1919 if (!getrlimit(RLIMIT_STACK, &rl) && rl.rlim_cur == RLIM_INFINITY)
1920 return;
1921
1922 if (size < stack_check_margin) return;
1923 size -= stack_check_margin;
1924
1925 size -= sizeof(buf); /* margin */
1926 if (IS_STACK_DIR_UPPER()) {
1927 const volatile char *end = buf + sizeof(buf);
1928 limit += size;
1929 if (limit > end) {
1930 /* |<-bottom (=limit(a)) top->|
1931 * | .. |<-buf 256B |<-end | stack check |
1932 * | 256B | =size= | margin (4KB)|
1933 * | =size= limit(b)->| 256B | |
1934 * | | alloca(sz) | | |
1935 * | .. |<-buf |<-limit(c) [sz-1]->0> | |
1936 */
1937 size_t sz = limit - end;
1938 limit = alloca(sz);
1939 limit[sz-1] = 0;
1940 }
1941 }
1942 else {
1943 limit -= size;
1944 if (buf > limit) {
1945 /* |<-top (=limit(a)) bottom->|
1946 * | .. | 256B buf->| | stack check |
1947 * | 256B | =size= | margin (4KB)|
1948 * | =size= limit(b)->| 256B | |
1949 * | | alloca(sz) | | |
1950 * | .. | buf->| limit(c)-><0> | |
1951 */
1952 size_t sz = buf - limit;
1953 limit = alloca(sz);
1954 limit[0] = 0;
1955 }
1956 }
1957}
1958#else
1959# define reserve_stack(limit, size) ((void)(limit), (void)(size))
1960#endif
1961
1962#undef ruby_init_stack
1963void
1964ruby_init_stack(volatile VALUE *addr)
1965{
1966 native_main_thread.id = pthread_self();
1967
1968#if MAINSTACKADDR_AVAILABLE
1969 if (native_main_thread.stack_maxsize) return;
1970 {
1971 void* stackaddr;
1972 size_t size;
1973 if (get_main_stack(&stackaddr, &size) == 0) {
1974 native_main_thread.stack_maxsize = size;
1975 native_main_thread.stack_start = stackaddr;
1976 reserve_stack(stackaddr, size);
1977 goto bound_check;
1978 }
1979 }
1980#endif
1981#ifdef STACK_END_ADDRESS
1982 native_main_thread.stack_start = STACK_END_ADDRESS;
1983#else
1984 if (!native_main_thread.stack_start ||
1985 STACK_UPPER((VALUE *)(void *)&addr,
1986 native_main_thread.stack_start > addr,
1987 native_main_thread.stack_start < addr)) {
1988 native_main_thread.stack_start = (VALUE *)addr;
1989 }
1990#endif
1991 {
1992#if defined(HAVE_GETRLIMIT)
1993#if defined(PTHREAD_STACK_DEFAULT)
1994# if PTHREAD_STACK_DEFAULT < RUBY_STACK_SPACE*5
1995# error "PTHREAD_STACK_DEFAULT is too small"
1996# endif
1997 size_t size = PTHREAD_STACK_DEFAULT;
1998#else
1999 size_t size = RUBY_VM_THREAD_VM_STACK_SIZE;
2000#endif
2001 size_t space;
2002 int pagesize = getpagesize();
2003 struct rlimit rlim;
2004 STACK_GROW_DIR_DETECTION;
2005 if (getrlimit(RLIMIT_STACK, &rlim) == 0) {
2006 size = (size_t)rlim.rlim_cur;
2007 }
2008 addr = native_main_thread.stack_start;
2009 if (IS_STACK_DIR_UPPER()) {
2010 space = ((size_t)((char *)addr + size) / pagesize) * pagesize - (size_t)addr;
2011 }
2012 else {
2013 space = (size_t)addr - ((size_t)((char *)addr - size) / pagesize + 1) * pagesize;
2014 }
2015 native_main_thread.stack_maxsize = space;
2016#endif
2017 }
2018
2019#if MAINSTACKADDR_AVAILABLE
2020 bound_check:
2021#endif
2022    /* If addr is outside the estimated main-thread stack range, */
2023    /* it must be on a co-routine (alternative) stack. [Feature #2294] */
2024 {
2025 void *start, *end;
2026 STACK_GROW_DIR_DETECTION;
2027
2028 if (IS_STACK_DIR_UPPER()) {
2029 start = native_main_thread.stack_start;
2030 end = (char *)native_main_thread.stack_start + native_main_thread.stack_maxsize;
2031 }
2032 else {
2033 start = (char *)native_main_thread.stack_start - native_main_thread.stack_maxsize;
2034 end = native_main_thread.stack_start;
2035 }
2036
2037 if ((void *)addr < start || (void *)addr > end) {
2038 /* out of range */
2039 native_main_thread.stack_start = (VALUE *)addr;
2040 native_main_thread.stack_maxsize = 0; /* unknown */
2041 }
2042 }
2043}
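/*
 * Illustrative sketch (not part of the original file): the getrlimit() branch
 * above clamps the usable size to the page-aligned region between the stack
 * start and the rlimit.  With hypothetical 64-bit values (page-aligned addr,
 * 8 MiB soft limit, 4 KiB pages), the downward-growing formula yields
 * 8 MiB minus one page:
 */
#if 0 /* standalone example, excluded from the build */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

int
main(void)
{
    const size_t pagesize = 4096;
    const uintptr_t addr = 0x7f0000800000ull;   /* hypothetical stack start */
    const size_t size = 8 * 1024 * 1024;        /* RLIMIT_STACK soft limit */
    size_t space = addr - ((addr - size) / pagesize + 1) * pagesize;
    printf("usable stack: %zu bytes\n", space); /* 8384512 = 8 MiB - 4096 */
    return 0;
}
#endif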
2044
2045#define CHECK_ERR(expr) \
2046 {int err = (expr); if (err) {rb_bug_errno(#expr, err);}}
2047
2048static int
2049native_thread_init_stack(rb_thread_t *th)
2050{
2051 rb_nativethread_id_t curr = pthread_self();
2052
2053 if (pthread_equal(curr, native_main_thread.id)) {
2054 th->ec->machine.stack_start = native_main_thread.stack_start;
2055 th->ec->machine.stack_maxsize = native_main_thread.stack_maxsize;
2056 }
2057 else {
2058#ifdef STACKADDR_AVAILABLE
2059 if (th_has_dedicated_nt(th)) {
2060 void *start;
2061 size_t size;
2062
2063 if (get_stack(&start, &size) == 0) {
2064 uintptr_t diff = (uintptr_t)start - (uintptr_t)&curr;
2065 th->ec->machine.stack_start = (VALUE *)&curr;
2066 th->ec->machine.stack_maxsize = size - diff;
2067 }
2068 }
2069#else
2070        rb_raise(rb_eNotImpError, "the Ruby engine can be initialized only from the main thread");
2071#endif
2072 }
2073
2074 return 0;
2075}
2076
2077struct nt_param {
2078 rb_vm_t *vm;
2079 struct rb_native_thread *nt;
2080};
2081
2082static void *
2083nt_start(void *ptr);
2084
2085static int
2086native_thread_create0(struct rb_native_thread *nt)
2087{
2088 int err = 0;
2089 pthread_attr_t attr;
2090
2091 const size_t stack_size = nt->vm->default_params.thread_machine_stack_size;
2092 const size_t space = space_size(stack_size);
2093
2094 nt->machine_stack_maxsize = stack_size - space;
2095
2096#ifdef USE_SIGALTSTACK
2097 nt->altstack = rb_allocate_sigaltstack();
2098#endif
2099
2100 CHECK_ERR(pthread_attr_init(&attr));
2101
2102# ifdef PTHREAD_STACK_MIN
2103 RUBY_DEBUG_LOG("stack size: %lu", (unsigned long)stack_size);
2104 CHECK_ERR(pthread_attr_setstacksize(&attr, stack_size));
2105# endif
2106
2107# ifdef HAVE_PTHREAD_ATTR_SETINHERITSCHED
2108 CHECK_ERR(pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));
2109# endif
2110 CHECK_ERR(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
2111
2112 err = pthread_create(&nt->thread_id, &attr, nt_start, nt);
2113
2114 RUBY_DEBUG_LOG("nt:%d err:%d", (int)nt->serial, err);
2115
2116 CHECK_ERR(pthread_attr_destroy(&attr));
2117
2118 return err;
2119}
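/*
 * Illustrative sketch (not part of the original file): the attribute setup in
 * native_thread_create0() -- an explicit stack size plus
 * PTHREAD_CREATE_DETACHED -- in a minimal standalone form.  spawn_detached
 * and thread_main are hypothetical names.
 */
#if 0 /* standalone example, excluded from the build */
#include <pthread.h>
#include <stddef.h>
#include <limits.h>

static void *
thread_main(void *arg)
{
    (void)arg;
    return NULL;
}

static int
spawn_detached(size_t stack_size)
{
    pthread_t tid;
    pthread_attr_t attr;
    int err;

    if ((err = pthread_attr_init(&attr)) != 0) return err;
#ifdef PTHREAD_STACK_MIN
    if (stack_size < (size_t)PTHREAD_STACK_MIN) stack_size = (size_t)PTHREAD_STACK_MIN;
#endif
    pthread_attr_setstacksize(&attr, stack_size);
    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
    err = pthread_create(&tid, &attr, thread_main, NULL);
    pthread_attr_destroy(&attr);
    return err; /* 0 on success; a detached thread cleans up after itself */
}
#endif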
2120
2121static void
2122native_thread_setup(struct rb_native_thread *nt)
2123{
2124 // init cond
2125 rb_native_cond_initialize(&nt->cond.readyq);
2126
2127 if (&nt->cond.readyq != &nt->cond.intr) {
2128 rb_native_cond_initialize(&nt->cond.intr);
2129 }
2130}
2131
2132static void
2133native_thread_setup_on_thread(struct rb_native_thread *nt)
2134{
2135 // init tid
2136#ifdef RB_THREAD_T_HAS_NATIVE_ID
2137 nt->tid = get_native_thread_id();
2138#endif
2139
2140 // init signal handler
2141 RB_ALTSTACK_INIT(nt->altstack, nt->altstack);
2142}
2143
2144static struct rb_native_thread *
2145native_thread_alloc(void)
2146{
2147 struct rb_native_thread *nt = ZALLOC(struct rb_native_thread);
2148 native_thread_setup(nt);
2149
2150#if USE_MN_THREADS
2151 nt->nt_context = ruby_xmalloc(sizeof(struct coroutine_context));
2152#endif
2153
2154#if USE_RUBY_DEBUG_LOG
2155 static rb_atomic_t nt_serial = 2;
2156 nt->serial = RUBY_ATOMIC_FETCH_ADD(nt_serial, 1);
2157#endif
2158 return nt;
2159}
2160
2161static int
2162native_thread_create_dedicated(rb_thread_t *th)
2163{
2164 th->nt = native_thread_alloc();
2165 th->nt->vm = th->vm;
2166 th->nt->running_thread = th;
2167 th->nt->dedicated = 1;
2168
2169 // vm stack
2170 size_t vm_stack_word_size = th->vm->default_params.thread_vm_stack_size / sizeof(VALUE);
2171 void *vm_stack = ruby_xmalloc(vm_stack_word_size * sizeof(VALUE));
2172 th->sched.malloc_stack = true;
2173 rb_ec_initialize_vm_stack(th->ec, vm_stack, vm_stack_word_size);
2174 th->sched.context_stack = vm_stack;
2175
2176 // setup
2177 thread_sched_to_ready(TH_SCHED(th), th);
2178
2179 return native_thread_create0(th->nt);
2180}
2181
2182static void
2183call_thread_start_func_2(rb_thread_t *th)
2184{
2185 native_thread_init_stack(th);
2186 thread_start_func_2(th, th->ec->machine.stack_start);
2187}
2188
2189static void *
2190nt_start(void *ptr)
2191{
2192 struct rb_native_thread *nt = (struct rb_native_thread *)ptr;
2193 rb_vm_t *vm = nt->vm;
2194
2195 native_thread_setup_on_thread(nt);
2196
2197 // init tid
2198#ifdef RB_THREAD_T_HAS_NATIVE_ID
2199 nt->tid = get_native_thread_id();
2200#endif
2201
2202#if USE_RUBY_DEBUG_LOG && defined(RUBY_NT_SERIAL)
2203 ruby_nt_serial = nt->serial;
2204#endif
2205
2206 RUBY_DEBUG_LOG("nt:%u", nt->serial);
2207
2208 if (!nt->dedicated) {
2209 coroutine_initialize_main(nt->nt_context);
2210 }
2211
2212 while (1) {
2213 if (nt->dedicated) {
2214 // wait running turn
2215 rb_thread_t *th = nt->running_thread;
2216 struct rb_thread_sched *sched = TH_SCHED(th);
2217
2218 RUBY_DEBUG_LOG("on dedicated th:%u", rb_th_serial(th));
2219 ruby_thread_set_native(th);
2220
2221 thread_sched_lock(sched, th);
2222 {
2223 if (sched->running == th) {
2224 thread_sched_add_running_thread(sched, th);
2225 }
2226 thread_sched_wait_running_turn(sched, th, false);
2227 }
2228 thread_sched_unlock(sched, th);
2229
2230 // start threads
2231 call_thread_start_func_2(th);
2232            break; // TODO: allow switching to the SNT
2233 }
2234 else {
2235 RUBY_DEBUG_LOG("check next");
2236 rb_ractor_t *r = ractor_sched_deq(vm, NULL);
2237
2238 if (r) {
2239 struct rb_thread_sched *sched = &r->threads.sched;
2240
2241 thread_sched_lock(sched, NULL);
2242 {
2243 rb_thread_t *next_th = sched->running;
2244
2245 if (next_th && next_th->nt == NULL) {
2246 RUBY_DEBUG_LOG("nt:%d next_th:%d", (int)nt->serial, (int)next_th->serial);
2247 thread_sched_switch0(nt->nt_context, next_th, nt);
2248 }
2249 else {
2250 RUBY_DEBUG_LOG("no schedulable threads -- next_th:%p", next_th);
2251 }
2252 }
2253 thread_sched_unlock(sched, NULL);
2254 }
2255 else {
2256 // timeout -> deleted.
2257 break;
2258 }
2259 }
2260 }
2261
2262 return NULL;
2263}
2264
2265static int native_thread_create_shared(rb_thread_t *th);
2266
2267#if USE_MN_THREADS
2268static void nt_free_stack(void *mstack);
2269#endif
2270
2271void
2272rb_threadptr_remove(rb_thread_t *th)
2273{
2274#if USE_MN_THREADS
2275 if (th->sched.malloc_stack) {
2276 // dedicated
2277 return;
2278 }
2279 else {
2280 rb_vm_t *vm = th->vm;
2281 th->sched.finished = false;
2282
2283 RB_VM_LOCK_ENTER();
2284 {
2285 ccan_list_add(&vm->ractor.sched.zombie_threads, &th->sched.node.zombie_threads);
2286 }
2287 RB_VM_LOCK_LEAVE();
2288 }
2289#endif
2290}
2291
2292void
2293rb_threadptr_sched_free(rb_thread_t *th)
2294{
2295#if USE_MN_THREADS
2296 if (th->sched.malloc_stack) {
2297 // has dedicated
2298 ruby_xfree(th->sched.context_stack);
2299 native_thread_destroy(th->nt);
2300 }
2301 else {
2302 nt_free_stack(th->sched.context_stack);
2303 // TODO: how to free nt and nt->altstack?
2304 }
2305
2306 if (th->sched.context) {
2307 ruby_xfree(th->sched.context);
2308 VM_ASSERT((th->sched.context = NULL) == NULL);
2309 }
2310#else
2311 ruby_xfree(th->sched.context_stack);
2312 native_thread_destroy(th->nt);
2313#endif
2314
2315 th->nt = NULL;
2316}
2317
2318void
2319rb_thread_sched_mark_zombies(rb_vm_t *vm)
2320{
2321 if (!ccan_list_empty(&vm->ractor.sched.zombie_threads)) {
2322 rb_thread_t *zombie_th, *next_zombie_th;
2323 ccan_list_for_each_safe(&vm->ractor.sched.zombie_threads, zombie_th, next_zombie_th, sched.node.zombie_threads) {
2324 if (zombie_th->sched.finished) {
2325 ccan_list_del_init(&zombie_th->sched.node.zombie_threads);
2326 }
2327 else {
2328 rb_gc_mark(zombie_th->self);
2329 }
2330 }
2331 }
2332}
2333
2334static int
2335native_thread_create(rb_thread_t *th)
2336{
2337 VM_ASSERT(th->nt == 0);
2338 RUBY_DEBUG_LOG("th:%d has_dnt:%d", th->serial, th->has_dedicated_nt);
2339 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_STARTED, th);
2340
2341 if (!th->ractor->threads.sched.enable_mn_threads) {
2342 th->has_dedicated_nt = 1;
2343 }
2344
2345 if (th->has_dedicated_nt) {
2346 return native_thread_create_dedicated(th);
2347 }
2348 else {
2349 return native_thread_create_shared(th);
2350 }
2351}
2352
2353#if USE_NATIVE_THREAD_PRIORITY
2354
2355static void
2356native_thread_apply_priority(rb_thread_t *th)
2357{
2358#if defined(_POSIX_PRIORITY_SCHEDULING) && (_POSIX_PRIORITY_SCHEDULING > 0)
2359 struct sched_param sp;
2360 int policy;
2361 int priority = 0 - th->priority;
2362 int max, min;
2363 pthread_getschedparam(th->nt->thread_id, &policy, &sp);
2364 max = sched_get_priority_max(policy);
2365 min = sched_get_priority_min(policy);
2366
2367 if (min > priority) {
2368 priority = min;
2369 }
2370 else if (max < priority) {
2371 priority = max;
2372 }
2373
2374 sp.sched_priority = priority;
2375 pthread_setschedparam(th->nt->thread_id, policy, &sp);
2376#else
2377 /* not touched */
2378#endif
2379}
2380
2381#endif /* USE_NATIVE_THREAD_PRIORITY */
2382
2383static int
2384native_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout, rb_thread_t *th)
2385{
2386 return rb_fd_select(n, readfds, writefds, exceptfds, timeout);
2387}
2388
2389static void
2390ubf_pthread_cond_signal(void *ptr)
2391{
2392 rb_thread_t *th = (rb_thread_t *)ptr;
2393 RUBY_DEBUG_LOG("th:%u on nt:%d", rb_th_serial(th), (int)th->nt->serial);
2394 rb_native_cond_signal(&th->nt->cond.intr);
2395}
2396
2397static void
2398native_cond_sleep(rb_thread_t *th, rb_hrtime_t *rel)
2399{
2400 rb_nativethread_lock_t *lock = &th->interrupt_lock;
2401 rb_nativethread_cond_t *cond = &th->nt->cond.intr;
2402
2403    /* Solaris cond_timedwait() returns EINVAL if the absolute timeout is more
2404     * than 100,000,000 seconds past the current time, so cap the relative
2405     * timeout at 100,000,000 seconds.  Hitting the cap is treated as a kind of
2406     * spurious wakeup; the caller of native_sleep must handle spurious wakeups.
2407     *
2408     * See also [Bug #1341] [ruby-core:29702]
2409     * http://download.oracle.com/docs/cd/E19683-01/816-0216/6m6ngupgv/index.html
2410     */
2411 const rb_hrtime_t max = (rb_hrtime_t)100000000 * RB_HRTIME_PER_SEC;
2412
2413 THREAD_BLOCKING_BEGIN(th);
2414    {
2415        rb_native_mutex_lock(lock);
2416        th->unblock.func = ubf_pthread_cond_signal;
2417 th->unblock.arg = th;
2418
2419 if (RUBY_VM_INTERRUPTED(th->ec)) {
2420            /* interrupted; return immediately */
2421 RUBY_DEBUG_LOG("interrupted before sleep th:%u", rb_th_serial(th));
2422 }
2423 else {
2424 if (!rel) {
2425 rb_native_cond_wait(cond, lock);
2426 }
2427 else {
2428 rb_hrtime_t end;
2429
2430 if (*rel > max) {
2431 *rel = max;
2432 }
2433
2434 end = native_cond_timeout(cond, *rel);
2435 native_cond_timedwait(cond, lock, &end);
2436 }
2437 }
2438 th->unblock.func = 0;
2439
2440        rb_native_mutex_unlock(lock);
2441    }
2442 THREAD_BLOCKING_END(th);
2443
2444 RUBY_DEBUG_LOG("done th:%u", rb_th_serial(th));
2445}
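/*
 * Illustrative sketch (not part of the original file): the timed sleep above
 * ultimately becomes a condition variable bound to CLOCK_MONOTONIC (see
 * condattr_monotonic near the top of this file) plus an absolute deadline,
 * with spurious wakeups left to the caller.  A minimal standalone version of
 * that pattern, assuming pthread_condattr_setclock() is available;
 * cond_init_monotonic and sleep_rel_ms are hypothetical names.
 */
#if 0 /* standalone example, excluded from the build */
#include <pthread.h>
#include <time.h>
#include <errno.h>

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond; /* initialized by cond_init_monotonic() */

static void
cond_init_monotonic(void)
{
    pthread_condattr_t attr;
    pthread_condattr_init(&attr);
    pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
    pthread_cond_init(&cond, &attr);
    pthread_condattr_destroy(&attr);
}

/* sleeps up to rel_ms milliseconds; returns 0 if signalled (or woken
 * spuriously), ETIMEDOUT if the deadline passed */
static int
sleep_rel_ms(long rel_ms)
{
    struct timespec abs;
    int r;

    clock_gettime(CLOCK_MONOTONIC, &abs);
    abs.tv_sec  += rel_ms / 1000;
    abs.tv_nsec += (rel_ms % 1000) * 1000000L;
    if (abs.tv_nsec >= 1000000000L) { abs.tv_sec++; abs.tv_nsec -= 1000000000L; }

    pthread_mutex_lock(&mtx);
    r = pthread_cond_timedwait(&cond, &mtx, &abs);
    pthread_mutex_unlock(&mtx);
    return r;
}
#endif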
2446
2447#ifdef USE_UBF_LIST
2448static CCAN_LIST_HEAD(ubf_list_head);
2449static rb_nativethread_lock_t ubf_list_lock = RB_NATIVETHREAD_LOCK_INIT;
2450
2451static void
2452ubf_list_atfork(void)
2453{
2454 ccan_list_head_init(&ubf_list_head);
2455 rb_native_mutex_initialize(&ubf_list_lock);
2456}
2457
2458RBIMPL_ATTR_MAYBE_UNUSED()
2459static bool
2460ubf_list_contain_p(rb_thread_t *th)
2461{
2462 rb_thread_t *list_th;
2463 ccan_list_for_each(&ubf_list_head, list_th, sched.node.ubf) {
2464 if (list_th == th) return true;
2465 }
2466 return false;
2467}
2468
2469/* Register the thread 'th' as one that we are trying to unblock. */
2470static void
2471register_ubf_list(rb_thread_t *th)
2472{
2473 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
2474 struct ccan_list_node *node = &th->sched.node.ubf;
2475
2476 VM_ASSERT(th->unblock.func != NULL);
2477
2478 rb_native_mutex_lock(&ubf_list_lock);
2479 {
2480 // check not connected yet
2481 if (ccan_list_empty((struct ccan_list_head*)node)) {
2482 VM_ASSERT(!ubf_list_contain_p(th));
2483 ccan_list_add(&ubf_list_head, node);
2484 }
2485 }
2486 rb_native_mutex_unlock(&ubf_list_lock);
2487
2488 timer_thread_wakeup();
2489}
2490
2491/* The thread 'th' has been unblocked; it no longer needs to be registered. */
2492static void
2493unregister_ubf_list(rb_thread_t *th)
2494{
2495 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
2496 struct ccan_list_node *node = &th->sched.node.ubf;
2497
2498 /* we can't allow re-entry into ubf_list_head */
2499 VM_ASSERT(th->unblock.func == NULL);
2500
2501 if (!ccan_list_empty((struct ccan_list_head*)node)) {
2502 rb_native_mutex_lock(&ubf_list_lock);
2503 {
2504 VM_ASSERT(ubf_list_contain_p(th));
2505 ccan_list_del_init(node);
2506 }
2507 rb_native_mutex_unlock(&ubf_list_lock);
2508 }
2509}
2510
2511/*
2512 * Send a signal so that the target thread returns from a blocking syscall.
2513 * Probably any signal would do, but we use SIGVTALRM.
2514 */
2515static void
2516ubf_wakeup_thread(rb_thread_t *th)
2517{
2518 RUBY_DEBUG_LOG("th:%u thread_id:%p", rb_th_serial(th), (void *)th->nt->thread_id);
2519
2520 pthread_kill(th->nt->thread_id, SIGVTALRM);
2521}
2522
2523static void
2524ubf_select(void *ptr)
2525{
2526 rb_thread_t *th = (rb_thread_t *)ptr;
2527 RUBY_DEBUG_LOG("wakeup th:%u", rb_th_serial(th));
2528 ubf_wakeup_thread(th);
2529 register_ubf_list(th);
2530}
2531
2532static bool
2533ubf_threads_empty(void)
2534{
2535 return ccan_list_empty(&ubf_list_head) != 0;
2536}
2537
2538static void
2539ubf_wakeup_all_threads(void)
2540{
2541 if (!ubf_threads_empty()) {
2542 rb_thread_t *th;
2543 rb_native_mutex_lock(&ubf_list_lock);
2544 {
2545 ccan_list_for_each(&ubf_list_head, th, sched.node.ubf) {
2546 ubf_wakeup_thread(th);
2547 }
2548 }
2549 rb_native_mutex_unlock(&ubf_list_lock);
2550 }
2551}
2552
2553#else /* USE_UBF_LIST */
2554#define register_ubf_list(th) (void)(th)
2555#define unregister_ubf_list(th) (void)(th)
2556#define ubf_select 0
2557static void ubf_wakeup_all_threads(void) { return; }
2558static bool ubf_threads_empty(void) { return true; }
2559#define ubf_list_atfork() do {} while (0)
2560#endif /* USE_UBF_LIST */
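/*
 * Illustrative sketch (not part of the original file): the UBF machinery above
 * kicks a thread out of a blocking syscall with pthread_kill(SIGVTALRM).  The
 * two ingredients are a handler installed without SA_RESTART (so the syscall
 * fails with EINTR instead of being restarted) and a pthread_kill() from
 * another thread; the function names here are hypothetical.
 */
#if 0 /* standalone example, excluded from the build */
#include <signal.h>
#include <pthread.h>

static void
noop_handler(int sig)
{
    (void)sig; /* nothing to do; delivery alone interrupts the syscall */
}

static void
install_interrupt_signal(void)
{
    struct sigaction sa;
    sigemptyset(&sa.sa_mask);
    sa.sa_flags = 0;              /* deliberately no SA_RESTART */
    sa.sa_handler = noop_handler;
    sigaction(SIGVTALRM, &sa, NULL);
}

static void
interrupt_thread(pthread_t target)
{
    /* a read()/poll() blocking in `target` returns -1 with errno == EINTR */
    pthread_kill(target, SIGVTALRM);
}
#endif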
2561
2562#define TT_DEBUG 0
2563#define WRITE_CONST(fd, str) (void)(write((fd),(str),sizeof(str)-1)<0)
2564
2565void
2566rb_thread_wakeup_timer_thread(int sig)
2567{
2568    // This function can be called from signal handlers, so
2569    // pthread_mutex_lock() must not be used here.
2570
2571 // wakeup timer thread
2572 timer_thread_wakeup_force();
2573
2574 // interrupt main thread if main thread is available
2575 if (system_working) {
2576 rb_vm_t *vm = GET_VM();
2577 rb_thread_t *main_th = vm->ractor.main_thread;
2578
2579 if (main_th) {
2580 volatile rb_execution_context_t *main_th_ec = ACCESS_ONCE(rb_execution_context_t *, main_th->ec);
2581
2582 if (main_th_ec) {
2583 RUBY_VM_SET_TRAP_INTERRUPT(main_th_ec);
2584
2585 if (vm->ubf_async_safe && main_th->unblock.func) {
2586 (main_th->unblock.func)(main_th->unblock.arg);
2587 }
2588 }
2589 }
2590 }
2591}
2592
2593#define CLOSE_INVALIDATE_PAIR(expr) \
2594 close_invalidate_pair(expr,"close_invalidate: "#expr)
2595static void
2596close_invalidate(int *fdp, const char *msg)
2597{
2598 int fd = *fdp;
2599
2600 *fdp = -1;
2601 if (close(fd) < 0) {
2602 async_bug_fd(msg, errno, fd);
2603 }
2604}
2605
2606static void
2607close_invalidate_pair(int fds[2], const char *msg)
2608{
2609 if (USE_EVENTFD && fds[0] == fds[1]) {
2610 fds[1] = -1; // disable write port first
2611 close_invalidate(&fds[0], msg);
2612 }
2613 else {
2614 close_invalidate(&fds[1], msg);
2615 close_invalidate(&fds[0], msg);
2616 }
2617}
2618
2619static void
2620set_nonblock(int fd)
2621{
2622 int oflags;
2623 int err;
2624
2625 oflags = fcntl(fd, F_GETFL);
2626 if (oflags == -1)
2627 rb_sys_fail(0);
2628 oflags |= O_NONBLOCK;
2629 err = fcntl(fd, F_SETFL, oflags);
2630 if (err == -1)
2631 rb_sys_fail(0);
2632}
2633
2634/* communication pipe with timer thread and signal handler */
2635static void
2636setup_communication_pipe_internal(int pipes[2])
2637{
2638 int err;
2639
2640 if (pipes[0] > 0 || pipes[1] > 0) {
2641 VM_ASSERT(pipes[0] > 0);
2642 VM_ASSERT(pipes[1] > 0);
2643 return;
2644 }
2645
2646    /*
2647     * Don't bother with eventfd on ancient Linux 2.6.22..2.6.26, which were
2648     * missing the EFD_* flags; those kernels fall back to a pipe.
2649     */
2650#if USE_EVENTFD && defined(EFD_NONBLOCK) && defined(EFD_CLOEXEC)
2651 pipes[0] = pipes[1] = eventfd(0, EFD_NONBLOCK|EFD_CLOEXEC);
2652
2653 if (pipes[0] >= 0) {
2654 rb_update_max_fd(pipes[0]);
2655 return;
2656 }
2657#endif
2658
2659 err = rb_cloexec_pipe(pipes);
2660 if (err != 0) {
2661 rb_bug("can not create communication pipe");
2662 }
2663 rb_update_max_fd(pipes[0]);
2664 rb_update_max_fd(pipes[1]);
2665 set_nonblock(pipes[0]);
2666 set_nonblock(pipes[1]);
2667}
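/*
 * Illustrative sketch (not part of the original file): the wakeup channel set
 * up above is either a single eventfd (read end == write end) or a
 * non-blocking pipe.  The same "eventfd if possible, otherwise pipe" choice
 * in standalone form; open_wakeup_channel is a hypothetical name and the
 * pipe fallback omits the close-on-exec handling done by rb_cloexec_pipe().
 */
#if 0 /* standalone example, excluded from the build */
#include <unistd.h>
#include <fcntl.h>
#ifdef __linux__
# include <sys/eventfd.h>
#endif

/* fills fds[0] (read side) and fds[1] (write side); returns 0 on success */
static int
open_wakeup_channel(int fds[2])
{
#if defined(__linux__) && defined(EFD_NONBLOCK) && defined(EFD_CLOEXEC)
    int efd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
    if (efd >= 0) {
        fds[0] = fds[1] = efd; /* one descriptor serves both directions */
        return 0;
    }
#endif
    if (pipe(fds) != 0) return -1;
    fcntl(fds[0], F_SETFL, fcntl(fds[0], F_GETFL) | O_NONBLOCK);
    fcntl(fds[1], F_SETFL, fcntl(fds[1], F_GETFL) | O_NONBLOCK);
    return 0;
}
#endif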
2668
2669#if !defined(SET_CURRENT_THREAD_NAME) && defined(__linux__) && defined(PR_SET_NAME)
2670# define SET_CURRENT_THREAD_NAME(name) prctl(PR_SET_NAME, name)
2671#endif
2672
2673enum {
2674 THREAD_NAME_MAX =
2675#if defined(__linux__)
2676 16
2677#elif defined(__APPLE__)
2678/* Undocumented, and main thread seems unlimited */
2679 64
2680#else
2681 16
2682#endif
2683};
2684
2685static VALUE threadptr_invoke_proc_location(rb_thread_t *th);
2686
2687static void
2688native_set_thread_name(rb_thread_t *th)
2689{
2690#ifdef SET_CURRENT_THREAD_NAME
2691 VALUE loc;
2692 if (!NIL_P(loc = th->name)) {
2693 SET_CURRENT_THREAD_NAME(RSTRING_PTR(loc));
2694 }
2695 else if ((loc = threadptr_invoke_proc_location(th)) != Qnil) {
2696 char *name, *p;
2697 char buf[THREAD_NAME_MAX];
2698 size_t len;
2699 int n;
2700
2701 name = RSTRING_PTR(RARRAY_AREF(loc, 0));
2702 p = strrchr(name, '/'); /* show only the basename of the path. */
2703 if (p && p[1])
2704 name = p + 1;
2705
2706 n = snprintf(buf, sizeof(buf), "%s:%d", name, NUM2INT(RARRAY_AREF(loc, 1)));
2707 RB_GC_GUARD(loc);
2708
2709 len = (size_t)n;
2710 if (len >= sizeof(buf)) {
2711 buf[sizeof(buf)-2] = '*';
2712 buf[sizeof(buf)-1] = '\0';
2713 }
2714 SET_CURRENT_THREAD_NAME(buf);
2715 }
2716#endif
2717}
2718
2719static void
2720native_set_another_thread_name(rb_nativethread_id_t thread_id, VALUE name)
2721{
2722#if defined SET_ANOTHER_THREAD_NAME || defined SET_CURRENT_THREAD_NAME
2723 char buf[THREAD_NAME_MAX];
2724 const char *s = "";
2725# if !defined SET_ANOTHER_THREAD_NAME
2726 if (!pthread_equal(pthread_self(), thread_id)) return;
2727# endif
2728 if (!NIL_P(name)) {
2729 long n;
2730 RSTRING_GETMEM(name, s, n);
2731 if (n >= (int)sizeof(buf)) {
2732 memcpy(buf, s, sizeof(buf)-1);
2733 buf[sizeof(buf)-1] = '\0';
2734 s = buf;
2735 }
2736 }
2737# if defined SET_ANOTHER_THREAD_NAME
2738 SET_ANOTHER_THREAD_NAME(thread_id, s);
2739# elif defined SET_CURRENT_THREAD_NAME
2740 SET_CURRENT_THREAD_NAME(s);
2741# endif
2742#endif
2743}
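/*
 * Illustrative sketch (not part of the original file): thread naming is
 * platform specific -- prctl(PR_SET_NAME) on Linux (15 visible characters
 * plus NUL) and pthread_setname_np(name) on macOS (calling thread only).
 * A hedged helper showing the same truncation the code above performs;
 * set_current_thread_name_example is a hypothetical name.
 */
#if 0 /* standalone example, excluded from the build */
#include <string.h>
#include <pthread.h>
#ifdef __linux__
# include <sys/prctl.h>
#endif

static void
set_current_thread_name_example(const char *name)
{
    char buf[16];                        /* Linux limit, including the NUL */
    strncpy(buf, name, sizeof(buf) - 1);
    buf[sizeof(buf) - 1] = '\0';
#if defined(__linux__) && defined(PR_SET_NAME)
    prctl(PR_SET_NAME, buf);
#elif defined(__APPLE__)
    pthread_setname_np(buf);             /* macOS: only the calling thread */
#else
    (void)buf;                           /* no portable fallback */
#endif
}
#endif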
2744
2745#if defined(RB_THREAD_T_HAS_NATIVE_ID) || defined(__APPLE__)
2746static VALUE
2747native_thread_native_thread_id(rb_thread_t *target_th)
2748{
2749 if (!target_th->nt) return Qnil;
2750
2751#ifdef RB_THREAD_T_HAS_NATIVE_ID
2752 int tid = target_th->nt->tid;
2753 if (tid == 0) return Qnil;
2754 return INT2FIX(tid);
2755#elif defined(__APPLE__)
2756 uint64_t tid;
2757/* The first condition is needed because MAC_OS_X_VERSION_10_6
2758 is not defined on 10.5, and while __POWERPC__ takes care of ppc/ppc64,
2759 i386 will be broken without this. Note, 10.5 is supported with GCC upstream,
2760 so it has C++17 and everything needed to build modern Ruby. */
2761# if (!defined(MAC_OS_X_VERSION_10_6) || \
2762 (MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_6) || \
2763 defined(__POWERPC__) /* never defined for PowerPC platforms */)
2764 const bool no_pthread_threadid_np = true;
2765# define NO_PTHREAD_MACH_THREAD_NP 1
2766# elif MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_6
2767 const bool no_pthread_threadid_np = false;
2768# else
2769# if !(defined(__has_attribute) && __has_attribute(availability))
2770 /* __API_AVAILABLE macro does nothing on gcc */
2771 __attribute__((weak)) int pthread_threadid_np(pthread_t, uint64_t*);
2772# endif
2773 /* Check weakly linked symbol */
2774 const bool no_pthread_threadid_np = !&pthread_threadid_np;
2775# endif
2776 if (no_pthread_threadid_np) {
2777 return ULL2NUM(pthread_mach_thread_np(pthread_self()));
2778 }
2779# ifndef NO_PTHREAD_MACH_THREAD_NP
2780 int e = pthread_threadid_np(target_th->nt->thread_id, &tid);
2781 if (e != 0) rb_syserr_fail(e, "pthread_threadid_np");
2782 return ULL2NUM((unsigned long long)tid);
2783# endif
2784#endif
2785}
2786# define USE_NATIVE_THREAD_NATIVE_THREAD_ID 1
2787#else
2788# define USE_NATIVE_THREAD_NATIVE_THREAD_ID 0
2789#endif
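/*
 * Illustrative sketch (not part of the original file): querying the
 * kernel-level thread id outside of Ruby.  Linux exposes it via
 * syscall(SYS_gettid) (gettid(2)); macOS 10.6+ fills a 64-bit id through
 * pthread_threadid_np().  current_native_thread_id is a hypothetical name.
 */
#if 0 /* standalone example, excluded from the build */
#include <stdint.h>
#ifdef __linux__
# include <unistd.h>
# include <sys/syscall.h>
#elif defined(__APPLE__)
# include <pthread.h>
#endif

static uint64_t
current_native_thread_id(void)
{
#ifdef __linux__
    return (uint64_t)syscall(SYS_gettid);
#elif defined(__APPLE__)
    uint64_t tid = 0;
    pthread_threadid_np(pthread_self(), &tid);
    return tid;
#else
    return 0; /* no generally portable answer */
#endif
}
#endif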
2790
2791static struct {
2792 rb_serial_t created_fork_gen;
2793 pthread_t pthread_id;
2794
2795 int comm_fds[2]; // r, w
2796
2797#if (HAVE_SYS_EPOLL_H || HAVE_SYS_EVENT_H) && USE_MN_THREADS
2798 int event_fd; // kernel event queue fd (epoll/kqueue)
2799#endif
2800#if HAVE_SYS_EPOLL_H && USE_MN_THREADS
2801#define EPOLL_EVENTS_MAX 0x10
2802 struct epoll_event finished_events[EPOLL_EVENTS_MAX];
2803#elif HAVE_SYS_EVENT_H && USE_MN_THREADS
2804#define KQUEUE_EVENTS_MAX 0x10
2805 struct kevent finished_events[KQUEUE_EVENTS_MAX];
2806#endif
2807
2808 // waiting threads list
2809 struct ccan_list_head waiting; // waiting threads in ractors
2810 pthread_mutex_t waiting_lock;
2811} timer_th = {
2812 .created_fork_gen = 0,
2813};
2814
2815#define TIMER_THREAD_CREATED_P() (timer_th.created_fork_gen == current_fork_gen)
2816
2817static void timer_thread_check_timeslice(rb_vm_t *vm);
2818static int timer_thread_set_timeout(rb_vm_t *vm);
2819static void timer_thread_wakeup_thread(rb_thread_t *th);
2820
2821#include "thread_pthread_mn.c"
2822
2823static int
2824timer_thread_set_timeout(rb_vm_t *vm)
2825{
2826#if 0
2827 return 10; // ms
2828#else
2829 int timeout = -1;
2830
2831 ractor_sched_lock(vm, NULL);
2832 {
2833 if ( !ccan_list_empty(&vm->ractor.sched.timeslice_threads) // (1-1) Provide time slice for active NTs
2834 || !ubf_threads_empty() // (1-3) Periodic UBF
2835 || vm->ractor.sched.grq_cnt > 0 // (1-4) Lazy GRQ deq start
2836 ) {
2837
2838 RUBY_DEBUG_LOG("timeslice:%d ubf:%d grq:%d",
2839 !ccan_list_empty(&vm->ractor.sched.timeslice_threads),
2840 !ubf_threads_empty(),
2841 (vm->ractor.sched.grq_cnt > 0));
2842
2843 timeout = 10; // ms
2844 vm->ractor.sched.timeslice_wait_inf = false;
2845 }
2846 else {
2847 vm->ractor.sched.timeslice_wait_inf = true;
2848 }
2849 }
2850 ractor_sched_unlock(vm, NULL);
2851
2852 if (vm->ractor.sched.timeslice_wait_inf) {
2853 rb_native_mutex_lock(&timer_th.waiting_lock);
2854 {
2855 rb_thread_t *th = ccan_list_top(&timer_th.waiting, rb_thread_t, sched.waiting_reason.node);
2856 if (th && (th->sched.waiting_reason.flags & thread_sched_waiting_timeout)) {
2857 rb_hrtime_t now = rb_hrtime_now();
2858 rb_hrtime_t hrrel = rb_hrtime_sub(th->sched.waiting_reason.data.timeout, now);
2859
2860 RUBY_DEBUG_LOG("th:%u now:%lu rel:%lu", rb_th_serial(th), (unsigned long)now, (unsigned long)hrrel);
2861
2862 // TODO: overflow?
2863 timeout = (int)((hrrel + RB_HRTIME_PER_MSEC - 1) / RB_HRTIME_PER_MSEC); // ms
2864 }
2865 }
2866 rb_native_mutex_unlock(&timer_th.waiting_lock);
2867 }
2868
2869 RUBY_DEBUG_LOG("timeout:%d inf:%d", timeout, (int)vm->ractor.sched.timeslice_wait_inf);
2870
2871 // fprintf(stderr, "timeout:%d\n", timeout);
2872 return timeout;
2873#endif
2874}
2875
2876static void
2877timer_thread_check_signal(rb_vm_t *vm)
2878{
2879 // ruby_sigchld_handler(vm); TODO
2880
2881 int signum = rb_signal_buff_size();
2882 if (UNLIKELY(signum > 0) && vm->ractor.main_thread) {
2883 RUBY_DEBUG_LOG("signum:%d", signum);
2884 threadptr_trap_interrupt(vm->ractor.main_thread);
2885 }
2886}
2887
2888static bool
2889timer_thread_check_exceed(rb_hrtime_t abs, rb_hrtime_t now)
2890{
2891 if (abs < now) {
2892 return true;
2893 }
2894 else if (abs - now < RB_HRTIME_PER_MSEC) {
2895 return true; // too short time
2896 }
2897 else {
2898 return false;
2899 }
2900}
2901
2902static rb_thread_t *
2903timer_thread_deq_wakeup(rb_vm_t *vm, rb_hrtime_t now)
2904{
2905 rb_thread_t *th = ccan_list_top(&timer_th.waiting, rb_thread_t, sched.waiting_reason.node);
2906
2907 if (th != NULL &&
2908 (th->sched.waiting_reason.flags & thread_sched_waiting_timeout) &&
2909 timer_thread_check_exceed(th->sched.waiting_reason.data.timeout, now)) {
2910
2911 RUBY_DEBUG_LOG("wakeup th:%u", rb_th_serial(th));
2912
2913 // delete from waiting list
2914 ccan_list_del_init(&th->sched.waiting_reason.node);
2915
2916 // setup result
2917 th->sched.waiting_reason.flags = thread_sched_waiting_none;
2918 th->sched.waiting_reason.data.result = 0;
2919
2920 return th;
2921 }
2922
2923 return NULL;
2924}
2925
2926static void
2927timer_thread_wakeup_thread(rb_thread_t *th)
2928{
2929 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
2930 struct rb_thread_sched *sched = TH_SCHED(th);
2931
2932 thread_sched_lock(sched, th);
2933 {
2934 if (sched->running != th) {
2935 thread_sched_to_ready_common(sched, th, true, false);
2936 }
2937        else {
2938            // the running thread will release the execution right by itself
2939        }
2940 }
2941 thread_sched_unlock(sched, th);
2942}
2943
2944static void
2945timer_thread_check_timeout(rb_vm_t *vm)
2946{
2947 rb_hrtime_t now = rb_hrtime_now();
2948 rb_thread_t *th;
2949
2950 rb_native_mutex_lock(&timer_th.waiting_lock);
2951 {
2952 while ((th = timer_thread_deq_wakeup(vm, now)) != NULL) {
2953 timer_thread_wakeup_thread(th);
2954 }
2955 }
2956 rb_native_mutex_unlock(&timer_th.waiting_lock);
2957}
2958
2959static void
2960timer_thread_check_timeslice(rb_vm_t *vm)
2961{
2962 // TODO: check time
2963 rb_thread_t *th;
2964 ccan_list_for_each(&vm->ractor.sched.timeslice_threads, th, sched.node.timeslice_threads) {
2965 RUBY_DEBUG_LOG("timeslice th:%u", rb_th_serial(th));
2966 RUBY_VM_SET_TIMER_INTERRUPT(th->ec);
2967 }
2968}
2969
2970void
2971rb_assert_sig(void)
2972{
2973 sigset_t oldmask;
2974 pthread_sigmask(0, NULL, &oldmask);
2975 if (sigismember(&oldmask, SIGVTALRM)) {
2976 rb_bug("!!!");
2977 }
2978 else {
2979 RUBY_DEBUG_LOG("ok");
2980 }
2981}
2982
2983static void *
2984timer_thread_func(void *ptr)
2985{
2986 rb_vm_t *vm = (rb_vm_t *)ptr;
2987#if defined(RUBY_NT_SERIAL)
2988 ruby_nt_serial = (rb_atomic_t)-1;
2989#endif
2990
2991 RUBY_DEBUG_LOG("started%s", "");
2992
2993 while (system_working) {
2994 timer_thread_check_signal(vm);
2995 timer_thread_check_timeout(vm);
2996 ubf_wakeup_all_threads();
2997
2998 RUBY_DEBUG_LOG("system_working:%d", system_working);
2999 timer_thread_polling(vm);
3000 }
3001
3002 RUBY_DEBUG_LOG("terminated");
3003 return NULL;
3004}
3005
3006/* only use signal-safe system calls here */
3007static void
3008signal_communication_pipe(int fd)
3009{
3010#if USE_EVENTFD
3011 const uint64_t buff = 1;
3012#else
3013 const char buff = '!';
3014#endif
3015 ssize_t result;
3016
3017 /* already opened */
3018 if (fd >= 0) {
3019 retry:
3020 if ((result = write(fd, &buff, sizeof(buff))) <= 0) {
3021 int e = errno;
3022 switch (e) {
3023 case EINTR: goto retry;
3024 case EAGAIN:
3025#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
3026 case EWOULDBLOCK:
3027#endif
3028 break;
3029 default:
3030 async_bug_fd("rb_thread_wakeup_timer_thread: write", e, fd);
3031 }
3032 }
3033 if (TT_DEBUG) WRITE_CONST(2, "rb_thread_wakeup_timer_thread: write\n");
3034 }
3035 else {
3036 // ignore wakeup
3037 }
3038}
3039
3040static void
3041timer_thread_wakeup_force(void)
3042{
3043 // should not use RUBY_DEBUG_LOG() because it can be called within signal handlers.
3044 signal_communication_pipe(timer_th.comm_fds[1]);
3045}
3046
3047static void
3048timer_thread_wakeup_locked(rb_vm_t *vm)
3049{
3050 // should be locked before.
3051 ASSERT_ractor_sched_locked(vm, NULL);
3052
3053 if (timer_th.created_fork_gen == current_fork_gen) {
3054 if (vm->ractor.sched.timeslice_wait_inf) {
3055 RUBY_DEBUG_LOG("wakeup with fd:%d", timer_th.comm_fds[1]);
3056 timer_thread_wakeup_force();
3057 }
3058 else {
3059 RUBY_DEBUG_LOG("will be wakeup...");
3060 }
3061 }
3062}
3063
3064static void
3065timer_thread_wakeup(void)
3066{
3067 rb_vm_t *vm = GET_VM();
3068
3069 ractor_sched_lock(vm, NULL);
3070 {
3071 timer_thread_wakeup_locked(vm);
3072 }
3073 ractor_sched_unlock(vm, NULL);
3074}
3075
3076static void
3077rb_thread_create_timer_thread(void)
3078{
3079 rb_serial_t created_fork_gen = timer_th.created_fork_gen;
3080
3081 RUBY_DEBUG_LOG("fork_gen create:%d current:%d", (int)created_fork_gen, (int)current_fork_gen);
3082
3083 timer_th.created_fork_gen = current_fork_gen;
3084
3085 if (created_fork_gen != current_fork_gen) {
3086 if (created_fork_gen != 0) {
3087 RUBY_DEBUG_LOG("forked child process");
3088
3089 CLOSE_INVALIDATE_PAIR(timer_th.comm_fds);
3090#if HAVE_SYS_EPOLL_H && USE_MN_THREADS
3091 close_invalidate(&timer_th.event_fd, "close event_fd");
3092#endif
3093 rb_native_mutex_destroy(&timer_th.waiting_lock);
3094 }
3095
3096 ccan_list_head_init(&timer_th.waiting);
3097 rb_native_mutex_initialize(&timer_th.waiting_lock);
3098
3099 // open communication channel
3100 setup_communication_pipe_internal(timer_th.comm_fds);
3101
3102 // open event fd
3103 timer_thread_setup_mn();
3104 }
3105
3106 pthread_create(&timer_th.pthread_id, NULL, timer_thread_func, GET_VM());
3107}
3108
3109static int
3110native_stop_timer_thread(void)
3111{
3112 int stopped;
3113 stopped = --system_working <= 0;
3114
3115 if (stopped) {
3116 RUBY_DEBUG_LOG("wakeup send %d", timer_th.comm_fds[1]);
3117 timer_thread_wakeup_force();
3118 RUBY_DEBUG_LOG("wakeup sent");
3119 pthread_join(timer_th.pthread_id, NULL);
3120 }
3121
3122 if (TT_DEBUG) fprintf(stderr, "stop timer thread\n");
3123 return stopped;
3124}
3125
3126static void
3127native_reset_timer_thread(void)
3128{
3129 //
3130}
3131
3132#ifdef HAVE_SIGALTSTACK
3133int
3134ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)
3135{
3136 void *base;
3137 size_t size;
3138 const size_t water_mark = 1024 * 1024;
3139 STACK_GROW_DIR_DETECTION;
3140
3141#ifdef STACKADDR_AVAILABLE
3142 if (get_stack(&base, &size) == 0) {
3143# ifdef __APPLE__
3144 if (pthread_equal(th->nt->thread_id, native_main_thread.id)) {
3145 struct rlimit rlim;
3146 if (getrlimit(RLIMIT_STACK, &rlim) == 0 && rlim.rlim_cur > size) {
3147 size = (size_t)rlim.rlim_cur;
3148 }
3149 }
3150# endif
3151 base = (char *)base + STACK_DIR_UPPER(+size, -size);
3152 }
3153 else
3154#endif
3155 if (th) {
3156 size = th->ec->machine.stack_maxsize;
3157 base = (char *)th->ec->machine.stack_start - STACK_DIR_UPPER(0, size);
3158 }
3159 else {
3160 return 0;
3161 }
3162 size /= RUBY_STACK_SPACE_RATIO;
3163 if (size > water_mark) size = water_mark;
3164 if (IS_STACK_DIR_UPPER()) {
3165 if (size > ~(size_t)base+1) size = ~(size_t)base+1;
3166 if (addr > base && addr <= (void *)((char *)base + size)) return 1;
3167 }
3168 else {
3169 if (size > (size_t)base) size = (size_t)base;
3170 if (addr > (void *)((char *)base - size) && addr <= base) return 1;
3171 }
3172 return 0;
3173}
3174#endif
3175
3176int
3177rb_reserved_fd_p(int fd)
3178{
3179 /* no false-positive if out-of-FD at startup */
3180 if (fd < 0) return 0;
3181
3182 if (fd == timer_th.comm_fds[0] ||
3183 fd == timer_th.comm_fds[1]
3184#if (HAVE_SYS_EPOLL_H || HAVE_SYS_EVENT_H) && USE_MN_THREADS
3185 || fd == timer_th.event_fd
3186#endif
3187 ) {
3188 goto check_fork_gen;
3189 }
3190 return 0;
3191
3192 check_fork_gen:
3193 if (timer_th.created_fork_gen == current_fork_gen) {
3194 /* async-signal-safe */
3195 return 1;
3196 }
3197 else {
3198 return 0;
3199 }
3200}
3201
3202rb_nativethread_id_t
3203rb_nativethread_self(void)
3204{
3205 return pthread_self();
3206}
3207
3208#if defined(USE_POLL) && !defined(HAVE_PPOLL)
3209/* TODO: don't ignore sigmask */
3210static int
3211ruby_ppoll(struct pollfd *fds, nfds_t nfds,
3212 const struct timespec *ts, const sigset_t *sigmask)
3213{
3214 int timeout_ms;
3215
3216 if (ts) {
3217 int tmp, tmp2;
3218
3219 if (ts->tv_sec > INT_MAX/1000)
3220 timeout_ms = INT_MAX;
3221 else {
3222 tmp = (int)(ts->tv_sec * 1000);
3223 /* round up 1ns to 1ms to avoid excessive wakeups for <1ms sleep */
3224 tmp2 = (int)((ts->tv_nsec + 999999L) / (1000L * 1000L));
3225 if (INT_MAX - tmp < tmp2)
3226 timeout_ms = INT_MAX;
3227 else
3228 timeout_ms = (int)(tmp + tmp2);
3229 }
3230 }
3231 else
3232 timeout_ms = -1;
3233
3234 return poll(fds, nfds, timeout_ms);
3235}
3236# define ppoll(fds,nfds,ts,sigmask) ruby_ppoll((fds),(nfds),(ts),(sigmask))
3237#endif
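/*
 * Illustrative sketch (not part of the original file): the timespec-to-
 * milliseconds conversion above rounds nanoseconds up so that a 1 ns request
 * is not turned into a 0 ms (busy) poll.  A simplified version without the
 * INT_MAX clamping, with two worked values; timespec_to_poll_ms is a
 * hypothetical name.
 */
#if 0 /* standalone example, excluded from the build */
#include <assert.h>
#include <time.h>

static int
timespec_to_poll_ms(const struct timespec *ts)
{
    int ms  = (int)(ts->tv_sec * 1000);
    int rem = (int)((ts->tv_nsec + 999999L) / (1000L * 1000L)); /* round up */
    return ms + rem;
}

int
main(void)
{
    struct timespec a = { .tv_sec = 0, .tv_nsec = 1 };      /* 1 ns    -> 1 ms, never 0 */
    struct timespec b = { .tv_sec = 2, .tv_nsec = 500000 }; /* 2.0005 s -> 2001 ms */
    assert(timespec_to_poll_ms(&a) == 1);
    assert(timespec_to_poll_ms(&b) == 2001);
    return 0;
}
#endif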
3238
3239/*
3240 * Single CPU setups benefit from explicit sched_yield() before ppoll(),
3241 * since threads may be too starved to enter the GVL waitqueue for
3242 * us to detect contention. Instead, we want to kick other threads
3243 * so they can run and possibly prevent us from entering slow paths
3244 * in ppoll() or similar syscalls.
3245 *
3246 * Confirmed on FreeBSD 11.2 and Linux 4.19.
3247 * [ruby-core:90417] [Bug #15398]
3248 */
3249#define THREAD_BLOCKING_YIELD(th) do { \
3250 const rb_thread_t *next_th; \
3251 struct rb_thread_sched *sched = TH_SCHED(th); \
3252 RB_VM_SAVE_MACHINE_CONTEXT(th); \
3253 thread_sched_to_waiting(sched, (th)); \
3254 next_th = sched->running; \
3255 rb_native_mutex_unlock(&sched->lock_); \
3256 native_thread_yield(); /* TODO: needed? */ \
3257 if (!next_th && rb_ractor_living_thread_num(th->ractor) > 1) { \
3258 native_thread_yield(); \
3259 }
3260
3261static void
3262native_sleep(rb_thread_t *th, rb_hrtime_t *rel)
3263{
3264 struct rb_thread_sched *sched = TH_SCHED(th);
3265
3266 RUBY_DEBUG_LOG("rel:%d", rel ? (int)*rel : 0);
3267 if (rel) {
3268 if (th_has_dedicated_nt(th)) {
3269 native_cond_sleep(th, rel);
3270 }
3271 else {
3272 thread_sched_wait_events(sched, th, -1, thread_sched_waiting_timeout, rel);
3273 }
3274 }
3275 else {
3276 thread_sched_to_waiting_until_wakeup(sched, th);
3277 }
3278
3279 RUBY_DEBUG_LOG("wakeup");
3280}
3281
3282// thread internal event hooks (only for pthread)
3283
3284struct rb_internal_thread_event_hook {
3285 rb_internal_thread_event_callback callback;
3286 rb_event_flag_t event;
3287 void *user_data;
3288
3289 struct rb_internal_thread_event_hook *next;
3290};
3291
3292static pthread_rwlock_t rb_internal_thread_event_hooks_rw_lock = PTHREAD_RWLOCK_INITIALIZER;
3293
3294rb_internal_thread_event_hook_t *
3295rb_internal_thread_add_event_hook(rb_internal_thread_event_callback callback, rb_event_flag_t internal_event, void *user_data)
3296{
3297 rb_internal_thread_event_hook_t *hook = ALLOC_N(rb_internal_thread_event_hook_t, 1);
3298 hook->callback = callback;
3299 hook->user_data = user_data;
3300 hook->event = internal_event;
3301
3302 int r;
3303 if ((r = pthread_rwlock_wrlock(&rb_internal_thread_event_hooks_rw_lock))) {
3304 rb_bug_errno("pthread_rwlock_wrlock", r);
3305 }
3306
3307 hook->next = rb_internal_thread_event_hooks;
3308 ATOMIC_PTR_EXCHANGE(rb_internal_thread_event_hooks, hook);
3309
3310 if ((r = pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock))) {
3311 rb_bug_errno("pthread_rwlock_unlock", r);
3312 }
3313 return hook;
3314}
3315
3316bool
3317rb_internal_thread_remove_event_hook(rb_internal_thread_event_hook_t * hook)
3318{
3319 int r;
3320 if ((r = pthread_rwlock_wrlock(&rb_internal_thread_event_hooks_rw_lock))) {
3321 rb_bug_errno("pthread_rwlock_wrlock", r);
3322 }
3323
3324 bool success = FALSE;
3325
3326 if (rb_internal_thread_event_hooks == hook) {
3327 ATOMIC_PTR_EXCHANGE(rb_internal_thread_event_hooks, hook->next);
3328 success = TRUE;
3329 }
3330 else {
3331 rb_internal_thread_event_hook_t *h = rb_internal_thread_event_hooks;
3332
3333 do {
3334 if (h->next == hook) {
3335 h->next = hook->next;
3336 success = TRUE;
3337 break;
3338 }
3339 } while ((h = h->next));
3340 }
3341
3342 if ((r = pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock))) {
3343 rb_bug_errno("pthread_rwlock_unlock", r);
3344 }
3345
3346 if (success) {
3347 ruby_xfree(hook);
3348 }
3349 return success;
3350}
3351
3352static void
3353rb_thread_execute_hooks(rb_event_flag_t event, rb_thread_t *th)
3354{
3355 int r;
3356 if ((r = pthread_rwlock_rdlock(&rb_internal_thread_event_hooks_rw_lock))) {
3357 rb_bug_errno("pthread_rwlock_rdlock", r);
3358 }
3359
3360 if (rb_internal_thread_event_hooks) {
3361 rb_internal_thread_event_hook_t *h = rb_internal_thread_event_hooks;
3362 do {
3363 if (h->event & event) {
3364 rb_internal_thread_event_data_t event_data = {
3365 .thread = th->self,
3366 };
3367 (*h->callback)(event, &event_data, h->user_data);
3368 }
3369 } while((h = h->next));
3370 }
3371 if ((r = pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock))) {
3372 rb_bug_errno("pthread_rwlock_unlock", r);
3373 }
3374}
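/*
 * Illustrative sketch (not part of the original file): how a C extension
 * might consume the hook API implemented above (declared in ruby/thread.h).
 * The callback can run without the GVL held, so it should only do
 * async-friendly work; log_event and Init_example are hypothetical names.
 */
#if 0 /* standalone example, excluded from the build */
#include <stdio.h>
#include <ruby/ruby.h>
#include <ruby/thread.h>

static void
log_event(rb_event_flag_t event, const rb_internal_thread_event_data_t *data, void *user_data)
{
    (void)data; (void)user_data;
    fprintf(stderr, "thread event: %u\n", (unsigned)event);
}

void
Init_example(void)
{
    rb_internal_thread_event_hook_t *hook =
        rb_internal_thread_add_event_hook(log_event,
                                          RUBY_INTERNAL_THREAD_EVENT_STARTED |
                                          RUBY_INTERNAL_THREAD_EVENT_EXITED,
                                          NULL);
    (void)hook; /* keep it to pass to rb_internal_thread_remove_event_hook() later */
}
#endif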
3375
3376#endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */