static void timer_thread_unregister_waiting(rb_thread_t *th, int fd, enum thread_sched_waiting_flag flags);

// return true if the waiting entry was canceled
static bool
timer_thread_cancel_waiting(rb_thread_t *th)
{
    bool canceled = false;
    if (th->sched.waiting_reason.flags) {
        rb_native_mutex_lock(&timer_th.waiting_lock);
        {
            if (th->sched.waiting_reason.flags) { // still on the waiting list
                canceled = true;
                ccan_list_del_init(&th->sched.waiting_reason.node);
                if (th->sched.waiting_reason.flags & (thread_sched_waiting_io_read | thread_sched_waiting_io_write)) {
                    timer_thread_unregister_waiting(th, th->sched.waiting_reason.data.fd, th->sched.waiting_reason.flags);
                }
                th->sched.waiting_reason.flags = thread_sched_waiting_none;
            }
        }
        rb_native_mutex_unlock(&timer_th.waiting_lock);
    }
    return canceled;
}
static void
ubf_event_waiting(void *ptr)
{
    rb_thread_t *th = (rb_thread_t *)ptr;
    struct rb_thread_sched *sched = TH_SCHED(th);

    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));

    VM_ASSERT(th->nt == NULL || !th_has_dedicated_nt(th));

    th->unblock.func = NULL;
    th->unblock.arg = NULL;

    bool canceled = timer_thread_cancel_waiting(th);

    thread_sched_lock(sched, th);
    {
        if (sched->running == th) {
            RUBY_DEBUG_LOG("not waiting yet");
        }
        else if (canceled) {
            thread_sched_to_ready_common(sched, th, true, false);
        }
        else {
            RUBY_DEBUG_LOG("already not waiting");
        }
    }
    thread_sched_unlock(sched, th);
}
static bool timer_thread_register_waiting(rb_thread_t *th, int fd, enum thread_sched_waiting_flag flags, rb_hrtime_t *rel);
// Wait on fd/timeout events. Returns true if the wait ended by timeout.
static bool
thread_sched_wait_events(struct rb_thread_sched *sched, rb_thread_t *th, int fd, enum thread_sched_waiting_flag events, rb_hrtime_t *rel)
{
    VM_ASSERT(!th_has_dedicated_nt(th)); // on SNT

    volatile bool timedout = false, need_cancel = false;

    if (timer_thread_register_waiting(th, fd, events, rel)) {
        RUBY_DEBUG_LOG("wait fd:%d", fd);

        RB_VM_SAVE_MACHINE_CONTEXT(th);
        setup_ubf(th, ubf_event_waiting, (void *)th);

        thread_sched_lock(sched, th);
        {
            if (th->sched.waiting_reason.flags == thread_sched_waiting_none) {
                // already awakened
            }
            else if (RUBY_VM_INTERRUPTED(th->ec)) {
                need_cancel = true;
            }
            else {
                RUBY_DEBUG_LOG("sleep");

                th->status = THREAD_STOPPED_FOREVER;
                thread_sched_wakeup_next_thread(sched, th, true);
                thread_sched_wait_running_turn(sched, th, true);

                RUBY_DEBUG_LOG("wakeup");
            }

            timedout = th->sched.waiting_reason.data.result == 0;
        }
        thread_sched_unlock(sched, th);

        if (need_cancel) {
            timer_thread_cancel_waiting(th);
        }

        setup_ubf(th, NULL, NULL);

        th->status = THREAD_RUNNABLE;
    }
    else {
        RUBY_DEBUG_LOG("can not wait fd:%d", fd);
        return false;
    }

    VM_ASSERT(sched->running == th);

    return timedout;
}
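#if 0
// Illustrative caller sketch (added by the editor, not part of the original
// file; the helper name is hypothetical): how an M:N thread could block until
// `fd` becomes readable or the relative timeout `rel` (rb_hrtime_t) expires,
// using thread_sched_wait_events() as reconstructed above.
static bool
example_wait_readable(rb_thread_t *th, int fd, rb_hrtime_t rel)
{
    struct rb_thread_sched *sched = TH_SCHED(th);
    // true means the wait ended by timeout rather than by fd readiness
    return thread_sched_wait_events(sched, th, fd, thread_sched_waiting_io_read, &rel);
}
#endif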
static int
get_sysconf_page_size(void)
{
    static long page_size = 0;

    if (UNLIKELY(page_size == 0)) {
        page_size = sysconf(_SC_PAGESIZE);
        VM_ASSERT(page_size < INT_MAX);
    }
    return (int)page_size;
}
#define MSTACK_CHUNK_SIZE (512 * 1024 * 1024) // 512 MB
#define MSTACK_PAGE_SIZE get_sysconf_page_size()
#define MSTACK_CHUNK_PAGE_NUM (MSTACK_CHUNK_SIZE / MSTACK_PAGE_SIZE - 1)
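/*
 * Worked example (editor's illustration; assumes a 4 KiB page size, which is
 * not guaranteed by the source):
 *   MSTACK_CHUNK_PAGE_NUM = 512 MiB / 4 KiB - 1 = 131071 pages per chunk.
 */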
static struct nt_stack_chunk_header {
    struct nt_stack_chunk_header *prev_chunk;
    struct nt_stack_chunk_header *prev_free_chunk;

    uint16_t start_page;
    uint16_t stack_count;
    uint16_t uninitialized_stack_count;

    uint16_t free_stack_pos;
    uint16_t free_stack[];
} *nt_stack_chunks = NULL,
  *nt_free_stack_chunks = NULL;
struct nt_machine_stack_footer {
    struct nt_stack_chunk_header *ch;
    size_t index;
};

static rb_nativethread_lock_t nt_machine_stack_lock = RB_NATIVETHREAD_LOCK_INIT;
static size_t
nt_thread_stack_size(void)
{
    static size_t msz;
    if (LIKELY(msz > 0)) return msz;

    rb_vm_t *vm = GET_VM();
    int sz = (int)(vm->default_params.thread_vm_stack_size + vm->default_params.thread_machine_stack_size + MSTACK_PAGE_SIZE);
    int page_num = roomof(sz, MSTACK_PAGE_SIZE);
    msz = (size_t)page_num * MSTACK_PAGE_SIZE;
    return msz;
}
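/*
 * Worked example (editor's illustration; the 1 MiB defaults and 4 KiB page
 * size are assumptions, not values taken from this file):
 *   sz       = 1 MiB (VM stack) + 1 MiB (machine stack) + 4 KiB = 2,101,248 bytes
 *   page_num = roomof(sz, 4096) = 513
 *   msz      = 513 * 4096       = 2,101,248 bytes
 * so each per-thread stack slot spans 513 pages of a chunk.
 */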
static struct nt_stack_chunk_header *
nt_alloc_thread_stack_chunk(void)
{
    int mmap_flags = MAP_ANONYMOUS | MAP_PRIVATE;
#if defined(MAP_STACK) && !defined(__FreeBSD__) && !defined(__FreeBSD_kernel__)
    mmap_flags |= MAP_STACK;
#endif

    const char *m = (void *)mmap(NULL, MSTACK_CHUNK_SIZE, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
    if (m == MAP_FAILED) {
        return NULL;
    }

    size_t msz = nt_thread_stack_size();
    int header_page_cnt = 1;
    int stack_count = ((MSTACK_CHUNK_PAGE_NUM - header_page_cnt) * MSTACK_PAGE_SIZE) / msz;
    int ch_size = sizeof(struct nt_stack_chunk_header) + sizeof(uint16_t) * stack_count;

    if (ch_size > MSTACK_PAGE_SIZE * header_page_cnt) {
        header_page_cnt = (ch_size + MSTACK_PAGE_SIZE - 1) / MSTACK_PAGE_SIZE;
        stack_count = ((MSTACK_CHUNK_PAGE_NUM - header_page_cnt) * MSTACK_PAGE_SIZE) / msz;
    }

    VM_ASSERT(stack_count <= UINT16_MAX);

    struct nt_stack_chunk_header *ch = (struct nt_stack_chunk_header *)m;

    ch->start_page = header_page_cnt;
    ch->prev_chunk = nt_stack_chunks;
    ch->prev_free_chunk = nt_free_stack_chunks;
    ch->uninitialized_stack_count = ch->stack_count = (uint16_t)stack_count;
    ch->free_stack_pos = 0;

    RUBY_DEBUG_LOG("ch:%p start_page:%d stack_cnt:%d stack_size:%d", ch, (int)ch->start_page, (int)ch->stack_count, (int)msz);

    return ch;
}
static void *
nt_stack_chunk_get_stack_start(struct nt_stack_chunk_header *ch, size_t idx)
{
    const char *m = (char *)ch;
    return (void *)(m + ch->start_page * MSTACK_PAGE_SIZE + idx * nt_thread_stack_size());
}
static struct nt_machine_stack_footer *
nt_stack_chunk_get_msf(const rb_vm_t *vm, const char *mstack)
{
    const size_t msz = vm->default_params.thread_machine_stack_size;
    return (struct nt_machine_stack_footer *)&mstack[msz - sizeof(struct nt_machine_stack_footer)];
}
static void *
nt_stack_chunk_get_stack(const rb_vm_t *vm, struct nt_stack_chunk_header *ch, size_t idx, void **vm_stack, void **machine_stack)
{
    const char *vstack, *mstack;
    const char *guard_page;

    vstack = nt_stack_chunk_get_stack_start(ch, idx);
    guard_page = vstack + vm->default_params.thread_vm_stack_size;
    mstack = guard_page + MSTACK_PAGE_SIZE;

    struct nt_machine_stack_footer *msf = nt_stack_chunk_get_msf(vm, mstack);
    msf->ch = ch;
    msf->index = idx;

    RUBY_DEBUG_LOG("msf:%p vstack:%p-%p guard_page:%p-%p mstack:%p-%p", msf,
                   vstack, (void *)(guard_page-1),
                   guard_page, (void *)(mstack-1),
                   mstack, (void *)(msf));

    *vm_stack = (void *)vstack;
    *machine_stack = (void *)mstack;

    return (void *)guard_page;
}
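/*
 * Descriptive comment added for clarity: layout of one stack slot inside a
 * chunk, as computed by nt_stack_chunk_get_stack():
 *
 *   vstack                       guard_page              mstack
 *   [ VM stack ................ ][ 1 page, PROT_NONE ][ machine stack ...... [footer] ]
 *   |<- thread_vm_stack_size -->|<- MSTACK_PAGE_SIZE ->|<- thread_machine_stack_size ->|
 *
 * The nt_machine_stack_footer at the top of the machine stack records the
 * owning chunk and slot index so nt_free_stack() can recover them from a
 * bare machine-stack pointer.
 */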
RBIMPL_ATTR_MAYBE_UNUSED()
static void
nt_stack_chunk_dump(void)
{
    struct nt_stack_chunk_header *ch;
    int i;

    fprintf(stderr, "** nt_stack_chunks\n");
    ch = nt_stack_chunks;
    for (i=0; ch; i++, ch = ch->prev_chunk) {
        fprintf(stderr, "%d %p free_pos:%d\n", i, (void *)ch, (int)ch->free_stack_pos);
    }

    fprintf(stderr, "** nt_free_stack_chunks\n");
    ch = nt_free_stack_chunks;
    for (i=0; ch; i++, ch = ch->prev_free_chunk) {
        fprintf(stderr, "%d %p free_pos:%d\n", i, (void *)ch, (int)ch->free_stack_pos);
    }
}
static int
nt_guard_page(const char *p, size_t len)
{
    if (mprotect((void *)p, len, PROT_NONE) != -1) {
        return 0;
    }
    else {
        return errno;
    }
}
static int
nt_alloc_stack(rb_vm_t *vm, void **vm_stack, void **machine_stack)
{
    int err = 0;
    rb_native_mutex_lock(&nt_machine_stack_lock);
    {
      retry:
        if (nt_free_stack_chunks) {
            struct nt_stack_chunk_header *ch = nt_free_stack_chunks;
            if (ch->free_stack_pos > 0) {
                RUBY_DEBUG_LOG("free_stack_pos:%d", ch->free_stack_pos);
                nt_stack_chunk_get_stack(vm, ch, ch->free_stack[--ch->free_stack_pos], vm_stack, machine_stack);
            }
            else if (ch->uninitialized_stack_count > 0) {
                RUBY_DEBUG_LOG("uninitialized_stack_count:%d", ch->uninitialized_stack_count);
                size_t idx = ch->stack_count - ch->uninitialized_stack_count--;
                void *guard_page = nt_stack_chunk_get_stack(vm, ch, idx, vm_stack, machine_stack);
                err = nt_guard_page(guard_page, MSTACK_PAGE_SIZE);
            }
            else {
                // the chunk is exhausted; unlink it and retry with the next one
                nt_free_stack_chunks = ch->prev_free_chunk;
                ch->prev_free_chunk = NULL;
                goto retry;
            }
        }
        else {
            struct nt_stack_chunk_header *p = nt_alloc_thread_stack_chunk();
            if (p == NULL) {
                err = errno;
            }
            else {
                nt_free_stack_chunks = nt_stack_chunks = p;
                goto retry;
            }
        }
    }
    rb_native_mutex_unlock(&nt_machine_stack_lock);
    return err;
}
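/*
 * Descriptive note added for clarity: nt_alloc_stack() prefers, in order,
 * (1) a previously freed slot recorded in free_stack[], (2) a never-used slot
 * of the current chunk (guarded on first use via nt_guard_page()), and only
 * then (3) mmap()s a fresh chunk and retries.
 */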
static void
nt_free_stack(void *mstack)
{
    rb_native_mutex_lock(&nt_machine_stack_lock);
    {
        struct nt_machine_stack_footer *msf = nt_stack_chunk_get_msf(GET_VM(), mstack);
        struct nt_stack_chunk_header *ch = msf->ch;
        int idx = (int)msf->index;
        void *stack = nt_stack_chunk_get_stack_start(ch, idx);

        RUBY_DEBUG_LOG("stack:%p mstack:%p ch:%p index:%d", stack, mstack, ch, idx);

        if (ch->prev_free_chunk == NULL) {
            ch->prev_free_chunk = nt_free_stack_chunks;
            nt_free_stack_chunks = ch;
        }
        ch->free_stack[ch->free_stack_pos++] = idx;

        // release the pages of the now-unused stack
#if defined(MADV_FREE)
        int r = madvise(stack, nt_thread_stack_size(), MADV_FREE);
#elif defined(MADV_DONTNEED)
        int r = madvise(stack, nt_thread_stack_size(), MADV_DONTNEED);
#else
        int r = 0;
#endif
        if (r != 0) rb_bug("madvise errno:%d", errno);
    }
    rb_native_mutex_unlock(&nt_machine_stack_lock);
}
static int
native_thread_check_and_create_shared(rb_vm_t *vm)
{
    bool need_to_make = false;

    rb_native_mutex_lock(&vm->ractor.sched.lock);
    {
        unsigned int snt_cnt = vm->ractor.sched.snt_cnt;
        if (!vm->ractor.main_ractor->threads.sched.enable_mn_threads) snt_cnt++;

        if (((int)snt_cnt < MINIMUM_SNT) ||
            (snt_cnt < vm->ractor.cnt &&
             snt_cnt < vm->ractor.sched.max_cpu)) {

            RUBY_DEBUG_LOG("added snt:%u dnt:%u ractor_cnt:%u grq_cnt:%u",
                           vm->ractor.sched.snt_cnt,
                           vm->ractor.sched.dnt_cnt,
                           vm->ractor.cnt,
                           vm->ractor.sched.grq_cnt);

            vm->ractor.sched.snt_cnt++;
            need_to_make = true;
        }
        else {
            RUBY_DEBUG_LOG("snt:%d ractor_cnt:%d", (int)vm->ractor.sched.snt_cnt, (int)vm->ractor.cnt);
        }
    }
    rb_native_mutex_unlock(&vm->ractor.sched.lock);

    if (need_to_make) {
        struct rb_native_thread *nt = native_thread_alloc();
        nt->vm = vm;
        return native_thread_create0(nt);
    }
    else {
        return 0;
    }
}
static COROUTINE
co_start(struct coroutine_context *from, struct coroutine_context *self)
{
    rb_thread_t *th = (rb_thread_t *)self->argument;
    struct rb_thread_sched *sched = TH_SCHED(th);

    VM_ASSERT(th->nt != NULL);
    VM_ASSERT(th == sched->running);
    VM_ASSERT(sched->lock_owner == NULL);

    thread_sched_set_lock_owner(sched, th);
    thread_sched_add_running_thread(TH_SCHED(th), th);
    thread_sched_unlock(sched, th);

    call_thread_start_func_2(th);

    thread_sched_lock(sched, NULL);

    RUBY_DEBUG_LOG("terminated th:%d", (int)th->serial);

    // the thread is terminated; hand the native thread over
    VM_ASSERT(!th_has_dedicated_nt(th));

    rb_vm_t *vm = th->vm;
    bool has_ready_ractor = vm->ractor.sched.grq_cnt > 0;

    rb_thread_t *next_th = sched->running;
    struct rb_native_thread *nt = th->nt;
    native_thread_assign(NULL, th);
    rb_ractor_set_current_ec(th->ractor, NULL);

    if (!has_ready_ractor && next_th && !next_th->nt) {
        // switch directly to the next ready thread on this native thread
        thread_sched_set_lock_owner(sched, NULL);
        thread_sched_switch0(th->sched.context, next_th, nt);
        th->sched.finished = true;
    }
    else {
        // return control to the native thread's scheduler coroutine
        th->sched.finished = true;
        coroutine_transfer(self, nt->nt_context);
    }
    rb_bug("unreachable");
}
static int
native_thread_create_shared(rb_thread_t *th)
{
    // setup coroutine
    rb_vm_t *vm = th->vm;
    void *vm_stack = NULL, *machine_stack = NULL;
    int err = nt_alloc_stack(vm, &vm_stack, &machine_stack);
    if (err) return err;

    VM_ASSERT(vm_stack < machine_stack);

    // setup vm stack
    size_t vm_stack_words = th->vm->default_params.thread_vm_stack_size/sizeof(VALUE);
    rb_ec_initialize_vm_stack(th->ec, vm_stack, vm_stack_words);

    // setup machine stack
    size_t machine_stack_size = vm->default_params.thread_machine_stack_size - sizeof(struct nt_machine_stack_footer);
    th->ec->machine.stack_start = (void *)((uintptr_t)machine_stack + machine_stack_size);
    th->ec->machine.stack_maxsize = machine_stack_size;
    th->sched.context_stack = machine_stack;

    th->sched.context = ruby_xmalloc(sizeof(struct coroutine_context));
    coroutine_initialize(th->sched.context, co_start, machine_stack, machine_stack_size);
    th->sched.context->argument = th;

    RUBY_DEBUG_LOG("th:%u vm_stack:%p machine_stack:%p", rb_th_serial(th), vm_stack, machine_stack);
    thread_sched_to_ready(TH_SCHED(th), th);

    // setup the shared native thread pool (SNT)
    return native_thread_check_and_create_shared(th->vm);
}
#else // USE_MN_THREADS == 0

static int
native_thread_create_shared(rb_thread_t *th)
{
    rb_bug("unreachable");
}

static bool
thread_sched_wait_events(struct rb_thread_sched *sched, rb_thread_t *th, int fd, enum thread_sched_waiting_flag events, rb_hrtime_t *rel)
{
    rb_bug("unreachable");
}

#endif // USE_MN_THREADS
#if (HAVE_SYS_EPOLL_H || HAVE_SYS_EVENT_H) && USE_MN_THREADS
static bool
fd_readable_nonblock(int fd)
{
    struct pollfd pfd = { .fd = fd, .events = POLLIN, };
    return poll(&pfd, 1, 0) != 0;
}

static bool
fd_writable_nonblock(int fd)
{
    struct pollfd pfd = { .fd = fd, .events = POLLOUT, };
    return poll(&pfd, 1, 0) != 0;
}
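#if 0
// Standalone usage sketch (editor's illustration, not part of the VM source):
// the same zero-timeout poll(2) probe applied to stdin. Build it separately,
// e.g. `cc probe.c -o probe && ./probe < /dev/null`.
#include <poll.h>
#include <stdio.h>

int
main(void)
{
    struct pollfd pfd = { .fd = 0, .events = POLLIN };
    int r = poll(&pfd, 1, 0); // timeout of 0 ms: never blocks
    printf("stdin is%s readable (or in an error state) right now\n", r != 0 ? "" : " not");
    return 0;
}
#endif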
static void
verify_waiting_list(void)
{
    rb_thread_t *wth, *prev_wth = NULL;
    ccan_list_for_each(&timer_th.waiting, wth, sched.waiting_reason.node) {
        if (prev_wth) {
            rb_hrtime_t timeout = wth->sched.waiting_reason.data.timeout;
            rb_hrtime_t prev_timeout = prev_wth->sched.waiting_reason.data.timeout;
            VM_ASSERT(timeout == 0 || prev_timeout <= timeout);
        }
        prev_wth = wth;
    }
}
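/*
 * Descriptive note added for clarity: timer_thread_register_waiting() keeps
 * timer_th.waiting sorted by ascending absolute timeout, with no-timeout
 * entries (timeout == 0) at the tail. For example, a list of timeouts
 * [5, 7, 9, 0, 0] satisfies the assertion above, while [5, 9, 7] would trip
 * VM_ASSERT(timeout == 0 || prev_timeout <= timeout).
 */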
static enum thread_sched_waiting_flag
kqueue_translate_filter_to_flags(int16_t filter)
{
    switch (filter) {
      case EVFILT_READ:
        return thread_sched_waiting_io_read;
      case EVFILT_WRITE:
        return thread_sched_waiting_io_write;
      case EVFILT_TIMER:
        return thread_sched_waiting_timeout;
      default:
        rb_bug("kevent filter:%d not supported", filter);
    }
}
static int
kqueue_wait(rb_vm_t *vm)
{
    struct timespec calculated_timeout, *timeout = NULL;
    int timeout_ms = timer_thread_set_timeout(vm);

    if (timeout_ms >= 0) {
        calculated_timeout.tv_sec = timeout_ms / 1000;
        calculated_timeout.tv_nsec = (timeout_ms % 1000) * 1000000;
        timeout = &calculated_timeout;
    }
    return kevent(timer_th.event_fd, NULL, 0, timer_th.finished_events, KQUEUE_EVENTS_MAX, timeout);
}
static void
kqueue_create(void)
{
    if ((timer_th.event_fd = kqueue()) == -1) rb_bug("kqueue creation failed (errno:%d)", errno);
    int flags = fcntl(timer_th.event_fd, F_GETFD);
    if (flags == -1) {
        rb_bug("kqueue GETFD failed (errno:%d)", errno);
    }

    flags |= FD_CLOEXEC;
    if (fcntl(timer_th.event_fd, F_SETFD, flags) == -1) {
        rb_bug("kqueue SETFD failed (errno:%d)", errno);
    }
}
static void
kqueue_unregister_waiting(int fd, enum thread_sched_waiting_flag flags)
{
    if (flags) {
        struct kevent ke[2];
        int num_events = 0;

        if (flags & thread_sched_waiting_io_read) {
            EV_SET(&ke[num_events], fd, EVFILT_READ, EV_DELETE, 0, 0, NULL);
            num_events++;
        }
        if (flags & thread_sched_waiting_io_write) {
            EV_SET(&ke[num_events], fd, EVFILT_WRITE, EV_DELETE, 0, 0, NULL);
            num_events++;
        }
        if (kevent(timer_th.event_fd, ke, num_events, NULL, 0, NULL) == -1) {
            perror("kevent");
            rb_bug("unregister/kevent fails. errno:%d", errno);
        }
    }
}
static bool
kqueue_already_registered(int fd)
{
    rb_thread_t *wth, *found_wth = NULL;
    ccan_list_for_each(&timer_th.waiting, wth, sched.waiting_reason.node) {
        if (wth->sched.waiting_reason.flags && wth->sched.waiting_reason.data.fd == fd) {
            found_wth = wth;
            break;
        }
    }
    return found_wth != NULL;
}
// return false if the fd is not waitable or there is no need to wait.
static bool
timer_thread_register_waiting(rb_thread_t *th, int fd, enum thread_sched_waiting_flag flags, rb_hrtime_t *rel)
{
    RUBY_DEBUG_LOG("th:%u fd:%d flag:%d rel:%lu", rb_th_serial(th), fd, flags, rel ? (unsigned long)*rel : 0);

    VM_ASSERT(th == NULL || TH_SCHED(th)->running == th);
    VM_ASSERT(flags != 0);

    rb_hrtime_t abs = 0; // 0 means no timeout

    if (rel) {
        if (*rel > 0) {
            flags |= thread_sched_waiting_timeout;
        }
        else {
            return false;
        }
    }

    if (rel && *rel > 0) {
        flags |= thread_sched_waiting_timeout;
    }

#if HAVE_SYS_EVENT_H
    struct kevent ke[2];
    int num_events = 0;
#else
    uint32_t epoll_events = 0;
#endif

    if (flags & thread_sched_waiting_timeout) {
        VM_ASSERT(rel != NULL);
        abs = rb_hrtime_add(rb_hrtime_now(), *rel);
    }

    if (flags & thread_sched_waiting_io_read) {
        if (!(flags & thread_sched_waiting_io_force) && fd_readable_nonblock(fd)) {
            RUBY_DEBUG_LOG("fd_readable_nonblock");
            return false;
        }
        else {
#if HAVE_SYS_EVENT_H
            EV_SET(&ke[num_events], fd, EVFILT_READ, EV_ADD, 0, 0, (void *)th);
            num_events++;
#else
            epoll_events |= EPOLLIN;
#endif
        }
    }

    if (flags & thread_sched_waiting_io_write) {
        if (!(flags & thread_sched_waiting_io_force) && fd_writable_nonblock(fd)) {
            RUBY_DEBUG_LOG("fd_writable_nonblock");
            return false;
        }
        else {
#if HAVE_SYS_EVENT_H
            EV_SET(&ke[num_events], fd, EVFILT_WRITE, EV_ADD, 0, 0, (void *)th);
            num_events++;
#else
            epoll_events |= EPOLLOUT;
#endif
        }
    }

    rb_native_mutex_lock(&timer_th.waiting_lock);
    {
#if HAVE_SYS_EVENT_H
        if (num_events > 0) {
            if (kqueue_already_registered(fd)) {
                rb_native_mutex_unlock(&timer_th.waiting_lock);
                return false;
            }
            if (kevent(timer_th.event_fd, ke, num_events, NULL, 0, NULL) == -1) {
                RUBY_DEBUG_LOG("failed (%d)", errno);
                perror("kevent");
                rb_bug("register/kevent failed(fd:%d, errno:%d)", fd, errno);
            }
            RUBY_DEBUG_LOG("kevent(add, fd:%d) success", fd);
        }
#else
        if (epoll_events) {
            struct epoll_event event = {
                .events = epoll_events,
                .data = { .ptr = (void *)th, },
            };
            if (epoll_ctl(timer_th.event_fd, EPOLL_CTL_ADD, fd, &event) == -1) {
                RUBY_DEBUG_LOG("failed (%d)", errno);
                switch (errno) {
                  case EBADF:  // the fd is closed?
                  case EPERM:  // the fd doesn't support epoll
                  case EEXIST: // the fd is already registered by another thread
                    rb_native_mutex_unlock(&timer_th.waiting_lock);
                    return false;
                  default:
                    perror("epoll_ctl");
                    rb_bug("register/epoll_ctl failed(fd:%d, errno:%d)", fd, errno);
                }
            }
            RUBY_DEBUG_LOG("epoll_ctl(add, fd:%d, events:%d) success", fd, epoll_events);
        }
#endif

        if (th) {
            VM_ASSERT(th->sched.waiting_reason.flags == thread_sched_waiting_none);

            // setup waiting information
            th->sched.waiting_reason.flags = flags;
            th->sched.waiting_reason.data.timeout = abs;
            th->sched.waiting_reason.data.fd = fd;
            th->sched.waiting_reason.data.result = 0;

            if (abs == 0) { // no timeout
                VM_ASSERT(!(flags & thread_sched_waiting_timeout));
                ccan_list_add_tail(&timer_th.waiting, &th->sched.waiting_reason.node);
            }
            else {
                RUBY_DEBUG_LOG("abs:%lu", abs);
                VM_ASSERT(flags & thread_sched_waiting_timeout);

                // insert into the list sorted by timeout
                rb_thread_t *wth, *prev_wth = NULL;
                ccan_list_for_each(&timer_th.waiting, wth, sched.waiting_reason.node) {
                    if ((wth->sched.waiting_reason.flags & thread_sched_waiting_timeout) &&
                        wth->sched.waiting_reason.data.timeout < abs) {
                        prev_wth = wth;
                    }
                    else {
                        break;
                    }
                }
                if (prev_wth) {
                    ccan_list_add_after(&timer_th.waiting, &prev_wth->sched.waiting_reason.node, &th->sched.waiting_reason.node);
                }
                else {
                    ccan_list_add(&timer_th.waiting, &th->sched.waiting_reason.node);
                }

                verify_waiting_list();

                // update the timer thread's timeout
                timer_thread_wakeup();
            }
        }
    }
    rb_native_mutex_unlock(&timer_th.waiting_lock);

    return true;
}
static void
timer_thread_unregister_waiting(rb_thread_t *th, int fd, enum thread_sched_waiting_flag flags)
{
    RUBY_DEBUG_LOG("th:%u fd:%d", rb_th_serial(th), fd);
#if HAVE_SYS_EVENT_H
    kqueue_unregister_waiting(fd, flags);
#else
    if (epoll_ctl(timer_th.event_fd, EPOLL_CTL_DEL, fd, NULL) == -1) {
        switch (errno) {
          case EBADF:
            // the fd is already closed; ignore
            break;
          default:
            perror("epoll_ctl");
            rb_bug("unregister/epoll_ctl fails. errno:%d", errno);
        }
    }
#endif
}
static void
timer_thread_setup_mn(void)
{
#if HAVE_SYS_EVENT_H
    kqueue_create();
    RUBY_DEBUG_LOG("kqueue_fd:%d", timer_th.event_fd);
#else
    if ((timer_th.event_fd = epoll_create1(EPOLL_CLOEXEC)) == -1) rb_bug("epoll_create (errno:%d)", errno);
    RUBY_DEBUG_LOG("epoll_fd:%d", timer_th.event_fd);
#endif
    RUBY_DEBUG_LOG("comm_fds:%d/%d", timer_th.comm_fds[0], timer_th.comm_fds[1]);

    timer_thread_register_waiting(NULL, timer_th.comm_fds[0], thread_sched_waiting_io_read | thread_sched_waiting_io_force, NULL);
}
static int
event_wait(rb_vm_t *vm)
{
#if HAVE_SYS_EVENT_H
    int r = kqueue_wait(vm);
#else
    int r = epoll_wait(timer_th.event_fd, timer_th.finished_events, EPOLL_EVENTS_MAX, timer_thread_set_timeout(vm));
#endif
    return r;
}

static void
timer_thread_polling(rb_vm_t *vm)
{
    int r = event_wait(vm);

    RUBY_DEBUG_LOG("r:%d errno:%d", r, errno);

    switch (r) {
      case 0: // timeout
        RUBY_DEBUG_LOG("timeout%s", "");

        ractor_sched_lock(vm, NULL);
        {
            // check time slices of running threads
            timer_thread_check_timeslice(vm);

            // a ready Ractor is waiting on the global run queue (GRQ)
            if (vm->ractor.sched.grq_cnt > 0) {
                RUBY_DEBUG_LOG("GRQ cnt: %u", vm->ractor.sched.grq_cnt);
                rb_native_cond_signal(&vm->ractor.sched.cond);
            }
        }
        ractor_sched_unlock(vm, NULL);

        // check native thread shortage
        native_thread_check_and_create_shared(vm);
        break;

      case -1:
        switch (errno) {
          case EINTR:
            // simply retry
            break;
          default:
            perror("event_wait");
            rb_bug("event_wait errno:%d", errno);
        }
        break;

      default:
        RUBY_DEBUG_LOG("%d event(s)", r);

#if HAVE_SYS_EVENT_H
        for (int i=0; i<r; i++) {
            rb_thread_t *th = (rb_thread_t *)timer_th.finished_events[i].udata;
            int fd = (int)timer_th.finished_events[i].ident;
            int16_t filter = timer_th.finished_events[i].filter;

            if (th == NULL) {
                // wakeup via the communication pipe
                RUBY_DEBUG_LOG("comm from fd:%d", timer_th.comm_fds[1]);
                consume_communication_pipe(timer_th.comm_fds[0]);
            }
            else {
                // wakeup a specific thread by I/O readiness
                RUBY_DEBUG_LOG("io event. wakeup_th:%u event:%s%s",
                               rb_th_serial(th),
                               (filter == EVFILT_READ) ? "read/" : "",
                               (filter == EVFILT_WRITE) ? "write/" : "");

                rb_native_mutex_lock(&timer_th.waiting_lock);
                {
                    if (th->sched.waiting_reason.flags) {
                        // delete from the waiting list
                        ccan_list_del_init(&th->sched.waiting_reason.node);
                        timer_thread_unregister_waiting(th, fd, kqueue_translate_filter_to_flags(filter));

                        th->sched.waiting_reason.flags = thread_sched_waiting_none;
                        th->sched.waiting_reason.data.fd = -1;
                        th->sched.waiting_reason.data.result = filter;

                        timer_thread_wakeup_thread(th);
                    }
                    else {
                        // already awakened by timeout or another event
                    }
                }
                rb_native_mutex_unlock(&timer_th.waiting_lock);
            }
        }
#else
        for (int i=0; i<r; i++) {
            rb_thread_t *th = (rb_thread_t *)timer_th.finished_events[i].data.ptr;

            if (th == NULL) {
                // wakeup via the communication pipe
                RUBY_DEBUG_LOG("comm from fd:%d", timer_th.comm_fds[1]);
                consume_communication_pipe(timer_th.comm_fds[0]);
            }
            else {
                // wakeup a specific thread by I/O readiness
                uint32_t events = timer_th.finished_events[i].events;

                RUBY_DEBUG_LOG("io event. wakeup_th:%u event:%s%s%s%s%s%s",
                               rb_th_serial(th),
                               (events & EPOLLIN)    ? "in/" : "",
                               (events & EPOLLOUT)   ? "out/" : "",
                               (events & EPOLLRDHUP) ? "RDHUP/" : "",
                               (events & EPOLLPRI)   ? "pri/" : "",
                               (events & EPOLLERR)   ? "err/" : "",
                               (events & EPOLLHUP)   ? "hup/" : "");

                rb_native_mutex_lock(&timer_th.waiting_lock);
                {
                    if (th->sched.waiting_reason.flags) {
                        // delete from the waiting list
                        ccan_list_del_init(&th->sched.waiting_reason.node);
                        timer_thread_unregister_waiting(th, th->sched.waiting_reason.data.fd, th->sched.waiting_reason.flags);

                        th->sched.waiting_reason.flags = thread_sched_waiting_none;
                        th->sched.waiting_reason.data.fd = -1;
                        th->sched.waiting_reason.data.result = (int)events;

                        timer_thread_wakeup_thread(th);
                    }
                    else {
                        // already awakened by timeout or another event
                    }
                }
                rb_native_mutex_unlock(&timer_th.waiting_lock);
            }
        }
#endif
    }
}
#else // HAVE_SYS_EPOLL_H || HAVE_SYS_EVENT_H

static void
timer_thread_setup_mn(void)
{
    // do nothing
}

static void
timer_thread_polling(rb_vm_t *vm)
{
    int timeout = timer_thread_set_timeout(vm);
    struct pollfd pfd = { .fd = timer_th.comm_fds[0], .events = POLLIN, };
    int r = poll(&pfd, 1, timeout);

    switch (r) {
      case 0: // timeout
        timer_thread_check_timeslice(vm);
        break;
      case -1: // error
        switch (errno) {
          case EINTR:
            break; // simply retry
          default:
            perror("poll");
            rb_bug("poll errno:%d", errno);
        }
        break;
      case 1:
        consume_communication_pipe(timer_th.comm_fds[0]);
        break;
      default:
        rb_bug("unreachable");
    }
}

#endif // HAVE_SYS_EPOLL_H || HAVE_SYS_EVENT_H