#ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

#ifdef HAVE_SYS_RESOURCE_H
#include <sys/resource.h>
#ifdef HAVE_THR_STKSEGMENT
#if defined(HAVE_FCNTL_H)
#elif defined(HAVE_SYS_FCNTL_H)
#ifdef HAVE_SYS_PRCTL_H
#if defined(HAVE_SYS_TIME_H)
#include <sys/syscall.h>
# include <AvailabilityMacros.h>

#if defined(HAVE_SYS_EVENTFD_H) && defined(HAVE_EVENTFD)
# define USE_EVENTFD (1)
# include <sys/eventfd.h>
# define USE_EVENTFD (0)

#if defined(SIGVTALRM) && !defined(__CYGWIN__) && !defined(__EMSCRIPTEN__)
# define USE_UBF_LIST 1
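/*
 * The unblocking-function (UBF) timer periodically interrupts the process so
 * that blocked threads notice pending interrupts.  One of the three
 * implementations below is selected at build time: a POSIX per-process
 * timer, a dedicated pthread, or none at all.
 */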
#define UBF_TIMER_NONE 0
#define UBF_TIMER_POSIX 1
#define UBF_TIMER_PTHREAD 2

# if defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_CREATE) && \
     defined(CLOCK_MONOTONIC) && defined(USE_UBF_LIST)
#  define UBF_TIMER UBF_TIMER_POSIX
# elif defined(USE_UBF_LIST)
#  define UBF_TIMER UBF_TIMER_PTHREAD
#  define UBF_TIMER UBF_TIMER_NONE

struct rb_internal_thread_event_hook {
    rb_internal_thread_event_callback callback;
    struct rb_internal_thread_event_hook *next;

static rb_internal_thread_event_hook_t *rb_internal_thread_event_hooks = NULL;
static pthread_rwlock_t rb_internal_thread_event_hooks_rw_lock = PTHREAD_RWLOCK_INITIALIZER;

#define RB_INTERNAL_THREAD_HOOK(event) if (rb_internal_thread_event_hooks) { rb_thread_execute_hooks(event); }
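/*
 * Internal thread event hooks are kept in a singly linked list guarded by a
 * pthread rwlock: registration and removal take the write lock, while firing
 * the callbacks only takes the read lock.
 */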
rb_internal_thread_event_hook_t *
    rb_internal_thread_event_hook_t *hook = ALLOC_N(rb_internal_thread_event_hook_t, 1);
    hook->callback = callback;
    hook->user_data = user_data;
    hook->event = internal_event;

    if ((r = pthread_rwlock_wrlock(&rb_internal_thread_event_hooks_rw_lock))) {

    hook->next = rb_internal_thread_event_hooks;
    ATOMIC_PTR_EXCHANGE(rb_internal_thread_event_hooks, hook);

    if ((r = pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock))) {

    if ((r = pthread_rwlock_wrlock(&rb_internal_thread_event_hooks_rw_lock))) {

    bool success = FALSE;

    if (rb_internal_thread_event_hooks == hook) {
        ATOMIC_PTR_EXCHANGE(rb_internal_thread_event_hooks, hook->next);

        rb_internal_thread_event_hook_t *h = rb_internal_thread_event_hooks;
            if (h->next == hook) {
                h->next = hook->next;
        } while ((h = h->next));

    if ((r = pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock))) {

    if ((r = pthread_rwlock_rdlock(&rb_internal_thread_event_hooks_rw_lock))) {

    if (rb_internal_thread_event_hooks) {
        rb_internal_thread_event_hook_t *h = rb_internal_thread_event_hooks;
            if (h->event & event) {
                (*h->callback)(event, NULL, h->user_data);
            }
        } while ((h = h->next));

    if ((r = pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock))) {

#if UBF_TIMER == UBF_TIMER_POSIX
static const struct itimerspec zero;

#define TIMER_STATE_DEBUG 0
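/*
 * The POSIX UBF timer moves through a small state machine
 * (DISARM -> ARMING -> ARMED, plus DEAD at shutdown).  All transitions go
 * through the atomic exchange/compare-and-swap helpers below, so the timer
 * can be armed and disarmed without locks even from signal-handling paths.
 */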
rtimer_state_name(enum rtimer_state state)
      case RTIMER_DISARM: return "disarm";
      case RTIMER_ARMING: return "arming";
      case RTIMER_ARMED: return "armed";
      case RTIMER_DEAD: return "dead";
      default: rb_bug("unreachable");

static enum rtimer_state
timer_state_exchange(enum rtimer_state state)
    enum rtimer_state prev = ATOMIC_EXCHANGE(timer_posix.state_, state);
    if (TIMER_STATE_DEBUG) fprintf(stderr, "state (exc): %s->%s\n", rtimer_state_name(prev), rtimer_state_name(state));

static enum rtimer_state
timer_state_cas(enum rtimer_state expected_prev, enum rtimer_state state)
    enum rtimer_state prev = ATOMIC_CAS(timer_posix.state_, expected_prev, state);

    if (TIMER_STATE_DEBUG) {
        if (prev == expected_prev) {
            fprintf(stderr, "state (cas): %s->%s\n", rtimer_state_name(prev), rtimer_state_name(state));
            fprintf(stderr, "state (cas): %s (expected:%s)\n", rtimer_state_name(prev), rtimer_state_name(expected_prev));

#elif UBF_TIMER == UBF_TIMER_PTHREAD
static void *timer_pthread_fn(void *);

static const rb_hrtime_t *sigwait_timeout(rb_thread_t *, int sigwait_fd,
static void ubf_timer_disarm(void);
static void threadptr_trap_interrupt(rb_thread_t *);
static void ubf_wakeup_all_threads(void);
static int ubf_threads_empty(void);

#define TIMER_THREAD_CREATED_P() (signal_self_pipe.owner_process == getpid())

#define BUSY_WAIT_SIGNALS (0)

#define THREAD_INVALID ((const rb_thread_t *)-1)

#ifdef HAVE_SCHED_YIELD
#define native_thread_yield() (void)sched_yield()
#define native_thread_yield() ((void)0)

#if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK) && \
    defined(CLOCK_REALTIME) && defined(CLOCK_MONOTONIC) && \
    defined(HAVE_CLOCK_GETTIME)
static pthread_condattr_t condattr_mono;
static pthread_condattr_t *condattr_monotonic = &condattr_mono;
static const void *const condattr_monotonic = NULL;

#define TIME_QUANTUM_MSEC (100)
#define TIME_QUANTUM_USEC (TIME_QUANTUM_MSEC * 1000)
#define TIME_QUANTUM_NSEC (TIME_QUANTUM_USEC * 1000)
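/*
 * TIME_QUANTUM_{MSEC,USEC,NSEC} above give the GVL scheduling quantum:
 * roughly how long a thread may keep the GVL before the timer interrupts it
 * so another ready thread can be scheduled.
 */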
static rb_hrtime_t native_cond_timeout(rb_nativethread_cond_t *, rb_hrtime_t);
static int native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex,
                                 const rb_hrtime_t *abs);

    last = ccan_list_tail(&sched->readyq, rb_thread_t, sched.node.readyq);

    static rb_hrtime_t abs;

    if (sched->timer_err == ETIMEDOUT) {
        abs = native_cond_timeout(&th->nt->cond.readyq, TIME_QUANTUM_NSEC);
    sched->timer_err = native_cond_timedwait(&th->nt->cond.readyq, &sched->lock, &abs);

    ubf_wakeup_all_threads();
    ruby_sigchld_handler(vm);

    if (UNLIKELY(rb_signal_buff_size())) {
        if (th == vm->ractor.main_thread) {
            RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
            threadptr_trap_interrupt(vm->ractor.main_thread);

    if ((running = sched->running) != 0) {
        RUBY_VM_SET_TIMER_INTERRUPT(running->ec);

    ccan_list_add_tail(&sched->readyq, &th->sched.node.readyq);

    RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_READY);

    if (sched->running) {
        VM_ASSERT(th->unblock.func == 0 &&
                  "we must not be in ubf_list and GVL readyq at the same time");

        thread_sched_to_ready_common(sched, th);

                do_gvl_timer(sched, th);
        } while (sched->running);

        ccan_list_del_init(&th->sched.node.readyq);

        if (sched->need_yield) {
            sched->need_yield = 0;

        sched->timer_err = ETIMEDOUT;

    RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_RESUMED);

        if (!designate_timer_thread(sched) && !ubf_threads_empty()) {
            rb_thread_wakeup_timer_thread(-1);

    thread_sched_to_running_common(sched, th);

    sched->running = NULL;
    next = ccan_list_top(&sched->readyq, rb_thread_t, sched.node.readyq);

    RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED);

    thread_sched_to_waiting_common(sched);

    ubf_wakeup_all_threads();
    next = thread_sched_to_waiting_common(sched);

    if (UNLIKELY(sched->wait_yield)) {
        while (sched->wait_yield)

        sched->need_yield = 1;
        sched->wait_yield = 1;
        while (sched->need_yield)
        sched->wait_yield = 0;

        native_thread_yield();

    thread_sched_to_running_common(sched, th);

    ccan_list_head_init(&sched->readyq);
    sched->running = NULL;
    sched->timer_err = ETIMEDOUT;
    sched->need_yield = 0;
    sched->wait_yield = 0;
static void clear_thread_cache_altstack(void);

    clear_thread_cache_altstack();

#if defined(HAVE_WORKING_FORK)
static void thread_cache_reset(void);

    thread_cache_reset();
    rb_thread_sched_init(sched);
    thread_sched_to_running(sched, GET_THREAD());

#define NATIVE_MUTEX_LOCK_DEBUG 0

mutex_debug(const char *msg, void *lock)
    if (NATIVE_MUTEX_LOCK_DEBUG) {
        static pthread_mutex_t dbglock = PTHREAD_MUTEX_INITIALIZER;

        if ((r = pthread_mutex_lock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
        fprintf(stdout, "%s: %p\n", msg, lock);
        if ((r = pthread_mutex_unlock(&dbglock)) != 0) {exit(EXIT_FAILURE);}

    mutex_debug("lock", lock);
    if ((r = pthread_mutex_lock(lock)) != 0) {

    mutex_debug("unlock", lock);
    if ((r = pthread_mutex_unlock(lock)) != 0) {

    mutex_debug("trylock", lock);
    if ((r = pthread_mutex_trylock(lock)) != 0) {

    int r = pthread_mutex_init(lock, 0);
    mutex_debug("init", lock);

    int r = pthread_mutex_destroy(lock);
    mutex_debug("destroy", lock);

    int r = pthread_cond_init(cond, condattr_monotonic);

    int r = pthread_cond_destroy(cond);

        r = pthread_cond_signal(cond);
    } while (r == EAGAIN);

        r = pthread_cond_broadcast(cond);
    } while (r == EAGAIN);

    int r = pthread_cond_wait(cond, mutex);

native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex,
                      const rb_hrtime_t *abs)
        rb_hrtime2timespec(&ts, abs);
        r = pthread_cond_timedwait(cond, mutex, &ts);
    } while (r == EINTR);

    if (r != 0 && r != ETIMEDOUT) {

    rb_hrtime_t hrmsec = native_cond_timeout(cond, RB_HRTIME_PER_MSEC * msec);
    native_cond_timedwait(cond, mutex, &hrmsec);

native_cond_timeout(rb_nativethread_cond_t *cond, const rb_hrtime_t rel)
    if (condattr_monotonic) {
        return rb_hrtime_add(rb_hrtime_now(), rel);

    return rb_hrtime_add(rb_timespec2hrtime(&ts), rel);

#define native_cleanup_push pthread_cleanup_push
#define native_cleanup_pop  pthread_cleanup_pop
#ifdef RB_THREAD_LOCAL_SPECIFIER
static RB_THREAD_LOCAL_SPECIFIER rb_thread_t *ruby_native_thread;
static pthread_key_t ruby_native_thread_key;

ruby_thread_from_native(void)
#ifdef RB_THREAD_LOCAL_SPECIFIER
    return ruby_native_thread;
    return pthread_getspecific(ruby_native_thread_key);

    ccan_list_node_init(&th->sched.node.ubf);

        rb_ractor_set_current_ec(th->ractor, th->ec);

#ifdef RB_THREAD_LOCAL_SPECIFIER
    ruby_native_thread = th;
    return pthread_setspecific(ruby_native_thread_key, th) == 0;

#ifdef RB_THREAD_T_HAS_NATIVE_ID
get_native_thread_id(void)
    return (int)syscall(SYS_gettid);
#elif defined(__FreeBSD__)
    return pthread_getthreadid_np();

#ifdef RB_THREAD_T_HAS_NATIVE_ID
    nt->tid = get_native_thread_id();

    if (&nt->cond.readyq != &nt->cond.intr)

#if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK)
    if (condattr_monotonic) {
        int r = pthread_condattr_init(condattr_monotonic);
            r = pthread_condattr_setclock(condattr_monotonic, CLOCK_MONOTONIC);
        if (r) condattr_monotonic = NULL;

#ifndef RB_THREAD_LOCAL_SPECIFIER
    if (pthread_key_create(&ruby_native_thread_key, 0) == EAGAIN) {
        rb_bug("pthread_key_create failed (ruby_native_thread_key)");
    if (pthread_key_create(&ruby_current_ec_key, 0) == EAGAIN) {
        rb_bug("pthread_key_create failed (ruby_current_ec_key)");

    posix_signal(SIGVTALRM, null_func);

    main_th->nt->thread_id = pthread_self();
    ruby_thread_set_native(main_th);
    native_thread_init(main_th->nt);

#ifndef USE_THREAD_CACHE
#define USE_THREAD_CACHE 1

    if (&nt->cond.readyq != &nt->cond.intr)

    if (USE_THREAD_CACHE)
        ruby_thread_set_native(0);

static rb_thread_t *register_cached_thread_and_wait(void *);

#if defined HAVE_PTHREAD_GETATTR_NP || defined HAVE_PTHREAD_ATTR_GET_NP
#define STACKADDR_AVAILABLE 1
#elif defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP
#define STACKADDR_AVAILABLE 1
#undef MAINSTACKADDR_AVAILABLE
#define MAINSTACKADDR_AVAILABLE 1
void *pthread_get_stackaddr_np(pthread_t);
size_t pthread_get_stacksize_np(pthread_t);
#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
#define STACKADDR_AVAILABLE 1
#elif defined HAVE_PTHREAD_GETTHRDS_NP
#define STACKADDR_AVAILABLE 1
#elif defined __HAIKU__
#define STACKADDR_AVAILABLE 1

#ifndef MAINSTACKADDR_AVAILABLE
# ifdef STACKADDR_AVAILABLE
#   define MAINSTACKADDR_AVAILABLE 1
#   define MAINSTACKADDR_AVAILABLE 0

#if MAINSTACKADDR_AVAILABLE && !defined(get_main_stack)
# define get_main_stack(addr, size) get_stack(addr, size)
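/*
 * get_stack() queries the stack base address and size of the calling thread,
 * using whichever platform-specific API the configure checks found
 * (pthread_getattr_np, pthread_attr_get_np, pthread_get_stackaddr_np,
 * thr_stksegment, pthread_getthrds_np, or Haiku's get_thread_info).
 */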
#ifdef STACKADDR_AVAILABLE
get_stack(void **addr, size_t *size)
#define CHECK_ERR(expr) \
    {int err = (expr); if (err) return err;}
#ifdef HAVE_PTHREAD_GETATTR_NP
    STACK_GROW_DIR_DETECTION;
    CHECK_ERR(pthread_getattr_np(pthread_self(), &attr));
# ifdef HAVE_PTHREAD_ATTR_GETSTACK
    CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
    CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
    CHECK_ERR(pthread_attr_getstacksize(&attr, size));
# ifdef HAVE_PTHREAD_ATTR_GETGUARDSIZE
    CHECK_ERR(pthread_attr_getguardsize(&attr, &guard));
    guard = getpagesize();
    pthread_attr_destroy(&attr);
#elif defined HAVE_PTHREAD_ATTR_GET_NP
    CHECK_ERR(pthread_attr_init(&attr));
    CHECK_ERR(pthread_attr_get_np(pthread_self(), &attr));
# ifdef HAVE_PTHREAD_ATTR_GETSTACK
    CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
    CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
    CHECK_ERR(pthread_attr_getstacksize(&attr, size));
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
    pthread_attr_destroy(&attr);
#elif (defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP)
    pthread_t th = pthread_self();
    *addr = pthread_get_stackaddr_np(th);
    *size = pthread_get_stacksize_np(th);
#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
# if defined HAVE_THR_STKSEGMENT
    CHECK_ERR(thr_stksegment(&stk));
    CHECK_ERR(pthread_stackseg_np(pthread_self(), &stk));
#elif defined HAVE_PTHREAD_GETTHRDS_NP
    pthread_t th = pthread_self();
    struct __pthrdsinfo thinfo;
    int regsiz = sizeof(reg);
    CHECK_ERR(pthread_getthrds_np(&th, PTHRDSINFO_QUERY_ALL,
                                  &thinfo, sizeof(thinfo),
    *addr = thinfo.__pi_stackaddr;
    *size = thinfo.__pi_stackend - thinfo.__pi_stackaddr;
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
#elif defined __HAIKU__
    STACK_GROW_DIR_DETECTION;
    CHECK_ERR(get_thread_info(find_thread(NULL), &info));
    *addr = info.stack_base;
    *size = (uintptr_t)info.stack_end - (uintptr_t)info.stack_base;
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
#error STACKADDR_AVAILABLE is defined but not implemented.
    rb_nativethread_id_t id;
    size_t stack_maxsize;

#ifdef STACK_END_ADDRESS
extern void *STACK_END_ADDRESS;

    RUBY_STACK_SPACE_LIMIT = 1024 * 1024,
    RUBY_STACK_SPACE_RATIO = 5

space_size(size_t stack_size)
    size_t space_size = stack_size / RUBY_STACK_SPACE_RATIO;
    if (space_size > RUBY_STACK_SPACE_LIMIT) {
        return RUBY_STACK_SPACE_LIMIT;

reserve_stack(volatile char *limit, size_t size)
# error needs alloca()
    volatile char buf[0x100];
    enum {stack_check_margin = 0x1000};

    STACK_GROW_DIR_DETECTION;

    if (!getrlimit(RLIMIT_STACK, &rl) && rl.rlim_cur == RLIM_INFINITY)
    if (size < stack_check_margin) return;
    size -= stack_check_margin;

    if (IS_STACK_DIR_UPPER()) {
        const volatile char *end = buf + sizeof(buf);
            size_t sz = limit - end;
        size_t sz = buf - limit;

# define reserve_stack(limit, size) ((void)(limit), (void)(size))

#undef ruby_init_stack

    native_main_thread.id = pthread_self();

#if MAINSTACKADDR_AVAILABLE
    if (native_main_thread.stack_maxsize) return;
        if (get_main_stack(&stackaddr, &size) == 0) {
            native_main_thread.stack_maxsize = size;
            native_main_thread.stack_start = stackaddr;
            reserve_stack(stackaddr, size);

#ifdef STACK_END_ADDRESS
    native_main_thread.stack_start = STACK_END_ADDRESS;
    if (!native_main_thread.stack_start ||
        STACK_UPPER((VALUE *)(void *)&addr,
                    native_main_thread.stack_start > addr,
                    native_main_thread.stack_start < addr)) {
        native_main_thread.stack_start = (VALUE *)addr;

#if defined(HAVE_GETRLIMIT)
#if defined(PTHREAD_STACK_DEFAULT)
# if PTHREAD_STACK_DEFAULT < RUBY_STACK_SPACE*5
#  error "PTHREAD_STACK_DEFAULT is too small"
    size_t size = PTHREAD_STACK_DEFAULT;
    size_t size = RUBY_VM_THREAD_VM_STACK_SIZE;
    int pagesize = getpagesize();

    STACK_GROW_DIR_DETECTION;
    if (getrlimit(RLIMIT_STACK, &rlim) == 0) {
        size = (size_t)rlim.rlim_cur;
    addr = native_main_thread.stack_start;
    if (IS_STACK_DIR_UPPER()) {
        space = ((size_t)((char *)addr + size) / pagesize) * pagesize - (size_t)addr;
        space = (size_t)addr - ((size_t)((char *)addr - size) / pagesize + 1) * pagesize;
    native_main_thread.stack_maxsize = space;

#if MAINSTACKADDR_AVAILABLE
        STACK_GROW_DIR_DETECTION;

        if (IS_STACK_DIR_UPPER()) {
            start = native_main_thread.stack_start;
            end = (char *)native_main_thread.stack_start + native_main_thread.stack_maxsize;
            start = (char *)native_main_thread.stack_start - native_main_thread.stack_maxsize;
            end = native_main_thread.stack_start;

        if ((void *)addr < start || (void *)addr > end) {
            native_main_thread.stack_start = (VALUE *)addr;
            native_main_thread.stack_maxsize = 0;

#define CHECK_ERR(expr) \
    {int err = (expr); if (err) {rb_bug_errno(#expr, err);}}
    rb_nativethread_id_t curr = pthread_self();

    if (pthread_equal(curr, native_main_thread.id)) {
        th->ec->machine.stack_start = native_main_thread.stack_start;
        th->ec->machine.stack_maxsize = native_main_thread.stack_maxsize;

#ifdef STACKADDR_AVAILABLE
        if (get_stack(&start, &size) == 0) {
            uintptr_t diff = (uintptr_t)start - (uintptr_t)&curr;
            th->ec->machine.stack_start = (VALUE *)&curr;
            th->ec->machine.stack_maxsize = size - diff;

#define USE_NATIVE_THREAD_INIT 1

thread_start_func_1(void *th_ptr)
    RB_ALTSTACK_INIT(void *altstack, th->nt->altstack);
#if !defined USE_NATIVE_THREAD_INIT
#if defined USE_NATIVE_THREAD_INIT
        native_thread_init_stack(th);
        native_thread_init(th->nt);

        RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_STARTED);

#if defined USE_NATIVE_THREAD_INIT
        thread_start_func_2(th, th->ec->machine.stack_start);
        thread_start_func_2(th, &stack_start);

        RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_EXITED);

    if ((th = register_cached_thread_and_wait(RB_ALTSTACK(altstack))) != 0) {

    RB_ALTSTACK_FREE(altstack);

struct cached_thread_entry {
    rb_nativethread_cond_t cond;
    rb_nativethread_id_t thread_id;
    struct ccan_list_node node;

static rb_nativethread_lock_t thread_cache_lock = RB_NATIVETHREAD_LOCK_INIT;
static CCAN_LIST_HEAD(cached_thread_head);

# if defined(HAVE_WORKING_FORK)
thread_cache_reset(void)
    ccan_list_head_init(&cached_thread_head);

#ifndef THREAD_CACHE_TIME
#  define THREAD_CACHE_TIME ((rb_hrtime_t)3 * RB_HRTIME_PER_SEC)
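/*
 * Thread caching: when a Ruby thread exits, its native thread parks on a
 * condition variable for up to THREAD_CACHE_TIME (3 seconds) waiting to be
 * reused by the next Thread.new, which avoids a pthread_create() call.
 */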
register_cached_thread_and_wait(void *altstack)
    rb_hrtime_t end = THREAD_CACHE_TIME;
    struct cached_thread_entry entry;

    entry.altstack = altstack;
    entry.thread_id = pthread_self();
    end = native_cond_timeout(&entry.cond, end);

        ccan_list_add(&cached_thread_head, &entry.node);

            native_cond_timedwait(&entry.cond, &thread_cache_lock, &end);

        if (entry.th == NULL) {
            ccan_list_del(&entry.node);

    RB_ALTSTACK_FREE(entry.altstack);

# if defined(HAVE_WORKING_FORK)
static void thread_cache_reset(void) { }

    struct cached_thread_entry *entry;

        entry = ccan_list_pop(&cached_thread_head, struct cached_thread_entry, node);
            th->nt->thread_id = entry->thread_id;

clear_thread_cache_altstack(void)
    struct cached_thread_entry *entry;

    ccan_list_for_each(&cached_thread_head, entry, node) {
        void MAYBE_UNUSED(*altstack) = entry->altstack;
        entry->altstack = 0;
        RB_ALTSTACK_FREE(altstack);

    VM_ASSERT(th->nt == 0);

    if (use_cached_thread(th)) {
        RUBY_DEBUG_LOG("use cached nt. th:%u", rb_th_serial(th));

        pthread_attr_t attr;
        const size_t stack_size = th->vm->default_params.thread_machine_stack_size + th->vm->default_params.thread_vm_stack_size;
        const size_t space = space_size(stack_size);

#ifdef USE_SIGALTSTACK
        th->nt->altstack = rb_allocate_sigaltstack();
        th->ec->machine.stack_maxsize = stack_size - space;

        CHECK_ERR(pthread_attr_init(&attr));

# ifdef PTHREAD_STACK_MIN
        RUBY_DEBUG_LOG("stack size: %lu", (unsigned long)stack_size);
        CHECK_ERR(pthread_attr_setstacksize(&attr, stack_size));

# ifdef HAVE_PTHREAD_ATTR_SETINHERITSCHED
        CHECK_ERR(pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));
        CHECK_ERR(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));

        err = pthread_create(&th->nt->thread_id, &attr, thread_start_func_1, th);

        RUBY_DEBUG_LOG("th:%u err:%d", rb_th_serial(th), err);

        CHECK_ERR(pthread_attr_destroy(&attr));

#if USE_NATIVE_THREAD_PRIORITY

#if defined(_POSIX_PRIORITY_SCHEDULING) && (_POSIX_PRIORITY_SCHEDULING > 0)
    struct sched_param sp;
    int priority = 0 - th->priority;

    pthread_getschedparam(th->nt->thread_id, &policy, &sp);
    max = sched_get_priority_max(policy);
    min = sched_get_priority_min(policy);

    if (min > priority) {
    else if (max < priority) {

    sp.sched_priority = priority;
    pthread_setschedparam(th->nt->thread_id, policy, &sp);

    return rb_fd_select(n, readfds, writefds, exceptfds, timeout);

ubf_pthread_cond_signal(void *ptr)
    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));

native_cond_sleep(rb_thread_t *th, rb_hrtime_t *rel)
    rb_nativethread_lock_t *lock = &th->interrupt_lock;
    rb_nativethread_cond_t *cond = &th->nt->cond.intr;

    const rb_hrtime_t max = (rb_hrtime_t)100000000 * RB_HRTIME_PER_SEC;

    THREAD_BLOCKING_BEGIN(th);
        th->unblock.func = ubf_pthread_cond_signal;
        th->unblock.arg = th;

        if (RUBY_VM_INTERRUPTED(th->ec)) {
            RUBY_DEBUG_LOG("interrupted before sleep th:%u", rb_th_serial(th));

                end = native_cond_timeout(cond, *rel);
                native_cond_timedwait(cond, lock, &end);

        th->unblock.func = 0;

    THREAD_BLOCKING_END(th);

    RUBY_DEBUG_LOG("done th:%u", rb_th_serial(th));
static CCAN_LIST_HEAD(ubf_list_head);
static rb_nativethread_lock_t ubf_list_lock = RB_NATIVETHREAD_LOCK_INIT;

ubf_list_atfork(void)
    ccan_list_head_init(&ubf_list_head);
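/*
 * ubf_list_head tracks threads that are blocked in native code with an
 * unblocking function registered; the timer thread walks this list and sends
 * SIGVTALRM to each entry so blocked threads re-check their interrupt flags.
 */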
    struct ccan_list_node *node = &th->sched.node.ubf;

    if (ccan_list_empty((struct ccan_list_head*)node)) {
        ccan_list_add(&ubf_list_head, node);

    struct ccan_list_node *node = &th->sched.node.ubf;

    VM_ASSERT(th->unblock.func == 0);

    if (!ccan_list_empty((struct ccan_list_head*)node)) {
        ccan_list_del_init(node);
        if (ccan_list_empty(&ubf_list_head) && !rb_signal_buff_size()) {

    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
    pthread_kill(th->nt->thread_id, SIGVTALRM);

ubf_select(void *ptr)
    const rb_thread_t *cur = ruby_thread_from_native();

    register_ubf_list(th);

    if (cur != sched->timer && cur != sigwait_th) {
        if (!sched->timer) {
            rb_thread_wakeup_timer_thread(-1);

    ubf_wakeup_thread(th);

ubf_threads_empty(void)
    return ccan_list_empty(&ubf_list_head);

ubf_wakeup_all_threads(void)
    if (!ubf_threads_empty()) {
        ccan_list_for_each(&ubf_list_head, th, sched.node.ubf) {
            ubf_wakeup_thread(th);

#define register_ubf_list(th) (void)(th)
#define unregister_ubf_list(th) (void)(th)
static void ubf_wakeup_all_threads(void) { return; }
static int ubf_threads_empty(void) { return 1; }
#define ubf_list_atfork() do {} while (0)
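/*
 * signal_self_pipe is the classic self-pipe (or an eventfd, where available)
 * used to wake the process out of ppoll()/poll() when a signal or an
 * unblocking request arrives.
 */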
#define WRITE_CONST(fd, str) (void)(write((fd),(str),sizeof(str)-1)<0)

    volatile rb_pid_t owner_process;
} signal_self_pipe = {

rb_thread_wakeup_timer_thread_fd(int fd)
    const uint64_t buff = 1;
    const char buff = '!';

    if ((result = write(fd, &buff, sizeof(buff))) <= 0) {
          case EINTR: goto retry;
#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
            async_bug_fd("rb_thread_wakeup_timer_thread: write", e, fd);

    if (TT_DEBUG) WRITE_CONST(2, "rb_thread_wakeup_timer_thread: write\n");
ubf_timer_arm(rb_pid_t current)
#if UBF_TIMER == UBF_TIMER_POSIX
    if ((!current || timer_posix.owner == current) &&
        timer_state_cas(RTIMER_DISARM, RTIMER_ARMING) == RTIMER_DISARM) {
        struct itimerspec it;

        it.it_interval.tv_sec = it.it_value.tv_sec = 0;
        it.it_interval.tv_nsec = it.it_value.tv_nsec = TIME_QUANTUM_NSEC;

        if (timer_settime(timer_posix.timerid, 0, &it, 0))
            rb_async_bug_errno("timer_settime (arm)", errno);

        switch (timer_state_cas(RTIMER_ARMING, RTIMER_ARMED)) {
            (void)timer_settime(timer_posix.timerid, 0, &zero, 0);
          case RTIMER_ARMING: return;
            (void)timer_settime(timer_posix.timerid, 0, &zero, 0);
            rb_async_bug_errno("UBF_TIMER_POSIX unknown state", ERANGE);
#elif UBF_TIMER == UBF_TIMER_PTHREAD
    if (!current || current == timer_pthread.owner) {
        if (ATOMIC_EXCHANGE(timer_pthread.armed, 1) == 0)
            rb_thread_wakeup_timer_thread_fd(timer_pthread.low[1]);

rb_thread_wakeup_timer_thread(int sig)
        rb_thread_wakeup_timer_thread_fd(signal_self_pipe.normal[1]);

    if (signal_self_pipe.owner_process == current) {
        rb_thread_wakeup_timer_thread_fd(signal_self_pipe.normal[1]);

        if (system_working > 0) {

            mth = vm->ractor.main_thread;
            if (!mth || system_working <= 0) return;

                RUBY_VM_SET_TRAP_INTERRUPT(ec);
                ubf_timer_arm(current);

                if (vm->ubf_async_safe && mth->unblock.func) {
                    (mth->unblock.func)(mth->unblock.arg);
#define CLOSE_INVALIDATE_PAIR(expr) \
    close_invalidate_pair(expr,"close_invalidate: "#expr)

close_invalidate(int *fdp, const char *msg)
    if (close(fd) < 0) {
        async_bug_fd(msg, errno, fd);

close_invalidate_pair(int fds[2], const char *msg)
    if (USE_EVENTFD && fds[0] == fds[1]) {
        close_invalidate(&fds[0], msg);
        close_invalidate(&fds[0], msg);
        close_invalidate(&fds[1], msg);

    oflags = fcntl(fd, F_GETFL);
    oflags |= O_NONBLOCK;
    err = fcntl(fd, F_SETFL, oflags);

setup_communication_pipe_internal(int pipes[2])
    if (pipes[0] >= 0 || pipes[1] >= 0) {
        VM_ASSERT(pipes[0] >= 0);
        VM_ASSERT(pipes[1] >= 0);

#if USE_EVENTFD && defined(EFD_NONBLOCK) && defined(EFD_CLOEXEC)
    pipes[0] = pipes[1] = eventfd(0, EFD_NONBLOCK|EFD_CLOEXEC);
    if (pipes[0] >= 0) {

        rb_warn("pipe creation failed for timer: %s, scheduling broken",

    set_nonblock(pipes[0]);
    set_nonblock(pipes[1]);

#if !defined(SET_CURRENT_THREAD_NAME) && defined(__linux__) && defined(PR_SET_NAME)
# define SET_CURRENT_THREAD_NAME(name) prctl(PR_SET_NAME, name)

#if defined(__linux__)
#elif defined(__APPLE__)

#ifdef SET_CURRENT_THREAD_NAME
    if (!NIL_P(loc = th->name)) {
    else if ((loc = threadptr_invoke_proc_location(th)) != Qnil) {
        char buf[THREAD_NAME_MAX];

        p = strrchr(name, '/');

        if (len >= sizeof(buf)) {
            buf[sizeof(buf)-2] = '*';
            buf[sizeof(buf)-1] = '\0';
        SET_CURRENT_THREAD_NAME(buf);

native_set_another_thread_name(rb_nativethread_id_t thread_id, VALUE name)
#if defined SET_ANOTHER_THREAD_NAME || defined SET_CURRENT_THREAD_NAME
    char buf[THREAD_NAME_MAX];
# if !defined SET_ANOTHER_THREAD_NAME
    if (!pthread_equal(pthread_self(), thread_id)) return;
        if (n >= (int)sizeof(buf)) {
            memcpy(buf, s, sizeof(buf)-1);
            buf[sizeof(buf)-1] = '\0';
# if defined SET_ANOTHER_THREAD_NAME
    SET_ANOTHER_THREAD_NAME(thread_id, s);
# elif defined SET_CURRENT_THREAD_NAME
    SET_CURRENT_THREAD_NAME(s);
#if defined(RB_THREAD_T_HAS_NATIVE_ID) || defined(__APPLE__)
native_thread_native_thread_id(rb_thread_t *target_th)
#ifdef RB_THREAD_T_HAS_NATIVE_ID
    int tid = target_th->nt->tid;
    if (tid == 0) return Qnil;
#elif defined(__APPLE__)
# if (!defined(MAC_OS_X_VERSION_10_6) || \
      (MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_6) || \
      defined(__POWERPC__) )
    const bool no_pthread_threadid_np = true;
#  define NO_PTHREAD_MACH_THREAD_NP 1
# elif MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_6
    const bool no_pthread_threadid_np = false;
#  if !(defined(__has_attribute) && __has_attribute(availability))
    __attribute__((weak)) int pthread_threadid_np(pthread_t, uint64_t*);
    const bool no_pthread_threadid_np = !&pthread_threadid_np;
    if (no_pthread_threadid_np) {
        return ULL2NUM(pthread_mach_thread_np(pthread_self()));
# ifndef NO_PTHREAD_MACH_THREAD_NP
    int e = pthread_threadid_np(target_th->nt->thread_id, &tid);
    return ULL2NUM((unsigned long long)tid);
# define USE_NATIVE_THREAD_NATIVE_THREAD_ID 1
# define USE_NATIVE_THREAD_NATIVE_THREAD_ID 0

ubf_timer_invalidate(void)
#if UBF_TIMER == UBF_TIMER_PTHREAD
    CLOSE_INVALIDATE_PAIR(timer_pthread.low);

ubf_timer_pthread_create(rb_pid_t current)
#if UBF_TIMER == UBF_TIMER_PTHREAD
    if (timer_pthread.owner == current)
    if (setup_communication_pipe_internal(timer_pthread.low) < 0)

    err = pthread_create(&timer_pthread.thid, 0, timer_pthread_fn, GET_VM());
        timer_pthread.owner = current;
        rb_warn("pthread_create failed for timer: %s, signals racy",

ubf_timer_create(rb_pid_t current)
#if UBF_TIMER == UBF_TIMER_POSIX
# define UBF_TIMER_CLOCK CLOCK_REALTIME
# define UBF_TIMER_CLOCK CLOCK_MONOTONIC

    struct sigevent sev;

    sev.sigev_notify = SIGEV_SIGNAL;
    sev.sigev_signo = SIGVTALRM;
    sev.sigev_value.sival_ptr = &timer_posix;

    if (!timer_create(UBF_TIMER_CLOCK, &sev, &timer_posix.timerid)) {
        rb_atomic_t prev = timer_state_exchange(RTIMER_DISARM);

        if (prev != RTIMER_DEAD) {
            rb_bug("timer_posix was not dead: %u\n", (unsigned)prev);
        timer_posix.owner = current;

        rb_warn("timer_create failed: %s, signals racy", strerror(errno));

    if (UBF_TIMER == UBF_TIMER_PTHREAD)
        ubf_timer_pthread_create(current);
rb_thread_create_timer_thread(void)
    rb_pid_t current = getpid();
    rb_pid_t owner = signal_self_pipe.owner_process;

    if (owner && owner != current) {
        CLOSE_INVALIDATE_PAIR(signal_self_pipe.normal);
        CLOSE_INVALIDATE_PAIR(signal_self_pipe.ub_main);
        ubf_timer_invalidate();

    if (setup_communication_pipe_internal(signal_self_pipe.normal) < 0) return;
    if (setup_communication_pipe_internal(signal_self_pipe.ub_main) < 0) return;

    ubf_timer_create(current);
    if (owner != current) {
        sigwait_th = THREAD_INVALID;
        signal_self_pipe.owner_process = current;

ubf_timer_disarm(void)
#if UBF_TIMER == UBF_TIMER_POSIX
    if (timer_posix.owner && timer_posix.owner != getpid()) return;
    prev = timer_state_cas(RTIMER_ARMED, RTIMER_DISARM);

      case RTIMER_DISARM: return;
      case RTIMER_ARMING: return;
        if (timer_settime(timer_posix.timerid, 0, &zero, 0)) {
            if (err == EINVAL) {
                prev = timer_state_cas(RTIMER_DISARM, RTIMER_DISARM);

                if (prev == RTIMER_DEAD) return;
      case RTIMER_DEAD: return;
        rb_bug("UBF_TIMER_POSIX bad state: %u\n", (unsigned)prev);
#elif UBF_TIMER == UBF_TIMER_PTHREAD
    ATOMIC_SET(timer_pthread.armed, 0);

ubf_timer_destroy(void)
#if UBF_TIMER == UBF_TIMER_POSIX
    if (timer_posix.owner == getpid()) {
        size_t i, max = 10000000;

        for (i = 0; i < max; i++) {
            switch (timer_state_cas(expect, RTIMER_DEAD)) {
                if (expect == RTIMER_DISARM) goto done;
                expect = RTIMER_DISARM;
                native_thread_yield();
                expect = RTIMER_ARMED;
                if (expect == RTIMER_ARMED) {
                    if (timer_settime(timer_posix.timerid, 0, &zero, 0))
                expect = RTIMER_ARMED;
                rb_bug("RTIMER_DEAD unexpected");

        rb_bug("timed out waiting for timer to arm");

        if (timer_delete(timer_posix.timerid) < 0)

        VM_ASSERT(timer_state_exchange(RTIMER_DEAD) == RTIMER_DEAD);
#elif UBF_TIMER == UBF_TIMER_PTHREAD
    timer_pthread.owner = 0;

    rb_thread_wakeup_timer_thread_fd(timer_pthread.low[1]);
    err = pthread_join(timer_pthread.thid, 0);
native_stop_timer_thread(void)
    stopped = --system_working <= 0;
        ubf_timer_destroy();

    if (TT_DEBUG) fprintf(stderr, "stop timer thread\n");

native_reset_timer_thread(void)
    if (TT_DEBUG) fprintf(stderr, "reset timer thread\n");

#ifdef HAVE_SIGALTSTACK
ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)
    const size_t water_mark = 1024 * 1024;
    STACK_GROW_DIR_DETECTION;

#ifdef STACKADDR_AVAILABLE
    if (get_stack(&base, &size) == 0) {
        if (pthread_equal(th->nt->thread_id, native_main_thread.id)) {
            if (getrlimit(RLIMIT_STACK, &rlim) == 0 && rlim.rlim_cur > size) {
                size = (size_t)rlim.rlim_cur;
        base = (char *)base + STACK_DIR_UPPER(+size, -size);
        size = th->ec->machine.stack_maxsize;
        base = (char *)th->ec->machine.stack_start - STACK_DIR_UPPER(0, size);

    size /= RUBY_STACK_SPACE_RATIO;
    if (size > water_mark) size = water_mark;
    if (IS_STACK_DIR_UPPER()) {
        if (size > ~(size_t)base+1) size = ~(size_t)base+1;
        if (addr > base && addr <= (void *)((char *)base + size)) return 1;
        if (size > (size_t)base) size = (size_t)base;
        if (addr > (void *)((char *)base - size) && addr <= base) return 1;

#if UBF_TIMER == UBF_TIMER_PTHREAD
    if (fd == timer_pthread.low[0] || fd == timer_pthread.low[1])
    if (fd == signal_self_pipe.normal[0] || fd == signal_self_pipe.normal[1])
    if (fd == signal_self_pipe.ub_main[0] || fd == signal_self_pipe.ub_main[1])
    if (signal_self_pipe.owner_process == getpid())

    return pthread_self();

    if (signal_self_pipe.normal[0] >= 0) {
        VM_ASSERT(signal_self_pipe.owner_process == getpid());
        if (ATOMIC_PTR_CAS(sigwait_th, THREAD_INVALID, th) == THREAD_INVALID) {
            return signal_self_pipe.normal[0];

    VM_ASSERT(signal_self_pipe.normal[0] == fd);
    old = ATOMIC_PTR_EXCHANGE(sigwait_th, THREAD_INVALID);
    if (old != th) assert(old == th);
ruby_ppoll(struct pollfd *fds, nfds_t nfds,
           const struct timespec *ts, const sigset_t *sigmask)
        if (ts->tv_sec > INT_MAX/1000)
            timeout_ms = INT_MAX;
            tmp = (int)(ts->tv_sec * 1000);
            tmp2 = (int)((ts->tv_nsec + 999999L) / (1000L * 1000L));
            if (INT_MAX - tmp < tmp2)
                timeout_ms = INT_MAX;
                timeout_ms = (int)(tmp + tmp2);

    return poll(fds, nfds, timeout_ms);

# define ppoll(fds,nfds,ts,sigmask) ruby_ppoll((fds),(nfds),(ts),(sigmask))

rb_sigwait_sleep(rb_thread_t *th, int sigwait_fd, const rb_hrtime_t *rel)
    pfd.fd = sigwait_fd;
    pfd.events = POLLIN;

    if (!BUSY_WAIT_SIGNALS && ubf_threads_empty()) {
        (void)ppoll(&pfd, 1, rb_hrtime2timespec(&ts, rel), 0);
        check_signals_nogvl(th, sigwait_fd);

        rb_hrtime_t to = RB_HRTIME_MAX, end = 0;
            end = rb_hrtime_add(rb_hrtime_now(), to);
            const rb_hrtime_t *sto = sigwait_timeout(th, sigwait_fd, &to, &n);
                n = ppoll(&pfd, 1, rb_hrtime2timespec(&ts, sto), 0);
            if (check_signals_nogvl(th, sigwait_fd))
            if (n || (th && RUBY_VM_INTERRUPTED(th->ec)))
            if (rel && hrtime_update_expire(&to, end))

ubf_ppoll_sleep(void *ignore)
    rb_thread_wakeup_timer_thread_fd(signal_self_pipe.ub_main[1]);
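/*
 * THREAD_BLOCKING_YIELD releases the GVL around a blocking wait; if no other
 * thread was waiting for the GVL but the ractor still has other living
 * threads, it yields the CPU with native_thread_yield() so they can run.
 */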
#define THREAD_BLOCKING_YIELD(th) do { \
    const rb_thread_t *next; \
    struct rb_thread_sched *sched = TH_SCHED(th); \
    RB_GC_SAVE_MACHINE_CONTEXT(th); \
    rb_native_mutex_lock(&sched->lock); \
    next = thread_sched_to_waiting_common(sched); \
    rb_native_mutex_unlock(&sched->lock); \
    if (!next && rb_ractor_living_thread_num(th->ractor) > 1) { \
        native_thread_yield(); \

native_ppoll_sleep(rb_thread_t *th, rb_hrtime_t *rel)
    th->unblock.func = ubf_ppoll_sleep;

    THREAD_BLOCKING_YIELD(th);
        if (!RUBY_VM_INTERRUPTED(th->ec)) {
            struct pollfd pfd[2];

            pfd[0].fd = signal_self_pipe.normal[0];
            pfd[1].fd = signal_self_pipe.ub_main[0];
            pfd[0].events = pfd[1].events = POLLIN;
            if (ppoll(pfd, 2, rb_hrtime2timespec(&ts, rel), 0) > 0) {
                if (pfd[1].revents & POLLIN) {
                    (void)consume_communication_pipe(pfd[1].fd);

        unblock_function_clear(th);
    THREAD_BLOCKING_END(th);

    int sigwait_fd = rb_sigwait_fd_get(th);
    rb_ractor_blocking_threads_inc(th->ractor, __FILE__, __LINE__);

    RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED);

    if (sigwait_fd >= 0) {
        th->unblock.func = ubf_sigwait;

        THREAD_BLOCKING_YIELD(th);
            if (!RUBY_VM_INTERRUPTED(th->ec)) {
                rb_sigwait_sleep(th, sigwait_fd, rel);
                check_signals_nogvl(th, sigwait_fd);

            unblock_function_clear(th);
        THREAD_BLOCKING_END(th);

        rb_sigwait_fd_put(th, sigwait_fd);
        rb_sigwait_fd_migrate(th->vm);
    else if (th == th->vm->ractor.main_thread) {
        native_ppoll_sleep(th, rel);
        native_cond_sleep(th, rel);

    rb_ractor_blocking_threads_dec(th->ractor, __FILE__, __LINE__);
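/*
 * Fallback timer implementation: when no POSIX per-process timer is
 * available, a dedicated pthread polls its wakeup pipe and, while armed,
 * sends SIGVTALRM to the main thread every scheduling quantum.
 */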
#if UBF_TIMER == UBF_TIMER_PTHREAD
timer_pthread_fn(void *p)
    pthread_t main_thread_id = vm->ractor.main_thread->nt->thread_id;

    pfd.fd = timer_pthread.low[0];
    pfd.events = POLLIN;

    while (system_working > 0) {
        (void)poll(&pfd, 1, timeout);
        ccp = consume_communication_pipe(pfd.fd);

        if (system_working > 0) {
            if (ATOMIC_CAS(timer_pthread.armed, 1, 1)) {
                pthread_kill(main_thread_id, SIGVTALRM);

                if (rb_signal_buff_size() || !ubf_threads_empty()) {
                    timeout = TIME_QUANTUM_MSEC;

                ATOMIC_SET(timer_pthread.armed, 0);

            pthread_kill(main_thread_id, SIGVTALRM);
            ATOMIC_SET(timer_pthread.armed, 0);

ubf_caller(void *ignore)

rb_thread_start_unblock_thread(void)